xref: /linux/drivers/base/power/main.c (revision ac6a0cf6716bb46813d0161024c66c2af66e53d1)
1 /*
2  * drivers/base/power/main.c - Where the driver meets power management.
3  *
4  * Copyright (c) 2003 Patrick Mochel
5  * Copyright (c) 2003 Open Source Development Lab
6  *
7  * This file is released under the GPLv2
8  *
9  *
10  * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
12  * and add it to the list of power-controlled devices. sysfs entries for
13  * controlling device power management will also be added.
14  *
15  * A separate list is used for keeping track of power info, because the power
16  * domain dependencies may differ from the ancestral dependencies that the
17  * subsystem list maintains.
18  */
19 
20 #include <linux/device.h>
21 #include <linux/kallsyms.h>
22 #include <linux/mutex.h>
23 #include <linux/pm.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/resume-trace.h>
26 #include <linux/rwsem.h>
27 #include <linux/interrupt.h>
28 
29 #include "../base.h"
30 #include "power.h"
31 
32 /*
33  * The entries in the dpm_list list are in a depth first order, simply
34  * because children are guaranteed to be discovered after parents, and
35  * are inserted at the back of the list on discovery.
36  *
37  * Since device_pm_add() may be called with a device semaphore held,
38  * we must never try to acquire a device semaphore while holding
39  * dpm_list_mutex.
40  */
41 
42 LIST_HEAD(dpm_list);
43 
44 static DEFINE_MUTEX(dpm_list_mtx);
45 
46 /*
47  * Set once the preparation of devices for a PM transition has started, reset
48  * before starting to resume devices.  Protected by dpm_list_mtx.
49  */
50 static bool transition_started;
51 
/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 *
 * Marks the device as fully powered (DPM_ON) and sets up its runtime PM
 * state; called by the driver core when the device object is created.
 */
void device_pm_init(struct device *dev)
{
	dev->power.status = DPM_ON;
	pm_runtime_init(dev);
}
61 
/**
 *	device_pm_lock - lock the list of active devices used by the PM core
 *
 *	Acquires dpm_list_mtx, which serializes all access to dpm_list.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}
69 
/**
 *	device_pm_unlock - unlock the list of active devices used by the PM core
 *
 *	Releases dpm_list_mtx taken by device_pm_lock().
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
77 
78 /**
79  *	device_pm_add - add a device to the list of active devices
80  *	@dev:	Device to be added to the list
81  */
82 void device_pm_add(struct device *dev)
83 {
84 	pr_debug("PM: Adding info for %s:%s\n",
85 		 dev->bus ? dev->bus->name : "No Bus",
86 		 kobject_name(&dev->kobj));
87 	mutex_lock(&dpm_list_mtx);
88 	if (dev->parent) {
89 		if (dev->parent->power.status >= DPM_SUSPENDING)
90 			dev_warn(dev, "parent %s should not be sleeping\n",
91 				 dev_name(dev->parent));
92 	} else if (transition_started) {
93 		/*
94 		 * We refuse to register parentless devices while a PM
95 		 * transition is in progress in order to avoid leaving them
96 		 * unhandled down the road
97 		 */
98 		dev_WARN(dev, "Parentless device registered during a PM transaction\n");
99 	}
100 
101 	list_add_tail(&dev->power.entry, &dpm_list);
102 	mutex_unlock(&dpm_list_mtx);
103 }
104 
/**
 *	device_pm_remove - remove a device from the list of active devices
 *	@dev:	Device to be removed from the list
 *
 *	Takes @dev off dpm_list under dpm_list_mtx, then tears down its
 *	runtime PM state.  pm_runtime_remove() is deliberately called after
 *	the mutex is dropped, in line with the rule (see top of file) that
 *	no device lock may be taken while holding dpm_list_mtx.
 *
 *	NOTE(review): the historical claim that this also removes PM-related
 *	sysfs attributes is not visible in this function — confirm whether
 *	pm_runtime_remove() covers it.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	pm_runtime_remove(dev);
}
121 
/**
 *	device_pm_move_before - move device in dpm_list
 *	@deva:  Device to move in dpm_list
 *	@devb:  Device @deva should come before
 *
 *	Presumably called with dpm_list_mtx held (no locking here) — confirm
 *	against callers.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}
137 
/**
 *	device_pm_move_after - move device in dpm_list
 *	@deva:  Device to move in dpm_list
 *	@devb:  Device @deva should come after
 *
 *	Presumably called with dpm_list_mtx held (no locking here) — confirm
 *	against callers.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}
153 
/**
 * 	device_pm_move_last - move device to end of dpm_list
 * 	@dev:   Device to move in dpm_list
 *
 *	Presumably called with dpm_list_mtx held (no locking here) — confirm
 *	against callers.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	list_move_tail(&dev->power.entry, &dpm_list);
}
165 
/**
 *	pm_op - execute the PM operation appropriate for given PM event
 *	@dev:	Device.
 *	@ops:	PM operations to choose from.
 *	@state:	PM transition of the system being carried out.
 *
 *	Dispatches @state.event to the matching callback in @ops.  Each
 *	callback is optional; a missing one is treated as success (0).
 *	Returns the callback's error code, or -EINVAL for an event this
 *	kernel configuration does not handle.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		/* intentional fallthrough: QUIESCE shares the freeze path */
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		/* intentional fallthrough: RECOVER shares the thaw path */
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}
	return error;
}
226 
/**
 *	pm_noirq_op - execute the PM operation appropriate for given PM event
 *	@dev:	Device.
 *	@ops:	PM operations to choose from.
 *	@state: PM transition of the system being carried out.
 *
 *	The operation is executed with interrupts disabled by the only remaining
 *	functional CPU in the system.
 *
 *	Mirror of pm_op() for the *_noirq callbacks; a missing callback is
 *	treated as success (0), and unhandled events yield -EINVAL.
 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		/* intentional fallthrough: QUIESCE shares the freeze path */
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		/* intentional fallthrough: RECOVER shares the thaw path */
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}
	return error;
}
290 
291 static char *pm_verb(int event)
292 {
293 	switch (event) {
294 	case PM_EVENT_SUSPEND:
295 		return "suspend";
296 	case PM_EVENT_RESUME:
297 		return "resume";
298 	case PM_EVENT_FREEZE:
299 		return "freeze";
300 	case PM_EVENT_QUIESCE:
301 		return "quiesce";
302 	case PM_EVENT_HIBERNATE:
303 		return "hibernate";
304 	case PM_EVENT_THAW:
305 		return "thaw";
306 	case PM_EVENT_RESTORE:
307 		return "restore";
308 	case PM_EVENT_RECOVER:
309 		return "recover";
310 	default:
311 		return "(unknown PM event)";
312 	}
313 }
314 
/*
 * Emit a per-device debug line of the form "<info><verb>[, may wakeup]".
 * The wakeup suffix only appears for sleep-class events on devices that
 * may wake the system.
 */
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}
321 
/*
 * Report a failed PM callback for @dev: which verb failed, at which stage
 * (@info, e.g. " early"/" late"), and the error code.
 */
static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}
328 
329 /*------------------------- Resume routines -------------------------*/
330 
331 /**
332  *	device_resume_noirq - Power on one device (early resume).
333  *	@dev:	Device.
334  *	@state: PM transition of the system being carried out.
335  *
336  *	Must be called with interrupts disabled.
337  */
338 static int device_resume_noirq(struct device *dev, pm_message_t state)
339 {
340 	int error = 0;
341 
342 	TRACE_DEVICE(dev);
343 	TRACE_RESUME(0);
344 
345 	if (!dev->bus)
346 		goto End;
347 
348 	if (dev->bus->pm) {
349 		pm_dev_dbg(dev, state, "EARLY ");
350 		error = pm_noirq_op(dev, dev->bus->pm, state);
351 	}
352  End:
353 	TRACE_RESUME(error);
354 	return error;
355 }
356 
/**
 *	dpm_resume_noirq - Power on all regular (non-sysdev) devices.
 *	@state: PM transition of the system being carried out.
 *
 *	Call the "noirq" resume handlers for all devices marked as
 *	DPM_OFF_IRQ and enable device drivers to receive interrupts.
 *
 *	Must be called under dpm_list_mtx.  Device drivers should not receive
 *	interrupts while it's being executed.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;

	mutex_lock(&dpm_list_mtx);
	list_for_each_entry(dev, &dpm_list, power.entry)
		/* Only devices suspended past the "noirq" stage need this. */
		if (dev->power.status > DPM_OFF) {
			int error;

			/* Mark resumed before calling out, so a failure
			 * doesn't leave the device looking "noirq"-suspended.
			 */
			dev->power.status = DPM_OFF;
			error = device_resume_noirq(dev, state);
			if (error)
				pm_dev_err(dev, state, " early", error);
		}
	mutex_unlock(&dpm_list_mtx);
	/* Re-enable delivery of interrupts to device drivers. */
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
385 
/**
 *	device_resume - Restore state for one device.
 *	@dev:	Device.
 *	@state: PM transition of the system being carried out.
 *
 *	Runs the resume callbacks in bus -> type -> class order (the reverse
 *	of device_suspend()), stopping at the first error.  For bus and
 *	class, the dev_pm_ops table takes precedence over the legacy
 *	->resume() method.  The device semaphore is held across all
 *	callbacks.
 */
static int device_resume(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	down(&dev->sem);

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = dev->bus->resume(dev);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = dev->class->resume(dev);
		}
	}
 End:
	up(&dev->sem);

	TRACE_RESUME(error);
	return error;
}
436 
/**
 *	dpm_resume - Resume every device.
 *	@state: PM transition of the system being carried out.
 *
 *	Execute the appropriate "resume" callback for all devices the status of
 *	which indicates that they are inactive.
 *
 *	Devices are popped off the head of dpm_list one at a time and moved
 *	to a local list; dpm_list_mtx is dropped around each callback so the
 *	resumed device may register new children (which land on dpm_list and
 *	are themselves processed).  get_device()/put_device() pin each device
 *	across the unlocked window.
 */
static void dpm_resume(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	/* Allow device registration again (see device_pm_add()). */
	transition_started = false;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		if (dev->power.status >= DPM_OFF) {
			int error;

			dev->power.status = DPM_RESUMING;
			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state);

			mutex_lock(&dpm_list_mtx);
			if (error)
				pm_dev_err(dev, state, "", error);
		} else if (dev->power.status == DPM_SUSPENDING) {
			/* Allow new children of the device to be registered */
			dev->power.status = DPM_RESUMING;
		}
		/* The entry may have been removed while unlocked. */
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	/* Put the processed devices back, preserving their order. */
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
477 
/**
 *	device_complete - Complete a PM transition for given device
 *	@dev:	Device.
 *	@state: PM transition of the system being carried out.
 *
 *	Runs the optional ->complete() callbacks in class -> type -> bus
 *	order — the reverse of device_prepare() — with the device semaphore
 *	held.  ->complete() returns void, so errors are not propagated.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	down(&dev->sem);

	if (dev->class && dev->class->pm && dev->class->pm->complete) {
		pm_dev_dbg(dev, state, "completing class ");
		dev->class->pm->complete(dev);
	}

	if (dev->type && dev->type->pm && dev->type->pm->complete) {
		pm_dev_dbg(dev, state, "completing type ");
		dev->type->pm->complete(dev);
	}

	if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
		pm_dev_dbg(dev, state, "completing ");
		dev->bus->pm->complete(dev);
	}

	up(&dev->sem);
}
504 
/**
 *	dpm_complete - Complete a PM transition for all devices.
 *	@state: PM transition of the system being carried out.
 *
 *	Execute the ->complete() callbacks for all devices that are not marked
 *	as DPM_ON.
 *
 *	Walks dpm_list from the tail (children before parents), dropping
 *	dpm_list_mtx around each callback; devices are parked on a local
 *	list and spliced back at the end.  Also drops the runtime PM
 *	reference taken in dpm_prepare() via pm_runtime_put_noidle().
 */
static void dpm_complete(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		if (dev->power.status > DPM_ON) {
			dev->power.status = DPM_ON;
			mutex_unlock(&dpm_list_mtx);

			device_complete(dev, state);
			pm_runtime_put_noidle(dev);

			mutex_lock(&dpm_list_mtx);
		}
		/* The entry may have been removed while unlocked. */
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
538 
/**
 *	dpm_resume_end - Restore state of each device in system.
 *	@state: PM transition of the system being carried out.
 *
 *	Resume all the devices, unlock them all, and allow new
 *	devices to be registered once again.
 *
 *	May sleep; this is the counterpart of dpm_suspend_start().
 */
void dpm_resume_end(pm_message_t state)
{
	might_sleep();
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
553 
554 
555 /*------------------------- Suspend routines -------------------------*/
556 
557 /**
558  *	resume_event - return a PM message representing the resume event
559  *	               corresponding to given sleep state.
560  *	@sleep_state: PM message representing a sleep state.
561  */
562 static pm_message_t resume_event(pm_message_t sleep_state)
563 {
564 	switch (sleep_state.event) {
565 	case PM_EVENT_SUSPEND:
566 		return PMSG_RESUME;
567 	case PM_EVENT_FREEZE:
568 	case PM_EVENT_QUIESCE:
569 		return PMSG_RECOVER;
570 	case PM_EVENT_HIBERNATE:
571 		return PMSG_RESTORE;
572 	}
573 	return PMSG_ON;
574 }
575 
576 /**
577  *	device_suspend_noirq - Shut down one device (late suspend).
578  *	@dev:	Device.
579  *	@state: PM transition of the system being carried out.
580  *
581  *	This is called with interrupts off and only a single CPU running.
582  */
583 static int device_suspend_noirq(struct device *dev, pm_message_t state)
584 {
585 	int error = 0;
586 
587 	if (!dev->bus)
588 		return 0;
589 
590 	if (dev->bus->pm) {
591 		pm_dev_dbg(dev, state, "LATE ");
592 		error = pm_noirq_op(dev, dev->bus->pm, state);
593 	}
594 	return error;
595 }
596 
/**
 *	dpm_suspend_noirq - Power down all regular (non-sysdev) devices.
 *	@state: PM transition of the system being carried out.
 *
 *	Prevent device drivers from receiving interrupts and call the "noirq"
 *	suspend handlers.
 *
 *	Must be called under dpm_list_mtx.
 *
 *	Walks dpm_list in reverse (children before parents); on the first
 *	failure it stops, then rolls everything back with dpm_resume_noirq().
 */
int dpm_suspend_noirq(pm_message_t state)
{
	struct device *dev;
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
		error = device_suspend_noirq(dev, state);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			break;
		}
		dev->power.status = DPM_OFF_IRQ;
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		/* Undo the partial suspend, including re-enabling IRQs. */
		dpm_resume_noirq(resume_event(state));
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
627 
/**
 *	device_suspend - Save state of one device.
 *	@dev:	Device.
 *	@state: PM transition of the system being carried out.
 *
 *	Runs the suspend callbacks in class -> type -> bus order (the
 *	reverse of device_resume()), stopping at the first error.  For class
 *	and bus, the dev_pm_ops table takes precedence over the legacy
 *	two-argument ->suspend() method.  The device semaphore is held
 *	across all callbacks.
 */
static int device_suspend(struct device *dev, pm_message_t state)
{
	int error = 0;

	down(&dev->sem);

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = dev->class->suspend(dev, state);
			suspend_report_result(dev->class->suspend, error);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = dev->bus->suspend(dev, state);
			suspend_report_result(dev->bus->suspend, error);
		}
	}
 End:
	up(&dev->sem);

	return error;
}
676 
/**
 *	dpm_suspend - Suspend every device.
 *	@state: PM transition of the system being carried out.
 *
 *	Execute the appropriate "suspend" callbacks for all devices.
 *
 *	Devices are taken from the tail of dpm_list (children before
 *	parents) with dpm_list_mtx dropped around each callback; suspended
 *	devices collect on a local list that is spliced back at the end.
 *	Returns 0 on success or the first callback error, in which case the
 *	walk stops (callers are expected to roll back).
 */
static int dpm_suspend(pm_message_t state)
{
	struct list_head list;
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		/* Pin the device across the unlocked callback window. */
		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_OFF;
		/* The entry may have been removed while unlocked. */
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
	}
	/* Re-insert the suspended devices ahead of any unsuspended tail. */
	list_splice(&list, dpm_list.prev);
	mutex_unlock(&dpm_list_mtx);
	return error;
}
713 
/**
 *	device_prepare - Execute the ->prepare() callback(s) for given device.
 *	@dev:	Device.
 *	@state: PM transition of the system being carried out.
 *
 *	Runs the optional ->prepare() callbacks in bus -> type -> class
 *	order (device_complete() undoes them in the reverse order), stopping
 *	at the first error, with the device semaphore held.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	down(&dev->sem);

	if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing ");
		error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm && dev->type->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing type ");
		error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm && dev->class->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing class ");
		error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
	}
 End:
	up(&dev->sem);

	return error;
}
751 
/**
 *	dpm_prepare - Prepare all devices for a PM transition.
 *	@state: PM transition of the system being carried out.
 *
 *	Execute the ->prepare() callback for all devices.
 *
 *	Takes a runtime PM reference on each device (released later by
 *	dpm_complete()) and uses pm_runtime_barrier() to flush pending
 *	runtime PM requests.  A wakeup detected during the barrier on a
 *	wakeup-capable device aborts the transition with -EBUSY; -EAGAIN
 *	from ->prepare() merely skips the device.
 */
static int dpm_prepare(pm_message_t state)
{
	struct list_head list;
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	/* From now on device_pm_add() warns about parentless devices. */
	transition_started = true;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		dev->power.status = DPM_PREPARING;
		mutex_unlock(&dpm_list_mtx);

		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
			/* Wake-up requested during system sleep transition. */
			pm_runtime_put_noidle(dev);
			error = -EBUSY;
		} else {
			error = device_prepare(dev, state);
		}

		mutex_lock(&dpm_list_mtx);
		if (error) {
			/*
			 * NOTE(review): on the device_prepare() error paths the
			 * pm_runtime_get_noresume() reference appears to be
			 * retained while the status reverts to DPM_ON, so
			 * dpm_complete() will not drop it — confirm whether
			 * this reference is released elsewhere.
			 */
			dev->power.status = DPM_ON;
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_ERR "PM: Failed to prepare device %s "
				"for power transition: error %d\n",
				kobject_name(&dev->kobj), error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_SUSPENDING;
		/* The entry may have been removed while unlocked. */
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	return error;
}
805 
/**
 *	dpm_suspend_start - Save state and stop all devices in system.
 *	@state: PM transition of the system being carried out.
 *
 *	Prepare and suspend all devices.
 *
 *	May sleep.  Returns 0 on success; on failure the suspend phase is
 *	skipped (dpm_prepare() error) or partially rolled back by callers.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	might_sleep();
	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
823 
824 void __suspend_report_result(const char *function, void *fn, int ret)
825 {
826 	if (ret)
827 		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
828 }
829 EXPORT_SYMBOL_GPL(__suspend_report_result);
830