/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in dpm_list are kept in depth-first order, simply because
 * children are guaranteed to be discovered after their parents and are
 * inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held, we must
 * never try to acquire a device lock while holding dpm_list_mtx.
 */

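/*
 * Illustration (hypothetical caller, not part of this file): because of the
 * rule above, code that needs both locks has to take the device lock first,
 * which is the order device_pm_add() callers end up using:
 *
 *	device_lock(dev);
 *	mutex_lock(&dpm_list_mtx);
 *	...
 *	mutex_unlock(&dpm_list_mtx);
 *	device_unlock(dev);
 *
 * Acquiring them in the opposite order could deadlock against such callers.
 */
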
LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.in_suspend = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.in_suspend)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i\n",
				dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if async suspend is enabled globally and the
 *	device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}

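/*
 * Example (hypothetical subsystem code, not part of this file): pm_op()
 * dispatches on state.event, so a bus, class or device type would typically
 * hand it a struct dev_pm_ops along the lines of the sketch below, where
 * foo_suspend() ends up handling PM_EVENT_SUSPEND, FREEZE/QUIESCE and
 * HIBERNATE, and foo_resume() handles PM_EVENT_RESUME, THAW/RECOVER and
 * RESTORE:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend	= foo_suspend,
 *		.resume		= foo_resume,
 *		.freeze		= foo_suspend,
 *		.thaw		= foo_resume,
 *		.poweroff	= foo_suspend,
 *		.restore	= foo_resume,
 *	};
 */
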
/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;
	ktime_t calltime = ktime_set(0, 0), delta, rettime;

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
				dev_name(dev), task_pid_nr(current),
				dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

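/*
 * Example (hypothetical, not part of this file): the _noirq callbacks run
 * between suspend_device_irqs() and resume_device_irqs(), i.e. while driver
 * interrupt handlers are disabled, so a subsystem that has to touch hardware
 * with interrupts off would extend its dev_pm_ops along these lines:
 *
 *	static const struct dev_pm_ops bar_pm_ops = {
 *		.suspend	= bar_suspend,
 *		.resume		= bar_resume,
 *		.suspend_noirq	= bar_suspend_noirq,
 *		.resume_noirq	= bar_resume_noirq,
 *	};
 */
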
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "EARLY power domain ");
		pm_noirq_op(dev, &dev->pwr_domain->ops, state);
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error)
			pm_dev_err(dev, state, " early", error);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	dev->power.in_suspend = false;

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		pm_op(dev, &dev->pwr_domain->ops, state);
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
	}

 End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);
	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

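/*
 * Example (hypothetical driver code, not part of this file): a device takes
 * part in asynchronous suspend/resume only if its power.async_suspend flag
 * is set, typically during probe via:
 *
 *	device_enable_async_suspend(dev);
 *
 * Even then, async handling may be disabled globally through
 * /sys/power/pm_async (pm_async_enabled) and, for resume, by PM tracing
 * (pm_trace_is_enabled()), as checked by is_async() above.
 */
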
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error)
				pm_dev_err(dev, state, "", error);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->pwr_domain && dev->pwr_domain->ops.complete) {
		pm_dev_dbg(dev, state, "completing power domain ");
		dev->pwr_domain->ops.complete(dev);
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "completing type ");
		if (dev->type->pm->complete)
			dev->type->pm->complete(dev);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "completing class ");
		if (dev->class->pm->complete)
			dev->class->pm->complete(dev);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "completing ");
		if (dev->bus->pm->complete)
			dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices that have been prepared
 * for the transition (this allows new devices to be registered again).
 */
static void dpm_complete(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.in_suspend = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	might_sleep();
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error;

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			return error;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			return error;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			return error;
	}

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "LATE power domain ");
		pm_noirq_op(dev, &dev->pwr_domain->ops, state);
	}

	return 0;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

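/*
 * Example (hypothetical, not part of this file): legacy_suspend() and
 * legacy_resume() back buses and classes that still use the old callback
 * signatures instead of struct dev_pm_ops:
 *
 *	static int baz_suspend(struct device *dev, pm_message_t state);
 *	static int baz_resume(struct device *dev);
 *
 *	static struct bus_type baz_bus_type = {
 *		.name		= "baz",
 *		.suspend	= baz_suspend,
 *		.resume		= baz_resume,
 *	};
 */
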
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait_for_children(dev, async);
	device_lock(dev);

	if (async_error)
		goto End;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto Domain;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto Domain;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto Domain;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

 Domain:
	if (!error && dev->pwr_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		pm_op(dev, &dev->pwr_domain->ops, state);
	}

 End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	if (error)
		async_error = error;

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "preparing type ");
		if (dev->type->pm->prepare)
			error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "preparing class ");
		if (dev->class->pm->prepare)
			error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "preparing ");
		if (dev->bus->pm->prepare)
			error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->pwr_domain && dev->pwr_domain->ops.prepare) {
		pm_dev_dbg(dev, state, "preparing power domain ");
		dev->pwr_domain->ops.prepare(dev);
	}

 End:
	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
			pm_wakeup_event(dev, 0);

		pm_runtime_put_sync(dev);
		error = pm_wakeup_pending() ?
				-EBUSY : device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.in_suspend = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	might_sleep();
	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

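/*
 * Overview (informational): for a system suspend, the platform suspend code
 * is expected to call the entry points exported by this file roughly in the
 * following order, error handling omitted:
 *
 *	dpm_suspend_start(PMSG_SUSPEND);	dpm_prepare() + dpm_suspend()
 *	dpm_suspend_noirq(PMSG_SUSPEND);	"late" suspend, IRQs off
 *
 *	... the system enters the sleep state and wakes up ...
 *
 *	dpm_resume_noirq(PMSG_RESUME);		"early" resume, IRQs back on
 *	dpm_resume_end(PMSG_RESUME);		dpm_resume() + dpm_complete()
 */
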
void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
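
/*
 * Example (hypothetical driver code, not part of this file): a device that
 * depends on another device outside its ancestor chain can make its own
 * callback wait until that device's suspend or resume has finished, which
 * matters when the other device is handled asynchronously:
 *
 *	static int qux_resume(struct device *dev)
 *	{
 *		int error = device_pm_wait_for_dev(dev, qux_get_partner(dev));
 *		if (error)
 *			return error;
 *		return qux_restart_hw(dev);
 *	}
 *
 * qux_get_partner() and qux_restart_hw() stand in for whatever the driver
 * actually uses.
 */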
1085