1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/main.c - Where the driver meets power management.
4  *
5  * Copyright (c) 2003 Patrick Mochel
6  * Copyright (c) 2003 Open Source Development Lab
7  *
8  * The driver model core calls device_pm_add() when a device is registered.
9  * This will initialize the embedded device_pm_info object in the device
10  * and add it to the list of power-controlled devices. sysfs entries for
11  * controlling device power management will also be added.
12  *
13  * A separate list is used for keeping track of power info, because the power
14  * domain dependencies may differ from the ancestral dependencies that the
15  * subsystem list maintains.
16  */
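
/*
 * For illustration only, a minimal sketch of the driver side (a
 * hypothetical "foo" driver): the callbacks invoked by this file are
 * supplied through a struct dev_pm_ops, for example:
 *
 *	static int foo_suspend(struct device *dev) { return 0; }
 *	static int foo_resume(struct device *dev) { return 0; }
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 * The driver points its struct device_driver .pm field at foo_pm_ops,
 * and pm_op() with its "late"/"noirq" variants below select one of the
 * callbacks according to the PM event being handled.
 */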
17 
18 #define pr_fmt(fmt) "PM: " fmt
19 #define dev_fmt pr_fmt
20 
21 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/sched/debug.h>
31 #include <linux/async.h>
32 #include <linux/suspend.h>
33 #include <trace/events/power.h>
34 #include <linux/cpufreq.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37 
38 #include "../base.h"
39 #include "power.h"
40 
41 typedef int (*pm_callback_t)(struct device *);
42 
43 #define list_for_each_entry_rcu_locked(pos, head, member) \
44 	list_for_each_entry_rcu(pos, head, member, \
45 			device_links_read_lock_held())
46 
47 /*
48  * The entries in dpm_list are in depth-first order, simply
49  * because children are guaranteed to be discovered after parents, and
50  * are inserted at the back of the list on discovery.
51  *
52  * Since device_pm_add() may be called with a device lock held,
53  * we must never try to acquire a device lock while holding
54  * dpm_list_mutex.
55  */
56 
57 LIST_HEAD(dpm_list);
58 static LIST_HEAD(dpm_prepared_list);
59 static LIST_HEAD(dpm_suspended_list);
60 static LIST_HEAD(dpm_late_early_list);
61 static LIST_HEAD(dpm_noirq_list);
62 
63 static DEFINE_MUTEX(dpm_list_mtx);
64 static pm_message_t pm_transition;
65 
66 static int async_error;
67 
68 static const char *pm_verb(int event)
69 {
70 	switch (event) {
71 	case PM_EVENT_SUSPEND:
72 		return "suspend";
73 	case PM_EVENT_RESUME:
74 		return "resume";
75 	case PM_EVENT_FREEZE:
76 		return "freeze";
77 	case PM_EVENT_QUIESCE:
78 		return "quiesce";
79 	case PM_EVENT_HIBERNATE:
80 		return "hibernate";
81 	case PM_EVENT_THAW:
82 		return "thaw";
83 	case PM_EVENT_RESTORE:
84 		return "restore";
85 	case PM_EVENT_RECOVER:
86 		return "recover";
87 	default:
88 		return "(unknown PM event)";
89 	}
90 }
91 
92 /**
93  * device_pm_sleep_init - Initialize system suspend-related device fields.
94  * @dev: Device object being initialized.
95  */
96 void device_pm_sleep_init(struct device *dev)
97 {
98 	dev->power.is_prepared = false;
99 	dev->power.is_suspended = false;
100 	dev->power.is_noirq_suspended = false;
101 	dev->power.is_late_suspended = false;
102 	init_completion(&dev->power.completion);
103 	complete_all(&dev->power.completion);
104 	dev->power.wakeup = NULL;
105 	INIT_LIST_HEAD(&dev->power.entry);
106 }
107 
108 /**
109  * device_pm_lock - Lock the list of active devices used by the PM core.
110  */
111 void device_pm_lock(void)
112 {
113 	mutex_lock(&dpm_list_mtx);
114 }
115 
116 /**
117  * device_pm_unlock - Unlock the list of active devices used by the PM core.
118  */
119 void device_pm_unlock(void)
120 {
121 	mutex_unlock(&dpm_list_mtx);
122 }
123 
124 /**
125  * device_pm_add - Add a device to the PM core's list of active devices.
126  * @dev: Device to add to the list.
127  */
128 void device_pm_add(struct device *dev)
129 {
130 	/* Skip PM setup/initialization. */
131 	if (device_pm_not_required(dev))
132 		return;
133 
134 	pr_debug("Adding info for %s:%s\n",
135 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
136 	device_pm_check_callbacks(dev);
137 	mutex_lock(&dpm_list_mtx);
138 	if (dev->parent && dev->parent->power.is_prepared)
139 		dev_warn(dev, "parent %s should not be sleeping\n",
140 			dev_name(dev->parent));
141 	list_add_tail(&dev->power.entry, &dpm_list);
142 	dev->power.in_dpm_list = true;
143 	mutex_unlock(&dpm_list_mtx);
144 }
145 
146 /**
147  * device_pm_remove - Remove a device from the PM core's list of active devices.
148  * @dev: Device to be removed from the list.
149  */
150 void device_pm_remove(struct device *dev)
151 {
152 	if (device_pm_not_required(dev))
153 		return;
154 
155 	pr_debug("Removing info for %s:%s\n",
156 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
157 	complete_all(&dev->power.completion);
158 	mutex_lock(&dpm_list_mtx);
159 	list_del_init(&dev->power.entry);
160 	dev->power.in_dpm_list = false;
161 	mutex_unlock(&dpm_list_mtx);
162 	device_wakeup_disable(dev);
163 	pm_runtime_remove(dev);
164 	device_pm_check_callbacks(dev);
165 }
166 
167 /**
168  * device_pm_move_before - Move device in the PM core's list of active devices.
169  * @deva: Device to move in dpm_list.
170  * @devb: Device @deva should come before.
171  */
172 void device_pm_move_before(struct device *deva, struct device *devb)
173 {
174 	pr_debug("Moving %s:%s before %s:%s\n",
175 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
176 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
177 	/* Delete deva from dpm_list and reinsert before devb. */
178 	list_move_tail(&deva->power.entry, &devb->power.entry);
179 }
180 
181 /**
182  * device_pm_move_after - Move device in the PM core's list of active devices.
183  * @deva: Device to move in dpm_list.
184  * @devb: Device @deva should come after.
185  */
186 void device_pm_move_after(struct device *deva, struct device *devb)
187 {
188 	pr_debug("Moving %s:%s after %s:%s\n",
189 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
190 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
191 	/* Delete deva from dpm_list and reinsert after devb. */
192 	list_move(&deva->power.entry, &devb->power.entry);
193 }
194 
195 /**
196  * device_pm_move_last - Move device to end of the PM core's list of devices.
197  * @dev: Device to move in dpm_list.
198  */
199 void device_pm_move_last(struct device *dev)
200 {
201 	pr_debug("Moving %s:%s to end of list\n",
202 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
203 	list_move_tail(&dev->power.entry, &dpm_list);
204 }
205 
206 static ktime_t initcall_debug_start(struct device *dev, void *cb)
207 {
208 	if (!pm_print_times_enabled)
209 		return 0;
210 
211 	dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
212 		 task_pid_nr(current),
213 		 dev->parent ? dev_name(dev->parent) : "none");
214 	return ktime_get();
215 }
216 
217 static void initcall_debug_report(struct device *dev, ktime_t calltime,
218 				  void *cb, int error)
219 {
220 	ktime_t rettime;
221 
222 	if (!pm_print_times_enabled)
223 		return;
224 
225 	rettime = ktime_get();
226 	dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
227 		 (unsigned long long)ktime_us_delta(rettime, calltime));
228 }
229 
230 /**
231  * dpm_wait - Wait for a PM operation to complete.
232  * @dev: Device to wait for.
233  * @async: If unset, wait only if the device's power.async_suspend flag is set.
234  */
235 static void dpm_wait(struct device *dev, bool async)
236 {
237 	if (!dev)
238 		return;
239 
240 	if (async || (pm_async_enabled && dev->power.async_suspend))
241 		wait_for_completion(&dev->power.completion);
242 }
243 
244 static int dpm_wait_fn(struct device *dev, void *async_ptr)
245 {
246 	dpm_wait(dev, *((bool *)async_ptr));
247 	return 0;
248 }
249 
250 static void dpm_wait_for_children(struct device *dev, bool async)
251 {
252 	device_for_each_child(dev, &async, dpm_wait_fn);
253 }
254 
255 static void dpm_wait_for_suppliers(struct device *dev, bool async)
256 {
257 	struct device_link *link;
258 	int idx;
259 
260 	idx = device_links_read_lock();
261 
262 	/*
263 	 * If the supplier goes away right after we've checked the link to it,
264 	 * we'll wait for its completion to change the state, but that's fine,
265 	 * because the only things that will block as a result are the SRCU
266 	 * callbacks freeing the link objects for the links in the list we're
267 	 * walking.
268 	 */
269 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
270 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
271 			dpm_wait(link->supplier, async);
272 
273 	device_links_read_unlock(idx);
274 }
275 
276 static bool dpm_wait_for_superior(struct device *dev, bool async)
277 {
278 	struct device *parent;
279 
280 	/*
281 	 * If the device is resumed asynchronously and the parent's callback
282 	 * deletes both the device and the parent itself, the parent object may
283 	 * be freed while this function is running, so avoid that by reference
284 	 * counting the parent once more unless the device has been deleted
285 	 * already (in which case return right away).
286 	 */
287 	mutex_lock(&dpm_list_mtx);
288 
289 	if (!device_pm_initialized(dev)) {
290 		mutex_unlock(&dpm_list_mtx);
291 		return false;
292 	}
293 
294 	parent = get_device(dev->parent);
295 
296 	mutex_unlock(&dpm_list_mtx);
297 
298 	dpm_wait(parent, async);
299 	put_device(parent);
300 
301 	dpm_wait_for_suppliers(dev, async);
302 
303 	/*
304 	 * If the parent's callback has deleted the device, attempting to resume
305 	 * it would be invalid, so avoid doing that then.
306 	 */
307 	return device_pm_initialized(dev);
308 }
309 
310 static void dpm_wait_for_consumers(struct device *dev, bool async)
311 {
312 	struct device_link *link;
313 	int idx;
314 
315 	idx = device_links_read_lock();
316 
317 	/*
318 	 * The status of a device link can only be changed from "dormant" by a
319 	 * probe, but that cannot happen during system suspend/resume.  In
320 	 * theory it can change to "dormant" at that time, but then it is
321 	 * reasonable to wait for the target device anyway (eg. if it goes
322 	 * away, it's better to wait for it to go away completely and then
323 	 * continue instead of trying to continue in parallel with its
324 	 * unregistration).
325 	 */
326 	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
327 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
328 			dpm_wait(link->consumer, async);
329 
330 	device_links_read_unlock(idx);
331 }
332 
333 static void dpm_wait_for_subordinate(struct device *dev, bool async)
334 {
335 	dpm_wait_for_children(dev, async);
336 	dpm_wait_for_consumers(dev, async);
337 }
338 
339 /**
340  * pm_op - Return the PM operation appropriate for given PM event.
341  * @ops: PM operations to choose from.
342  * @state: PM transition of the system being carried out.
343  */
344 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
345 {
346 	switch (state.event) {
347 #ifdef CONFIG_SUSPEND
348 	case PM_EVENT_SUSPEND:
349 		return ops->suspend;
350 	case PM_EVENT_RESUME:
351 		return ops->resume;
352 #endif /* CONFIG_SUSPEND */
353 #ifdef CONFIG_HIBERNATE_CALLBACKS
354 	case PM_EVENT_FREEZE:
355 	case PM_EVENT_QUIESCE:
356 		return ops->freeze;
357 	case PM_EVENT_HIBERNATE:
358 		return ops->poweroff;
359 	case PM_EVENT_THAW:
360 	case PM_EVENT_RECOVER:
361 		return ops->thaw;
362 	case PM_EVENT_RESTORE:
363 		return ops->restore;
364 #endif /* CONFIG_HIBERNATE_CALLBACKS */
365 	}
366 
367 	return NULL;
368 }
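
/*
 * For example, for a suspend transition (PMSG_SUSPEND) pm_op() returns
 * ops->suspend, while for hibernation image creation (PMSG_FREEZE) it
 * returns ops->freeze; events not handled above yield NULL.
 */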
369 
370 /**
371  * pm_late_early_op - Return the PM operation appropriate for given PM event.
372  * @ops: PM operations to choose from.
373  * @state: PM transition of the system being carried out.
374  *
375  * Runtime PM is disabled for the target device while the returned callback runs.
376  */
377 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
378 				      pm_message_t state)
379 {
380 	switch (state.event) {
381 #ifdef CONFIG_SUSPEND
382 	case PM_EVENT_SUSPEND:
383 		return ops->suspend_late;
384 	case PM_EVENT_RESUME:
385 		return ops->resume_early;
386 #endif /* CONFIG_SUSPEND */
387 #ifdef CONFIG_HIBERNATE_CALLBACKS
388 	case PM_EVENT_FREEZE:
389 	case PM_EVENT_QUIESCE:
390 		return ops->freeze_late;
391 	case PM_EVENT_HIBERNATE:
392 		return ops->poweroff_late;
393 	case PM_EVENT_THAW:
394 	case PM_EVENT_RECOVER:
395 		return ops->thaw_early;
396 	case PM_EVENT_RESTORE:
397 		return ops->restore_early;
398 #endif /* CONFIG_HIBERNATE_CALLBACKS */
399 	}
400 
401 	return NULL;
402 }
403 
404 /**
405  * pm_noirq_op - Return the PM operation appropriate for given PM event.
406  * @ops: PM operations to choose from.
407  * @state: PM transition of the system being carried out.
408  *
409  * The driver of the target device will not receive interrupts while the
410  * callback returned by this function is being executed.
411  */
412 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
413 {
414 	switch (state.event) {
415 #ifdef CONFIG_SUSPEND
416 	case PM_EVENT_SUSPEND:
417 		return ops->suspend_noirq;
418 	case PM_EVENT_RESUME:
419 		return ops->resume_noirq;
420 #endif /* CONFIG_SUSPEND */
421 #ifdef CONFIG_HIBERNATE_CALLBACKS
422 	case PM_EVENT_FREEZE:
423 	case PM_EVENT_QUIESCE:
424 		return ops->freeze_noirq;
425 	case PM_EVENT_HIBERNATE:
426 		return ops->poweroff_noirq;
427 	case PM_EVENT_THAW:
428 	case PM_EVENT_RECOVER:
429 		return ops->thaw_noirq;
430 	case PM_EVENT_RESTORE:
431 		return ops->restore_noirq;
432 #endif /* CONFIG_HIBERNATE_CALLBACKS */
433 	}
434 
435 	return NULL;
436 }
437 
438 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
439 {
440 	dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
441 		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
442 		", may wakeup" : "", dev->power.driver_flags);
443 }
444 
445 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
446 			int error)
447 {
448 	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
449 		error);
450 }
451 
452 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
453 			  const char *info)
454 {
455 	ktime_t calltime;
456 	u64 usecs64;
457 	int usecs;
458 
459 	calltime = ktime_get();
460 	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
461 	do_div(usecs64, NSEC_PER_USEC);
462 	usecs = usecs64;
463 	if (usecs == 0)
464 		usecs = 1;
465 
466 	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
467 		  info ?: "", info ? " " : "", pm_verb(state.event),
468 		  error ? "aborted" : "complete",
469 		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
470 }
471 
472 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
473 			    pm_message_t state, const char *info)
474 {
475 	ktime_t calltime;
476 	int error;
477 
478 	if (!cb)
479 		return 0;
480 
481 	calltime = initcall_debug_start(dev, cb);
482 
483 	pm_dev_dbg(dev, state, info);
484 	trace_device_pm_callback_start(dev, info, state.event);
485 	error = cb(dev);
486 	trace_device_pm_callback_end(dev, error);
487 	suspend_report_result(dev, cb, error);
488 
489 	initcall_debug_report(dev, calltime, cb, error);
490 
491 	return error;
492 }
493 
494 #ifdef CONFIG_DPM_WATCHDOG
495 struct dpm_watchdog {
496 	struct device		*dev;
497 	struct task_struct	*tsk;
498 	struct timer_list	timer;
499 	bool			fatal;
500 };
501 
502 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
503 	struct dpm_watchdog wd
504 
505 /**
506  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
507  * @t: The timer that PM watchdog depends on.
508  *
509  * Called when a driver has timed out suspending or resuming.
510  * There's not much we can do here to recover so panic() to
511  * capture a crash-dump in pstore.
512  */
513 static void dpm_watchdog_handler(struct timer_list *t)
514 {
515 	struct dpm_watchdog *wd = from_timer(wd, t, timer);
516 	struct timer_list *timer = &wd->timer;
517 	unsigned int time_left;
518 
519 	if (wd->fatal) {
520 		dev_emerg(wd->dev, "**** DPM device timeout ****\n");
521 		show_stack(wd->tsk, NULL, KERN_EMERG);
522 		panic("%s %s: unrecoverable failure\n",
523 			dev_driver_string(wd->dev), dev_name(wd->dev));
524 	}
525 
526 	time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
527 	dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
528 		 CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
529 	show_stack(wd->tsk, NULL, KERN_WARNING);
530 
531 	wd->fatal = true;
532 	mod_timer(timer, jiffies + HZ * time_left);
533 }
534 
535 /**
536  * dpm_watchdog_set - Enable pm watchdog for given device.
537  * @wd: Watchdog. Must be allocated on the stack.
538  * @dev: Device to handle.
539  */
540 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
541 {
542 	struct timer_list *timer = &wd->timer;
543 
544 	wd->dev = dev;
545 	wd->tsk = current;
546 	wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
547 
548 	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
549 	/* use same timeout value for both suspend and resume */
550 	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
551 	add_timer(timer);
552 }
553 
554 /**
555  * dpm_watchdog_clear - Disable suspend/resume watchdog.
556  * @wd: Watchdog to disable.
557  */
558 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
559 {
560 	struct timer_list *timer = &wd->timer;
561 
562 	timer_delete_sync(timer);
563 	destroy_timer_on_stack(timer);
564 }
565 #else
566 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
567 #define dpm_watchdog_set(x, y)
568 #define dpm_watchdog_clear(x)
569 #endif
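
/*
 * Sketch of how the watchdog helpers above are used by device_resume()
 * and device_suspend() below:
 *
 *	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 *
 *	dpm_watchdog_set(&wd, dev);
 *	device_lock(dev);
 *	... run the device's PM callback ...
 *	device_unlock(dev);
 *	dpm_watchdog_clear(&wd);
 */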
570 
571 /*------------------------- Resume routines -------------------------*/
572 
573 /**
574  * dev_pm_skip_resume - System-wide device resume optimization check.
575  * @dev: Target device.
576  *
577  * Return:
578  * - %false if the transition under way is RESTORE.
579  * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
580  * - The logical negation of %power.must_resume otherwise (that is, when the
581  *   transition under way is RESUME).
582  */
583 bool dev_pm_skip_resume(struct device *dev)
584 {
585 	if (pm_transition.event == PM_EVENT_RESTORE)
586 		return false;
587 
588 	if (pm_transition.event == PM_EVENT_THAW)
589 		return dev_pm_skip_suspend(dev);
590 
591 	return !dev->power.must_resume;
592 }
593 
594 static bool is_async(struct device *dev)
595 {
596 	return dev->power.async_suspend && pm_async_enabled
597 		&& !pm_trace_is_enabled();
598 }
599 
600 static bool dpm_async_fn(struct device *dev, async_func_t func)
601 {
602 	if (!is_async(dev))
603 		return false;
604 
605 	dev->power.work_in_progress = true;
606 
607 	get_device(dev);
608 
609 	if (async_schedule_dev_nocall(func, dev))
610 		return true;
611 
612 	put_device(dev);
613 
614 	/*
615 	 * async_schedule_dev_nocall() above has returned false, so func() is
616 	 * not running and it is safe to update power.work_in_progress without
617 	 * extra synchronization.
618 	 */
619 	dev->power.work_in_progress = false;
620 
621 	return false;
622 }
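
/*
 * The dpm_* list walks below pair this with dpm_clear_async_state(),
 * roughly:
 *
 *	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
 *		dpm_clear_async_state(dev);
 *		dpm_async_fn(dev, async_resume);
 *	}
 *
 * so that "async" devices are already in flight before the synchronous
 * walk of the same list reaches them.
 */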
623 
624 static void dpm_clear_async_state(struct device *dev)
625 {
626 	reinit_completion(&dev->power.completion);
627 	dev->power.work_in_progress = false;
628 }
629 
630 /**
631  * device_resume_noirq - Execute a "noirq resume" callback for given device.
632  * @dev: Device to handle.
633  * @state: PM transition of the system being carried out.
634  * @async: If true, the device is being resumed asynchronously.
635  *
636  * The driver of @dev will not receive interrupts while this function is being
637  * executed.
638  */
639 static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
640 {
641 	pm_callback_t callback = NULL;
642 	const char *info = NULL;
643 	bool skip_resume;
644 	int error = 0;
645 
646 	TRACE_DEVICE(dev);
647 	TRACE_RESUME(0);
648 
649 	if (dev->power.syscore || dev->power.direct_complete)
650 		goto Out;
651 
652 	if (!dev->power.is_noirq_suspended)
653 		goto Out;
654 
655 	if (!dpm_wait_for_superior(dev, async))
656 		goto Out;
657 
658 	skip_resume = dev_pm_skip_resume(dev);
659 	/*
660 	 * If the driver callback is skipped below or by the middle layer
661 	 * callback and device_resume_early() also skips the driver callback for
662 	 * this device later, it needs to appear as "suspended" to PM-runtime,
663 	 * so change its status accordingly.
664 	 *
665 	 * Otherwise, the device is going to be resumed, so set its PM-runtime
666 	 * status to "active" unless its power.smart_suspend flag is clear, in
667 	 * which case it is not necessary to update its PM-runtime status.
668 	 */
669 	if (skip_resume)
670 		pm_runtime_set_suspended(dev);
671 	else if (dev_pm_smart_suspend(dev))
672 		pm_runtime_set_active(dev);
673 
674 	if (dev->pm_domain) {
675 		info = "noirq power domain ";
676 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
677 	} else if (dev->type && dev->type->pm) {
678 		info = "noirq type ";
679 		callback = pm_noirq_op(dev->type->pm, state);
680 	} else if (dev->class && dev->class->pm) {
681 		info = "noirq class ";
682 		callback = pm_noirq_op(dev->class->pm, state);
683 	} else if (dev->bus && dev->bus->pm) {
684 		info = "noirq bus ";
685 		callback = pm_noirq_op(dev->bus->pm, state);
686 	}
687 	if (callback)
688 		goto Run;
689 
690 	if (skip_resume)
691 		goto Skip;
692 
693 	if (dev->driver && dev->driver->pm) {
694 		info = "noirq driver ";
695 		callback = pm_noirq_op(dev->driver->pm, state);
696 	}
697 
698 Run:
699 	error = dpm_run_callback(callback, dev, state, info);
700 
701 Skip:
702 	dev->power.is_noirq_suspended = false;
703 
704 Out:
705 	complete_all(&dev->power.completion);
706 	TRACE_RESUME(error);
707 
708 	if (error) {
709 		async_error = error;
710 		dpm_save_failed_dev(dev_name(dev));
711 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
712 	}
713 }
714 
715 static void async_resume_noirq(void *data, async_cookie_t cookie)
716 {
717 	struct device *dev = data;
718 
719 	device_resume_noirq(dev, pm_transition, true);
720 	put_device(dev);
721 }
722 
723 static void dpm_noirq_resume_devices(pm_message_t state)
724 {
725 	struct device *dev;
726 	ktime_t starttime = ktime_get();
727 
728 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
729 
730 	async_error = 0;
731 	pm_transition = state;
732 
733 	mutex_lock(&dpm_list_mtx);
734 
735 	/*
736 	 * Trigger the resume of "async" devices upfront so they don't have to
737 	 * wait for the "non-async" ones they don't depend on.
738 	 */
739 	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
740 		dpm_clear_async_state(dev);
741 		dpm_async_fn(dev, async_resume_noirq);
742 	}
743 
744 	while (!list_empty(&dpm_noirq_list)) {
745 		dev = to_device(dpm_noirq_list.next);
746 		list_move_tail(&dev->power.entry, &dpm_late_early_list);
747 
748 		if (!dev->power.work_in_progress) {
749 			get_device(dev);
750 
751 			mutex_unlock(&dpm_list_mtx);
752 
753 			device_resume_noirq(dev, state, false);
754 
755 			put_device(dev);
756 
757 			mutex_lock(&dpm_list_mtx);
758 		}
759 	}
760 	mutex_unlock(&dpm_list_mtx);
761 	async_synchronize_full();
762 	dpm_show_time(starttime, state, 0, "noirq");
763 	if (async_error)
764 		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
765 
766 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
767 }
768 
769 /**
770  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
771  * @state: PM transition of the system being carried out.
772  *
773  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
774  * allow device drivers' interrupt handlers to be called.
775  */
776 void dpm_resume_noirq(pm_message_t state)
777 {
778 	dpm_noirq_resume_devices(state);
779 
780 	resume_device_irqs();
781 	device_wakeup_disarm_wake_irqs();
782 }
783 
784 /**
785  * device_resume_early - Execute an "early resume" callback for given device.
786  * @dev: Device to handle.
787  * @state: PM transition of the system being carried out.
788  * @async: If true, the device is being resumed asynchronously.
789  *
790  * Runtime PM is disabled for @dev while this function is being executed.
791  */
792 static void device_resume_early(struct device *dev, pm_message_t state, bool async)
793 {
794 	pm_callback_t callback = NULL;
795 	const char *info = NULL;
796 	int error = 0;
797 
798 	TRACE_DEVICE(dev);
799 	TRACE_RESUME(0);
800 
801 	if (dev->power.syscore || dev->power.direct_complete)
802 		goto Out;
803 
804 	if (!dev->power.is_late_suspended)
805 		goto Out;
806 
807 	if (!dpm_wait_for_superior(dev, async))
808 		goto Out;
809 
810 	if (dev->pm_domain) {
811 		info = "early power domain ";
812 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
813 	} else if (dev->type && dev->type->pm) {
814 		info = "early type ";
815 		callback = pm_late_early_op(dev->type->pm, state);
816 	} else if (dev->class && dev->class->pm) {
817 		info = "early class ";
818 		callback = pm_late_early_op(dev->class->pm, state);
819 	} else if (dev->bus && dev->bus->pm) {
820 		info = "early bus ";
821 		callback = pm_late_early_op(dev->bus->pm, state);
822 	}
823 	if (callback)
824 		goto Run;
825 
826 	if (dev_pm_skip_resume(dev))
827 		goto Skip;
828 
829 	if (dev->driver && dev->driver->pm) {
830 		info = "early driver ";
831 		callback = pm_late_early_op(dev->driver->pm, state);
832 	}
833 
834 Run:
835 	error = dpm_run_callback(callback, dev, state, info);
836 
837 Skip:
838 	dev->power.is_late_suspended = false;
839 
840 Out:
841 	TRACE_RESUME(error);
842 
843 	pm_runtime_enable(dev);
844 	complete_all(&dev->power.completion);
845 
846 	if (error) {
847 		async_error = error;
848 		dpm_save_failed_dev(dev_name(dev));
849 		pm_dev_err(dev, state, async ? " async early" : " early", error);
850 	}
851 }
852 
853 static void async_resume_early(void *data, async_cookie_t cookie)
854 {
855 	struct device *dev = data;
856 
857 	device_resume_early(dev, pm_transition, true);
858 	put_device(dev);
859 }
860 
861 /**
862  * dpm_resume_early - Execute "early resume" callbacks for all devices.
863  * @state: PM transition of the system being carried out.
864  */
865 void dpm_resume_early(pm_message_t state)
866 {
867 	struct device *dev;
868 	ktime_t starttime = ktime_get();
869 
870 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
871 
872 	async_error = 0;
873 	pm_transition = state;
874 
875 	mutex_lock(&dpm_list_mtx);
876 
877 	/*
878 	 * Trigger the resume of "async" devices upfront so they don't have to
879 	 * wait for the "non-async" ones they don't depend on.
880 	 */
881 	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
882 		dpm_clear_async_state(dev);
883 		dpm_async_fn(dev, async_resume_early);
884 	}
885 
886 	while (!list_empty(&dpm_late_early_list)) {
887 		dev = to_device(dpm_late_early_list.next);
888 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
889 
890 		if (!dev->power.work_in_progress) {
891 			get_device(dev);
892 
893 			mutex_unlock(&dpm_list_mtx);
894 
895 			device_resume_early(dev, state, false);
896 
897 			put_device(dev);
898 
899 			mutex_lock(&dpm_list_mtx);
900 		}
901 	}
902 	mutex_unlock(&dpm_list_mtx);
903 	async_synchronize_full();
904 	dpm_show_time(starttime, state, 0, "early");
905 	if (async_error)
906 		dpm_save_failed_step(SUSPEND_RESUME_EARLY);
907 
908 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
909 }
910 
911 /**
912  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
913  * @state: PM transition of the system being carried out.
914  */
915 void dpm_resume_start(pm_message_t state)
916 {
917 	dpm_resume_noirq(state);
918 	dpm_resume_early(state);
919 }
920 EXPORT_SYMBOL_GPL(dpm_resume_start);
921 
922 /**
923  * device_resume - Execute "resume" callbacks for given device.
924  * @dev: Device to handle.
925  * @state: PM transition of the system being carried out.
926  * @async: If true, the device is being resumed asynchronously.
927  */
928 static void device_resume(struct device *dev, pm_message_t state, bool async)
929 {
930 	pm_callback_t callback = NULL;
931 	const char *info = NULL;
932 	int error = 0;
933 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
934 
935 	TRACE_DEVICE(dev);
936 	TRACE_RESUME(0);
937 
938 	if (dev->power.syscore)
939 		goto Complete;
940 
941 	if (!dev->power.is_suspended)
942 		goto Complete;
943 
944 	if (dev->power.direct_complete) {
945 		/*
946 		 * Allow new children to be added under the device after this
947 		 * point if it has no PM callbacks.
948 		 */
949 		if (dev->power.no_pm_callbacks)
950 			dev->power.is_prepared = false;
951 
952 		/* Match the pm_runtime_disable() in device_suspend(). */
953 		pm_runtime_enable(dev);
954 		goto Complete;
955 	}
956 
957 	if (!dpm_wait_for_superior(dev, async))
958 		goto Complete;
959 
960 	dpm_watchdog_set(&wd, dev);
961 	device_lock(dev);
962 
963 	/*
964 	 * This is a fib.  But we'll allow new children to be added below
965 	 * a resumed device, even if the device hasn't been completed yet.
966 	 */
967 	dev->power.is_prepared = false;
968 
969 	if (dev->pm_domain) {
970 		info = "power domain ";
971 		callback = pm_op(&dev->pm_domain->ops, state);
972 		goto Driver;
973 	}
974 
975 	if (dev->type && dev->type->pm) {
976 		info = "type ";
977 		callback = pm_op(dev->type->pm, state);
978 		goto Driver;
979 	}
980 
981 	if (dev->class && dev->class->pm) {
982 		info = "class ";
983 		callback = pm_op(dev->class->pm, state);
984 		goto Driver;
985 	}
986 
987 	if (dev->bus) {
988 		if (dev->bus->pm) {
989 			info = "bus ";
990 			callback = pm_op(dev->bus->pm, state);
991 		} else if (dev->bus->resume) {
992 			info = "legacy bus ";
993 			callback = dev->bus->resume;
994 			goto End;
995 		}
996 	}
997 
998  Driver:
999 	if (!callback && dev->driver && dev->driver->pm) {
1000 		info = "driver ";
1001 		callback = pm_op(dev->driver->pm, state);
1002 	}
1003 
1004  End:
1005 	error = dpm_run_callback(callback, dev, state, info);
1006 	dev->power.is_suspended = false;
1007 
1008 	device_unlock(dev);
1009 	dpm_watchdog_clear(&wd);
1010 
1011  Complete:
1012 	complete_all(&dev->power.completion);
1013 
1014 	TRACE_RESUME(error);
1015 
1016 	if (error) {
1017 		async_error = error;
1018 		dpm_save_failed_dev(dev_name(dev));
1019 		pm_dev_err(dev, state, async ? " async" : "", error);
1020 	}
1021 }
1022 
1023 static void async_resume(void *data, async_cookie_t cookie)
1024 {
1025 	struct device *dev = data;
1026 
1027 	device_resume(dev, pm_transition, true);
1028 	put_device(dev);
1029 }
1030 
1031 /**
1032  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1033  * @state: PM transition of the system being carried out.
1034  *
1035  * Execute the appropriate "resume" callback for all devices whose status
1036  * indicates that they are suspended.
1037  */
1038 void dpm_resume(pm_message_t state)
1039 {
1040 	struct device *dev;
1041 	ktime_t starttime = ktime_get();
1042 
1043 	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1044 	might_sleep();
1045 
1046 	pm_transition = state;
1047 	async_error = 0;
1048 
1049 	mutex_lock(&dpm_list_mtx);
1050 
1051 	/*
1052 	 * Trigger the resume of "async" devices upfront so they don't have to
1053 	 * wait for the "non-async" ones they don't depend on.
1054 	 */
1055 	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
1056 		dpm_clear_async_state(dev);
1057 		dpm_async_fn(dev, async_resume);
1058 	}
1059 
1060 	while (!list_empty(&dpm_suspended_list)) {
1061 		dev = to_device(dpm_suspended_list.next);
1062 		list_move_tail(&dev->power.entry, &dpm_prepared_list);
1063 
1064 		if (!dev->power.work_in_progress) {
1065 			get_device(dev);
1066 
1067 			mutex_unlock(&dpm_list_mtx);
1068 
1069 			device_resume(dev, state, false);
1070 
1071 			put_device(dev);
1072 
1073 			mutex_lock(&dpm_list_mtx);
1074 		}
1075 	}
1076 	mutex_unlock(&dpm_list_mtx);
1077 	async_synchronize_full();
1078 	dpm_show_time(starttime, state, 0, NULL);
1079 	if (async_error)
1080 		dpm_save_failed_step(SUSPEND_RESUME);
1081 
1082 	cpufreq_resume();
1083 	devfreq_resume();
1084 	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1085 }
1086 
1087 /**
1088  * device_complete - Complete a PM transition for given device.
1089  * @dev: Device to handle.
1090  * @state: PM transition of the system being carried out.
1091  */
1092 static void device_complete(struct device *dev, pm_message_t state)
1093 {
1094 	void (*callback)(struct device *) = NULL;
1095 	const char *info = NULL;
1096 
1097 	if (dev->power.syscore)
1098 		goto out;
1099 
1100 	device_lock(dev);
1101 
1102 	if (dev->pm_domain) {
1103 		info = "completing power domain ";
1104 		callback = dev->pm_domain->ops.complete;
1105 	} else if (dev->type && dev->type->pm) {
1106 		info = "completing type ";
1107 		callback = dev->type->pm->complete;
1108 	} else if (dev->class && dev->class->pm) {
1109 		info = "completing class ";
1110 		callback = dev->class->pm->complete;
1111 	} else if (dev->bus && dev->bus->pm) {
1112 		info = "completing bus ";
1113 		callback = dev->bus->pm->complete;
1114 	}
1115 
1116 	if (!callback && dev->driver && dev->driver->pm) {
1117 		info = "completing driver ";
1118 		callback = dev->driver->pm->complete;
1119 	}
1120 
1121 	if (callback) {
1122 		pm_dev_dbg(dev, state, info);
1123 		callback(dev);
1124 	}
1125 
1126 	device_unlock(dev);
1127 
1128 out:
1129 	/* If enabling runtime PM for the device is blocked, unblock it. */
1130 	pm_runtime_unblock(dev);
1131 	pm_runtime_put(dev);
1132 }
1133 
1134 /**
1135  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1136  * @state: PM transition of the system being carried out.
1137  *
1138  * Execute the ->complete() callbacks for all devices whose PM status is not
1139  * DPM_ON (this allows new devices to be registered).
1140  */
1141 void dpm_complete(pm_message_t state)
1142 {
1143 	struct list_head list;
1144 
1145 	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1146 	might_sleep();
1147 
1148 	INIT_LIST_HEAD(&list);
1149 	mutex_lock(&dpm_list_mtx);
1150 	while (!list_empty(&dpm_prepared_list)) {
1151 		struct device *dev = to_device(dpm_prepared_list.prev);
1152 
1153 		get_device(dev);
1154 		dev->power.is_prepared = false;
1155 		list_move(&dev->power.entry, &list);
1156 
1157 		mutex_unlock(&dpm_list_mtx);
1158 
1159 		trace_device_pm_callback_start(dev, "", state.event);
1160 		device_complete(dev, state);
1161 		trace_device_pm_callback_end(dev, 0);
1162 
1163 		put_device(dev);
1164 
1165 		mutex_lock(&dpm_list_mtx);
1166 	}
1167 	list_splice(&list, &dpm_list);
1168 	mutex_unlock(&dpm_list_mtx);
1169 
1170 	/* Allow device probing and trigger re-probing of deferred devices */
1171 	device_unblock_probing();
1172 	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1173 }
1174 
1175 /**
1176  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1177  * @state: PM transition of the system being carried out.
1178  *
1179  * Execute "resume" callbacks for all devices and complete the PM transition of
1180  * the system.
1181  */
1182 void dpm_resume_end(pm_message_t state)
1183 {
1184 	dpm_resume(state);
1185 	dpm_complete(state);
1186 }
1187 EXPORT_SYMBOL_GPL(dpm_resume_end);
1188 
1189 
1190 /*------------------------- Suspend routines -------------------------*/
1191 
1192 /**
1193  * resume_event - Return a "resume" message for given "suspend" sleep state.
1194  * @sleep_state: PM message representing a sleep state.
1195  *
1196  * Return a PM message representing the resume event corresponding to given
1197  * sleep state.
1198  */
1199 static pm_message_t resume_event(pm_message_t sleep_state)
1200 {
1201 	switch (sleep_state.event) {
1202 	case PM_EVENT_SUSPEND:
1203 		return PMSG_RESUME;
1204 	case PM_EVENT_FREEZE:
1205 	case PM_EVENT_QUIESCE:
1206 		return PMSG_RECOVER;
1207 	case PM_EVENT_HIBERNATE:
1208 		return PMSG_RESTORE;
1209 	}
1210 	return PMSG_ON;
1211 }
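
/*
 * For instance, resume_event(PMSG_SUSPEND) is PMSG_RESUME,
 * resume_event(PMSG_FREEZE) is PMSG_RECOVER, and
 * resume_event(PMSG_HIBERNATE) is PMSG_RESTORE; any other sleep state
 * maps to PMSG_ON.
 */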
1212 
1213 static void dpm_superior_set_must_resume(struct device *dev)
1214 {
1215 	struct device_link *link;
1216 	int idx;
1217 
1218 	if (dev->parent)
1219 		dev->parent->power.must_resume = true;
1220 
1221 	idx = device_links_read_lock();
1222 
1223 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1224 		link->supplier->power.must_resume = true;
1225 
1226 	device_links_read_unlock(idx);
1227 }
1228 
1229 /**
1230  * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1231  * @dev: Device to handle.
1232  * @state: PM transition of the system being carried out.
1233  * @async: If true, the device is being suspended asynchronously.
1234  *
1235  * The driver of @dev will not receive interrupts while this function is being
1236  * executed.
1237  */
1238 static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1239 {
1240 	pm_callback_t callback = NULL;
1241 	const char *info = NULL;
1242 	int error = 0;
1243 
1244 	TRACE_DEVICE(dev);
1245 	TRACE_SUSPEND(0);
1246 
1247 	dpm_wait_for_subordinate(dev, async);
1248 
1249 	if (async_error)
1250 		goto Complete;
1251 
1252 	if (dev->power.syscore || dev->power.direct_complete)
1253 		goto Complete;
1254 
1255 	if (dev->pm_domain) {
1256 		info = "noirq power domain ";
1257 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1258 	} else if (dev->type && dev->type->pm) {
1259 		info = "noirq type ";
1260 		callback = pm_noirq_op(dev->type->pm, state);
1261 	} else if (dev->class && dev->class->pm) {
1262 		info = "noirq class ";
1263 		callback = pm_noirq_op(dev->class->pm, state);
1264 	} else if (dev->bus && dev->bus->pm) {
1265 		info = "noirq bus ";
1266 		callback = pm_noirq_op(dev->bus->pm, state);
1267 	}
1268 	if (callback)
1269 		goto Run;
1270 
1271 	if (dev_pm_skip_suspend(dev))
1272 		goto Skip;
1273 
1274 	if (dev->driver && dev->driver->pm) {
1275 		info = "noirq driver ";
1276 		callback = pm_noirq_op(dev->driver->pm, state);
1277 	}
1278 
1279 Run:
1280 	error = dpm_run_callback(callback, dev, state, info);
1281 	if (error) {
1282 		async_error = error;
1283 		dpm_save_failed_dev(dev_name(dev));
1284 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
1285 		goto Complete;
1286 	}
1287 
1288 Skip:
1289 	dev->power.is_noirq_suspended = true;
1290 
1291 	/*
1292 	 * Devices must be resumed unless they are explicitly allowed to be left
1293 	 * in suspend, but even in that case skipping the resume of devices that
1294 	 * were in use right before the system suspend (as indicated by their
1295 	 * runtime PM usage counters and child counters) would be suboptimal.
1296 	 */
1297 	if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1298 	      dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
1299 		dev->power.must_resume = true;
1300 
1301 	if (dev->power.must_resume)
1302 		dpm_superior_set_must_resume(dev);
1303 
1304 Complete:
1305 	complete_all(&dev->power.completion);
1306 	TRACE_SUSPEND(error);
1307 	return error;
1308 }
1309 
1310 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1311 {
1312 	struct device *dev = data;
1313 
1314 	device_suspend_noirq(dev, pm_transition, true);
1315 	put_device(dev);
1316 }
1317 
1318 static int dpm_noirq_suspend_devices(pm_message_t state)
1319 {
1320 	ktime_t starttime = ktime_get();
1321 	int error = 0;
1322 
1323 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1324 
1325 	pm_transition = state;
1326 	async_error = 0;
1327 
1328 	mutex_lock(&dpm_list_mtx);
1329 
1330 	while (!list_empty(&dpm_late_early_list)) {
1331 		struct device *dev = to_device(dpm_late_early_list.prev);
1332 
1333 		list_move(&dev->power.entry, &dpm_noirq_list);
1334 
1335 		dpm_clear_async_state(dev);
1336 		if (dpm_async_fn(dev, async_suspend_noirq))
1337 			continue;
1338 
1339 		get_device(dev);
1340 
1341 		mutex_unlock(&dpm_list_mtx);
1342 
1343 		error = device_suspend_noirq(dev, state, false);
1344 
1345 		put_device(dev);
1346 
1347 		mutex_lock(&dpm_list_mtx);
1348 
1349 		if (error || async_error)
1350 			break;
1351 	}
1352 
1353 	mutex_unlock(&dpm_list_mtx);
1354 
1355 	async_synchronize_full();
1356 	if (!error)
1357 		error = async_error;
1358 
1359 	if (error)
1360 		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1361 
1362 	dpm_show_time(starttime, state, error, "noirq");
1363 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1364 	return error;
1365 }
1366 
1367 /**
1368  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1369  * @state: PM transition of the system being carried out.
1370  *
1371  * Prevent device drivers' interrupt handlers from being called and invoke
1372  * "noirq" suspend callbacks for all non-sysdev devices.
1373  */
1374 int dpm_suspend_noirq(pm_message_t state)
1375 {
1376 	int ret;
1377 
1378 	device_wakeup_arm_wake_irqs();
1379 	suspend_device_irqs();
1380 
1381 	ret = dpm_noirq_suspend_devices(state);
1382 	if (ret)
1383 		dpm_resume_noirq(resume_event(state));
1384 
1385 	return ret;
1386 }
1387 
1388 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1389 {
1390 	struct device *parent = dev->parent;
1391 
1392 	if (!parent)
1393 		return;
1394 
1395 	spin_lock_irq(&parent->power.lock);
1396 
1397 	if (device_wakeup_path(dev) && !parent->power.ignore_children)
1398 		parent->power.wakeup_path = true;
1399 
1400 	spin_unlock_irq(&parent->power.lock);
1401 }
1402 
1403 /**
1404  * device_suspend_late - Execute a "late suspend" callback for given device.
1405  * @dev: Device to handle.
1406  * @state: PM transition of the system being carried out.
1407  * @async: If true, the device is being suspended asynchronously.
1408  *
1409  * Runtime PM is disabled for @dev while this function is being executed.
1410  */
1411 static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
1412 {
1413 	pm_callback_t callback = NULL;
1414 	const char *info = NULL;
1415 	int error = 0;
1416 
1417 	TRACE_DEVICE(dev);
1418 	TRACE_SUSPEND(0);
1419 
1420 	/*
1421 	 * Disable runtime PM for the device without checking if there is a
1422 	 * pending resume request for it.
1423 	 */
1424 	__pm_runtime_disable(dev, false);
1425 
1426 	dpm_wait_for_subordinate(dev, async);
1427 
1428 	if (async_error)
1429 		goto Complete;
1430 
1431 	if (pm_wakeup_pending()) {
1432 		async_error = -EBUSY;
1433 		goto Complete;
1434 	}
1435 
1436 	if (dev->power.syscore || dev->power.direct_complete)
1437 		goto Complete;
1438 
1439 	if (dev->pm_domain) {
1440 		info = "late power domain ";
1441 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1442 	} else if (dev->type && dev->type->pm) {
1443 		info = "late type ";
1444 		callback = pm_late_early_op(dev->type->pm, state);
1445 	} else if (dev->class && dev->class->pm) {
1446 		info = "late class ";
1447 		callback = pm_late_early_op(dev->class->pm, state);
1448 	} else if (dev->bus && dev->bus->pm) {
1449 		info = "late bus ";
1450 		callback = pm_late_early_op(dev->bus->pm, state);
1451 	}
1452 	if (callback)
1453 		goto Run;
1454 
1455 	if (dev_pm_skip_suspend(dev))
1456 		goto Skip;
1457 
1458 	if (dev->driver && dev->driver->pm) {
1459 		info = "late driver ";
1460 		callback = pm_late_early_op(dev->driver->pm, state);
1461 	}
1462 
1463 Run:
1464 	error = dpm_run_callback(callback, dev, state, info);
1465 	if (error) {
1466 		async_error = error;
1467 		dpm_save_failed_dev(dev_name(dev));
1468 		pm_dev_err(dev, state, async ? " async late" : " late", error);
1469 		goto Complete;
1470 	}
1471 	dpm_propagate_wakeup_to_parent(dev);
1472 
1473 Skip:
1474 	dev->power.is_late_suspended = true;
1475 
1476 Complete:
1477 	TRACE_SUSPEND(error);
1478 	complete_all(&dev->power.completion);
1479 	return error;
1480 }
1481 
1482 static void async_suspend_late(void *data, async_cookie_t cookie)
1483 {
1484 	struct device *dev = data;
1485 
1486 	device_suspend_late(dev, pm_transition, true);
1487 	put_device(dev);
1488 }
1489 
1490 /**
1491  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1492  * @state: PM transition of the system being carried out.
1493  */
1494 int dpm_suspend_late(pm_message_t state)
1495 {
1496 	ktime_t starttime = ktime_get();
1497 	int error = 0;
1498 
1499 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1500 
1501 	pm_transition = state;
1502 	async_error = 0;
1503 
1504 	wake_up_all_idle_cpus();
1505 
1506 	mutex_lock(&dpm_list_mtx);
1507 
1508 	while (!list_empty(&dpm_suspended_list)) {
1509 		struct device *dev = to_device(dpm_suspended_list.prev);
1510 
1511 		list_move(&dev->power.entry, &dpm_late_early_list);
1512 
1513 		dpm_clear_async_state(dev);
1514 		if (dpm_async_fn(dev, async_suspend_late))
1515 			continue;
1516 
1517 		get_device(dev);
1518 
1519 		mutex_unlock(&dpm_list_mtx);
1520 
1521 		error = device_suspend_late(dev, state, false);
1522 
1523 		put_device(dev);
1524 
1525 		mutex_lock(&dpm_list_mtx);
1526 
1527 		if (error || async_error)
1528 			break;
1529 	}
1530 
1531 	mutex_unlock(&dpm_list_mtx);
1532 
1533 	async_synchronize_full();
1534 	if (!error)
1535 		error = async_error;
1536 
1537 	if (error) {
1538 		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1539 		dpm_resume_early(resume_event(state));
1540 	}
1541 	dpm_show_time(starttime, state, error, "late");
1542 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1543 	return error;
1544 }
1545 
1546 /**
1547  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1548  * @state: PM transition of the system being carried out.
1549  */
1550 int dpm_suspend_end(pm_message_t state)
1551 {
1552 	ktime_t starttime = ktime_get();
1553 	int error;
1554 
1555 	error = dpm_suspend_late(state);
1556 	if (error)
1557 		goto out;
1558 
1559 	error = dpm_suspend_noirq(state);
1560 	if (error)
1561 		dpm_resume_early(resume_event(state));
1562 
1563 out:
1564 	dpm_show_time(starttime, state, error, "end");
1565 	return error;
1566 }
1567 EXPORT_SYMBOL_GPL(dpm_suspend_end);
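
/*
 * A rough sketch of how the phases implemented in this file pair up over
 * a successful suspend/resume cycle (error handling omitted; these calls
 * are made by the PM core, not by individual drivers):
 *
 *	dpm_suspend(PMSG_SUSPEND);	- "suspend" callbacks
 *	dpm_suspend_end(PMSG_SUSPEND);	- "late", then "noirq" callbacks
 *	... the system sleeps ...
 *	dpm_resume_start(PMSG_RESUME);	- "noirq", then "early" callbacks
 *	dpm_resume_end(PMSG_RESUME);	- "resume", then "complete" callbacks
 */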
1568 
1569 /**
1570  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1571  * @dev: Device to suspend.
1572  * @state: PM transition of the system being carried out.
1573  * @cb: Suspend callback to execute.
1574  * @info: string description of caller.
1575  */
1576 static int legacy_suspend(struct device *dev, pm_message_t state,
1577 			  int (*cb)(struct device *dev, pm_message_t state),
1578 			  const char *info)
1579 {
1580 	int error;
1581 	ktime_t calltime;
1582 
1583 	calltime = initcall_debug_start(dev, cb);
1584 
1585 	trace_device_pm_callback_start(dev, info, state.event);
1586 	error = cb(dev, state);
1587 	trace_device_pm_callback_end(dev, error);
1588 	suspend_report_result(dev, cb, error);
1589 
1590 	initcall_debug_report(dev, calltime, cb, error);
1591 
1592 	return error;
1593 }
1594 
1595 static void dpm_clear_superiors_direct_complete(struct device *dev)
1596 {
1597 	struct device_link *link;
1598 	int idx;
1599 
1600 	if (dev->parent) {
1601 		spin_lock_irq(&dev->parent->power.lock);
1602 		dev->parent->power.direct_complete = false;
1603 		spin_unlock_irq(&dev->parent->power.lock);
1604 	}
1605 
1606 	idx = device_links_read_lock();
1607 
1608 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1609 		spin_lock_irq(&link->supplier->power.lock);
1610 		link->supplier->power.direct_complete = false;
1611 		spin_unlock_irq(&link->supplier->power.lock);
1612 	}
1613 
1614 	device_links_read_unlock(idx);
1615 }
1616 
1617 /**
1618  * device_suspend - Execute "suspend" callbacks for given device.
1619  * @dev: Device to handle.
1620  * @state: PM transition of the system being carried out.
1621  * @async: If true, the device is being suspended asynchronously.
1622  */
1623 static int device_suspend(struct device *dev, pm_message_t state, bool async)
1624 {
1625 	pm_callback_t callback = NULL;
1626 	const char *info = NULL;
1627 	int error = 0;
1628 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1629 
1630 	TRACE_DEVICE(dev);
1631 	TRACE_SUSPEND(0);
1632 
1633 	dpm_wait_for_subordinate(dev, async);
1634 
1635 	if (async_error) {
1636 		dev->power.direct_complete = false;
1637 		goto Complete;
1638 	}
1639 
1640 	/*
1641 	 * Wait for possible runtime PM transitions of the device in progress
1642 	 * to complete and if there's a runtime resume request pending for it,
1643 	 * resume it before proceeding with invoking the system-wide suspend
1644 	 * callbacks for it.
1645 	 *
1646 	 * If the system-wide suspend callbacks below change the configuration
1647 	 * of the device, they must disable runtime PM for it or otherwise
1648 	 * ensure that its runtime-resume callbacks will not be confused by that
1649 	 * change in case they are invoked going forward.
1650 	 */
1651 	pm_runtime_barrier(dev);
1652 
1653 	if (pm_wakeup_pending()) {
1654 		dev->power.direct_complete = false;
1655 		async_error = -EBUSY;
1656 		goto Complete;
1657 	}
1658 
1659 	if (dev->power.syscore)
1660 		goto Complete;
1661 
1662 	/* Avoid direct_complete to let wakeup_path propagate. */
1663 	if (device_may_wakeup(dev) || device_wakeup_path(dev))
1664 		dev->power.direct_complete = false;
1665 
1666 	if (dev->power.direct_complete) {
1667 		if (pm_runtime_status_suspended(dev)) {
1668 			pm_runtime_disable(dev);
1669 			if (pm_runtime_status_suspended(dev)) {
1670 				pm_dev_dbg(dev, state, "direct-complete ");
1671 				dev->power.is_suspended = true;
1672 				goto Complete;
1673 			}
1674 
1675 			pm_runtime_enable(dev);
1676 		}
1677 		dev->power.direct_complete = false;
1678 	}
1679 
1680 	dev->power.may_skip_resume = true;
1681 	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
1682 
1683 	dpm_watchdog_set(&wd, dev);
1684 	device_lock(dev);
1685 
1686 	if (dev->pm_domain) {
1687 		info = "power domain ";
1688 		callback = pm_op(&dev->pm_domain->ops, state);
1689 		goto Run;
1690 	}
1691 
1692 	if (dev->type && dev->type->pm) {
1693 		info = "type ";
1694 		callback = pm_op(dev->type->pm, state);
1695 		goto Run;
1696 	}
1697 
1698 	if (dev->class && dev->class->pm) {
1699 		info = "class ";
1700 		callback = pm_op(dev->class->pm, state);
1701 		goto Run;
1702 	}
1703 
1704 	if (dev->bus) {
1705 		if (dev->bus->pm) {
1706 			info = "bus ";
1707 			callback = pm_op(dev->bus->pm, state);
1708 		} else if (dev->bus->suspend) {
1709 			pm_dev_dbg(dev, state, "legacy bus ");
1710 			error = legacy_suspend(dev, state, dev->bus->suspend,
1711 						"legacy bus ");
1712 			goto End;
1713 		}
1714 	}
1715 
1716  Run:
1717 	if (!callback && dev->driver && dev->driver->pm) {
1718 		info = "driver ";
1719 		callback = pm_op(dev->driver->pm, state);
1720 	}
1721 
1722 	error = dpm_run_callback(callback, dev, state, info);
1723 
1724  End:
1725 	if (!error) {
1726 		dev->power.is_suspended = true;
1727 		if (device_may_wakeup(dev))
1728 			dev->power.wakeup_path = true;
1729 
1730 		dpm_propagate_wakeup_to_parent(dev);
1731 		dpm_clear_superiors_direct_complete(dev);
1732 	}
1733 
1734 	device_unlock(dev);
1735 	dpm_watchdog_clear(&wd);
1736 
1737  Complete:
1738 	if (error) {
1739 		async_error = error;
1740 		dpm_save_failed_dev(dev_name(dev));
1741 		pm_dev_err(dev, state, async ? " async" : "", error);
1742 	}
1743 
1744 	complete_all(&dev->power.completion);
1745 	TRACE_SUSPEND(error);
1746 	return error;
1747 }
1748 
1749 static void async_suspend(void *data, async_cookie_t cookie)
1750 {
1751 	struct device *dev = data;
1752 
1753 	device_suspend(dev, pm_transition, true);
1754 	put_device(dev);
1755 }
1756 
1757 /**
1758  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1759  * @state: PM transition of the system being carried out.
1760  */
1761 int dpm_suspend(pm_message_t state)
1762 {
1763 	ktime_t starttime = ktime_get();
1764 	int error = 0;
1765 
1766 	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1767 	might_sleep();
1768 
1769 	devfreq_suspend();
1770 	cpufreq_suspend();
1771 
1772 	pm_transition = state;
1773 	async_error = 0;
1774 
1775 	mutex_lock(&dpm_list_mtx);
1776 
1777 	while (!list_empty(&dpm_prepared_list)) {
1778 		struct device *dev = to_device(dpm_prepared_list.prev);
1779 
1780 		list_move(&dev->power.entry, &dpm_suspended_list);
1781 
1782 		dpm_clear_async_state(dev);
1783 		if (dpm_async_fn(dev, async_suspend))
1784 			continue;
1785 
1786 		get_device(dev);
1787 
1788 		mutex_unlock(&dpm_list_mtx);
1789 
1790 		error = device_suspend(dev, state, false);
1791 
1792 		put_device(dev);
1793 
1794 		mutex_lock(&dpm_list_mtx);
1795 
1796 		if (error || async_error)
1797 			break;
1798 	}
1799 
1800 	mutex_unlock(&dpm_list_mtx);
1801 
1802 	async_synchronize_full();
1803 	if (!error)
1804 		error = async_error;
1805 
1806 	if (error)
1807 		dpm_save_failed_step(SUSPEND_SUSPEND);
1808 
1809 	dpm_show_time(starttime, state, error, NULL);
1810 	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1811 	return error;
1812 }
1813 
1814 static bool device_prepare_smart_suspend(struct device *dev)
1815 {
1816 	struct device_link *link;
1817 	bool ret = true;
1818 	int idx;
1819 
1820 	/*
1821 	 * The "smart suspend" feature is enabled for devices whose drivers ask
1822 	 * for it and for devices without PM callbacks.
1823 	 *
1824 	 * However, if "smart suspend" is not enabled for the device's parent
1825 	 * or any of its suppliers that take runtime PM into account, it cannot
1826 	 * be enabled for the device either.
1827 	 */
1828 	if (!dev->power.no_pm_callbacks &&
1829 	    !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
1830 		return false;
1831 
1832 	if (dev->parent && !dev_pm_smart_suspend(dev->parent) &&
1833 	    !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent))
1834 		return false;
1835 
1836 	idx = device_links_read_lock();
1837 
1838 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1839 		if (!(link->flags & DL_FLAG_PM_RUNTIME))
1840 			continue;
1841 
1842 		if (!dev_pm_smart_suspend(link->supplier) &&
1843 		    !pm_runtime_blocked(link->supplier)) {
1844 			ret = false;
1845 			break;
1846 		}
1847 	}
1848 
1849 	device_links_read_unlock(idx);
1850 
1851 	return ret;
1852 }
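
/*
 * Illustrative sketch: how a driver asks for the "smart suspend" handling
 * evaluated above, typically once at probe time.  "example_smart_probe" is
 * a hypothetical name used only for the example.
 */
static int example_smart_probe(struct device *dev)
{
	/*
	 * Let the PM core skip this driver's system-wide suspend callbacks
	 * when the device is already runtime-suspended at the start of the
	 * transition (see dev_pm_skip_suspend() below).
	 */
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
	return 0;
}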
1853 
1854 /**
1855  * device_prepare - Prepare a device for system power transition.
1856  * @dev: Device to handle.
1857  * @state: PM transition of the system being carried out.
1858  *
1859  * Execute the ->prepare() callback(s) for the given device.  No new children of the
1860  * device may be registered after this function has returned.
1861  */
1862 static int device_prepare(struct device *dev, pm_message_t state)
1863 {
1864 	int (*callback)(struct device *) = NULL;
1865 	bool smart_suspend;
1866 	int ret = 0;
1867 
1868 	/*
1869 	 * If a device's parent goes into runtime suspend at the wrong time,
1870 	 * it won't be possible to resume the device.  To prevent this we
1871 	 * block runtime suspend here, during the prepare phase, and allow
1872 	 * it again during the complete phase.
1873 	 */
1874 	pm_runtime_get_noresume(dev);
1875 	/*
1876 	 * If runtime PM is disabled for the device at this point and it has
1877 	 * never been enabled so far, it should not be enabled until this system
1878 	 * suspend-resume cycle is complete, so prepare to trigger a warning on
1879 	 * subsequent attempts to enable it.
1880 	 */
1881 	smart_suspend = !pm_runtime_block_if_disabled(dev);
1882 
1883 	if (dev->power.syscore)
1884 		return 0;
1885 
1886 	device_lock(dev);
1887 
1888 	dev->power.wakeup_path = false;
1889 
1890 	if (dev->power.no_pm_callbacks)
1891 		goto unlock;
1892 
1893 	if (dev->pm_domain)
1894 		callback = dev->pm_domain->ops.prepare;
1895 	else if (dev->type && dev->type->pm)
1896 		callback = dev->type->pm->prepare;
1897 	else if (dev->class && dev->class->pm)
1898 		callback = dev->class->pm->prepare;
1899 	else if (dev->bus && dev->bus->pm)
1900 		callback = dev->bus->pm->prepare;
1901 
1902 	if (!callback && dev->driver && dev->driver->pm)
1903 		callback = dev->driver->pm->prepare;
1904 
1905 	if (callback)
1906 		ret = callback(dev);
1907 
1908 unlock:
1909 	device_unlock(dev);
1910 
1911 	if (ret < 0) {
1912 		suspend_report_result(dev, callback, ret);
1913 		pm_runtime_put(dev);
1914 		return ret;
1915 	}
1916 	/* Do not enable "smart suspend" for devices with disabled runtime PM. */
1917 	if (smart_suspend)
1918 		smart_suspend = device_prepare_smart_suspend(dev);
1919 
1920 	spin_lock_irq(&dev->power.lock);
1921 
1922 	dev->power.smart_suspend = smart_suspend;
1923 	/*
1924 	 * A positive return value from ->prepare() means "this device appears
1925 	 * to be runtime-suspended and its state is fine, so if it really is
1926 	 * runtime-suspended, you can leave it in that state provided that you
1927 	 * will do the same thing with all of its descendants".  This only
1928 	 * applies to suspend transitions, however.
1929 	 */
1930 	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1931 		(ret > 0 || dev->power.no_pm_callbacks) &&
1932 		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
1933 
1934 	spin_unlock_irq(&dev->power.lock);
1935 
1936 	return 0;
1937 }
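
/*
 * Illustrative sketch: a hypothetical ->prepare() callback that opts into
 * the "direct complete" optimization evaluated above by returning a
 * positive value.  "example_prepare" is not a real callback in this file.
 */
static int example_prepare(struct device *dev)
{
	/*
	 * Nothing needs to be done here; report that the device's current
	 * (possibly runtime-suspended) state is acceptable, so that it and
	 * its descendants may be left alone during a suspend transition.
	 */
	return 1;
}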
1938 
1939 /**
1940  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1941  * @state: PM transition of the system being carried out.
1942  *
1943  * Execute the ->prepare() callback(s) for all devices.
1944  */
1945 int dpm_prepare(pm_message_t state)
1946 {
1947 	int error = 0;
1948 
1949 	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1950 	might_sleep();
1951 
1952 	/*
1953 	 * Give the known devices a chance to complete their probes before
1954 	 * probing is disabled below. This synchronization point matters at
1955 	 * least at boot time and during hibernation restore.
1956 	 */
1957 	wait_for_device_probe();
1958 	/*
1959 	 * Probing devices during suspend or hibernation is unsafe and would
1960 	 * make system behavior unpredictable, so prohibit device probing here
1961 	 * and defer any probes instead. Normal behavior will be restored in
1962 	 * dpm_complete().
1963 	 */
1964 	device_block_probing();
1965 
1966 	mutex_lock(&dpm_list_mtx);
1967 	while (!list_empty(&dpm_list) && !error) {
1968 		struct device *dev = to_device(dpm_list.next);
1969 
1970 		get_device(dev);
1971 
1972 		mutex_unlock(&dpm_list_mtx);
1973 
1974 		trace_device_pm_callback_start(dev, "", state.event);
1975 		error = device_prepare(dev, state);
1976 		trace_device_pm_callback_end(dev, error);
1977 
1978 		mutex_lock(&dpm_list_mtx);
1979 
1980 		if (!error) {
1981 			dev->power.is_prepared = true;
1982 			if (!list_empty(&dev->power.entry))
1983 				list_move_tail(&dev->power.entry, &dpm_prepared_list);
1984 		} else if (error == -EAGAIN) {
1985 			error = 0;
1986 		} else {
1987 			dev_info(dev, "not prepared for power transition: code %d\n",
1988 				 error);
1989 		}
1990 
1991 		mutex_unlock(&dpm_list_mtx);
1992 
1993 		put_device(dev);
1994 
1995 		mutex_lock(&dpm_list_mtx);
1996 	}
1997 	mutex_unlock(&dpm_list_mtx);
1998 	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1999 	return error;
2000 }
2001 
2002 /**
2003  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
2004  * @state: PM transition of the system being carried out.
2005  *
2006  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
2007  * callbacks for them.
2008  */
2009 int dpm_suspend_start(pm_message_t state)
2010 {
2011 	ktime_t starttime = ktime_get();
2012 	int error;
2013 
2014 	error = dpm_prepare(state);
2015 	if (error)
2016 		dpm_save_failed_step(SUSPEND_PREPARE);
2017 	else
2018 		error = dpm_suspend(state);
2019 
2020 	dpm_show_time(starttime, state, error, "start");
2021 	return error;
2022 }
2023 EXPORT_SYMBOL_GPL(dpm_suspend_start);
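
/*
 * Illustrative sketch: the typical calling pattern for dpm_suspend_start()
 * in a system sleep path.  On failure, the devices already prepared or
 * suspended are unwound with dpm_resume_end().  "example_enter_sleep" is a
 * hypothetical name used only for the example.
 */
static int example_enter_sleep(void)
{
	int error;

	error = dpm_suspend_start(PMSG_SUSPEND);
	if (error) {
		/* Roll back: resume devices and run ->complete() callbacks. */
		dpm_resume_end(PMSG_RESUME);
		return error;
	}

	/* ... continue with dpm_suspend_end() and the platform-specific steps ... */
	return 0;
}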
2024 
2025 void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
2026 {
2027 	if (ret)
2028 		dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret);
2029 }
2030 EXPORT_SYMBOL_GPL(__suspend_report_result);
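
/*
 * Illustrative sketch: how a subsystem would funnel a callback's return
 * value through the suspend_report_result() wrapper so that failures are
 * logged together with the callback's name.  "example_invoke_suspend" and
 * its arguments are hypothetical.
 */
static int example_invoke_suspend(struct device *dev, pm_message_t state,
				  int (*cb)(struct device *, pm_message_t))
{
	int error = cb(dev, state);

	suspend_report_result(dev, cb, error);
	return error;
}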
2031 
2032 /**
2033  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2034  * @subordinate: Device that needs to wait for @dev.
2035  * @dev: Device to wait for.
2036  */
2037 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2038 {
2039 	dpm_wait(dev, subordinate->power.async_suspend);
2040 	return async_error;
2041 }
2042 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
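
/*
 * Illustrative sketch: a driver's suspend path using
 * device_pm_wait_for_dev() to order itself after another device it depends
 * on.  "example_dev_suspend" and the @peer argument are hypothetical.
 */
static int example_dev_suspend(struct device *dev, struct device *peer)
{
	int error;

	/* Do not proceed until the peer has finished its own suspend. */
	error = device_pm_wait_for_dev(dev, peer);
	if (error)
		return error;

	/* ... now put the device itself into a low-power state ... */
	return 0;
}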
2043 
2044 /**
2045  * dpm_for_each_dev - device iterator.
2046  * @data: data for the callback.
2047  * @fn: function to be called for each device.
2048  *
2049  * Iterate over devices in dpm_list, and call @fn for each device,
2050  * passing it @data.
2051  */
2052 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
2053 {
2054 	struct device *dev;
2055 
2056 	if (!fn)
2057 		return;
2058 
2059 	device_pm_lock();
2060 	list_for_each_entry(dev, &dpm_list, power.entry)
2061 		fn(dev, data);
2062 	device_pm_unlock();
2063 }
2064 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
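
/*
 * Illustrative sketch: a callback with the signature expected by
 * dpm_for_each_dev(), here simply counting the devices on dpm_list.
 * "example_count_dev" is a hypothetical name used only for the example.
 */
static void example_count_dev(struct device *dev, void *data)
{
	unsigned int *count = data;

	(*count)++;
}

/* Usage: unsigned int n = 0; dpm_for_each_dev(&n, example_count_dev); */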
2065 
2066 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2067 {
2068 	if (!ops)
2069 		return true;
2070 
2071 	return !ops->prepare &&
2072 	       !ops->suspend &&
2073 	       !ops->suspend_late &&
2074 	       !ops->suspend_noirq &&
2075 	       !ops->resume_noirq &&
2076 	       !ops->resume_early &&
2077 	       !ops->resume &&
2078 	       !ops->complete;
2079 }
2080 
2081 void device_pm_check_callbacks(struct device *dev)
2082 {
2083 	unsigned long flags;
2084 
2085 	spin_lock_irqsave(&dev->power.lock, flags);
2086 	dev->power.no_pm_callbacks =
2087 		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2088 		 !dev->bus->suspend && !dev->bus->resume)) &&
2089 		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2090 		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2091 		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2092 		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2093 		 !dev->driver->suspend && !dev->driver->resume));
2094 	spin_unlock_irqrestore(&dev->power.lock, flags);
2095 }
2096 
2097 bool dev_pm_skip_suspend(struct device *dev)
2098 {
2099 	return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev);
2100 }
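
/*
 * Illustrative sketch: a late suspend callback consulting
 * dev_pm_skip_suspend() above to leave an already runtime-suspended
 * "smart suspend" device alone.  "example_late_suspend" is a hypothetical
 * name used only for the example.
 */
static int example_late_suspend(struct device *dev)
{
	if (dev_pm_skip_suspend(dev))
		return 0;

	/* ... otherwise put the device into a low-power state ... */
	return 0;
}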
2101