xref: /linux/drivers/base/power/main.c (revision ddb7a62af2e766eabb4ab7080e6ed8d6b8915302)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/main.c - Where the driver meets power management.
4  *
5  * Copyright (c) 2003 Patrick Mochel
6  * Copyright (c) 2003 Open Source Development Lab
7  *
8  * The driver model core calls device_pm_add() when a device is registered.
9  * This will initialize the embedded device_pm_info object in the device
10  * and add it to the list of power-controlled devices. sysfs entries for
11  * controlling device power management will also be added.
12  *
13  * A separate list is used for keeping track of power info, because the power
14  * domain dependencies may differ from the ancestral dependencies that the
15  * subsystem list maintains.
16  */
17 
18 #define pr_fmt(fmt) "PM: " fmt
19 #define dev_fmt pr_fmt
20 
21 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/sched/debug.h>
31 #include <linux/async.h>
32 #include <linux/suspend.h>
33 #include <trace/events/power.h>
34 #include <linux/cpufreq.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37 
38 #include "../base.h"
39 #include "power.h"
40 
41 typedef int (*pm_callback_t)(struct device *);
42 
43 #define list_for_each_entry_rcu_locked(pos, head, member) \
44 	list_for_each_entry_rcu(pos, head, member, \
45 			device_links_read_lock_held())
46 
47 /*
48  * The entries in dpm_list are in depth-first order, simply
49  * because children are guaranteed to be discovered after parents, and
50  * are inserted at the back of the list on discovery.
51  *
52  * Since device_pm_add() may be called with a device lock held,
53  * we must never try to acquire a device lock while holding
54  * dpm_list_mutex.
55  */
56 
57 LIST_HEAD(dpm_list);
58 static LIST_HEAD(dpm_prepared_list);
59 static LIST_HEAD(dpm_suspended_list);
60 static LIST_HEAD(dpm_late_early_list);
61 static LIST_HEAD(dpm_noirq_list);
62 
63 static DEFINE_MUTEX(dpm_list_mtx);
64 static pm_message_t pm_transition;
65 
66 static DEFINE_MUTEX(async_wip_mtx);
67 static int async_error;
68 
69 /**
70  * pm_hibernate_is_recovering - Check if recovering from a hibernation error.
71  *
72  * Used to query whether dev_pm_ops.thaw() is being called for the normal
73  * hibernation case or while recovering from an error.
74  *
75  * Return: %true in the error-recovery case, %false in the normal case.
76  */
77 bool pm_hibernate_is_recovering(void)
78 {
79 	return pm_transition.event == PM_EVENT_RECOVER;
80 }
81 EXPORT_SYMBOL_GPL(pm_hibernate_is_recovering);
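
/*
 * Hypothetical sketch (not part of this file): how a driver's ->thaw()
 * callback might use pm_hibernate_is_recovering() to pick between the normal
 * post-freeze path and error recovery.  All foo_* names are placeholders
 * assumed to be defined by the driver.
 */
static int foo_thaw(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	/* Image creation failed: bring the hardware fully back up. */
	if (pm_hibernate_is_recovering())
		return foo_full_reinit(priv);

	/* Normal thaw after the snapshot was taken: lighter re-init. */
	return foo_resume_io(priv);
}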
82 
83 static const char *pm_verb(int event)
84 {
85 	switch (event) {
86 	case PM_EVENT_SUSPEND:
87 		return "suspend";
88 	case PM_EVENT_RESUME:
89 		return "resume";
90 	case PM_EVENT_FREEZE:
91 		return "freeze";
92 	case PM_EVENT_QUIESCE:
93 		return "quiesce";
94 	case PM_EVENT_HIBERNATE:
95 		return "hibernate";
96 	case PM_EVENT_THAW:
97 		return "thaw";
98 	case PM_EVENT_RESTORE:
99 		return "restore";
100 	case PM_EVENT_RECOVER:
101 		return "recover";
102 	default:
103 		return "(unknown PM event)";
104 	}
105 }
106 
107 /**
108  * device_pm_sleep_init - Initialize system suspend-related device fields.
109  * @dev: Device object being initialized.
110  */
111 void device_pm_sleep_init(struct device *dev)
112 {
113 	dev->power.is_prepared = false;
114 	dev->power.is_suspended = false;
115 	dev->power.is_noirq_suspended = false;
116 	dev->power.is_late_suspended = false;
117 	init_completion(&dev->power.completion);
118 	complete_all(&dev->power.completion);
119 	dev->power.wakeup = NULL;
120 	INIT_LIST_HEAD(&dev->power.entry);
121 }
122 
123 /**
124  * device_pm_lock - Lock the list of active devices used by the PM core.
125  */
126 void device_pm_lock(void)
127 {
128 	mutex_lock(&dpm_list_mtx);
129 }
130 
131 /**
132  * device_pm_unlock - Unlock the list of active devices used by the PM core.
133  */
134 void device_pm_unlock(void)
135 {
136 	mutex_unlock(&dpm_list_mtx);
137 }
138 
139 /**
140  * device_pm_add - Add a device to the PM core's list of active devices.
141  * @dev: Device to add to the list.
142  */
143 void device_pm_add(struct device *dev)
144 {
145 	/* Skip PM setup/initialization. */
146 	if (device_pm_not_required(dev))
147 		return;
148 
149 	pr_debug("Adding info for %s:%s\n",
150 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
151 	device_pm_check_callbacks(dev);
152 	mutex_lock(&dpm_list_mtx);
153 	if (dev->parent && dev->parent->power.is_prepared)
154 		dev_warn(dev, "parent %s should not be sleeping\n",
155 			dev_name(dev->parent));
156 	list_add_tail(&dev->power.entry, &dpm_list);
157 	dev->power.in_dpm_list = true;
158 	mutex_unlock(&dpm_list_mtx);
159 }
160 
161 /**
162  * device_pm_remove - Remove a device from the PM core's list of active devices.
163  * @dev: Device to be removed from the list.
164  */
165 void device_pm_remove(struct device *dev)
166 {
167 	if (device_pm_not_required(dev))
168 		return;
169 
170 	pr_debug("Removing info for %s:%s\n",
171 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
172 	complete_all(&dev->power.completion);
173 	mutex_lock(&dpm_list_mtx);
174 	list_del_init(&dev->power.entry);
175 	dev->power.in_dpm_list = false;
176 	mutex_unlock(&dpm_list_mtx);
177 	device_wakeup_disable(dev);
178 	pm_runtime_remove(dev);
179 	device_pm_check_callbacks(dev);
180 }
181 
182 /**
183  * device_pm_move_before - Move device in the PM core's list of active devices.
184  * @deva: Device to move in dpm_list.
185  * @devb: Device @deva should come before.
186  */
187 void device_pm_move_before(struct device *deva, struct device *devb)
188 {
189 	pr_debug("Moving %s:%s before %s:%s\n",
190 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
191 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
192 	/* Delete deva from dpm_list and reinsert before devb. */
193 	list_move_tail(&deva->power.entry, &devb->power.entry);
194 }
195 
196 /**
197  * device_pm_move_after - Move device in the PM core's list of active devices.
198  * @deva: Device to move in dpm_list.
199  * @devb: Device @deva should come after.
200  */
201 void device_pm_move_after(struct device *deva, struct device *devb)
202 {
203 	pr_debug("Moving %s:%s after %s:%s\n",
204 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
205 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
206 	/* Delete deva from dpm_list and reinsert after devb. */
207 	list_move(&deva->power.entry, &devb->power.entry);
208 }
209 
210 /**
211  * device_pm_move_last - Move device to end of the PM core's list of devices.
212  * @dev: Device to move in dpm_list.
213  */
214 void device_pm_move_last(struct device *dev)
215 {
216 	pr_debug("Moving %s:%s to end of list\n",
217 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
218 	list_move_tail(&dev->power.entry, &dpm_list);
219 }
220 
221 static ktime_t initcall_debug_start(struct device *dev, void *cb)
222 {
223 	if (!pm_print_times_enabled)
224 		return 0;
225 
226 	dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
227 		 task_pid_nr(current),
228 		 dev->parent ? dev_name(dev->parent) : "none");
229 	return ktime_get();
230 }
231 
232 static void initcall_debug_report(struct device *dev, ktime_t calltime,
233 				  void *cb, int error)
234 {
235 	ktime_t rettime;
236 
237 	if (!pm_print_times_enabled)
238 		return;
239 
240 	rettime = ktime_get();
241 	dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
242 		 (unsigned long long)ktime_us_delta(rettime, calltime));
243 }
244 
245 /**
246  * dpm_wait - Wait for a PM operation to complete.
247  * @dev: Device to wait for.
248  * @async: If unset, wait only if the device's power.async_suspend flag is set.
249  */
250 static void dpm_wait(struct device *dev, bool async)
251 {
252 	if (!dev)
253 		return;
254 
255 	if (async || (pm_async_enabled && dev->power.async_suspend))
256 		wait_for_completion(&dev->power.completion);
257 }
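
/*
 * Hypothetical sketch (not part of this file): power.async_suspend, checked
 * by dpm_wait() above and is_async() further below, is set by drivers or
 * subsystems that opt a device into asynchronous suspend/resume, typically
 * at probe time.  foo_probe() is a placeholder; device_enable_async_suspend()
 * is the real helper declared in <linux/pm.h>.
 */
static int foo_probe(struct device *dev)
{
	/* Let the PM core run this device's PM callbacks in an async thread. */
	device_enable_async_suspend(dev);
	return 0;
}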
258 
259 static int dpm_wait_fn(struct device *dev, void *async_ptr)
260 {
261 	dpm_wait(dev, *((bool *)async_ptr));
262 	return 0;
263 }
264 
265 static void dpm_wait_for_children(struct device *dev, bool async)
266 {
267 	device_for_each_child(dev, &async, dpm_wait_fn);
268 }
269 
270 static void dpm_wait_for_suppliers(struct device *dev, bool async)
271 {
272 	struct device_link *link;
273 	int idx;
274 
275 	idx = device_links_read_lock();
276 
277 	/*
278 	 * If the supplier goes away right after we've checked the link to it,
279 	 * we'll wait for its completion to change the state, but that's fine,
280 	 * because the only things that will block as a result are the SRCU
281 	 * callbacks freeing the link objects for the links in the list we're
282 	 * walking.
283 	 */
284 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
285 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
286 			dpm_wait(link->supplier, async);
287 
288 	device_links_read_unlock(idx);
289 }
290 
291 static bool dpm_wait_for_superior(struct device *dev, bool async)
292 {
293 	struct device *parent;
294 
295 	/*
296 	 * If the device is resumed asynchronously and the parent's callback
297 	 * deletes both the device and the parent itself, the parent object may
298 	 * be freed while this function is running, so avoid that by reference
299 	 * counting the parent once more unless the device has been deleted
300 	 * already (in which case return right away).
301 	 */
302 	mutex_lock(&dpm_list_mtx);
303 
304 	if (!device_pm_initialized(dev)) {
305 		mutex_unlock(&dpm_list_mtx);
306 		return false;
307 	}
308 
309 	parent = get_device(dev->parent);
310 
311 	mutex_unlock(&dpm_list_mtx);
312 
313 	dpm_wait(parent, async);
314 	put_device(parent);
315 
316 	dpm_wait_for_suppliers(dev, async);
317 
318 	/*
319 	 * If the parent's callback has deleted the device, attempting to resume
320 	 * it would be invalid, so avoid doing that then.
321 	 */
322 	return device_pm_initialized(dev);
323 }
324 
325 static void dpm_wait_for_consumers(struct device *dev, bool async)
326 {
327 	struct device_link *link;
328 	int idx;
329 
330 	idx = device_links_read_lock();
331 
332 	/*
333 	 * The status of a device link can only be changed from "dormant" by a
334 	 * probe, but that cannot happen during system suspend/resume.  In
335 	 * theory it can change to "dormant" at that time, but then it is
336 	 * reasonable to wait for the target device anyway (eg. if it goes
337 	 * away, it's better to wait for it to go away completely and then
338 	 * continue instead of trying to continue in parallel with its
339 	 * unregistration).
340 	 */
341 	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
342 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
343 			dpm_wait(link->consumer, async);
344 
345 	device_links_read_unlock(idx);
346 }
347 
348 static void dpm_wait_for_subordinate(struct device *dev, bool async)
349 {
350 	dpm_wait_for_children(dev, async);
351 	dpm_wait_for_consumers(dev, async);
352 }
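
/*
 * Hypothetical sketch (not part of this file): the supplier/consumer waits
 * above are driven by device links.  A consumer driver creating a link like
 * the one below gets its supplier resumed before it and suspended after it.
 * foo_add_supplier_link() and its parameters are placeholders;
 * device_link_add() and DL_FLAG_PM_RUNTIME are the real API.
 */
static void foo_add_supplier_link(struct device *consumer, struct device *supplier)
{
	/* Managed link that also couples the two devices for runtime PM. */
	if (!device_link_add(consumer, supplier, DL_FLAG_PM_RUNTIME))
		dev_warn(consumer, "failed to link to supplier\n");
}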
353 
354 /**
355  * pm_op - Return the PM operation appropriate for given PM event.
356  * @ops: PM operations to choose from.
357  * @state: PM transition of the system being carried out.
358  */
359 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
360 {
361 	switch (state.event) {
362 #ifdef CONFIG_SUSPEND
363 	case PM_EVENT_SUSPEND:
364 		return ops->suspend;
365 	case PM_EVENT_RESUME:
366 		return ops->resume;
367 #endif /* CONFIG_SUSPEND */
368 #ifdef CONFIG_HIBERNATE_CALLBACKS
369 	case PM_EVENT_FREEZE:
370 	case PM_EVENT_QUIESCE:
371 		return ops->freeze;
372 	case PM_EVENT_HIBERNATE:
373 		return ops->poweroff;
374 	case PM_EVENT_THAW:
375 	case PM_EVENT_RECOVER:
376 		return ops->thaw;
377 	case PM_EVENT_RESTORE:
378 		return ops->restore;
379 #endif /* CONFIG_HIBERNATE_CALLBACKS */
380 	}
381 
382 	return NULL;
383 }
384 
385 /**
386  * pm_late_early_op - Return the PM operation appropriate for given PM event.
387  * @ops: PM operations to choose from.
388  * @state: PM transition of the system being carried out.
389  *
390  * Runtime PM is disabled for the target device while the returned callback runs.
391  */
392 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
393 				      pm_message_t state)
394 {
395 	switch (state.event) {
396 #ifdef CONFIG_SUSPEND
397 	case PM_EVENT_SUSPEND:
398 		return ops->suspend_late;
399 	case PM_EVENT_RESUME:
400 		return ops->resume_early;
401 #endif /* CONFIG_SUSPEND */
402 #ifdef CONFIG_HIBERNATE_CALLBACKS
403 	case PM_EVENT_FREEZE:
404 	case PM_EVENT_QUIESCE:
405 		return ops->freeze_late;
406 	case PM_EVENT_HIBERNATE:
407 		return ops->poweroff_late;
408 	case PM_EVENT_THAW:
409 	case PM_EVENT_RECOVER:
410 		return ops->thaw_early;
411 	case PM_EVENT_RESTORE:
412 		return ops->restore_early;
413 #endif /* CONFIG_HIBERNATE_CALLBACKS */
414 	}
415 
416 	return NULL;
417 }
418 
419 /**
420  * pm_noirq_op - Return the PM operation appropriate for given PM event.
421  * @ops: PM operations to choose from.
422  * @state: PM transition of the system being carried out.
423  *
424  * The driver of the target device will not receive interrupts while the
425  * returned callback is running.
426  */
427 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
428 {
429 	switch (state.event) {
430 #ifdef CONFIG_SUSPEND
431 	case PM_EVENT_SUSPEND:
432 		return ops->suspend_noirq;
433 	case PM_EVENT_RESUME:
434 		return ops->resume_noirq;
435 #endif /* CONFIG_SUSPEND */
436 #ifdef CONFIG_HIBERNATE_CALLBACKS
437 	case PM_EVENT_FREEZE:
438 	case PM_EVENT_QUIESCE:
439 		return ops->freeze_noirq;
440 	case PM_EVENT_HIBERNATE:
441 		return ops->poweroff_noirq;
442 	case PM_EVENT_THAW:
443 	case PM_EVENT_RECOVER:
444 		return ops->thaw_noirq;
445 	case PM_EVENT_RESTORE:
446 		return ops->restore_noirq;
447 #endif /* CONFIG_HIBERNATE_CALLBACKS */
448 	}
449 
450 	return NULL;
451 }
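
/*
 * Hypothetical sketch (not part of this file): the three selectors above each
 * pick one member of a driver's dev_pm_ops per phase.  For PM_EVENT_SUSPEND,
 * pm_op() returns ->suspend, pm_late_early_op() returns ->suspend_late and
 * pm_noirq_op() returns ->suspend_noirq, with the matching ->resume* members
 * used on the way back.  The foo_* callbacks are placeholders assumed to be
 * defined by the driver; the SET_*_PM_OPS macros are the real API.
 */
static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend_late, foo_resume_early)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
};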
452 
453 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
454 {
455 	dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
456 		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
457 		", may wakeup" : "", dev->power.driver_flags);
458 }
459 
460 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
461 			int error)
462 {
463 	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
464 		error);
465 }
466 
467 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
468 			  const char *info)
469 {
470 	ktime_t calltime;
471 	u64 usecs64;
472 	int usecs;
473 
474 	calltime = ktime_get();
475 	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
476 	do_div(usecs64, NSEC_PER_USEC);
477 	usecs = usecs64;
478 	if (usecs == 0)
479 		usecs = 1;
480 
481 	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
482 		  info ?: "", info ? " " : "", pm_verb(state.event),
483 		  error ? "aborted" : "complete",
484 		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
485 }
486 
487 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
488 			    pm_message_t state, const char *info)
489 {
490 	ktime_t calltime;
491 	int error;
492 
493 	if (!cb)
494 		return 0;
495 
496 	calltime = initcall_debug_start(dev, cb);
497 
498 	pm_dev_dbg(dev, state, info);
499 	trace_device_pm_callback_start(dev, info, state.event);
500 	error = cb(dev);
501 	trace_device_pm_callback_end(dev, error);
502 	suspend_report_result(dev, cb, error);
503 
504 	initcall_debug_report(dev, calltime, cb, error);
505 
506 	return error;
507 }
508 
509 #ifdef CONFIG_DPM_WATCHDOG
510 struct dpm_watchdog {
511 	struct device		*dev;
512 	struct task_struct	*tsk;
513 	struct timer_list	timer;
514 	bool			fatal;
515 };
516 
517 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
518 	struct dpm_watchdog wd
519 
520 /**
521  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
522  * @t: The timer that PM watchdog depends on.
523  *
524  * Called when a driver has timed out suspending or resuming.
525  * There's not much we can do here to recover so panic() to
526  * capture a crash-dump in pstore.
527  */
528 static void dpm_watchdog_handler(struct timer_list *t)
529 {
530 	struct dpm_watchdog *wd = timer_container_of(wd, t, timer);
531 	struct timer_list *timer = &wd->timer;
532 	unsigned int time_left;
533 
534 	if (wd->fatal) {
535 		dev_emerg(wd->dev, "**** DPM device timeout ****\n");
536 		show_stack(wd->tsk, NULL, KERN_EMERG);
537 		panic("%s %s: unrecoverable failure\n",
538 			dev_driver_string(wd->dev), dev_name(wd->dev));
539 	}
540 
541 	time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
542 	dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
543 		 CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
544 	show_stack(wd->tsk, NULL, KERN_WARNING);
545 
546 	wd->fatal = true;
547 	mod_timer(timer, jiffies + HZ * time_left);
548 }
549 
550 /**
551  * dpm_watchdog_set - Enable pm watchdog for given device.
552  * @wd: Watchdog. Must be allocated on the stack.
553  * @dev: Device to handle.
554  */
555 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
556 {
557 	struct timer_list *timer = &wd->timer;
558 
559 	wd->dev = dev;
560 	wd->tsk = current;
561 	wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
562 
563 	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
564 	/* use same timeout value for both suspend and resume */
565 	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
566 	add_timer(timer);
567 }
568 
569 /**
570  * dpm_watchdog_clear - Disable suspend/resume watchdog.
571  * @wd: Watchdog to disable.
572  */
573 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
574 {
575 	struct timer_list *timer = &wd->timer;
576 
577 	timer_delete_sync(timer);
578 	timer_destroy_on_stack(timer);
579 }
580 #else
581 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
582 #define dpm_watchdog_set(x, y)
583 #define dpm_watchdog_clear(x)
584 #endif
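
/*
 * Illustrative note (not part of this file): device_resume() and
 * device_suspend() below wrap the potentially slow callback invocation with
 * this watchdog, so a stuck callback first produces a warning backtrace after
 * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT seconds and then a panic once a total
 * of CONFIG_DPM_WATCHDOG_TIMEOUT seconds has elapsed.  The (simplified)
 * usage pattern is:
 *
 *	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 *
 *	dpm_watchdog_set(&wd, dev);
 *	error = dpm_run_callback(callback, dev, state, info);
 *	dpm_watchdog_clear(&wd);
 */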
585 
586 /*------------------------- Resume routines -------------------------*/
587 
588 /**
589  * dev_pm_skip_resume - System-wide device resume optimization check.
590  * @dev: Target device.
591  *
592  * Return:
593  * - %false if the transition under way is RESTORE.
594  * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
595  * - The logical negation of %power.must_resume otherwise (that is, when the
596  *   transition under way is RESUME).
597  */
598 bool dev_pm_skip_resume(struct device *dev)
599 {
600 	if (pm_transition.event == PM_EVENT_RESTORE)
601 		return false;
602 
603 	if (pm_transition.event == PM_EVENT_THAW)
604 		return dev_pm_skip_suspend(dev);
605 
606 	return !dev->power.must_resume;
607 }
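
/*
 * Hypothetical sketch (not part of this file): power.may_skip_resume and
 * power.must_resume only allow a resume to be skipped if the driver opts in
 * via PM driver flags, typically at probe time.  bar_probe() is a
 * placeholder; dev_pm_set_driver_flags() and the DPM_FLAG_* constants are
 * the real API from <linux/pm_runtime.h> and <linux/pm.h>.
 */
static int bar_probe(struct device *dev)
{
	/*
	 * SMART_SUSPEND: the PM core may skip suspend callbacks for a device
	 * that is already runtime-suspended.  MAY_SKIP_RESUME: the device may
	 * be left suspended across system resume if nothing needs it.
	 */
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
				     DPM_FLAG_MAY_SKIP_RESUME);
	return 0;
}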
608 
609 static bool is_async(struct device *dev)
610 {
611 	return dev->power.async_suspend && pm_async_enabled
612 		&& !pm_trace_is_enabled();
613 }
614 
615 static bool __dpm_async(struct device *dev, async_func_t func)
616 {
617 	if (dev->power.work_in_progress)
618 		return true;
619 
620 	if (!is_async(dev))
621 		return false;
622 
623 	dev->power.work_in_progress = true;
624 
625 	get_device(dev);
626 
627 	if (async_schedule_dev_nocall(func, dev))
628 		return true;
629 
630 	put_device(dev);
631 
632 	return false;
633 }
634 
635 static bool dpm_async_fn(struct device *dev, async_func_t func)
636 {
637 	guard(mutex)(&async_wip_mtx);
638 
639 	return __dpm_async(dev, func);
640 }
641 
642 static int dpm_async_with_cleanup(struct device *dev, void *fn)
643 {
644 	guard(mutex)(&async_wip_mtx);
645 
646 	if (!__dpm_async(dev, fn))
647 		dev->power.work_in_progress = false;
648 
649 	return 0;
650 }
651 
652 static void dpm_async_resume_children(struct device *dev, async_func_t func)
653 {
654 	/*
655 	 * Prevent racing with dpm_clear_async_state() during initial list
656 	 * walks in dpm_noirq_resume_devices(), dpm_resume_early(), and
657 	 * dpm_resume().
658 	 */
659 	guard(mutex)(&dpm_list_mtx);
660 
661 	/*
662 	 * Start processing "async" children of the device unless it's been
663 	 * started already for them.
664 	 */
665 	device_for_each_child(dev, func, dpm_async_with_cleanup);
666 }
667 
668 static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
669 {
670 	struct device_link *link;
671 	int idx;
672 
673 	dpm_async_resume_children(dev, func);
674 
675 	idx = device_links_read_lock();
676 
677 	/* Start processing the device's "async" consumers. */
678 	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
679 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
680 			dpm_async_with_cleanup(link->consumer, func);
681 
682 	device_links_read_unlock(idx);
683 }
684 
685 static void dpm_clear_async_state(struct device *dev)
686 {
687 	reinit_completion(&dev->power.completion);
688 	dev->power.work_in_progress = false;
689 }
690 
691 static bool dpm_root_device(struct device *dev)
692 {
693 	lockdep_assert_held(&dpm_list_mtx);
694 
695 	/*
696 	 * Since this function is required to run under dpm_list_mtx, the
697 	 * list_empty() below will only return true if the device's list of
698  * suppliers is actually empty before calling it.
699 	 */
700 	return !dev->parent && list_empty(&dev->links.suppliers);
701 }
702 
703 static void async_resume_noirq(void *data, async_cookie_t cookie);
704 
705 /**
706  * device_resume_noirq - Execute a "noirq resume" callback for given device.
707  * @dev: Device to handle.
708  * @state: PM transition of the system being carried out.
709  * @async: If true, the device is being resumed asynchronously.
710  *
711  * The driver of @dev will not receive interrupts while this function is being
712  * executed.
713  */
714 static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
715 {
716 	pm_callback_t callback = NULL;
717 	const char *info = NULL;
718 	bool skip_resume;
719 	int error = 0;
720 
721 	TRACE_DEVICE(dev);
722 	TRACE_RESUME(0);
723 
724 	if (dev->power.syscore || dev->power.direct_complete)
725 		goto Out;
726 
727 	if (!dev->power.is_noirq_suspended)
728 		goto Out;
729 
730 	if (!dpm_wait_for_superior(dev, async))
731 		goto Out;
732 
733 	skip_resume = dev_pm_skip_resume(dev);
734 	/*
735 	 * If the driver callback is skipped below or by the middle layer
736 	 * callback and device_resume_early() also skips the driver callback for
737 	 * this device later, it needs to appear as "suspended" to PM-runtime,
738 	 * so change its status accordingly.
739 	 *
740 	 * Otherwise, the device is going to be resumed, so set its PM-runtime
741 	 * status to "active" unless its power.smart_suspend flag is clear, in
742 	 * which case it is not necessary to update its PM-runtime status.
743 	 */
744 	if (skip_resume)
745 		pm_runtime_set_suspended(dev);
746 	else if (dev_pm_smart_suspend(dev))
747 		pm_runtime_set_active(dev);
748 
749 	if (dev->pm_domain) {
750 		info = "noirq power domain ";
751 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
752 	} else if (dev->type && dev->type->pm) {
753 		info = "noirq type ";
754 		callback = pm_noirq_op(dev->type->pm, state);
755 	} else if (dev->class && dev->class->pm) {
756 		info = "noirq class ";
757 		callback = pm_noirq_op(dev->class->pm, state);
758 	} else if (dev->bus && dev->bus->pm) {
759 		info = "noirq bus ";
760 		callback = pm_noirq_op(dev->bus->pm, state);
761 	}
762 	if (callback)
763 		goto Run;
764 
765 	if (skip_resume)
766 		goto Skip;
767 
768 	if (dev->driver && dev->driver->pm) {
769 		info = "noirq driver ";
770 		callback = pm_noirq_op(dev->driver->pm, state);
771 	}
772 
773 Run:
774 	error = dpm_run_callback(callback, dev, state, info);
775 
776 Skip:
777 	dev->power.is_noirq_suspended = false;
778 
779 Out:
780 	complete_all(&dev->power.completion);
781 	TRACE_RESUME(error);
782 
783 	if (error) {
784 		WRITE_ONCE(async_error, error);
785 		dpm_save_failed_dev(dev_name(dev));
786 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
787 	}
788 
789 	dpm_async_resume_subordinate(dev, async_resume_noirq);
790 }
791 
792 static void async_resume_noirq(void *data, async_cookie_t cookie)
793 {
794 	struct device *dev = data;
795 
796 	device_resume_noirq(dev, pm_transition, true);
797 	put_device(dev);
798 }
799 
800 static void dpm_noirq_resume_devices(pm_message_t state)
801 {
802 	struct device *dev;
803 	ktime_t starttime = ktime_get();
804 
805 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
806 
807 	async_error = 0;
808 	pm_transition = state;
809 
810 	mutex_lock(&dpm_list_mtx);
811 
812 	/*
813 	 * Start processing "async" root devices upfront so they don't wait for
814 	 * the "sync" devices they don't depend on.
815 	 */
816 	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
817 		dpm_clear_async_state(dev);
818 		if (dpm_root_device(dev))
819 			dpm_async_with_cleanup(dev, async_resume_noirq);
820 	}
821 
822 	while (!list_empty(&dpm_noirq_list)) {
823 		dev = to_device(dpm_noirq_list.next);
824 		list_move_tail(&dev->power.entry, &dpm_late_early_list);
825 
826 		if (!dpm_async_fn(dev, async_resume_noirq)) {
827 			get_device(dev);
828 
829 			mutex_unlock(&dpm_list_mtx);
830 
831 			device_resume_noirq(dev, state, false);
832 
833 			put_device(dev);
834 
835 			mutex_lock(&dpm_list_mtx);
836 		}
837 	}
838 	mutex_unlock(&dpm_list_mtx);
839 	async_synchronize_full();
840 	dpm_show_time(starttime, state, 0, "noirq");
841 	if (READ_ONCE(async_error))
842 		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
843 
844 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
845 }
846 
847 /**
848  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
849  * @state: PM transition of the system being carried out.
850  *
851  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
852  * allow device drivers' interrupt handlers to be called.
853  */
854 void dpm_resume_noirq(pm_message_t state)
855 {
856 	dpm_noirq_resume_devices(state);
857 
858 	resume_device_irqs();
859 	device_wakeup_disarm_wake_irqs();
860 }
861 
862 static void async_resume_early(void *data, async_cookie_t cookie);
863 
864 /**
865  * device_resume_early - Execute an "early resume" callback for given device.
866  * @dev: Device to handle.
867  * @state: PM transition of the system being carried out.
868  * @async: If true, the device is being resumed asynchronously.
869  *
870  * Runtime PM is disabled for @dev while this function is being executed.
871  */
872 static void device_resume_early(struct device *dev, pm_message_t state, bool async)
873 {
874 	pm_callback_t callback = NULL;
875 	const char *info = NULL;
876 	int error = 0;
877 
878 	TRACE_DEVICE(dev);
879 	TRACE_RESUME(0);
880 
881 	if (dev->power.syscore || dev->power.direct_complete)
882 		goto Out;
883 
884 	if (!dev->power.is_late_suspended)
885 		goto Out;
886 
887 	if (!dpm_wait_for_superior(dev, async))
888 		goto Out;
889 
890 	if (dev->pm_domain) {
891 		info = "early power domain ";
892 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
893 	} else if (dev->type && dev->type->pm) {
894 		info = "early type ";
895 		callback = pm_late_early_op(dev->type->pm, state);
896 	} else if (dev->class && dev->class->pm) {
897 		info = "early class ";
898 		callback = pm_late_early_op(dev->class->pm, state);
899 	} else if (dev->bus && dev->bus->pm) {
900 		info = "early bus ";
901 		callback = pm_late_early_op(dev->bus->pm, state);
902 	}
903 	if (callback)
904 		goto Run;
905 
906 	if (dev_pm_skip_resume(dev))
907 		goto Skip;
908 
909 	if (dev->driver && dev->driver->pm) {
910 		info = "early driver ";
911 		callback = pm_late_early_op(dev->driver->pm, state);
912 	}
913 
914 Run:
915 	error = dpm_run_callback(callback, dev, state, info);
916 
917 Skip:
918 	dev->power.is_late_suspended = false;
919 
920 Out:
921 	TRACE_RESUME(error);
922 
923 	pm_runtime_enable(dev);
924 	complete_all(&dev->power.completion);
925 
926 	if (error) {
927 		WRITE_ONCE(async_error, error);
928 		dpm_save_failed_dev(dev_name(dev));
929 		pm_dev_err(dev, state, async ? " async early" : " early", error);
930 	}
931 
932 	dpm_async_resume_subordinate(dev, async_resume_early);
933 }
934 
935 static void async_resume_early(void *data, async_cookie_t cookie)
936 {
937 	struct device *dev = data;
938 
939 	device_resume_early(dev, pm_transition, true);
940 	put_device(dev);
941 }
942 
943 /**
944  * dpm_resume_early - Execute "early resume" callbacks for all devices.
945  * @state: PM transition of the system being carried out.
946  */
947 void dpm_resume_early(pm_message_t state)
948 {
949 	struct device *dev;
950 	ktime_t starttime = ktime_get();
951 
952 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
953 
954 	async_error = 0;
955 	pm_transition = state;
956 
957 	mutex_lock(&dpm_list_mtx);
958 
959 	/*
960 	 * Start processing "async" root devices upfront so they don't wait for
961 	 * the "sync" devices they don't depend on.
962 	 */
963 	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
964 		dpm_clear_async_state(dev);
965 		if (dpm_root_device(dev))
966 			dpm_async_with_cleanup(dev, async_resume_early);
967 	}
968 
969 	while (!list_empty(&dpm_late_early_list)) {
970 		dev = to_device(dpm_late_early_list.next);
971 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
972 
973 		if (!dpm_async_fn(dev, async_resume_early)) {
974 			get_device(dev);
975 
976 			mutex_unlock(&dpm_list_mtx);
977 
978 			device_resume_early(dev, state, false);
979 
980 			put_device(dev);
981 
982 			mutex_lock(&dpm_list_mtx);
983 		}
984 	}
985 	mutex_unlock(&dpm_list_mtx);
986 	async_synchronize_full();
987 	dpm_show_time(starttime, state, 0, "early");
988 	if (READ_ONCE(async_error))
989 		dpm_save_failed_step(SUSPEND_RESUME_EARLY);
990 
991 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
992 }
993 
994 /**
995  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
996  * @state: PM transition of the system being carried out.
997  */
998 void dpm_resume_start(pm_message_t state)
999 {
1000 	dpm_resume_noirq(state);
1001 	dpm_resume_early(state);
1002 }
1003 EXPORT_SYMBOL_GPL(dpm_resume_start);
1004 
1005 static void async_resume(void *data, async_cookie_t cookie);
1006 
1007 /**
1008  * device_resume - Execute "resume" callbacks for given device.
1009  * @dev: Device to handle.
1010  * @state: PM transition of the system being carried out.
1011  * @async: If true, the device is being resumed asynchronously.
1012  */
1013 static void device_resume(struct device *dev, pm_message_t state, bool async)
1014 {
1015 	pm_callback_t callback = NULL;
1016 	const char *info = NULL;
1017 	int error = 0;
1018 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1019 
1020 	TRACE_DEVICE(dev);
1021 	TRACE_RESUME(0);
1022 
1023 	if (dev->power.syscore)
1024 		goto Complete;
1025 
1026 	if (!dev->power.is_suspended)
1027 		goto Complete;
1028 
1029 	dev->power.is_suspended = false;
1030 
1031 	if (dev->power.direct_complete) {
1032 		/*
1033 		 * Allow new children to be added under the device after this
1034 		 * point if it has no PM callbacks.
1035 		 */
1036 		if (dev->power.no_pm_callbacks)
1037 			dev->power.is_prepared = false;
1038 
1039 		/* Match the pm_runtime_disable() in device_suspend(). */
1040 		pm_runtime_enable(dev);
1041 		goto Complete;
1042 	}
1043 
1044 	if (!dpm_wait_for_superior(dev, async))
1045 		goto Complete;
1046 
1047 	dpm_watchdog_set(&wd, dev);
1048 	device_lock(dev);
1049 
1050 	/*
1051 	 * This is a fib.  But we'll allow new children to be added below
1052 	 * a resumed device, even if the device hasn't been completed yet.
1053 	 */
1054 	dev->power.is_prepared = false;
1055 
1056 	if (dev->pm_domain) {
1057 		info = "power domain ";
1058 		callback = pm_op(&dev->pm_domain->ops, state);
1059 		goto Driver;
1060 	}
1061 
1062 	if (dev->type && dev->type->pm) {
1063 		info = "type ";
1064 		callback = pm_op(dev->type->pm, state);
1065 		goto Driver;
1066 	}
1067 
1068 	if (dev->class && dev->class->pm) {
1069 		info = "class ";
1070 		callback = pm_op(dev->class->pm, state);
1071 		goto Driver;
1072 	}
1073 
1074 	if (dev->bus) {
1075 		if (dev->bus->pm) {
1076 			info = "bus ";
1077 			callback = pm_op(dev->bus->pm, state);
1078 		} else if (dev->bus->resume) {
1079 			info = "legacy bus ";
1080 			callback = dev->bus->resume;
1081 			goto End;
1082 		}
1083 	}
1084 
1085  Driver:
1086 	if (!callback && dev->driver && dev->driver->pm) {
1087 		info = "driver ";
1088 		callback = pm_op(dev->driver->pm, state);
1089 	}
1090 
1091  End:
1092 	error = dpm_run_callback(callback, dev, state, info);
1093 
1094 	device_unlock(dev);
1095 	dpm_watchdog_clear(&wd);
1096 
1097  Complete:
1098 	complete_all(&dev->power.completion);
1099 
1100 	TRACE_RESUME(error);
1101 
1102 	if (error) {
1103 		WRITE_ONCE(async_error, error);
1104 		dpm_save_failed_dev(dev_name(dev));
1105 		pm_dev_err(dev, state, async ? " async" : "", error);
1106 	}
1107 
1108 	dpm_async_resume_subordinate(dev, async_resume);
1109 }
1110 
1111 static void async_resume(void *data, async_cookie_t cookie)
1112 {
1113 	struct device *dev = data;
1114 
1115 	device_resume(dev, pm_transition, true);
1116 	put_device(dev);
1117 }
1118 
1119 /**
1120  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1121  * @state: PM transition of the system being carried out.
1122  *
1123  * Execute the appropriate "resume" callback for all devices whose status
1124  * indicates that they are suspended.
1125  */
1126 void dpm_resume(pm_message_t state)
1127 {
1128 	struct device *dev;
1129 	ktime_t starttime = ktime_get();
1130 
1131 	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1132 
1133 	pm_transition = state;
1134 	async_error = 0;
1135 
1136 	mutex_lock(&dpm_list_mtx);
1137 
1138 	/*
1139 	 * Start processing "async" root devices upfront so they don't wait for
1140 	 * the "sync" devices they don't depend on.
1141 	 */
1142 	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
1143 		dpm_clear_async_state(dev);
1144 		if (dpm_root_device(dev))
1145 			dpm_async_with_cleanup(dev, async_resume);
1146 	}
1147 
1148 	while (!list_empty(&dpm_suspended_list)) {
1149 		dev = to_device(dpm_suspended_list.next);
1150 		list_move_tail(&dev->power.entry, &dpm_prepared_list);
1151 
1152 		if (!dpm_async_fn(dev, async_resume)) {
1153 			get_device(dev);
1154 
1155 			mutex_unlock(&dpm_list_mtx);
1156 
1157 			device_resume(dev, state, false);
1158 
1159 			put_device(dev);
1160 
1161 			mutex_lock(&dpm_list_mtx);
1162 		}
1163 	}
1164 	mutex_unlock(&dpm_list_mtx);
1165 	async_synchronize_full();
1166 	dpm_show_time(starttime, state, 0, NULL);
1167 	if (READ_ONCE(async_error))
1168 		dpm_save_failed_step(SUSPEND_RESUME);
1169 
1170 	cpufreq_resume();
1171 	devfreq_resume();
1172 	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1173 }
1174 
1175 /**
1176  * device_complete - Complete a PM transition for given device.
1177  * @dev: Device to handle.
1178  * @state: PM transition of the system being carried out.
1179  */
1180 static void device_complete(struct device *dev, pm_message_t state)
1181 {
1182 	void (*callback)(struct device *) = NULL;
1183 	const char *info = NULL;
1184 
1185 	if (dev->power.syscore)
1186 		goto out;
1187 
1188 	device_lock(dev);
1189 
1190 	if (dev->pm_domain) {
1191 		info = "completing power domain ";
1192 		callback = dev->pm_domain->ops.complete;
1193 	} else if (dev->type && dev->type->pm) {
1194 		info = "completing type ";
1195 		callback = dev->type->pm->complete;
1196 	} else if (dev->class && dev->class->pm) {
1197 		info = "completing class ";
1198 		callback = dev->class->pm->complete;
1199 	} else if (dev->bus && dev->bus->pm) {
1200 		info = "completing bus ";
1201 		callback = dev->bus->pm->complete;
1202 	}
1203 
1204 	if (!callback && dev->driver && dev->driver->pm) {
1205 		info = "completing driver ";
1206 		callback = dev->driver->pm->complete;
1207 	}
1208 
1209 	if (callback) {
1210 		pm_dev_dbg(dev, state, info);
1211 		callback(dev);
1212 	}
1213 
1214 	device_unlock(dev);
1215 
1216 out:
1217 	/* If enabling runtime PM for the device is blocked, unblock it. */
1218 	pm_runtime_unblock(dev);
1219 	pm_runtime_put(dev);
1220 }
1221 
1222 /**
1223  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1224  * @state: PM transition of the system being carried out.
1225  *
1226  * Execute the ->complete() callbacks for all devices whose PM status is not
1227  * DPM_ON (this allows new devices to be registered).
1228  */
1229 void dpm_complete(pm_message_t state)
1230 {
1231 	struct list_head list;
1232 
1233 	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1234 
1235 	INIT_LIST_HEAD(&list);
1236 	mutex_lock(&dpm_list_mtx);
1237 	while (!list_empty(&dpm_prepared_list)) {
1238 		struct device *dev = to_device(dpm_prepared_list.prev);
1239 
1240 		get_device(dev);
1241 		dev->power.is_prepared = false;
1242 		list_move(&dev->power.entry, &list);
1243 
1244 		mutex_unlock(&dpm_list_mtx);
1245 
1246 		trace_device_pm_callback_start(dev, "", state.event);
1247 		device_complete(dev, state);
1248 		trace_device_pm_callback_end(dev, 0);
1249 
1250 		put_device(dev);
1251 
1252 		mutex_lock(&dpm_list_mtx);
1253 	}
1254 	list_splice(&list, &dpm_list);
1255 	mutex_unlock(&dpm_list_mtx);
1256 
1257 	/* Allow device probing and trigger re-probing of deferred devices */
1258 	device_unblock_probing();
1259 	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1260 }
1261 
1262 /**
1263  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1264  * @state: PM transition of the system being carried out.
1265  *
1266  * Execute "resume" callbacks for all devices and complete the PM transition of
1267  * the system.
1268  */
1269 void dpm_resume_end(pm_message_t state)
1270 {
1271 	dpm_resume(state);
1272 	pm_restore_gfp_mask();
1273 	dpm_complete(state);
1274 }
1275 EXPORT_SYMBOL_GPL(dpm_resume_end);
1276 
1277 
1278 /*------------------------- Suspend routines -------------------------*/
1279 
1280 static bool dpm_leaf_device(struct device *dev)
1281 {
1282 	struct device *child;
1283 
1284 	lockdep_assert_held(&dpm_list_mtx);
1285 
1286 	child = device_find_any_child(dev);
1287 	if (child) {
1288 		put_device(child);
1289 
1290 		return false;
1291 	}
1292 
1293 	/*
1294 	 * Since this function is required to run under dpm_list_mtx, the
1295 	 * list_empty() below will only return true if the device's list of
1296 	 * consumers is actually empty before calling it.
1297 	 */
1298 	return list_empty(&dev->links.consumers);
1299 }
1300 
1301 static bool dpm_async_suspend_parent(struct device *dev, async_func_t func)
1302 {
1303 	guard(mutex)(&dpm_list_mtx);
1304 
1305 	/*
1306 	 * If the device is suspended asynchronously and the parent's callback
1307 	 * deletes both the device and the parent itself, the parent object may
1308 	 * be freed while this function is running, so avoid that by checking
1309 	 * if the device has been deleted already as the parent cannot be
1310 	 * deleted before it.
1311 	 */
1312 	if (!device_pm_initialized(dev))
1313 		return false;
1314 
1315 	/* Start processing the device's parent if it is "async". */
1316 	if (dev->parent)
1317 		dpm_async_with_cleanup(dev->parent, func);
1318 
1319 	return true;
1320 }
1321 
1322 static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
1323 {
1324 	struct device_link *link;
1325 	int idx;
1326 
1327 	if (!dpm_async_suspend_parent(dev, func))
1328 		return;
1329 
1330 	idx = device_links_read_lock();
1331 
1332 	/* Start processing the device's "async" suppliers. */
1333 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1334 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
1335 			dpm_async_with_cleanup(link->supplier, func);
1336 
1337 	device_links_read_unlock(idx);
1338 }
1339 
1340 static void dpm_async_suspend_complete_all(struct list_head *device_list)
1341 {
1342 	struct device *dev;
1343 
1344 	guard(mutex)(&async_wip_mtx);
1345 
1346 	list_for_each_entry_reverse(dev, device_list, power.entry) {
1347 		/*
1348 		 * In case the device is being waited for and async processing
1349 		 * has not started for it yet, let the waiters make progress.
1350 		 */
1351 		if (!dev->power.work_in_progress)
1352 			complete_all(&dev->power.completion);
1353 	}
1354 }
1355 
1356 /**
1357  * resume_event - Return a "resume" message for given "suspend" sleep state.
1358  * @sleep_state: PM message representing a sleep state.
1359  *
1360  * Return a PM message representing the resume event corresponding to given
1361  * sleep state.
1362  */
1363 static pm_message_t resume_event(pm_message_t sleep_state)
1364 {
1365 	switch (sleep_state.event) {
1366 	case PM_EVENT_SUSPEND:
1367 		return PMSG_RESUME;
1368 	case PM_EVENT_FREEZE:
1369 	case PM_EVENT_QUIESCE:
1370 		return PMSG_RECOVER;
1371 	case PM_EVENT_HIBERNATE:
1372 		return PMSG_RESTORE;
1373 	}
1374 	return PMSG_ON;
1375 }
1376 
1377 static void dpm_superior_set_must_resume(struct device *dev)
1378 {
1379 	struct device_link *link;
1380 	int idx;
1381 
1382 	if (dev->parent)
1383 		dev->parent->power.must_resume = true;
1384 
1385 	idx = device_links_read_lock();
1386 
1387 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1388 		link->supplier->power.must_resume = true;
1389 
1390 	device_links_read_unlock(idx);
1391 }
1392 
1393 static void async_suspend_noirq(void *data, async_cookie_t cookie);
1394 
1395 /**
1396  * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1397  * @dev: Device to handle.
1398  * @state: PM transition of the system being carried out.
1399  * @async: If true, the device is being suspended asynchronously.
1400  *
1401  * The driver of @dev will not receive interrupts while this function is being
1402  * executed.
1403  */
1404 static void device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1405 {
1406 	pm_callback_t callback = NULL;
1407 	const char *info = NULL;
1408 	int error = 0;
1409 
1410 	TRACE_DEVICE(dev);
1411 	TRACE_SUSPEND(0);
1412 
1413 	dpm_wait_for_subordinate(dev, async);
1414 
1415 	if (READ_ONCE(async_error))
1416 		goto Complete;
1417 
1418 	if (dev->power.syscore || dev->power.direct_complete)
1419 		goto Complete;
1420 
1421 	if (dev->pm_domain) {
1422 		info = "noirq power domain ";
1423 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1424 	} else if (dev->type && dev->type->pm) {
1425 		info = "noirq type ";
1426 		callback = pm_noirq_op(dev->type->pm, state);
1427 	} else if (dev->class && dev->class->pm) {
1428 		info = "noirq class ";
1429 		callback = pm_noirq_op(dev->class->pm, state);
1430 	} else if (dev->bus && dev->bus->pm) {
1431 		info = "noirq bus ";
1432 		callback = pm_noirq_op(dev->bus->pm, state);
1433 	}
1434 	if (callback)
1435 		goto Run;
1436 
1437 	if (dev_pm_skip_suspend(dev))
1438 		goto Skip;
1439 
1440 	if (dev->driver && dev->driver->pm) {
1441 		info = "noirq driver ";
1442 		callback = pm_noirq_op(dev->driver->pm, state);
1443 	}
1444 
1445 Run:
1446 	error = dpm_run_callback(callback, dev, state, info);
1447 	if (error) {
1448 		WRITE_ONCE(async_error, error);
1449 		dpm_save_failed_dev(dev_name(dev));
1450 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
1451 		goto Complete;
1452 	}
1453 
1454 Skip:
1455 	dev->power.is_noirq_suspended = true;
1456 
1457 	/*
1458 	 * Devices must be resumed unless they are explicitly allowed to be left
1459 	 * in suspend, but even in that case skipping the resume of devices that
1460 	 * were in use right before the system suspend (as indicated by their
1461 	 * runtime PM usage counters and child counters) would be suboptimal.
1462 	 */
1463 	if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1464 	      dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
1465 		dev->power.must_resume = true;
1466 
1467 	if (dev->power.must_resume)
1468 		dpm_superior_set_must_resume(dev);
1469 
1470 Complete:
1471 	complete_all(&dev->power.completion);
1472 	TRACE_SUSPEND(error);
1473 
1474 	if (error || READ_ONCE(async_error))
1475 		return;
1476 
1477 	dpm_async_suspend_superior(dev, async_suspend_noirq);
1478 }
1479 
1480 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1481 {
1482 	struct device *dev = data;
1483 
1484 	device_suspend_noirq(dev, pm_transition, true);
1485 	put_device(dev);
1486 }
1487 
1488 static int dpm_noirq_suspend_devices(pm_message_t state)
1489 {
1490 	ktime_t starttime = ktime_get();
1491 	struct device *dev;
1492 	int error;
1493 
1494 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1495 
1496 	pm_transition = state;
1497 	async_error = 0;
1498 
1499 	mutex_lock(&dpm_list_mtx);
1500 
1501 	/*
1502 	 * Start processing "async" leaf devices upfront so they don't need to
1503 	 * wait for the "sync" devices they don't depend on.
1504 	 */
1505 	list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) {
1506 		dpm_clear_async_state(dev);
1507 		if (dpm_leaf_device(dev))
1508 			dpm_async_with_cleanup(dev, async_suspend_noirq);
1509 	}
1510 
1511 	while (!list_empty(&dpm_late_early_list)) {
1512 		dev = to_device(dpm_late_early_list.prev);
1513 
1514 		list_move(&dev->power.entry, &dpm_noirq_list);
1515 
1516 		if (dpm_async_fn(dev, async_suspend_noirq))
1517 			continue;
1518 
1519 		get_device(dev);
1520 
1521 		mutex_unlock(&dpm_list_mtx);
1522 
1523 		device_suspend_noirq(dev, state, false);
1524 
1525 		put_device(dev);
1526 
1527 		mutex_lock(&dpm_list_mtx);
1528 
1529 		if (READ_ONCE(async_error)) {
1530 			dpm_async_suspend_complete_all(&dpm_late_early_list);
1531 			/*
1532 			 * Move all devices to the target list to resume them
1533 			 * properly.
1534 			 */
1535 			list_splice_init(&dpm_late_early_list, &dpm_noirq_list);
1536 			break;
1537 		}
1538 	}
1539 
1540 	mutex_unlock(&dpm_list_mtx);
1541 
1542 	async_synchronize_full();
1543 
1544 	error = READ_ONCE(async_error);
1545 	if (error)
1546 		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1547 
1548 	dpm_show_time(starttime, state, error, "noirq");
1549 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1550 	return error;
1551 }
1552 
1553 /**
1554  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1555  * @state: PM transition of the system being carried out.
1556  *
1557  * Prevent device drivers' interrupt handlers from being called and invoke
1558  * "noirq" suspend callbacks for all non-sysdev devices.
1559  */
1560 int dpm_suspend_noirq(pm_message_t state)
1561 {
1562 	int ret;
1563 
1564 	device_wakeup_arm_wake_irqs();
1565 	suspend_device_irqs();
1566 
1567 	ret = dpm_noirq_suspend_devices(state);
1568 	if (ret)
1569 		dpm_resume_noirq(resume_event(state));
1570 
1571 	return ret;
1572 }
1573 
1574 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1575 {
1576 	struct device *parent = dev->parent;
1577 
1578 	if (!parent)
1579 		return;
1580 
1581 	spin_lock_irq(&parent->power.lock);
1582 
1583 	if (device_wakeup_path(dev) && !parent->power.ignore_children)
1584 		parent->power.wakeup_path = true;
1585 
1586 	spin_unlock_irq(&parent->power.lock);
1587 }
1588 
1589 static void async_suspend_late(void *data, async_cookie_t cookie);
1590 
1591 /**
1592  * device_suspend_late - Execute a "late suspend" callback for given device.
1593  * @dev: Device to handle.
1594  * @state: PM transition of the system being carried out.
1595  * @async: If true, the device is being suspended asynchronously.
1596  *
1597  * Runtime PM is disabled for @dev while this function is being executed.
1598  */
1599 static void device_suspend_late(struct device *dev, pm_message_t state, bool async)
1600 {
1601 	pm_callback_t callback = NULL;
1602 	const char *info = NULL;
1603 	int error = 0;
1604 
1605 	TRACE_DEVICE(dev);
1606 	TRACE_SUSPEND(0);
1607 
1608 	/*
1609 	 * Disable runtime PM for the device without checking if there is a
1610 	 * pending resume request for it.
1611 	 */
1612 	__pm_runtime_disable(dev, false);
1613 
1614 	dpm_wait_for_subordinate(dev, async);
1615 
1616 	if (READ_ONCE(async_error))
1617 		goto Complete;
1618 
1619 	if (pm_wakeup_pending()) {
1620 		WRITE_ONCE(async_error, -EBUSY);
1621 		goto Complete;
1622 	}
1623 
1624 	if (dev->power.syscore || dev->power.direct_complete)
1625 		goto Complete;
1626 
1627 	if (dev->pm_domain) {
1628 		info = "late power domain ";
1629 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1630 	} else if (dev->type && dev->type->pm) {
1631 		info = "late type ";
1632 		callback = pm_late_early_op(dev->type->pm, state);
1633 	} else if (dev->class && dev->class->pm) {
1634 		info = "late class ";
1635 		callback = pm_late_early_op(dev->class->pm, state);
1636 	} else if (dev->bus && dev->bus->pm) {
1637 		info = "late bus ";
1638 		callback = pm_late_early_op(dev->bus->pm, state);
1639 	}
1640 	if (callback)
1641 		goto Run;
1642 
1643 	if (dev_pm_skip_suspend(dev))
1644 		goto Skip;
1645 
1646 	if (dev->driver && dev->driver->pm) {
1647 		info = "late driver ";
1648 		callback = pm_late_early_op(dev->driver->pm, state);
1649 	}
1650 
1651 Run:
1652 	error = dpm_run_callback(callback, dev, state, info);
1653 	if (error) {
1654 		WRITE_ONCE(async_error, error);
1655 		dpm_save_failed_dev(dev_name(dev));
1656 		pm_dev_err(dev, state, async ? " async late" : " late", error);
1657 		goto Complete;
1658 	}
1659 	dpm_propagate_wakeup_to_parent(dev);
1660 
1661 Skip:
1662 	dev->power.is_late_suspended = true;
1663 
1664 Complete:
1665 	TRACE_SUSPEND(error);
1666 	complete_all(&dev->power.completion);
1667 
1668 	if (error || READ_ONCE(async_error))
1669 		return;
1670 
1671 	dpm_async_suspend_superior(dev, async_suspend_late);
1672 }
1673 
1674 static void async_suspend_late(void *data, async_cookie_t cookie)
1675 {
1676 	struct device *dev = data;
1677 
1678 	device_suspend_late(dev, pm_transition, true);
1679 	put_device(dev);
1680 }
1681 
1682 /**
1683  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1684  * @state: PM transition of the system being carried out.
1685  */
1686 int dpm_suspend_late(pm_message_t state)
1687 {
1688 	ktime_t starttime = ktime_get();
1689 	struct device *dev;
1690 	int error;
1691 
1692 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1693 
1694 	pm_transition = state;
1695 	async_error = 0;
1696 
1697 	wake_up_all_idle_cpus();
1698 
1699 	mutex_lock(&dpm_list_mtx);
1700 
1701 	/*
1702 	 * Start processing "async" leaf devices upfront so they don't need to
1703 	 * wait for the "sync" devices they don't depend on.
1704 	 */
1705 	list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) {
1706 		dpm_clear_async_state(dev);
1707 		if (dpm_leaf_device(dev))
1708 			dpm_async_with_cleanup(dev, async_suspend_late);
1709 	}
1710 
1711 	while (!list_empty(&dpm_suspended_list)) {
1712 		dev = to_device(dpm_suspended_list.prev);
1713 
1714 		list_move(&dev->power.entry, &dpm_late_early_list);
1715 
1716 		if (dpm_async_fn(dev, async_suspend_late))
1717 			continue;
1718 
1719 		get_device(dev);
1720 
1721 		mutex_unlock(&dpm_list_mtx);
1722 
1723 		device_suspend_late(dev, state, false);
1724 
1725 		put_device(dev);
1726 
1727 		mutex_lock(&dpm_list_mtx);
1728 
1729 		if (READ_ONCE(async_error)) {
1730 			dpm_async_suspend_complete_all(&dpm_suspended_list);
1731 			/*
1732 			 * Move all devices to the target list to resume them
1733 			 * properly.
1734 			 */
1735 			list_splice_init(&dpm_suspended_list, &dpm_late_early_list);
1736 			break;
1737 		}
1738 	}
1739 
1740 	mutex_unlock(&dpm_list_mtx);
1741 
1742 	async_synchronize_full();
1743 
1744 	error = READ_ONCE(async_error);
1745 	if (error) {
1746 		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1747 		dpm_resume_early(resume_event(state));
1748 	}
1749 	dpm_show_time(starttime, state, error, "late");
1750 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1751 	return error;
1752 }
1753 
1754 /**
1755  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1756  * @state: PM transition of the system being carried out.
1757  */
1758 int dpm_suspend_end(pm_message_t state)
1759 {
1760 	ktime_t starttime = ktime_get();
1761 	int error;
1762 
1763 	error = dpm_suspend_late(state);
1764 	if (error)
1765 		goto out;
1766 
1767 	error = dpm_suspend_noirq(state);
1768 	if (error)
1769 		dpm_resume_early(resume_event(state));
1770 
1771 out:
1772 	dpm_show_time(starttime, state, error, "end");
1773 	return error;
1774 }
1775 EXPORT_SYMBOL_GPL(dpm_suspend_end);
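
/*
 * Hypothetical sketch (not part of this file): dpm_suspend_end() and
 * dpm_resume_start() are the exported brackets that system-wide PM code
 * (for example the hibernation core) places around the low-level platform
 * transition.  example_pm_transition() is a placeholder illustrating that
 * ordering only.
 */
static int example_pm_transition(void)
{
	int error;

	error = dpm_suspend_end(PMSG_SUSPEND);	/* "late" + "noirq" suspend */
	if (error)
		return error;

	/* ... platform-specific low-power entry would happen here ... */

	dpm_resume_start(PMSG_RESUME);		/* "noirq" + "early" resume */
	return 0;
}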
1776 
1777 /**
1778  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1779  * @dev: Device to suspend.
1780  * @state: PM transition of the system being carried out.
1781  * @cb: Suspend callback to execute.
1782  * @info: string description of caller.
1783  */
1784 static int legacy_suspend(struct device *dev, pm_message_t state,
1785 			  int (*cb)(struct device *dev, pm_message_t state),
1786 			  const char *info)
1787 {
1788 	int error;
1789 	ktime_t calltime;
1790 
1791 	calltime = initcall_debug_start(dev, cb);
1792 
1793 	trace_device_pm_callback_start(dev, info, state.event);
1794 	error = cb(dev, state);
1795 	trace_device_pm_callback_end(dev, error);
1796 	suspend_report_result(dev, cb, error);
1797 
1798 	initcall_debug_report(dev, calltime, cb, error);
1799 
1800 	return error;
1801 }
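
/*
 * Illustrative sketch (not part of this file): the shape of a legacy bus
 * ->suspend() callback as invoked through legacy_suspend() above.  The
 * corge_bus_suspend() name is hypothetical; new code should provide
 * dev_pm_ops callbacks instead.
 */
static int corge_bus_suspend(struct device *dev, pm_message_t state)
{
	/* Legacy callbacks receive the PM transition directly. */
	if (state.event == PM_EVENT_SUSPEND)
		dev_dbg(dev, "entering system suspend\n");

	return 0;
}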
1802 
1803 static void dpm_clear_superiors_direct_complete(struct device *dev)
1804 {
1805 	struct device_link *link;
1806 	int idx;
1807 
1808 	if (dev->parent) {
1809 		spin_lock_irq(&dev->parent->power.lock);
1810 		dev->parent->power.direct_complete = false;
1811 		spin_unlock_irq(&dev->parent->power.lock);
1812 	}
1813 
1814 	idx = device_links_read_lock();
1815 
1816 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1817 		spin_lock_irq(&link->supplier->power.lock);
1818 		link->supplier->power.direct_complete = false;
1819 		spin_unlock_irq(&link->supplier->power.lock);
1820 	}
1821 
1822 	device_links_read_unlock(idx);
1823 }
1824 
1825 static void async_suspend(void *data, async_cookie_t cookie);
1826 
1827 /**
1828  * device_suspend - Execute "suspend" callbacks for the given device.
1829  * @dev: Device to handle.
1830  * @state: PM transition of the system being carried out.
1831  * @async: If true, the device is being suspended asynchronously.
1832  */
1833 static void device_suspend(struct device *dev, pm_message_t state, bool async)
1834 {
1835 	pm_callback_t callback = NULL;
1836 	const char *info = NULL;
1837 	int error = 0;
1838 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1839 
1840 	TRACE_DEVICE(dev);
1841 	TRACE_SUSPEND(0);
1842 
1843 	dpm_wait_for_subordinate(dev, async);
1844 
1845 	if (READ_ONCE(async_error)) {
1846 		dev->power.direct_complete = false;
1847 		goto Complete;
1848 	}
1849 
1850 	/*
1851 	 * Wait for possible runtime PM transitions of the device in progress
1852 	 * to complete and if there's a runtime resume request pending for it,
1853 	 * resume it before proceeding with invoking the system-wide suspend
1854 	 * callbacks for it.
1855 	 *
1856 	 * If the system-wide suspend callbacks below change the configuration
1857 	 * of the device, they must disable runtime PM for it or otherwise
1858 	 * ensure that its runtime-resume callbacks will not be confused by that
1859 	 * change in case they are invoked going forward.
1860 	 */
1861 	pm_runtime_barrier(dev);
1862 
1863 	if (pm_wakeup_pending()) {
1864 		dev->power.direct_complete = false;
1865 		WRITE_ONCE(async_error, -EBUSY);
1866 		goto Complete;
1867 	}
1868 
1869 	if (dev->power.syscore)
1870 		goto Complete;
1871 
1872 	/* Avoid direct_complete to let wakeup_path propagate. */
1873 	if (device_may_wakeup(dev) || device_wakeup_path(dev))
1874 		dev->power.direct_complete = false;
1875 
1876 	if (dev->power.direct_complete) {
1877 		if (pm_runtime_status_suspended(dev)) {
1878 			pm_runtime_disable(dev);
1879 			if (pm_runtime_status_suspended(dev)) {
1880 				pm_dev_dbg(dev, state, "direct-complete ");
1881 				dev->power.is_suspended = true;
1882 				goto Complete;
1883 			}
1884 
1885 			pm_runtime_enable(dev);
1886 		}
1887 		dev->power.direct_complete = false;
1888 	}
1889 
1890 	dev->power.may_skip_resume = true;
1891 	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
1892 
1893 	dpm_watchdog_set(&wd, dev);
1894 	device_lock(dev);
1895 
1896 	if (dev->pm_domain) {
1897 		info = "power domain ";
1898 		callback = pm_op(&dev->pm_domain->ops, state);
1899 		goto Run;
1900 	}
1901 
1902 	if (dev->type && dev->type->pm) {
1903 		info = "type ";
1904 		callback = pm_op(dev->type->pm, state);
1905 		goto Run;
1906 	}
1907 
1908 	if (dev->class && dev->class->pm) {
1909 		info = "class ";
1910 		callback = pm_op(dev->class->pm, state);
1911 		goto Run;
1912 	}
1913 
1914 	if (dev->bus) {
1915 		if (dev->bus->pm) {
1916 			info = "bus ";
1917 			callback = pm_op(dev->bus->pm, state);
1918 		} else if (dev->bus->suspend) {
1919 			pm_dev_dbg(dev, state, "legacy bus ");
1920 			error = legacy_suspend(dev, state, dev->bus->suspend,
1921 						"legacy bus ");
1922 			goto End;
1923 		}
1924 	}
1925 
1926  Run:
1927 	if (!callback && dev->driver && dev->driver->pm) {
1928 		info = "driver ";
1929 		callback = pm_op(dev->driver->pm, state);
1930 	}
1931 
1932 	error = dpm_run_callback(callback, dev, state, info);
1933 
1934  End:
1935 	if (!error) {
1936 		dev->power.is_suspended = true;
1937 		if (device_may_wakeup(dev))
1938 			dev->power.wakeup_path = true;
1939 
1940 		dpm_propagate_wakeup_to_parent(dev);
1941 		dpm_clear_superiors_direct_complete(dev);
1942 	}
1943 
1944 	device_unlock(dev);
1945 	dpm_watchdog_clear(&wd);
1946 
1947  Complete:
1948 	if (error) {
1949 		WRITE_ONCE(async_error, error);
1950 		dpm_save_failed_dev(dev_name(dev));
1951 		pm_dev_err(dev, state, async ? " async" : "", error);
1952 	}
1953 
1954 	complete_all(&dev->power.completion);
1955 	TRACE_SUSPEND(error);
1956 
1957 	if (error || READ_ONCE(async_error))
1958 		return;
1959 
1960 	dpm_async_suspend_superior(dev, async_suspend);
1961 }
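
/*
 * Illustrative sketch (not part of this file): as the runtime PM comment in
 * device_suspend() above expects, a driver that reconfigures its device in
 * the system-wide suspend callback keeps runtime PM disabled until resume
 * (the foo_*() names are hypothetical).
 */
static int foo_suspend(struct device *dev)
{
	pm_runtime_disable(dev);	/* no runtime transitions from here on */

	/* ... save context and change the device configuration ... */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* ... restore the configuration the runtime PM callbacks expect ... */

	pm_runtime_enable(dev);
	return 0;
}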
1962 
1963 static void async_suspend(void *data, async_cookie_t cookie)
1964 {
1965 	struct device *dev = data;
1966 
1967 	device_suspend(dev, pm_transition, true);
1968 	put_device(dev);
1969 }
1970 
1971 /**
1972  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1973  * @state: PM transition of the system being carried out.
1974  */
1975 int dpm_suspend(pm_message_t state)
1976 {
1977 	ktime_t starttime = ktime_get();
1978 	struct device *dev;
1979 	int error;
1980 
1981 	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1982 	might_sleep();
1983 
1984 	devfreq_suspend();
1985 	cpufreq_suspend();
1986 
1987 	pm_transition = state;
1988 	async_error = 0;
1989 
1990 	mutex_lock(&dpm_list_mtx);
1991 
1992 	/*
1993 	 * Start processing "async" leaf devices upfront so they don't need to
1994 	 * wait for the "sync" devices they don't depend on.
1995 	 */
1996 	list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) {
1997 		dpm_clear_async_state(dev);
1998 		if (dpm_leaf_device(dev))
1999 			dpm_async_with_cleanup(dev, async_suspend);
2000 	}
2001 
2002 	while (!list_empty(&dpm_prepared_list)) {
2003 		dev = to_device(dpm_prepared_list.prev);
2004 
2005 		list_move(&dev->power.entry, &dpm_suspended_list);
2006 
2007 		if (dpm_async_fn(dev, async_suspend))
2008 			continue;
2009 
2010 		get_device(dev);
2011 
2012 		mutex_unlock(&dpm_list_mtx);
2013 
2014 		device_suspend(dev, state, false);
2015 
2016 		put_device(dev);
2017 
2018 		mutex_lock(&dpm_list_mtx);
2019 
2020 		if (READ_ONCE(async_error)) {
2021 			dpm_async_suspend_complete_all(&dpm_prepared_list);
2022 			/*
2023 			 * Move all devices to the target list to resume them
2024 			 * properly.
2025 			 */
2026 			list_splice_init(&dpm_prepared_list, &dpm_suspended_list);
2027 			break;
2028 		}
2029 	}
2030 
2031 	mutex_unlock(&dpm_list_mtx);
2032 
2033 	async_synchronize_full();
2034 
2035 	error = READ_ONCE(async_error);
2036 	if (error)
2037 		dpm_save_failed_step(SUSPEND_SUSPEND);
2038 
2039 	dpm_show_time(starttime, state, error, NULL);
2040 	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
2041 	return error;
2042 }
2043 
2044 static bool device_prepare_smart_suspend(struct device *dev)
2045 {
2046 	struct device_link *link;
2047 	bool ret = true;
2048 	int idx;
2049 
2050 	/*
2051 	 * The "smart suspend" feature is enabled for devices whose drivers ask
2052 	 * for it and for devices without PM callbacks.
2053 	 *
2054 	 * However, if "smart suspend" is not enabled for the device's parent
2055 	 * or any of its suppliers that take runtime PM into account, it cannot
2056 	 * be enabled for the device either.
2057 	 */
2058 	if (!dev->power.no_pm_callbacks &&
2059 	    !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
2060 		return false;
2061 
2062 	if (dev->parent && !dev_pm_smart_suspend(dev->parent) &&
2063 	    !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent))
2064 		return false;
2065 
2066 	idx = device_links_read_lock();
2067 
2068 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
2069 		if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
2070 			continue;
2071 
2072 		if (!dev_pm_smart_suspend(link->supplier) &&
2073 		    !pm_runtime_blocked(link->supplier)) {
2074 			ret = false;
2075 			break;
2076 		}
2077 	}
2078 
2079 	device_links_read_unlock(idx);
2080 
2081 	return ret;
2082 }
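
/*
 * Illustrative sketch (not part of this file): a driver asks for "smart
 * suspend" (and, here, also for skipping resume callbacks) by setting the
 * corresponding driver flags at probe time; the parent/supplier checks in
 * device_prepare_smart_suspend() above still apply.  The bar_probe() name
 * is hypothetical.
 */
static int bar_probe(struct device *dev)
{
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND | DPM_FLAG_MAY_SKIP_RESUME);

	/* ... normal probe work ... */
	return 0;
}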
2083 
2084 /**
2085  * device_prepare - Prepare a device for system power transition.
2086  * @dev: Device to handle.
2087  * @state: PM transition of the system being carried out.
2088  *
2089  * Execute the ->prepare() callback(s) for the given device.  No new children of the
2090  * device may be registered after this function has returned.
2091  */
2092 static int device_prepare(struct device *dev, pm_message_t state)
2093 {
2094 	int (*callback)(struct device *) = NULL;
2095 	bool smart_suspend;
2096 	int ret = 0;
2097 
2098 	/*
2099 	 * If a device's parent goes into runtime suspend at the wrong time,
2100 	 * it won't be possible to resume the device.  To prevent this we
2101 	 * block runtime suspend here, during the prepare phase, and allow
2102 	 * it again during the complete phase.
2103 	 */
2104 	pm_runtime_get_noresume(dev);
2105 	/*
2106 	 * If runtime PM is disabled for the device at this point and it has
2107 	 * never been enabled so far, it should not be enabled until this system
2108 	 * suspend-resume cycle is complete, so prepare to trigger a warning on
2109 	 * subsequent attempts to enable it.
2110 	 */
2111 	smart_suspend = !pm_runtime_block_if_disabled(dev);
2112 
2113 	if (dev->power.syscore)
2114 		return 0;
2115 
2116 	device_lock(dev);
2117 
2118 	dev->power.wakeup_path = false;
2119 
2120 	if (dev->power.no_pm_callbacks)
2121 		goto unlock;
2122 
2123 	if (dev->pm_domain)
2124 		callback = dev->pm_domain->ops.prepare;
2125 	else if (dev->type && dev->type->pm)
2126 		callback = dev->type->pm->prepare;
2127 	else if (dev->class && dev->class->pm)
2128 		callback = dev->class->pm->prepare;
2129 	else if (dev->bus && dev->bus->pm)
2130 		callback = dev->bus->pm->prepare;
2131 
2132 	if (!callback && dev->driver && dev->driver->pm)
2133 		callback = dev->driver->pm->prepare;
2134 
2135 	if (callback)
2136 		ret = callback(dev);
2137 
2138 unlock:
2139 	device_unlock(dev);
2140 
2141 	if (ret < 0) {
2142 		suspend_report_result(dev, callback, ret);
2143 		pm_runtime_put(dev);
2144 		return ret;
2145 	}
2146 	/* Do not enable "smart suspend" for devices with disabled runtime PM. */
2147 	if (smart_suspend)
2148 		smart_suspend = device_prepare_smart_suspend(dev);
2149 
2150 	spin_lock_irq(&dev->power.lock);
2151 
2152 	dev->power.smart_suspend = smart_suspend;
2153 	/*
2154 	 * A positive return value from ->prepare() means "this device appears
2155 	 * to be runtime-suspended and its state is fine, so if it really is
2156 	 * runtime-suspended, you can leave it in that state provided that you
2157 	 * will do the same thing with all of its descendants".  This only
2158 	 * applies to suspend transitions, however.
2159 	 */
2160 	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
2161 		(ret > 0 || dev->power.no_pm_callbacks) &&
2162 		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
2163 
2164 	spin_unlock_irq(&dev->power.lock);
2165 
2166 	return 0;
2167 }
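
/*
 * Illustrative sketch (not part of this file): a ->prepare() callback that
 * returns a positive value when the device is runtime-suspended, so that
 * the direct_complete logic above may leave it in that state.  The
 * baz_prepare() name is hypothetical.
 */
static int baz_prepare(struct device *dev)
{
	/* Positive return: the device may stay runtime-suspended. */
	return pm_runtime_suspended(dev);
}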
2168 
2169 /**
2170  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
2171  * @state: PM transition of the system being carried out.
2172  *
2173  * Execute the ->prepare() callback(s) for all devices.
2174  */
2175 int dpm_prepare(pm_message_t state)
2176 {
2177 	int error = 0;
2178 
2179 	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
2180 
2181 	/*
2182 	 * Give the known devices a chance to complete their probes before
2183 	 * disabling the probing of devices.  This sync point is important at
2184 	 * least at boot time and during hibernation restore.
2185 	 */
2186 	wait_for_device_probe();
2187 	/*
2188 	 * Probing devices during suspend or hibernation is unsafe and would
2189 	 * make system behavior unpredictable, so prohibit device probing here
2190 	 * and defer the probes instead.  The normal behavior will be restored
2191 	 * in dpm_complete().
2192 	 */
2193 	device_block_probing();
2194 
2195 	mutex_lock(&dpm_list_mtx);
2196 	while (!list_empty(&dpm_list) && !error) {
2197 		struct device *dev = to_device(dpm_list.next);
2198 
2199 		get_device(dev);
2200 
2201 		mutex_unlock(&dpm_list_mtx);
2202 
2203 		trace_device_pm_callback_start(dev, "", state.event);
2204 		error = device_prepare(dev, state);
2205 		trace_device_pm_callback_end(dev, error);
2206 
2207 		mutex_lock(&dpm_list_mtx);
2208 
2209 		if (!error) {
2210 			dev->power.is_prepared = true;
2211 			if (!list_empty(&dev->power.entry))
2212 				list_move_tail(&dev->power.entry, &dpm_prepared_list);
2213 		} else if (error == -EAGAIN) {
2214 			error = 0;
2215 		} else {
2216 			dev_info(dev, "not prepared for power transition: code %d\n",
2217 				 error);
2218 		}
2219 
2220 		mutex_unlock(&dpm_list_mtx);
2221 
2222 		put_device(dev);
2223 
2224 		mutex_lock(&dpm_list_mtx);
2225 	}
2226 	mutex_unlock(&dpm_list_mtx);
2227 	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
2228 	return error;
2229 }
2230 
2231 /**
2232  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
2233  * @state: PM transition of the system being carried out.
2234  *
2235  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
2236  * callbacks for them.
2237  */
2238 int dpm_suspend_start(pm_message_t state)
2239 {
2240 	ktime_t starttime = ktime_get();
2241 	int error;
2242 
2243 	error = dpm_prepare(state);
2244 	if (error)
2245 		dpm_save_failed_step(SUSPEND_PREPARE);
2246 	else {
2247 		pm_restrict_gfp_mask();
2248 		error = dpm_suspend(state);
2249 	}
2250 
2251 	dpm_show_time(starttime, state, error, "start");
2252 	return error;
2253 }
2254 EXPORT_SYMBOL_GPL(dpm_suspend_start);
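
/*
 * Illustrative sketch (not part of this file): the system sleep core pairs
 * dpm_suspend_start() with dpm_resume_end().  The surrounding control flow
 * is a simplified assumption, not a copy of kernel/power/suspend.c.
 */
static int example_suspend_devices(void)
{
	int error;

	error = dpm_suspend_start(PMSG_SUSPEND);
	if (!error) {
		/* ... enter the sleep state, then wake up ... */
	}

	/* Resume/complete whatever was suspended/prepared, even on failure. */
	dpm_resume_end(PMSG_RESUME);
	return error;
}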
2255 
2256 void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
2257 {
2258 	if (ret)
2259 		dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret);
2260 }
2261 EXPORT_SYMBOL_GPL(__suspend_report_result);
2262 
2263 /**
2264  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2265  * @subordinate: Device that needs to wait for @dev.
2266  * @dev: Device to wait for.
2267  */
2268 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2269 {
2270 	dpm_wait(dev, subordinate->power.async_suspend);
2271 	return async_error;
2272 }
2273 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
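
/*
 * Illustrative sketch (not part of this file): a driver whose suspend
 * callback depends on another device having completed the current phase
 * first can wait for it explicitly.  The qux_suspend() name and the way
 * the peer device is obtained are hypothetical.
 */
static int qux_suspend(struct device *dev)
{
	struct device *peer = dev_get_drvdata(dev);	/* hypothetical: peer stashed at probe time */
	int error;

	/*
	 * Waits for the peer (when this device has async suspend enabled)
	 * and returns the current async error, if any.
	 */
	error = device_pm_wait_for_dev(dev, peer);
	if (error)
		return error;

	/* ... suspend this device now that the peer has been handled ... */
	return 0;
}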
2274 
2275 /**
2276  * dpm_for_each_dev - device iterator.
2277  * @data: data for the callback.
2278  * @fn: function to be called for each device.
2279  *
2280  * Iterate over devices in dpm_list, and call @fn for each device,
2281  * passing it @data.
2282  */
2283 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
2284 {
2285 	struct device *dev;
2286 
2287 	if (!fn)
2288 		return;
2289 
2290 	device_pm_lock();
2291 	list_for_each_entry(dev, &dpm_list, power.entry)
2292 		fn(dev, data);
2293 	device_pm_unlock();
2294 }
2295 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
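
/*
 * Illustrative sketch (not part of this file): counting the devices on
 * dpm_list with dpm_for_each_dev().  The callback name is hypothetical.
 */
static void example_count_dev(struct device *dev, void *data)
{
	unsigned int *count = data;

	(*count)++;
	dev_dbg(dev, "on dpm_list\n");
}

/*
 * Usage:
 *	unsigned int count = 0;
 *
 *	dpm_for_each_dev(&count, example_count_dev);
 */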
2296 
2297 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2298 {
2299 	if (!ops)
2300 		return true;
2301 
2302 	return !ops->prepare &&
2303 	       !ops->suspend &&
2304 	       !ops->suspend_late &&
2305 	       !ops->suspend_noirq &&
2306 	       !ops->resume_noirq &&
2307 	       !ops->resume_early &&
2308 	       !ops->resume &&
2309 	       !ops->complete;
2310 }
2311 
2312 void device_pm_check_callbacks(struct device *dev)
2313 {
2314 	unsigned long flags;
2315 
2316 	spin_lock_irqsave(&dev->power.lock, flags);
2317 	dev->power.no_pm_callbacks =
2318 		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2319 		 !dev->bus->suspend && !dev->bus->resume)) &&
2320 		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2321 		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2322 		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2323 		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2324 		 !dev->driver->suspend && !dev->driver->resume));
2325 	spin_unlock_irqrestore(&dev->power.lock, flags);
2326 }
2327 
2328 bool dev_pm_skip_suspend(struct device *dev)
2329 {
2330 	return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev);
2331 }
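
/*
 * Illustrative sketch (not part of this file): a "late suspend" callback
 * that leaves a smart-suspend-enabled, runtime-suspended device alone, in
 * the way dev_pm_skip_suspend() is meant to be used by middle layers.  The
 * quux_suspend_late() name is hypothetical.
 */
static int quux_suspend_late(struct device *dev)
{
	if (dev_pm_skip_suspend(dev))
		return 0;	/* already runtime-suspended; nothing more to do */

	/* ... otherwise put the device into its system sleep state ... */
	return 0;
}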
2332