xref: /linux/drivers/base/power/main.c (revision a79a588fc1761dc12a3064fc2f648ae66cea3c5a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/main.c - Where the driver meets power management.
4  *
5  * Copyright (c) 2003 Patrick Mochel
6  * Copyright (c) 2003 Open Source Development Lab
7  *
8  * The driver model core calls device_pm_add() when a device is registered.
9  * This will initialize the embedded device_pm_info object in the device
10  * and add it to the list of power-controlled devices. sysfs entries for
11  * controlling device power management will also be added.
12  *
13  * A separate list is used for keeping track of power info, because the power
14  * domain dependencies may differ from the ancestral dependencies that the
15  * subsystem list maintains.
16  */
17 
18 #define pr_fmt(fmt) "PM: " fmt
19 #define dev_fmt pr_fmt
20 
21 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/sched/debug.h>
31 #include <linux/async.h>
32 #include <linux/suspend.h>
33 #include <trace/events/power.h>
34 #include <linux/cpufreq.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37 
38 #include "../base.h"
39 #include "power.h"
40 
41 typedef int (*pm_callback_t)(struct device *);
42 
43 #define list_for_each_entry_rcu_locked(pos, head, member) \
44 	list_for_each_entry_rcu(pos, head, member, \
45 			device_links_read_lock_held())
46 
47 /*
48  * The entries in the dpm_list list are in a depth first order, simply
49  * because children are guaranteed to be discovered after parents, and
50  * are inserted at the back of the list on discovery.
51  *
52  * Since device_pm_add() may be called with a device lock held,
53  * we must never try to acquire a device lock while holding
54  * dpm_list_mutex.
55  */
56 
57 LIST_HEAD(dpm_list);
58 static LIST_HEAD(dpm_prepared_list);
59 static LIST_HEAD(dpm_suspended_list);
60 static LIST_HEAD(dpm_late_early_list);
61 static LIST_HEAD(dpm_noirq_list);
62 
63 static DEFINE_MUTEX(dpm_list_mtx);
64 static pm_message_t pm_transition;
65 
66 static DEFINE_MUTEX(async_wip_mtx);
67 static int async_error;
68 
69 static const char *pm_verb(int event)
70 {
71 	switch (event) {
72 	case PM_EVENT_SUSPEND:
73 		return "suspend";
74 	case PM_EVENT_RESUME:
75 		return "resume";
76 	case PM_EVENT_FREEZE:
77 		return "freeze";
78 	case PM_EVENT_QUIESCE:
79 		return "quiesce";
80 	case PM_EVENT_HIBERNATE:
81 		return "hibernate";
82 	case PM_EVENT_THAW:
83 		return "thaw";
84 	case PM_EVENT_RESTORE:
85 		return "restore";
86 	case PM_EVENT_RECOVER:
87 		return "recover";
88 	default:
89 		return "(unknown PM event)";
90 	}
91 }
92 
93 /**
94  * device_pm_sleep_init - Initialize system suspend-related device fields.
95  * @dev: Device object being initialized.
96  */
97 void device_pm_sleep_init(struct device *dev)
98 {
99 	dev->power.is_prepared = false;
100 	dev->power.is_suspended = false;
101 	dev->power.is_noirq_suspended = false;
102 	dev->power.is_late_suspended = false;
103 	init_completion(&dev->power.completion);
104 	complete_all(&dev->power.completion);
105 	dev->power.wakeup = NULL;
106 	INIT_LIST_HEAD(&dev->power.entry);
107 }
108 
109 /**
110  * device_pm_lock - Lock the list of active devices used by the PM core.
111  */
112 void device_pm_lock(void)
113 {
114 	mutex_lock(&dpm_list_mtx);
115 }
116 
117 /**
118  * device_pm_unlock - Unlock the list of active devices used by the PM core.
119  */
120 void device_pm_unlock(void)
121 {
122 	mutex_unlock(&dpm_list_mtx);
123 }
124 
125 /**
126  * device_pm_add - Add a device to the PM core's list of active devices.
127  * @dev: Device to add to the list.
128  */
129 void device_pm_add(struct device *dev)
130 {
131 	/* Skip PM setup/initialization. */
132 	if (device_pm_not_required(dev))
133 		return;
134 
135 	pr_debug("Adding info for %s:%s\n",
136 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
137 	device_pm_check_callbacks(dev);
138 	mutex_lock(&dpm_list_mtx);
139 	if (dev->parent && dev->parent->power.is_prepared)
140 		dev_warn(dev, "parent %s should not be sleeping\n",
141 			dev_name(dev->parent));
142 	list_add_tail(&dev->power.entry, &dpm_list);
143 	dev->power.in_dpm_list = true;
144 	mutex_unlock(&dpm_list_mtx);
145 }
146 
147 /**
148  * device_pm_remove - Remove a device from the PM core's list of active devices.
149  * @dev: Device to be removed from the list.
150  */
151 void device_pm_remove(struct device *dev)
152 {
153 	if (device_pm_not_required(dev))
154 		return;
155 
156 	pr_debug("Removing info for %s:%s\n",
157 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
158 	complete_all(&dev->power.completion);
159 	mutex_lock(&dpm_list_mtx);
160 	list_del_init(&dev->power.entry);
161 	dev->power.in_dpm_list = false;
162 	mutex_unlock(&dpm_list_mtx);
163 	device_wakeup_disable(dev);
164 	pm_runtime_remove(dev);
165 	device_pm_check_callbacks(dev);
166 }
167 
168 /**
169  * device_pm_move_before - Move device in the PM core's list of active devices.
170  * @deva: Device to move in dpm_list.
171  * @devb: Device @deva should come before.
172  */
173 void device_pm_move_before(struct device *deva, struct device *devb)
174 {
175 	pr_debug("Moving %s:%s before %s:%s\n",
176 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
177 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
178 	/* Delete deva from dpm_list and reinsert before devb. */
179 	list_move_tail(&deva->power.entry, &devb->power.entry);
180 }
181 
182 /**
183  * device_pm_move_after - Move device in the PM core's list of active devices.
184  * @deva: Device to move in dpm_list.
185  * @devb: Device @deva should come after.
186  */
187 void device_pm_move_after(struct device *deva, struct device *devb)
188 {
189 	pr_debug("Moving %s:%s after %s:%s\n",
190 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
191 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
192 	/* Delete deva from dpm_list and reinsert after devb. */
193 	list_move(&deva->power.entry, &devb->power.entry);
194 }
195 
196 /**
197  * device_pm_move_last - Move device to end of the PM core's list of devices.
198  * @dev: Device to move in dpm_list.
199  */
200 void device_pm_move_last(struct device *dev)
201 {
202 	pr_debug("Moving %s:%s to end of list\n",
203 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
204 	list_move_tail(&dev->power.entry, &dpm_list);
205 }
206 
207 static ktime_t initcall_debug_start(struct device *dev, void *cb)
208 {
209 	if (!pm_print_times_enabled)
210 		return 0;
211 
212 	dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
213 		 task_pid_nr(current),
214 		 dev->parent ? dev_name(dev->parent) : "none");
215 	return ktime_get();
216 }
217 
218 static void initcall_debug_report(struct device *dev, ktime_t calltime,
219 				  void *cb, int error)
220 {
221 	ktime_t rettime;
222 
223 	if (!pm_print_times_enabled)
224 		return;
225 
226 	rettime = ktime_get();
227 	dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
228 		 (unsigned long long)ktime_us_delta(rettime, calltime));
229 }
230 
231 /**
232  * dpm_wait - Wait for a PM operation to complete.
233  * @dev: Device to wait for.
234  * @async: If unset, wait only if the device's power.async_suspend flag is set.
235  */
236 static void dpm_wait(struct device *dev, bool async)
237 {
238 	if (!dev)
239 		return;
240 
241 	if (async || (pm_async_enabled && dev->power.async_suspend))
242 		wait_for_completion(&dev->power.completion);
243 }
244 
245 static int dpm_wait_fn(struct device *dev, void *async_ptr)
246 {
247 	dpm_wait(dev, *((bool *)async_ptr));
248 	return 0;
249 }
250 
251 static void dpm_wait_for_children(struct device *dev, bool async)
252 {
253 	device_for_each_child(dev, &async, dpm_wait_fn);
254 }
255 
256 static void dpm_wait_for_suppliers(struct device *dev, bool async)
257 {
258 	struct device_link *link;
259 	int idx;
260 
261 	idx = device_links_read_lock();
262 
263 	/*
264 	 * If the supplier goes away right after we've checked the link to it,
265 	 * we'll wait for its completion to change the state, but that's fine,
266 	 * because the only things that will block as a result are the SRCU
267 	 * callbacks freeing the link objects for the links in the list we're
268 	 * walking.
269 	 */
270 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
271 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
272 			dpm_wait(link->supplier, async);
273 
274 	device_links_read_unlock(idx);
275 }
276 
277 static bool dpm_wait_for_superior(struct device *dev, bool async)
278 {
279 	struct device *parent;
280 
281 	/*
282 	 * If the device is resumed asynchronously and the parent's callback
283 	 * deletes both the device and the parent itself, the parent object may
284 	 * be freed while this function is running, so avoid that by reference
285 	 * counting the parent once more unless the device has been deleted
286 	 * already (in which case return right away).
287 	 */
288 	mutex_lock(&dpm_list_mtx);
289 
290 	if (!device_pm_initialized(dev)) {
291 		mutex_unlock(&dpm_list_mtx);
292 		return false;
293 	}
294 
295 	parent = get_device(dev->parent);
296 
297 	mutex_unlock(&dpm_list_mtx);
298 
299 	dpm_wait(parent, async);
300 	put_device(parent);
301 
302 	dpm_wait_for_suppliers(dev, async);
303 
304 	/*
305 	 * If the parent's callback has deleted the device, attempting to resume
306 	 * it would be invalid, so avoid doing that then.
307 	 */
308 	return device_pm_initialized(dev);
309 }
310 
311 static void dpm_wait_for_consumers(struct device *dev, bool async)
312 {
313 	struct device_link *link;
314 	int idx;
315 
316 	idx = device_links_read_lock();
317 
318 	/*
319 	 * The status of a device link can only be changed from "dormant" by a
320 	 * probe, but that cannot happen during system suspend/resume.  In
321 	 * theory it can change to "dormant" at that time, but then it is
322 	 * reasonable to wait for the target device anyway (e.g. if it goes
323 	 * away, it's better to wait for it to go away completely and then
324 	 * continue instead of trying to continue in parallel with its
325 	 * unregistration).
326 	 */
327 	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
328 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
329 			dpm_wait(link->consumer, async);
330 
331 	device_links_read_unlock(idx);
332 }
333 
334 static void dpm_wait_for_subordinate(struct device *dev, bool async)
335 {
336 	dpm_wait_for_children(dev, async);
337 	dpm_wait_for_consumers(dev, async);
338 }
339 
340 /**
341  * pm_op - Return the PM operation appropriate for given PM event.
342  * @ops: PM operations to choose from.
343  * @state: PM transition of the system being carried out.
344  */
345 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
346 {
347 	switch (state.event) {
348 #ifdef CONFIG_SUSPEND
349 	case PM_EVENT_SUSPEND:
350 		return ops->suspend;
351 	case PM_EVENT_RESUME:
352 		return ops->resume;
353 #endif /* CONFIG_SUSPEND */
354 #ifdef CONFIG_HIBERNATE_CALLBACKS
355 	case PM_EVENT_FREEZE:
356 	case PM_EVENT_QUIESCE:
357 		return ops->freeze;
358 	case PM_EVENT_HIBERNATE:
359 		return ops->poweroff;
360 	case PM_EVENT_THAW:
361 	case PM_EVENT_RECOVER:
362 		return ops->thaw;
363 	case PM_EVENT_RESTORE:
364 		return ops->restore;
365 #endif /* CONFIG_HIBERNATE_CALLBACKS */
366 	}
367 
368 	return NULL;
369 }
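
/*
 * A minimal sketch of how a driver's dev_pm_ops is usually wired up so that
 * pm_op() above can find a callback for each sleep-related event.  The names
 * foo_suspend, foo_resume and foo_pm_ops are hypothetical and only serve as
 * an illustration here:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		return 0;	// quiesce the hypothetical device
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return 0;	// bring it back to a functional state
 *	}
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * SET_SYSTEM_SLEEP_PM_OPS() reuses the same pair for suspend/resume,
 * freeze/thaw and poweroff/restore, so every case handled by pm_op()
 * resolves to one of the two callbacks above.
 */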
370 
371 /**
372  * pm_late_early_op - Return the PM operation appropriate for given PM event.
373  * @ops: PM operations to choose from.
374  * @state: PM transition of the system being carried out.
375  *
376  * Runtime PM is disabled for @dev while this function is being executed.
377  */
378 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
379 				      pm_message_t state)
380 {
381 	switch (state.event) {
382 #ifdef CONFIG_SUSPEND
383 	case PM_EVENT_SUSPEND:
384 		return ops->suspend_late;
385 	case PM_EVENT_RESUME:
386 		return ops->resume_early;
387 #endif /* CONFIG_SUSPEND */
388 #ifdef CONFIG_HIBERNATE_CALLBACKS
389 	case PM_EVENT_FREEZE:
390 	case PM_EVENT_QUIESCE:
391 		return ops->freeze_late;
392 	case PM_EVENT_HIBERNATE:
393 		return ops->poweroff_late;
394 	case PM_EVENT_THAW:
395 	case PM_EVENT_RECOVER:
396 		return ops->thaw_early;
397 	case PM_EVENT_RESTORE:
398 		return ops->restore_early;
399 #endif /* CONFIG_HIBERNATE_CALLBACKS */
400 	}
401 
402 	return NULL;
403 }
404 
405 /**
406  * pm_noirq_op - Return the PM operation appropriate for given PM event.
407  * @ops: PM operations to choose from.
408  * @state: PM transition of the system being carried out.
409  *
410  * The driver of @dev will not receive interrupts while this function is being
411  * executed.
412  */
413 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
414 {
415 	switch (state.event) {
416 #ifdef CONFIG_SUSPEND
417 	case PM_EVENT_SUSPEND:
418 		return ops->suspend_noirq;
419 	case PM_EVENT_RESUME:
420 		return ops->resume_noirq;
421 #endif /* CONFIG_SUSPEND */
422 #ifdef CONFIG_HIBERNATE_CALLBACKS
423 	case PM_EVENT_FREEZE:
424 	case PM_EVENT_QUIESCE:
425 		return ops->freeze_noirq;
426 	case PM_EVENT_HIBERNATE:
427 		return ops->poweroff_noirq;
428 	case PM_EVENT_THAW:
429 	case PM_EVENT_RECOVER:
430 		return ops->thaw_noirq;
431 	case PM_EVENT_RESTORE:
432 		return ops->restore_noirq;
433 #endif /* CONFIG_HIBERNATE_CALLBACKS */
434 	}
435 
436 	return NULL;
437 }
438 
439 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
440 {
441 	dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
442 		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
443 		", may wakeup" : "", dev->power.driver_flags);
444 }
445 
446 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
447 			int error)
448 {
449 	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
450 		error);
451 }
452 
453 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
454 			  const char *info)
455 {
456 	ktime_t calltime;
457 	u64 usecs64;
458 	int usecs;
459 
460 	calltime = ktime_get();
461 	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
462 	do_div(usecs64, NSEC_PER_USEC);
463 	usecs = usecs64;
464 	if (usecs == 0)
465 		usecs = 1;
466 
467 	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
468 		  info ?: "", info ? " " : "", pm_verb(state.event),
469 		  error ? "aborted" : "complete",
470 		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
471 }
472 
473 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
474 			    pm_message_t state, const char *info)
475 {
476 	ktime_t calltime;
477 	int error;
478 
479 	if (!cb)
480 		return 0;
481 
482 	calltime = initcall_debug_start(dev, cb);
483 
484 	pm_dev_dbg(dev, state, info);
485 	trace_device_pm_callback_start(dev, info, state.event);
486 	error = cb(dev);
487 	trace_device_pm_callback_end(dev, error);
488 	suspend_report_result(dev, cb, error);
489 
490 	initcall_debug_report(dev, calltime, cb, error);
491 
492 	return error;
493 }
494 
495 #ifdef CONFIG_DPM_WATCHDOG
496 struct dpm_watchdog {
497 	struct device		*dev;
498 	struct task_struct	*tsk;
499 	struct timer_list	timer;
500 	bool			fatal;
501 };
502 
503 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
504 	struct dpm_watchdog wd
505 
506 /**
507  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
508  * @t: The timer that PM watchdog depends on.
509  *
510  * Called when a driver has timed out suspending or resuming.
511  * There's not much we can do here to recover so panic() to
512  * capture a crash-dump in pstore.
513  */
514 static void dpm_watchdog_handler(struct timer_list *t)
515 {
516 	struct dpm_watchdog *wd = timer_container_of(wd, t, timer);
517 	struct timer_list *timer = &wd->timer;
518 	unsigned int time_left;
519 
520 	if (wd->fatal) {
521 		dev_emerg(wd->dev, "**** DPM device timeout ****\n");
522 		show_stack(wd->tsk, NULL, KERN_EMERG);
523 		panic("%s %s: unrecoverable failure\n",
524 			dev_driver_string(wd->dev), dev_name(wd->dev));
525 	}
526 
527 	time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
528 	dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
529 		 CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
530 	show_stack(wd->tsk, NULL, KERN_WARNING);
531 
532 	wd->fatal = true;
533 	mod_timer(timer, jiffies + HZ * time_left);
534 }
535 
536 /**
537  * dpm_watchdog_set - Enable pm watchdog for given device.
538  * @wd: Watchdog. Must be allocated on the stack.
539  * @dev: Device to handle.
540  */
541 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
542 {
543 	struct timer_list *timer = &wd->timer;
544 
545 	wd->dev = dev;
546 	wd->tsk = current;
547 	wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
548 
549 	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
550 	/* use same timeout value for both suspend and resume */
551 	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
552 	add_timer(timer);
553 }
554 
555 /**
556  * dpm_watchdog_clear - Disable suspend/resume watchdog.
557  * @wd: Watchdog to disable.
558  */
559 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
560 {
561 	struct timer_list *timer = &wd->timer;
562 
563 	timer_delete_sync(timer);
564 	timer_destroy_on_stack(timer);
565 }
566 #else
567 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
568 #define dpm_watchdog_set(x, y)
569 #define dpm_watchdog_clear(x)
570 #endif
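
/*
 * Worked example for the watchdog above, using hypothetical Kconfig values
 * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT=120 and CONFIG_DPM_WATCHDOG_TIMEOUT=240:
 * dpm_watchdog_set() arms the timer for 120 s and leaves wd->fatal false
 * because the two values differ.  The first expiry then only warns and
 * re-arms the timer for time_left = 240 - 120 = 120 s, so the panic happens
 * 240 s after the callback started.  If the two values are configured equal,
 * wd->fatal is already true when the timer is armed and the first expiry
 * panics right away.
 */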
571 
572 /*------------------------- Resume routines -------------------------*/
573 
574 /**
575  * dev_pm_skip_resume - System-wide device resume optimization check.
576  * @dev: Target device.
577  *
578  * Return:
579  * - %false if the transition under way is RESTORE.
580  * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
581  * - The logical negation of %power.must_resume otherwise (that is, when the
582  *   transition under way is RESUME).
583  */
584 bool dev_pm_skip_resume(struct device *dev)
585 {
586 	if (pm_transition.event == PM_EVENT_RESTORE)
587 		return false;
588 
589 	if (pm_transition.event == PM_EVENT_THAW)
590 		return dev_pm_skip_suspend(dev);
591 
592 	return !dev->power.must_resume;
593 }
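
/*
 * A minimal sketch (with a hypothetical foo_probe()) of how a driver opts in
 * to the optimization checked above: setting DPM_FLAG_MAY_SKIP_RESUME
 * (usually together with DPM_FLAG_SMART_SUSPEND) tells the PM core that the
 * device may be left suspended after a system transition if nothing requires
 * it to be resumed:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *					     DPM_FLAG_MAY_SKIP_RESUME);
 *		// ... hypothetical device setup ...
 *		return 0;
 *	}
 *
 * Whether the device actually stays suspended still depends on
 * power.may_skip_resume and power.must_resume as computed during the
 * preceding suspend phases in this file.
 */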
594 
595 static bool is_async(struct device *dev)
596 {
597 	return dev->power.async_suspend && pm_async_enabled
598 		&& !pm_trace_is_enabled();
599 }
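
/*
 * Asynchronous handling is opt-in per device and gated globally, as the
 * check above shows.  A minimal sketch, assuming a hypothetical probe path:
 *
 *	// Mark the device as safe to suspend/resume in parallel with others.
 *	device_enable_async_suspend(dev);
 *
 * The global side is controlled through pm_async_enabled (exposed as
 * /sys/power/pm_async) and is overridden while PM trace is active, per the
 * pm_trace_is_enabled() check above.
 */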
600 
601 static bool __dpm_async(struct device *dev, async_func_t func)
602 {
603 	if (dev->power.work_in_progress)
604 		return true;
605 
606 	if (!is_async(dev))
607 		return false;
608 
609 	dev->power.work_in_progress = true;
610 
611 	get_device(dev);
612 
613 	if (async_schedule_dev_nocall(func, dev))
614 		return true;
615 
616 	put_device(dev);
617 
618 	return false;
619 }
620 
621 static bool dpm_async_fn(struct device *dev, async_func_t func)
622 {
623 	guard(mutex)(&async_wip_mtx);
624 
625 	return __dpm_async(dev, func);
626 }
627 
628 static int dpm_async_with_cleanup(struct device *dev, void *fn)
629 {
630 	guard(mutex)(&async_wip_mtx);
631 
632 	if (!__dpm_async(dev, fn))
633 		dev->power.work_in_progress = false;
634 
635 	return 0;
636 }
637 
638 static void dpm_async_resume_children(struct device *dev, async_func_t func)
639 {
640 	/*
641 	 * Prevent racing with dpm_clear_async_state() during initial list
642 	 * walks in dpm_noirq_resume_devices(), dpm_resume_early(), and
643 	 * dpm_resume().
644 	 */
645 	guard(mutex)(&dpm_list_mtx);
646 
647 	/*
648 	 * Start processing "async" children of the device unless it's been
649 	 * started already for them.
650 	 *
651 	 * This could have been done for the device's "async" consumers too, but
652 	 * they either need to wait for their parents or the processing has
653 	 * already started for them after their parents were processed.
654 	 */
655 	device_for_each_child(dev, func, dpm_async_with_cleanup);
656 }
657 
658 static void dpm_clear_async_state(struct device *dev)
659 {
660 	reinit_completion(&dev->power.completion);
661 	dev->power.work_in_progress = false;
662 }
663 
664 static bool dpm_root_device(struct device *dev)
665 {
666 	return !dev->parent;
667 }
668 
669 static void async_resume_noirq(void *data, async_cookie_t cookie);
670 
671 /**
672  * device_resume_noirq - Execute a "noirq resume" callback for given device.
673  * @dev: Device to handle.
674  * @state: PM transition of the system being carried out.
675  * @async: If true, the device is being resumed asynchronously.
676  *
677  * The driver of @dev will not receive interrupts while this function is being
678  * executed.
679  */
680 static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
681 {
682 	pm_callback_t callback = NULL;
683 	const char *info = NULL;
684 	bool skip_resume;
685 	int error = 0;
686 
687 	TRACE_DEVICE(dev);
688 	TRACE_RESUME(0);
689 
690 	if (dev->power.syscore || dev->power.direct_complete)
691 		goto Out;
692 
693 	if (!dev->power.is_noirq_suspended)
694 		goto Out;
695 
696 	if (!dpm_wait_for_superior(dev, async))
697 		goto Out;
698 
699 	skip_resume = dev_pm_skip_resume(dev);
700 	/*
701 	 * If the driver callback is skipped below or by the middle layer
702 	 * callback and device_resume_early() also skips the driver callback for
703 	 * this device later, it needs to appear as "suspended" to PM-runtime,
704 	 * so change its status accordingly.
705 	 *
706 	 * Otherwise, the device is going to be resumed, so set its PM-runtime
707 	 * status to "active" unless its power.smart_suspend flag is clear, in
708 	 * which case it is not necessary to update its PM-runtime status.
709 	 */
710 	if (skip_resume)
711 		pm_runtime_set_suspended(dev);
712 	else if (dev_pm_smart_suspend(dev))
713 		pm_runtime_set_active(dev);
714 
715 	if (dev->pm_domain) {
716 		info = "noirq power domain ";
717 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
718 	} else if (dev->type && dev->type->pm) {
719 		info = "noirq type ";
720 		callback = pm_noirq_op(dev->type->pm, state);
721 	} else if (dev->class && dev->class->pm) {
722 		info = "noirq class ";
723 		callback = pm_noirq_op(dev->class->pm, state);
724 	} else if (dev->bus && dev->bus->pm) {
725 		info = "noirq bus ";
726 		callback = pm_noirq_op(dev->bus->pm, state);
727 	}
728 	if (callback)
729 		goto Run;
730 
731 	if (skip_resume)
732 		goto Skip;
733 
734 	if (dev->driver && dev->driver->pm) {
735 		info = "noirq driver ";
736 		callback = pm_noirq_op(dev->driver->pm, state);
737 	}
738 
739 Run:
740 	error = dpm_run_callback(callback, dev, state, info);
741 
742 Skip:
743 	dev->power.is_noirq_suspended = false;
744 
745 Out:
746 	complete_all(&dev->power.completion);
747 	TRACE_RESUME(error);
748 
749 	if (error) {
750 		async_error = error;
751 		dpm_save_failed_dev(dev_name(dev));
752 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
753 	}
754 
755 	dpm_async_resume_children(dev, async_resume_noirq);
756 }
757 
758 static void async_resume_noirq(void *data, async_cookie_t cookie)
759 {
760 	struct device *dev = data;
761 
762 	device_resume_noirq(dev, pm_transition, true);
763 	put_device(dev);
764 }
765 
766 static void dpm_noirq_resume_devices(pm_message_t state)
767 {
768 	struct device *dev;
769 	ktime_t starttime = ktime_get();
770 
771 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
772 
773 	async_error = 0;
774 	pm_transition = state;
775 
776 	mutex_lock(&dpm_list_mtx);
777 
778 	/*
779 	 * Start processing "async" root devices upfront so they don't wait for
780 	 * the "sync" devices they don't depend on.
781 	 */
782 	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
783 		dpm_clear_async_state(dev);
784 		if (dpm_root_device(dev))
785 			dpm_async_with_cleanup(dev, async_resume_noirq);
786 	}
787 
788 	while (!list_empty(&dpm_noirq_list)) {
789 		dev = to_device(dpm_noirq_list.next);
790 		list_move_tail(&dev->power.entry, &dpm_late_early_list);
791 
792 		if (!dpm_async_fn(dev, async_resume_noirq)) {
793 			get_device(dev);
794 
795 			mutex_unlock(&dpm_list_mtx);
796 
797 			device_resume_noirq(dev, state, false);
798 
799 			put_device(dev);
800 
801 			mutex_lock(&dpm_list_mtx);
802 		}
803 	}
804 	mutex_unlock(&dpm_list_mtx);
805 	async_synchronize_full();
806 	dpm_show_time(starttime, state, 0, "noirq");
807 	if (async_error)
808 		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
809 
810 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
811 }
812 
813 /**
814  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
815  * @state: PM transition of the system being carried out.
816  *
817  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
818  * allow device drivers' interrupt handlers to be called.
819  */
820 void dpm_resume_noirq(pm_message_t state)
821 {
822 	dpm_noirq_resume_devices(state);
823 
824 	resume_device_irqs();
825 	device_wakeup_disarm_wake_irqs();
826 }
827 
828 static void async_resume_early(void *data, async_cookie_t cookie);
829 
830 /**
831  * device_resume_early - Execute an "early resume" callback for given device.
832  * @dev: Device to handle.
833  * @state: PM transition of the system being carried out.
834  * @async: If true, the device is being resumed asynchronously.
835  *
836  * Runtime PM is disabled for @dev while this function is being executed.
837  */
838 static void device_resume_early(struct device *dev, pm_message_t state, bool async)
839 {
840 	pm_callback_t callback = NULL;
841 	const char *info = NULL;
842 	int error = 0;
843 
844 	TRACE_DEVICE(dev);
845 	TRACE_RESUME(0);
846 
847 	if (dev->power.syscore || dev->power.direct_complete)
848 		goto Out;
849 
850 	if (!dev->power.is_late_suspended)
851 		goto Out;
852 
853 	if (!dpm_wait_for_superior(dev, async))
854 		goto Out;
855 
856 	if (dev->pm_domain) {
857 		info = "early power domain ";
858 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
859 	} else if (dev->type && dev->type->pm) {
860 		info = "early type ";
861 		callback = pm_late_early_op(dev->type->pm, state);
862 	} else if (dev->class && dev->class->pm) {
863 		info = "early class ";
864 		callback = pm_late_early_op(dev->class->pm, state);
865 	} else if (dev->bus && dev->bus->pm) {
866 		info = "early bus ";
867 		callback = pm_late_early_op(dev->bus->pm, state);
868 	}
869 	if (callback)
870 		goto Run;
871 
872 	if (dev_pm_skip_resume(dev))
873 		goto Skip;
874 
875 	if (dev->driver && dev->driver->pm) {
876 		info = "early driver ";
877 		callback = pm_late_early_op(dev->driver->pm, state);
878 	}
879 
880 Run:
881 	error = dpm_run_callback(callback, dev, state, info);
882 
883 Skip:
884 	dev->power.is_late_suspended = false;
885 
886 Out:
887 	TRACE_RESUME(error);
888 
889 	pm_runtime_enable(dev);
890 	complete_all(&dev->power.completion);
891 
892 	if (error) {
893 		async_error = error;
894 		dpm_save_failed_dev(dev_name(dev));
895 		pm_dev_err(dev, state, async ? " async early" : " early", error);
896 	}
897 
898 	dpm_async_resume_children(dev, async_resume_early);
899 }
900 
901 static void async_resume_early(void *data, async_cookie_t cookie)
902 {
903 	struct device *dev = data;
904 
905 	device_resume_early(dev, pm_transition, true);
906 	put_device(dev);
907 }
908 
909 /**
910  * dpm_resume_early - Execute "early resume" callbacks for all devices.
911  * @state: PM transition of the system being carried out.
912  */
913 void dpm_resume_early(pm_message_t state)
914 {
915 	struct device *dev;
916 	ktime_t starttime = ktime_get();
917 
918 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
919 
920 	async_error = 0;
921 	pm_transition = state;
922 
923 	mutex_lock(&dpm_list_mtx);
924 
925 	/*
926 	 * Start processing "async" root devices upfront so they don't wait for
927 	 * the "sync" devices they don't depend on.
928 	 */
929 	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
930 		dpm_clear_async_state(dev);
931 		if (dpm_root_device(dev))
932 			dpm_async_with_cleanup(dev, async_resume_early);
933 	}
934 
935 	while (!list_empty(&dpm_late_early_list)) {
936 		dev = to_device(dpm_late_early_list.next);
937 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
938 
939 		if (!dpm_async_fn(dev, async_resume_early)) {
940 			get_device(dev);
941 
942 			mutex_unlock(&dpm_list_mtx);
943 
944 			device_resume_early(dev, state, false);
945 
946 			put_device(dev);
947 
948 			mutex_lock(&dpm_list_mtx);
949 		}
950 	}
951 	mutex_unlock(&dpm_list_mtx);
952 	async_synchronize_full();
953 	dpm_show_time(starttime, state, 0, "early");
954 	if (async_error)
955 		dpm_save_failed_step(SUSPEND_RESUME_EARLY);
956 
957 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
958 }
959 
960 /**
961  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
962  * @state: PM transition of the system being carried out.
963  */
964 void dpm_resume_start(pm_message_t state)
965 {
966 	dpm_resume_noirq(state);
967 	dpm_resume_early(state);
968 }
969 EXPORT_SYMBOL_GPL(dpm_resume_start);
970 
971 static void async_resume(void *data, async_cookie_t cookie);
972 
973 /**
974  * device_resume - Execute "resume" callbacks for given device.
975  * @dev: Device to handle.
976  * @state: PM transition of the system being carried out.
977  * @async: If true, the device is being resumed asynchronously.
978  */
979 static void device_resume(struct device *dev, pm_message_t state, bool async)
980 {
981 	pm_callback_t callback = NULL;
982 	const char *info = NULL;
983 	int error = 0;
984 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
985 
986 	TRACE_DEVICE(dev);
987 	TRACE_RESUME(0);
988 
989 	if (dev->power.syscore)
990 		goto Complete;
991 
992 	if (!dev->power.is_suspended)
993 		goto Complete;
994 
995 	dev->power.is_suspended = false;
996 
997 	if (dev->power.direct_complete) {
998 		/*
999 		 * Allow new children to be added under the device after this
1000 		 * point if it has no PM callbacks.
1001 		 */
1002 		if (dev->power.no_pm_callbacks)
1003 			dev->power.is_prepared = false;
1004 
1005 		/* Match the pm_runtime_disable() in device_suspend(). */
1006 		pm_runtime_enable(dev);
1007 		goto Complete;
1008 	}
1009 
1010 	if (!dpm_wait_for_superior(dev, async))
1011 		goto Complete;
1012 
1013 	dpm_watchdog_set(&wd, dev);
1014 	device_lock(dev);
1015 
1016 	/*
1017 	 * This is a fib.  But we'll allow new children to be added below
1018 	 * a resumed device, even if the device hasn't been completed yet.
1019 	 */
1020 	dev->power.is_prepared = false;
1021 
1022 	if (dev->pm_domain) {
1023 		info = "power domain ";
1024 		callback = pm_op(&dev->pm_domain->ops, state);
1025 		goto Driver;
1026 	}
1027 
1028 	if (dev->type && dev->type->pm) {
1029 		info = "type ";
1030 		callback = pm_op(dev->type->pm, state);
1031 		goto Driver;
1032 	}
1033 
1034 	if (dev->class && dev->class->pm) {
1035 		info = "class ";
1036 		callback = pm_op(dev->class->pm, state);
1037 		goto Driver;
1038 	}
1039 
1040 	if (dev->bus) {
1041 		if (dev->bus->pm) {
1042 			info = "bus ";
1043 			callback = pm_op(dev->bus->pm, state);
1044 		} else if (dev->bus->resume) {
1045 			info = "legacy bus ";
1046 			callback = dev->bus->resume;
1047 			goto End;
1048 		}
1049 	}
1050 
1051  Driver:
1052 	if (!callback && dev->driver && dev->driver->pm) {
1053 		info = "driver ";
1054 		callback = pm_op(dev->driver->pm, state);
1055 	}
1056 
1057  End:
1058 	error = dpm_run_callback(callback, dev, state, info);
1059 
1060 	device_unlock(dev);
1061 	dpm_watchdog_clear(&wd);
1062 
1063  Complete:
1064 	complete_all(&dev->power.completion);
1065 
1066 	TRACE_RESUME(error);
1067 
1068 	if (error) {
1069 		async_error = error;
1070 		dpm_save_failed_dev(dev_name(dev));
1071 		pm_dev_err(dev, state, async ? " async" : "", error);
1072 	}
1073 
1074 	dpm_async_resume_children(dev, async_resume);
1075 }
1076 
1077 static void async_resume(void *data, async_cookie_t cookie)
1078 {
1079 	struct device *dev = data;
1080 
1081 	device_resume(dev, pm_transition, true);
1082 	put_device(dev);
1083 }
1084 
1085 /**
1086  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1087  * @state: PM transition of the system being carried out.
1088  *
1089  * Execute the appropriate "resume" callback for all devices whose status
1090  * indicates that they are suspended.
1091  */
1092 void dpm_resume(pm_message_t state)
1093 {
1094 	struct device *dev;
1095 	ktime_t starttime = ktime_get();
1096 
1097 	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1098 	might_sleep();
1099 
1100 	pm_transition = state;
1101 	async_error = 0;
1102 
1103 	mutex_lock(&dpm_list_mtx);
1104 
1105 	/*
1106 	 * Start processing "async" root devices upfront so they don't wait for
1107 	 * the "sync" devices they don't depend on.
1108 	 */
1109 	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
1110 		dpm_clear_async_state(dev);
1111 		if (dpm_root_device(dev))
1112 			dpm_async_with_cleanup(dev, async_resume);
1113 	}
1114 
1115 	while (!list_empty(&dpm_suspended_list)) {
1116 		dev = to_device(dpm_suspended_list.next);
1117 		list_move_tail(&dev->power.entry, &dpm_prepared_list);
1118 
1119 		if (!dpm_async_fn(dev, async_resume)) {
1120 			get_device(dev);
1121 
1122 			mutex_unlock(&dpm_list_mtx);
1123 
1124 			device_resume(dev, state, false);
1125 
1126 			put_device(dev);
1127 
1128 			mutex_lock(&dpm_list_mtx);
1129 		}
1130 	}
1131 	mutex_unlock(&dpm_list_mtx);
1132 	async_synchronize_full();
1133 	dpm_show_time(starttime, state, 0, NULL);
1134 	if (async_error)
1135 		dpm_save_failed_step(SUSPEND_RESUME);
1136 
1137 	cpufreq_resume();
1138 	devfreq_resume();
1139 	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1140 }
1141 
1142 /**
1143  * device_complete - Complete a PM transition for given device.
1144  * @dev: Device to handle.
1145  * @state: PM transition of the system being carried out.
1146  */
1147 static void device_complete(struct device *dev, pm_message_t state)
1148 {
1149 	void (*callback)(struct device *) = NULL;
1150 	const char *info = NULL;
1151 
1152 	if (dev->power.syscore)
1153 		goto out;
1154 
1155 	device_lock(dev);
1156 
1157 	if (dev->pm_domain) {
1158 		info = "completing power domain ";
1159 		callback = dev->pm_domain->ops.complete;
1160 	} else if (dev->type && dev->type->pm) {
1161 		info = "completing type ";
1162 		callback = dev->type->pm->complete;
1163 	} else if (dev->class && dev->class->pm) {
1164 		info = "completing class ";
1165 		callback = dev->class->pm->complete;
1166 	} else if (dev->bus && dev->bus->pm) {
1167 		info = "completing bus ";
1168 		callback = dev->bus->pm->complete;
1169 	}
1170 
1171 	if (!callback && dev->driver && dev->driver->pm) {
1172 		info = "completing driver ";
1173 		callback = dev->driver->pm->complete;
1174 	}
1175 
1176 	if (callback) {
1177 		pm_dev_dbg(dev, state, info);
1178 		callback(dev);
1179 	}
1180 
1181 	device_unlock(dev);
1182 
1183 out:
1184 	/* If enabling runtime PM for the device is blocked, unblock it. */
1185 	pm_runtime_unblock(dev);
1186 	pm_runtime_put(dev);
1187 }
1188 
1189 /**
1190  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1191  * @state: PM transition of the system being carried out.
1192  *
1193  * Execute the ->complete() callbacks for all devices whose PM status is not
1194  * DPM_ON (this allows new devices to be registered).
1195  */
1196 void dpm_complete(pm_message_t state)
1197 {
1198 	struct list_head list;
1199 
1200 	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1201 	might_sleep();
1202 
1203 	INIT_LIST_HEAD(&list);
1204 	mutex_lock(&dpm_list_mtx);
1205 	while (!list_empty(&dpm_prepared_list)) {
1206 		struct device *dev = to_device(dpm_prepared_list.prev);
1207 
1208 		get_device(dev);
1209 		dev->power.is_prepared = false;
1210 		list_move(&dev->power.entry, &list);
1211 
1212 		mutex_unlock(&dpm_list_mtx);
1213 
1214 		trace_device_pm_callback_start(dev, "", state.event);
1215 		device_complete(dev, state);
1216 		trace_device_pm_callback_end(dev, 0);
1217 
1218 		put_device(dev);
1219 
1220 		mutex_lock(&dpm_list_mtx);
1221 	}
1222 	list_splice(&list, &dpm_list);
1223 	mutex_unlock(&dpm_list_mtx);
1224 
1225 	/* Allow device probing and trigger re-probing of deferred devices */
1226 	device_unblock_probing();
1227 	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1228 }
1229 
1230 /**
1231  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1232  * @state: PM transition of the system being carried out.
1233  *
1234  * Execute "resume" callbacks for all devices and complete the PM transition of
1235  * the system.
1236  */
1237 void dpm_resume_end(pm_message_t state)
1238 {
1239 	pm_restore_gfp_mask();
1240 	dpm_resume(state);
1241 	dpm_complete(state);
1242 }
1243 EXPORT_SYMBOL_GPL(dpm_resume_end);
1244 
1245 
1246 /*------------------------- Suspend routines -------------------------*/
1247 
1248 static bool dpm_leaf_device(struct device *dev)
1249 {
1250 	struct device *child;
1251 
1252 	lockdep_assert_held(&dpm_list_mtx);
1253 
1254 	child = device_find_any_child(dev);
1255 	if (child) {
1256 		put_device(child);
1257 
1258 		return false;
1259 	}
1260 
1261 	return true;
1262 }
1263 
1264 static void dpm_async_suspend_parent(struct device *dev, async_func_t func)
1265 {
1266 	guard(mutex)(&dpm_list_mtx);
1267 
1268 	/*
1269 	 * If the device is suspended asynchronously and the parent's callback
1270 	 * deletes both the device and the parent itself, the parent object may
1271 	 * be freed while this function is running, so avoid that by checking
1272 	 * if the device has been deleted already as the parent cannot be
1273 	 * deleted before it.
1274 	 */
1275 	if (!device_pm_initialized(dev))
1276 		return;
1277 
1278 	/* Start processing the device's parent if it is "async". */
1279 	if (dev->parent)
1280 		dpm_async_with_cleanup(dev->parent, func);
1281 }
1282 
1283 /**
1284  * resume_event - Return a "resume" message for given "suspend" sleep state.
1285  * @sleep_state: PM message representing a sleep state.
1286  *
1287  * Return a PM message representing the resume event corresponding to given
1288  * sleep state.
1289  */
1290 static pm_message_t resume_event(pm_message_t sleep_state)
1291 {
1292 	switch (sleep_state.event) {
1293 	case PM_EVENT_SUSPEND:
1294 		return PMSG_RESUME;
1295 	case PM_EVENT_FREEZE:
1296 	case PM_EVENT_QUIESCE:
1297 		return PMSG_RECOVER;
1298 	case PM_EVENT_HIBERNATE:
1299 		return PMSG_RESTORE;
1300 	}
1301 	return PMSG_ON;
1302 }
1303 
1304 static void dpm_superior_set_must_resume(struct device *dev)
1305 {
1306 	struct device_link *link;
1307 	int idx;
1308 
1309 	if (dev->parent)
1310 		dev->parent->power.must_resume = true;
1311 
1312 	idx = device_links_read_lock();
1313 
1314 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1315 		link->supplier->power.must_resume = true;
1316 
1317 	device_links_read_unlock(idx);
1318 }
1319 
1320 static void async_suspend_noirq(void *data, async_cookie_t cookie);
1321 
1322 /**
1323  * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1324  * @dev: Device to handle.
1325  * @state: PM transition of the system being carried out.
1326  * @async: If true, the device is being suspended asynchronously.
1327  *
1328  * The driver of @dev will not receive interrupts while this function is being
1329  * executed.
1330  */
1331 static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1332 {
1333 	pm_callback_t callback = NULL;
1334 	const char *info = NULL;
1335 	int error = 0;
1336 
1337 	TRACE_DEVICE(dev);
1338 	TRACE_SUSPEND(0);
1339 
1340 	dpm_wait_for_subordinate(dev, async);
1341 
1342 	if (async_error)
1343 		goto Complete;
1344 
1345 	if (dev->power.syscore || dev->power.direct_complete)
1346 		goto Complete;
1347 
1348 	if (dev->pm_domain) {
1349 		info = "noirq power domain ";
1350 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1351 	} else if (dev->type && dev->type->pm) {
1352 		info = "noirq type ";
1353 		callback = pm_noirq_op(dev->type->pm, state);
1354 	} else if (dev->class && dev->class->pm) {
1355 		info = "noirq class ";
1356 		callback = pm_noirq_op(dev->class->pm, state);
1357 	} else if (dev->bus && dev->bus->pm) {
1358 		info = "noirq bus ";
1359 		callback = pm_noirq_op(dev->bus->pm, state);
1360 	}
1361 	if (callback)
1362 		goto Run;
1363 
1364 	if (dev_pm_skip_suspend(dev))
1365 		goto Skip;
1366 
1367 	if (dev->driver && dev->driver->pm) {
1368 		info = "noirq driver ";
1369 		callback = pm_noirq_op(dev->driver->pm, state);
1370 	}
1371 
1372 Run:
1373 	error = dpm_run_callback(callback, dev, state, info);
1374 	if (error) {
1375 		async_error = error;
1376 		dpm_save_failed_dev(dev_name(dev));
1377 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
1378 		goto Complete;
1379 	}
1380 
1381 Skip:
1382 	dev->power.is_noirq_suspended = true;
1383 
1384 	/*
1385 	 * Devices must be resumed unless they are explicitly allowed to be left
1386 	 * in suspend, but even in that case skipping the resume of devices that
1387 	 * were in use right before the system suspend (as indicated by their
1388 	 * runtime PM usage counters and child counters) would be suboptimal.
1389 	 */
1390 	if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1391 	      dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
1392 		dev->power.must_resume = true;
1393 
1394 	if (dev->power.must_resume)
1395 		dpm_superior_set_must_resume(dev);
1396 
1397 Complete:
1398 	complete_all(&dev->power.completion);
1399 	TRACE_SUSPEND(error);
1400 
1401 	if (error || async_error)
1402 		return error;
1403 
1404 	dpm_async_suspend_parent(dev, async_suspend_noirq);
1405 
1406 	return 0;
1407 }
1408 
1409 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1410 {
1411 	struct device *dev = data;
1412 
1413 	device_suspend_noirq(dev, pm_transition, true);
1414 	put_device(dev);
1415 }
1416 
1417 static int dpm_noirq_suspend_devices(pm_message_t state)
1418 {
1419 	ktime_t starttime = ktime_get();
1420 	struct device *dev;
1421 	int error = 0;
1422 
1423 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1424 
1425 	pm_transition = state;
1426 	async_error = 0;
1427 
1428 	mutex_lock(&dpm_list_mtx);
1429 
1430 	/*
1431 	 * Start processing "async" leaf devices upfront so they don't need to
1432 	 * wait for the "sync" devices they don't depend on.
1433 	 */
1434 	list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) {
1435 		dpm_clear_async_state(dev);
1436 		if (dpm_leaf_device(dev))
1437 			dpm_async_with_cleanup(dev, async_suspend_noirq);
1438 	}
1439 
1440 	while (!list_empty(&dpm_late_early_list)) {
1441 		dev = to_device(dpm_late_early_list.prev);
1442 
1443 		list_move(&dev->power.entry, &dpm_noirq_list);
1444 
1445 		if (dpm_async_fn(dev, async_suspend_noirq))
1446 			continue;
1447 
1448 		get_device(dev);
1449 
1450 		mutex_unlock(&dpm_list_mtx);
1451 
1452 		error = device_suspend_noirq(dev, state, false);
1453 
1454 		put_device(dev);
1455 
1456 		mutex_lock(&dpm_list_mtx);
1457 
1458 		if (error || async_error) {
1459 			/*
1460 			 * Move all devices to the target list to resume them
1461 			 * properly.
1462 			 */
1463 			list_splice_init(&dpm_late_early_list, &dpm_noirq_list);
1464 			break;
1465 		}
1466 	}
1467 
1468 	mutex_unlock(&dpm_list_mtx);
1469 
1470 	async_synchronize_full();
1471 	if (!error)
1472 		error = async_error;
1473 
1474 	if (error)
1475 		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1476 
1477 	dpm_show_time(starttime, state, error, "noirq");
1478 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1479 	return error;
1480 }
1481 
1482 /**
1483  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1484  * @state: PM transition of the system being carried out.
1485  *
1486  * Prevent device drivers' interrupt handlers from being called and invoke
1487  * "noirq" suspend callbacks for all non-sysdev devices.
1488  */
1489 int dpm_suspend_noirq(pm_message_t state)
1490 {
1491 	int ret;
1492 
1493 	device_wakeup_arm_wake_irqs();
1494 	suspend_device_irqs();
1495 
1496 	ret = dpm_noirq_suspend_devices(state);
1497 	if (ret)
1498 		dpm_resume_noirq(resume_event(state));
1499 
1500 	return ret;
1501 }
1502 
1503 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1504 {
1505 	struct device *parent = dev->parent;
1506 
1507 	if (!parent)
1508 		return;
1509 
1510 	spin_lock_irq(&parent->power.lock);
1511 
1512 	if (device_wakeup_path(dev) && !parent->power.ignore_children)
1513 		parent->power.wakeup_path = true;
1514 
1515 	spin_unlock_irq(&parent->power.lock);
1516 }
1517 
1518 static void async_suspend_late(void *data, async_cookie_t cookie);
1519 
1520 /**
1521  * device_suspend_late - Execute a "late suspend" callback for given device.
1522  * @dev: Device to handle.
1523  * @state: PM transition of the system being carried out.
1524  * @async: If true, the device is being suspended asynchronously.
1525  *
1526  * Runtime PM is disabled for @dev while this function is being executed.
1527  */
1528 static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
1529 {
1530 	pm_callback_t callback = NULL;
1531 	const char *info = NULL;
1532 	int error = 0;
1533 
1534 	TRACE_DEVICE(dev);
1535 	TRACE_SUSPEND(0);
1536 
1537 	/*
1538 	 * Disable runtime PM for the device without checking if there is a
1539 	 * pending resume request for it.
1540 	 */
1541 	__pm_runtime_disable(dev, false);
1542 
1543 	dpm_wait_for_subordinate(dev, async);
1544 
1545 	if (async_error)
1546 		goto Complete;
1547 
1548 	if (pm_wakeup_pending()) {
1549 		async_error = -EBUSY;
1550 		goto Complete;
1551 	}
1552 
1553 	if (dev->power.syscore || dev->power.direct_complete)
1554 		goto Complete;
1555 
1556 	if (dev->pm_domain) {
1557 		info = "late power domain ";
1558 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1559 	} else if (dev->type && dev->type->pm) {
1560 		info = "late type ";
1561 		callback = pm_late_early_op(dev->type->pm, state);
1562 	} else if (dev->class && dev->class->pm) {
1563 		info = "late class ";
1564 		callback = pm_late_early_op(dev->class->pm, state);
1565 	} else if (dev->bus && dev->bus->pm) {
1566 		info = "late bus ";
1567 		callback = pm_late_early_op(dev->bus->pm, state);
1568 	}
1569 	if (callback)
1570 		goto Run;
1571 
1572 	if (dev_pm_skip_suspend(dev))
1573 		goto Skip;
1574 
1575 	if (dev->driver && dev->driver->pm) {
1576 		info = "late driver ";
1577 		callback = pm_late_early_op(dev->driver->pm, state);
1578 	}
1579 
1580 Run:
1581 	error = dpm_run_callback(callback, dev, state, info);
1582 	if (error) {
1583 		async_error = error;
1584 		dpm_save_failed_dev(dev_name(dev));
1585 		pm_dev_err(dev, state, async ? " async late" : " late", error);
1586 		goto Complete;
1587 	}
1588 	dpm_propagate_wakeup_to_parent(dev);
1589 
1590 Skip:
1591 	dev->power.is_late_suspended = true;
1592 
1593 Complete:
1594 	TRACE_SUSPEND(error);
1595 	complete_all(&dev->power.completion);
1596 
1597 	if (error || async_error)
1598 		return error;
1599 
1600 	dpm_async_suspend_parent(dev, async_suspend_late);
1601 
1602 	return 0;
1603 }
1604 
1605 static void async_suspend_late(void *data, async_cookie_t cookie)
1606 {
1607 	struct device *dev = data;
1608 
1609 	device_suspend_late(dev, pm_transition, true);
1610 	put_device(dev);
1611 }
1612 
1613 /**
1614  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1615  * @state: PM transition of the system being carried out.
1616  */
1617 int dpm_suspend_late(pm_message_t state)
1618 {
1619 	ktime_t starttime = ktime_get();
1620 	struct device *dev;
1621 	int error = 0;
1622 
1623 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1624 
1625 	pm_transition = state;
1626 	async_error = 0;
1627 
1628 	wake_up_all_idle_cpus();
1629 
1630 	mutex_lock(&dpm_list_mtx);
1631 
1632 	/*
1633 	 * Start processing "async" leaf devices upfront so they don't need to
1634 	 * wait for the "sync" devices they don't depend on.
1635 	 */
1636 	list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) {
1637 		dpm_clear_async_state(dev);
1638 		if (dpm_leaf_device(dev))
1639 			dpm_async_with_cleanup(dev, async_suspend_late);
1640 	}
1641 
1642 	while (!list_empty(&dpm_suspended_list)) {
1643 		dev = to_device(dpm_suspended_list.prev);
1644 
1645 		list_move(&dev->power.entry, &dpm_late_early_list);
1646 
1647 		if (dpm_async_fn(dev, async_suspend_late))
1648 			continue;
1649 
1650 		get_device(dev);
1651 
1652 		mutex_unlock(&dpm_list_mtx);
1653 
1654 		error = device_suspend_late(dev, state, false);
1655 
1656 		put_device(dev);
1657 
1658 		mutex_lock(&dpm_list_mtx);
1659 
1660 		if (error || async_error) {
1661 			/*
1662 			 * Move all devices to the target list to resume them
1663 			 * properly.
1664 			 */
1665 			list_splice_init(&dpm_suspended_list, &dpm_late_early_list);
1666 			break;
1667 		}
1668 	}
1669 
1670 	mutex_unlock(&dpm_list_mtx);
1671 
1672 	async_synchronize_full();
1673 	if (!error)
1674 		error = async_error;
1675 
1676 	if (error) {
1677 		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1678 		dpm_resume_early(resume_event(state));
1679 	}
1680 	dpm_show_time(starttime, state, error, "late");
1681 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1682 	return error;
1683 }
1684 
1685 /**
1686  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1687  * @state: PM transition of the system being carried out.
1688  */
1689 int dpm_suspend_end(pm_message_t state)
1690 {
1691 	ktime_t starttime = ktime_get();
1692 	int error;
1693 
1694 	error = dpm_suspend_late(state);
1695 	if (error)
1696 		goto out;
1697 
1698 	error = dpm_suspend_noirq(state);
1699 	if (error)
1700 		dpm_resume_early(resume_event(state));
1701 
1702 out:
1703 	dpm_show_time(starttime, state, error, "end");
1704 	return error;
1705 }
1706 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1707 
1708 /**
1709  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1710  * @dev: Device to suspend.
1711  * @state: PM transition of the system being carried out.
1712  * @cb: Suspend callback to execute.
1713  * @info: string description of caller.
1714  */
1715 static int legacy_suspend(struct device *dev, pm_message_t state,
1716 			  int (*cb)(struct device *dev, pm_message_t state),
1717 			  const char *info)
1718 {
1719 	int error;
1720 	ktime_t calltime;
1721 
1722 	calltime = initcall_debug_start(dev, cb);
1723 
1724 	trace_device_pm_callback_start(dev, info, state.event);
1725 	error = cb(dev, state);
1726 	trace_device_pm_callback_end(dev, error);
1727 	suspend_report_result(dev, cb, error);
1728 
1729 	initcall_debug_report(dev, calltime, cb, error);
1730 
1731 	return error;
1732 }
1733 
1734 static void dpm_clear_superiors_direct_complete(struct device *dev)
1735 {
1736 	struct device_link *link;
1737 	int idx;
1738 
1739 	if (dev->parent) {
1740 		spin_lock_irq(&dev->parent->power.lock);
1741 		dev->parent->power.direct_complete = false;
1742 		spin_unlock_irq(&dev->parent->power.lock);
1743 	}
1744 
1745 	idx = device_links_read_lock();
1746 
1747 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1748 		spin_lock_irq(&link->supplier->power.lock);
1749 		link->supplier->power.direct_complete = false;
1750 		spin_unlock_irq(&link->supplier->power.lock);
1751 	}
1752 
1753 	device_links_read_unlock(idx);
1754 }
1755 
1756 static void async_suspend(void *data, async_cookie_t cookie);
1757 
1758 /**
1759  * device_suspend - Execute "suspend" callbacks for given device.
1760  * @dev: Device to handle.
1761  * @state: PM transition of the system being carried out.
1762  * @async: If true, the device is being suspended asynchronously.
1763  */
1764 static int device_suspend(struct device *dev, pm_message_t state, bool async)
1765 {
1766 	pm_callback_t callback = NULL;
1767 	const char *info = NULL;
1768 	int error = 0;
1769 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1770 
1771 	TRACE_DEVICE(dev);
1772 	TRACE_SUSPEND(0);
1773 
1774 	dpm_wait_for_subordinate(dev, async);
1775 
1776 	if (async_error) {
1777 		dev->power.direct_complete = false;
1778 		goto Complete;
1779 	}
1780 
1781 	/*
1782 	 * Wait for possible runtime PM transitions of the device in progress
1783 	 * to complete and if there's a runtime resume request pending for it,
1784 	 * resume it before proceeding with invoking the system-wide suspend
1785 	 * callbacks for it.
1786 	 *
1787 	 * If the system-wide suspend callbacks below change the configuration
1788 	 * of the device, they must disable runtime PM for it or otherwise
1789 	 * ensure that its runtime-resume callbacks will not be confused by that
1790 	 * change in case they are invoked going forward.
1791 	 */
1792 	pm_runtime_barrier(dev);
1793 
1794 	if (pm_wakeup_pending()) {
1795 		dev->power.direct_complete = false;
1796 		async_error = -EBUSY;
1797 		goto Complete;
1798 	}
1799 
1800 	if (dev->power.syscore)
1801 		goto Complete;
1802 
1803 	/* Avoid direct_complete to let wakeup_path propagate. */
1804 	if (device_may_wakeup(dev) || device_wakeup_path(dev))
1805 		dev->power.direct_complete = false;
1806 
1807 	if (dev->power.direct_complete) {
1808 		if (pm_runtime_status_suspended(dev)) {
1809 			pm_runtime_disable(dev);
1810 			if (pm_runtime_status_suspended(dev)) {
1811 				pm_dev_dbg(dev, state, "direct-complete ");
1812 				dev->power.is_suspended = true;
1813 				goto Complete;
1814 			}
1815 
1816 			pm_runtime_enable(dev);
1817 		}
1818 		dev->power.direct_complete = false;
1819 	}
1820 
1821 	dev->power.may_skip_resume = true;
1822 	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
1823 
1824 	dpm_watchdog_set(&wd, dev);
1825 	device_lock(dev);
1826 
1827 	if (dev->pm_domain) {
1828 		info = "power domain ";
1829 		callback = pm_op(&dev->pm_domain->ops, state);
1830 		goto Run;
1831 	}
1832 
1833 	if (dev->type && dev->type->pm) {
1834 		info = "type ";
1835 		callback = pm_op(dev->type->pm, state);
1836 		goto Run;
1837 	}
1838 
1839 	if (dev->class && dev->class->pm) {
1840 		info = "class ";
1841 		callback = pm_op(dev->class->pm, state);
1842 		goto Run;
1843 	}
1844 
1845 	if (dev->bus) {
1846 		if (dev->bus->pm) {
1847 			info = "bus ";
1848 			callback = pm_op(dev->bus->pm, state);
1849 		} else if (dev->bus->suspend) {
1850 			pm_dev_dbg(dev, state, "legacy bus ");
1851 			error = legacy_suspend(dev, state, dev->bus->suspend,
1852 						"legacy bus ");
1853 			goto End;
1854 		}
1855 	}
1856 
1857  Run:
1858 	if (!callback && dev->driver && dev->driver->pm) {
1859 		info = "driver ";
1860 		callback = pm_op(dev->driver->pm, state);
1861 	}
1862 
1863 	error = dpm_run_callback(callback, dev, state, info);
1864 
1865  End:
1866 	if (!error) {
1867 		dev->power.is_suspended = true;
1868 		if (device_may_wakeup(dev))
1869 			dev->power.wakeup_path = true;
1870 
1871 		dpm_propagate_wakeup_to_parent(dev);
1872 		dpm_clear_superiors_direct_complete(dev);
1873 	}
1874 
1875 	device_unlock(dev);
1876 	dpm_watchdog_clear(&wd);
1877 
1878  Complete:
1879 	if (error) {
1880 		async_error = error;
1881 		dpm_save_failed_dev(dev_name(dev));
1882 		pm_dev_err(dev, state, async ? " async" : "", error);
1883 	}
1884 
1885 	complete_all(&dev->power.completion);
1886 	TRACE_SUSPEND(error);
1887 
1888 	if (error || async_error)
1889 		return error;
1890 
1891 	dpm_async_suspend_parent(dev, async_suspend);
1892 
1893 	return 0;
1894 }
1895 
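/*
 * Illustrative sketch (not part of the original file): a driver-level system
 * sleep callback of the kind device_suspend() above falls back to when no PM
 * domain, device type, class or bus callback takes precedence.  The "foo"
 * platform driver and its callbacks are hypothetical.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* Quiesce the hardware; the PM core has already run pm_runtime_barrier(). */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* Restore the state saved in foo_suspend(). */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= pm_sleep_ptr(&foo_pm_ops),
	},
};
module_platform_driver(foo_driver);
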
1896 static void async_suspend(void *data, async_cookie_t cookie)
1897 {
1898 	struct device *dev = data;
1899 
1900 	device_suspend(dev, pm_transition, true);
1901 	put_device(dev);
1902 }
1903 
1904 /**
1905  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1906  * @state: PM transition of the system being carried out.
1907  */
1908 int dpm_suspend(pm_message_t state)
1909 {
1910 	ktime_t starttime = ktime_get();
1911 	struct device *dev;
1912 	int error = 0;
1913 
1914 	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1915 	might_sleep();
1916 
1917 	devfreq_suspend();
1918 	cpufreq_suspend();
1919 
1920 	pm_transition = state;
1921 	async_error = 0;
1922 
1923 	mutex_lock(&dpm_list_mtx);
1924 
1925 	/*
1926 	 * Start processing "async" leaf devices upfront so they don't need to
1927 	 * wait for the "sync" devices they don't depend on.
1928 	 */
1929 	list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) {
1930 		dpm_clear_async_state(dev);
1931 		if (dpm_leaf_device(dev))
1932 			dpm_async_with_cleanup(dev, async_suspend);
1933 	}
1934 
1935 	while (!list_empty(&dpm_prepared_list)) {
1936 		dev = to_device(dpm_prepared_list.prev);
1937 
1938 		list_move(&dev->power.entry, &dpm_suspended_list);
1939 
1940 		if (dpm_async_fn(dev, async_suspend))
1941 			continue;
1942 
1943 		get_device(dev);
1944 
1945 		mutex_unlock(&dpm_list_mtx);
1946 
1947 		error = device_suspend(dev, state, false);
1948 
1949 		put_device(dev);
1950 
1951 		mutex_lock(&dpm_list_mtx);
1952 
1953 		if (error || async_error) {
1954 			/*
1955 			 * Move all devices to the target list to resume them
1956 			 * properly.
1957 			 */
1958 			list_splice_init(&dpm_prepared_list, &dpm_suspended_list);
1959 			break;
1960 		}
1961 	}
1962 
1963 	mutex_unlock(&dpm_list_mtx);
1964 
1965 	async_synchronize_full();
1966 	if (!error)
1967 		error = async_error;
1968 
1969 	if (error)
1970 		dpm_save_failed_step(SUSPEND_SUSPEND);
1971 
1972 	dpm_show_time(starttime, state, error, NULL);
1973 	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1974 	return error;
1975 }
1976 
1977 static bool device_prepare_smart_suspend(struct device *dev)
1978 {
1979 	struct device_link *link;
1980 	bool ret = true;
1981 	int idx;
1982 
1983 	/*
1984 	 * The "smart suspend" feature is enabled for devices whose drivers ask
1985 	 * for it and for devices without PM callbacks.
1986 	 *
1987 	 * However, if "smart suspend" is not enabled for the device's parent
1988 	 * or any of its suppliers that take runtime PM into account, it cannot
1989 	 * be enabled for the device either.
1990 	 */
1991 	if (!dev->power.no_pm_callbacks &&
1992 	    !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
1993 		return false;
1994 
1995 	if (dev->parent && !dev_pm_smart_suspend(dev->parent) &&
1996 	    !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent))
1997 		return false;
1998 
1999 	idx = device_links_read_lock();
2000 
2001 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
2002 		if (!(link->flags & DL_FLAG_PM_RUNTIME))
2003 			continue;
2004 
2005 		if (!dev_pm_smart_suspend(link->supplier) &&
2006 		    !pm_runtime_blocked(link->supplier)) {
2007 			ret = false;
2008 			break;
2009 		}
2010 	}
2011 
2012 	device_links_read_unlock(idx);
2013 
2014 	return ret;
2015 }
2016 
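/*
 * Illustrative sketch (not part of the original file): a driver asking for
 * "smart suspend" at probe time.  Whether the flag actually takes effect
 * also depends on the parent/supplier checks in device_prepare_smart_suspend()
 * above.  The "foo" probe function is hypothetical.
 */
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	/* Allow the core to leave a runtime-suspended device alone during system suspend. */
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_SUSPEND);

	pm_runtime_enable(&pdev->dev);
	return 0;
}
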
2017 /**
2018  * device_prepare - Prepare a device for system power transition.
2019  * @dev: Device to handle.
2020  * @state: PM transition of the system being carried out.
2021  *
2022  * Execute the ->prepare() callback(s) for given device.  No new children of the
2023  * device may be registered after this function has returned.
2024  */
2025 static int device_prepare(struct device *dev, pm_message_t state)
2026 {
2027 	int (*callback)(struct device *) = NULL;
2028 	bool smart_suspend;
2029 	int ret = 0;
2030 
2031 	/*
2032 	 * If a device's parent goes into runtime suspend at the wrong time,
2033 	 * it won't be possible to resume the device.  To prevent this we
2034 	 * block runtime suspend here, during the prepare phase, and allow
2035 	 * it again during the complete phase.
2036 	 */
2037 	pm_runtime_get_noresume(dev);
2038 	/*
2039 	 * If runtime PM is disabled for the device at this point and it has
2040 	 * never been enabled so far, it should not be enabled until this system
2041 	 * suspend-resume cycle is complete, so prepare to trigger a warning on
2042 	 * subsequent attempts to enable it.
2043 	 */
2044 	smart_suspend = !pm_runtime_block_if_disabled(dev);
2045 
2046 	if (dev->power.syscore)
2047 		return 0;
2048 
2049 	device_lock(dev);
2050 
2051 	dev->power.wakeup_path = false;
2052 
2053 	if (dev->power.no_pm_callbacks)
2054 		goto unlock;
2055 
2056 	if (dev->pm_domain)
2057 		callback = dev->pm_domain->ops.prepare;
2058 	else if (dev->type && dev->type->pm)
2059 		callback = dev->type->pm->prepare;
2060 	else if (dev->class && dev->class->pm)
2061 		callback = dev->class->pm->prepare;
2062 	else if (dev->bus && dev->bus->pm)
2063 		callback = dev->bus->pm->prepare;
2064 
2065 	if (!callback && dev->driver && dev->driver->pm)
2066 		callback = dev->driver->pm->prepare;
2067 
2068 	if (callback)
2069 		ret = callback(dev);
2070 
2071 unlock:
2072 	device_unlock(dev);
2073 
2074 	if (ret < 0) {
2075 		suspend_report_result(dev, callback, ret);
2076 		pm_runtime_put(dev);
2077 		return ret;
2078 	}
2079 	/* Do not enable "smart suspend" for devices with disabled runtime PM. */
2080 	if (smart_suspend)
2081 		smart_suspend = device_prepare_smart_suspend(dev);
2082 
2083 	spin_lock_irq(&dev->power.lock);
2084 
2085 	dev->power.smart_suspend = smart_suspend;
2086 	/*
2087 	 * A positive return value from ->prepare() means "this device appears
2088 	 * to be runtime-suspended and its state is fine, so if it really is
2089 	 * runtime-suspended, you can leave it in that state provided that you
2090 	 * will do the same thing with all of its descendants".  This only
2091 	 * applies to suspend transitions, however.
2092 	 */
2093 	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
2094 		(ret > 0 || dev->power.no_pm_callbacks) &&
2095 		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
2096 
2097 	spin_unlock_irq(&dev->power.lock);
2098 
2099 	return 0;
2100 }
2101 
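/*
 * Illustrative sketch (not part of the original file): a ->prepare() callback
 * that opts into the direct-complete optimization evaluated at the end of
 * device_prepare() above.  foo_state_can_be_kept() is a hypothetical
 * driver-specific check, stubbed out here only to keep the sketch complete.
 */
#include <linux/pm_runtime.h>

/* Hypothetical driver-specific check: may the suspended state be carried over? */
static bool foo_state_can_be_kept(struct device *dev)
{
	return true;	/* placeholder */
}

static int foo_prepare(struct device *dev)
{
	/*
	 * A positive return value tells the PM core that, if the device is
	 * runtime-suspended, it may be left in that state for the whole
	 * suspend transition (provided its descendants allow it too).
	 */
	if (pm_runtime_suspended(dev) && foo_state_can_be_kept(dev))
		return 1;

	return 0;
}
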
2102 /**
2103  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
2104  * @state: PM transition of the system being carried out.
2105  *
2106  * Execute the ->prepare() callback(s) for all devices.
2107  */
2108 int dpm_prepare(pm_message_t state)
2109 {
2110 	int error = 0;
2111 
2112 	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
2113 	might_sleep();
2114 
2115 	/*
2116 	 * Give the known devices a chance to complete their probes before
2117 	 * probing is disabled.  This sync point is important at least at
2118 	 * boot time and during hibernation restore.
2119 	 */
2120 	wait_for_device_probe();
2121 	/*
2122 	 * Probing devices during suspend or hibernation is unsafe and would
2123 	 * make system behavior unpredictable, so prohibit device probing here
2124 	 * and defer the probes instead.  Normal behavior will be restored in
2125 	 * dpm_complete().
2126 	 */
2127 	device_block_probing();
2128 
2129 	mutex_lock(&dpm_list_mtx);
2130 	while (!list_empty(&dpm_list) && !error) {
2131 		struct device *dev = to_device(dpm_list.next);
2132 
2133 		get_device(dev);
2134 
2135 		mutex_unlock(&dpm_list_mtx);
2136 
2137 		trace_device_pm_callback_start(dev, "", state.event);
2138 		error = device_prepare(dev, state);
2139 		trace_device_pm_callback_end(dev, error);
2140 
2141 		mutex_lock(&dpm_list_mtx);
2142 
2143 		if (!error) {
2144 			dev->power.is_prepared = true;
2145 			if (!list_empty(&dev->power.entry))
2146 				list_move_tail(&dev->power.entry, &dpm_prepared_list);
2147 		} else if (error == -EAGAIN) {
2148 			error = 0;
2149 		} else {
2150 			dev_info(dev, "not prepared for power transition: code %d\n",
2151 				 error);
2152 		}
2153 
2154 		mutex_unlock(&dpm_list_mtx);
2155 
2156 		put_device(dev);
2157 
2158 		mutex_lock(&dpm_list_mtx);
2159 	}
2160 	mutex_unlock(&dpm_list_mtx);
2161 	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
2162 	return error;
2163 }
2164 
2165 /**
2166  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
2167  * @state: PM transition of the system being carried out.
2168  *
2169  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
2170  * callbacks for them.
2171  */
2172 int dpm_suspend_start(pm_message_t state)
2173 {
2174 	ktime_t starttime = ktime_get();
2175 	int error;
2176 
2177 	error = dpm_prepare(state);
2178 	if (error)
2179 		dpm_save_failed_step(SUSPEND_PREPARE);
2180 	else {
2181 		pm_restrict_gfp_mask();
2182 		error = dpm_suspend(state);
2183 	}
2184 
2185 	dpm_show_time(starttime, state, error, "start");
2186 	return error;
2187 }
2188 EXPORT_SYMBOL_GPL(dpm_suspend_start);
2189 
2190 void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
2191 {
2192 	if (ret)
2193 		dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret);
2194 }
2195 EXPORT_SYMBOL_GPL(__suspend_report_result);
2196 
2197 /**
2198  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2199  * @subordinate: Device that needs to wait for @dev.
2200  * @dev: Device to wait for.
2201  */
2202 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2203 {
2204 	dpm_wait(dev, subordinate->power.async_suspend);
2205 	return async_error;
2206 }
2207 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
2208 
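/*
 * Illustrative sketch (not part of the original file): a suspend callback
 * that explicitly waits for another device's transition before touching
 * shared state.  The "foo" driver and its drvdata layout are hypothetical;
 * functional dependencies are normally better expressed with device links.
 */
#include <linux/device.h>
#include <linux/pm.h>

struct foo {
	struct device *peer;	/* device whose suspend must complete first */
};

static int foo_suspend(struct device *dev)
{
	struct foo *priv = dev_get_drvdata(dev);
	int error;

	/* Also returns the current async_error, so propagate failures. */
	error = device_pm_wait_for_dev(dev, priv->peer);
	if (error)
		return error;

	return 0;	/* hardware quiescing would go here */
}
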
2209 /**
2210  * dpm_for_each_dev - device iterator.
2211  * @data: data for the callback.
2212  * @fn: function to be called for each device.
2213  *
2214  * Iterate over devices in dpm_list, and call @fn for each device,
2215  * passing it @data.
2216  */
2217 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
2218 {
2219 	struct device *dev;
2220 
2221 	if (!fn)
2222 		return;
2223 
2224 	device_pm_lock();
2225 	list_for_each_entry(dev, &dpm_list, power.entry)
2226 		fn(dev, data);
2227 	device_pm_unlock();
2228 }
2229 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
2230 
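/*
 * Illustrative sketch (not part of the original file): counting the devices
 * on dpm_list with dpm_for_each_dev().  The callback runs with the PM core's
 * device list locked, so it must not block on dpm_list_mtx itself.
 */
#include <linux/device.h>
#include <linux/pm.h>

static void foo_count_one(struct device *dev, void *data)
{
	unsigned int *count = data;

	(*count)++;
}

static unsigned int foo_count_pm_devices(void)
{
	unsigned int count = 0;

	dpm_for_each_dev(&count, foo_count_one);
	return count;
}
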
2231 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2232 {
2233 	if (!ops)
2234 		return true;
2235 
2236 	return !ops->prepare &&
2237 	       !ops->suspend &&
2238 	       !ops->suspend_late &&
2239 	       !ops->suspend_noirq &&
2240 	       !ops->resume_noirq &&
2241 	       !ops->resume_early &&
2242 	       !ops->resume &&
2243 	       !ops->complete;
2244 }
2245 
2246 void device_pm_check_callbacks(struct device *dev)
2247 {
2248 	unsigned long flags;
2249 
2250 	spin_lock_irqsave(&dev->power.lock, flags);
2251 	dev->power.no_pm_callbacks =
2252 		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2253 		 !dev->bus->suspend && !dev->bus->resume)) &&
2254 		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2255 		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2256 		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2257 		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2258 		 !dev->driver->suspend && !dev->driver->resume));
2259 	spin_unlock_irqrestore(&dev->power.lock, flags);
2260 }
2261 
2262 bool dev_pm_skip_suspend(struct device *dev)
2263 {
2264 	return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev);
2265 }
2266
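/*
 * Illustrative sketch (not part of the original file): a subsystem
 * late-suspend callback using dev_pm_skip_suspend() to leave a
 * runtime-suspended "smart suspend" device untouched, in the spirit of what
 * the PCI core does.  The "foo" callback is hypothetical.
 */
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_suspend_late(struct device *dev)
{
	if (dev_pm_skip_suspend(dev))
		return 0;

	return pm_generic_suspend_late(dev);
}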