1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * drivers/base/power/main.c - Where the driver meets power management.
4 *
5 * Copyright (c) 2003 Patrick Mochel
6 * Copyright (c) 2003 Open Source Development Lab
7 *
8 * The driver model core calls device_pm_add() when a device is registered.
9 * This will initialize the embedded device_pm_info object in the device
10 * and add it to the list of power-controlled devices. sysfs entries for
11 * controlling device power management will also be added.
12 *
13 * A separate list is used for keeping track of power info, because the power
14 * domain dependencies may differ from the ancestral dependencies that the
15 * subsystem list maintains.
16 */
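
/*
 * Illustrative sketch (not part of this file): a hypothetical driver
 * supplying the system sleep callbacks that the code below selects and
 * invokes.  The names foo_suspend(), foo_resume(), foo_pm_ops and
 * foo_driver are assumptions used for illustration only.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		return 0;	// quiesce the hardware; nonzero aborts suspend
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return 0;	// bring the hardware back up
 *	}
 *
 *	static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = {
 *			.name	= "foo",
 *			.pm	= pm_sleep_ptr(&foo_pm_ops),
 *		},
 *	};
 */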
17
18 #define pr_fmt(fmt) "PM: " fmt
19 #define dev_fmt pr_fmt
20
21 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/sched/debug.h>
31 #include <linux/async.h>
32 #include <linux/suspend.h>
33 #include <trace/events/power.h>
34 #include <linux/cpufreq.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37
38 #include "../base.h"
39 #include "power.h"
40
41 typedef int (*pm_callback_t)(struct device *);
42
43 #define list_for_each_entry_rcu_locked(pos, head, member) \
44 list_for_each_entry_rcu(pos, head, member, \
45 device_links_read_lock_held())
46
47 /*
48 * The entries in the dpm_list list are in a depth first order, simply
49 * because children are guaranteed to be discovered after parents, and
50 * are inserted at the back of the list on discovery.
51 *
52 * Since device_pm_add() may be called with a device lock held,
53 * we must never try to acquire a device lock while holding
54 * dpm_list_mutex.
55 */
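
/*
 * Minimal ordering sketch implied by the rule above (illustrative only):
 * if both locks are needed, the device lock must be taken first and
 * dpm_list_mtx second, never the reverse.
 *
 *	device_lock(dev);		// device lock first ...
 *	mutex_lock(&dpm_list_mtx);	// ... then the dpm_list lock
 *	...
 *	mutex_unlock(&dpm_list_mtx);
 *	device_unlock(dev);
 */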
56
57 LIST_HEAD(dpm_list);
58 static LIST_HEAD(dpm_prepared_list);
59 static LIST_HEAD(dpm_suspended_list);
60 static LIST_HEAD(dpm_late_early_list);
61 static LIST_HEAD(dpm_noirq_list);
62
63 static DEFINE_MUTEX(dpm_list_mtx);
64 static pm_message_t pm_transition;
65
66 static DEFINE_MUTEX(async_wip_mtx);
67 static int async_error;
68
69 static const char *pm_verb(int event)
70 {
71 switch (event) {
72 case PM_EVENT_SUSPEND:
73 return "suspend";
74 case PM_EVENT_RESUME:
75 return "resume";
76 case PM_EVENT_FREEZE:
77 return "freeze";
78 case PM_EVENT_QUIESCE:
79 return "quiesce";
80 case PM_EVENT_HIBERNATE:
81 return "hibernate";
82 case PM_EVENT_THAW:
83 return "thaw";
84 case PM_EVENT_RESTORE:
85 return "restore";
86 case PM_EVENT_RECOVER:
87 return "recover";
88 default:
89 return "(unknown PM event)";
90 }
91 }
92
93 /**
94 * device_pm_sleep_init - Initialize system suspend-related device fields.
95 * @dev: Device object being initialized.
96 */
97 void device_pm_sleep_init(struct device *dev)
98 {
99 dev->power.is_prepared = false;
100 dev->power.is_suspended = false;
101 dev->power.is_noirq_suspended = false;
102 dev->power.is_late_suspended = false;
103 init_completion(&dev->power.completion);
104 complete_all(&dev->power.completion);
105 dev->power.wakeup = NULL;
106 INIT_LIST_HEAD(&dev->power.entry);
107 }
108
109 /**
110 * device_pm_lock - Lock the list of active devices used by the PM core.
111 */
112 void device_pm_lock(void)
113 {
114 mutex_lock(&dpm_list_mtx);
115 }
116
117 /**
118 * device_pm_unlock - Unlock the list of active devices used by the PM core.
119 */
120 void device_pm_unlock(void)
121 {
122 mutex_unlock(&dpm_list_mtx);
123 }
124
125 /**
126 * device_pm_add - Add a device to the PM core's list of active devices.
127 * @dev: Device to add to the list.
128 */
129 void device_pm_add(struct device *dev)
130 {
131 /* Skip PM setup/initialization. */
132 if (device_pm_not_required(dev))
133 return;
134
135 pr_debug("Adding info for %s:%s\n",
136 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
137 device_pm_check_callbacks(dev);
138 mutex_lock(&dpm_list_mtx);
139 if (dev->parent && dev->parent->power.is_prepared)
140 dev_warn(dev, "parent %s should not be sleeping\n",
141 dev_name(dev->parent));
142 list_add_tail(&dev->power.entry, &dpm_list);
143 dev->power.in_dpm_list = true;
144 mutex_unlock(&dpm_list_mtx);
145 }
146
147 /**
148 * device_pm_remove - Remove a device from the PM core's list of active devices.
149 * @dev: Device to be removed from the list.
150 */
151 void device_pm_remove(struct device *dev)
152 {
153 if (device_pm_not_required(dev))
154 return;
155
156 pr_debug("Removing info for %s:%s\n",
157 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
158 complete_all(&dev->power.completion);
159 mutex_lock(&dpm_list_mtx);
160 list_del_init(&dev->power.entry);
161 dev->power.in_dpm_list = false;
162 mutex_unlock(&dpm_list_mtx);
163 device_wakeup_disable(dev);
164 pm_runtime_remove(dev);
165 device_pm_check_callbacks(dev);
166 }
167
168 /**
169 * device_pm_move_before - Move device in the PM core's list of active devices.
170 * @deva: Device to move in dpm_list.
171 * @devb: Device @deva should come before.
172 */
173 void device_pm_move_before(struct device *deva, struct device *devb)
174 {
175 pr_debug("Moving %s:%s before %s:%s\n",
176 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
177 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
178 /* Delete deva from dpm_list and reinsert before devb. */
179 list_move_tail(&deva->power.entry, &devb->power.entry);
180 }
181
182 /**
183 * device_pm_move_after - Move device in the PM core's list of active devices.
184 * @deva: Device to move in dpm_list.
185 * @devb: Device @deva should come after.
186 */
187 void device_pm_move_after(struct device *deva, struct device *devb)
188 {
189 pr_debug("Moving %s:%s after %s:%s\n",
190 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
191 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
192 /* Delete deva from dpm_list and reinsert after devb. */
193 list_move(&deva->power.entry, &devb->power.entry);
194 }
195
196 /**
197 * device_pm_move_last - Move device to end of the PM core's list of devices.
198 * @dev: Device to move in dpm_list.
199 */
200 void device_pm_move_last(struct device *dev)
201 {
202 pr_debug("Moving %s:%s to end of list\n",
203 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
204 list_move_tail(&dev->power.entry, &dpm_list);
205 }
206
207 static ktime_t initcall_debug_start(struct device *dev, void *cb)
208 {
209 if (!pm_print_times_enabled)
210 return 0;
211
212 dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
213 task_pid_nr(current),
214 dev->parent ? dev_name(dev->parent) : "none");
215 return ktime_get();
216 }
217
218 static void initcall_debug_report(struct device *dev, ktime_t calltime,
219 void *cb, int error)
220 {
221 ktime_t rettime;
222
223 if (!pm_print_times_enabled)
224 return;
225
226 rettime = ktime_get();
227 dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
228 (unsigned long long)ktime_us_delta(rettime, calltime));
229 }
230
231 /**
232 * dpm_wait - Wait for a PM operation to complete.
233 * @dev: Device to wait for.
234 * @async: If unset, wait only if the device's power.async_suspend flag is set.
235 */
236 static void dpm_wait(struct device *dev, bool async)
237 {
238 if (!dev)
239 return;
240
241 if (async || (pm_async_enabled && dev->power.async_suspend))
242 wait_for_completion(&dev->power.completion);
243 }
244
245 static int dpm_wait_fn(struct device *dev, void *async_ptr)
246 {
247 dpm_wait(dev, *((bool *)async_ptr));
248 return 0;
249 }
250
251 static void dpm_wait_for_children(struct device *dev, bool async)
252 {
253 device_for_each_child(dev, &async, dpm_wait_fn);
254 }
255
256 static void dpm_wait_for_suppliers(struct device *dev, bool async)
257 {
258 struct device_link *link;
259 int idx;
260
261 idx = device_links_read_lock();
262
263 /*
264 * If the supplier goes away right after we've checked the link to it,
265 * we'll wait for its completion to change the state, but that's fine,
266 * because the only things that will block as a result are the SRCU
267 * callbacks freeing the link objects for the links in the list we're
268 * walking.
269 */
270 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
271 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
272 dpm_wait(link->supplier, async);
273
274 device_links_read_unlock(idx);
275 }
276
277 static bool dpm_wait_for_superior(struct device *dev, bool async)
278 {
279 struct device *parent;
280
281 /*
282 * If the device is resumed asynchronously and the parent's callback
283 * deletes both the device and the parent itself, the parent object may
284 * be freed while this function is running, so avoid that by reference
285 * counting the parent once more unless the device has been deleted
286 * already (in which case return right away).
287 */
288 mutex_lock(&dpm_list_mtx);
289
290 if (!device_pm_initialized(dev)) {
291 mutex_unlock(&dpm_list_mtx);
292 return false;
293 }
294
295 parent = get_device(dev->parent);
296
297 mutex_unlock(&dpm_list_mtx);
298
299 dpm_wait(parent, async);
300 put_device(parent);
301
302 dpm_wait_for_suppliers(dev, async);
303
304 /*
305 * If the parent's callback has deleted the device, attempting to resume
306 * it would be invalid, so avoid doing that then.
307 */
308 return device_pm_initialized(dev);
309 }
310
311 static void dpm_wait_for_consumers(struct device *dev, bool async)
312 {
313 struct device_link *link;
314 int idx;
315
316 idx = device_links_read_lock();
317
318 /*
319 * The status of a device link can only be changed from "dormant" by a
320 * probe, but that cannot happen during system suspend/resume. In
321 * theory it can change to "dormant" at that time, but then it is
322 * reasonable to wait for the target device anyway (e.g. if it goes
323 * away, it's better to wait for it to go away completely and then
324 * continue instead of trying to continue in parallel with its
325 * unregistration).
326 */
327 list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
328 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
329 dpm_wait(link->consumer, async);
330
331 device_links_read_unlock(idx);
332 }
333
334 static void dpm_wait_for_subordinate(struct device *dev, bool async)
335 {
336 dpm_wait_for_children(dev, async);
337 dpm_wait_for_consumers(dev, async);
338 }
339
340 /**
341 * pm_op - Return the PM operation appropriate for given PM event.
342 * @ops: PM operations to choose from.
343 * @state: PM transition of the system being carried out.
344 */
345 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
346 {
347 switch (state.event) {
348 #ifdef CONFIG_SUSPEND
349 case PM_EVENT_SUSPEND:
350 return ops->suspend;
351 case PM_EVENT_RESUME:
352 return ops->resume;
353 #endif /* CONFIG_SUSPEND */
354 #ifdef CONFIG_HIBERNATE_CALLBACKS
355 case PM_EVENT_FREEZE:
356 case PM_EVENT_QUIESCE:
357 return ops->freeze;
358 case PM_EVENT_HIBERNATE:
359 return ops->poweroff;
360 case PM_EVENT_THAW:
361 case PM_EVENT_RECOVER:
362 return ops->thaw;
363 case PM_EVENT_RESTORE:
364 return ops->restore;
365 #endif /* CONFIG_HIBERNATE_CALLBACKS */
366 }
367
368 return NULL;
369 }
370
371 /**
372 * pm_late_early_op - Return the PM operation appropriate for given PM event.
373 * @ops: PM operations to choose from.
374 * @state: PM transition of the system being carried out.
375 *
376 * Runtime PM is disabled for @dev while this function is being executed.
377 */
378 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
379 pm_message_t state)
380 {
381 switch (state.event) {
382 #ifdef CONFIG_SUSPEND
383 case PM_EVENT_SUSPEND:
384 return ops->suspend_late;
385 case PM_EVENT_RESUME:
386 return ops->resume_early;
387 #endif /* CONFIG_SUSPEND */
388 #ifdef CONFIG_HIBERNATE_CALLBACKS
389 case PM_EVENT_FREEZE:
390 case PM_EVENT_QUIESCE:
391 return ops->freeze_late;
392 case PM_EVENT_HIBERNATE:
393 return ops->poweroff_late;
394 case PM_EVENT_THAW:
395 case PM_EVENT_RECOVER:
396 return ops->thaw_early;
397 case PM_EVENT_RESTORE:
398 return ops->restore_early;
399 #endif /* CONFIG_HIBERNATE_CALLBACKS */
400 }
401
402 return NULL;
403 }
404
405 /**
406 * pm_noirq_op - Return the PM operation appropriate for given PM event.
407 * @ops: PM operations to choose from.
408 * @state: PM transition of the system being carried out.
409 *
410 * The driver of @dev will not receive interrupts while this function is being
411 * executed.
412 */
413 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
414 {
415 switch (state.event) {
416 #ifdef CONFIG_SUSPEND
417 case PM_EVENT_SUSPEND:
418 return ops->suspend_noirq;
419 case PM_EVENT_RESUME:
420 return ops->resume_noirq;
421 #endif /* CONFIG_SUSPEND */
422 #ifdef CONFIG_HIBERNATE_CALLBACKS
423 case PM_EVENT_FREEZE:
424 case PM_EVENT_QUIESCE:
425 return ops->freeze_noirq;
426 case PM_EVENT_HIBERNATE:
427 return ops->poweroff_noirq;
428 case PM_EVENT_THAW:
429 case PM_EVENT_RECOVER:
430 return ops->thaw_noirq;
431 case PM_EVENT_RESTORE:
432 return ops->restore_noirq;
433 #endif /* CONFIG_HIBERNATE_CALLBACKS */
434 }
435
436 return NULL;
437 }
438
439 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
440 {
441 dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
442 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
443 ", may wakeup" : "", dev->power.driver_flags);
444 }
445
446 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
447 int error)
448 {
449 dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
450 error);
451 }
452
453 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
454 const char *info)
455 {
456 ktime_t calltime;
457 u64 usecs64;
458 int usecs;
459
460 calltime = ktime_get();
461 usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
462 do_div(usecs64, NSEC_PER_USEC);
463 usecs = usecs64;
464 if (usecs == 0)
465 usecs = 1;
466
467 pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
468 info ?: "", info ? " " : "", pm_verb(state.event),
469 error ? "aborted" : "complete",
470 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
471 }
472
473 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
474 pm_message_t state, const char *info)
475 {
476 ktime_t calltime;
477 int error;
478
479 if (!cb)
480 return 0;
481
482 calltime = initcall_debug_start(dev, cb);
483
484 pm_dev_dbg(dev, state, info);
485 trace_device_pm_callback_start(dev, info, state.event);
486 error = cb(dev);
487 trace_device_pm_callback_end(dev, error);
488 suspend_report_result(dev, cb, error);
489
490 initcall_debug_report(dev, calltime, cb, error);
491
492 return error;
493 }
494
495 #ifdef CONFIG_DPM_WATCHDOG
496 struct dpm_watchdog {
497 struct device *dev;
498 struct task_struct *tsk;
499 struct timer_list timer;
500 bool fatal;
501 };
502
503 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
504 struct dpm_watchdog wd
505
506 /**
507 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
508 * @t: The timer that PM watchdog depends on.
509 *
510 * Called when a driver has timed out suspending or resuming.
511 * There's not much we can do here to recover so panic() to
512 * capture a crash-dump in pstore.
513 */
514 static void dpm_watchdog_handler(struct timer_list *t)
515 {
516 struct dpm_watchdog *wd = timer_container_of(wd, t, timer);
517 struct timer_list *timer = &wd->timer;
518 unsigned int time_left;
519
520 if (wd->fatal) {
521 dev_emerg(wd->dev, "**** DPM device timeout ****\n");
522 show_stack(wd->tsk, NULL, KERN_EMERG);
523 panic("%s %s: unrecoverable failure\n",
524 dev_driver_string(wd->dev), dev_name(wd->dev));
525 }
526
527 time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
528 dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
529 CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
530 show_stack(wd->tsk, NULL, KERN_WARNING);
531
532 wd->fatal = true;
533 mod_timer(timer, jiffies + HZ * time_left);
534 }
535
536 /**
537 * dpm_watchdog_set - Enable pm watchdog for given device.
538 * @wd: Watchdog. Must be allocated on the stack.
539 * @dev: Device to handle.
540 */
541 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
542 {
543 struct timer_list *timer = &wd->timer;
544
545 wd->dev = dev;
546 wd->tsk = current;
547 wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
548
549 timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
550 /* use same timeout value for both suspend and resume */
551 timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
552 add_timer(timer);
553 }
554
555 /**
556 * dpm_watchdog_clear - Disable suspend/resume watchdog.
557 * @wd: Watchdog to disable.
558 */
559 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
560 {
561 struct timer_list *timer = &wd->timer;
562
563 timer_delete_sync(timer);
564 timer_destroy_on_stack(timer);
565 }
566 #else
567 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
568 #define dpm_watchdog_set(x, y)
569 #define dpm_watchdog_clear(x)
570 #endif
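
/*
 * Timing sketch with example (not default) values: if
 * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT=120 and CONFIG_DPM_WATCHDOG_TIMEOUT=180,
 * dpm_watchdog_set() arms the timer for 120 s; the first expiry prints a
 * warning with a stack trace and re-arms the timer for
 * time_left = 180 - 120 = 60 s, and the second expiry (wd->fatal) panics.
 * If the two config values are equal, the very first expiry is fatal.
 */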
571
572 /*------------------------- Resume routines -------------------------*/
573
574 /**
575 * dev_pm_skip_resume - System-wide device resume optimization check.
576 * @dev: Target device.
577 *
578 * Return:
579 * - %false if the transition under way is RESTORE.
580 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
581 * - The logical negation of %power.must_resume otherwise (that is, when the
582 * transition under way is RESUME).
583 */
584 bool dev_pm_skip_resume(struct device *dev)
585 {
586 if (pm_transition.event == PM_EVENT_RESTORE)
587 return false;
588
589 if (pm_transition.event == PM_EVENT_THAW)
590 return dev_pm_skip_suspend(dev);
591
592 return !dev->power.must_resume;
593 }
594
595 static bool is_async(struct device *dev)
596 {
597 return dev->power.async_suspend && pm_async_enabled
598 && !pm_trace_is_enabled();
599 }
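
/*
 * Note (illustrative): power.async_suspend is normally set by a bus type
 * or driver calling device_enable_async_suspend(), e.g. at probe time,
 * while pm_async_enabled is controlled through /sys/power/pm_async.
 * A minimal sketch, assuming a hypothetical probe function foo_probe():
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		device_enable_async_suspend(&pdev->dev);
 *		return 0;
 *	}
 */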
600
601 static bool __dpm_async(struct device *dev, async_func_t func)
602 {
603 if (dev->power.work_in_progress)
604 return true;
605
606 if (!is_async(dev))
607 return false;
608
609 dev->power.work_in_progress = true;
610
611 get_device(dev);
612
613 if (async_schedule_dev_nocall(func, dev))
614 return true;
615
616 put_device(dev);
617
618 return false;
619 }
620
621 static bool dpm_async_fn(struct device *dev, async_func_t func)
622 {
623 guard(mutex)(&async_wip_mtx);
624
625 return __dpm_async(dev, func);
626 }
627
628 static int dpm_async_with_cleanup(struct device *dev, void *fn)
629 {
630 guard(mutex)(&async_wip_mtx);
631
632 if (!__dpm_async(dev, fn))
633 dev->power.work_in_progress = false;
634
635 return 0;
636 }
637
638 static void dpm_async_resume_children(struct device *dev, async_func_t func)
639 {
640 /*
641 * Prevent racing with dpm_clear_async_state() during initial list
642 * walks in dpm_noirq_resume_devices(), dpm_resume_early(), and
643 * dpm_resume().
644 */
645 guard(mutex)(&dpm_list_mtx);
646
647 /*
648 * Start processing "async" children of the device unless it's been
649 * started already for them.
650 *
651 * This could have been done for the device's "async" consumers too, but
652 * they either need to wait for their parents or the processing has
653 * already started for them after their parents were processed.
654 */
655 device_for_each_child(dev, func, dpm_async_with_cleanup);
656 }
657
658 static void dpm_clear_async_state(struct device *dev)
659 {
660 reinit_completion(&dev->power.completion);
661 dev->power.work_in_progress = false;
662 }
663
664 static bool dpm_root_device(struct device *dev)
665 {
666 return !dev->parent;
667 }
668
669 static void async_resume_noirq(void *data, async_cookie_t cookie);
670
671 /**
672 * device_resume_noirq - Execute a "noirq resume" callback for given device.
673 * @dev: Device to handle.
674 * @state: PM transition of the system being carried out.
675 * @async: If true, the device is being resumed asynchronously.
676 *
677 * The driver of @dev will not receive interrupts while this function is being
678 * executed.
679 */
680 static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
681 {
682 pm_callback_t callback = NULL;
683 const char *info = NULL;
684 bool skip_resume;
685 int error = 0;
686
687 TRACE_DEVICE(dev);
688 TRACE_RESUME(0);
689
690 if (dev->power.syscore || dev->power.direct_complete)
691 goto Out;
692
693 if (!dev->power.is_noirq_suspended)
694 goto Out;
695
696 if (!dpm_wait_for_superior(dev, async))
697 goto Out;
698
699 skip_resume = dev_pm_skip_resume(dev);
700 /*
701 * If the driver callback is skipped below or by the middle layer
702 * callback and device_resume_early() also skips the driver callback for
703 * this device later, it needs to appear as "suspended" to PM-runtime,
704 * so change its status accordingly.
705 *
706 * Otherwise, the device is going to be resumed, so set its PM-runtime
707 * status to "active" unless its power.smart_suspend flag is clear, in
708 * which case it is not necessary to update its PM-runtime status.
709 */
710 if (skip_resume)
711 pm_runtime_set_suspended(dev);
712 else if (dev_pm_smart_suspend(dev))
713 pm_runtime_set_active(dev);
714
715 if (dev->pm_domain) {
716 info = "noirq power domain ";
717 callback = pm_noirq_op(&dev->pm_domain->ops, state);
718 } else if (dev->type && dev->type->pm) {
719 info = "noirq type ";
720 callback = pm_noirq_op(dev->type->pm, state);
721 } else if (dev->class && dev->class->pm) {
722 info = "noirq class ";
723 callback = pm_noirq_op(dev->class->pm, state);
724 } else if (dev->bus && dev->bus->pm) {
725 info = "noirq bus ";
726 callback = pm_noirq_op(dev->bus->pm, state);
727 }
728 if (callback)
729 goto Run;
730
731 if (skip_resume)
732 goto Skip;
733
734 if (dev->driver && dev->driver->pm) {
735 info = "noirq driver ";
736 callback = pm_noirq_op(dev->driver->pm, state);
737 }
738
739 Run:
740 error = dpm_run_callback(callback, dev, state, info);
741
742 Skip:
743 dev->power.is_noirq_suspended = false;
744
745 Out:
746 complete_all(&dev->power.completion);
747 TRACE_RESUME(error);
748
749 if (error) {
750 async_error = error;
751 dpm_save_failed_dev(dev_name(dev));
752 pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
753 }
754
755 dpm_async_resume_children(dev, async_resume_noirq);
756 }
757
758 static void async_resume_noirq(void *data, async_cookie_t cookie)
759 {
760 struct device *dev = data;
761
762 device_resume_noirq(dev, pm_transition, true);
763 put_device(dev);
764 }
765
766 static void dpm_noirq_resume_devices(pm_message_t state)
767 {
768 struct device *dev;
769 ktime_t starttime = ktime_get();
770
771 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
772
773 async_error = 0;
774 pm_transition = state;
775
776 mutex_lock(&dpm_list_mtx);
777
778 /*
779 * Start processing "async" root devices upfront so they don't wait for
780 * the "sync" devices they don't depend on.
781 */
782 list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
783 dpm_clear_async_state(dev);
784 if (dpm_root_device(dev))
785 dpm_async_with_cleanup(dev, async_resume_noirq);
786 }
787
788 while (!list_empty(&dpm_noirq_list)) {
789 dev = to_device(dpm_noirq_list.next);
790 list_move_tail(&dev->power.entry, &dpm_late_early_list);
791
792 if (!dpm_async_fn(dev, async_resume_noirq)) {
793 get_device(dev);
794
795 mutex_unlock(&dpm_list_mtx);
796
797 device_resume_noirq(dev, state, false);
798
799 put_device(dev);
800
801 mutex_lock(&dpm_list_mtx);
802 }
803 }
804 mutex_unlock(&dpm_list_mtx);
805 async_synchronize_full();
806 dpm_show_time(starttime, state, 0, "noirq");
807 if (async_error)
808 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
809
810 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
811 }
812
813 /**
814 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
815 * @state: PM transition of the system being carried out.
816 *
817 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
818 * allow device drivers' interrupt handlers to be called.
819 */
820 void dpm_resume_noirq(pm_message_t state)
821 {
822 dpm_noirq_resume_devices(state);
823
824 resume_device_irqs();
825 device_wakeup_disarm_wake_irqs();
826 }
827
828 static void async_resume_early(void *data, async_cookie_t cookie);
829
830 /**
831 * device_resume_early - Execute an "early resume" callback for given device.
832 * @dev: Device to handle.
833 * @state: PM transition of the system being carried out.
834 * @async: If true, the device is being resumed asynchronously.
835 *
836 * Runtime PM is disabled for @dev while this function is being executed.
837 */
838 static void device_resume_early(struct device *dev, pm_message_t state, bool async)
839 {
840 pm_callback_t callback = NULL;
841 const char *info = NULL;
842 int error = 0;
843
844 TRACE_DEVICE(dev);
845 TRACE_RESUME(0);
846
847 if (dev->power.syscore || dev->power.direct_complete)
848 goto Out;
849
850 if (!dev->power.is_late_suspended)
851 goto Out;
852
853 if (!dpm_wait_for_superior(dev, async))
854 goto Out;
855
856 if (dev->pm_domain) {
857 info = "early power domain ";
858 callback = pm_late_early_op(&dev->pm_domain->ops, state);
859 } else if (dev->type && dev->type->pm) {
860 info = "early type ";
861 callback = pm_late_early_op(dev->type->pm, state);
862 } else if (dev->class && dev->class->pm) {
863 info = "early class ";
864 callback = pm_late_early_op(dev->class->pm, state);
865 } else if (dev->bus && dev->bus->pm) {
866 info = "early bus ";
867 callback = pm_late_early_op(dev->bus->pm, state);
868 }
869 if (callback)
870 goto Run;
871
872 if (dev_pm_skip_resume(dev))
873 goto Skip;
874
875 if (dev->driver && dev->driver->pm) {
876 info = "early driver ";
877 callback = pm_late_early_op(dev->driver->pm, state);
878 }
879
880 Run:
881 error = dpm_run_callback(callback, dev, state, info);
882
883 Skip:
884 dev->power.is_late_suspended = false;
885
886 Out:
887 TRACE_RESUME(error);
888
889 pm_runtime_enable(dev);
890 complete_all(&dev->power.completion);
891
892 if (error) {
893 async_error = error;
894 dpm_save_failed_dev(dev_name(dev));
895 pm_dev_err(dev, state, async ? " async early" : " early", error);
896 }
897
898 dpm_async_resume_children(dev, async_resume_early);
899 }
900
901 static void async_resume_early(void *data, async_cookie_t cookie)
902 {
903 struct device *dev = data;
904
905 device_resume_early(dev, pm_transition, true);
906 put_device(dev);
907 }
908
909 /**
910 * dpm_resume_early - Execute "early resume" callbacks for all devices.
911 * @state: PM transition of the system being carried out.
912 */
913 void dpm_resume_early(pm_message_t state)
914 {
915 struct device *dev;
916 ktime_t starttime = ktime_get();
917
918 trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
919
920 async_error = 0;
921 pm_transition = state;
922
923 mutex_lock(&dpm_list_mtx);
924
925 /*
926 * Start processing "async" root devices upfront so they don't wait for
927 * the "sync" devices they don't depend on.
928 */
929 list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
930 dpm_clear_async_state(dev);
931 if (dpm_root_device(dev))
932 dpm_async_with_cleanup(dev, async_resume_early);
933 }
934
935 while (!list_empty(&dpm_late_early_list)) {
936 dev = to_device(dpm_late_early_list.next);
937 list_move_tail(&dev->power.entry, &dpm_suspended_list);
938
939 if (!dpm_async_fn(dev, async_resume_early)) {
940 get_device(dev);
941
942 mutex_unlock(&dpm_list_mtx);
943
944 device_resume_early(dev, state, false);
945
946 put_device(dev);
947
948 mutex_lock(&dpm_list_mtx);
949 }
950 }
951 mutex_unlock(&dpm_list_mtx);
952 async_synchronize_full();
953 dpm_show_time(starttime, state, 0, "early");
954 if (async_error)
955 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
956
957 trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
958 }
959
960 /**
961 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
962 * @state: PM transition of the system being carried out.
963 */
964 void dpm_resume_start(pm_message_t state)
965 {
966 dpm_resume_noirq(state);
967 dpm_resume_early(state);
968 }
969 EXPORT_SYMBOL_GPL(dpm_resume_start);
970
971 static void async_resume(void *data, async_cookie_t cookie);
972
973 /**
974 * device_resume - Execute "resume" callbacks for given device.
975 * @dev: Device to handle.
976 * @state: PM transition of the system being carried out.
977 * @async: If true, the device is being resumed asynchronously.
978 */
979 static void device_resume(struct device *dev, pm_message_t state, bool async)
980 {
981 pm_callback_t callback = NULL;
982 const char *info = NULL;
983 int error = 0;
984 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
985
986 TRACE_DEVICE(dev);
987 TRACE_RESUME(0);
988
989 if (dev->power.syscore)
990 goto Complete;
991
992 if (!dev->power.is_suspended)
993 goto Complete;
994
995 dev->power.is_suspended = false;
996
997 if (dev->power.direct_complete) {
998 /*
999 * Allow new children to be added under the device after this
1000 * point if it has no PM callbacks.
1001 */
1002 if (dev->power.no_pm_callbacks)
1003 dev->power.is_prepared = false;
1004
1005 /* Match the pm_runtime_disable() in device_suspend(). */
1006 pm_runtime_enable(dev);
1007 goto Complete;
1008 }
1009
1010 if (!dpm_wait_for_superior(dev, async))
1011 goto Complete;
1012
1013 dpm_watchdog_set(&wd, dev);
1014 device_lock(dev);
1015
1016 /*
1017 * This is a fib. But we'll allow new children to be added below
1018 * a resumed device, even if the device hasn't been completed yet.
1019 */
1020 dev->power.is_prepared = false;
1021
1022 if (dev->pm_domain) {
1023 info = "power domain ";
1024 callback = pm_op(&dev->pm_domain->ops, state);
1025 goto Driver;
1026 }
1027
1028 if (dev->type && dev->type->pm) {
1029 info = "type ";
1030 callback = pm_op(dev->type->pm, state);
1031 goto Driver;
1032 }
1033
1034 if (dev->class && dev->class->pm) {
1035 info = "class ";
1036 callback = pm_op(dev->class->pm, state);
1037 goto Driver;
1038 }
1039
1040 if (dev->bus) {
1041 if (dev->bus->pm) {
1042 info = "bus ";
1043 callback = pm_op(dev->bus->pm, state);
1044 } else if (dev->bus->resume) {
1045 info = "legacy bus ";
1046 callback = dev->bus->resume;
1047 goto End;
1048 }
1049 }
1050
1051 Driver:
1052 if (!callback && dev->driver && dev->driver->pm) {
1053 info = "driver ";
1054 callback = pm_op(dev->driver->pm, state);
1055 }
1056
1057 End:
1058 error = dpm_run_callback(callback, dev, state, info);
1059
1060 device_unlock(dev);
1061 dpm_watchdog_clear(&wd);
1062
1063 Complete:
1064 complete_all(&dev->power.completion);
1065
1066 TRACE_RESUME(error);
1067
1068 if (error) {
1069 async_error = error;
1070 dpm_save_failed_dev(dev_name(dev));
1071 pm_dev_err(dev, state, async ? " async" : "", error);
1072 }
1073
1074 dpm_async_resume_children(dev, async_resume);
1075 }
1076
1077 static void async_resume(void *data, async_cookie_t cookie)
1078 {
1079 struct device *dev = data;
1080
1081 device_resume(dev, pm_transition, true);
1082 put_device(dev);
1083 }
1084
1085 /**
1086 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1087 * @state: PM transition of the system being carried out.
1088 *
1089 * Execute the appropriate "resume" callback for all devices whose status
1090 * indicates that they are suspended.
1091 */
1092 void dpm_resume(pm_message_t state)
1093 {
1094 struct device *dev;
1095 ktime_t starttime = ktime_get();
1096
1097 trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1098 might_sleep();
1099
1100 pm_transition = state;
1101 async_error = 0;
1102
1103 mutex_lock(&dpm_list_mtx);
1104
1105 /*
1106 * Start processing "async" root devices upfront so they don't wait for
1107 * the "sync" devices they don't depend on.
1108 */
1109 list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
1110 dpm_clear_async_state(dev);
1111 if (dpm_root_device(dev))
1112 dpm_async_with_cleanup(dev, async_resume);
1113 }
1114
1115 while (!list_empty(&dpm_suspended_list)) {
1116 dev = to_device(dpm_suspended_list.next);
1117 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1118
1119 if (!dpm_async_fn(dev, async_resume)) {
1120 get_device(dev);
1121
1122 mutex_unlock(&dpm_list_mtx);
1123
1124 device_resume(dev, state, false);
1125
1126 put_device(dev);
1127
1128 mutex_lock(&dpm_list_mtx);
1129 }
1130 }
1131 mutex_unlock(&dpm_list_mtx);
1132 async_synchronize_full();
1133 dpm_show_time(starttime, state, 0, NULL);
1134 if (async_error)
1135 dpm_save_failed_step(SUSPEND_RESUME);
1136
1137 cpufreq_resume();
1138 devfreq_resume();
1139 trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1140 }
1141
1142 /**
1143 * device_complete - Complete a PM transition for given device.
1144 * @dev: Device to handle.
1145 * @state: PM transition of the system being carried out.
1146 */
1147 static void device_complete(struct device *dev, pm_message_t state)
1148 {
1149 void (*callback)(struct device *) = NULL;
1150 const char *info = NULL;
1151
1152 if (dev->power.syscore)
1153 goto out;
1154
1155 device_lock(dev);
1156
1157 if (dev->pm_domain) {
1158 info = "completing power domain ";
1159 callback = dev->pm_domain->ops.complete;
1160 } else if (dev->type && dev->type->pm) {
1161 info = "completing type ";
1162 callback = dev->type->pm->complete;
1163 } else if (dev->class && dev->class->pm) {
1164 info = "completing class ";
1165 callback = dev->class->pm->complete;
1166 } else if (dev->bus && dev->bus->pm) {
1167 info = "completing bus ";
1168 callback = dev->bus->pm->complete;
1169 }
1170
1171 if (!callback && dev->driver && dev->driver->pm) {
1172 info = "completing driver ";
1173 callback = dev->driver->pm->complete;
1174 }
1175
1176 if (callback) {
1177 pm_dev_dbg(dev, state, info);
1178 callback(dev);
1179 }
1180
1181 device_unlock(dev);
1182
1183 out:
1184 /* If enabling runtime PM for the device is blocked, unblock it. */
1185 pm_runtime_unblock(dev);
1186 pm_runtime_put(dev);
1187 }
1188
1189 /**
1190 * dpm_complete - Complete a PM transition for all non-sysdev devices.
1191 * @state: PM transition of the system being carried out.
1192 *
1193 * Execute the ->complete() callbacks for all devices whose PM status is not
1194 * DPM_ON (this allows new devices to be registered).
1195 */
1196 void dpm_complete(pm_message_t state)
1197 {
1198 struct list_head list;
1199
1200 trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1201 might_sleep();
1202
1203 INIT_LIST_HEAD(&list);
1204 mutex_lock(&dpm_list_mtx);
1205 while (!list_empty(&dpm_prepared_list)) {
1206 struct device *dev = to_device(dpm_prepared_list.prev);
1207
1208 get_device(dev);
1209 dev->power.is_prepared = false;
1210 list_move(&dev->power.entry, &list);
1211
1212 mutex_unlock(&dpm_list_mtx);
1213
1214 trace_device_pm_callback_start(dev, "", state.event);
1215 device_complete(dev, state);
1216 trace_device_pm_callback_end(dev, 0);
1217
1218 put_device(dev);
1219
1220 mutex_lock(&dpm_list_mtx);
1221 }
1222 list_splice(&list, &dpm_list);
1223 mutex_unlock(&dpm_list_mtx);
1224
1225 /* Allow device probing and trigger re-probing of deferred devices */
1226 device_unblock_probing();
1227 trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1228 }
1229
1230 /**
1231 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1232 * @state: PM transition of the system being carried out.
1233 *
1234 * Execute "resume" callbacks for all devices and complete the PM transition of
1235 * the system.
1236 */
1237 void dpm_resume_end(pm_message_t state)
1238 {
1239 dpm_resume(state);
1240 dpm_complete(state);
1241 }
1242 EXPORT_SYMBOL_GPL(dpm_resume_end);
1243
1244
1245 /*------------------------- Suspend routines -------------------------*/
1246
1247 static bool dpm_leaf_device(struct device *dev)
1248 {
1249 struct device *child;
1250
1251 lockdep_assert_held(&dpm_list_mtx);
1252
1253 child = device_find_any_child(dev);
1254 if (child) {
1255 put_device(child);
1256
1257 return false;
1258 }
1259
1260 return true;
1261 }
1262
1263 static void dpm_async_suspend_parent(struct device *dev, async_func_t func)
1264 {
1265 guard(mutex)(&dpm_list_mtx);
1266
1267 /*
1268 * If the device is suspended asynchronously and the parent's callback
1269 * deletes both the device and the parent itself, the parent object may
1270 * be freed while this function is running, so avoid that by checking
1271 * if the device has been deleted already as the parent cannot be
1272 * deleted before it.
1273 */
1274 if (!device_pm_initialized(dev))
1275 return;
1276
1277 /* Start processing the device's parent if it is "async". */
1278 if (dev->parent)
1279 dpm_async_with_cleanup(dev->parent, func);
1280 }
1281
1282 /**
1283 * resume_event - Return a "resume" message for given "suspend" sleep state.
1284 * @sleep_state: PM message representing a sleep state.
1285 *
1286 * Return a PM message representing the resume event corresponding to given
1287 * sleep state.
1288 */
1289 static pm_message_t resume_event(pm_message_t sleep_state)
1290 {
1291 switch (sleep_state.event) {
1292 case PM_EVENT_SUSPEND:
1293 return PMSG_RESUME;
1294 case PM_EVENT_FREEZE:
1295 case PM_EVENT_QUIESCE:
1296 return PMSG_RECOVER;
1297 case PM_EVENT_HIBERNATE:
1298 return PMSG_RESTORE;
1299 }
1300 return PMSG_ON;
1301 }
1302
1303 static void dpm_superior_set_must_resume(struct device *dev)
1304 {
1305 struct device_link *link;
1306 int idx;
1307
1308 if (dev->parent)
1309 dev->parent->power.must_resume = true;
1310
1311 idx = device_links_read_lock();
1312
1313 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1314 link->supplier->power.must_resume = true;
1315
1316 device_links_read_unlock(idx);
1317 }
1318
1319 static void async_suspend_noirq(void *data, async_cookie_t cookie);
1320
1321 /**
1322 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1323 * @dev: Device to handle.
1324 * @state: PM transition of the system being carried out.
1325 * @async: If true, the device is being suspended asynchronously.
1326 *
1327 * The driver of @dev will not receive interrupts while this function is being
1328 * executed.
1329 */
1330 static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1331 {
1332 pm_callback_t callback = NULL;
1333 const char *info = NULL;
1334 int error = 0;
1335
1336 TRACE_DEVICE(dev);
1337 TRACE_SUSPEND(0);
1338
1339 dpm_wait_for_subordinate(dev, async);
1340
1341 if (async_error)
1342 goto Complete;
1343
1344 if (dev->power.syscore || dev->power.direct_complete)
1345 goto Complete;
1346
1347 if (dev->pm_domain) {
1348 info = "noirq power domain ";
1349 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1350 } else if (dev->type && dev->type->pm) {
1351 info = "noirq type ";
1352 callback = pm_noirq_op(dev->type->pm, state);
1353 } else if (dev->class && dev->class->pm) {
1354 info = "noirq class ";
1355 callback = pm_noirq_op(dev->class->pm, state);
1356 } else if (dev->bus && dev->bus->pm) {
1357 info = "noirq bus ";
1358 callback = pm_noirq_op(dev->bus->pm, state);
1359 }
1360 if (callback)
1361 goto Run;
1362
1363 if (dev_pm_skip_suspend(dev))
1364 goto Skip;
1365
1366 if (dev->driver && dev->driver->pm) {
1367 info = "noirq driver ";
1368 callback = pm_noirq_op(dev->driver->pm, state);
1369 }
1370
1371 Run:
1372 error = dpm_run_callback(callback, dev, state, info);
1373 if (error) {
1374 async_error = error;
1375 dpm_save_failed_dev(dev_name(dev));
1376 pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
1377 goto Complete;
1378 }
1379
1380 Skip:
1381 dev->power.is_noirq_suspended = true;
1382
1383 /*
1384 * Devices must be resumed unless they are explicitly allowed to be left
1385 * in suspend, but even in that case skipping the resume of devices that
1386 * were in use right before the system suspend (as indicated by their
1387 * runtime PM usage counters and child counters) would be suboptimal.
1388 */
1389 if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1390 dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
1391 dev->power.must_resume = true;
1392
1393 if (dev->power.must_resume)
1394 dpm_superior_set_must_resume(dev);
1395
1396 Complete:
1397 complete_all(&dev->power.completion);
1398 TRACE_SUSPEND(error);
1399
1400 if (error || async_error)
1401 return error;
1402
1403 dpm_async_suspend_parent(dev, async_suspend_noirq);
1404
1405 return 0;
1406 }
1407
1408 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1409 {
1410 struct device *dev = data;
1411
1412 device_suspend_noirq(dev, pm_transition, true);
1413 put_device(dev);
1414 }
1415
1416 static int dpm_noirq_suspend_devices(pm_message_t state)
1417 {
1418 ktime_t starttime = ktime_get();
1419 struct device *dev;
1420 int error = 0;
1421
1422 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1423
1424 pm_transition = state;
1425 async_error = 0;
1426
1427 mutex_lock(&dpm_list_mtx);
1428
1429 /*
1430 * Start processing "async" leaf devices upfront so they don't need to
1431 * wait for the "sync" devices they don't depend on.
1432 */
1433 list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) {
1434 dpm_clear_async_state(dev);
1435 if (dpm_leaf_device(dev))
1436 dpm_async_with_cleanup(dev, async_suspend_noirq);
1437 }
1438
1439 while (!list_empty(&dpm_late_early_list)) {
1440 dev = to_device(dpm_late_early_list.prev);
1441
1442 list_move(&dev->power.entry, &dpm_noirq_list);
1443
1444 if (dpm_async_fn(dev, async_suspend_noirq))
1445 continue;
1446
1447 get_device(dev);
1448
1449 mutex_unlock(&dpm_list_mtx);
1450
1451 error = device_suspend_noirq(dev, state, false);
1452
1453 put_device(dev);
1454
1455 mutex_lock(&dpm_list_mtx);
1456
1457 if (error || async_error) {
1458 /*
1459 * Move all devices to the target list to resume them
1460 * properly.
1461 */
1462 list_splice_init(&dpm_late_early_list, &dpm_noirq_list);
1463 break;
1464 }
1465 }
1466
1467 mutex_unlock(&dpm_list_mtx);
1468
1469 async_synchronize_full();
1470 if (!error)
1471 error = async_error;
1472
1473 if (error)
1474 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1475
1476 dpm_show_time(starttime, state, error, "noirq");
1477 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1478 return error;
1479 }
1480
1481 /**
1482 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1483 * @state: PM transition of the system being carried out.
1484 *
1485 * Prevent device drivers' interrupt handlers from being called and invoke
1486 * "noirq" suspend callbacks for all non-sysdev devices.
1487 */
1488 int dpm_suspend_noirq(pm_message_t state)
1489 {
1490 int ret;
1491
1492 device_wakeup_arm_wake_irqs();
1493 suspend_device_irqs();
1494
1495 ret = dpm_noirq_suspend_devices(state);
1496 if (ret)
1497 dpm_resume_noirq(resume_event(state));
1498
1499 return ret;
1500 }
1501
1502 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1503 {
1504 struct device *parent = dev->parent;
1505
1506 if (!parent)
1507 return;
1508
1509 spin_lock_irq(&parent->power.lock);
1510
1511 if (device_wakeup_path(dev) && !parent->power.ignore_children)
1512 parent->power.wakeup_path = true;
1513
1514 spin_unlock_irq(&parent->power.lock);
1515 }
1516
1517 static void async_suspend_late(void *data, async_cookie_t cookie);
1518
1519 /**
1520 * device_suspend_late - Execute a "late suspend" callback for given device.
1521 * @dev: Device to handle.
1522 * @state: PM transition of the system being carried out.
1523 * @async: If true, the device is being suspended asynchronously.
1524 *
1525 * Runtime PM is disabled for @dev while this function is being executed.
1526 */
1527 static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
1528 {
1529 pm_callback_t callback = NULL;
1530 const char *info = NULL;
1531 int error = 0;
1532
1533 TRACE_DEVICE(dev);
1534 TRACE_SUSPEND(0);
1535
1536 /*
1537 * Disable runtime PM for the device without checking if there is a
1538 * pending resume request for it.
1539 */
1540 __pm_runtime_disable(dev, false);
1541
1542 dpm_wait_for_subordinate(dev, async);
1543
1544 if (async_error)
1545 goto Complete;
1546
1547 if (pm_wakeup_pending()) {
1548 async_error = -EBUSY;
1549 goto Complete;
1550 }
1551
1552 if (dev->power.syscore || dev->power.direct_complete)
1553 goto Complete;
1554
1555 if (dev->pm_domain) {
1556 info = "late power domain ";
1557 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1558 } else if (dev->type && dev->type->pm) {
1559 info = "late type ";
1560 callback = pm_late_early_op(dev->type->pm, state);
1561 } else if (dev->class && dev->class->pm) {
1562 info = "late class ";
1563 callback = pm_late_early_op(dev->class->pm, state);
1564 } else if (dev->bus && dev->bus->pm) {
1565 info = "late bus ";
1566 callback = pm_late_early_op(dev->bus->pm, state);
1567 }
1568 if (callback)
1569 goto Run;
1570
1571 if (dev_pm_skip_suspend(dev))
1572 goto Skip;
1573
1574 if (dev->driver && dev->driver->pm) {
1575 info = "late driver ";
1576 callback = pm_late_early_op(dev->driver->pm, state);
1577 }
1578
1579 Run:
1580 error = dpm_run_callback(callback, dev, state, info);
1581 if (error) {
1582 async_error = error;
1583 dpm_save_failed_dev(dev_name(dev));
1584 pm_dev_err(dev, state, async ? " async late" : " late", error);
1585 goto Complete;
1586 }
1587 dpm_propagate_wakeup_to_parent(dev);
1588
1589 Skip:
1590 dev->power.is_late_suspended = true;
1591
1592 Complete:
1593 TRACE_SUSPEND(error);
1594 complete_all(&dev->power.completion);
1595
1596 if (error || async_error)
1597 return error;
1598
1599 dpm_async_suspend_parent(dev, async_suspend_late);
1600
1601 return 0;
1602 }
1603
1604 static void async_suspend_late(void *data, async_cookie_t cookie)
1605 {
1606 struct device *dev = data;
1607
1608 device_suspend_late(dev, pm_transition, true);
1609 put_device(dev);
1610 }
1611
1612 /**
1613 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1614 * @state: PM transition of the system being carried out.
1615 */
1616 int dpm_suspend_late(pm_message_t state)
1617 {
1618 ktime_t starttime = ktime_get();
1619 struct device *dev;
1620 int error = 0;
1621
1622 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1623
1624 pm_transition = state;
1625 async_error = 0;
1626
1627 wake_up_all_idle_cpus();
1628
1629 mutex_lock(&dpm_list_mtx);
1630
1631 /*
1632 * Start processing "async" leaf devices upfront so they don't need to
1633 * wait for the "sync" devices they don't depend on.
1634 */
1635 list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) {
1636 dpm_clear_async_state(dev);
1637 if (dpm_leaf_device(dev))
1638 dpm_async_with_cleanup(dev, async_suspend_late);
1639 }
1640
1641 while (!list_empty(&dpm_suspended_list)) {
1642 dev = to_device(dpm_suspended_list.prev);
1643
1644 list_move(&dev->power.entry, &dpm_late_early_list);
1645
1646 if (dpm_async_fn(dev, async_suspend_late))
1647 continue;
1648
1649 get_device(dev);
1650
1651 mutex_unlock(&dpm_list_mtx);
1652
1653 error = device_suspend_late(dev, state, false);
1654
1655 put_device(dev);
1656
1657 mutex_lock(&dpm_list_mtx);
1658
1659 if (error || async_error) {
1660 /*
1661 * Move all devices to the target list to resume them
1662 * properly.
1663 */
1664 list_splice_init(&dpm_suspended_list, &dpm_late_early_list);
1665 break;
1666 }
1667 }
1668
1669 mutex_unlock(&dpm_list_mtx);
1670
1671 async_synchronize_full();
1672 if (!error)
1673 error = async_error;
1674
1675 if (error) {
1676 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1677 dpm_resume_early(resume_event(state));
1678 }
1679 dpm_show_time(starttime, state, error, "late");
1680 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1681 return error;
1682 }
1683
1684 /**
1685 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1686 * @state: PM transition of the system being carried out.
1687 */
1688 int dpm_suspend_end(pm_message_t state)
1689 {
1690 ktime_t starttime = ktime_get();
1691 int error;
1692
1693 error = dpm_suspend_late(state);
1694 if (error)
1695 goto out;
1696
1697 error = dpm_suspend_noirq(state);
1698 if (error)
1699 dpm_resume_early(resume_event(state));
1700
1701 out:
1702 dpm_show_time(starttime, state, error, "end");
1703 return error;
1704 }
1705 EXPORT_SYMBOL_GPL(dpm_suspend_end);
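
/*
 * Usage sketch (illustrative only): callers outside this file, such as
 * the hibernation core, pair dpm_suspend_end() with dpm_resume_start()
 * around the low-level platform transition:
 *
 *	error = dpm_suspend_end(PMSG_FREEZE);
 *	if (error)
 *		return error;
 *	// ... create the image / enter the sleep state ...
 *	dpm_resume_start(PMSG_THAW);
 */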
1706
1707 /**
1708 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1709 * @dev: Device to suspend.
1710 * @state: PM transition of the system being carried out.
1711 * @cb: Suspend callback to execute.
1712 * @info: string description of caller.
1713 */
1714 static int legacy_suspend(struct device *dev, pm_message_t state,
1715 int (*cb)(struct device *dev, pm_message_t state),
1716 const char *info)
1717 {
1718 int error;
1719 ktime_t calltime;
1720
1721 calltime = initcall_debug_start(dev, cb);
1722
1723 trace_device_pm_callback_start(dev, info, state.event);
1724 error = cb(dev, state);
1725 trace_device_pm_callback_end(dev, error);
1726 suspend_report_result(dev, cb, error);
1727
1728 initcall_debug_report(dev, calltime, cb, error);
1729
1730 return error;
1731 }
1732
1733 static void dpm_clear_superiors_direct_complete(struct device *dev)
1734 {
1735 struct device_link *link;
1736 int idx;
1737
1738 if (dev->parent) {
1739 spin_lock_irq(&dev->parent->power.lock);
1740 dev->parent->power.direct_complete = false;
1741 spin_unlock_irq(&dev->parent->power.lock);
1742 }
1743
1744 idx = device_links_read_lock();
1745
1746 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1747 spin_lock_irq(&link->supplier->power.lock);
1748 link->supplier->power.direct_complete = false;
1749 spin_unlock_irq(&link->supplier->power.lock);
1750 }
1751
1752 device_links_read_unlock(idx);
1753 }
1754
1755 static void async_suspend(void *data, async_cookie_t cookie);
1756
1757 /**
1758 * device_suspend - Execute "suspend" callbacks for given device.
1759 * @dev: Device to handle.
1760 * @state: PM transition of the system being carried out.
1761 * @async: If true, the device is being suspended asynchronously.
1762 */
1763 static int device_suspend(struct device *dev, pm_message_t state, bool async)
1764 {
1765 pm_callback_t callback = NULL;
1766 const char *info = NULL;
1767 int error = 0;
1768 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1769
1770 TRACE_DEVICE(dev);
1771 TRACE_SUSPEND(0);
1772
1773 dpm_wait_for_subordinate(dev, async);
1774
1775 if (async_error) {
1776 dev->power.direct_complete = false;
1777 goto Complete;
1778 }
1779
1780 /*
1781 * Wait for possible runtime PM transitions of the device in progress
1782 * to complete and if there's a runtime resume request pending for it,
1783 * resume it before proceeding with invoking the system-wide suspend
1784 * callbacks for it.
1785 *
1786 * If the system-wide suspend callbacks below change the configuration
1787 * of the device, they must disable runtime PM for it or otherwise
1788 * ensure that its runtime-resume callbacks will not be confused by that
1789 * change in case they are invoked going forward.
1790 */
1791 pm_runtime_barrier(dev);
1792
1793 if (pm_wakeup_pending()) {
1794 dev->power.direct_complete = false;
1795 async_error = -EBUSY;
1796 goto Complete;
1797 }
1798
1799 if (dev->power.syscore)
1800 goto Complete;
1801
1802 /* Avoid direct_complete to let wakeup_path propagate. */
1803 if (device_may_wakeup(dev) || device_wakeup_path(dev))
1804 dev->power.direct_complete = false;
1805
1806 if (dev->power.direct_complete) {
1807 if (pm_runtime_status_suspended(dev)) {
1808 pm_runtime_disable(dev);
1809 if (pm_runtime_status_suspended(dev)) {
1810 pm_dev_dbg(dev, state, "direct-complete ");
1811 dev->power.is_suspended = true;
1812 goto Complete;
1813 }
1814
1815 pm_runtime_enable(dev);
1816 }
1817 dev->power.direct_complete = false;
1818 }
1819
1820 dev->power.may_skip_resume = true;
1821 dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
1822
1823 dpm_watchdog_set(&wd, dev);
1824 device_lock(dev);
1825
1826 if (dev->pm_domain) {
1827 info = "power domain ";
1828 callback = pm_op(&dev->pm_domain->ops, state);
1829 goto Run;
1830 }
1831
1832 if (dev->type && dev->type->pm) {
1833 info = "type ";
1834 callback = pm_op(dev->type->pm, state);
1835 goto Run;
1836 }
1837
1838 if (dev->class && dev->class->pm) {
1839 info = "class ";
1840 callback = pm_op(dev->class->pm, state);
1841 goto Run;
1842 }
1843
1844 if (dev->bus) {
1845 if (dev->bus->pm) {
1846 info = "bus ";
1847 callback = pm_op(dev->bus->pm, state);
1848 } else if (dev->bus->suspend) {
1849 pm_dev_dbg(dev, state, "legacy bus ");
1850 error = legacy_suspend(dev, state, dev->bus->suspend,
1851 "legacy bus ");
1852 goto End;
1853 }
1854 }
1855
1856 Run:
1857 if (!callback && dev->driver && dev->driver->pm) {
1858 info = "driver ";
1859 callback = pm_op(dev->driver->pm, state);
1860 }
1861
1862 error = dpm_run_callback(callback, dev, state, info);
1863
1864 End:
1865 if (!error) {
1866 dev->power.is_suspended = true;
1867 if (device_may_wakeup(dev))
1868 dev->power.wakeup_path = true;
1869
1870 dpm_propagate_wakeup_to_parent(dev);
1871 dpm_clear_superiors_direct_complete(dev);
1872 }
1873
1874 device_unlock(dev);
1875 dpm_watchdog_clear(&wd);
1876
1877 Complete:
1878 if (error) {
1879 async_error = error;
1880 dpm_save_failed_dev(dev_name(dev));
1881 pm_dev_err(dev, state, async ? " async" : "", error);
1882 }
1883
1884 complete_all(&dev->power.completion);
1885 TRACE_SUSPEND(error);
1886
1887 if (error || async_error)
1888 return error;
1889
1890 dpm_async_suspend_parent(dev, async_suspend);
1891
1892 return 0;
1893 }
1894
1895 static void async_suspend(void *data, async_cookie_t cookie)
1896 {
1897 struct device *dev = data;
1898
1899 device_suspend(dev, pm_transition, true);
1900 put_device(dev);
1901 }
1902
1903 /**
1904 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1905 * @state: PM transition of the system being carried out.
1906 */
1907 int dpm_suspend(pm_message_t state)
1908 {
1909 ktime_t starttime = ktime_get();
1910 struct device *dev;
1911 int error = 0;
1912
1913 trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1914 might_sleep();
1915
1916 devfreq_suspend();
1917 cpufreq_suspend();
1918
1919 pm_transition = state;
1920 async_error = 0;
1921
1922 mutex_lock(&dpm_list_mtx);
1923
1924 /*
1925 * Start processing "async" leaf devices upfront so they don't need to
1926 * wait for the "sync" devices they don't depend on.
1927 */
1928 list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) {
1929 dpm_clear_async_state(dev);
1930 if (dpm_leaf_device(dev))
1931 dpm_async_with_cleanup(dev, async_suspend);
1932 }
1933
1934 while (!list_empty(&dpm_prepared_list)) {
1935 dev = to_device(dpm_prepared_list.prev);
1936
1937 list_move(&dev->power.entry, &dpm_suspended_list);
1938
1939 if (dpm_async_fn(dev, async_suspend))
1940 continue;
1941
1942 get_device(dev);
1943
1944 mutex_unlock(&dpm_list_mtx);
1945
1946 error = device_suspend(dev, state, false);
1947
1948 put_device(dev);
1949
1950 mutex_lock(&dpm_list_mtx);
1951
1952 if (error || async_error) {
1953 /*
1954 * Move all devices to the target list to resume them
1955 * properly.
1956 */
1957 list_splice_init(&dpm_prepared_list, &dpm_suspended_list);
1958 break;
1959 }
1960 }
1961
1962 mutex_unlock(&dpm_list_mtx);
1963
1964 async_synchronize_full();
1965 if (!error)
1966 error = async_error;
1967
1968 if (error)
1969 dpm_save_failed_step(SUSPEND_SUSPEND);
1970
1971 dpm_show_time(starttime, state, error, NULL);
1972 trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1973 return error;
1974 }
1975
1976 static bool device_prepare_smart_suspend(struct device *dev)
1977 {
1978 struct device_link *link;
1979 bool ret = true;
1980 int idx;
1981
1982 /*
1983 * The "smart suspend" feature is enabled for devices whose drivers ask
1984 * for it and for devices without PM callbacks.
1985 *
1986 * However, if "smart suspend" is not enabled for the device's parent
1987 * or any of its suppliers that take runtime PM into account, it cannot
1988 * be enabled for the device either.
1989 */
1990 if (!dev->power.no_pm_callbacks &&
1991 !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
1992 return false;
1993
1994 if (dev->parent && !dev_pm_smart_suspend(dev->parent) &&
1995 !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent))
1996 return false;
1997
1998 idx = device_links_read_lock();
1999
2000 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
2001 if (!(link->flags & DL_FLAG_PM_RUNTIME))
2002 continue;
2003
2004 if (!dev_pm_smart_suspend(link->supplier) &&
2005 !pm_runtime_blocked(link->supplier)) {
2006 ret = false;
2007 break;
2008 }
2009 }
2010
2011 device_links_read_unlock(idx);
2012
2013 return ret;
2014 }
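
/*
 * Illustrative sketch (not part of this file): a driver opts in to "smart
 * suspend" by advertising the flag, typically at probe time:
 *
 *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
 *
 * Devices without any PM callbacks are treated as if they had set the flag;
 * the parent and DL_FLAG_PM_RUNTIME supplier checks above may still veto it,
 * as described in the comment inside device_prepare_smart_suspend().
 */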
2015
2016 /**
2017 * device_prepare - Prepare a device for system power transition.
2018 * @dev: Device to handle.
2019 * @state: PM transition of the system being carried out.
2020 *
2021 * Execute the ->prepare() callback(s) for the given device. No new children of the
2022 * device may be registered after this function has returned.
2023 */
2024 static int device_prepare(struct device *dev, pm_message_t state)
2025 {
2026 int (*callback)(struct device *) = NULL;
2027 bool smart_suspend;
2028 int ret = 0;
2029
2030 /*
2031 * If a device's parent goes into runtime suspend at the wrong time,
2032 * it won't be possible to resume the device. To prevent this we
2033 * block runtime suspend here, during the prepare phase, and allow
2034 * it again during the complete phase.
2035 */
2036 pm_runtime_get_noresume(dev);
2037 /*
2038 * If runtime PM is disabled for the device at this point and it has
2039 * never been enabled so far, it should not be enabled until this system
2040 * suspend-resume cycle is complete, so prepare to trigger a warning on
2041 * subsequent attempts to enable it.
2042 */
2043 smart_suspend = !pm_runtime_block_if_disabled(dev);
2044
2045 if (dev->power.syscore)
2046 return 0;
2047
2048 device_lock(dev);
2049
2050 dev->power.wakeup_path = false;
2051
2052 if (dev->power.no_pm_callbacks)
2053 goto unlock;
2054
2055 if (dev->pm_domain)
2056 callback = dev->pm_domain->ops.prepare;
2057 else if (dev->type && dev->type->pm)
2058 callback = dev->type->pm->prepare;
2059 else if (dev->class && dev->class->pm)
2060 callback = dev->class->pm->prepare;
2061 else if (dev->bus && dev->bus->pm)
2062 callback = dev->bus->pm->prepare;
2063
2064 if (!callback && dev->driver && dev->driver->pm)
2065 callback = dev->driver->pm->prepare;
2066
2067 if (callback)
2068 ret = callback(dev);
2069
2070 unlock:
2071 device_unlock(dev);
2072
2073 if (ret < 0) {
2074 suspend_report_result(dev, callback, ret);
2075 pm_runtime_put(dev);
2076 return ret;
2077 }
2078 /* Do not enable "smart suspend" for devices with disabled runtime PM. */
2079 if (smart_suspend)
2080 smart_suspend = device_prepare_smart_suspend(dev);
2081
2082 spin_lock_irq(&dev->power.lock);
2083
2084 dev->power.smart_suspend = smart_suspend;
2085 /*
2086 * A positive return value from ->prepare() means "this device appears
2087 * to be runtime-suspended and its state is fine, so if it really is
2088 * runtime-suspended, you can leave it in that state provided that you
2089 * will do the same thing with all of its descendants". This only
2090 * applies to suspend transitions, however.
2091 */
2092 dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
2093 (ret > 0 || dev->power.no_pm_callbacks) &&
2094 !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
2095
2096 spin_unlock_irq(&dev->power.lock);
2097
2098 return 0;
2099 }
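
/*
 * Illustrative sketch (not part of this file): a ->prepare() callback can
 * return a positive value to request the "direct complete" optimization when
 * the device is already runtime-suspended and its state needs no changes for
 * system suspend, for example:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev) && foo_state_is_ok(dev);
 *	}
 *
 * foo_prepare() and foo_state_is_ok() are hypothetical. The positive return
 * value only takes effect for suspend transitions and only if the driver has
 * not set DPM_FLAG_NO_DIRECT_COMPLETE, as checked above.
 */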
2100
2101 /**
2102 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
2103 * @state: PM transition of the system being carried out.
2104 *
2105 * Execute the ->prepare() callback(s) for all devices.
2106 */
2107 int dpm_prepare(pm_message_t state)
2108 {
2109 int error = 0;
2110
2111 trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
2112 might_sleep();
2113
2114 /*
2115 * Give the known devices a chance to complete their probes before
2116 * probing is disabled. This synchronization point is important at least
2117 * at boot time and during hibernation restore.
2118 */
2119 wait_for_device_probe();
2120 /*
2121 * Probing devices during suspend or hibernation is unsafe and would make
2122 * system behavior unpredictable, so prohibit device probing here and
2123 * defer the probes instead. The normal behavior will be restored in
2124 * dpm_complete().
2125 */
2126 device_block_probing();
2127
2128 mutex_lock(&dpm_list_mtx);
2129 while (!list_empty(&dpm_list) && !error) {
2130 struct device *dev = to_device(dpm_list.next);
2131
2132 get_device(dev);
2133
2134 mutex_unlock(&dpm_list_mtx);
2135
2136 trace_device_pm_callback_start(dev, "", state.event);
2137 error = device_prepare(dev, state);
2138 trace_device_pm_callback_end(dev, error);
2139
2140 mutex_lock(&dpm_list_mtx);
2141
2142 if (!error) {
2143 dev->power.is_prepared = true;
2144 if (!list_empty(&dev->power.entry))
2145 list_move_tail(&dev->power.entry, &dpm_prepared_list);
2146 } else if (error == -EAGAIN) {
2147 error = 0;
2148 } else {
2149 dev_info(dev, "not prepared for power transition: code %d\n",
2150 error);
2151 }
2152
2153 mutex_unlock(&dpm_list_mtx);
2154
2155 put_device(dev);
2156
2157 mutex_lock(&dpm_list_mtx);
2158 }
2159 mutex_unlock(&dpm_list_mtx);
2160 trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
2161 return error;
2162 }
2163
2164 /**
2165 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
2166 * @state: PM transition of the system being carried out.
2167 *
2168 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
2169 * callbacks for them.
2170 */
2171 int dpm_suspend_start(pm_message_t state)
2172 {
2173 ktime_t starttime = ktime_get();
2174 int error;
2175
2176 error = dpm_prepare(state);
2177 if (error)
2178 dpm_save_failed_step(SUSPEND_PREPARE);
2179 else
2180 error = dpm_suspend(state);
2181
2182 dpm_show_time(starttime, state, error, "start");
2183 return error;
2184 }
2185 EXPORT_SYMBOL_GPL(dpm_suspend_start);
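
/*
 * Illustrative sketch (not part of this file): the system sleep core is
 * expected to pair this with the later suspend phases and the matching
 * resume calls, roughly:
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	if (!error)
 *		error = dpm_suspend_end(PMSG_SUSPEND);
 *	...
 *	dpm_resume_end(PMSG_RESUME);
 */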
2186
2187 void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
2188 {
2189 if (ret)
2190 dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret);
2191 }
2192 EXPORT_SYMBOL_GPL(__suspend_report_result);
2193
2194 /**
2195 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2196 * @subordinate: Device that needs to wait for @dev.
2197 * @dev: Device to wait for.
2198 */
2199 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2200 {
2201 dpm_wait(dev, subordinate->power.async_suspend);
2202 return async_error;
2203 }
2204 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
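
/*
 * Illustrative sketch (not part of this file): a driver whose suspend or
 * resume ordering depends on a device that is neither an ancestor nor a
 * device-link supplier may wait for it explicitly from its own callback:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		int error = device_pm_wait_for_dev(dev, other_dev);
 *
 *		if (error)
 *			return error;
 *		...
 *	}
 *
 * foo_suspend() and other_dev are hypothetical; device links are usually the
 * preferred way to express such dependencies.
 */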
2205
2206 /**
2207 * dpm_for_each_dev - device iterator.
2208 * @data: data for the callback.
2209 * @fn: function to be called for each device.
2210 *
2211 * Iterate over devices in dpm_list, and call @fn for each device,
2212 * passing it @data.
2213 */
2214 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
2215 {
2216 struct device *dev;
2217
2218 if (!fn)
2219 return;
2220
2221 device_pm_lock();
2222 list_for_each_entry(dev, &dpm_list, power.entry)
2223 fn(dev, data);
2224 device_pm_unlock();
2225 }
2226 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
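
/*
 * Illustrative sketch (not part of this file): the callback runs for every
 * device on dpm_list with the PM core's device list locked via
 * device_pm_lock(), so it should be fast and must not reacquire that lock:
 *
 *	static void foo_count_dev(struct device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	dpm_for_each_dev(&count, foo_count_dev);
 *
 * foo_count_dev() is hypothetical.
 */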
2227
2228 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2229 {
2230 if (!ops)
2231 return true;
2232
2233 return !ops->prepare &&
2234 !ops->suspend &&
2235 !ops->suspend_late &&
2236 !ops->suspend_noirq &&
2237 !ops->resume_noirq &&
2238 !ops->resume_early &&
2239 !ops->resume &&
2240 !ops->complete;
2241 }
2242
2243 void device_pm_check_callbacks(struct device *dev)
2244 {
2245 unsigned long flags;
2246
2247 spin_lock_irqsave(&dev->power.lock, flags);
2248 dev->power.no_pm_callbacks =
2249 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2250 !dev->bus->suspend && !dev->bus->resume)) &&
2251 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2252 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2253 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2254 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2255 !dev->driver->suspend && !dev->driver->resume));
2256 spin_unlock_irqrestore(&dev->power.lock, flags);
2257 }
2258
2259 bool dev_pm_skip_suspend(struct device *dev)
2260 {
2261 return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev);
2262 }
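
/*
 * Illustrative sketch (not part of this file): bus types and drivers that
 * support "smart suspend" typically use this helper to bail out of their
 * late/noirq suspend callbacks when the device is already runtime-suspended:
 *
 *	static int foo_suspend_late(struct device *dev)
 *	{
 *		if (dev_pm_skip_suspend(dev))
 *			return 0;
 *		...
 *	}
 *
 * foo_suspend_late() is hypothetical; the PCI and ACPI code contains similar
 * checks.
 */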
2263