// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */
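/*
 * Illustrative sketch (hypothetical device "foo", not code from this file):
 * a device ends up on the PM core's list as a side effect of normal
 * registration, and leaves it on removal.
 *
 *	device_initialize(dev);		// sets up dev->power (see device_pm_sleep_init())
 *	dev_set_name(dev, "foo");
 *	device_add(dev);		// calls device_pm_add() internally
 *
 *	device_del(dev);		// calls device_pm_remove() internally
 */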
17
18 #define pr_fmt(fmt) "PM: " fmt
19 #define dev_fmt pr_fmt
20
21 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/sched/debug.h>
31 #include <linux/async.h>
32 #include <linux/suspend.h>
33 #include <trace/events/power.h>
34 #include <linux/cpufreq.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37
38 #include "../base.h"
39 #include "power.h"
40
41 typedef int (*pm_callback_t)(struct device *);
42
43 #define list_for_each_entry_rcu_locked(pos, head, member) \
44 list_for_each_entry_rcu(pos, head, member, \
45 device_links_read_lock_held())
46
47 /*
48 * The entries in the dpm_list list are in a depth first order, simply
49 * because children are guaranteed to be discovered after parents, and
50 * are inserted at the back of the list on discovery.
51 *
52 * Since device_pm_add() may be called with a device lock held,
53 * we must never try to acquire a device lock while holding
54 * dpm_list_mutex.
55 */
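/*
 * Illustrative sketch of that rule (hypothetical walker, not code from this
 * file): the loops below take a reference and drop dpm_list_mtx before
 * calling into a device, because the callee may take the device lock.
 *
 *	mutex_lock(&dpm_list_mtx);
 *	while (!list_empty(&dpm_list)) {
 *		struct device *dev = to_device(dpm_list.next);
 *
 *		get_device(dev);
 *		list_move_tail(&dev->power.entry, &some_other_list);	// hypothetical list
 *		mutex_unlock(&dpm_list_mtx);
 *
 *		handle_one_device(dev);		// hypothetical; may use device_lock()
 *
 *		put_device(dev);
 *		mutex_lock(&dpm_list_mtx);
 *	}
 *	mutex_unlock(&dpm_list_mtx);
 */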
56
57 LIST_HEAD(dpm_list);
58 static LIST_HEAD(dpm_prepared_list);
59 static LIST_HEAD(dpm_suspended_list);
60 static LIST_HEAD(dpm_late_early_list);
61 static LIST_HEAD(dpm_noirq_list);
62
63 static DEFINE_MUTEX(dpm_list_mtx);
64 static pm_message_t pm_transition;
65
66 static int async_error;
67
static const char *pm_verb(int event)
69 {
70 switch (event) {
71 case PM_EVENT_SUSPEND:
72 return "suspend";
73 case PM_EVENT_RESUME:
74 return "resume";
75 case PM_EVENT_FREEZE:
76 return "freeze";
77 case PM_EVENT_QUIESCE:
78 return "quiesce";
79 case PM_EVENT_HIBERNATE:
80 return "hibernate";
81 case PM_EVENT_THAW:
82 return "thaw";
83 case PM_EVENT_RESTORE:
84 return "restore";
85 case PM_EVENT_RECOVER:
86 return "recover";
87 default:
88 return "(unknown PM event)";
89 }
90 }
91
92 /**
93 * device_pm_sleep_init - Initialize system suspend-related device fields.
94 * @dev: Device object being initialized.
95 */
void device_pm_sleep_init(struct device *dev)
97 {
98 dev->power.is_prepared = false;
99 dev->power.is_suspended = false;
100 dev->power.is_noirq_suspended = false;
101 dev->power.is_late_suspended = false;
102 init_completion(&dev->power.completion);
103 complete_all(&dev->power.completion);
104 dev->power.wakeup = NULL;
105 INIT_LIST_HEAD(&dev->power.entry);
106 }
107
108 /**
109 * device_pm_lock - Lock the list of active devices used by the PM core.
110 */
void device_pm_lock(void)
112 {
113 mutex_lock(&dpm_list_mtx);
114 }
115
116 /**
117 * device_pm_unlock - Unlock the list of active devices used by the PM core.
118 */
void device_pm_unlock(void)
120 {
121 mutex_unlock(&dpm_list_mtx);
122 }
123
124 /**
125 * device_pm_add - Add a device to the PM core's list of active devices.
126 * @dev: Device to add to the list.
127 */
void device_pm_add(struct device *dev)
129 {
130 /* Skip PM setup/initialization. */
131 if (device_pm_not_required(dev))
132 return;
133
134 pr_debug("Adding info for %s:%s\n",
135 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
136 device_pm_check_callbacks(dev);
137 mutex_lock(&dpm_list_mtx);
138 if (dev->parent && dev->parent->power.is_prepared)
139 dev_warn(dev, "parent %s should not be sleeping\n",
140 dev_name(dev->parent));
141 list_add_tail(&dev->power.entry, &dpm_list);
142 dev->power.in_dpm_list = true;
143 mutex_unlock(&dpm_list_mtx);
144 }
145
146 /**
147 * device_pm_remove - Remove a device from the PM core's list of active devices.
148 * @dev: Device to be removed from the list.
149 */
void device_pm_remove(struct device *dev)
151 {
152 if (device_pm_not_required(dev))
153 return;
154
155 pr_debug("Removing info for %s:%s\n",
156 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
157 complete_all(&dev->power.completion);
158 mutex_lock(&dpm_list_mtx);
159 list_del_init(&dev->power.entry);
160 dev->power.in_dpm_list = false;
161 mutex_unlock(&dpm_list_mtx);
162 device_wakeup_disable(dev);
163 pm_runtime_remove(dev);
164 device_pm_check_callbacks(dev);
165 }
166
167 /**
168 * device_pm_move_before - Move device in the PM core's list of active devices.
169 * @deva: Device to move in dpm_list.
170 * @devb: Device @deva should come before.
171 */
void device_pm_move_before(struct device *deva, struct device *devb)
173 {
174 pr_debug("Moving %s:%s before %s:%s\n",
175 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
176 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
177 /* Delete deva from dpm_list and reinsert before devb. */
178 list_move_tail(&deva->power.entry, &devb->power.entry);
179 }
180
181 /**
182 * device_pm_move_after - Move device in the PM core's list of active devices.
183 * @deva: Device to move in dpm_list.
184 * @devb: Device @deva should come after.
185 */
void device_pm_move_after(struct device *deva, struct device *devb)
187 {
188 pr_debug("Moving %s:%s after %s:%s\n",
189 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
190 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
191 /* Delete deva from dpm_list and reinsert after devb. */
192 list_move(&deva->power.entry, &devb->power.entry);
193 }
194
195 /**
196 * device_pm_move_last - Move device to end of the PM core's list of devices.
197 * @dev: Device to move in dpm_list.
198 */
void device_pm_move_last(struct device *dev)
200 {
201 pr_debug("Moving %s:%s to end of list\n",
202 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
203 list_move_tail(&dev->power.entry, &dpm_list);
204 }
205
static ktime_t initcall_debug_start(struct device *dev, void *cb)
207 {
208 if (!pm_print_times_enabled)
209 return 0;
210
211 dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
212 task_pid_nr(current),
213 dev->parent ? dev_name(dev->parent) : "none");
214 return ktime_get();
215 }
216
static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  void *cb, int error)
219 {
220 ktime_t rettime;
221
222 if (!pm_print_times_enabled)
223 return;
224
225 rettime = ktime_get();
226 dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
227 (unsigned long long)ktime_us_delta(rettime, calltime));
228 }
229
230 /**
231 * dpm_wait - Wait for a PM operation to complete.
232 * @dev: Device to wait for.
233 * @async: If unset, wait only if the device's power.async_suspend flag is set.
234 */
static void dpm_wait(struct device *dev, bool async)
236 {
237 if (!dev)
238 return;
239
240 if (async || (pm_async_enabled && dev->power.async_suspend))
241 wait_for_completion(&dev->power.completion);
242 }
243
static int dpm_wait_fn(struct device *dev, void *async_ptr)
245 {
246 dpm_wait(dev, *((bool *)async_ptr));
247 return 0;
248 }
249
static void dpm_wait_for_children(struct device *dev, bool async)
251 {
252 device_for_each_child(dev, &async, dpm_wait_fn);
253 }
254
static void dpm_wait_for_suppliers(struct device *dev, bool async)
256 {
257 struct device_link *link;
258 int idx;
259
260 idx = device_links_read_lock();
261
262 /*
263 * If the supplier goes away right after we've checked the link to it,
264 * we'll wait for its completion to change the state, but that's fine,
265 * because the only things that will block as a result are the SRCU
266 * callbacks freeing the link objects for the links in the list we're
267 * walking.
268 */
269 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
270 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
271 dpm_wait(link->supplier, async);
272
273 device_links_read_unlock(idx);
274 }
275
static bool dpm_wait_for_superior(struct device *dev, bool async)
277 {
278 struct device *parent;
279
280 /*
281 * If the device is resumed asynchronously and the parent's callback
282 * deletes both the device and the parent itself, the parent object may
283 * be freed while this function is running, so avoid that by reference
284 * counting the parent once more unless the device has been deleted
285 * already (in which case return right away).
286 */
287 mutex_lock(&dpm_list_mtx);
288
289 if (!device_pm_initialized(dev)) {
290 mutex_unlock(&dpm_list_mtx);
291 return false;
292 }
293
294 parent = get_device(dev->parent);
295
296 mutex_unlock(&dpm_list_mtx);
297
298 dpm_wait(parent, async);
299 put_device(parent);
300
301 dpm_wait_for_suppliers(dev, async);
302
303 /*
304 * If the parent's callback has deleted the device, attempting to resume
305 * it would be invalid, so avoid doing that then.
306 */
307 return device_pm_initialized(dev);
308 }
309
static void dpm_wait_for_consumers(struct device *dev, bool async)
311 {
312 struct device_link *link;
313 int idx;
314
315 idx = device_links_read_lock();
316
317 /*
318 * The status of a device link can only be changed from "dormant" by a
319 * probe, but that cannot happen during system suspend/resume. In
320 * theory it can change to "dormant" at that time, but then it is
 * reasonable to wait for the target device anyway (e.g. if it goes
322 * away, it's better to wait for it to go away completely and then
323 * continue instead of trying to continue in parallel with its
324 * unregistration).
325 */
326 list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
327 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
328 dpm_wait(link->consumer, async);
329
330 device_links_read_unlock(idx);
331 }
332
static void dpm_wait_for_subordinate(struct device *dev, bool async)
334 {
335 dpm_wait_for_children(dev, async);
336 dpm_wait_for_consumers(dev, async);
337 }
338
339 /**
340 * pm_op - Return the PM operation appropriate for given PM event.
341 * @ops: PM operations to choose from.
342 * @state: PM transition of the system being carried out.
343 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
345 {
346 switch (state.event) {
347 #ifdef CONFIG_SUSPEND
348 case PM_EVENT_SUSPEND:
349 return ops->suspend;
350 case PM_EVENT_RESUME:
351 return ops->resume;
352 #endif /* CONFIG_SUSPEND */
353 #ifdef CONFIG_HIBERNATE_CALLBACKS
354 case PM_EVENT_FREEZE:
355 case PM_EVENT_QUIESCE:
356 return ops->freeze;
357 case PM_EVENT_HIBERNATE:
358 return ops->poweroff;
359 case PM_EVENT_THAW:
360 case PM_EVENT_RECOVER:
361 return ops->thaw;
362 case PM_EVENT_RESTORE:
363 return ops->restore;
364 #endif /* CONFIG_HIBERNATE_CALLBACKS */
365 }
366
367 return NULL;
368 }
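/*
 * Illustrative sketch (hypothetical driver "foo", not code from this file):
 * a typical dev_pm_ops that the lookups in pm_op() and pm_late_early_op()
 * would resolve.  SYSTEM_SLEEP_PM_OPS() maps one suspend/resume pair onto
 * the suspend/resume/freeze/thaw/poweroff/restore slots.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		// quiesce the hardware, save context
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		// restore context
 *		return 0;
 *	}
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 */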
369
370 /**
371 * pm_late_early_op - Return the PM operation appropriate for given PM event.
372 * @ops: PM operations to choose from.
373 * @state: PM transition of the system being carried out.
374 *
 * Runtime PM is disabled for the target device while the returned callback runs.
376 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
379 {
380 switch (state.event) {
381 #ifdef CONFIG_SUSPEND
382 case PM_EVENT_SUSPEND:
383 return ops->suspend_late;
384 case PM_EVENT_RESUME:
385 return ops->resume_early;
386 #endif /* CONFIG_SUSPEND */
387 #ifdef CONFIG_HIBERNATE_CALLBACKS
388 case PM_EVENT_FREEZE:
389 case PM_EVENT_QUIESCE:
390 return ops->freeze_late;
391 case PM_EVENT_HIBERNATE:
392 return ops->poweroff_late;
393 case PM_EVENT_THAW:
394 case PM_EVENT_RECOVER:
395 return ops->thaw_early;
396 case PM_EVENT_RESTORE:
397 return ops->restore_early;
398 #endif /* CONFIG_HIBERNATE_CALLBACKS */
399 }
400
401 return NULL;
402 }
403
404 /**
405 * pm_noirq_op - Return the PM operation appropriate for given PM event.
406 * @ops: PM operations to choose from.
407 * @state: PM transition of the system being carried out.
408 *
 * Device drivers will not receive interrupts while the returned callback is
 * running.
411 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
413 {
414 switch (state.event) {
415 #ifdef CONFIG_SUSPEND
416 case PM_EVENT_SUSPEND:
417 return ops->suspend_noirq;
418 case PM_EVENT_RESUME:
419 return ops->resume_noirq;
420 #endif /* CONFIG_SUSPEND */
421 #ifdef CONFIG_HIBERNATE_CALLBACKS
422 case PM_EVENT_FREEZE:
423 case PM_EVENT_QUIESCE:
424 return ops->freeze_noirq;
425 case PM_EVENT_HIBERNATE:
426 return ops->poweroff_noirq;
427 case PM_EVENT_THAW:
428 case PM_EVENT_RECOVER:
429 return ops->thaw_noirq;
430 case PM_EVENT_RESTORE:
431 return ops->restore_noirq;
432 #endif /* CONFIG_HIBERNATE_CALLBACKS */
433 }
434
435 return NULL;
436 }
437
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
439 {
440 dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
441 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
442 ", may wakeup" : "", dev->power.driver_flags);
443 }
444
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
		       int error)
447 {
448 dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
449 error);
450 }
451
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
454 {
455 ktime_t calltime;
456 u64 usecs64;
457 int usecs;
458
459 calltime = ktime_get();
460 usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
461 do_div(usecs64, NSEC_PER_USEC);
462 usecs = usecs64;
463 if (usecs == 0)
464 usecs = 1;
465
466 pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
467 info ?: "", info ? " " : "", pm_verb(state.event),
468 error ? "aborted" : "complete",
469 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
470 }
471
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
474 {
475 ktime_t calltime;
476 int error;
477
478 if (!cb)
479 return 0;
480
481 calltime = initcall_debug_start(dev, cb);
482
483 pm_dev_dbg(dev, state, info);
484 trace_device_pm_callback_start(dev, info, state.event);
485 error = cb(dev);
486 trace_device_pm_callback_end(dev, error);
487 suspend_report_result(dev, cb, error);
488
489 initcall_debug_report(dev, calltime, cb, error);
490
491 return error;
492 }
493
494 #ifdef CONFIG_DPM_WATCHDOG
495 struct dpm_watchdog {
496 struct device *dev;
497 struct task_struct *tsk;
498 struct timer_list timer;
499 bool fatal;
500 };
501
502 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
503 struct dpm_watchdog wd
504
505 /**
506 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
507 * @t: The timer that PM watchdog depends on.
508 *
509 * Called when a driver has timed out suspending or resuming.
510 * There's not much we can do here to recover so panic() to
511 * capture a crash-dump in pstore.
512 */
static void dpm_watchdog_handler(struct timer_list *t)
514 {
515 struct dpm_watchdog *wd = from_timer(wd, t, timer);
516 struct timer_list *timer = &wd->timer;
517 unsigned int time_left;
518
519 if (wd->fatal) {
520 dev_emerg(wd->dev, "**** DPM device timeout ****\n");
521 show_stack(wd->tsk, NULL, KERN_EMERG);
522 panic("%s %s: unrecoverable failure\n",
523 dev_driver_string(wd->dev), dev_name(wd->dev));
524 }
525
526 time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
527 dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
528 CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
529 show_stack(wd->tsk, NULL, KERN_WARNING);
530
531 wd->fatal = true;
532 mod_timer(timer, jiffies + HZ * time_left);
533 }
534
535 /**
536 * dpm_watchdog_set - Enable pm watchdog for given device.
537 * @wd: Watchdog. Must be allocated on the stack.
538 * @dev: Device to handle.
539 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
541 {
542 struct timer_list *timer = &wd->timer;
543
544 wd->dev = dev;
545 wd->tsk = current;
546 wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
547
548 timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
549 /* use same timeout value for both suspend and resume */
550 timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
551 add_timer(timer);
552 }
553
554 /**
555 * dpm_watchdog_clear - Disable suspend/resume watchdog.
556 * @wd: Watchdog to disable.
557 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
559 {
560 struct timer_list *timer = &wd->timer;
561
562 del_timer_sync(timer);
563 destroy_timer_on_stack(timer);
564 }
565 #else
566 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
567 #define dpm_watchdog_set(x, y)
568 #define dpm_watchdog_clear(x)
569 #endif
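/*
 * The watchdog above is armed around potentially long-running callbacks,
 * following the pattern used by device_resume() and device_suspend() below:
 *
 *	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 *
 *	dpm_watchdog_set(&wd, dev);
 *	error = dpm_run_callback(callback, dev, state, info);	// may stall
 *	dpm_watchdog_clear(&wd);
 *
 * With CONFIG_DPM_WATCHDOG unset, the declaration and both helpers expand
 * to nothing.
 */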
570
571 /*------------------------- Resume routines -------------------------*/
572
573 /**
574 * dev_pm_skip_resume - System-wide device resume optimization check.
575 * @dev: Target device.
576 *
577 * Return:
578 * - %false if the transition under way is RESTORE.
579 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
580 * - The logical negation of %power.must_resume otherwise (that is, when the
581 * transition under way is RESUME).
582 */
bool dev_pm_skip_resume(struct device *dev)
584 {
585 if (pm_transition.event == PM_EVENT_RESTORE)
586 return false;
587
588 if (pm_transition.event == PM_EVENT_THAW)
589 return dev_pm_skip_suspend(dev);
590
591 return !dev->power.must_resume;
592 }
593
static bool is_async(struct device *dev)
595 {
596 return dev->power.async_suspend && pm_async_enabled
597 && !pm_trace_is_enabled();
598 }
599
static bool dpm_async_fn(struct device *dev, async_func_t func)
601 {
602 reinit_completion(&dev->power.completion);
603
604 if (is_async(dev)) {
605 dev->power.async_in_progress = true;
606
607 get_device(dev);
608
609 if (async_schedule_dev_nocall(func, dev))
610 return true;
611
612 put_device(dev);
613 }
614 /*
615 * Because async_schedule_dev_nocall() above has returned false or it
616 * has not been called at all, func() is not running and it is safe to
617 * update the async_in_progress flag without extra synchronization.
618 */
619 dev->power.async_in_progress = false;
620 return false;
621 }
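/*
 * A device is handled asynchronously here only if it has opted in (see
 * is_async() above) and async PM is enabled globally via /sys/power/pm_async.
 * Illustrative opt-in from a hypothetical probe function (not code from this
 * file):
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		// no suspend/resume ordering constraints beyond the ones the
 *		// PM core already knows about (parent/children, device links)
 *		device_enable_async_suspend(dev);
 *		return 0;
 *	}
 */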
622
623 /**
624 * device_resume_noirq - Execute a "noirq resume" callback for given device.
625 * @dev: Device to handle.
626 * @state: PM transition of the system being carried out.
627 * @async: If true, the device is being resumed asynchronously.
628 *
629 * The driver of @dev will not receive interrupts while this function is being
630 * executed.
631 */
static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
633 {
634 pm_callback_t callback = NULL;
635 const char *info = NULL;
636 bool skip_resume;
637 int error = 0;
638
639 TRACE_DEVICE(dev);
640 TRACE_RESUME(0);
641
642 if (dev->power.syscore || dev->power.direct_complete)
643 goto Out;
644
645 if (!dev->power.is_noirq_suspended)
646 goto Out;
647
648 if (!dpm_wait_for_superior(dev, async))
649 goto Out;
650
651 skip_resume = dev_pm_skip_resume(dev);
652 /*
653 * If the driver callback is skipped below or by the middle layer
654 * callback and device_resume_early() also skips the driver callback for
655 * this device later, it needs to appear as "suspended" to PM-runtime,
656 * so change its status accordingly.
657 *
658 * Otherwise, the device is going to be resumed, so set its PM-runtime
659 * status to "active" unless its power.set_active flag is clear, in
660 * which case it is not necessary to update its PM-runtime status.
661 */
662 if (skip_resume) {
663 pm_runtime_set_suspended(dev);
664 } else if (dev->power.set_active) {
665 pm_runtime_set_active(dev);
666 dev->power.set_active = false;
667 }
668
669 if (dev->pm_domain) {
670 info = "noirq power domain ";
671 callback = pm_noirq_op(&dev->pm_domain->ops, state);
672 } else if (dev->type && dev->type->pm) {
673 info = "noirq type ";
674 callback = pm_noirq_op(dev->type->pm, state);
675 } else if (dev->class && dev->class->pm) {
676 info = "noirq class ";
677 callback = pm_noirq_op(dev->class->pm, state);
678 } else if (dev->bus && dev->bus->pm) {
679 info = "noirq bus ";
680 callback = pm_noirq_op(dev->bus->pm, state);
681 }
682 if (callback)
683 goto Run;
684
685 if (skip_resume)
686 goto Skip;
687
688 if (dev->driver && dev->driver->pm) {
689 info = "noirq driver ";
690 callback = pm_noirq_op(dev->driver->pm, state);
691 }
692
693 Run:
694 error = dpm_run_callback(callback, dev, state, info);
695
696 Skip:
697 dev->power.is_noirq_suspended = false;
698
699 Out:
700 complete_all(&dev->power.completion);
701 TRACE_RESUME(error);
702
703 if (error) {
704 async_error = error;
705 dpm_save_failed_dev(dev_name(dev));
706 pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
707 }
708 }
709
static void async_resume_noirq(void *data, async_cookie_t cookie)
711 {
712 struct device *dev = data;
713
714 device_resume_noirq(dev, pm_transition, true);
715 put_device(dev);
716 }
717
static void dpm_noirq_resume_devices(pm_message_t state)
719 {
720 struct device *dev;
721 ktime_t starttime = ktime_get();
722
723 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
724
725 async_error = 0;
726 pm_transition = state;
727
728 mutex_lock(&dpm_list_mtx);
729
730 /*
731 * Trigger the resume of "async" devices upfront so they don't have to
732 * wait for the "non-async" ones they don't depend on.
733 */
734 list_for_each_entry(dev, &dpm_noirq_list, power.entry)
735 dpm_async_fn(dev, async_resume_noirq);
736
737 while (!list_empty(&dpm_noirq_list)) {
738 dev = to_device(dpm_noirq_list.next);
739 list_move_tail(&dev->power.entry, &dpm_late_early_list);
740
741 if (!dev->power.async_in_progress) {
742 get_device(dev);
743
744 mutex_unlock(&dpm_list_mtx);
745
746 device_resume_noirq(dev, state, false);
747
748 put_device(dev);
749
750 mutex_lock(&dpm_list_mtx);
751 }
752 }
753 mutex_unlock(&dpm_list_mtx);
754 async_synchronize_full();
755 dpm_show_time(starttime, state, 0, "noirq");
756 if (async_error)
757 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
758
759 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
760 }
761
762 /**
763 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
764 * @state: PM transition of the system being carried out.
765 *
766 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
767 * allow device drivers' interrupt handlers to be called.
768 */
void dpm_resume_noirq(pm_message_t state)
770 {
771 dpm_noirq_resume_devices(state);
772
773 resume_device_irqs();
774 device_wakeup_disarm_wake_irqs();
775 }
776
777 /**
778 * device_resume_early - Execute an "early resume" callback for given device.
779 * @dev: Device to handle.
780 * @state: PM transition of the system being carried out.
781 * @async: If true, the device is being resumed asynchronously.
782 *
783 * Runtime PM is disabled for @dev while this function is being executed.
784 */
static void device_resume_early(struct device *dev, pm_message_t state, bool async)
786 {
787 pm_callback_t callback = NULL;
788 const char *info = NULL;
789 int error = 0;
790
791 TRACE_DEVICE(dev);
792 TRACE_RESUME(0);
793
794 if (dev->power.syscore || dev->power.direct_complete)
795 goto Out;
796
797 if (!dev->power.is_late_suspended)
798 goto Out;
799
800 if (!dpm_wait_for_superior(dev, async))
801 goto Out;
802
803 if (dev->pm_domain) {
804 info = "early power domain ";
805 callback = pm_late_early_op(&dev->pm_domain->ops, state);
806 } else if (dev->type && dev->type->pm) {
807 info = "early type ";
808 callback = pm_late_early_op(dev->type->pm, state);
809 } else if (dev->class && dev->class->pm) {
810 info = "early class ";
811 callback = pm_late_early_op(dev->class->pm, state);
812 } else if (dev->bus && dev->bus->pm) {
813 info = "early bus ";
814 callback = pm_late_early_op(dev->bus->pm, state);
815 }
816 if (callback)
817 goto Run;
818
819 if (dev_pm_skip_resume(dev))
820 goto Skip;
821
822 if (dev->driver && dev->driver->pm) {
823 info = "early driver ";
824 callback = pm_late_early_op(dev->driver->pm, state);
825 }
826
827 Run:
828 error = dpm_run_callback(callback, dev, state, info);
829
830 Skip:
831 dev->power.is_late_suspended = false;
832
833 Out:
834 TRACE_RESUME(error);
835
836 pm_runtime_enable(dev);
837 complete_all(&dev->power.completion);
838
839 if (error) {
840 async_error = error;
841 dpm_save_failed_dev(dev_name(dev));
842 pm_dev_err(dev, state, async ? " async early" : " early", error);
843 }
844 }
845
static void async_resume_early(void *data, async_cookie_t cookie)
847 {
848 struct device *dev = data;
849
850 device_resume_early(dev, pm_transition, true);
851 put_device(dev);
852 }
853
854 /**
855 * dpm_resume_early - Execute "early resume" callbacks for all devices.
856 * @state: PM transition of the system being carried out.
857 */
void dpm_resume_early(pm_message_t state)
859 {
860 struct device *dev;
861 ktime_t starttime = ktime_get();
862
863 trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
864
865 async_error = 0;
866 pm_transition = state;
867
868 mutex_lock(&dpm_list_mtx);
869
870 /*
871 * Trigger the resume of "async" devices upfront so they don't have to
872 * wait for the "non-async" ones they don't depend on.
873 */
874 list_for_each_entry(dev, &dpm_late_early_list, power.entry)
875 dpm_async_fn(dev, async_resume_early);
876
877 while (!list_empty(&dpm_late_early_list)) {
878 dev = to_device(dpm_late_early_list.next);
879 list_move_tail(&dev->power.entry, &dpm_suspended_list);
880
881 if (!dev->power.async_in_progress) {
882 get_device(dev);
883
884 mutex_unlock(&dpm_list_mtx);
885
886 device_resume_early(dev, state, false);
887
888 put_device(dev);
889
890 mutex_lock(&dpm_list_mtx);
891 }
892 }
893 mutex_unlock(&dpm_list_mtx);
894 async_synchronize_full();
895 dpm_show_time(starttime, state, 0, "early");
896 if (async_error)
897 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
898
899 trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
900 }
901
902 /**
903 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
904 * @state: PM transition of the system being carried out.
905 */
void dpm_resume_start(pm_message_t state)
907 {
908 dpm_resume_noirq(state);
909 dpm_resume_early(state);
910 }
911 EXPORT_SYMBOL_GPL(dpm_resume_start);
912
913 /**
914 * device_resume - Execute "resume" callbacks for given device.
915 * @dev: Device to handle.
916 * @state: PM transition of the system being carried out.
917 * @async: If true, the device is being resumed asynchronously.
918 */
static void device_resume(struct device *dev, pm_message_t state, bool async)
920 {
921 pm_callback_t callback = NULL;
922 const char *info = NULL;
923 int error = 0;
924 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
925
926 TRACE_DEVICE(dev);
927 TRACE_RESUME(0);
928
929 if (dev->power.syscore)
930 goto Complete;
931
932 if (dev->power.direct_complete) {
933 /* Match the pm_runtime_disable() in device_suspend(). */
934 pm_runtime_enable(dev);
935 goto Complete;
936 }
937
938 if (!dpm_wait_for_superior(dev, async))
939 goto Complete;
940
941 dpm_watchdog_set(&wd, dev);
942 device_lock(dev);
943
944 /*
945 * This is a fib. But we'll allow new children to be added below
946 * a resumed device, even if the device hasn't been completed yet.
947 */
948 dev->power.is_prepared = false;
949
950 if (!dev->power.is_suspended)
951 goto Unlock;
952
953 if (dev->pm_domain) {
954 info = "power domain ";
955 callback = pm_op(&dev->pm_domain->ops, state);
956 goto Driver;
957 }
958
959 if (dev->type && dev->type->pm) {
960 info = "type ";
961 callback = pm_op(dev->type->pm, state);
962 goto Driver;
963 }
964
965 if (dev->class && dev->class->pm) {
966 info = "class ";
967 callback = pm_op(dev->class->pm, state);
968 goto Driver;
969 }
970
971 if (dev->bus) {
972 if (dev->bus->pm) {
973 info = "bus ";
974 callback = pm_op(dev->bus->pm, state);
975 } else if (dev->bus->resume) {
976 info = "legacy bus ";
977 callback = dev->bus->resume;
978 goto End;
979 }
980 }
981
982 Driver:
983 if (!callback && dev->driver && dev->driver->pm) {
984 info = "driver ";
985 callback = pm_op(dev->driver->pm, state);
986 }
987
988 End:
989 error = dpm_run_callback(callback, dev, state, info);
990 dev->power.is_suspended = false;
991
992 Unlock:
993 device_unlock(dev);
994 dpm_watchdog_clear(&wd);
995
996 Complete:
997 complete_all(&dev->power.completion);
998
999 TRACE_RESUME(error);
1000
1001 if (error) {
1002 async_error = error;
1003 dpm_save_failed_dev(dev_name(dev));
1004 pm_dev_err(dev, state, async ? " async" : "", error);
1005 }
1006 }
1007
static void async_resume(void *data, async_cookie_t cookie)
1009 {
1010 struct device *dev = data;
1011
1012 device_resume(dev, pm_transition, true);
1013 put_device(dev);
1014 }
1015
1016 /**
1017 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1018 * @state: PM transition of the system being carried out.
1019 *
1020 * Execute the appropriate "resume" callback for all devices whose status
1021 * indicates that they are suspended.
1022 */
void dpm_resume(pm_message_t state)
1024 {
1025 struct device *dev;
1026 ktime_t starttime = ktime_get();
1027
1028 trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1029 might_sleep();
1030
1031 pm_transition = state;
1032 async_error = 0;
1033
1034 mutex_lock(&dpm_list_mtx);
1035
1036 /*
1037 * Trigger the resume of "async" devices upfront so they don't have to
1038 * wait for the "non-async" ones they don't depend on.
1039 */
1040 list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1041 dpm_async_fn(dev, async_resume);
1042
1043 while (!list_empty(&dpm_suspended_list)) {
1044 dev = to_device(dpm_suspended_list.next);
1045 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1046
1047 if (!dev->power.async_in_progress) {
1048 get_device(dev);
1049
1050 mutex_unlock(&dpm_list_mtx);
1051
1052 device_resume(dev, state, false);
1053
1054 put_device(dev);
1055
1056 mutex_lock(&dpm_list_mtx);
1057 }
1058 }
1059 mutex_unlock(&dpm_list_mtx);
1060 async_synchronize_full();
1061 dpm_show_time(starttime, state, 0, NULL);
1062 if (async_error)
1063 dpm_save_failed_step(SUSPEND_RESUME);
1064
1065 cpufreq_resume();
1066 devfreq_resume();
1067 trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1068 }
1069
1070 /**
1071 * device_complete - Complete a PM transition for given device.
1072 * @dev: Device to handle.
1073 * @state: PM transition of the system being carried out.
1074 */
static void device_complete(struct device *dev, pm_message_t state)
1076 {
1077 void (*callback)(struct device *) = NULL;
1078 const char *info = NULL;
1079
1080 if (dev->power.syscore)
1081 goto out;
1082
1083 device_lock(dev);
1084
1085 if (dev->pm_domain) {
1086 info = "completing power domain ";
1087 callback = dev->pm_domain->ops.complete;
1088 } else if (dev->type && dev->type->pm) {
1089 info = "completing type ";
1090 callback = dev->type->pm->complete;
1091 } else if (dev->class && dev->class->pm) {
1092 info = "completing class ";
1093 callback = dev->class->pm->complete;
1094 } else if (dev->bus && dev->bus->pm) {
1095 info = "completing bus ";
1096 callback = dev->bus->pm->complete;
1097 }
1098
1099 if (!callback && dev->driver && dev->driver->pm) {
1100 info = "completing driver ";
1101 callback = dev->driver->pm->complete;
1102 }
1103
1104 if (callback) {
1105 pm_dev_dbg(dev, state, info);
1106 callback(dev);
1107 }
1108
1109 device_unlock(dev);
1110
1111 out:
1112 pm_runtime_put(dev);
1113 }
1114
1115 /**
1116 * dpm_complete - Complete a PM transition for all non-sysdev devices.
1117 * @state: PM transition of the system being carried out.
1118 *
1119 * Execute the ->complete() callbacks for all devices whose PM status is not
1120 * DPM_ON (this allows new devices to be registered).
1121 */
void dpm_complete(pm_message_t state)
1123 {
1124 struct list_head list;
1125
1126 trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1127 might_sleep();
1128
1129 INIT_LIST_HEAD(&list);
1130 mutex_lock(&dpm_list_mtx);
1131 while (!list_empty(&dpm_prepared_list)) {
1132 struct device *dev = to_device(dpm_prepared_list.prev);
1133
1134 get_device(dev);
1135 dev->power.is_prepared = false;
1136 list_move(&dev->power.entry, &list);
1137
1138 mutex_unlock(&dpm_list_mtx);
1139
1140 trace_device_pm_callback_start(dev, "", state.event);
1141 device_complete(dev, state);
1142 trace_device_pm_callback_end(dev, 0);
1143
1144 put_device(dev);
1145
1146 mutex_lock(&dpm_list_mtx);
1147 }
1148 list_splice(&list, &dpm_list);
1149 mutex_unlock(&dpm_list_mtx);
1150
1151 /* Allow device probing and trigger re-probing of deferred devices */
1152 device_unblock_probing();
1153 trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1154 }
1155
1156 /**
1157 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1158 * @state: PM transition of the system being carried out.
1159 *
1160 * Execute "resume" callbacks for all devices and complete the PM transition of
1161 * the system.
1162 */
void dpm_resume_end(pm_message_t state)
1164 {
1165 dpm_resume(state);
1166 dpm_complete(state);
1167 }
1168 EXPORT_SYMBOL_GPL(dpm_resume_end);
1169
1170
1171 /*------------------------- Suspend routines -------------------------*/
1172
1173 /**
1174 * resume_event - Return a "resume" message for given "suspend" sleep state.
1175 * @sleep_state: PM message representing a sleep state.
1176 *
1177 * Return a PM message representing the resume event corresponding to given
1178 * sleep state.
1179 */
static pm_message_t resume_event(pm_message_t sleep_state)
1181 {
1182 switch (sleep_state.event) {
1183 case PM_EVENT_SUSPEND:
1184 return PMSG_RESUME;
1185 case PM_EVENT_FREEZE:
1186 case PM_EVENT_QUIESCE:
1187 return PMSG_RECOVER;
1188 case PM_EVENT_HIBERNATE:
1189 return PMSG_RESTORE;
1190 }
1191 return PMSG_ON;
1192 }
1193
static void dpm_superior_set_must_resume(struct device *dev)
1195 {
1196 struct device_link *link;
1197 int idx;
1198
1199 if (dev->parent)
1200 dev->parent->power.must_resume = true;
1201
1202 idx = device_links_read_lock();
1203
1204 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1205 link->supplier->power.must_resume = true;
1206
1207 device_links_read_unlock(idx);
1208 }
1209
1210 /**
1211 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1212 * @dev: Device to handle.
1213 * @state: PM transition of the system being carried out.
1214 * @async: If true, the device is being suspended asynchronously.
1215 *
1216 * The driver of @dev will not receive interrupts while this function is being
1217 * executed.
1218 */
static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1220 {
1221 pm_callback_t callback = NULL;
1222 const char *info = NULL;
1223 int error = 0;
1224
1225 TRACE_DEVICE(dev);
1226 TRACE_SUSPEND(0);
1227
1228 dpm_wait_for_subordinate(dev, async);
1229
1230 if (async_error)
1231 goto Complete;
1232
1233 if (dev->power.syscore || dev->power.direct_complete)
1234 goto Complete;
1235
1236 if (dev->pm_domain) {
1237 info = "noirq power domain ";
1238 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1239 } else if (dev->type && dev->type->pm) {
1240 info = "noirq type ";
1241 callback = pm_noirq_op(dev->type->pm, state);
1242 } else if (dev->class && dev->class->pm) {
1243 info = "noirq class ";
1244 callback = pm_noirq_op(dev->class->pm, state);
1245 } else if (dev->bus && dev->bus->pm) {
1246 info = "noirq bus ";
1247 callback = pm_noirq_op(dev->bus->pm, state);
1248 }
1249 if (callback)
1250 goto Run;
1251
1252 if (dev_pm_skip_suspend(dev))
1253 goto Skip;
1254
1255 if (dev->driver && dev->driver->pm) {
1256 info = "noirq driver ";
1257 callback = pm_noirq_op(dev->driver->pm, state);
1258 }
1259
1260 Run:
1261 error = dpm_run_callback(callback, dev, state, info);
1262 if (error) {
1263 async_error = error;
1264 dpm_save_failed_dev(dev_name(dev));
1265 pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
1266 goto Complete;
1267 }
1268
1269 Skip:
1270 dev->power.is_noirq_suspended = true;
1271
1272 /*
1273 * Skipping the resume of devices that were in use right before the
1274 * system suspend (as indicated by their PM-runtime usage counters)
1275 * would be suboptimal. Also resume them if doing that is not allowed
1276 * to be skipped.
1277 */
1278 if (atomic_read(&dev->power.usage_count) > 1 ||
1279 !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1280 dev->power.may_skip_resume))
1281 dev->power.must_resume = true;
1282
1283 if (dev->power.must_resume) {
1284 if (dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) {
1285 dev->power.set_active = true;
1286 if (dev->parent && !dev->parent->power.ignore_children)
1287 dev->parent->power.set_active = true;
1288 }
1289 dpm_superior_set_must_resume(dev);
1290 }
1291
1292 Complete:
1293 complete_all(&dev->power.completion);
1294 TRACE_SUSPEND(error);
1295 return error;
1296 }
1297
static void async_suspend_noirq(void *data, async_cookie_t cookie)
1299 {
1300 struct device *dev = data;
1301
1302 device_suspend_noirq(dev, pm_transition, true);
1303 put_device(dev);
1304 }
1305
static int dpm_noirq_suspend_devices(pm_message_t state)
1307 {
1308 ktime_t starttime = ktime_get();
1309 int error = 0;
1310
1311 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1312
1313 pm_transition = state;
1314 async_error = 0;
1315
1316 mutex_lock(&dpm_list_mtx);
1317
1318 while (!list_empty(&dpm_late_early_list)) {
1319 struct device *dev = to_device(dpm_late_early_list.prev);
1320
1321 list_move(&dev->power.entry, &dpm_noirq_list);
1322
1323 if (dpm_async_fn(dev, async_suspend_noirq))
1324 continue;
1325
1326 get_device(dev);
1327
1328 mutex_unlock(&dpm_list_mtx);
1329
1330 error = device_suspend_noirq(dev, state, false);
1331
1332 put_device(dev);
1333
1334 mutex_lock(&dpm_list_mtx);
1335
1336 if (error || async_error)
1337 break;
1338 }
1339
1340 mutex_unlock(&dpm_list_mtx);
1341
1342 async_synchronize_full();
1343 if (!error)
1344 error = async_error;
1345
1346 if (error)
1347 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1348
1349 dpm_show_time(starttime, state, error, "noirq");
1350 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1351 return error;
1352 }
1353
1354 /**
1355 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1356 * @state: PM transition of the system being carried out.
1357 *
1358 * Prevent device drivers' interrupt handlers from being called and invoke
1359 * "noirq" suspend callbacks for all non-sysdev devices.
1360 */
int dpm_suspend_noirq(pm_message_t state)
1362 {
1363 int ret;
1364
1365 device_wakeup_arm_wake_irqs();
1366 suspend_device_irqs();
1367
1368 ret = dpm_noirq_suspend_devices(state);
1369 if (ret)
1370 dpm_resume_noirq(resume_event(state));
1371
1372 return ret;
1373 }
1374
static void dpm_propagate_wakeup_to_parent(struct device *dev)
1376 {
1377 struct device *parent = dev->parent;
1378
1379 if (!parent)
1380 return;
1381
1382 spin_lock_irq(&parent->power.lock);
1383
1384 if (device_wakeup_path(dev) && !parent->power.ignore_children)
1385 parent->power.wakeup_path = true;
1386
1387 spin_unlock_irq(&parent->power.lock);
1388 }
1389
1390 /**
1391 * device_suspend_late - Execute a "late suspend" callback for given device.
1392 * @dev: Device to handle.
1393 * @state: PM transition of the system being carried out.
1394 * @async: If true, the device is being suspended asynchronously.
1395 *
1396 * Runtime PM is disabled for @dev while this function is being executed.
1397 */
static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
1399 {
1400 pm_callback_t callback = NULL;
1401 const char *info = NULL;
1402 int error = 0;
1403
1404 TRACE_DEVICE(dev);
1405 TRACE_SUSPEND(0);
1406
1407 __pm_runtime_disable(dev, false);
1408
1409 dpm_wait_for_subordinate(dev, async);
1410
1411 if (async_error)
1412 goto Complete;
1413
1414 if (pm_wakeup_pending()) {
1415 async_error = -EBUSY;
1416 goto Complete;
1417 }
1418
1419 if (dev->power.syscore || dev->power.direct_complete)
1420 goto Complete;
1421
1422 if (dev->pm_domain) {
1423 info = "late power domain ";
1424 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1425 } else if (dev->type && dev->type->pm) {
1426 info = "late type ";
1427 callback = pm_late_early_op(dev->type->pm, state);
1428 } else if (dev->class && dev->class->pm) {
1429 info = "late class ";
1430 callback = pm_late_early_op(dev->class->pm, state);
1431 } else if (dev->bus && dev->bus->pm) {
1432 info = "late bus ";
1433 callback = pm_late_early_op(dev->bus->pm, state);
1434 }
1435 if (callback)
1436 goto Run;
1437
1438 if (dev_pm_skip_suspend(dev))
1439 goto Skip;
1440
1441 if (dev->driver && dev->driver->pm) {
1442 info = "late driver ";
1443 callback = pm_late_early_op(dev->driver->pm, state);
1444 }
1445
1446 Run:
1447 error = dpm_run_callback(callback, dev, state, info);
1448 if (error) {
1449 async_error = error;
1450 dpm_save_failed_dev(dev_name(dev));
1451 pm_dev_err(dev, state, async ? " async late" : " late", error);
1452 goto Complete;
1453 }
1454 dpm_propagate_wakeup_to_parent(dev);
1455
1456 Skip:
1457 dev->power.is_late_suspended = true;
1458
1459 Complete:
1460 TRACE_SUSPEND(error);
1461 complete_all(&dev->power.completion);
1462 return error;
1463 }
1464
static void async_suspend_late(void *data, async_cookie_t cookie)
1466 {
1467 struct device *dev = data;
1468
1469 device_suspend_late(dev, pm_transition, true);
1470 put_device(dev);
1471 }
1472
1473 /**
1474 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1475 * @state: PM transition of the system being carried out.
1476 */
int dpm_suspend_late(pm_message_t state)
1478 {
1479 ktime_t starttime = ktime_get();
1480 int error = 0;
1481
1482 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1483
1484 pm_transition = state;
1485 async_error = 0;
1486
1487 wake_up_all_idle_cpus();
1488
1489 mutex_lock(&dpm_list_mtx);
1490
1491 while (!list_empty(&dpm_suspended_list)) {
1492 struct device *dev = to_device(dpm_suspended_list.prev);
1493
1494 list_move(&dev->power.entry, &dpm_late_early_list);
1495
1496 if (dpm_async_fn(dev, async_suspend_late))
1497 continue;
1498
1499 get_device(dev);
1500
1501 mutex_unlock(&dpm_list_mtx);
1502
1503 error = device_suspend_late(dev, state, false);
1504
1505 put_device(dev);
1506
1507 mutex_lock(&dpm_list_mtx);
1508
1509 if (error || async_error)
1510 break;
1511 }
1512
1513 mutex_unlock(&dpm_list_mtx);
1514
1515 async_synchronize_full();
1516 if (!error)
1517 error = async_error;
1518
1519 if (error) {
1520 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1521 dpm_resume_early(resume_event(state));
1522 }
1523 dpm_show_time(starttime, state, error, "late");
1524 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1525 return error;
1526 }
1527
1528 /**
1529 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1530 * @state: PM transition of the system being carried out.
1531 */
int dpm_suspend_end(pm_message_t state)
1533 {
1534 ktime_t starttime = ktime_get();
1535 int error;
1536
1537 error = dpm_suspend_late(state);
1538 if (error)
1539 goto out;
1540
1541 error = dpm_suspend_noirq(state);
1542 if (error)
1543 dpm_resume_early(resume_event(state));
1544
1545 out:
1546 dpm_show_time(starttime, state, error, "end");
1547 return error;
1548 }
1549 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1550
1551 /**
1552 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1553 * @dev: Device to suspend.
1554 * @state: PM transition of the system being carried out.
1555 * @cb: Suspend callback to execute.
1556 * @info: string description of caller.
1557 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  const char *info)
1561 {
1562 int error;
1563 ktime_t calltime;
1564
1565 calltime = initcall_debug_start(dev, cb);
1566
1567 trace_device_pm_callback_start(dev, info, state.event);
1568 error = cb(dev, state);
1569 trace_device_pm_callback_end(dev, error);
1570 suspend_report_result(dev, cb, error);
1571
1572 initcall_debug_report(dev, calltime, cb, error);
1573
1574 return error;
1575 }
1576
static void dpm_clear_superiors_direct_complete(struct device *dev)
1578 {
1579 struct device_link *link;
1580 int idx;
1581
1582 if (dev->parent) {
1583 spin_lock_irq(&dev->parent->power.lock);
1584 dev->parent->power.direct_complete = false;
1585 spin_unlock_irq(&dev->parent->power.lock);
1586 }
1587
1588 idx = device_links_read_lock();
1589
1590 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1591 spin_lock_irq(&link->supplier->power.lock);
1592 link->supplier->power.direct_complete = false;
1593 spin_unlock_irq(&link->supplier->power.lock);
1594 }
1595
1596 device_links_read_unlock(idx);
1597 }
1598
1599 /**
1600 * device_suspend - Execute "suspend" callbacks for given device.
1601 * @dev: Device to handle.
1602 * @state: PM transition of the system being carried out.
1603 * @async: If true, the device is being suspended asynchronously.
1604 */
static int device_suspend(struct device *dev, pm_message_t state, bool async)
1606 {
1607 pm_callback_t callback = NULL;
1608 const char *info = NULL;
1609 int error = 0;
1610 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1611
1612 TRACE_DEVICE(dev);
1613 TRACE_SUSPEND(0);
1614
1615 dpm_wait_for_subordinate(dev, async);
1616
1617 if (async_error) {
1618 dev->power.direct_complete = false;
1619 goto Complete;
1620 }
1621
1622 /*
1623 * Wait for possible runtime PM transitions of the device in progress
1624 * to complete and if there's a runtime resume request pending for it,
1625 * resume it before proceeding with invoking the system-wide suspend
1626 * callbacks for it.
1627 *
1628 * If the system-wide suspend callbacks below change the configuration
1629 * of the device, they must disable runtime PM for it or otherwise
1630 * ensure that its runtime-resume callbacks will not be confused by that
1631 * change in case they are invoked going forward.
1632 */
1633 pm_runtime_barrier(dev);
1634
1635 if (pm_wakeup_pending()) {
1636 dev->power.direct_complete = false;
1637 async_error = -EBUSY;
1638 goto Complete;
1639 }
1640
1641 if (dev->power.syscore)
1642 goto Complete;
1643
1644 /* Avoid direct_complete to let wakeup_path propagate. */
1645 if (device_may_wakeup(dev) || device_wakeup_path(dev))
1646 dev->power.direct_complete = false;
1647
1648 if (dev->power.direct_complete) {
1649 if (pm_runtime_status_suspended(dev)) {
1650 pm_runtime_disable(dev);
1651 if (pm_runtime_status_suspended(dev)) {
1652 pm_dev_dbg(dev, state, "direct-complete ");
1653 goto Complete;
1654 }
1655
1656 pm_runtime_enable(dev);
1657 }
1658 dev->power.direct_complete = false;
1659 }
1660
1661 dev->power.may_skip_resume = true;
1662 dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
1663
1664 dpm_watchdog_set(&wd, dev);
1665 device_lock(dev);
1666
1667 if (dev->pm_domain) {
1668 info = "power domain ";
1669 callback = pm_op(&dev->pm_domain->ops, state);
1670 goto Run;
1671 }
1672
1673 if (dev->type && dev->type->pm) {
1674 info = "type ";
1675 callback = pm_op(dev->type->pm, state);
1676 goto Run;
1677 }
1678
1679 if (dev->class && dev->class->pm) {
1680 info = "class ";
1681 callback = pm_op(dev->class->pm, state);
1682 goto Run;
1683 }
1684
1685 if (dev->bus) {
1686 if (dev->bus->pm) {
1687 info = "bus ";
1688 callback = pm_op(dev->bus->pm, state);
1689 } else if (dev->bus->suspend) {
1690 pm_dev_dbg(dev, state, "legacy bus ");
1691 error = legacy_suspend(dev, state, dev->bus->suspend,
1692 "legacy bus ");
1693 goto End;
1694 }
1695 }
1696
1697 Run:
1698 if (!callback && dev->driver && dev->driver->pm) {
1699 info = "driver ";
1700 callback = pm_op(dev->driver->pm, state);
1701 }
1702
1703 error = dpm_run_callback(callback, dev, state, info);
1704
1705 End:
1706 if (!error) {
1707 dev->power.is_suspended = true;
1708 if (device_may_wakeup(dev))
1709 dev->power.wakeup_path = true;
1710
1711 dpm_propagate_wakeup_to_parent(dev);
1712 dpm_clear_superiors_direct_complete(dev);
1713 }
1714
1715 device_unlock(dev);
1716 dpm_watchdog_clear(&wd);
1717
1718 Complete:
1719 if (error) {
1720 async_error = error;
1721 dpm_save_failed_dev(dev_name(dev));
1722 pm_dev_err(dev, state, async ? " async" : "", error);
1723 }
1724
1725 complete_all(&dev->power.completion);
1726 TRACE_SUSPEND(error);
1727 return error;
1728 }
1729
static void async_suspend(void *data, async_cookie_t cookie)
1731 {
1732 struct device *dev = data;
1733
1734 device_suspend(dev, pm_transition, true);
1735 put_device(dev);
1736 }
1737
1738 /**
1739 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1740 * @state: PM transition of the system being carried out.
1741 */
int dpm_suspend(pm_message_t state)
1743 {
1744 ktime_t starttime = ktime_get();
1745 int error = 0;
1746
1747 trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1748 might_sleep();
1749
1750 devfreq_suspend();
1751 cpufreq_suspend();
1752
1753 pm_transition = state;
1754 async_error = 0;
1755
1756 mutex_lock(&dpm_list_mtx);
1757
1758 while (!list_empty(&dpm_prepared_list)) {
1759 struct device *dev = to_device(dpm_prepared_list.prev);
1760
1761 list_move(&dev->power.entry, &dpm_suspended_list);
1762
1763 if (dpm_async_fn(dev, async_suspend))
1764 continue;
1765
1766 get_device(dev);
1767
1768 mutex_unlock(&dpm_list_mtx);
1769
1770 error = device_suspend(dev, state, false);
1771
1772 put_device(dev);
1773
1774 mutex_lock(&dpm_list_mtx);
1775
1776 if (error || async_error)
1777 break;
1778 }
1779
1780 mutex_unlock(&dpm_list_mtx);
1781
1782 async_synchronize_full();
1783 if (!error)
1784 error = async_error;
1785
1786 if (error)
1787 dpm_save_failed_step(SUSPEND_SUSPEND);
1788
1789 dpm_show_time(starttime, state, error, NULL);
1790 trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1791 return error;
1792 }
1793
1794 /**
1795 * device_prepare - Prepare a device for system power transition.
1796 * @dev: Device to handle.
1797 * @state: PM transition of the system being carried out.
1798 *
1799 * Execute the ->prepare() callback(s) for given device. No new children of the
1800 * device may be registered after this function has returned.
1801 */
static int device_prepare(struct device *dev, pm_message_t state)
1803 {
1804 int (*callback)(struct device *) = NULL;
1805 int ret = 0;
1806
1807 /*
1808 * If a device's parent goes into runtime suspend at the wrong time,
1809 * it won't be possible to resume the device. To prevent this we
1810 * block runtime suspend here, during the prepare phase, and allow
1811 * it again during the complete phase.
1812 */
1813 pm_runtime_get_noresume(dev);
1814
1815 if (dev->power.syscore)
1816 return 0;
1817
1818 device_lock(dev);
1819
1820 dev->power.wakeup_path = false;
1821
1822 if (dev->power.no_pm_callbacks)
1823 goto unlock;
1824
1825 if (dev->pm_domain)
1826 callback = dev->pm_domain->ops.prepare;
1827 else if (dev->type && dev->type->pm)
1828 callback = dev->type->pm->prepare;
1829 else if (dev->class && dev->class->pm)
1830 callback = dev->class->pm->prepare;
1831 else if (dev->bus && dev->bus->pm)
1832 callback = dev->bus->pm->prepare;
1833
1834 if (!callback && dev->driver && dev->driver->pm)
1835 callback = dev->driver->pm->prepare;
1836
1837 if (callback)
1838 ret = callback(dev);
1839
1840 unlock:
1841 device_unlock(dev);
1842
1843 if (ret < 0) {
1844 suspend_report_result(dev, callback, ret);
1845 pm_runtime_put(dev);
1846 return ret;
1847 }
1848 /*
1849 * A positive return value from ->prepare() means "this device appears
1850 * to be runtime-suspended and its state is fine, so if it really is
1851 * runtime-suspended, you can leave it in that state provided that you
1852 * will do the same thing with all of its descendants". This only
1853 * applies to suspend transitions, however.
1854 */
1855 spin_lock_irq(&dev->power.lock);
1856 dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1857 (ret > 0 || dev->power.no_pm_callbacks) &&
1858 !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
1859 spin_unlock_irq(&dev->power.lock);
1860 return 0;
1861 }
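/*
 * Illustrative sketch of the positive-return convention described above
 * (hypothetical driver, not code from this file): a ->prepare() callback
 * that lets the core leave a runtime-suspended device alone for the whole
 * transition ("direct complete"), provided DPM_FLAG_NO_DIRECT_COMPLETE is
 * not set for it.
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		// Runtime-suspended state is also a valid system-suspend
 *		// state for this device, so report that with a positive
 *		// return value.
 *		return pm_runtime_suspended(dev);
 *	}
 */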
1862
1863 /**
1864 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1865 * @state: PM transition of the system being carried out.
1866 *
1867 * Execute the ->prepare() callback(s) for all devices.
1868 */
int dpm_prepare(pm_message_t state)
1870 {
1871 int error = 0;
1872
1873 trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1874 might_sleep();
1875
1876 /*
 * Give the known devices a chance to complete their probes before device
 * probing is disabled.  This sync point is important at least at boot time
 * and during hibernation restore.
1880 */
1881 wait_for_device_probe();
1882 /*
 * It is unsafe for device probing to happen during suspend or hibernation;
 * system behavior would be unpredictable in that case.  So prohibit device
 * probing here and defer probes instead.  The normal behavior will be
 * restored in dpm_complete().
1887 */
1888 device_block_probing();
1889
1890 mutex_lock(&dpm_list_mtx);
1891 while (!list_empty(&dpm_list) && !error) {
1892 struct device *dev = to_device(dpm_list.next);
1893
1894 get_device(dev);
1895
1896 mutex_unlock(&dpm_list_mtx);
1897
1898 trace_device_pm_callback_start(dev, "", state.event);
1899 error = device_prepare(dev, state);
1900 trace_device_pm_callback_end(dev, error);
1901
1902 mutex_lock(&dpm_list_mtx);
1903
1904 if (!error) {
1905 dev->power.is_prepared = true;
1906 if (!list_empty(&dev->power.entry))
1907 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1908 } else if (error == -EAGAIN) {
1909 error = 0;
1910 } else {
1911 dev_info(dev, "not prepared for power transition: code %d\n",
1912 error);
1913 }
1914
1915 mutex_unlock(&dpm_list_mtx);
1916
1917 put_device(dev);
1918
1919 mutex_lock(&dpm_list_mtx);
1920 }
1921 mutex_unlock(&dpm_list_mtx);
1922 trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1923 return error;
1924 }
1925
1926 /**
1927 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1928 * @state: PM transition of the system being carried out.
1929 *
1930 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1931 * callbacks for them.
1932 */
int dpm_suspend_start(pm_message_t state)
1934 {
1935 ktime_t starttime = ktime_get();
1936 int error;
1937
1938 error = dpm_prepare(state);
1939 if (error)
1940 dpm_save_failed_step(SUSPEND_PREPARE);
1941 else
1942 error = dpm_suspend(state);
1943
1944 dpm_show_time(starttime, state, error, "start");
1945 return error;
1946 }
1947 EXPORT_SYMBOL_GPL(dpm_suspend_start);
1948
void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
1950 {
1951 if (ret)
1952 dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret);
1953 }
1954 EXPORT_SYMBOL_GPL(__suspend_report_result);
1955
1956 /**
1957 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1958 * @subordinate: Device that needs to wait for @dev.
1959 * @dev: Device to wait for.
1960 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1962 {
1963 dpm_wait(dev, subordinate->power.async_suspend);
1964 return async_error;
1965 }
1966 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
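/*
 * Illustrative use of device_pm_wait_for_dev() (hypothetical driver data and
 * companion pointer, not code from this file): a callback that must not run
 * before another device has finished the current phase.
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->companion);
 *		if (error)
 *			return error;
 *		// foo->companion has completed its resume callback here
 *		return 0;
 *	}
 *
 * In many cases a device link expresses such a dependency more robustly.
 */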
1967
1968 /**
1969 * dpm_for_each_dev - device iterator.
1970 * @data: data for the callback.
1971 * @fn: function to be called for each device.
1972 *
1973 * Iterate over devices in dpm_list, and call @fn for each device,
1974 * passing it @data.
1975 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1977 {
1978 struct device *dev;
1979
1980 if (!fn)
1981 return;
1982
1983 device_pm_lock();
1984 list_for_each_entry(dev, &dpm_list, power.entry)
1985 fn(dev, data);
1986 device_pm_unlock();
1987 }
1988 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
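/*
 * Illustrative use of dpm_for_each_dev() (hypothetical callback, not code
 * from this file): counting the devices currently tracked by the PM core.
 * Note that the callback runs with dpm_list_mtx held, so it must not try to
 * acquire device locks (see the comment near the top of this file).
 *
 *	static void count_one(struct device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int n = 0;
 *
 *	dpm_for_each_dev(&n, count_one);
 */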
1989
static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1991 {
1992 if (!ops)
1993 return true;
1994
1995 return !ops->prepare &&
1996 !ops->suspend &&
1997 !ops->suspend_late &&
1998 !ops->suspend_noirq &&
1999 !ops->resume_noirq &&
2000 !ops->resume_early &&
2001 !ops->resume &&
2002 !ops->complete;
2003 }
2004
void device_pm_check_callbacks(struct device *dev)
2006 {
2007 unsigned long flags;
2008
2009 spin_lock_irqsave(&dev->power.lock, flags);
2010 dev->power.no_pm_callbacks =
2011 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2012 !dev->bus->suspend && !dev->bus->resume)) &&
2013 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2014 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2015 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2016 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2017 !dev->driver->suspend && !dev->driver->resume));
2018 spin_unlock_irqrestore(&dev->power.lock, flags);
2019 }
2020
bool dev_pm_skip_suspend(struct device *dev)
2022 {
2023 return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2024 pm_runtime_status_suspended(dev);
2025 }
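/*
 * Illustrative sketch of the driver flags consulted above (hypothetical
 * driver, not code from this file): a driver whose runtime-suspended state
 * is also a valid system-wide suspend state can let the core skip redundant
 * callbacks.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *					     DPM_FLAG_MAY_SKIP_RESUME);
 *		return 0;
 *	}
 *
 * DPM_FLAG_SMART_SUSPEND lets dev_pm_skip_suspend() return true for a
 * runtime-suspended device, and DPM_FLAG_MAY_SKIP_RESUME allows
 * dev_pm_skip_resume() to leave it suspended across resume as long as
 * nothing requires it to be active (power.must_resume unset).
 */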
2026