Lines matching refs:dev (Linux device PM core, drivers/base/power/main.c)

107 void device_pm_sleep_init(struct device *dev) in device_pm_sleep_init() argument
109 dev->power.is_prepared = false; in device_pm_sleep_init()
110 dev->power.is_suspended = false; in device_pm_sleep_init()
111 dev->power.is_noirq_suspended = false; in device_pm_sleep_init()
112 dev->power.is_late_suspended = false; in device_pm_sleep_init()
113 init_completion(&dev->power.completion); in device_pm_sleep_init()
114 complete_all(&dev->power.completion); in device_pm_sleep_init()
115 dev->power.wakeup = NULL; in device_pm_sleep_init()
116 INIT_LIST_HEAD(&dev->power.entry); in device_pm_sleep_init()
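The matches above cover nearly all of device_pm_sleep_init(); only the braces fall outside the refs search. A sketch of how the full function plausibly reads (the completion is initialized and then immediately completed, so a dpm_wait() on a device that has not yet entered a sleep transition returns without blocking):

    void device_pm_sleep_init(struct device *dev)
    {
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        dev->power.is_noirq_suspended = false;
        dev->power.is_late_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        INIT_LIST_HEAD(&dev->power.entry);
    }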
139 void device_pm_add(struct device *dev) in device_pm_add() argument
142 if (device_pm_not_required(dev)) in device_pm_add()
146 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); in device_pm_add()
147 device_pm_check_callbacks(dev); in device_pm_add()
149 if (dev->parent && dev->parent->power.is_prepared) in device_pm_add()
150 dev_warn(dev, "parent %s should not be sleeping\n", in device_pm_add()
151 dev_name(dev->parent)); in device_pm_add()
152 list_add_tail(&dev->power.entry, &dpm_list); in device_pm_add()
153 dev->power.in_dpm_list = true; in device_pm_add()
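device_pm_add() puts the device on dpm_list under dpm_list_mtx; the lock/unlock lines and the first half of the pr_debug() do not reference dev and so are elided by the refs match. A reconstruction under that assumption (the pr_debug format string is inferred from the matched second line):

    void device_pm_add(struct device *dev)
    {
        /* Devices flagged as not required for PM are never put on dpm_list. */
        if (device_pm_not_required(dev))
            return;

        pr_debug("Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        device_pm_check_callbacks(dev);
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
            dev_warn(dev, "parent %s should not be sleeping\n",
                     dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        dev->power.in_dpm_list = true;
        mutex_unlock(&dpm_list_mtx);
    }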
161 void device_pm_remove(struct device *dev) in device_pm_remove() argument
163 if (device_pm_not_required(dev)) in device_pm_remove()
167 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); in device_pm_remove()
168 complete_all(&dev->power.completion); in device_pm_remove()
170 list_del_init(&dev->power.entry); in device_pm_remove()
171 dev->power.in_dpm_list = false; in device_pm_remove()
173 device_wakeup_disable(dev); in device_pm_remove()
174 pm_runtime_remove(dev); in device_pm_remove()
175 device_pm_check_callbacks(dev); in device_pm_remove()
210 void device_pm_move_last(struct device *dev) in device_pm_move_last() argument
213 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); in device_pm_move_last()
214 list_move_tail(&dev->power.entry, &dpm_list); in device_pm_move_last()
217 static ktime_t initcall_debug_start(struct device *dev, void *cb) in initcall_debug_start() argument
222 dev_info(dev, "calling %ps @ %i, parent: %s\n", cb, in initcall_debug_start()
224 dev->parent ? dev_name(dev->parent) : "none"); in initcall_debug_start()
228 static void initcall_debug_report(struct device *dev, ktime_t calltime, in initcall_debug_report() argument
237 dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error, in initcall_debug_report()
246 static void dpm_wait(struct device *dev, bool async) in dpm_wait() argument
248 if (!dev) in dpm_wait()
251 if (async || (pm_async_enabled && dev->power.async_suspend)) in dpm_wait()
252 wait_for_completion(&dev->power.completion); in dpm_wait()
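dpm_wait() is the basic synchronization primitive of this file: it blocks on the target device's power.completion, but only when the caller itself runs async or the target is marked for async suspend (otherwise list ordering already guarantees the dependency). Sketch, with the elided early return reinstated:

    static void dpm_wait(struct device *dev, bool async)
    {
        if (!dev)
            return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
            wait_for_completion(&dev->power.completion);
    }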
255 static int dpm_wait_fn(struct device *dev, void *async_ptr) in dpm_wait_fn() argument
257 dpm_wait(dev, *((bool *)async_ptr)); in dpm_wait_fn()
261 static void dpm_wait_for_children(struct device *dev, bool async) in dpm_wait_for_children() argument
263 device_for_each_child(dev, &async, dpm_wait_fn); in dpm_wait_for_children()
266 static void dpm_wait_for_suppliers(struct device *dev, bool async) in dpm_wait_for_suppliers() argument
280 dev_for_each_link_to_supplier(link, dev) in dpm_wait_for_suppliers()
288 static bool dpm_wait_for_superior(struct device *dev, bool async) in dpm_wait_for_superior() argument
301 if (!device_pm_initialized(dev)) { in dpm_wait_for_superior()
306 parent = get_device(dev->parent); in dpm_wait_for_superior()
313 dpm_wait_for_suppliers(dev, async); in dpm_wait_for_superior()
319 return device_pm_initialized(dev); in dpm_wait_for_superior()
322 static void dpm_wait_for_consumers(struct device *dev, bool async) in dpm_wait_for_consumers() argument
338 dev_for_each_link_to_consumer(link, dev) in dpm_wait_for_consumers()
346 static void dpm_wait_for_subordinate(struct device *dev, bool async) in dpm_wait_for_subordinate() argument
348 dpm_wait_for_children(dev, async); in dpm_wait_for_subordinate()
349 dpm_wait_for_consumers(dev, async); in dpm_wait_for_subordinate()
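These helpers fan the wait out across everything that depends on dev: dpm_wait_for_subordinate() covers both children (the parent/child tree) and consumers (device links). The per-child callback returns 0 so device_for_each_child() visits every child rather than stopping early; that return is elided by the refs match. Sketch:

    static int dpm_wait_fn(struct device *dev, void *async_ptr)
    {
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
    }

    static void dpm_wait_for_children(struct device *dev, bool async)
    {
        device_for_each_child(dev, &async, dpm_wait_fn);
    }

    static void dpm_wait_for_subordinate(struct device *dev, bool async)
    {
        dpm_wait_for_children(dev, async);
        dpm_wait_for_consumers(dev, async);
    }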
451 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info) in pm_dev_dbg() argument
453 dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event), in pm_dev_dbg()
454 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ? in pm_dev_dbg()
455 ", may wakeup" : "", dev->power.driver_flags); in pm_dev_dbg()
458 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info, in pm_dev_err() argument
461 dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info, in pm_dev_err()
485 static int dpm_run_callback(pm_callback_t cb, struct device *dev, in dpm_run_callback() argument
494 calltime = initcall_debug_start(dev, cb); in dpm_run_callback()
496 pm_dev_dbg(dev, state, info); in dpm_run_callback()
497 trace_device_pm_callback_start(dev, info, state.event); in dpm_run_callback()
498 error = cb(dev); in dpm_run_callback()
499 trace_device_pm_callback_end(dev, error); in dpm_run_callback()
500 suspend_report_result(dev, cb, error); in dpm_run_callback()
502 initcall_debug_report(dev, calltime, cb, error); in dpm_run_callback()
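dpm_run_callback() is the single funnel through which the PM callbacks in this file are invoked, wrapping each one with tracing, optional initcall-debug timing, and error reporting. Reconstructed with the NULL check, declarations, and return that the refs match elides:

    static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                                pm_message_t state, const char *info)
    {
        ktime_t calltime;
        int error;

        if (!cb)
            return 0;

        calltime = initcall_debug_start(dev, cb);

        pm_dev_dbg(dev, state, info);
        trace_device_pm_callback_start(dev, info, state.event);
        error = cb(dev);
        trace_device_pm_callback_end(dev, error);
        suspend_report_result(dev, cb, error);

        initcall_debug_report(dev, calltime, cb, error);

        return error;
    }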
509 struct device *dev; member
533 dev_emerg(wd->dev, "**** DPM device timeout ****\n"); in dpm_watchdog_handler()
536 dev_driver_string(wd->dev), dev_name(wd->dev)); in dpm_watchdog_handler()
540 dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n", in dpm_watchdog_handler()
553 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev) in dpm_watchdog_set() argument
557 wd->dev = dev; in dpm_watchdog_set()
596 bool dev_pm_skip_resume(struct device *dev) in dev_pm_skip_resume() argument
602 return dev_pm_skip_suspend(dev); in dev_pm_skip_resume()
604 return !dev->power.must_resume; in dev_pm_skip_resume()
607 static bool is_async(struct device *dev) in is_async() argument
609 return dev->power.async_suspend && pm_async_enabled in is_async()
613 static bool __dpm_async(struct device *dev, async_func_t func) in __dpm_async() argument
615 if (dev->power.work_in_progress) in __dpm_async()
618 if (!is_async(dev)) in __dpm_async()
621 dev->power.work_in_progress = true; in __dpm_async()
623 get_device(dev); in __dpm_async()
625 if (async_schedule_dev_nocall(func, dev)) in __dpm_async()
628 put_device(dev); in __dpm_async()
633 static bool dpm_async_fn(struct device *dev, async_func_t func) in dpm_async_fn() argument
637 return __dpm_async(dev, func); in dpm_async_fn()
640 static int dpm_async_with_cleanup(struct device *dev, void *fn) in dpm_async_with_cleanup() argument
644 if (!__dpm_async(dev, fn)) in dpm_async_with_cleanup()
645 dev->power.work_in_progress = false; in dpm_async_with_cleanup()
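__dpm_async() tries to hand a device off to the async machinery: it reports true when work is already pending or was successfully scheduled, and drops the extra device reference on failure. The two callers differ only in how they treat a refusal; the return statements and the async_wip_mtx serialization are elided by the refs match, so this sketch reinstates them as an assumption:

    static bool __dpm_async(struct device *dev, async_func_t func)
    {
        if (dev->power.work_in_progress)
            return true;

        if (!is_async(dev))
            return false;

        dev->power.work_in_progress = true;

        get_device(dev);

        if (async_schedule_dev_nocall(func, dev))
            return true;

        put_device(dev);

        return false;
    }

    static bool dpm_async_fn(struct device *dev, async_func_t func)
    {
        guard(mutex)(&async_wip_mtx);

        return __dpm_async(dev, func);
    }

    static int dpm_async_with_cleanup(struct device *dev, void *fn)
    {
        guard(mutex)(&async_wip_mtx);

        /* Roll back the in-progress marker if scheduling did not happen. */
        if (!__dpm_async(dev, fn))
            dev->power.work_in_progress = false;

        return 0;
    }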
650 static void dpm_async_resume_children(struct device *dev, async_func_t func) in dpm_async_resume_children() argument
663 device_for_each_child(dev, func, dpm_async_with_cleanup); in dpm_async_resume_children()
666 static void dpm_async_resume_subordinate(struct device *dev, async_func_t func) in dpm_async_resume_subordinate() argument
671 dpm_async_resume_children(dev, func); in dpm_async_resume_subordinate()
676 dev_for_each_link_to_consumer(link, dev) in dpm_async_resume_subordinate()
683 static void dpm_clear_async_state(struct device *dev) in dpm_clear_async_state() argument
685 reinit_completion(&dev->power.completion); in dpm_clear_async_state()
686 dev->power.work_in_progress = false; in dpm_clear_async_state()
689 static bool dpm_root_device(struct device *dev) in dpm_root_device() argument
698 return !dev->parent && list_empty(&dev->links.suppliers); in dpm_root_device()
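Root devices, having no parent and no suppliers, seed the async resume passes below, since nothing needs to resume before them. Sketch, with the elided comment block condensed to one line (the lockdep assertion is inferred from dpm_list being protected by dpm_list_mtx):

    static bool dpm_root_device(struct device *dev)
    {
        lockdep_assert_held(&dpm_list_mtx);

        /* No parent and no suppliers: nothing must resume before it. */
        return !dev->parent && list_empty(&dev->links.suppliers);
    }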
712 static void device_resume_noirq(struct device *dev, pm_message_t state, bool async) in device_resume_noirq() argument
719 TRACE_DEVICE(dev); in device_resume_noirq()
722 if (dev->power.syscore || dev->power.direct_complete) in device_resume_noirq()
725 if (!dev->power.is_noirq_suspended) { in device_resume_noirq()
734 if (dev_pm_skip_suspend(dev)) in device_resume_noirq()
735 dev->power.must_resume = false; in device_resume_noirq()
740 if (!dpm_wait_for_superior(dev, async)) in device_resume_noirq()
743 skip_resume = dev_pm_skip_resume(dev); in device_resume_noirq()
755 pm_runtime_set_suspended(dev); in device_resume_noirq()
756 else if (dev_pm_smart_suspend(dev)) in device_resume_noirq()
757 pm_runtime_set_active(dev); in device_resume_noirq()
759 if (dev->pm_domain) { in device_resume_noirq()
761 callback = pm_noirq_op(&dev->pm_domain->ops, state); in device_resume_noirq()
762 } else if (dev->type && dev->type->pm) { in device_resume_noirq()
764 callback = pm_noirq_op(dev->type->pm, state); in device_resume_noirq()
765 } else if (dev->class && dev->class->pm) { in device_resume_noirq()
767 callback = pm_noirq_op(dev->class->pm, state); in device_resume_noirq()
768 } else if (dev->bus && dev->bus->pm) { in device_resume_noirq()
770 callback = pm_noirq_op(dev->bus->pm, state); in device_resume_noirq()
778 if (dev->driver && dev->driver->pm) { in device_resume_noirq()
780 callback = pm_noirq_op(dev->driver->pm, state); in device_resume_noirq()
784 error = dpm_run_callback(callback, dev, state, info); in device_resume_noirq()
787 dev->power.is_noirq_suspended = false; in device_resume_noirq()
790 complete_all(&dev->power.completion); in device_resume_noirq()
795 dpm_save_failed_dev(dev_name(dev)); in device_resume_noirq()
796 pm_dev_err(dev, state, async ? " async noirq" : " noirq", error); in device_resume_noirq()
799 dpm_async_resume_subordinate(dev, async_resume_noirq); in device_resume_noirq()
804 struct device *dev = data; in async_resume_noirq() local
806 device_resume_noirq(dev, pm_transition, true); in async_resume_noirq()
807 put_device(dev); in async_resume_noirq()
812 struct device *dev; in dpm_noirq_resume_devices() local
826 list_for_each_entry(dev, &dpm_noirq_list, power.entry) { in dpm_noirq_resume_devices()
827 dpm_clear_async_state(dev); in dpm_noirq_resume_devices()
828 if (dpm_root_device(dev)) in dpm_noirq_resume_devices()
829 dpm_async_with_cleanup(dev, async_resume_noirq); in dpm_noirq_resume_devices()
833 dev = to_device(dpm_noirq_list.next); in dpm_noirq_resume_devices()
834 list_move_tail(&dev->power.entry, &dpm_late_early_list); in dpm_noirq_resume_devices()
836 if (!dpm_async_fn(dev, async_resume_noirq)) { in dpm_noirq_resume_devices()
837 get_device(dev); in dpm_noirq_resume_devices()
841 device_resume_noirq(dev, state, false); in dpm_noirq_resume_devices()
843 put_device(dev); in dpm_noirq_resume_devices()
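The resume loops all share the two-pass shape visible in the fragments above: first seed async work for every root device, then drain the list in order, handling synchronously whatever could not be scheduled async. A sketch of the core of dpm_noirq_resume_devices(), with the dpm_list_mtx lock/unlock lines (elided by the refs match) reinstated:

    mutex_lock(&dpm_list_mtx);

    /* Start "async" root devices upfront so they don't wait on "sync" ones. */
    list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
        dpm_clear_async_state(dev);
        if (dpm_root_device(dev))
            dpm_async_with_cleanup(dev, async_resume_noirq);
    }

    while (!list_empty(&dpm_noirq_list)) {
        dev = to_device(dpm_noirq_list.next);
        list_move_tail(&dev->power.entry, &dpm_late_early_list);

        if (!dpm_async_fn(dev, async_resume_noirq)) {
            get_device(dev);
            mutex_unlock(&dpm_list_mtx);

            device_resume_noirq(dev, state, false);

            put_device(dev);
            mutex_lock(&dpm_list_mtx);
        }
    }
    mutex_unlock(&dpm_list_mtx);
    async_synchronize_full();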
882 static void device_resume_early(struct device *dev, pm_message_t state, bool async) in device_resume_early() argument
888 TRACE_DEVICE(dev); in device_resume_early()
891 if (dev->power.syscore || dev->power.direct_complete) in device_resume_early()
894 if (!dev->power.is_late_suspended) in device_resume_early()
897 if (!dpm_wait_for_superior(dev, async)) in device_resume_early()
900 if (dev->pm_domain) { in device_resume_early()
902 callback = pm_late_early_op(&dev->pm_domain->ops, state); in device_resume_early()
903 } else if (dev->type && dev->type->pm) { in device_resume_early()
905 callback = pm_late_early_op(dev->type->pm, state); in device_resume_early()
906 } else if (dev->class && dev->class->pm) { in device_resume_early()
908 callback = pm_late_early_op(dev->class->pm, state); in device_resume_early()
909 } else if (dev->bus && dev->bus->pm) { in device_resume_early()
911 callback = pm_late_early_op(dev->bus->pm, state); in device_resume_early()
916 if (dev_pm_skip_resume(dev)) in device_resume_early()
919 if (dev->driver && dev->driver->pm) { in device_resume_early()
921 callback = pm_late_early_op(dev->driver->pm, state); in device_resume_early()
925 error = dpm_run_callback(callback, dev, state, info); in device_resume_early()
928 dev->power.is_late_suspended = false; in device_resume_early()
933 pm_runtime_enable(dev); in device_resume_early()
934 complete_all(&dev->power.completion); in device_resume_early()
938 dpm_save_failed_dev(dev_name(dev)); in device_resume_early()
939 pm_dev_err(dev, state, async ? " async early" : " early", error); in device_resume_early()
942 dpm_async_resume_subordinate(dev, async_resume_early); in device_resume_early()
947 struct device *dev = data; in async_resume_early() local
949 device_resume_early(dev, pm_transition, true); in async_resume_early()
950 put_device(dev); in async_resume_early()
959 struct device *dev; in dpm_resume_early() local
973 list_for_each_entry(dev, &dpm_late_early_list, power.entry) { in dpm_resume_early()
974 dpm_clear_async_state(dev); in dpm_resume_early()
975 if (dpm_root_device(dev)) in dpm_resume_early()
976 dpm_async_with_cleanup(dev, async_resume_early); in dpm_resume_early()
980 dev = to_device(dpm_late_early_list.next); in dpm_resume_early()
981 list_move_tail(&dev->power.entry, &dpm_suspended_list); in dpm_resume_early()
983 if (!dpm_async_fn(dev, async_resume_early)) { in dpm_resume_early()
984 get_device(dev); in dpm_resume_early()
988 device_resume_early(dev, state, false); in dpm_resume_early()
990 put_device(dev); in dpm_resume_early()
1023 static void device_resume(struct device *dev, pm_message_t state, bool async) in device_resume() argument
1030 TRACE_DEVICE(dev); in device_resume()
1033 if (dev->power.syscore) in device_resume()
1036 if (!dev->power.is_suspended) in device_resume()
1039 dev->power.is_suspended = false; in device_resume()
1041 if (dev->power.direct_complete) { in device_resume()
1046 if (dev->power.no_pm_callbacks) in device_resume()
1047 dev->power.is_prepared = false; in device_resume()
1050 pm_runtime_enable(dev); in device_resume()
1054 if (!dpm_wait_for_superior(dev, async)) in device_resume()
1057 dpm_watchdog_set(&wd, dev); in device_resume()
1058 device_lock(dev); in device_resume()
1064 dev->power.is_prepared = false; in device_resume()
1066 if (dev->pm_domain) { in device_resume()
1068 callback = pm_op(&dev->pm_domain->ops, state); in device_resume()
1072 if (dev->type && dev->type->pm) { in device_resume()
1074 callback = pm_op(dev->type->pm, state); in device_resume()
1078 if (dev->class && dev->class->pm) { in device_resume()
1080 callback = pm_op(dev->class->pm, state); in device_resume()
1084 if (dev->bus) { in device_resume()
1085 if (dev->bus->pm) { in device_resume()
1087 callback = pm_op(dev->bus->pm, state); in device_resume()
1088 } else if (dev->bus->resume) { in device_resume()
1090 callback = dev->bus->resume; in device_resume()
1096 if (!callback && dev->driver && dev->driver->pm) { in device_resume()
1098 callback = pm_op(dev->driver->pm, state); in device_resume()
1102 error = dpm_run_callback(callback, dev, state, info); in device_resume()
1104 device_unlock(dev); in device_resume()
1108 complete_all(&dev->power.completion); in device_resume()
1114 dpm_save_failed_dev(dev_name(dev)); in device_resume()
1115 pm_dev_err(dev, state, async ? " async" : "", error); in device_resume()
1118 dpm_async_resume_subordinate(dev, async_resume); in device_resume()
1123 struct device *dev = data; in async_resume() local
1125 device_resume(dev, pm_transition, true); in async_resume()
1126 put_device(dev); in async_resume()
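device_resume() above illustrates the callback lookup order used throughout this file: power domain first, then device type, class, and bus, with the driver's own ops consulted only when no subsystem supplied a callback (plus a legacy dev->bus->resume fallback). A hypothetical helper distilling that priority chain; dpm_subsys_resume_cb() is not a function in this file, just an illustration:

    /* Hypothetical: condenses the subsystem-callback cascade in device_resume(). */
    static pm_callback_t dpm_subsys_resume_cb(struct device *dev, pm_message_t state)
    {
        if (dev->pm_domain)
            return pm_op(&dev->pm_domain->ops, state);
        if (dev->type && dev->type->pm)
            return pm_op(dev->type->pm, state);
        if (dev->class && dev->class->pm)
            return pm_op(dev->class->pm, state);
        if (dev->bus && dev->bus->pm)
            return pm_op(dev->bus->pm, state);
        return NULL;    /* fall back to dev->driver->pm, then legacy bus ops */
    }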
1138 struct device *dev; in dpm_resume() local
1152 list_for_each_entry(dev, &dpm_suspended_list, power.entry) { in dpm_resume()
1153 dpm_clear_async_state(dev); in dpm_resume()
1154 if (dpm_root_device(dev)) in dpm_resume()
1155 dpm_async_with_cleanup(dev, async_resume); in dpm_resume()
1159 dev = to_device(dpm_suspended_list.next); in dpm_resume()
1160 list_move_tail(&dev->power.entry, &dpm_prepared_list); in dpm_resume()
1162 if (!dpm_async_fn(dev, async_resume)) { in dpm_resume()
1163 get_device(dev); in dpm_resume()
1167 device_resume(dev, state, false); in dpm_resume()
1169 put_device(dev); in dpm_resume()
1190 static void device_complete(struct device *dev, pm_message_t state) in device_complete() argument
1195 if (dev->power.syscore) in device_complete()
1198 device_lock(dev); in device_complete()
1200 if (dev->pm_domain) { in device_complete()
1202 callback = dev->pm_domain->ops.complete; in device_complete()
1203 } else if (dev->type && dev->type->pm) { in device_complete()
1205 callback = dev->type->pm->complete; in device_complete()
1206 } else if (dev->class && dev->class->pm) { in device_complete()
1208 callback = dev->class->pm->complete; in device_complete()
1209 } else if (dev->bus && dev->bus->pm) { in device_complete()
1211 callback = dev->bus->pm->complete; in device_complete()
1214 if (!callback && dev->driver && dev->driver->pm) { in device_complete()
1216 callback = dev->driver->pm->complete; in device_complete()
1220 pm_dev_dbg(dev, state, info); in device_complete()
1221 callback(dev); in device_complete()
1224 device_unlock(dev); in device_complete()
1228 pm_runtime_unblock(dev); in device_complete()
1229 pm_runtime_put(dev); in device_complete()
1248 struct device *dev = to_device(dpm_prepared_list.prev); in dpm_complete() local
1250 get_device(dev); in dpm_complete()
1251 dev->power.is_prepared = false; in dpm_complete()
1252 list_move(&dev->power.entry, &list); in dpm_complete()
1256 trace_device_pm_callback_start(dev, "", state.event); in dpm_complete()
1257 device_complete(dev, state); in dpm_complete()
1258 trace_device_pm_callback_end(dev, 0); in dpm_complete()
1260 put_device(dev); in dpm_complete()
1290 static bool dpm_leaf_device(struct device *dev) in dpm_leaf_device() argument
1296 child = device_find_any_child(dev); in dpm_leaf_device()
1308 return list_empty(&dev->links.consumers); in dpm_leaf_device()
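dpm_leaf_device() mirrors dpm_root_device() for the suspend direction: a device with no children and no consumers may suspend first. The put_device() balancing the reference taken by device_find_any_child() is elided by the refs match; sketch:

    static bool dpm_leaf_device(struct device *dev)
    {
        struct device *child;

        lockdep_assert_held(&dpm_list_mtx);

        child = device_find_any_child(dev);
        if (child) {
            put_device(child);
            return false;
        }

        return list_empty(&dev->links.consumers);
    }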
1311 static bool dpm_async_suspend_parent(struct device *dev, async_func_t func) in dpm_async_suspend_parent() argument
1322 if (!device_pm_initialized(dev)) in dpm_async_suspend_parent()
1326 if (dev->parent) in dpm_async_suspend_parent()
1327 dpm_async_with_cleanup(dev->parent, func); in dpm_async_suspend_parent()
1332 static void dpm_async_suspend_superior(struct device *dev, async_func_t func) in dpm_async_suspend_superior() argument
1337 if (!dpm_async_suspend_parent(dev, func)) in dpm_async_suspend_superior()
1343 dev_for_each_link_to_supplier(link, dev) in dpm_async_suspend_superior()
1352 struct device *dev; in dpm_async_suspend_complete_all() local
1356 list_for_each_entry_reverse(dev, device_list, power.entry) { in dpm_async_suspend_complete_all()
1361 if (!dev->power.work_in_progress) in dpm_async_suspend_complete_all()
1362 complete_all(&dev->power.completion); in dpm_async_suspend_complete_all()
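If a suspend phase aborts, devices that were never picked up by an async handler would leave waiters blocked on their completions forever; this helper completes them by hand. A sketch, assuming the async_wip_mtx serialization that the elided lines appear to provide in the source:

    static void dpm_async_suspend_complete_all(struct list_head *device_list)
    {
        struct device *dev;

        guard(mutex)(&async_wip_mtx);

        list_for_each_entry_reverse(dev, device_list, power.entry) {
            /*
             * If async processing never started for this device, wake up
             * anyone waiting on its completion.
             */
            if (!dev->power.work_in_progress)
                complete_all(&dev->power.completion);
        }
    }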
1387 static void dpm_superior_set_must_resume(struct device *dev) in dpm_superior_set_must_resume() argument
1392 if (dev->parent) in dpm_superior_set_must_resume()
1393 dev->parent->power.must_resume = true; in dpm_superior_set_must_resume()
1397 dev_for_each_link_to_supplier(link, dev) in dpm_superior_set_must_resume()
1414 static void device_suspend_noirq(struct device *dev, pm_message_t state, bool async) in device_suspend_noirq() argument
1420 TRACE_DEVICE(dev); in device_suspend_noirq()
1423 dpm_wait_for_subordinate(dev, async); in device_suspend_noirq()
1428 if (dev->power.syscore || dev->power.direct_complete) in device_suspend_noirq()
1431 if (dev->pm_domain) { in device_suspend_noirq()
1433 callback = pm_noirq_op(&dev->pm_domain->ops, state); in device_suspend_noirq()
1434 } else if (dev->type && dev->type->pm) { in device_suspend_noirq()
1436 callback = pm_noirq_op(dev->type->pm, state); in device_suspend_noirq()
1437 } else if (dev->class && dev->class->pm) { in device_suspend_noirq()
1439 callback = pm_noirq_op(dev->class->pm, state); in device_suspend_noirq()
1440 } else if (dev->bus && dev->bus->pm) { in device_suspend_noirq()
1442 callback = pm_noirq_op(dev->bus->pm, state); in device_suspend_noirq()
1447 if (dev_pm_skip_suspend(dev)) in device_suspend_noirq()
1450 if (dev->driver && dev->driver->pm) { in device_suspend_noirq()
1452 callback = pm_noirq_op(dev->driver->pm, state); in device_suspend_noirq()
1456 error = dpm_run_callback(callback, dev, state, info); in device_suspend_noirq()
1459 dpm_save_failed_dev(dev_name(dev)); in device_suspend_noirq()
1460 pm_dev_err(dev, state, async ? " async noirq" : " noirq", error); in device_suspend_noirq()
1465 dev->power.is_noirq_suspended = true; in device_suspend_noirq()
1473 if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) && in device_suspend_noirq()
1474 dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev)) in device_suspend_noirq()
1475 dev->power.must_resume = true; in device_suspend_noirq()
1477 if (dev->power.must_resume) in device_suspend_noirq()
1478 dpm_superior_set_must_resume(dev); in device_suspend_noirq()
1481 complete_all(&dev->power.completion); in device_suspend_noirq()
1487 dpm_async_suspend_superior(dev, async_suspend_noirq); in device_suspend_noirq()
1492 struct device *dev = data; in async_suspend_noirq() local
1494 device_suspend_noirq(dev, pm_transition, true); in async_suspend_noirq()
1495 put_device(dev); in async_suspend_noirq()
1501 struct device *dev; in dpm_noirq_suspend_devices() local
1515 list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) { in dpm_noirq_suspend_devices()
1516 dpm_clear_async_state(dev); in dpm_noirq_suspend_devices()
1517 if (dpm_leaf_device(dev)) in dpm_noirq_suspend_devices()
1518 dpm_async_with_cleanup(dev, async_suspend_noirq); in dpm_noirq_suspend_devices()
1522 dev = to_device(dpm_late_early_list.prev); in dpm_noirq_suspend_devices()
1524 list_move(&dev->power.entry, &dpm_noirq_list); in dpm_noirq_suspend_devices()
1526 if (dpm_async_fn(dev, async_suspend_noirq)) in dpm_noirq_suspend_devices()
1529 get_device(dev); in dpm_noirq_suspend_devices()
1533 device_suspend_noirq(dev, state, false); in dpm_noirq_suspend_devices()
1535 put_device(dev); in dpm_noirq_suspend_devices()
1584 static void dpm_propagate_wakeup_to_parent(struct device *dev) in dpm_propagate_wakeup_to_parent() argument
1586 struct device *parent = dev->parent; in dpm_propagate_wakeup_to_parent()
1593 if (device_wakeup_path(dev) && !parent->power.ignore_children) in dpm_propagate_wakeup_to_parent()
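dpm_propagate_wakeup_to_parent() pushes the wakeup_path marker one level up the tree under the parent's power.lock; the locking and early return do not reference dev and are elided above. Sketch:

    static void dpm_propagate_wakeup_to_parent(struct device *dev)
    {
        struct device *parent = dev->parent;

        if (!parent)
            return;

        spin_lock_irq(&parent->power.lock);

        if (device_wakeup_path(dev) && !parent->power.ignore_children)
            parent->power.wakeup_path = true;

        spin_unlock_irq(&parent->power.lock);
    }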
1609 static void device_suspend_late(struct device *dev, pm_message_t state, bool async) in device_suspend_late() argument
1615 TRACE_DEVICE(dev); in device_suspend_late()
1622 __pm_runtime_disable(dev, false); in device_suspend_late()
1624 dpm_wait_for_subordinate(dev, async); in device_suspend_late()
1634 if (dev->power.syscore || dev->power.direct_complete) in device_suspend_late()
1637 if (dev->pm_domain) { in device_suspend_late()
1639 callback = pm_late_early_op(&dev->pm_domain->ops, state); in device_suspend_late()
1640 } else if (dev->type && dev->type->pm) { in device_suspend_late()
1642 callback = pm_late_early_op(dev->type->pm, state); in device_suspend_late()
1643 } else if (dev->class && dev->class->pm) { in device_suspend_late()
1645 callback = pm_late_early_op(dev->class->pm, state); in device_suspend_late()
1646 } else if (dev->bus && dev->bus->pm) { in device_suspend_late()
1648 callback = pm_late_early_op(dev->bus->pm, state); in device_suspend_late()
1653 if (dev_pm_skip_suspend(dev)) in device_suspend_late()
1656 if (dev->driver && dev->driver->pm) { in device_suspend_late()
1658 callback = pm_late_early_op(dev->driver->pm, state); in device_suspend_late()
1662 error = dpm_run_callback(callback, dev, state, info); in device_suspend_late()
1665 dpm_save_failed_dev(dev_name(dev)); in device_suspend_late()
1666 pm_dev_err(dev, state, async ? " async late" : " late", error); in device_suspend_late()
1669 dpm_propagate_wakeup_to_parent(dev); in device_suspend_late()
1672 dev->power.is_late_suspended = true; in device_suspend_late()
1676 complete_all(&dev->power.completion); in device_suspend_late()
1681 dpm_async_suspend_superior(dev, async_suspend_late); in device_suspend_late()
1686 struct device *dev = data; in async_suspend_late() local
1688 device_suspend_late(dev, pm_transition, true); in async_suspend_late()
1689 put_device(dev); in async_suspend_late()
1699 struct device *dev; in dpm_suspend_late() local
1715 list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) { in dpm_suspend_late()
1716 dpm_clear_async_state(dev); in dpm_suspend_late()
1717 if (dpm_leaf_device(dev)) in dpm_suspend_late()
1718 dpm_async_with_cleanup(dev, async_suspend_late); in dpm_suspend_late()
1722 dev = to_device(dpm_suspended_list.prev); in dpm_suspend_late()
1724 list_move(&dev->power.entry, &dpm_late_early_list); in dpm_suspend_late()
1726 if (dpm_async_fn(dev, async_suspend_late)) in dpm_suspend_late()
1729 get_device(dev); in dpm_suspend_late()
1733 device_suspend_late(dev, state, false); in dpm_suspend_late()
1735 put_device(dev); in dpm_suspend_late()
1794 static int legacy_suspend(struct device *dev, pm_message_t state, in legacy_suspend() argument
1795 int (*cb)(struct device *dev, pm_message_t state), in legacy_suspend() argument
1801 calltime = initcall_debug_start(dev, cb); in legacy_suspend()
1803 trace_device_pm_callback_start(dev, info, state.event); in legacy_suspend()
1804 error = cb(dev, state); in legacy_suspend()
1805 trace_device_pm_callback_end(dev, error); in legacy_suspend()
1806 suspend_report_result(dev, cb, error); in legacy_suspend()
1808 initcall_debug_report(dev, calltime, cb, error); in legacy_suspend()
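legacy_suspend() wraps the old two-argument suspend callbacks (dev->bus->suspend and friends) in the same tracing and initcall-debug instrumentation as dpm_run_callback(); only the declarations and return are elided above. Sketch:

    static int legacy_suspend(struct device *dev, pm_message_t state,
                              int (*cb)(struct device *dev, pm_message_t state),
                              const char *info)
    {
        ktime_t calltime;
        int error;

        calltime = initcall_debug_start(dev, cb);

        trace_device_pm_callback_start(dev, info, state.event);
        error = cb(dev, state);
        trace_device_pm_callback_end(dev, error);
        suspend_report_result(dev, cb, error);

        initcall_debug_report(dev, calltime, cb, error);

        return error;
    }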
1813 static void dpm_clear_superiors_direct_complete(struct device *dev) in dpm_clear_superiors_direct_complete() argument
1818 if (dev->parent) { in dpm_clear_superiors_direct_complete()
1819 spin_lock_irq(&dev->parent->power.lock); in dpm_clear_superiors_direct_complete()
1820 dev->parent->power.direct_complete = false; in dpm_clear_superiors_direct_complete()
1821 spin_unlock_irq(&dev->parent->power.lock); in dpm_clear_superiors_direct_complete()
1826 dev_for_each_link_to_supplier(link, dev) { in dpm_clear_superiors_direct_complete()
1843 static void device_suspend(struct device *dev, pm_message_t state, bool async) in device_suspend() argument
1850 TRACE_DEVICE(dev); in device_suspend()
1853 dpm_wait_for_subordinate(dev, async); in device_suspend()
1856 dev->power.direct_complete = false; in device_suspend()
1871 pm_runtime_barrier(dev); in device_suspend()
1874 dev->power.direct_complete = false; in device_suspend()
1879 if (dev->power.syscore) in device_suspend()
1883 if (device_may_wakeup(dev) || device_wakeup_path(dev)) in device_suspend()
1884 dev->power.direct_complete = false; in device_suspend()
1886 if (dev->power.direct_complete) { in device_suspend()
1887 if (pm_runtime_status_suspended(dev)) { in device_suspend()
1888 pm_runtime_disable(dev); in device_suspend()
1889 if (pm_runtime_status_suspended(dev)) { in device_suspend()
1890 pm_dev_dbg(dev, state, "direct-complete "); in device_suspend()
1891 dev->power.is_suspended = true; in device_suspend()
1895 pm_runtime_enable(dev); in device_suspend()
1897 dev->power.direct_complete = false; in device_suspend()
1900 dev->power.may_skip_resume = true; in device_suspend()
1901 dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME); in device_suspend()
1903 dpm_watchdog_set(&wd, dev); in device_suspend()
1904 device_lock(dev); in device_suspend()
1906 if (dev->pm_domain) { in device_suspend()
1908 callback = pm_op(&dev->pm_domain->ops, state); in device_suspend()
1912 if (dev->type && dev->type->pm) { in device_suspend()
1914 callback = pm_op(dev->type->pm, state); in device_suspend()
1918 if (dev->class && dev->class->pm) { in device_suspend()
1920 callback = pm_op(dev->class->pm, state); in device_suspend()
1924 if (dev->bus) { in device_suspend()
1925 if (dev->bus->pm) { in device_suspend()
1927 callback = pm_op(dev->bus->pm, state); in device_suspend()
1928 } else if (dev->bus->suspend) { in device_suspend()
1929 pm_dev_dbg(dev, state, "legacy bus "); in device_suspend()
1930 error = legacy_suspend(dev, state, dev->bus->suspend, in device_suspend()
1937 if (!callback && dev->driver && dev->driver->pm) { in device_suspend()
1939 callback = pm_op(dev->driver->pm, state); in device_suspend()
1942 error = dpm_run_callback(callback, dev, state, info); in device_suspend()
1946 dev->power.is_suspended = true; in device_suspend()
1947 if (device_may_wakeup(dev)) in device_suspend()
1948 dev->power.wakeup_path = true; in device_suspend()
1950 dpm_propagate_wakeup_to_parent(dev); in device_suspend()
1951 dpm_clear_superiors_direct_complete(dev); in device_suspend()
1954 device_unlock(dev); in device_suspend()
1960 dpm_save_failed_dev(dev_name(dev)); in device_suspend()
1961 pm_dev_err(dev, state, async ? " async" : "", error); in device_suspend()
1964 complete_all(&dev->power.completion); in device_suspend()
1970 dpm_async_suspend_superior(dev, async_suspend); in device_suspend()
1975 struct device *dev = data; in async_suspend() local
1977 device_suspend(dev, pm_transition, true); in async_suspend()
1978 put_device(dev); in async_suspend()
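The direct_complete fast path in device_suspend() above deserves a closer look: runtime PM is disabled and the runtime status re-checked afterwards, because a runtime resume could race in between; only if the device is still runtime-suspended after the disable may all further suspend callbacks be skipped. A sketch of that fragment, with the goto and braces that the refs match elides reconstructed:

    if (dev->power.direct_complete) {
        if (pm_runtime_status_suspended(dev)) {
            pm_runtime_disable(dev);
            /* Re-check: a runtime resume may have raced with the disable. */
            if (pm_runtime_status_suspended(dev)) {
                pm_dev_dbg(dev, state, "direct-complete ");
                dev->power.is_suspended = true;
                goto Complete;
            }

            pm_runtime_enable(dev);
        }
        dev->power.direct_complete = false;
    }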
1988 struct device *dev; in dpm_suspend() local
2006 list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) { in dpm_suspend()
2007 dpm_clear_async_state(dev); in dpm_suspend()
2008 if (dpm_leaf_device(dev)) in dpm_suspend()
2009 dpm_async_with_cleanup(dev, async_suspend); in dpm_suspend()
2013 dev = to_device(dpm_prepared_list.prev); in dpm_suspend()
2015 list_move(&dev->power.entry, &dpm_suspended_list); in dpm_suspend()
2017 if (dpm_async_fn(dev, async_suspend)) in dpm_suspend()
2020 get_device(dev); in dpm_suspend()
2024 device_suspend(dev, state, false); in dpm_suspend()
2026 put_device(dev); in dpm_suspend()
2054 static bool device_prepare_smart_suspend(struct device *dev) in device_prepare_smart_suspend() argument
2068 if (!dev->power.no_pm_callbacks && in device_prepare_smart_suspend()
2069 !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) in device_prepare_smart_suspend()
2072 if (dev->parent && !dev_pm_smart_suspend(dev->parent) && in device_prepare_smart_suspend()
2073 !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent)) in device_prepare_smart_suspend()
2078 dev_for_each_link_to_supplier(link, dev) { in device_prepare_smart_suspend()
2102 static int device_prepare(struct device *dev, pm_message_t state) in device_prepare() argument
2114 pm_runtime_get_noresume(dev); in device_prepare()
2121 smart_suspend = !pm_runtime_block_if_disabled(dev); in device_prepare()
2123 if (dev->power.syscore) in device_prepare()
2126 device_lock(dev); in device_prepare()
2128 dev->power.wakeup_path = false; in device_prepare()
2130 if (dev->power.no_pm_callbacks) in device_prepare()
2133 if (dev->pm_domain) in device_prepare()
2134 callback = dev->pm_domain->ops.prepare; in device_prepare()
2135 else if (dev->type && dev->type->pm) in device_prepare()
2136 callback = dev->type->pm->prepare; in device_prepare()
2137 else if (dev->class && dev->class->pm) in device_prepare()
2138 callback = dev->class->pm->prepare; in device_prepare()
2139 else if (dev->bus && dev->bus->pm) in device_prepare()
2140 callback = dev->bus->pm->prepare; in device_prepare()
2142 if (!callback && dev->driver && dev->driver->pm) in device_prepare()
2143 callback = dev->driver->pm->prepare; in device_prepare()
2146 ret = callback(dev); in device_prepare()
2149 device_unlock(dev); in device_prepare()
2152 suspend_report_result(dev, callback, ret); in device_prepare()
2153 pm_runtime_put(dev); in device_prepare()
2158 smart_suspend = device_prepare_smart_suspend(dev); in device_prepare()
2160 spin_lock_irq(&dev->power.lock); in device_prepare()
2162 dev->power.smart_suspend = smart_suspend; in device_prepare()
2170 dev->power.direct_complete = state.event == PM_EVENT_SUSPEND && in device_prepare()
2171 (ret > 0 || dev->power.no_pm_callbacks) && in device_prepare()
2172 !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE); in device_prepare()
2174 spin_unlock_irq(&dev->power.lock); in device_prepare()
2207 struct device *dev = to_device(dpm_list.next); in dpm_prepare() local
2209 get_device(dev); in dpm_prepare()
2213 trace_device_pm_callback_start(dev, "", state.event); in dpm_prepare()
2214 error = device_prepare(dev, state); in dpm_prepare()
2215 trace_device_pm_callback_end(dev, error); in dpm_prepare()
2220 dev->power.is_prepared = true; in dpm_prepare()
2221 if (!list_empty(&dev->power.entry)) in dpm_prepare()
2222 list_move_tail(&dev->power.entry, &dpm_prepared_list); in dpm_prepare()
2226 dev_info(dev, "not prepared for power transition: code %d\n", in dpm_prepare()
2232 put_device(dev); in dpm_prepare()
2266 void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret) in __suspend_report_result() argument
2269 dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret); in __suspend_report_result()
2278 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev) in device_pm_wait_for_dev() argument
2280 dpm_wait(dev, subordinate->power.async_suspend); in device_pm_wait_for_dev()
2295 struct device *dev; in dpm_for_each_dev() local
2301 list_for_each_entry(dev, &dpm_list, power.entry) in dpm_for_each_dev()
2302 fn(dev, data); in dpm_for_each_dev()
2322 void device_pm_check_callbacks(struct device *dev) in device_pm_check_callbacks() argument
2326 spin_lock_irqsave(&dev->power.lock, flags); in device_pm_check_callbacks()
2327 dev->power.no_pm_callbacks = in device_pm_check_callbacks()
2328 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) && in device_pm_check_callbacks()
2329 !dev->bus->suspend && !dev->bus->resume)) && in device_pm_check_callbacks()
2330 (!dev->class || pm_ops_is_empty(dev->class->pm)) && in device_pm_check_callbacks()
2331 (!dev->type || pm_ops_is_empty(dev->type->pm)) && in device_pm_check_callbacks()
2332 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) && in device_pm_check_callbacks()
2333 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) && in device_pm_check_callbacks()
2334 !dev->driver->suspend && !dev->driver->resume)); in device_pm_check_callbacks()
2335 spin_unlock_irqrestore(&dev->power.lock, flags); in device_pm_check_callbacks()
2338 bool dev_pm_skip_suspend(struct device *dev) in dev_pm_skip_suspend() argument
2340 return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev); in dev_pm_skip_suspend()