1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * drivers/base/power/main.c - Where the driver meets power management. 4 * 5 * Copyright (c) 2003 Patrick Mochel 6 * Copyright (c) 2003 Open Source Development Lab 7 * 8 * The driver model core calls device_pm_add() when a device is registered. 9 * This will initialize the embedded device_pm_info object in the device 10 * and add it to the list of power-controlled devices. sysfs entries for 11 * controlling device power management will also be added. 12 * 13 * A separate list is used for keeping track of power info, because the power 14 * domain dependencies may differ from the ancestral dependencies that the 15 * subsystem list maintains. 16 */ 17 18 #define pr_fmt(fmt) "PM: " fmt 19 #define dev_fmt pr_fmt 20 21 #include <linux/device.h> 22 #include <linux/export.h> 23 #include <linux/mutex.h> 24 #include <linux/pm.h> 25 #include <linux/pm_runtime.h> 26 #include <linux/pm-trace.h> 27 #include <linux/pm_wakeirq.h> 28 #include <linux/interrupt.h> 29 #include <linux/sched.h> 30 #include <linux/sched/debug.h> 31 #include <linux/async.h> 32 #include <linux/suspend.h> 33 #include <trace/events/power.h> 34 #include <linux/cpufreq.h> 35 #include <linux/devfreq.h> 36 #include <linux/timer.h> 37 38 #include "../base.h" 39 #include "power.h" 40 41 typedef int (*pm_callback_t)(struct device *); 42 43 #define list_for_each_entry_rcu_locked(pos, head, member) \ 44 list_for_each_entry_rcu(pos, head, member, \ 45 device_links_read_lock_held()) 46 47 /* 48 * The entries in the dpm_list list are in a depth first order, simply 49 * because children are guaranteed to be discovered after parents, and 50 * are inserted at the back of the list on discovery. 51 * 52 * Since device_pm_add() may be called with a device lock held, 53 * we must never try to acquire a device lock while holding 54 * dpm_list_mutex. 55 */ 56 57 LIST_HEAD(dpm_list); 58 static LIST_HEAD(dpm_prepared_list); 59 static LIST_HEAD(dpm_suspended_list); 60 static LIST_HEAD(dpm_late_early_list); 61 static LIST_HEAD(dpm_noirq_list); 62 63 static DEFINE_MUTEX(dpm_list_mtx); 64 static pm_message_t pm_transition; 65 66 static DEFINE_MUTEX(async_wip_mtx); 67 static int async_error; 68 69 static const char *pm_verb(int event) 70 { 71 switch (event) { 72 case PM_EVENT_SUSPEND: 73 return "suspend"; 74 case PM_EVENT_RESUME: 75 return "resume"; 76 case PM_EVENT_FREEZE: 77 return "freeze"; 78 case PM_EVENT_QUIESCE: 79 return "quiesce"; 80 case PM_EVENT_HIBERNATE: 81 return "hibernate"; 82 case PM_EVENT_THAW: 83 return "thaw"; 84 case PM_EVENT_RESTORE: 85 return "restore"; 86 case PM_EVENT_RECOVER: 87 return "recover"; 88 default: 89 return "(unknown PM event)"; 90 } 91 } 92 93 /** 94 * device_pm_sleep_init - Initialize system suspend-related device fields. 95 * @dev: Device object being initialized. 96 */ 97 void device_pm_sleep_init(struct device *dev) 98 { 99 dev->power.is_prepared = false; 100 dev->power.is_suspended = false; 101 dev->power.is_noirq_suspended = false; 102 dev->power.is_late_suspended = false; 103 init_completion(&dev->power.completion); 104 complete_all(&dev->power.completion); 105 dev->power.wakeup = NULL; 106 INIT_LIST_HEAD(&dev->power.entry); 107 } 108 109 /** 110 * device_pm_lock - Lock the list of active devices used by the PM core. 111 */ 112 void device_pm_lock(void) 113 { 114 mutex_lock(&dpm_list_mtx); 115 } 116 117 /** 118 * device_pm_unlock - Unlock the list of active devices used by the PM core. 119 */ 120 void device_pm_unlock(void) 121 { 122 mutex_unlock(&dpm_list_mtx); 123 } 124 125 /** 126 * device_pm_add - Add a device to the PM core's list of active devices. 127 * @dev: Device to add to the list. 128 */ 129 void device_pm_add(struct device *dev) 130 { 131 /* Skip PM setup/initialization. */ 132 if (device_pm_not_required(dev)) 133 return; 134 135 pr_debug("Adding info for %s:%s\n", 136 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); 137 device_pm_check_callbacks(dev); 138 mutex_lock(&dpm_list_mtx); 139 if (dev->parent && dev->parent->power.is_prepared) 140 dev_warn(dev, "parent %s should not be sleeping\n", 141 dev_name(dev->parent)); 142 list_add_tail(&dev->power.entry, &dpm_list); 143 dev->power.in_dpm_list = true; 144 mutex_unlock(&dpm_list_mtx); 145 } 146 147 /** 148 * device_pm_remove - Remove a device from the PM core's list of active devices. 149 * @dev: Device to be removed from the list. 150 */ 151 void device_pm_remove(struct device *dev) 152 { 153 if (device_pm_not_required(dev)) 154 return; 155 156 pr_debug("Removing info for %s:%s\n", 157 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); 158 complete_all(&dev->power.completion); 159 mutex_lock(&dpm_list_mtx); 160 list_del_init(&dev->power.entry); 161 dev->power.in_dpm_list = false; 162 mutex_unlock(&dpm_list_mtx); 163 device_wakeup_disable(dev); 164 pm_runtime_remove(dev); 165 device_pm_check_callbacks(dev); 166 } 167 168 /** 169 * device_pm_move_before - Move device in the PM core's list of active devices. 170 * @deva: Device to move in dpm_list. 171 * @devb: Device @deva should come before. 172 */ 173 void device_pm_move_before(struct device *deva, struct device *devb) 174 { 175 pr_debug("Moving %s:%s before %s:%s\n", 176 deva->bus ? deva->bus->name : "No Bus", dev_name(deva), 177 devb->bus ? devb->bus->name : "No Bus", dev_name(devb)); 178 /* Delete deva from dpm_list and reinsert before devb. */ 179 list_move_tail(&deva->power.entry, &devb->power.entry); 180 } 181 182 /** 183 * device_pm_move_after - Move device in the PM core's list of active devices. 184 * @deva: Device to move in dpm_list. 185 * @devb: Device @deva should come after. 186 */ 187 void device_pm_move_after(struct device *deva, struct device *devb) 188 { 189 pr_debug("Moving %s:%s after %s:%s\n", 190 deva->bus ? deva->bus->name : "No Bus", dev_name(deva), 191 devb->bus ? devb->bus->name : "No Bus", dev_name(devb)); 192 /* Delete deva from dpm_list and reinsert after devb. */ 193 list_move(&deva->power.entry, &devb->power.entry); 194 } 195 196 /** 197 * device_pm_move_last - Move device to end of the PM core's list of devices. 198 * @dev: Device to move in dpm_list. 199 */ 200 void device_pm_move_last(struct device *dev) 201 { 202 pr_debug("Moving %s:%s to end of list\n", 203 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); 204 list_move_tail(&dev->power.entry, &dpm_list); 205 } 206 207 static ktime_t initcall_debug_start(struct device *dev, void *cb) 208 { 209 if (!pm_print_times_enabled) 210 return 0; 211 212 dev_info(dev, "calling %ps @ %i, parent: %s\n", cb, 213 task_pid_nr(current), 214 dev->parent ? dev_name(dev->parent) : "none"); 215 return ktime_get(); 216 } 217 218 static void initcall_debug_report(struct device *dev, ktime_t calltime, 219 void *cb, int error) 220 { 221 ktime_t rettime; 222 223 if (!pm_print_times_enabled) 224 return; 225 226 rettime = ktime_get(); 227 dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error, 228 (unsigned long long)ktime_us_delta(rettime, calltime)); 229 } 230 231 /** 232 * dpm_wait - Wait for a PM operation to complete. 233 * @dev: Device to wait for. 234 * @async: If unset, wait only if the device's power.async_suspend flag is set. 235 */ 236 static void dpm_wait(struct device *dev, bool async) 237 { 238 if (!dev) 239 return; 240 241 if (async || (pm_async_enabled && dev->power.async_suspend)) 242 wait_for_completion(&dev->power.completion); 243 } 244 245 static int dpm_wait_fn(struct device *dev, void *async_ptr) 246 { 247 dpm_wait(dev, *((bool *)async_ptr)); 248 return 0; 249 } 250 251 static void dpm_wait_for_children(struct device *dev, bool async) 252 { 253 device_for_each_child(dev, &async, dpm_wait_fn); 254 } 255 256 static void dpm_wait_for_suppliers(struct device *dev, bool async) 257 { 258 struct device_link *link; 259 int idx; 260 261 idx = device_links_read_lock(); 262 263 /* 264 * If the supplier goes away right after we've checked the link to it, 265 * we'll wait for its completion to change the state, but that's fine, 266 * because the only things that will block as a result are the SRCU 267 * callbacks freeing the link objects for the links in the list we're 268 * walking. 269 */ 270 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) 271 if (READ_ONCE(link->status) != DL_STATE_DORMANT) 272 dpm_wait(link->supplier, async); 273 274 device_links_read_unlock(idx); 275 } 276 277 static bool dpm_wait_for_superior(struct device *dev, bool async) 278 { 279 struct device *parent; 280 281 /* 282 * If the device is resumed asynchronously and the parent's callback 283 * deletes both the device and the parent itself, the parent object may 284 * be freed while this function is running, so avoid that by reference 285 * counting the parent once more unless the device has been deleted 286 * already (in which case return right away). 287 */ 288 mutex_lock(&dpm_list_mtx); 289 290 if (!device_pm_initialized(dev)) { 291 mutex_unlock(&dpm_list_mtx); 292 return false; 293 } 294 295 parent = get_device(dev->parent); 296 297 mutex_unlock(&dpm_list_mtx); 298 299 dpm_wait(parent, async); 300 put_device(parent); 301 302 dpm_wait_for_suppliers(dev, async); 303 304 /* 305 * If the parent's callback has deleted the device, attempting to resume 306 * it would be invalid, so avoid doing that then. 307 */ 308 return device_pm_initialized(dev); 309 } 310 311 static void dpm_wait_for_consumers(struct device *dev, bool async) 312 { 313 struct device_link *link; 314 int idx; 315 316 idx = device_links_read_lock(); 317 318 /* 319 * The status of a device link can only be changed from "dormant" by a 320 * probe, but that cannot happen during system suspend/resume. In 321 * theory it can change to "dormant" at that time, but then it is 322 * reasonable to wait for the target device anyway (eg. if it goes 323 * away, it's better to wait for it to go away completely and then 324 * continue instead of trying to continue in parallel with its 325 * unregistration). 326 */ 327 list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node) 328 if (READ_ONCE(link->status) != DL_STATE_DORMANT) 329 dpm_wait(link->consumer, async); 330 331 device_links_read_unlock(idx); 332 } 333 334 static void dpm_wait_for_subordinate(struct device *dev, bool async) 335 { 336 dpm_wait_for_children(dev, async); 337 dpm_wait_for_consumers(dev, async); 338 } 339 340 /** 341 * pm_op - Return the PM operation appropriate for given PM event. 342 * @ops: PM operations to choose from. 343 * @state: PM transition of the system being carried out. 344 */ 345 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state) 346 { 347 switch (state.event) { 348 #ifdef CONFIG_SUSPEND 349 case PM_EVENT_SUSPEND: 350 return ops->suspend; 351 case PM_EVENT_RESUME: 352 return ops->resume; 353 #endif /* CONFIG_SUSPEND */ 354 #ifdef CONFIG_HIBERNATE_CALLBACKS 355 case PM_EVENT_FREEZE: 356 case PM_EVENT_QUIESCE: 357 return ops->freeze; 358 case PM_EVENT_HIBERNATE: 359 return ops->poweroff; 360 case PM_EVENT_THAW: 361 case PM_EVENT_RECOVER: 362 return ops->thaw; 363 case PM_EVENT_RESTORE: 364 return ops->restore; 365 #endif /* CONFIG_HIBERNATE_CALLBACKS */ 366 } 367 368 return NULL; 369 } 370 371 /** 372 * pm_late_early_op - Return the PM operation appropriate for given PM event. 373 * @ops: PM operations to choose from. 374 * @state: PM transition of the system being carried out. 375 * 376 * Runtime PM is disabled for @dev while this function is being executed. 377 */ 378 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops, 379 pm_message_t state) 380 { 381 switch (state.event) { 382 #ifdef CONFIG_SUSPEND 383 case PM_EVENT_SUSPEND: 384 return ops->suspend_late; 385 case PM_EVENT_RESUME: 386 return ops->resume_early; 387 #endif /* CONFIG_SUSPEND */ 388 #ifdef CONFIG_HIBERNATE_CALLBACKS 389 case PM_EVENT_FREEZE: 390 case PM_EVENT_QUIESCE: 391 return ops->freeze_late; 392 case PM_EVENT_HIBERNATE: 393 return ops->poweroff_late; 394 case PM_EVENT_THAW: 395 case PM_EVENT_RECOVER: 396 return ops->thaw_early; 397 case PM_EVENT_RESTORE: 398 return ops->restore_early; 399 #endif /* CONFIG_HIBERNATE_CALLBACKS */ 400 } 401 402 return NULL; 403 } 404 405 /** 406 * pm_noirq_op - Return the PM operation appropriate for given PM event. 407 * @ops: PM operations to choose from. 408 * @state: PM transition of the system being carried out. 409 * 410 * The driver of @dev will not receive interrupts while this function is being 411 * executed. 412 */ 413 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state) 414 { 415 switch (state.event) { 416 #ifdef CONFIG_SUSPEND 417 case PM_EVENT_SUSPEND: 418 return ops->suspend_noirq; 419 case PM_EVENT_RESUME: 420 return ops->resume_noirq; 421 #endif /* CONFIG_SUSPEND */ 422 #ifdef CONFIG_HIBERNATE_CALLBACKS 423 case PM_EVENT_FREEZE: 424 case PM_EVENT_QUIESCE: 425 return ops->freeze_noirq; 426 case PM_EVENT_HIBERNATE: 427 return ops->poweroff_noirq; 428 case PM_EVENT_THAW: 429 case PM_EVENT_RECOVER: 430 return ops->thaw_noirq; 431 case PM_EVENT_RESTORE: 432 return ops->restore_noirq; 433 #endif /* CONFIG_HIBERNATE_CALLBACKS */ 434 } 435 436 return NULL; 437 } 438 439 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info) 440 { 441 dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event), 442 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ? 443 ", may wakeup" : "", dev->power.driver_flags); 444 } 445 446 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info, 447 int error) 448 { 449 dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info, 450 error); 451 } 452 453 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error, 454 const char *info) 455 { 456 ktime_t calltime; 457 u64 usecs64; 458 int usecs; 459 460 calltime = ktime_get(); 461 usecs64 = ktime_to_ns(ktime_sub(calltime, starttime)); 462 do_div(usecs64, NSEC_PER_USEC); 463 usecs = usecs64; 464 if (usecs == 0) 465 usecs = 1; 466 467 pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n", 468 info ?: "", info ? " " : "", pm_verb(state.event), 469 error ? "aborted" : "complete", 470 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); 471 } 472 473 static int dpm_run_callback(pm_callback_t cb, struct device *dev, 474 pm_message_t state, const char *info) 475 { 476 ktime_t calltime; 477 int error; 478 479 if (!cb) 480 return 0; 481 482 calltime = initcall_debug_start(dev, cb); 483 484 pm_dev_dbg(dev, state, info); 485 trace_device_pm_callback_start(dev, info, state.event); 486 error = cb(dev); 487 trace_device_pm_callback_end(dev, error); 488 suspend_report_result(dev, cb, error); 489 490 initcall_debug_report(dev, calltime, cb, error); 491 492 return error; 493 } 494 495 #ifdef CONFIG_DPM_WATCHDOG 496 struct dpm_watchdog { 497 struct device *dev; 498 struct task_struct *tsk; 499 struct timer_list timer; 500 bool fatal; 501 }; 502 503 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \ 504 struct dpm_watchdog wd 505 506 /** 507 * dpm_watchdog_handler - Driver suspend / resume watchdog handler. 508 * @t: The timer that PM watchdog depends on. 509 * 510 * Called when a driver has timed out suspending or resuming. 511 * There's not much we can do here to recover so panic() to 512 * capture a crash-dump in pstore. 513 */ 514 static void dpm_watchdog_handler(struct timer_list *t) 515 { 516 struct dpm_watchdog *wd = from_timer(wd, t, timer); 517 struct timer_list *timer = &wd->timer; 518 unsigned int time_left; 519 520 if (wd->fatal) { 521 dev_emerg(wd->dev, "**** DPM device timeout ****\n"); 522 show_stack(wd->tsk, NULL, KERN_EMERG); 523 panic("%s %s: unrecoverable failure\n", 524 dev_driver_string(wd->dev), dev_name(wd->dev)); 525 } 526 527 time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT; 528 dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n", 529 CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left); 530 show_stack(wd->tsk, NULL, KERN_WARNING); 531 532 wd->fatal = true; 533 mod_timer(timer, jiffies + HZ * time_left); 534 } 535 536 /** 537 * dpm_watchdog_set - Enable pm watchdog for given device. 538 * @wd: Watchdog. Must be allocated on the stack. 539 * @dev: Device to handle. 540 */ 541 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev) 542 { 543 struct timer_list *timer = &wd->timer; 544 545 wd->dev = dev; 546 wd->tsk = current; 547 wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT; 548 549 timer_setup_on_stack(timer, dpm_watchdog_handler, 0); 550 /* use same timeout value for both suspend and resume */ 551 timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT; 552 add_timer(timer); 553 } 554 555 /** 556 * dpm_watchdog_clear - Disable suspend/resume watchdog. 557 * @wd: Watchdog to disable. 558 */ 559 static void dpm_watchdog_clear(struct dpm_watchdog *wd) 560 { 561 struct timer_list *timer = &wd->timer; 562 563 timer_delete_sync(timer); 564 timer_destroy_on_stack(timer); 565 } 566 #else 567 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) 568 #define dpm_watchdog_set(x, y) 569 #define dpm_watchdog_clear(x) 570 #endif 571 572 /*------------------------- Resume routines -------------------------*/ 573 574 /** 575 * dev_pm_skip_resume - System-wide device resume optimization check. 576 * @dev: Target device. 577 * 578 * Return: 579 * - %false if the transition under way is RESTORE. 580 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW. 581 * - The logical negation of %power.must_resume otherwise (that is, when the 582 * transition under way is RESUME). 583 */ 584 bool dev_pm_skip_resume(struct device *dev) 585 { 586 if (pm_transition.event == PM_EVENT_RESTORE) 587 return false; 588 589 if (pm_transition.event == PM_EVENT_THAW) 590 return dev_pm_skip_suspend(dev); 591 592 return !dev->power.must_resume; 593 } 594 595 static bool is_async(struct device *dev) 596 { 597 return dev->power.async_suspend && pm_async_enabled 598 && !pm_trace_is_enabled(); 599 } 600 601 static bool __dpm_async(struct device *dev, async_func_t func) 602 { 603 if (dev->power.work_in_progress) 604 return true; 605 606 if (!is_async(dev)) 607 return false; 608 609 dev->power.work_in_progress = true; 610 611 get_device(dev); 612 613 if (async_schedule_dev_nocall(func, dev)) 614 return true; 615 616 put_device(dev); 617 618 return false; 619 } 620 621 static bool dpm_async_fn(struct device *dev, async_func_t func) 622 { 623 guard(mutex)(&async_wip_mtx); 624 625 return __dpm_async(dev, func); 626 } 627 628 static int dpm_async_with_cleanup(struct device *dev, void *fn) 629 { 630 guard(mutex)(&async_wip_mtx); 631 632 if (!__dpm_async(dev, fn)) 633 dev->power.work_in_progress = false; 634 635 return 0; 636 } 637 638 static void dpm_async_resume_children(struct device *dev, async_func_t func) 639 { 640 /* 641 * Start processing "async" children of the device unless it's been 642 * started already for them. 643 * 644 * This could have been done for the device's "async" consumers too, but 645 * they either need to wait for their parents or the processing has 646 * already started for them after their parents were processed. 647 */ 648 device_for_each_child(dev, func, dpm_async_with_cleanup); 649 } 650 651 static void dpm_clear_async_state(struct device *dev) 652 { 653 reinit_completion(&dev->power.completion); 654 dev->power.work_in_progress = false; 655 } 656 657 static bool dpm_root_device(struct device *dev) 658 { 659 return !dev->parent; 660 } 661 662 static void async_resume_noirq(void *data, async_cookie_t cookie); 663 664 /** 665 * device_resume_noirq - Execute a "noirq resume" callback for given device. 666 * @dev: Device to handle. 667 * @state: PM transition of the system being carried out. 668 * @async: If true, the device is being resumed asynchronously. 669 * 670 * The driver of @dev will not receive interrupts while this function is being 671 * executed. 672 */ 673 static void device_resume_noirq(struct device *dev, pm_message_t state, bool async) 674 { 675 pm_callback_t callback = NULL; 676 const char *info = NULL; 677 bool skip_resume; 678 int error = 0; 679 680 TRACE_DEVICE(dev); 681 TRACE_RESUME(0); 682 683 if (dev->power.syscore || dev->power.direct_complete) 684 goto Out; 685 686 if (!dev->power.is_noirq_suspended) 687 goto Out; 688 689 if (!dpm_wait_for_superior(dev, async)) 690 goto Out; 691 692 skip_resume = dev_pm_skip_resume(dev); 693 /* 694 * If the driver callback is skipped below or by the middle layer 695 * callback and device_resume_early() also skips the driver callback for 696 * this device later, it needs to appear as "suspended" to PM-runtime, 697 * so change its status accordingly. 698 * 699 * Otherwise, the device is going to be resumed, so set its PM-runtime 700 * status to "active" unless its power.smart_suspend flag is clear, in 701 * which case it is not necessary to update its PM-runtime status. 702 */ 703 if (skip_resume) 704 pm_runtime_set_suspended(dev); 705 else if (dev_pm_smart_suspend(dev)) 706 pm_runtime_set_active(dev); 707 708 if (dev->pm_domain) { 709 info = "noirq power domain "; 710 callback = pm_noirq_op(&dev->pm_domain->ops, state); 711 } else if (dev->type && dev->type->pm) { 712 info = "noirq type "; 713 callback = pm_noirq_op(dev->type->pm, state); 714 } else if (dev->class && dev->class->pm) { 715 info = "noirq class "; 716 callback = pm_noirq_op(dev->class->pm, state); 717 } else if (dev->bus && dev->bus->pm) { 718 info = "noirq bus "; 719 callback = pm_noirq_op(dev->bus->pm, state); 720 } 721 if (callback) 722 goto Run; 723 724 if (skip_resume) 725 goto Skip; 726 727 if (dev->driver && dev->driver->pm) { 728 info = "noirq driver "; 729 callback = pm_noirq_op(dev->driver->pm, state); 730 } 731 732 Run: 733 error = dpm_run_callback(callback, dev, state, info); 734 735 Skip: 736 dev->power.is_noirq_suspended = false; 737 738 Out: 739 complete_all(&dev->power.completion); 740 TRACE_RESUME(error); 741 742 if (error) { 743 async_error = error; 744 dpm_save_failed_dev(dev_name(dev)); 745 pm_dev_err(dev, state, async ? " async noirq" : " noirq", error); 746 } 747 748 dpm_async_resume_children(dev, async_resume_noirq); 749 } 750 751 static void async_resume_noirq(void *data, async_cookie_t cookie) 752 { 753 struct device *dev = data; 754 755 device_resume_noirq(dev, pm_transition, true); 756 put_device(dev); 757 } 758 759 static void dpm_noirq_resume_devices(pm_message_t state) 760 { 761 struct device *dev; 762 ktime_t starttime = ktime_get(); 763 764 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true); 765 766 async_error = 0; 767 pm_transition = state; 768 769 mutex_lock(&dpm_list_mtx); 770 771 /* 772 * Start processing "async" root devices upfront so they don't wait for 773 * the "sync" devices they don't depend on. 774 */ 775 list_for_each_entry(dev, &dpm_noirq_list, power.entry) { 776 dpm_clear_async_state(dev); 777 if (dpm_root_device(dev)) 778 dpm_async_with_cleanup(dev, async_resume_noirq); 779 } 780 781 while (!list_empty(&dpm_noirq_list)) { 782 dev = to_device(dpm_noirq_list.next); 783 list_move_tail(&dev->power.entry, &dpm_late_early_list); 784 785 if (!dpm_async_fn(dev, async_resume_noirq)) { 786 get_device(dev); 787 788 mutex_unlock(&dpm_list_mtx); 789 790 device_resume_noirq(dev, state, false); 791 792 put_device(dev); 793 794 mutex_lock(&dpm_list_mtx); 795 } 796 } 797 mutex_unlock(&dpm_list_mtx); 798 async_synchronize_full(); 799 dpm_show_time(starttime, state, 0, "noirq"); 800 if (async_error) 801 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); 802 803 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); 804 } 805 806 /** 807 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices. 808 * @state: PM transition of the system being carried out. 809 * 810 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and 811 * allow device drivers' interrupt handlers to be called. 812 */ 813 void dpm_resume_noirq(pm_message_t state) 814 { 815 dpm_noirq_resume_devices(state); 816 817 resume_device_irqs(); 818 device_wakeup_disarm_wake_irqs(); 819 } 820 821 static void async_resume_early(void *data, async_cookie_t cookie); 822 823 /** 824 * device_resume_early - Execute an "early resume" callback for given device. 825 * @dev: Device to handle. 826 * @state: PM transition of the system being carried out. 827 * @async: If true, the device is being resumed asynchronously. 828 * 829 * Runtime PM is disabled for @dev while this function is being executed. 830 */ 831 static void device_resume_early(struct device *dev, pm_message_t state, bool async) 832 { 833 pm_callback_t callback = NULL; 834 const char *info = NULL; 835 int error = 0; 836 837 TRACE_DEVICE(dev); 838 TRACE_RESUME(0); 839 840 if (dev->power.syscore || dev->power.direct_complete) 841 goto Out; 842 843 if (!dev->power.is_late_suspended) 844 goto Out; 845 846 if (!dpm_wait_for_superior(dev, async)) 847 goto Out; 848 849 if (dev->pm_domain) { 850 info = "early power domain "; 851 callback = pm_late_early_op(&dev->pm_domain->ops, state); 852 } else if (dev->type && dev->type->pm) { 853 info = "early type "; 854 callback = pm_late_early_op(dev->type->pm, state); 855 } else if (dev->class && dev->class->pm) { 856 info = "early class "; 857 callback = pm_late_early_op(dev->class->pm, state); 858 } else if (dev->bus && dev->bus->pm) { 859 info = "early bus "; 860 callback = pm_late_early_op(dev->bus->pm, state); 861 } 862 if (callback) 863 goto Run; 864 865 if (dev_pm_skip_resume(dev)) 866 goto Skip; 867 868 if (dev->driver && dev->driver->pm) { 869 info = "early driver "; 870 callback = pm_late_early_op(dev->driver->pm, state); 871 } 872 873 Run: 874 error = dpm_run_callback(callback, dev, state, info); 875 876 Skip: 877 dev->power.is_late_suspended = false; 878 879 Out: 880 TRACE_RESUME(error); 881 882 pm_runtime_enable(dev); 883 complete_all(&dev->power.completion); 884 885 if (error) { 886 async_error = error; 887 dpm_save_failed_dev(dev_name(dev)); 888 pm_dev_err(dev, state, async ? " async early" : " early", error); 889 } 890 891 dpm_async_resume_children(dev, async_resume_early); 892 } 893 894 static void async_resume_early(void *data, async_cookie_t cookie) 895 { 896 struct device *dev = data; 897 898 device_resume_early(dev, pm_transition, true); 899 put_device(dev); 900 } 901 902 /** 903 * dpm_resume_early - Execute "early resume" callbacks for all devices. 904 * @state: PM transition of the system being carried out. 905 */ 906 void dpm_resume_early(pm_message_t state) 907 { 908 struct device *dev; 909 ktime_t starttime = ktime_get(); 910 911 trace_suspend_resume(TPS("dpm_resume_early"), state.event, true); 912 913 async_error = 0; 914 pm_transition = state; 915 916 mutex_lock(&dpm_list_mtx); 917 918 /* 919 * Start processing "async" root devices upfront so they don't wait for 920 * the "sync" devices they don't depend on. 921 */ 922 list_for_each_entry(dev, &dpm_late_early_list, power.entry) { 923 dpm_clear_async_state(dev); 924 if (dpm_root_device(dev)) 925 dpm_async_with_cleanup(dev, async_resume_early); 926 } 927 928 while (!list_empty(&dpm_late_early_list)) { 929 dev = to_device(dpm_late_early_list.next); 930 list_move_tail(&dev->power.entry, &dpm_suspended_list); 931 932 if (!dpm_async_fn(dev, async_resume_early)) { 933 get_device(dev); 934 935 mutex_unlock(&dpm_list_mtx); 936 937 device_resume_early(dev, state, false); 938 939 put_device(dev); 940 941 mutex_lock(&dpm_list_mtx); 942 } 943 } 944 mutex_unlock(&dpm_list_mtx); 945 async_synchronize_full(); 946 dpm_show_time(starttime, state, 0, "early"); 947 if (async_error) 948 dpm_save_failed_step(SUSPEND_RESUME_EARLY); 949 950 trace_suspend_resume(TPS("dpm_resume_early"), state.event, false); 951 } 952 953 /** 954 * dpm_resume_start - Execute "noirq" and "early" device callbacks. 955 * @state: PM transition of the system being carried out. 956 */ 957 void dpm_resume_start(pm_message_t state) 958 { 959 dpm_resume_noirq(state); 960 dpm_resume_early(state); 961 } 962 EXPORT_SYMBOL_GPL(dpm_resume_start); 963 964 static void async_resume(void *data, async_cookie_t cookie); 965 966 /** 967 * device_resume - Execute "resume" callbacks for given device. 968 * @dev: Device to handle. 969 * @state: PM transition of the system being carried out. 970 * @async: If true, the device is being resumed asynchronously. 971 */ 972 static void device_resume(struct device *dev, pm_message_t state, bool async) 973 { 974 pm_callback_t callback = NULL; 975 const char *info = NULL; 976 int error = 0; 977 DECLARE_DPM_WATCHDOG_ON_STACK(wd); 978 979 TRACE_DEVICE(dev); 980 TRACE_RESUME(0); 981 982 if (dev->power.syscore) 983 goto Complete; 984 985 if (!dev->power.is_suspended) 986 goto Complete; 987 988 if (dev->power.direct_complete) { 989 /* 990 * Allow new children to be added under the device after this 991 * point if it has no PM callbacks. 992 */ 993 if (dev->power.no_pm_callbacks) 994 dev->power.is_prepared = false; 995 996 /* Match the pm_runtime_disable() in device_suspend(). */ 997 pm_runtime_enable(dev); 998 goto Complete; 999 } 1000 1001 if (!dpm_wait_for_superior(dev, async)) 1002 goto Complete; 1003 1004 dpm_watchdog_set(&wd, dev); 1005 device_lock(dev); 1006 1007 /* 1008 * This is a fib. But we'll allow new children to be added below 1009 * a resumed device, even if the device hasn't been completed yet. 1010 */ 1011 dev->power.is_prepared = false; 1012 1013 if (dev->pm_domain) { 1014 info = "power domain "; 1015 callback = pm_op(&dev->pm_domain->ops, state); 1016 goto Driver; 1017 } 1018 1019 if (dev->type && dev->type->pm) { 1020 info = "type "; 1021 callback = pm_op(dev->type->pm, state); 1022 goto Driver; 1023 } 1024 1025 if (dev->class && dev->class->pm) { 1026 info = "class "; 1027 callback = pm_op(dev->class->pm, state); 1028 goto Driver; 1029 } 1030 1031 if (dev->bus) { 1032 if (dev->bus->pm) { 1033 info = "bus "; 1034 callback = pm_op(dev->bus->pm, state); 1035 } else if (dev->bus->resume) { 1036 info = "legacy bus "; 1037 callback = dev->bus->resume; 1038 goto End; 1039 } 1040 } 1041 1042 Driver: 1043 if (!callback && dev->driver && dev->driver->pm) { 1044 info = "driver "; 1045 callback = pm_op(dev->driver->pm, state); 1046 } 1047 1048 End: 1049 error = dpm_run_callback(callback, dev, state, info); 1050 dev->power.is_suspended = false; 1051 1052 device_unlock(dev); 1053 dpm_watchdog_clear(&wd); 1054 1055 Complete: 1056 complete_all(&dev->power.completion); 1057 1058 TRACE_RESUME(error); 1059 1060 if (error) { 1061 async_error = error; 1062 dpm_save_failed_dev(dev_name(dev)); 1063 pm_dev_err(dev, state, async ? " async" : "", error); 1064 } 1065 1066 dpm_async_resume_children(dev, async_resume); 1067 } 1068 1069 static void async_resume(void *data, async_cookie_t cookie) 1070 { 1071 struct device *dev = data; 1072 1073 device_resume(dev, pm_transition, true); 1074 put_device(dev); 1075 } 1076 1077 /** 1078 * dpm_resume - Execute "resume" callbacks for non-sysdev devices. 1079 * @state: PM transition of the system being carried out. 1080 * 1081 * Execute the appropriate "resume" callback for all devices whose status 1082 * indicates that they are suspended. 1083 */ 1084 void dpm_resume(pm_message_t state) 1085 { 1086 struct device *dev; 1087 ktime_t starttime = ktime_get(); 1088 1089 trace_suspend_resume(TPS("dpm_resume"), state.event, true); 1090 might_sleep(); 1091 1092 pm_transition = state; 1093 async_error = 0; 1094 1095 mutex_lock(&dpm_list_mtx); 1096 1097 /* 1098 * Start processing "async" root devices upfront so they don't wait for 1099 * the "sync" devices they don't depend on. 1100 */ 1101 list_for_each_entry(dev, &dpm_suspended_list, power.entry) { 1102 dpm_clear_async_state(dev); 1103 if (dpm_root_device(dev)) 1104 dpm_async_with_cleanup(dev, async_resume); 1105 } 1106 1107 while (!list_empty(&dpm_suspended_list)) { 1108 dev = to_device(dpm_suspended_list.next); 1109 list_move_tail(&dev->power.entry, &dpm_prepared_list); 1110 1111 if (!dpm_async_fn(dev, async_resume)) { 1112 get_device(dev); 1113 1114 mutex_unlock(&dpm_list_mtx); 1115 1116 device_resume(dev, state, false); 1117 1118 put_device(dev); 1119 1120 mutex_lock(&dpm_list_mtx); 1121 } 1122 } 1123 mutex_unlock(&dpm_list_mtx); 1124 async_synchronize_full(); 1125 dpm_show_time(starttime, state, 0, NULL); 1126 if (async_error) 1127 dpm_save_failed_step(SUSPEND_RESUME); 1128 1129 cpufreq_resume(); 1130 devfreq_resume(); 1131 trace_suspend_resume(TPS("dpm_resume"), state.event, false); 1132 } 1133 1134 /** 1135 * device_complete - Complete a PM transition for given device. 1136 * @dev: Device to handle. 1137 * @state: PM transition of the system being carried out. 1138 */ 1139 static void device_complete(struct device *dev, pm_message_t state) 1140 { 1141 void (*callback)(struct device *) = NULL; 1142 const char *info = NULL; 1143 1144 if (dev->power.syscore) 1145 goto out; 1146 1147 device_lock(dev); 1148 1149 if (dev->pm_domain) { 1150 info = "completing power domain "; 1151 callback = dev->pm_domain->ops.complete; 1152 } else if (dev->type && dev->type->pm) { 1153 info = "completing type "; 1154 callback = dev->type->pm->complete; 1155 } else if (dev->class && dev->class->pm) { 1156 info = "completing class "; 1157 callback = dev->class->pm->complete; 1158 } else if (dev->bus && dev->bus->pm) { 1159 info = "completing bus "; 1160 callback = dev->bus->pm->complete; 1161 } 1162 1163 if (!callback && dev->driver && dev->driver->pm) { 1164 info = "completing driver "; 1165 callback = dev->driver->pm->complete; 1166 } 1167 1168 if (callback) { 1169 pm_dev_dbg(dev, state, info); 1170 callback(dev); 1171 } 1172 1173 device_unlock(dev); 1174 1175 out: 1176 /* If enabling runtime PM for the device is blocked, unblock it. */ 1177 pm_runtime_unblock(dev); 1178 pm_runtime_put(dev); 1179 } 1180 1181 /** 1182 * dpm_complete - Complete a PM transition for all non-sysdev devices. 1183 * @state: PM transition of the system being carried out. 1184 * 1185 * Execute the ->complete() callbacks for all devices whose PM status is not 1186 * DPM_ON (this allows new devices to be registered). 1187 */ 1188 void dpm_complete(pm_message_t state) 1189 { 1190 struct list_head list; 1191 1192 trace_suspend_resume(TPS("dpm_complete"), state.event, true); 1193 might_sleep(); 1194 1195 INIT_LIST_HEAD(&list); 1196 mutex_lock(&dpm_list_mtx); 1197 while (!list_empty(&dpm_prepared_list)) { 1198 struct device *dev = to_device(dpm_prepared_list.prev); 1199 1200 get_device(dev); 1201 dev->power.is_prepared = false; 1202 list_move(&dev->power.entry, &list); 1203 1204 mutex_unlock(&dpm_list_mtx); 1205 1206 trace_device_pm_callback_start(dev, "", state.event); 1207 device_complete(dev, state); 1208 trace_device_pm_callback_end(dev, 0); 1209 1210 put_device(dev); 1211 1212 mutex_lock(&dpm_list_mtx); 1213 } 1214 list_splice(&list, &dpm_list); 1215 mutex_unlock(&dpm_list_mtx); 1216 1217 /* Allow device probing and trigger re-probing of deferred devices */ 1218 device_unblock_probing(); 1219 trace_suspend_resume(TPS("dpm_complete"), state.event, false); 1220 } 1221 1222 /** 1223 * dpm_resume_end - Execute "resume" callbacks and complete system transition. 1224 * @state: PM transition of the system being carried out. 1225 * 1226 * Execute "resume" callbacks for all devices and complete the PM transition of 1227 * the system. 1228 */ 1229 void dpm_resume_end(pm_message_t state) 1230 { 1231 dpm_resume(state); 1232 dpm_complete(state); 1233 } 1234 EXPORT_SYMBOL_GPL(dpm_resume_end); 1235 1236 1237 /*------------------------- Suspend routines -------------------------*/ 1238 1239 static bool dpm_leaf_device(struct device *dev) 1240 { 1241 struct device *child; 1242 1243 lockdep_assert_held(&dpm_list_mtx); 1244 1245 child = device_find_any_child(dev); 1246 if (child) { 1247 put_device(child); 1248 1249 return false; 1250 } 1251 1252 return true; 1253 } 1254 1255 static void dpm_async_suspend_parent(struct device *dev, async_func_t func) 1256 { 1257 guard(mutex)(&dpm_list_mtx); 1258 1259 /* 1260 * If the device is suspended asynchronously and the parent's callback 1261 * deletes both the device and the parent itself, the parent object may 1262 * be freed while this function is running, so avoid that by checking 1263 * if the device has been deleted already as the parent cannot be 1264 * deleted before it. 1265 */ 1266 if (!device_pm_initialized(dev)) 1267 return; 1268 1269 /* Start processing the device's parent if it is "async". */ 1270 if (dev->parent) 1271 dpm_async_with_cleanup(dev->parent, func); 1272 } 1273 1274 /** 1275 * resume_event - Return a "resume" message for given "suspend" sleep state. 1276 * @sleep_state: PM message representing a sleep state. 1277 * 1278 * Return a PM message representing the resume event corresponding to given 1279 * sleep state. 1280 */ 1281 static pm_message_t resume_event(pm_message_t sleep_state) 1282 { 1283 switch (sleep_state.event) { 1284 case PM_EVENT_SUSPEND: 1285 return PMSG_RESUME; 1286 case PM_EVENT_FREEZE: 1287 case PM_EVENT_QUIESCE: 1288 return PMSG_RECOVER; 1289 case PM_EVENT_HIBERNATE: 1290 return PMSG_RESTORE; 1291 } 1292 return PMSG_ON; 1293 } 1294 1295 static void dpm_superior_set_must_resume(struct device *dev) 1296 { 1297 struct device_link *link; 1298 int idx; 1299 1300 if (dev->parent) 1301 dev->parent->power.must_resume = true; 1302 1303 idx = device_links_read_lock(); 1304 1305 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) 1306 link->supplier->power.must_resume = true; 1307 1308 device_links_read_unlock(idx); 1309 } 1310 1311 static void async_suspend_noirq(void *data, async_cookie_t cookie); 1312 1313 /** 1314 * device_suspend_noirq - Execute a "noirq suspend" callback for given device. 1315 * @dev: Device to handle. 1316 * @state: PM transition of the system being carried out. 1317 * @async: If true, the device is being suspended asynchronously. 1318 * 1319 * The driver of @dev will not receive interrupts while this function is being 1320 * executed. 1321 */ 1322 static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async) 1323 { 1324 pm_callback_t callback = NULL; 1325 const char *info = NULL; 1326 int error = 0; 1327 1328 TRACE_DEVICE(dev); 1329 TRACE_SUSPEND(0); 1330 1331 dpm_wait_for_subordinate(dev, async); 1332 1333 if (async_error) 1334 goto Complete; 1335 1336 if (dev->power.syscore || dev->power.direct_complete) 1337 goto Complete; 1338 1339 if (dev->pm_domain) { 1340 info = "noirq power domain "; 1341 callback = pm_noirq_op(&dev->pm_domain->ops, state); 1342 } else if (dev->type && dev->type->pm) { 1343 info = "noirq type "; 1344 callback = pm_noirq_op(dev->type->pm, state); 1345 } else if (dev->class && dev->class->pm) { 1346 info = "noirq class "; 1347 callback = pm_noirq_op(dev->class->pm, state); 1348 } else if (dev->bus && dev->bus->pm) { 1349 info = "noirq bus "; 1350 callback = pm_noirq_op(dev->bus->pm, state); 1351 } 1352 if (callback) 1353 goto Run; 1354 1355 if (dev_pm_skip_suspend(dev)) 1356 goto Skip; 1357 1358 if (dev->driver && dev->driver->pm) { 1359 info = "noirq driver "; 1360 callback = pm_noirq_op(dev->driver->pm, state); 1361 } 1362 1363 Run: 1364 error = dpm_run_callback(callback, dev, state, info); 1365 if (error) { 1366 async_error = error; 1367 dpm_save_failed_dev(dev_name(dev)); 1368 pm_dev_err(dev, state, async ? " async noirq" : " noirq", error); 1369 goto Complete; 1370 } 1371 1372 Skip: 1373 dev->power.is_noirq_suspended = true; 1374 1375 /* 1376 * Devices must be resumed unless they are explicitly allowed to be left 1377 * in suspend, but even in that case skipping the resume of devices that 1378 * were in use right before the system suspend (as indicated by their 1379 * runtime PM usage counters and child counters) would be suboptimal. 1380 */ 1381 if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) && 1382 dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev)) 1383 dev->power.must_resume = true; 1384 1385 if (dev->power.must_resume) 1386 dpm_superior_set_must_resume(dev); 1387 1388 Complete: 1389 complete_all(&dev->power.completion); 1390 TRACE_SUSPEND(error); 1391 1392 if (error || async_error) 1393 return error; 1394 1395 dpm_async_suspend_parent(dev, async_suspend_noirq); 1396 1397 return 0; 1398 } 1399 1400 static void async_suspend_noirq(void *data, async_cookie_t cookie) 1401 { 1402 struct device *dev = data; 1403 1404 device_suspend_noirq(dev, pm_transition, true); 1405 put_device(dev); 1406 } 1407 1408 static int dpm_noirq_suspend_devices(pm_message_t state) 1409 { 1410 ktime_t starttime = ktime_get(); 1411 struct device *dev; 1412 int error = 0; 1413 1414 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true); 1415 1416 pm_transition = state; 1417 async_error = 0; 1418 1419 mutex_lock(&dpm_list_mtx); 1420 1421 /* 1422 * Start processing "async" leaf devices upfront so they don't need to 1423 * wait for the "sync" devices they don't depend on. 1424 */ 1425 list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) { 1426 dpm_clear_async_state(dev); 1427 if (dpm_leaf_device(dev)) 1428 dpm_async_with_cleanup(dev, async_suspend_noirq); 1429 } 1430 1431 while (!list_empty(&dpm_late_early_list)) { 1432 dev = to_device(dpm_late_early_list.prev); 1433 1434 list_move(&dev->power.entry, &dpm_noirq_list); 1435 1436 if (dpm_async_fn(dev, async_suspend_noirq)) 1437 continue; 1438 1439 get_device(dev); 1440 1441 mutex_unlock(&dpm_list_mtx); 1442 1443 error = device_suspend_noirq(dev, state, false); 1444 1445 put_device(dev); 1446 1447 mutex_lock(&dpm_list_mtx); 1448 1449 if (error || async_error) { 1450 /* 1451 * Move all devices to the target list to resume them 1452 * properly. 1453 */ 1454 list_splice(&dpm_late_early_list, &dpm_noirq_list); 1455 break; 1456 } 1457 } 1458 1459 mutex_unlock(&dpm_list_mtx); 1460 1461 async_synchronize_full(); 1462 if (!error) 1463 error = async_error; 1464 1465 if (error) 1466 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); 1467 1468 dpm_show_time(starttime, state, error, "noirq"); 1469 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false); 1470 return error; 1471 } 1472 1473 /** 1474 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices. 1475 * @state: PM transition of the system being carried out. 1476 * 1477 * Prevent device drivers' interrupt handlers from being called and invoke 1478 * "noirq" suspend callbacks for all non-sysdev devices. 1479 */ 1480 int dpm_suspend_noirq(pm_message_t state) 1481 { 1482 int ret; 1483 1484 device_wakeup_arm_wake_irqs(); 1485 suspend_device_irqs(); 1486 1487 ret = dpm_noirq_suspend_devices(state); 1488 if (ret) 1489 dpm_resume_noirq(resume_event(state)); 1490 1491 return ret; 1492 } 1493 1494 static void dpm_propagate_wakeup_to_parent(struct device *dev) 1495 { 1496 struct device *parent = dev->parent; 1497 1498 if (!parent) 1499 return; 1500 1501 spin_lock_irq(&parent->power.lock); 1502 1503 if (device_wakeup_path(dev) && !parent->power.ignore_children) 1504 parent->power.wakeup_path = true; 1505 1506 spin_unlock_irq(&parent->power.lock); 1507 } 1508 1509 static void async_suspend_late(void *data, async_cookie_t cookie); 1510 1511 /** 1512 * device_suspend_late - Execute a "late suspend" callback for given device. 1513 * @dev: Device to handle. 1514 * @state: PM transition of the system being carried out. 1515 * @async: If true, the device is being suspended asynchronously. 1516 * 1517 * Runtime PM is disabled for @dev while this function is being executed. 1518 */ 1519 static int device_suspend_late(struct device *dev, pm_message_t state, bool async) 1520 { 1521 pm_callback_t callback = NULL; 1522 const char *info = NULL; 1523 int error = 0; 1524 1525 TRACE_DEVICE(dev); 1526 TRACE_SUSPEND(0); 1527 1528 /* 1529 * Disable runtime PM for the device without checking if there is a 1530 * pending resume request for it. 1531 */ 1532 __pm_runtime_disable(dev, false); 1533 1534 dpm_wait_for_subordinate(dev, async); 1535 1536 if (async_error) 1537 goto Complete; 1538 1539 if (pm_wakeup_pending()) { 1540 async_error = -EBUSY; 1541 goto Complete; 1542 } 1543 1544 if (dev->power.syscore || dev->power.direct_complete) 1545 goto Complete; 1546 1547 if (dev->pm_domain) { 1548 info = "late power domain "; 1549 callback = pm_late_early_op(&dev->pm_domain->ops, state); 1550 } else if (dev->type && dev->type->pm) { 1551 info = "late type "; 1552 callback = pm_late_early_op(dev->type->pm, state); 1553 } else if (dev->class && dev->class->pm) { 1554 info = "late class "; 1555 callback = pm_late_early_op(dev->class->pm, state); 1556 } else if (dev->bus && dev->bus->pm) { 1557 info = "late bus "; 1558 callback = pm_late_early_op(dev->bus->pm, state); 1559 } 1560 if (callback) 1561 goto Run; 1562 1563 if (dev_pm_skip_suspend(dev)) 1564 goto Skip; 1565 1566 if (dev->driver && dev->driver->pm) { 1567 info = "late driver "; 1568 callback = pm_late_early_op(dev->driver->pm, state); 1569 } 1570 1571 Run: 1572 error = dpm_run_callback(callback, dev, state, info); 1573 if (error) { 1574 async_error = error; 1575 dpm_save_failed_dev(dev_name(dev)); 1576 pm_dev_err(dev, state, async ? " async late" : " late", error); 1577 goto Complete; 1578 } 1579 dpm_propagate_wakeup_to_parent(dev); 1580 1581 Skip: 1582 dev->power.is_late_suspended = true; 1583 1584 Complete: 1585 TRACE_SUSPEND(error); 1586 complete_all(&dev->power.completion); 1587 1588 if (error || async_error) 1589 return error; 1590 1591 dpm_async_suspend_parent(dev, async_suspend_late); 1592 1593 return 0; 1594 } 1595 1596 static void async_suspend_late(void *data, async_cookie_t cookie) 1597 { 1598 struct device *dev = data; 1599 1600 device_suspend_late(dev, pm_transition, true); 1601 put_device(dev); 1602 } 1603 1604 /** 1605 * dpm_suspend_late - Execute "late suspend" callbacks for all devices. 1606 * @state: PM transition of the system being carried out. 1607 */ 1608 int dpm_suspend_late(pm_message_t state) 1609 { 1610 ktime_t starttime = ktime_get(); 1611 struct device *dev; 1612 int error = 0; 1613 1614 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true); 1615 1616 pm_transition = state; 1617 async_error = 0; 1618 1619 wake_up_all_idle_cpus(); 1620 1621 mutex_lock(&dpm_list_mtx); 1622 1623 /* 1624 * Start processing "async" leaf devices upfront so they don't need to 1625 * wait for the "sync" devices they don't depend on. 1626 */ 1627 list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) { 1628 dpm_clear_async_state(dev); 1629 if (dpm_leaf_device(dev)) 1630 dpm_async_with_cleanup(dev, async_suspend_late); 1631 } 1632 1633 while (!list_empty(&dpm_suspended_list)) { 1634 dev = to_device(dpm_suspended_list.prev); 1635 1636 list_move(&dev->power.entry, &dpm_late_early_list); 1637 1638 if (dpm_async_fn(dev, async_suspend_late)) 1639 continue; 1640 1641 get_device(dev); 1642 1643 mutex_unlock(&dpm_list_mtx); 1644 1645 error = device_suspend_late(dev, state, false); 1646 1647 put_device(dev); 1648 1649 mutex_lock(&dpm_list_mtx); 1650 1651 if (error || async_error) { 1652 /* 1653 * Move all devices to the target list to resume them 1654 * properly. 1655 */ 1656 list_splice(&dpm_suspended_list, &dpm_late_early_list); 1657 break; 1658 } 1659 } 1660 1661 mutex_unlock(&dpm_list_mtx); 1662 1663 async_synchronize_full(); 1664 if (!error) 1665 error = async_error; 1666 1667 if (error) { 1668 dpm_save_failed_step(SUSPEND_SUSPEND_LATE); 1669 dpm_resume_early(resume_event(state)); 1670 } 1671 dpm_show_time(starttime, state, error, "late"); 1672 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false); 1673 return error; 1674 } 1675 1676 /** 1677 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks. 1678 * @state: PM transition of the system being carried out. 1679 */ 1680 int dpm_suspend_end(pm_message_t state) 1681 { 1682 ktime_t starttime = ktime_get(); 1683 int error; 1684 1685 error = dpm_suspend_late(state); 1686 if (error) 1687 goto out; 1688 1689 error = dpm_suspend_noirq(state); 1690 if (error) 1691 dpm_resume_early(resume_event(state)); 1692 1693 out: 1694 dpm_show_time(starttime, state, error, "end"); 1695 return error; 1696 } 1697 EXPORT_SYMBOL_GPL(dpm_suspend_end); 1698 1699 /** 1700 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. 1701 * @dev: Device to suspend. 1702 * @state: PM transition of the system being carried out. 1703 * @cb: Suspend callback to execute. 1704 * @info: string description of caller. 1705 */ 1706 static int legacy_suspend(struct device *dev, pm_message_t state, 1707 int (*cb)(struct device *dev, pm_message_t state), 1708 const char *info) 1709 { 1710 int error; 1711 ktime_t calltime; 1712 1713 calltime = initcall_debug_start(dev, cb); 1714 1715 trace_device_pm_callback_start(dev, info, state.event); 1716 error = cb(dev, state); 1717 trace_device_pm_callback_end(dev, error); 1718 suspend_report_result(dev, cb, error); 1719 1720 initcall_debug_report(dev, calltime, cb, error); 1721 1722 return error; 1723 } 1724 1725 static void dpm_clear_superiors_direct_complete(struct device *dev) 1726 { 1727 struct device_link *link; 1728 int idx; 1729 1730 if (dev->parent) { 1731 spin_lock_irq(&dev->parent->power.lock); 1732 dev->parent->power.direct_complete = false; 1733 spin_unlock_irq(&dev->parent->power.lock); 1734 } 1735 1736 idx = device_links_read_lock(); 1737 1738 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) { 1739 spin_lock_irq(&link->supplier->power.lock); 1740 link->supplier->power.direct_complete = false; 1741 spin_unlock_irq(&link->supplier->power.lock); 1742 } 1743 1744 device_links_read_unlock(idx); 1745 } 1746 1747 static void async_suspend(void *data, async_cookie_t cookie); 1748 1749 /** 1750 * device_suspend - Execute "suspend" callbacks for given device. 1751 * @dev: Device to handle. 1752 * @state: PM transition of the system being carried out. 1753 * @async: If true, the device is being suspended asynchronously. 1754 */ 1755 static int device_suspend(struct device *dev, pm_message_t state, bool async) 1756 { 1757 pm_callback_t callback = NULL; 1758 const char *info = NULL; 1759 int error = 0; 1760 DECLARE_DPM_WATCHDOG_ON_STACK(wd); 1761 1762 TRACE_DEVICE(dev); 1763 TRACE_SUSPEND(0); 1764 1765 dpm_wait_for_subordinate(dev, async); 1766 1767 if (async_error) { 1768 dev->power.direct_complete = false; 1769 goto Complete; 1770 } 1771 1772 /* 1773 * Wait for possible runtime PM transitions of the device in progress 1774 * to complete and if there's a runtime resume request pending for it, 1775 * resume it before proceeding with invoking the system-wide suspend 1776 * callbacks for it. 1777 * 1778 * If the system-wide suspend callbacks below change the configuration 1779 * of the device, they must disable runtime PM for it or otherwise 1780 * ensure that its runtime-resume callbacks will not be confused by that 1781 * change in case they are invoked going forward. 1782 */ 1783 pm_runtime_barrier(dev); 1784 1785 if (pm_wakeup_pending()) { 1786 dev->power.direct_complete = false; 1787 async_error = -EBUSY; 1788 goto Complete; 1789 } 1790 1791 if (dev->power.syscore) 1792 goto Complete; 1793 1794 /* Avoid direct_complete to let wakeup_path propagate. */ 1795 if (device_may_wakeup(dev) || device_wakeup_path(dev)) 1796 dev->power.direct_complete = false; 1797 1798 if (dev->power.direct_complete) { 1799 if (pm_runtime_status_suspended(dev)) { 1800 pm_runtime_disable(dev); 1801 if (pm_runtime_status_suspended(dev)) { 1802 pm_dev_dbg(dev, state, "direct-complete "); 1803 dev->power.is_suspended = true; 1804 goto Complete; 1805 } 1806 1807 pm_runtime_enable(dev); 1808 } 1809 dev->power.direct_complete = false; 1810 } 1811 1812 dev->power.may_skip_resume = true; 1813 dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME); 1814 1815 dpm_watchdog_set(&wd, dev); 1816 device_lock(dev); 1817 1818 if (dev->pm_domain) { 1819 info = "power domain "; 1820 callback = pm_op(&dev->pm_domain->ops, state); 1821 goto Run; 1822 } 1823 1824 if (dev->type && dev->type->pm) { 1825 info = "type "; 1826 callback = pm_op(dev->type->pm, state); 1827 goto Run; 1828 } 1829 1830 if (dev->class && dev->class->pm) { 1831 info = "class "; 1832 callback = pm_op(dev->class->pm, state); 1833 goto Run; 1834 } 1835 1836 if (dev->bus) { 1837 if (dev->bus->pm) { 1838 info = "bus "; 1839 callback = pm_op(dev->bus->pm, state); 1840 } else if (dev->bus->suspend) { 1841 pm_dev_dbg(dev, state, "legacy bus "); 1842 error = legacy_suspend(dev, state, dev->bus->suspend, 1843 "legacy bus "); 1844 goto End; 1845 } 1846 } 1847 1848 Run: 1849 if (!callback && dev->driver && dev->driver->pm) { 1850 info = "driver "; 1851 callback = pm_op(dev->driver->pm, state); 1852 } 1853 1854 error = dpm_run_callback(callback, dev, state, info); 1855 1856 End: 1857 if (!error) { 1858 dev->power.is_suspended = true; 1859 if (device_may_wakeup(dev)) 1860 dev->power.wakeup_path = true; 1861 1862 dpm_propagate_wakeup_to_parent(dev); 1863 dpm_clear_superiors_direct_complete(dev); 1864 } 1865 1866 device_unlock(dev); 1867 dpm_watchdog_clear(&wd); 1868 1869 Complete: 1870 if (error) { 1871 async_error = error; 1872 dpm_save_failed_dev(dev_name(dev)); 1873 pm_dev_err(dev, state, async ? " async" : "", error); 1874 } 1875 1876 complete_all(&dev->power.completion); 1877 TRACE_SUSPEND(error); 1878 1879 if (error || async_error) 1880 return error; 1881 1882 dpm_async_suspend_parent(dev, async_suspend); 1883 1884 return 0; 1885 } 1886 1887 static void async_suspend(void *data, async_cookie_t cookie) 1888 { 1889 struct device *dev = data; 1890 1891 device_suspend(dev, pm_transition, true); 1892 put_device(dev); 1893 } 1894 1895 /** 1896 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. 1897 * @state: PM transition of the system being carried out. 1898 */ 1899 int dpm_suspend(pm_message_t state) 1900 { 1901 ktime_t starttime = ktime_get(); 1902 struct device *dev; 1903 int error = 0; 1904 1905 trace_suspend_resume(TPS("dpm_suspend"), state.event, true); 1906 might_sleep(); 1907 1908 devfreq_suspend(); 1909 cpufreq_suspend(); 1910 1911 pm_transition = state; 1912 async_error = 0; 1913 1914 mutex_lock(&dpm_list_mtx); 1915 1916 /* 1917 * Start processing "async" leaf devices upfront so they don't need to 1918 * wait for the "sync" devices they don't depend on. 1919 */ 1920 list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) { 1921 dpm_clear_async_state(dev); 1922 if (dpm_leaf_device(dev)) 1923 dpm_async_with_cleanup(dev, async_suspend); 1924 } 1925 1926 while (!list_empty(&dpm_prepared_list)) { 1927 dev = to_device(dpm_prepared_list.prev); 1928 1929 list_move(&dev->power.entry, &dpm_suspended_list); 1930 1931 if (dpm_async_fn(dev, async_suspend)) 1932 continue; 1933 1934 get_device(dev); 1935 1936 mutex_unlock(&dpm_list_mtx); 1937 1938 error = device_suspend(dev, state, false); 1939 1940 put_device(dev); 1941 1942 mutex_lock(&dpm_list_mtx); 1943 1944 if (error || async_error) { 1945 /* 1946 * Move all devices to the target list to resume them 1947 * properly. 1948 */ 1949 list_splice(&dpm_prepared_list, &dpm_suspended_list); 1950 break; 1951 } 1952 } 1953 1954 mutex_unlock(&dpm_list_mtx); 1955 1956 async_synchronize_full(); 1957 if (!error) 1958 error = async_error; 1959 1960 if (error) 1961 dpm_save_failed_step(SUSPEND_SUSPEND); 1962 1963 dpm_show_time(starttime, state, error, NULL); 1964 trace_suspend_resume(TPS("dpm_suspend"), state.event, false); 1965 return error; 1966 } 1967 1968 static bool device_prepare_smart_suspend(struct device *dev) 1969 { 1970 struct device_link *link; 1971 bool ret = true; 1972 int idx; 1973 1974 /* 1975 * The "smart suspend" feature is enabled for devices whose drivers ask 1976 * for it and for devices without PM callbacks. 1977 * 1978 * However, if "smart suspend" is not enabled for the device's parent 1979 * or any of its suppliers that take runtime PM into account, it cannot 1980 * be enabled for the device either. 1981 */ 1982 if (!dev->power.no_pm_callbacks && 1983 !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) 1984 return false; 1985 1986 if (dev->parent && !dev_pm_smart_suspend(dev->parent) && 1987 !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent)) 1988 return false; 1989 1990 idx = device_links_read_lock(); 1991 1992 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) { 1993 if (!(link->flags & DL_FLAG_PM_RUNTIME)) 1994 continue; 1995 1996 if (!dev_pm_smart_suspend(link->supplier) && 1997 !pm_runtime_blocked(link->supplier)) { 1998 ret = false; 1999 break; 2000 } 2001 } 2002 2003 device_links_read_unlock(idx); 2004 2005 return ret; 2006 } 2007 2008 /** 2009 * device_prepare - Prepare a device for system power transition. 2010 * @dev: Device to handle. 2011 * @state: PM transition of the system being carried out. 2012 * 2013 * Execute the ->prepare() callback(s) for given device. No new children of the 2014 * device may be registered after this function has returned. 2015 */ 2016 static int device_prepare(struct device *dev, pm_message_t state) 2017 { 2018 int (*callback)(struct device *) = NULL; 2019 bool smart_suspend; 2020 int ret = 0; 2021 2022 /* 2023 * If a device's parent goes into runtime suspend at the wrong time, 2024 * it won't be possible to resume the device. To prevent this we 2025 * block runtime suspend here, during the prepare phase, and allow 2026 * it again during the complete phase. 2027 */ 2028 pm_runtime_get_noresume(dev); 2029 /* 2030 * If runtime PM is disabled for the device at this point and it has 2031 * never been enabled so far, it should not be enabled until this system 2032 * suspend-resume cycle is complete, so prepare to trigger a warning on 2033 * subsequent attempts to enable it. 2034 */ 2035 smart_suspend = !pm_runtime_block_if_disabled(dev); 2036 2037 if (dev->power.syscore) 2038 return 0; 2039 2040 device_lock(dev); 2041 2042 dev->power.wakeup_path = false; 2043 2044 if (dev->power.no_pm_callbacks) 2045 goto unlock; 2046 2047 if (dev->pm_domain) 2048 callback = dev->pm_domain->ops.prepare; 2049 else if (dev->type && dev->type->pm) 2050 callback = dev->type->pm->prepare; 2051 else if (dev->class && dev->class->pm) 2052 callback = dev->class->pm->prepare; 2053 else if (dev->bus && dev->bus->pm) 2054 callback = dev->bus->pm->prepare; 2055 2056 if (!callback && dev->driver && dev->driver->pm) 2057 callback = dev->driver->pm->prepare; 2058 2059 if (callback) 2060 ret = callback(dev); 2061 2062 unlock: 2063 device_unlock(dev); 2064 2065 if (ret < 0) { 2066 suspend_report_result(dev, callback, ret); 2067 pm_runtime_put(dev); 2068 return ret; 2069 } 2070 /* Do not enable "smart suspend" for devices with disabled runtime PM. */ 2071 if (smart_suspend) 2072 smart_suspend = device_prepare_smart_suspend(dev); 2073 2074 spin_lock_irq(&dev->power.lock); 2075 2076 dev->power.smart_suspend = smart_suspend; 2077 /* 2078 * A positive return value from ->prepare() means "this device appears 2079 * to be runtime-suspended and its state is fine, so if it really is 2080 * runtime-suspended, you can leave it in that state provided that you 2081 * will do the same thing with all of its descendants". This only 2082 * applies to suspend transitions, however. 2083 */ 2084 dev->power.direct_complete = state.event == PM_EVENT_SUSPEND && 2085 (ret > 0 || dev->power.no_pm_callbacks) && 2086 !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE); 2087 2088 spin_unlock_irq(&dev->power.lock); 2089 2090 return 0; 2091 } 2092 2093 /** 2094 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition. 2095 * @state: PM transition of the system being carried out. 2096 * 2097 * Execute the ->prepare() callback(s) for all devices. 2098 */ 2099 int dpm_prepare(pm_message_t state) 2100 { 2101 int error = 0; 2102 2103 trace_suspend_resume(TPS("dpm_prepare"), state.event, true); 2104 might_sleep(); 2105 2106 /* 2107 * Give a chance for the known devices to complete their probes, before 2108 * disable probing of devices. This sync point is important at least 2109 * at boot time + hibernation restore. 2110 */ 2111 wait_for_device_probe(); 2112 /* 2113 * It is unsafe if probing of devices will happen during suspend or 2114 * hibernation and system behavior will be unpredictable in this case. 2115 * So, let's prohibit device's probing here and defer their probes 2116 * instead. The normal behavior will be restored in dpm_complete(). 2117 */ 2118 device_block_probing(); 2119 2120 mutex_lock(&dpm_list_mtx); 2121 while (!list_empty(&dpm_list) && !error) { 2122 struct device *dev = to_device(dpm_list.next); 2123 2124 get_device(dev); 2125 2126 mutex_unlock(&dpm_list_mtx); 2127 2128 trace_device_pm_callback_start(dev, "", state.event); 2129 error = device_prepare(dev, state); 2130 trace_device_pm_callback_end(dev, error); 2131 2132 mutex_lock(&dpm_list_mtx); 2133 2134 if (!error) { 2135 dev->power.is_prepared = true; 2136 if (!list_empty(&dev->power.entry)) 2137 list_move_tail(&dev->power.entry, &dpm_prepared_list); 2138 } else if (error == -EAGAIN) { 2139 error = 0; 2140 } else { 2141 dev_info(dev, "not prepared for power transition: code %d\n", 2142 error); 2143 } 2144 2145 mutex_unlock(&dpm_list_mtx); 2146 2147 put_device(dev); 2148 2149 mutex_lock(&dpm_list_mtx); 2150 } 2151 mutex_unlock(&dpm_list_mtx); 2152 trace_suspend_resume(TPS("dpm_prepare"), state.event, false); 2153 return error; 2154 } 2155 2156 /** 2157 * dpm_suspend_start - Prepare devices for PM transition and suspend them. 2158 * @state: PM transition of the system being carried out. 2159 * 2160 * Prepare all non-sysdev devices for system PM transition and execute "suspend" 2161 * callbacks for them. 2162 */ 2163 int dpm_suspend_start(pm_message_t state) 2164 { 2165 ktime_t starttime = ktime_get(); 2166 int error; 2167 2168 error = dpm_prepare(state); 2169 if (error) 2170 dpm_save_failed_step(SUSPEND_PREPARE); 2171 else 2172 error = dpm_suspend(state); 2173 2174 dpm_show_time(starttime, state, error, "start"); 2175 return error; 2176 } 2177 EXPORT_SYMBOL_GPL(dpm_suspend_start); 2178 2179 void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret) 2180 { 2181 if (ret) 2182 dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret); 2183 } 2184 EXPORT_SYMBOL_GPL(__suspend_report_result); 2185 2186 /** 2187 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete. 2188 * @subordinate: Device that needs to wait for @dev. 2189 * @dev: Device to wait for. 2190 */ 2191 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev) 2192 { 2193 dpm_wait(dev, subordinate->power.async_suspend); 2194 return async_error; 2195 } 2196 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev); 2197 2198 /** 2199 * dpm_for_each_dev - device iterator. 2200 * @data: data for the callback. 2201 * @fn: function to be called for each device. 2202 * 2203 * Iterate over devices in dpm_list, and call @fn for each device, 2204 * passing it @data. 2205 */ 2206 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *)) 2207 { 2208 struct device *dev; 2209 2210 if (!fn) 2211 return; 2212 2213 device_pm_lock(); 2214 list_for_each_entry(dev, &dpm_list, power.entry) 2215 fn(dev, data); 2216 device_pm_unlock(); 2217 } 2218 EXPORT_SYMBOL_GPL(dpm_for_each_dev); 2219 2220 static bool pm_ops_is_empty(const struct dev_pm_ops *ops) 2221 { 2222 if (!ops) 2223 return true; 2224 2225 return !ops->prepare && 2226 !ops->suspend && 2227 !ops->suspend_late && 2228 !ops->suspend_noirq && 2229 !ops->resume_noirq && 2230 !ops->resume_early && 2231 !ops->resume && 2232 !ops->complete; 2233 } 2234 2235 void device_pm_check_callbacks(struct device *dev) 2236 { 2237 unsigned long flags; 2238 2239 spin_lock_irqsave(&dev->power.lock, flags); 2240 dev->power.no_pm_callbacks = 2241 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) && 2242 !dev->bus->suspend && !dev->bus->resume)) && 2243 (!dev->class || pm_ops_is_empty(dev->class->pm)) && 2244 (!dev->type || pm_ops_is_empty(dev->type->pm)) && 2245 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) && 2246 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) && 2247 !dev->driver->suspend && !dev->driver->resume)); 2248 spin_unlock_irqrestore(&dev->power.lock, flags); 2249 } 2250 2251 bool dev_pm_skip_suspend(struct device *dev) 2252 { 2253 return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev); 2254 } 2255