1 /* 2 * drivers/base/power/main.c - Where the driver meets power management. 3 * 4 * Copyright (c) 2003 Patrick Mochel 5 * Copyright (c) 2003 Open Source Development Lab 6 * 7 * This file is released under the GPLv2 8 * 9 * 10 * The driver model core calls device_pm_add() when a device is registered. 11 * This will initialize the embedded device_pm_info object in the device 12 * and add it to the list of power-controlled devices. sysfs entries for 13 * controlling device power management will also be added. 14 * 15 * A separate list is used for keeping track of power info, because the power 16 * domain dependencies may differ from the ancestral dependencies that the 17 * subsystem list maintains. 18 */ 19 20 #include <linux/device.h> 21 #include <linux/kallsyms.h> 22 #include <linux/export.h> 23 #include <linux/mutex.h> 24 #include <linux/pm.h> 25 #include <linux/pm_runtime.h> 26 #include <linux/pm-trace.h> 27 #include <linux/pm_wakeirq.h> 28 #include <linux/interrupt.h> 29 #include <linux/sched.h> 30 #include <linux/sched/debug.h> 31 #include <linux/async.h> 32 #include <linux/suspend.h> 33 #include <trace/events/power.h> 34 #include <linux/cpufreq.h> 35 #include <linux/cpuidle.h> 36 #include <linux/timer.h> 37 38 #include "../base.h" 39 #include "power.h" 40 41 typedef int (*pm_callback_t)(struct device *); 42 43 /* 44 * The entries in the dpm_list list are in a depth first order, simply 45 * because children are guaranteed to be discovered after parents, and 46 * are inserted at the back of the list on discovery. 47 * 48 * Since device_pm_add() may be called with a device lock held, 49 * we must never try to acquire a device lock while holding 50 * dpm_list_mutex. 51 */ 52 53 LIST_HEAD(dpm_list); 54 static LIST_HEAD(dpm_prepared_list); 55 static LIST_HEAD(dpm_suspended_list); 56 static LIST_HEAD(dpm_late_early_list); 57 static LIST_HEAD(dpm_noirq_list); 58 59 struct suspend_stats suspend_stats; 60 static DEFINE_MUTEX(dpm_list_mtx); 61 static pm_message_t pm_transition; 62 63 static int async_error; 64 65 static const char *pm_verb(int event) 66 { 67 switch (event) { 68 case PM_EVENT_SUSPEND: 69 return "suspend"; 70 case PM_EVENT_RESUME: 71 return "resume"; 72 case PM_EVENT_FREEZE: 73 return "freeze"; 74 case PM_EVENT_QUIESCE: 75 return "quiesce"; 76 case PM_EVENT_HIBERNATE: 77 return "hibernate"; 78 case PM_EVENT_THAW: 79 return "thaw"; 80 case PM_EVENT_RESTORE: 81 return "restore"; 82 case PM_EVENT_RECOVER: 83 return "recover"; 84 default: 85 return "(unknown PM event)"; 86 } 87 } 88 89 /** 90 * device_pm_sleep_init - Initialize system suspend-related device fields. 91 * @dev: Device object being initialized. 92 */ 93 void device_pm_sleep_init(struct device *dev) 94 { 95 dev->power.is_prepared = false; 96 dev->power.is_suspended = false; 97 dev->power.is_noirq_suspended = false; 98 dev->power.is_late_suspended = false; 99 init_completion(&dev->power.completion); 100 complete_all(&dev->power.completion); 101 dev->power.wakeup = NULL; 102 INIT_LIST_HEAD(&dev->power.entry); 103 } 104 105 /** 106 * device_pm_lock - Lock the list of active devices used by the PM core. 107 */ 108 void device_pm_lock(void) 109 { 110 mutex_lock(&dpm_list_mtx); 111 } 112 113 /** 114 * device_pm_unlock - Unlock the list of active devices used by the PM core. 115 */ 116 void device_pm_unlock(void) 117 { 118 mutex_unlock(&dpm_list_mtx); 119 } 120 121 /** 122 * device_pm_add - Add a device to the PM core's list of active devices. 123 * @dev: Device to add to the list. 124 */ 125 void device_pm_add(struct device *dev) 126 { 127 pr_debug("PM: Adding info for %s:%s\n", 128 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); 129 device_pm_check_callbacks(dev); 130 mutex_lock(&dpm_list_mtx); 131 if (dev->parent && dev->parent->power.is_prepared) 132 dev_warn(dev, "parent %s should not be sleeping\n", 133 dev_name(dev->parent)); 134 list_add_tail(&dev->power.entry, &dpm_list); 135 dev->power.in_dpm_list = true; 136 mutex_unlock(&dpm_list_mtx); 137 } 138 139 /** 140 * device_pm_remove - Remove a device from the PM core's list of active devices. 141 * @dev: Device to be removed from the list. 142 */ 143 void device_pm_remove(struct device *dev) 144 { 145 pr_debug("PM: Removing info for %s:%s\n", 146 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); 147 complete_all(&dev->power.completion); 148 mutex_lock(&dpm_list_mtx); 149 list_del_init(&dev->power.entry); 150 dev->power.in_dpm_list = false; 151 mutex_unlock(&dpm_list_mtx); 152 device_wakeup_disable(dev); 153 pm_runtime_remove(dev); 154 device_pm_check_callbacks(dev); 155 } 156 157 /** 158 * device_pm_move_before - Move device in the PM core's list of active devices. 159 * @deva: Device to move in dpm_list. 160 * @devb: Device @deva should come before. 161 */ 162 void device_pm_move_before(struct device *deva, struct device *devb) 163 { 164 pr_debug("PM: Moving %s:%s before %s:%s\n", 165 deva->bus ? deva->bus->name : "No Bus", dev_name(deva), 166 devb->bus ? devb->bus->name : "No Bus", dev_name(devb)); 167 /* Delete deva from dpm_list and reinsert before devb. */ 168 list_move_tail(&deva->power.entry, &devb->power.entry); 169 } 170 171 /** 172 * device_pm_move_after - Move device in the PM core's list of active devices. 173 * @deva: Device to move in dpm_list. 174 * @devb: Device @deva should come after. 175 */ 176 void device_pm_move_after(struct device *deva, struct device *devb) 177 { 178 pr_debug("PM: Moving %s:%s after %s:%s\n", 179 deva->bus ? deva->bus->name : "No Bus", dev_name(deva), 180 devb->bus ? devb->bus->name : "No Bus", dev_name(devb)); 181 /* Delete deva from dpm_list and reinsert after devb. */ 182 list_move(&deva->power.entry, &devb->power.entry); 183 } 184 185 /** 186 * device_pm_move_last - Move device to end of the PM core's list of devices. 187 * @dev: Device to move in dpm_list. 188 */ 189 void device_pm_move_last(struct device *dev) 190 { 191 pr_debug("PM: Moving %s:%s to end of list\n", 192 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); 193 list_move_tail(&dev->power.entry, &dpm_list); 194 } 195 196 static ktime_t initcall_debug_start(struct device *dev) 197 { 198 ktime_t calltime = 0; 199 200 if (pm_print_times_enabled) { 201 pr_info("calling %s+ @ %i, parent: %s\n", 202 dev_name(dev), task_pid_nr(current), 203 dev->parent ? dev_name(dev->parent) : "none"); 204 calltime = ktime_get(); 205 } 206 207 return calltime; 208 } 209 210 static void initcall_debug_report(struct device *dev, ktime_t calltime, 211 int error, pm_message_t state, 212 const char *info) 213 { 214 ktime_t rettime; 215 s64 nsecs; 216 217 rettime = ktime_get(); 218 nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime)); 219 220 if (pm_print_times_enabled) { 221 pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), 222 error, (unsigned long long)nsecs >> 10); 223 } 224 } 225 226 /** 227 * dpm_wait - Wait for a PM operation to complete. 228 * @dev: Device to wait for. 229 * @async: If unset, wait only if the device's power.async_suspend flag is set. 230 */ 231 static void dpm_wait(struct device *dev, bool async) 232 { 233 if (!dev) 234 return; 235 236 if (async || (pm_async_enabled && dev->power.async_suspend)) 237 wait_for_completion(&dev->power.completion); 238 } 239 240 static int dpm_wait_fn(struct device *dev, void *async_ptr) 241 { 242 dpm_wait(dev, *((bool *)async_ptr)); 243 return 0; 244 } 245 246 static void dpm_wait_for_children(struct device *dev, bool async) 247 { 248 device_for_each_child(dev, &async, dpm_wait_fn); 249 } 250 251 static void dpm_wait_for_suppliers(struct device *dev, bool async) 252 { 253 struct device_link *link; 254 int idx; 255 256 idx = device_links_read_lock(); 257 258 /* 259 * If the supplier goes away right after we've checked the link to it, 260 * we'll wait for its completion to change the state, but that's fine, 261 * because the only things that will block as a result are the SRCU 262 * callbacks freeing the link objects for the links in the list we're 263 * walking. 264 */ 265 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) 266 if (READ_ONCE(link->status) != DL_STATE_DORMANT) 267 dpm_wait(link->supplier, async); 268 269 device_links_read_unlock(idx); 270 } 271 272 static void dpm_wait_for_superior(struct device *dev, bool async) 273 { 274 dpm_wait(dev->parent, async); 275 dpm_wait_for_suppliers(dev, async); 276 } 277 278 static void dpm_wait_for_consumers(struct device *dev, bool async) 279 { 280 struct device_link *link; 281 int idx; 282 283 idx = device_links_read_lock(); 284 285 /* 286 * The status of a device link can only be changed from "dormant" by a 287 * probe, but that cannot happen during system suspend/resume. In 288 * theory it can change to "dormant" at that time, but then it is 289 * reasonable to wait for the target device anyway (eg. if it goes 290 * away, it's better to wait for it to go away completely and then 291 * continue instead of trying to continue in parallel with its 292 * unregistration). 293 */ 294 list_for_each_entry_rcu(link, &dev->links.consumers, s_node) 295 if (READ_ONCE(link->status) != DL_STATE_DORMANT) 296 dpm_wait(link->consumer, async); 297 298 device_links_read_unlock(idx); 299 } 300 301 static void dpm_wait_for_subordinate(struct device *dev, bool async) 302 { 303 dpm_wait_for_children(dev, async); 304 dpm_wait_for_consumers(dev, async); 305 } 306 307 /** 308 * pm_op - Return the PM operation appropriate for given PM event. 309 * @ops: PM operations to choose from. 310 * @state: PM transition of the system being carried out. 311 */ 312 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state) 313 { 314 switch (state.event) { 315 #ifdef CONFIG_SUSPEND 316 case PM_EVENT_SUSPEND: 317 return ops->suspend; 318 case PM_EVENT_RESUME: 319 return ops->resume; 320 #endif /* CONFIG_SUSPEND */ 321 #ifdef CONFIG_HIBERNATE_CALLBACKS 322 case PM_EVENT_FREEZE: 323 case PM_EVENT_QUIESCE: 324 return ops->freeze; 325 case PM_EVENT_HIBERNATE: 326 return ops->poweroff; 327 case PM_EVENT_THAW: 328 case PM_EVENT_RECOVER: 329 return ops->thaw; 330 break; 331 case PM_EVENT_RESTORE: 332 return ops->restore; 333 #endif /* CONFIG_HIBERNATE_CALLBACKS */ 334 } 335 336 return NULL; 337 } 338 339 /** 340 * pm_late_early_op - Return the PM operation appropriate for given PM event. 341 * @ops: PM operations to choose from. 342 * @state: PM transition of the system being carried out. 343 * 344 * Runtime PM is disabled for @dev while this function is being executed. 345 */ 346 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops, 347 pm_message_t state) 348 { 349 switch (state.event) { 350 #ifdef CONFIG_SUSPEND 351 case PM_EVENT_SUSPEND: 352 return ops->suspend_late; 353 case PM_EVENT_RESUME: 354 return ops->resume_early; 355 #endif /* CONFIG_SUSPEND */ 356 #ifdef CONFIG_HIBERNATE_CALLBACKS 357 case PM_EVENT_FREEZE: 358 case PM_EVENT_QUIESCE: 359 return ops->freeze_late; 360 case PM_EVENT_HIBERNATE: 361 return ops->poweroff_late; 362 case PM_EVENT_THAW: 363 case PM_EVENT_RECOVER: 364 return ops->thaw_early; 365 case PM_EVENT_RESTORE: 366 return ops->restore_early; 367 #endif /* CONFIG_HIBERNATE_CALLBACKS */ 368 } 369 370 return NULL; 371 } 372 373 /** 374 * pm_noirq_op - Return the PM operation appropriate for given PM event. 375 * @ops: PM operations to choose from. 376 * @state: PM transition of the system being carried out. 377 * 378 * The driver of @dev will not receive interrupts while this function is being 379 * executed. 380 */ 381 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state) 382 { 383 switch (state.event) { 384 #ifdef CONFIG_SUSPEND 385 case PM_EVENT_SUSPEND: 386 return ops->suspend_noirq; 387 case PM_EVENT_RESUME: 388 return ops->resume_noirq; 389 #endif /* CONFIG_SUSPEND */ 390 #ifdef CONFIG_HIBERNATE_CALLBACKS 391 case PM_EVENT_FREEZE: 392 case PM_EVENT_QUIESCE: 393 return ops->freeze_noirq; 394 case PM_EVENT_HIBERNATE: 395 return ops->poweroff_noirq; 396 case PM_EVENT_THAW: 397 case PM_EVENT_RECOVER: 398 return ops->thaw_noirq; 399 case PM_EVENT_RESTORE: 400 return ops->restore_noirq; 401 #endif /* CONFIG_HIBERNATE_CALLBACKS */ 402 } 403 404 return NULL; 405 } 406 407 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info) 408 { 409 dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event), 410 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ? 411 ", may wakeup" : ""); 412 } 413 414 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info, 415 int error) 416 { 417 printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n", 418 dev_name(dev), pm_verb(state.event), info, error); 419 } 420 421 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error, 422 const char *info) 423 { 424 ktime_t calltime; 425 u64 usecs64; 426 int usecs; 427 428 calltime = ktime_get(); 429 usecs64 = ktime_to_ns(ktime_sub(calltime, starttime)); 430 do_div(usecs64, NSEC_PER_USEC); 431 usecs = usecs64; 432 if (usecs == 0) 433 usecs = 1; 434 435 pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n", 436 info ?: "", info ? " " : "", pm_verb(state.event), 437 error ? "aborted" : "complete", 438 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); 439 } 440 441 static int dpm_run_callback(pm_callback_t cb, struct device *dev, 442 pm_message_t state, const char *info) 443 { 444 ktime_t calltime; 445 int error; 446 447 if (!cb) 448 return 0; 449 450 calltime = initcall_debug_start(dev); 451 452 pm_dev_dbg(dev, state, info); 453 trace_device_pm_callback_start(dev, info, state.event); 454 error = cb(dev); 455 trace_device_pm_callback_end(dev, error); 456 suspend_report_result(cb, error); 457 458 initcall_debug_report(dev, calltime, error, state, info); 459 460 return error; 461 } 462 463 #ifdef CONFIG_DPM_WATCHDOG 464 struct dpm_watchdog { 465 struct device *dev; 466 struct task_struct *tsk; 467 struct timer_list timer; 468 }; 469 470 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \ 471 struct dpm_watchdog wd 472 473 /** 474 * dpm_watchdog_handler - Driver suspend / resume watchdog handler. 475 * @data: Watchdog object address. 476 * 477 * Called when a driver has timed out suspending or resuming. 478 * There's not much we can do here to recover so panic() to 479 * capture a crash-dump in pstore. 480 */ 481 static void dpm_watchdog_handler(unsigned long data) 482 { 483 struct dpm_watchdog *wd = (void *)data; 484 485 dev_emerg(wd->dev, "**** DPM device timeout ****\n"); 486 show_stack(wd->tsk, NULL); 487 panic("%s %s: unrecoverable failure\n", 488 dev_driver_string(wd->dev), dev_name(wd->dev)); 489 } 490 491 /** 492 * dpm_watchdog_set - Enable pm watchdog for given device. 493 * @wd: Watchdog. Must be allocated on the stack. 494 * @dev: Device to handle. 495 */ 496 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev) 497 { 498 struct timer_list *timer = &wd->timer; 499 500 wd->dev = dev; 501 wd->tsk = current; 502 503 init_timer_on_stack(timer); 504 /* use same timeout value for both suspend and resume */ 505 timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT; 506 timer->function = dpm_watchdog_handler; 507 timer->data = (unsigned long)wd; 508 add_timer(timer); 509 } 510 511 /** 512 * dpm_watchdog_clear - Disable suspend/resume watchdog. 513 * @wd: Watchdog to disable. 514 */ 515 static void dpm_watchdog_clear(struct dpm_watchdog *wd) 516 { 517 struct timer_list *timer = &wd->timer; 518 519 del_timer_sync(timer); 520 destroy_timer_on_stack(timer); 521 } 522 #else 523 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) 524 #define dpm_watchdog_set(x, y) 525 #define dpm_watchdog_clear(x) 526 #endif 527 528 /*------------------------- Resume routines -------------------------*/ 529 530 /** 531 * device_resume_noirq - Execute an "early resume" callback for given device. 532 * @dev: Device to handle. 533 * @state: PM transition of the system being carried out. 534 * @async: If true, the device is being resumed asynchronously. 535 * 536 * The driver of @dev will not receive interrupts while this function is being 537 * executed. 538 */ 539 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async) 540 { 541 pm_callback_t callback = NULL; 542 const char *info = NULL; 543 int error = 0; 544 545 TRACE_DEVICE(dev); 546 TRACE_RESUME(0); 547 548 if (dev->power.syscore || dev->power.direct_complete) 549 goto Out; 550 551 if (!dev->power.is_noirq_suspended) 552 goto Out; 553 554 dpm_wait_for_superior(dev, async); 555 556 if (dev->pm_domain) { 557 info = "noirq power domain "; 558 callback = pm_noirq_op(&dev->pm_domain->ops, state); 559 } else if (dev->type && dev->type->pm) { 560 info = "noirq type "; 561 callback = pm_noirq_op(dev->type->pm, state); 562 } else if (dev->class && dev->class->pm) { 563 info = "noirq class "; 564 callback = pm_noirq_op(dev->class->pm, state); 565 } else if (dev->bus && dev->bus->pm) { 566 info = "noirq bus "; 567 callback = pm_noirq_op(dev->bus->pm, state); 568 } 569 570 if (!callback && dev->driver && dev->driver->pm) { 571 info = "noirq driver "; 572 callback = pm_noirq_op(dev->driver->pm, state); 573 } 574 575 error = dpm_run_callback(callback, dev, state, info); 576 dev->power.is_noirq_suspended = false; 577 578 Out: 579 complete_all(&dev->power.completion); 580 TRACE_RESUME(error); 581 return error; 582 } 583 584 static bool is_async(struct device *dev) 585 { 586 return dev->power.async_suspend && pm_async_enabled 587 && !pm_trace_is_enabled(); 588 } 589 590 static void async_resume_noirq(void *data, async_cookie_t cookie) 591 { 592 struct device *dev = (struct device *)data; 593 int error; 594 595 error = device_resume_noirq(dev, pm_transition, true); 596 if (error) 597 pm_dev_err(dev, pm_transition, " async", error); 598 599 put_device(dev); 600 } 601 602 void dpm_noirq_resume_devices(pm_message_t state) 603 { 604 struct device *dev; 605 ktime_t starttime = ktime_get(); 606 607 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true); 608 mutex_lock(&dpm_list_mtx); 609 pm_transition = state; 610 611 /* 612 * Advanced the async threads upfront, 613 * in case the starting of async threads is 614 * delayed by non-async resuming devices. 615 */ 616 list_for_each_entry(dev, &dpm_noirq_list, power.entry) { 617 reinit_completion(&dev->power.completion); 618 if (is_async(dev)) { 619 get_device(dev); 620 async_schedule(async_resume_noirq, dev); 621 } 622 } 623 624 while (!list_empty(&dpm_noirq_list)) { 625 dev = to_device(dpm_noirq_list.next); 626 get_device(dev); 627 list_move_tail(&dev->power.entry, &dpm_late_early_list); 628 mutex_unlock(&dpm_list_mtx); 629 630 if (!is_async(dev)) { 631 int error; 632 633 error = device_resume_noirq(dev, state, false); 634 if (error) { 635 suspend_stats.failed_resume_noirq++; 636 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); 637 dpm_save_failed_dev(dev_name(dev)); 638 pm_dev_err(dev, state, " noirq", error); 639 } 640 } 641 642 mutex_lock(&dpm_list_mtx); 643 put_device(dev); 644 } 645 mutex_unlock(&dpm_list_mtx); 646 async_synchronize_full(); 647 dpm_show_time(starttime, state, 0, "noirq"); 648 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); 649 } 650 651 void dpm_noirq_end(void) 652 { 653 resume_device_irqs(); 654 device_wakeup_disarm_wake_irqs(); 655 cpuidle_resume(); 656 } 657 658 /** 659 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices. 660 * @state: PM transition of the system being carried out. 661 * 662 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and 663 * allow device drivers' interrupt handlers to be called. 664 */ 665 void dpm_resume_noirq(pm_message_t state) 666 { 667 dpm_noirq_resume_devices(state); 668 dpm_noirq_end(); 669 } 670 671 /** 672 * device_resume_early - Execute an "early resume" callback for given device. 673 * @dev: Device to handle. 674 * @state: PM transition of the system being carried out. 675 * @async: If true, the device is being resumed asynchronously. 676 * 677 * Runtime PM is disabled for @dev while this function is being executed. 678 */ 679 static int device_resume_early(struct device *dev, pm_message_t state, bool async) 680 { 681 pm_callback_t callback = NULL; 682 const char *info = NULL; 683 int error = 0; 684 685 TRACE_DEVICE(dev); 686 TRACE_RESUME(0); 687 688 if (dev->power.syscore || dev->power.direct_complete) 689 goto Out; 690 691 if (!dev->power.is_late_suspended) 692 goto Out; 693 694 dpm_wait_for_superior(dev, async); 695 696 if (dev->pm_domain) { 697 info = "early power domain "; 698 callback = pm_late_early_op(&dev->pm_domain->ops, state); 699 } else if (dev->type && dev->type->pm) { 700 info = "early type "; 701 callback = pm_late_early_op(dev->type->pm, state); 702 } else if (dev->class && dev->class->pm) { 703 info = "early class "; 704 callback = pm_late_early_op(dev->class->pm, state); 705 } else if (dev->bus && dev->bus->pm) { 706 info = "early bus "; 707 callback = pm_late_early_op(dev->bus->pm, state); 708 } 709 710 if (!callback && dev->driver && dev->driver->pm) { 711 info = "early driver "; 712 callback = pm_late_early_op(dev->driver->pm, state); 713 } 714 715 error = dpm_run_callback(callback, dev, state, info); 716 dev->power.is_late_suspended = false; 717 718 Out: 719 TRACE_RESUME(error); 720 721 pm_runtime_enable(dev); 722 complete_all(&dev->power.completion); 723 return error; 724 } 725 726 static void async_resume_early(void *data, async_cookie_t cookie) 727 { 728 struct device *dev = (struct device *)data; 729 int error; 730 731 error = device_resume_early(dev, pm_transition, true); 732 if (error) 733 pm_dev_err(dev, pm_transition, " async", error); 734 735 put_device(dev); 736 } 737 738 /** 739 * dpm_resume_early - Execute "early resume" callbacks for all devices. 740 * @state: PM transition of the system being carried out. 741 */ 742 void dpm_resume_early(pm_message_t state) 743 { 744 struct device *dev; 745 ktime_t starttime = ktime_get(); 746 747 trace_suspend_resume(TPS("dpm_resume_early"), state.event, true); 748 mutex_lock(&dpm_list_mtx); 749 pm_transition = state; 750 751 /* 752 * Advanced the async threads upfront, 753 * in case the starting of async threads is 754 * delayed by non-async resuming devices. 755 */ 756 list_for_each_entry(dev, &dpm_late_early_list, power.entry) { 757 reinit_completion(&dev->power.completion); 758 if (is_async(dev)) { 759 get_device(dev); 760 async_schedule(async_resume_early, dev); 761 } 762 } 763 764 while (!list_empty(&dpm_late_early_list)) { 765 dev = to_device(dpm_late_early_list.next); 766 get_device(dev); 767 list_move_tail(&dev->power.entry, &dpm_suspended_list); 768 mutex_unlock(&dpm_list_mtx); 769 770 if (!is_async(dev)) { 771 int error; 772 773 error = device_resume_early(dev, state, false); 774 if (error) { 775 suspend_stats.failed_resume_early++; 776 dpm_save_failed_step(SUSPEND_RESUME_EARLY); 777 dpm_save_failed_dev(dev_name(dev)); 778 pm_dev_err(dev, state, " early", error); 779 } 780 } 781 mutex_lock(&dpm_list_mtx); 782 put_device(dev); 783 } 784 mutex_unlock(&dpm_list_mtx); 785 async_synchronize_full(); 786 dpm_show_time(starttime, state, 0, "early"); 787 trace_suspend_resume(TPS("dpm_resume_early"), state.event, false); 788 } 789 790 /** 791 * dpm_resume_start - Execute "noirq" and "early" device callbacks. 792 * @state: PM transition of the system being carried out. 793 */ 794 void dpm_resume_start(pm_message_t state) 795 { 796 dpm_resume_noirq(state); 797 dpm_resume_early(state); 798 } 799 EXPORT_SYMBOL_GPL(dpm_resume_start); 800 801 /** 802 * device_resume - Execute "resume" callbacks for given device. 803 * @dev: Device to handle. 804 * @state: PM transition of the system being carried out. 805 * @async: If true, the device is being resumed asynchronously. 806 */ 807 static int device_resume(struct device *dev, pm_message_t state, bool async) 808 { 809 pm_callback_t callback = NULL; 810 const char *info = NULL; 811 int error = 0; 812 DECLARE_DPM_WATCHDOG_ON_STACK(wd); 813 814 TRACE_DEVICE(dev); 815 TRACE_RESUME(0); 816 817 if (dev->power.syscore) 818 goto Complete; 819 820 if (dev->power.direct_complete) { 821 /* Match the pm_runtime_disable() in __device_suspend(). */ 822 pm_runtime_enable(dev); 823 goto Complete; 824 } 825 826 dpm_wait_for_superior(dev, async); 827 dpm_watchdog_set(&wd, dev); 828 device_lock(dev); 829 830 /* 831 * This is a fib. But we'll allow new children to be added below 832 * a resumed device, even if the device hasn't been completed yet. 833 */ 834 dev->power.is_prepared = false; 835 836 if (!dev->power.is_suspended) 837 goto Unlock; 838 839 if (dev->pm_domain) { 840 info = "power domain "; 841 callback = pm_op(&dev->pm_domain->ops, state); 842 goto Driver; 843 } 844 845 if (dev->type && dev->type->pm) { 846 info = "type "; 847 callback = pm_op(dev->type->pm, state); 848 goto Driver; 849 } 850 851 if (dev->class) { 852 if (dev->class->pm) { 853 info = "class "; 854 callback = pm_op(dev->class->pm, state); 855 goto Driver; 856 } else if (dev->class->resume) { 857 info = "legacy class "; 858 callback = dev->class->resume; 859 goto End; 860 } 861 } 862 863 if (dev->bus) { 864 if (dev->bus->pm) { 865 info = "bus "; 866 callback = pm_op(dev->bus->pm, state); 867 } else if (dev->bus->resume) { 868 info = "legacy bus "; 869 callback = dev->bus->resume; 870 goto End; 871 } 872 } 873 874 Driver: 875 if (!callback && dev->driver && dev->driver->pm) { 876 info = "driver "; 877 callback = pm_op(dev->driver->pm, state); 878 } 879 880 End: 881 error = dpm_run_callback(callback, dev, state, info); 882 dev->power.is_suspended = false; 883 884 Unlock: 885 device_unlock(dev); 886 dpm_watchdog_clear(&wd); 887 888 Complete: 889 complete_all(&dev->power.completion); 890 891 TRACE_RESUME(error); 892 893 return error; 894 } 895 896 static void async_resume(void *data, async_cookie_t cookie) 897 { 898 struct device *dev = (struct device *)data; 899 int error; 900 901 error = device_resume(dev, pm_transition, true); 902 if (error) 903 pm_dev_err(dev, pm_transition, " async", error); 904 put_device(dev); 905 } 906 907 /** 908 * dpm_resume - Execute "resume" callbacks for non-sysdev devices. 909 * @state: PM transition of the system being carried out. 910 * 911 * Execute the appropriate "resume" callback for all devices whose status 912 * indicates that they are suspended. 913 */ 914 void dpm_resume(pm_message_t state) 915 { 916 struct device *dev; 917 ktime_t starttime = ktime_get(); 918 919 trace_suspend_resume(TPS("dpm_resume"), state.event, true); 920 might_sleep(); 921 922 mutex_lock(&dpm_list_mtx); 923 pm_transition = state; 924 async_error = 0; 925 926 list_for_each_entry(dev, &dpm_suspended_list, power.entry) { 927 reinit_completion(&dev->power.completion); 928 if (is_async(dev)) { 929 get_device(dev); 930 async_schedule(async_resume, dev); 931 } 932 } 933 934 while (!list_empty(&dpm_suspended_list)) { 935 dev = to_device(dpm_suspended_list.next); 936 get_device(dev); 937 if (!is_async(dev)) { 938 int error; 939 940 mutex_unlock(&dpm_list_mtx); 941 942 error = device_resume(dev, state, false); 943 if (error) { 944 suspend_stats.failed_resume++; 945 dpm_save_failed_step(SUSPEND_RESUME); 946 dpm_save_failed_dev(dev_name(dev)); 947 pm_dev_err(dev, state, "", error); 948 } 949 950 mutex_lock(&dpm_list_mtx); 951 } 952 if (!list_empty(&dev->power.entry)) 953 list_move_tail(&dev->power.entry, &dpm_prepared_list); 954 put_device(dev); 955 } 956 mutex_unlock(&dpm_list_mtx); 957 async_synchronize_full(); 958 dpm_show_time(starttime, state, 0, NULL); 959 960 cpufreq_resume(); 961 trace_suspend_resume(TPS("dpm_resume"), state.event, false); 962 } 963 964 /** 965 * device_complete - Complete a PM transition for given device. 966 * @dev: Device to handle. 967 * @state: PM transition of the system being carried out. 968 */ 969 static void device_complete(struct device *dev, pm_message_t state) 970 { 971 void (*callback)(struct device *) = NULL; 972 const char *info = NULL; 973 974 if (dev->power.syscore) 975 return; 976 977 device_lock(dev); 978 979 if (dev->pm_domain) { 980 info = "completing power domain "; 981 callback = dev->pm_domain->ops.complete; 982 } else if (dev->type && dev->type->pm) { 983 info = "completing type "; 984 callback = dev->type->pm->complete; 985 } else if (dev->class && dev->class->pm) { 986 info = "completing class "; 987 callback = dev->class->pm->complete; 988 } else if (dev->bus && dev->bus->pm) { 989 info = "completing bus "; 990 callback = dev->bus->pm->complete; 991 } 992 993 if (!callback && dev->driver && dev->driver->pm) { 994 info = "completing driver "; 995 callback = dev->driver->pm->complete; 996 } 997 998 if (callback) { 999 pm_dev_dbg(dev, state, info); 1000 callback(dev); 1001 } 1002 1003 device_unlock(dev); 1004 1005 pm_runtime_put(dev); 1006 } 1007 1008 /** 1009 * dpm_complete - Complete a PM transition for all non-sysdev devices. 1010 * @state: PM transition of the system being carried out. 1011 * 1012 * Execute the ->complete() callbacks for all devices whose PM status is not 1013 * DPM_ON (this allows new devices to be registered). 1014 */ 1015 void dpm_complete(pm_message_t state) 1016 { 1017 struct list_head list; 1018 1019 trace_suspend_resume(TPS("dpm_complete"), state.event, true); 1020 might_sleep(); 1021 1022 INIT_LIST_HEAD(&list); 1023 mutex_lock(&dpm_list_mtx); 1024 while (!list_empty(&dpm_prepared_list)) { 1025 struct device *dev = to_device(dpm_prepared_list.prev); 1026 1027 get_device(dev); 1028 dev->power.is_prepared = false; 1029 list_move(&dev->power.entry, &list); 1030 mutex_unlock(&dpm_list_mtx); 1031 1032 trace_device_pm_callback_start(dev, "", state.event); 1033 device_complete(dev, state); 1034 trace_device_pm_callback_end(dev, 0); 1035 1036 mutex_lock(&dpm_list_mtx); 1037 put_device(dev); 1038 } 1039 list_splice(&list, &dpm_list); 1040 mutex_unlock(&dpm_list_mtx); 1041 1042 /* Allow device probing and trigger re-probing of deferred devices */ 1043 device_unblock_probing(); 1044 trace_suspend_resume(TPS("dpm_complete"), state.event, false); 1045 } 1046 1047 /** 1048 * dpm_resume_end - Execute "resume" callbacks and complete system transition. 1049 * @state: PM transition of the system being carried out. 1050 * 1051 * Execute "resume" callbacks for all devices and complete the PM transition of 1052 * the system. 1053 */ 1054 void dpm_resume_end(pm_message_t state) 1055 { 1056 dpm_resume(state); 1057 dpm_complete(state); 1058 } 1059 EXPORT_SYMBOL_GPL(dpm_resume_end); 1060 1061 1062 /*------------------------- Suspend routines -------------------------*/ 1063 1064 /** 1065 * resume_event - Return a "resume" message for given "suspend" sleep state. 1066 * @sleep_state: PM message representing a sleep state. 1067 * 1068 * Return a PM message representing the resume event corresponding to given 1069 * sleep state. 1070 */ 1071 static pm_message_t resume_event(pm_message_t sleep_state) 1072 { 1073 switch (sleep_state.event) { 1074 case PM_EVENT_SUSPEND: 1075 return PMSG_RESUME; 1076 case PM_EVENT_FREEZE: 1077 case PM_EVENT_QUIESCE: 1078 return PMSG_RECOVER; 1079 case PM_EVENT_HIBERNATE: 1080 return PMSG_RESTORE; 1081 } 1082 return PMSG_ON; 1083 } 1084 1085 /** 1086 * device_suspend_noirq - Execute a "late suspend" callback for given device. 1087 * @dev: Device to handle. 1088 * @state: PM transition of the system being carried out. 1089 * @async: If true, the device is being suspended asynchronously. 1090 * 1091 * The driver of @dev will not receive interrupts while this function is being 1092 * executed. 1093 */ 1094 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async) 1095 { 1096 pm_callback_t callback = NULL; 1097 const char *info = NULL; 1098 int error = 0; 1099 1100 TRACE_DEVICE(dev); 1101 TRACE_SUSPEND(0); 1102 1103 dpm_wait_for_subordinate(dev, async); 1104 1105 if (async_error) 1106 goto Complete; 1107 1108 if (pm_wakeup_pending()) { 1109 async_error = -EBUSY; 1110 goto Complete; 1111 } 1112 1113 if (dev->power.syscore || dev->power.direct_complete) 1114 goto Complete; 1115 1116 if (dev->pm_domain) { 1117 info = "noirq power domain "; 1118 callback = pm_noirq_op(&dev->pm_domain->ops, state); 1119 } else if (dev->type && dev->type->pm) { 1120 info = "noirq type "; 1121 callback = pm_noirq_op(dev->type->pm, state); 1122 } else if (dev->class && dev->class->pm) { 1123 info = "noirq class "; 1124 callback = pm_noirq_op(dev->class->pm, state); 1125 } else if (dev->bus && dev->bus->pm) { 1126 info = "noirq bus "; 1127 callback = pm_noirq_op(dev->bus->pm, state); 1128 } 1129 1130 if (!callback && dev->driver && dev->driver->pm) { 1131 info = "noirq driver "; 1132 callback = pm_noirq_op(dev->driver->pm, state); 1133 } 1134 1135 error = dpm_run_callback(callback, dev, state, info); 1136 if (!error) 1137 dev->power.is_noirq_suspended = true; 1138 else 1139 async_error = error; 1140 1141 Complete: 1142 complete_all(&dev->power.completion); 1143 TRACE_SUSPEND(error); 1144 return error; 1145 } 1146 1147 static void async_suspend_noirq(void *data, async_cookie_t cookie) 1148 { 1149 struct device *dev = (struct device *)data; 1150 int error; 1151 1152 error = __device_suspend_noirq(dev, pm_transition, true); 1153 if (error) { 1154 dpm_save_failed_dev(dev_name(dev)); 1155 pm_dev_err(dev, pm_transition, " async", error); 1156 } 1157 1158 put_device(dev); 1159 } 1160 1161 static int device_suspend_noirq(struct device *dev) 1162 { 1163 reinit_completion(&dev->power.completion); 1164 1165 if (is_async(dev)) { 1166 get_device(dev); 1167 async_schedule(async_suspend_noirq, dev); 1168 return 0; 1169 } 1170 return __device_suspend_noirq(dev, pm_transition, false); 1171 } 1172 1173 void dpm_noirq_begin(void) 1174 { 1175 cpuidle_pause(); 1176 device_wakeup_arm_wake_irqs(); 1177 suspend_device_irqs(); 1178 } 1179 1180 int dpm_noirq_suspend_devices(pm_message_t state) 1181 { 1182 ktime_t starttime = ktime_get(); 1183 int error = 0; 1184 1185 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true); 1186 mutex_lock(&dpm_list_mtx); 1187 pm_transition = state; 1188 async_error = 0; 1189 1190 while (!list_empty(&dpm_late_early_list)) { 1191 struct device *dev = to_device(dpm_late_early_list.prev); 1192 1193 get_device(dev); 1194 mutex_unlock(&dpm_list_mtx); 1195 1196 error = device_suspend_noirq(dev); 1197 1198 mutex_lock(&dpm_list_mtx); 1199 if (error) { 1200 pm_dev_err(dev, state, " noirq", error); 1201 dpm_save_failed_dev(dev_name(dev)); 1202 put_device(dev); 1203 break; 1204 } 1205 if (!list_empty(&dev->power.entry)) 1206 list_move(&dev->power.entry, &dpm_noirq_list); 1207 put_device(dev); 1208 1209 if (async_error) 1210 break; 1211 } 1212 mutex_unlock(&dpm_list_mtx); 1213 async_synchronize_full(); 1214 if (!error) 1215 error = async_error; 1216 1217 if (error) { 1218 suspend_stats.failed_suspend_noirq++; 1219 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); 1220 } 1221 dpm_show_time(starttime, state, error, "noirq"); 1222 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false); 1223 return error; 1224 } 1225 1226 /** 1227 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices. 1228 * @state: PM transition of the system being carried out. 1229 * 1230 * Prevent device drivers' interrupt handlers from being called and invoke 1231 * "noirq" suspend callbacks for all non-sysdev devices. 1232 */ 1233 int dpm_suspend_noirq(pm_message_t state) 1234 { 1235 int ret; 1236 1237 dpm_noirq_begin(); 1238 ret = dpm_noirq_suspend_devices(state); 1239 if (ret) 1240 dpm_resume_noirq(resume_event(state)); 1241 1242 return ret; 1243 } 1244 1245 /** 1246 * device_suspend_late - Execute a "late suspend" callback for given device. 1247 * @dev: Device to handle. 1248 * @state: PM transition of the system being carried out. 1249 * @async: If true, the device is being suspended asynchronously. 1250 * 1251 * Runtime PM is disabled for @dev while this function is being executed. 1252 */ 1253 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async) 1254 { 1255 pm_callback_t callback = NULL; 1256 const char *info = NULL; 1257 int error = 0; 1258 1259 TRACE_DEVICE(dev); 1260 TRACE_SUSPEND(0); 1261 1262 __pm_runtime_disable(dev, false); 1263 1264 dpm_wait_for_subordinate(dev, async); 1265 1266 if (async_error) 1267 goto Complete; 1268 1269 if (pm_wakeup_pending()) { 1270 async_error = -EBUSY; 1271 goto Complete; 1272 } 1273 1274 if (dev->power.syscore || dev->power.direct_complete) 1275 goto Complete; 1276 1277 if (dev->pm_domain) { 1278 info = "late power domain "; 1279 callback = pm_late_early_op(&dev->pm_domain->ops, state); 1280 } else if (dev->type && dev->type->pm) { 1281 info = "late type "; 1282 callback = pm_late_early_op(dev->type->pm, state); 1283 } else if (dev->class && dev->class->pm) { 1284 info = "late class "; 1285 callback = pm_late_early_op(dev->class->pm, state); 1286 } else if (dev->bus && dev->bus->pm) { 1287 info = "late bus "; 1288 callback = pm_late_early_op(dev->bus->pm, state); 1289 } 1290 1291 if (!callback && dev->driver && dev->driver->pm) { 1292 info = "late driver "; 1293 callback = pm_late_early_op(dev->driver->pm, state); 1294 } 1295 1296 error = dpm_run_callback(callback, dev, state, info); 1297 if (!error) 1298 dev->power.is_late_suspended = true; 1299 else 1300 async_error = error; 1301 1302 Complete: 1303 TRACE_SUSPEND(error); 1304 complete_all(&dev->power.completion); 1305 return error; 1306 } 1307 1308 static void async_suspend_late(void *data, async_cookie_t cookie) 1309 { 1310 struct device *dev = (struct device *)data; 1311 int error; 1312 1313 error = __device_suspend_late(dev, pm_transition, true); 1314 if (error) { 1315 dpm_save_failed_dev(dev_name(dev)); 1316 pm_dev_err(dev, pm_transition, " async", error); 1317 } 1318 put_device(dev); 1319 } 1320 1321 static int device_suspend_late(struct device *dev) 1322 { 1323 reinit_completion(&dev->power.completion); 1324 1325 if (is_async(dev)) { 1326 get_device(dev); 1327 async_schedule(async_suspend_late, dev); 1328 return 0; 1329 } 1330 1331 return __device_suspend_late(dev, pm_transition, false); 1332 } 1333 1334 /** 1335 * dpm_suspend_late - Execute "late suspend" callbacks for all devices. 1336 * @state: PM transition of the system being carried out. 1337 */ 1338 int dpm_suspend_late(pm_message_t state) 1339 { 1340 ktime_t starttime = ktime_get(); 1341 int error = 0; 1342 1343 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true); 1344 mutex_lock(&dpm_list_mtx); 1345 pm_transition = state; 1346 async_error = 0; 1347 1348 while (!list_empty(&dpm_suspended_list)) { 1349 struct device *dev = to_device(dpm_suspended_list.prev); 1350 1351 get_device(dev); 1352 mutex_unlock(&dpm_list_mtx); 1353 1354 error = device_suspend_late(dev); 1355 1356 mutex_lock(&dpm_list_mtx); 1357 if (!list_empty(&dev->power.entry)) 1358 list_move(&dev->power.entry, &dpm_late_early_list); 1359 1360 if (error) { 1361 pm_dev_err(dev, state, " late", error); 1362 dpm_save_failed_dev(dev_name(dev)); 1363 put_device(dev); 1364 break; 1365 } 1366 put_device(dev); 1367 1368 if (async_error) 1369 break; 1370 } 1371 mutex_unlock(&dpm_list_mtx); 1372 async_synchronize_full(); 1373 if (!error) 1374 error = async_error; 1375 if (error) { 1376 suspend_stats.failed_suspend_late++; 1377 dpm_save_failed_step(SUSPEND_SUSPEND_LATE); 1378 dpm_resume_early(resume_event(state)); 1379 } 1380 dpm_show_time(starttime, state, error, "late"); 1381 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false); 1382 return error; 1383 } 1384 1385 /** 1386 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks. 1387 * @state: PM transition of the system being carried out. 1388 */ 1389 int dpm_suspend_end(pm_message_t state) 1390 { 1391 int error = dpm_suspend_late(state); 1392 if (error) 1393 return error; 1394 1395 error = dpm_suspend_noirq(state); 1396 if (error) { 1397 dpm_resume_early(resume_event(state)); 1398 return error; 1399 } 1400 1401 return 0; 1402 } 1403 EXPORT_SYMBOL_GPL(dpm_suspend_end); 1404 1405 /** 1406 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. 1407 * @dev: Device to suspend. 1408 * @state: PM transition of the system being carried out. 1409 * @cb: Suspend callback to execute. 1410 * @info: string description of caller. 1411 */ 1412 static int legacy_suspend(struct device *dev, pm_message_t state, 1413 int (*cb)(struct device *dev, pm_message_t state), 1414 const char *info) 1415 { 1416 int error; 1417 ktime_t calltime; 1418 1419 calltime = initcall_debug_start(dev); 1420 1421 trace_device_pm_callback_start(dev, info, state.event); 1422 error = cb(dev, state); 1423 trace_device_pm_callback_end(dev, error); 1424 suspend_report_result(cb, error); 1425 1426 initcall_debug_report(dev, calltime, error, state, info); 1427 1428 return error; 1429 } 1430 1431 static void dpm_clear_suppliers_direct_complete(struct device *dev) 1432 { 1433 struct device_link *link; 1434 int idx; 1435 1436 idx = device_links_read_lock(); 1437 1438 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) { 1439 spin_lock_irq(&link->supplier->power.lock); 1440 link->supplier->power.direct_complete = false; 1441 spin_unlock_irq(&link->supplier->power.lock); 1442 } 1443 1444 device_links_read_unlock(idx); 1445 } 1446 1447 /** 1448 * device_suspend - Execute "suspend" callbacks for given device. 1449 * @dev: Device to handle. 1450 * @state: PM transition of the system being carried out. 1451 * @async: If true, the device is being suspended asynchronously. 1452 */ 1453 static int __device_suspend(struct device *dev, pm_message_t state, bool async) 1454 { 1455 pm_callback_t callback = NULL; 1456 const char *info = NULL; 1457 int error = 0; 1458 DECLARE_DPM_WATCHDOG_ON_STACK(wd); 1459 1460 TRACE_DEVICE(dev); 1461 TRACE_SUSPEND(0); 1462 1463 dpm_wait_for_subordinate(dev, async); 1464 1465 if (async_error) 1466 goto Complete; 1467 1468 /* 1469 * If a device configured to wake up the system from sleep states 1470 * has been suspended at run time and there's a resume request pending 1471 * for it, this is equivalent to the device signaling wakeup, so the 1472 * system suspend operation should be aborted. 1473 */ 1474 if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) 1475 pm_wakeup_event(dev, 0); 1476 1477 if (pm_wakeup_pending()) { 1478 async_error = -EBUSY; 1479 goto Complete; 1480 } 1481 1482 if (dev->power.syscore) 1483 goto Complete; 1484 1485 if (dev->power.direct_complete) { 1486 if (pm_runtime_status_suspended(dev)) { 1487 pm_runtime_disable(dev); 1488 if (pm_runtime_status_suspended(dev)) 1489 goto Complete; 1490 1491 pm_runtime_enable(dev); 1492 } 1493 dev->power.direct_complete = false; 1494 } 1495 1496 dpm_watchdog_set(&wd, dev); 1497 device_lock(dev); 1498 1499 if (dev->pm_domain) { 1500 info = "power domain "; 1501 callback = pm_op(&dev->pm_domain->ops, state); 1502 goto Run; 1503 } 1504 1505 if (dev->type && dev->type->pm) { 1506 info = "type "; 1507 callback = pm_op(dev->type->pm, state); 1508 goto Run; 1509 } 1510 1511 if (dev->class) { 1512 if (dev->class->pm) { 1513 info = "class "; 1514 callback = pm_op(dev->class->pm, state); 1515 goto Run; 1516 } else if (dev->class->suspend) { 1517 pm_dev_dbg(dev, state, "legacy class "); 1518 error = legacy_suspend(dev, state, dev->class->suspend, 1519 "legacy class "); 1520 goto End; 1521 } 1522 } 1523 1524 if (dev->bus) { 1525 if (dev->bus->pm) { 1526 info = "bus "; 1527 callback = pm_op(dev->bus->pm, state); 1528 } else if (dev->bus->suspend) { 1529 pm_dev_dbg(dev, state, "legacy bus "); 1530 error = legacy_suspend(dev, state, dev->bus->suspend, 1531 "legacy bus "); 1532 goto End; 1533 } 1534 } 1535 1536 Run: 1537 if (!callback && dev->driver && dev->driver->pm) { 1538 info = "driver "; 1539 callback = pm_op(dev->driver->pm, state); 1540 } 1541 1542 error = dpm_run_callback(callback, dev, state, info); 1543 1544 End: 1545 if (!error) { 1546 struct device *parent = dev->parent; 1547 1548 dev->power.is_suspended = true; 1549 if (parent) { 1550 spin_lock_irq(&parent->power.lock); 1551 1552 dev->parent->power.direct_complete = false; 1553 if (dev->power.wakeup_path 1554 && !dev->parent->power.ignore_children) 1555 dev->parent->power.wakeup_path = true; 1556 1557 spin_unlock_irq(&parent->power.lock); 1558 } 1559 dpm_clear_suppliers_direct_complete(dev); 1560 } 1561 1562 device_unlock(dev); 1563 dpm_watchdog_clear(&wd); 1564 1565 Complete: 1566 if (error) 1567 async_error = error; 1568 1569 complete_all(&dev->power.completion); 1570 TRACE_SUSPEND(error); 1571 return error; 1572 } 1573 1574 static void async_suspend(void *data, async_cookie_t cookie) 1575 { 1576 struct device *dev = (struct device *)data; 1577 int error; 1578 1579 error = __device_suspend(dev, pm_transition, true); 1580 if (error) { 1581 dpm_save_failed_dev(dev_name(dev)); 1582 pm_dev_err(dev, pm_transition, " async", error); 1583 } 1584 1585 put_device(dev); 1586 } 1587 1588 static int device_suspend(struct device *dev) 1589 { 1590 reinit_completion(&dev->power.completion); 1591 1592 if (is_async(dev)) { 1593 get_device(dev); 1594 async_schedule(async_suspend, dev); 1595 return 0; 1596 } 1597 1598 return __device_suspend(dev, pm_transition, false); 1599 } 1600 1601 /** 1602 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. 1603 * @state: PM transition of the system being carried out. 1604 */ 1605 int dpm_suspend(pm_message_t state) 1606 { 1607 ktime_t starttime = ktime_get(); 1608 int error = 0; 1609 1610 trace_suspend_resume(TPS("dpm_suspend"), state.event, true); 1611 might_sleep(); 1612 1613 cpufreq_suspend(); 1614 1615 mutex_lock(&dpm_list_mtx); 1616 pm_transition = state; 1617 async_error = 0; 1618 while (!list_empty(&dpm_prepared_list)) { 1619 struct device *dev = to_device(dpm_prepared_list.prev); 1620 1621 get_device(dev); 1622 mutex_unlock(&dpm_list_mtx); 1623 1624 error = device_suspend(dev); 1625 1626 mutex_lock(&dpm_list_mtx); 1627 if (error) { 1628 pm_dev_err(dev, state, "", error); 1629 dpm_save_failed_dev(dev_name(dev)); 1630 put_device(dev); 1631 break; 1632 } 1633 if (!list_empty(&dev->power.entry)) 1634 list_move(&dev->power.entry, &dpm_suspended_list); 1635 put_device(dev); 1636 if (async_error) 1637 break; 1638 } 1639 mutex_unlock(&dpm_list_mtx); 1640 async_synchronize_full(); 1641 if (!error) 1642 error = async_error; 1643 if (error) { 1644 suspend_stats.failed_suspend++; 1645 dpm_save_failed_step(SUSPEND_SUSPEND); 1646 } 1647 dpm_show_time(starttime, state, error, NULL); 1648 trace_suspend_resume(TPS("dpm_suspend"), state.event, false); 1649 return error; 1650 } 1651 1652 /** 1653 * device_prepare - Prepare a device for system power transition. 1654 * @dev: Device to handle. 1655 * @state: PM transition of the system being carried out. 1656 * 1657 * Execute the ->prepare() callback(s) for given device. No new children of the 1658 * device may be registered after this function has returned. 1659 */ 1660 static int device_prepare(struct device *dev, pm_message_t state) 1661 { 1662 int (*callback)(struct device *) = NULL; 1663 int ret = 0; 1664 1665 if (dev->power.syscore) 1666 return 0; 1667 1668 /* 1669 * If a device's parent goes into runtime suspend at the wrong time, 1670 * it won't be possible to resume the device. To prevent this we 1671 * block runtime suspend here, during the prepare phase, and allow 1672 * it again during the complete phase. 1673 */ 1674 pm_runtime_get_noresume(dev); 1675 1676 device_lock(dev); 1677 1678 dev->power.wakeup_path = device_may_wakeup(dev); 1679 1680 if (dev->power.no_pm_callbacks) { 1681 ret = 1; /* Let device go direct_complete */ 1682 goto unlock; 1683 } 1684 1685 if (dev->pm_domain) 1686 callback = dev->pm_domain->ops.prepare; 1687 else if (dev->type && dev->type->pm) 1688 callback = dev->type->pm->prepare; 1689 else if (dev->class && dev->class->pm) 1690 callback = dev->class->pm->prepare; 1691 else if (dev->bus && dev->bus->pm) 1692 callback = dev->bus->pm->prepare; 1693 1694 if (!callback && dev->driver && dev->driver->pm) 1695 callback = dev->driver->pm->prepare; 1696 1697 if (callback) 1698 ret = callback(dev); 1699 1700 unlock: 1701 device_unlock(dev); 1702 1703 if (ret < 0) { 1704 suspend_report_result(callback, ret); 1705 pm_runtime_put(dev); 1706 return ret; 1707 } 1708 /* 1709 * A positive return value from ->prepare() means "this device appears 1710 * to be runtime-suspended and its state is fine, so if it really is 1711 * runtime-suspended, you can leave it in that state provided that you 1712 * will do the same thing with all of its descendants". This only 1713 * applies to suspend transitions, however. 1714 */ 1715 spin_lock_irq(&dev->power.lock); 1716 dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND; 1717 spin_unlock_irq(&dev->power.lock); 1718 return 0; 1719 } 1720 1721 /** 1722 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition. 1723 * @state: PM transition of the system being carried out. 1724 * 1725 * Execute the ->prepare() callback(s) for all devices. 1726 */ 1727 int dpm_prepare(pm_message_t state) 1728 { 1729 int error = 0; 1730 1731 trace_suspend_resume(TPS("dpm_prepare"), state.event, true); 1732 might_sleep(); 1733 1734 /* 1735 * Give a chance for the known devices to complete their probes, before 1736 * disable probing of devices. This sync point is important at least 1737 * at boot time + hibernation restore. 1738 */ 1739 wait_for_device_probe(); 1740 /* 1741 * It is unsafe if probing of devices will happen during suspend or 1742 * hibernation and system behavior will be unpredictable in this case. 1743 * So, let's prohibit device's probing here and defer their probes 1744 * instead. The normal behavior will be restored in dpm_complete(). 1745 */ 1746 device_block_probing(); 1747 1748 mutex_lock(&dpm_list_mtx); 1749 while (!list_empty(&dpm_list)) { 1750 struct device *dev = to_device(dpm_list.next); 1751 1752 get_device(dev); 1753 mutex_unlock(&dpm_list_mtx); 1754 1755 trace_device_pm_callback_start(dev, "", state.event); 1756 error = device_prepare(dev, state); 1757 trace_device_pm_callback_end(dev, error); 1758 1759 mutex_lock(&dpm_list_mtx); 1760 if (error) { 1761 if (error == -EAGAIN) { 1762 put_device(dev); 1763 error = 0; 1764 continue; 1765 } 1766 printk(KERN_INFO "PM: Device %s not prepared " 1767 "for power transition: code %d\n", 1768 dev_name(dev), error); 1769 put_device(dev); 1770 break; 1771 } 1772 dev->power.is_prepared = true; 1773 if (!list_empty(&dev->power.entry)) 1774 list_move_tail(&dev->power.entry, &dpm_prepared_list); 1775 put_device(dev); 1776 } 1777 mutex_unlock(&dpm_list_mtx); 1778 trace_suspend_resume(TPS("dpm_prepare"), state.event, false); 1779 return error; 1780 } 1781 1782 /** 1783 * dpm_suspend_start - Prepare devices for PM transition and suspend them. 1784 * @state: PM transition of the system being carried out. 1785 * 1786 * Prepare all non-sysdev devices for system PM transition and execute "suspend" 1787 * callbacks for them. 1788 */ 1789 int dpm_suspend_start(pm_message_t state) 1790 { 1791 int error; 1792 1793 error = dpm_prepare(state); 1794 if (error) { 1795 suspend_stats.failed_prepare++; 1796 dpm_save_failed_step(SUSPEND_PREPARE); 1797 } else 1798 error = dpm_suspend(state); 1799 return error; 1800 } 1801 EXPORT_SYMBOL_GPL(dpm_suspend_start); 1802 1803 void __suspend_report_result(const char *function, void *fn, int ret) 1804 { 1805 if (ret) 1806 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret); 1807 } 1808 EXPORT_SYMBOL_GPL(__suspend_report_result); 1809 1810 /** 1811 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete. 1812 * @dev: Device to wait for. 1813 * @subordinate: Device that needs to wait for @dev. 1814 */ 1815 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev) 1816 { 1817 dpm_wait(dev, subordinate->power.async_suspend); 1818 return async_error; 1819 } 1820 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev); 1821 1822 /** 1823 * dpm_for_each_dev - device iterator. 1824 * @data: data for the callback. 1825 * @fn: function to be called for each device. 1826 * 1827 * Iterate over devices in dpm_list, and call @fn for each device, 1828 * passing it @data. 1829 */ 1830 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *)) 1831 { 1832 struct device *dev; 1833 1834 if (!fn) 1835 return; 1836 1837 device_pm_lock(); 1838 list_for_each_entry(dev, &dpm_list, power.entry) 1839 fn(dev, data); 1840 device_pm_unlock(); 1841 } 1842 EXPORT_SYMBOL_GPL(dpm_for_each_dev); 1843 1844 static bool pm_ops_is_empty(const struct dev_pm_ops *ops) 1845 { 1846 if (!ops) 1847 return true; 1848 1849 return !ops->prepare && 1850 !ops->suspend && 1851 !ops->suspend_late && 1852 !ops->suspend_noirq && 1853 !ops->resume_noirq && 1854 !ops->resume_early && 1855 !ops->resume && 1856 !ops->complete; 1857 } 1858 1859 void device_pm_check_callbacks(struct device *dev) 1860 { 1861 spin_lock_irq(&dev->power.lock); 1862 dev->power.no_pm_callbacks = 1863 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) && 1864 !dev->bus->suspend && !dev->bus->resume)) && 1865 (!dev->class || (pm_ops_is_empty(dev->class->pm) && 1866 !dev->class->suspend && !dev->class->resume)) && 1867 (!dev->type || pm_ops_is_empty(dev->type->pm)) && 1868 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) && 1869 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) && 1870 !dev->driver->suspend && !dev->driver->resume)); 1871 spin_unlock_irq(&dev->power.lock); 1872 } 1873