/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in dpm_list are kept in depth-first order, simply because
 * children are guaranteed to be discovered after their parents, and are
 * inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held, we must
 * never try to acquire a device lock while holding dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_late_early_list);
LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
	INIT_LIST_HEAD(&dev->power.entry);
	dev->power.power_state = PMSG_INVALID;
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev_pm_qos_constraints_init(dev);
	mutex_unlock(&dpm_list_mtx);
}
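
/*
 * Illustrative sketch (not part of the PM core; the foo_* names are
 * hypothetical): a driver whose device lands on dpm_list typically supplies
 * the callbacks that the code below looks up through a struct dev_pm_ops:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		return 0;	// quiesce hardware, save context
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return 0;	// restore context, restart hardware
 *	}
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 */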

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	dev_pm_qos_constraints_destroy(dev);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
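
/*
 * Hedged note: dpm_wait() above only has to wait when asynchronous suspend
 * and resume may reorder devices.  A driver opts a device into the async
 * path with the standard helper, typically at probe time:
 *
 *	device_enable_async_suspend(dev);	// sets power.async_suspend
 */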

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The returned callback runs with runtime PM disabled for the device it is
 * invoked on.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The returned callback runs while the driver of the device it is invoked on
 * does not receive interrupts.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}
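
/*
 * Worked example of the mapping above: system suspend passes PMSG_SUSPEND,
 * a pm_message_t whose .event is PM_EVENT_SUSPEND, so with CONFIG_SUSPEND
 * set:
 *
 *	pm_op(ops, PMSG_SUSPEND)		returns ops->suspend
 *	pm_late_early_op(ops, PMSG_SUSPEND)	returns ops->suspend_late
 *	pm_noirq_op(ops, PMSG_SUSPEND)		returns ops->suspend_noirq
 */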

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
		       int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
	       dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
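
/*
 * Observability note: initcall_debug_start() and initcall_debug_report()
 * only print when the kernel is booted with the "initcall_debug" command
 * line parameter.  The ">> 10" in initcall_debug_report() is a cheap
 * approximation of a nanoseconds-to-microseconds division (it divides by
 * 1024 instead of 1000).
 */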

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " noirq", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_early(dev, state);
		if (error) {
			suspend_stats.failed_resume_early++;
			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
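
/*
 * Note on callback selection, which applies to every device_* routine in
 * this file: the PM domain, device type, class and bus are consulted in that
 * order, and the first one that provides a dev_pm_ops wins; the driver's own
 * dev_pm_ops is used only as a fallback, when the lookup above did not yield
 * a callback.
 */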

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	bool put = false;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	pm_runtime_enable(dev);
	put = true;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (put)
		pm_runtime_put_sync(dev);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
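
/*
 * Hedged usage note: besides the per-device power.async_suspend flag,
 * is_async() above depends on pm_async_enabled, which the PM core exposes
 * through /sys/power/pm_async.  Writing 0 there forces the fully ordered,
 * synchronous path:
 *
 *	echo 0 > /sys/power/pm_async
 */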

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose ->prepare()
 * callbacks have been executed (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
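
/*
 * Sketch of the overall ordering (assuming the standard flow driven from
 * kernel/power/suspend.c): the entry points exported by this file nest as
 *
 *	dpm_suspend_start(PMSG_SUSPEND);	// prepare + suspend
 *	dpm_suspend_end(PMSG_SUSPEND);		// suspend_late + suspend_noirq
 *						// ... platform sleep state ...
 *	dpm_resume_start(PMSG_RESUME);		// resume_noirq + resume_early
 *	dpm_resume_end(PMSG_RESUME);		// resume + complete
 */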

/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "noirq");
	return error;
}
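
/*
 * Hedged note: the pm_wakeup_pending() checks here (and below in
 * dpm_suspend_late()) abort the transition when a wakeup event arrives
 * mid-suspend.  A device only participates in wakeup signaling after its
 * driver has enabled the capability, typically via the standard helper:
 *
 *	device_init_wakeup(dev, true);
 */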

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_late++;
			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_early(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");

	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);

	return error ? : dpm_suspend_noirq(state);
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
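
/*
 * For reference: the callbacks handed to legacy_suspend() use the old
 * two-argument signature, e.g. a legacy bus method (foo_bus_suspend is
 * hypothetical):
 *
 *	static int foo_bus_suspend(struct device *dev, pm_message_t state);
 *
 * unlike the one-argument pm_callback_t used by dpm_run_callback().
 */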

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		async_error = -EBUSY;
		goto Complete;
	}

	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);

 Complete:
	complete_all(&dev->power.completion);

	if (error) {
		pm_runtime_put_sync(dev);
		async_error = error;
	} else if (dev->power.is_suspended) {
		__pm_runtime_disable(dev, false);
	}

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}
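
/*
 * Note: async_error is the rendezvous point between the asynchronous and
 * synchronous suspend paths.  The first error (or -EBUSY from a pending
 * wakeup) stored there makes every later __device_suspend() bail out at its
 * Complete label, so a single failure aborts the whole phase instead of
 * suspending further devices.
 */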

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of
 * the device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int error = 0;

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		error = callback(dev);
		suspend_report_result(callback, error);
	}

	device_unlock(dev);

	return error;
}
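
/*
 * Illustrative sketch (foo_prepare is hypothetical): a ->prepare() callback
 * may return -EAGAIN, which dpm_prepare() below treats as "try this device
 * again" rather than as a failure of the whole transition:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		if (foo_still_instantiating_children(dev))	// hypothetical
 *			return -EAGAIN;
 *		return 0;
 *	}
 */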

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared for power transition: code %d\n",
			       dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
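
/*
 * Usage sketch (hedged; foo_suspend, struct foo and foo->peer are
 * hypothetical): a driver that must not suspend before another device has
 * finished suspending can serialize on it from its own callback:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->peer);
 *		if (error)
 *			return error;
 *		return 0;	// now safe to quiesce the hardware
 *	}
 */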