/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
	INIT_LIST_HEAD(&dev->power.entry);
	dev->power.power_state = PMSG_INVALID;
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev_pm_qos_constraints_init(dev);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
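	/*
	 * Wake up any dpm_wait() callers still sleeping on this device's
	 * power.completion before the device leaves the PM lists.
	 */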
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	dev_pm_qos_constraints_destroy(dev);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling %s+ @ %i\n",
			dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
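 *
 * The callback matching @state.event is looked up in @ops and, if present,
 * invoked: for example, PM_EVENT_SUSPEND runs ops->suspend() and
 * PM_EVENT_RESUME runs ops->resume(), with the result fed to
 * suspend_report_result().  Unrecognized events yield -EINVAL.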
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;
	ktime_t calltime = ktime_set(0, 0), delta, rettime;

	if (initcall_debug) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
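 *
 * The callback is taken from the first of dev->pm_domain, dev->type,
 * dev->class and dev->bus that provides a set of PM operations.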
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "EARLY power domain ");
		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;
	bool put = false;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
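	 * Clearing power.is_prepared before the resume callbacks run means
	 * device_pm_add() will not warn about a child being registered
	 * under a still-"sleeping" parent.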
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	pm_runtime_enable(dev);
	put = true;

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pm_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
	}

 End:
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (put)
		pm_runtime_put_sync(dev);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
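 *
 * As with the other per-device callbacks, the first of dev->pm_domain,
 * dev->type, dev->class and dev->bus that provides PM operations is used,
 * and its optional ->complete() callback is only run if present.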
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "completing power domain ");
		if (dev->pm_domain->ops.complete)
			dev->pm_domain->ops.complete(dev);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "completing type ");
		if (dev->type->pm->complete)
			dev->type->pm->complete(dev);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "completing class ");
		if (dev->class->pm->complete)
			dev->class->pm->complete(dev);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "completing ");
		if (dev->bus->pm->complete)
			dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices on dpm_prepared_list
 * (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
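 *
 * The "noirq" callback is selected in the usual subsystem order (power
 * domain, then device type, class and bus).  On error, dpm_suspend_noirq()
 * rolls the whole transition back via dpm_resume_noirq().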
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error;

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "LATE power domain ");
		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
		if (error)
			return error;
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			return error;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			return error;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			return error;
	}

	return 0;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
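 *
 * All children of @dev are waited for first.  If a system wakeup turns out
 * to be pending before the callbacks run, the suspend is aborted and -EBUSY
 * is recorded in async_error so that dpm_suspend() fails the transition.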
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait_for_children(dev, async);

	if (async_error)
		return 0;

	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		async_error = -EBUSY;
		return 0;
	}

	device_lock(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pm_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path && dev->parent)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);
	complete_all(&dev->power.completion);

	if (error) {
		pm_runtime_put_sync(dev);
		async_error = error;
	} else if (dev->power.is_suspended) {
		__pm_runtime_disable(dev, false);
	}

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
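 *
 * Devices are taken from the tail of dpm_prepared_list, i.e. in reverse
 * order of discovery so that children are suspended before their parents,
 * and moved to dpm_suspended_list as they succeed.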
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "preparing power domain ");
		if (dev->pm_domain->ops.prepare)
			error = dev->pm_domain->ops.prepare(dev);
		suspend_report_result(dev->pm_domain->ops.prepare, error);
		if (error)
			goto End;
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "preparing type ");
		if (dev->type->pm->prepare)
			error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "preparing class ");
		if (dev->class->pm->prepare)
			error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "preparing ");
		if (dev->bus->pm->prepare)
			error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
	}

 End:
	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
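 *
 * A ->prepare() callback returning -EAGAIN is not treated as a failure:
 * the device is left at the head of dpm_list and the loop retries it on
 * the next iteration, so the transition continues.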
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
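
/*
 * Typical call sequence from a system sleep transition (a sketch for
 * orientation only; the actual callers live in kernel/power/, not in
 * this file):
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	if (!error) {
 *		error = dpm_suspend_noirq(PMSG_SUSPEND);
 *		if (!error) {
 *			...put the system to sleep and wake back up...
 *			dpm_resume_noirq(PMSG_RESUME);
 *		}
 *	}
 *	dpm_resume_end(PMSG_RESUME);
 */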