1 /* 2 * drivers/base/power/main.c - Where the driver meets power management. 3 * 4 * Copyright (c) 2003 Patrick Mochel 5 * Copyright (c) 2003 Open Source Development Lab 6 * 7 * This file is released under the GPLv2 8 * 9 * 10 * The driver model core calls device_pm_add() when a device is registered. 11 * This will initialize the embedded device_pm_info object in the device 12 * and add it to the list of power-controlled devices. sysfs entries for 13 * controlling device power management will also be added. 14 * 15 * A separate list is used for keeping track of power info, because the power 16 * domain dependencies may differ from the ancestral dependencies that the 17 * subsystem list maintains. 18 */ 19 20 #include <linux/device.h> 21 #include <linux/kallsyms.h> 22 #include <linux/mutex.h> 23 #include <linux/pm.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/resume-trace.h> 26 #include <linux/interrupt.h> 27 #include <linux/sched.h> 28 #include <linux/async.h> 29 #include <linux/suspend.h> 30 31 #include "../base.h" 32 #include "power.h" 33 34 /* 35 * The entries in the dpm_list list are in a depth first order, simply 36 * because children are guaranteed to be discovered after parents, and 37 * are inserted at the back of the list on discovery. 38 * 39 * Since device_pm_add() may be called with a device lock held, 40 * we must never try to acquire a device lock while holding 41 * dpm_list_mutex. 42 */ 43 44 LIST_HEAD(dpm_list); 45 LIST_HEAD(dpm_prepared_list); 46 LIST_HEAD(dpm_suspended_list); 47 LIST_HEAD(dpm_noirq_list); 48 49 static DEFINE_MUTEX(dpm_list_mtx); 50 static pm_message_t pm_transition; 51 52 static int async_error; 53 54 /** 55 * device_pm_init - Initialize the PM-related part of a device object. 56 * @dev: Device object being initialized. 57 */ 58 void device_pm_init(struct device *dev) 59 { 60 dev->power.in_suspend = false; 61 init_completion(&dev->power.completion); 62 complete_all(&dev->power.completion); 63 dev->power.wakeup = NULL; 64 spin_lock_init(&dev->power.lock); 65 pm_runtime_init(dev); 66 INIT_LIST_HEAD(&dev->power.entry); 67 } 68 69 /** 70 * device_pm_lock - Lock the list of active devices used by the PM core. 71 */ 72 void device_pm_lock(void) 73 { 74 mutex_lock(&dpm_list_mtx); 75 } 76 77 /** 78 * device_pm_unlock - Unlock the list of active devices used by the PM core. 79 */ 80 void device_pm_unlock(void) 81 { 82 mutex_unlock(&dpm_list_mtx); 83 } 84 85 /** 86 * device_pm_add - Add a device to the PM core's list of active devices. 87 * @dev: Device to add to the list. 88 */ 89 void device_pm_add(struct device *dev) 90 { 91 pr_debug("PM: Adding info for %s:%s\n", 92 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); 93 mutex_lock(&dpm_list_mtx); 94 if (dev->parent && dev->parent->power.in_suspend) 95 dev_warn(dev, "parent %s should not be sleeping\n", 96 dev_name(dev->parent)); 97 list_add_tail(&dev->power.entry, &dpm_list); 98 mutex_unlock(&dpm_list_mtx); 99 } 100 101 /** 102 * device_pm_remove - Remove a device from the PM core's list of active devices. 103 * @dev: Device to be removed from the list. 104 */ 105 void device_pm_remove(struct device *dev) 106 { 107 pr_debug("PM: Removing info for %s:%s\n", 108 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); 109 complete_all(&dev->power.completion); 110 mutex_lock(&dpm_list_mtx); 111 list_del_init(&dev->power.entry); 112 mutex_unlock(&dpm_list_mtx); 113 device_wakeup_disable(dev); 114 pm_runtime_remove(dev); 115 } 116 117 /** 118 * device_pm_move_before - Move device in the PM core's list of active devices. 119 * @deva: Device to move in dpm_list. 120 * @devb: Device @deva should come before. 121 */ 122 void device_pm_move_before(struct device *deva, struct device *devb) 123 { 124 pr_debug("PM: Moving %s:%s before %s:%s\n", 125 deva->bus ? deva->bus->name : "No Bus", dev_name(deva), 126 devb->bus ? devb->bus->name : "No Bus", dev_name(devb)); 127 /* Delete deva from dpm_list and reinsert before devb. */ 128 list_move_tail(&deva->power.entry, &devb->power.entry); 129 } 130 131 /** 132 * device_pm_move_after - Move device in the PM core's list of active devices. 133 * @deva: Device to move in dpm_list. 134 * @devb: Device @deva should come after. 135 */ 136 void device_pm_move_after(struct device *deva, struct device *devb) 137 { 138 pr_debug("PM: Moving %s:%s after %s:%s\n", 139 deva->bus ? deva->bus->name : "No Bus", dev_name(deva), 140 devb->bus ? devb->bus->name : "No Bus", dev_name(devb)); 141 /* Delete deva from dpm_list and reinsert after devb. */ 142 list_move(&deva->power.entry, &devb->power.entry); 143 } 144 145 /** 146 * device_pm_move_last - Move device to end of the PM core's list of devices. 147 * @dev: Device to move in dpm_list. 148 */ 149 void device_pm_move_last(struct device *dev) 150 { 151 pr_debug("PM: Moving %s:%s to end of list\n", 152 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); 153 list_move_tail(&dev->power.entry, &dpm_list); 154 } 155 156 static ktime_t initcall_debug_start(struct device *dev) 157 { 158 ktime_t calltime = ktime_set(0, 0); 159 160 if (initcall_debug) { 161 pr_info("calling %s+ @ %i\n", 162 dev_name(dev), task_pid_nr(current)); 163 calltime = ktime_get(); 164 } 165 166 return calltime; 167 } 168 169 static void initcall_debug_report(struct device *dev, ktime_t calltime, 170 int error) 171 { 172 ktime_t delta, rettime; 173 174 if (initcall_debug) { 175 rettime = ktime_get(); 176 delta = ktime_sub(rettime, calltime); 177 pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), 178 error, (unsigned long long)ktime_to_ns(delta) >> 10); 179 } 180 } 181 182 /** 183 * dpm_wait - Wait for a PM operation to complete. 184 * @dev: Device to wait for. 185 * @async: If unset, wait only if the device's power.async_suspend flag is set. 186 */ 187 static void dpm_wait(struct device *dev, bool async) 188 { 189 if (!dev) 190 return; 191 192 if (async || (pm_async_enabled && dev->power.async_suspend)) 193 wait_for_completion(&dev->power.completion); 194 } 195 196 static int dpm_wait_fn(struct device *dev, void *async_ptr) 197 { 198 dpm_wait(dev, *((bool *)async_ptr)); 199 return 0; 200 } 201 202 static void dpm_wait_for_children(struct device *dev, bool async) 203 { 204 device_for_each_child(dev, &async, dpm_wait_fn); 205 } 206 207 /** 208 * pm_op - Execute the PM operation appropriate for given PM event. 209 * @dev: Device to handle. 210 * @ops: PM operations to choose from. 211 * @state: PM transition of the system being carried out. 212 */ 213 static int pm_op(struct device *dev, 214 const struct dev_pm_ops *ops, 215 pm_message_t state) 216 { 217 int error = 0; 218 ktime_t calltime; 219 220 calltime = initcall_debug_start(dev); 221 222 switch (state.event) { 223 #ifdef CONFIG_SUSPEND 224 case PM_EVENT_SUSPEND: 225 if (ops->suspend) { 226 error = ops->suspend(dev); 227 suspend_report_result(ops->suspend, error); 228 } 229 break; 230 case PM_EVENT_RESUME: 231 if (ops->resume) { 232 error = ops->resume(dev); 233 suspend_report_result(ops->resume, error); 234 } 235 break; 236 #endif /* CONFIG_SUSPEND */ 237 #ifdef CONFIG_HIBERNATE_CALLBACKS 238 case PM_EVENT_FREEZE: 239 case PM_EVENT_QUIESCE: 240 if (ops->freeze) { 241 error = ops->freeze(dev); 242 suspend_report_result(ops->freeze, error); 243 } 244 break; 245 case PM_EVENT_HIBERNATE: 246 if (ops->poweroff) { 247 error = ops->poweroff(dev); 248 suspend_report_result(ops->poweroff, error); 249 } 250 break; 251 case PM_EVENT_THAW: 252 case PM_EVENT_RECOVER: 253 if (ops->thaw) { 254 error = ops->thaw(dev); 255 suspend_report_result(ops->thaw, error); 256 } 257 break; 258 case PM_EVENT_RESTORE: 259 if (ops->restore) { 260 error = ops->restore(dev); 261 suspend_report_result(ops->restore, error); 262 } 263 break; 264 #endif /* CONFIG_HIBERNATE_CALLBACKS */ 265 default: 266 error = -EINVAL; 267 } 268 269 initcall_debug_report(dev, calltime, error); 270 271 return error; 272 } 273 274 /** 275 * pm_noirq_op - Execute the PM operation appropriate for given PM event. 276 * @dev: Device to handle. 277 * @ops: PM operations to choose from. 278 * @state: PM transition of the system being carried out. 279 * 280 * The driver of @dev will not receive interrupts while this function is being 281 * executed. 282 */ 283 static int pm_noirq_op(struct device *dev, 284 const struct dev_pm_ops *ops, 285 pm_message_t state) 286 { 287 int error = 0; 288 ktime_t calltime = ktime_set(0, 0), delta, rettime; 289 290 if (initcall_debug) { 291 pr_info("calling %s+ @ %i, parent: %s\n", 292 dev_name(dev), task_pid_nr(current), 293 dev->parent ? dev_name(dev->parent) : "none"); 294 calltime = ktime_get(); 295 } 296 297 switch (state.event) { 298 #ifdef CONFIG_SUSPEND 299 case PM_EVENT_SUSPEND: 300 if (ops->suspend_noirq) { 301 error = ops->suspend_noirq(dev); 302 suspend_report_result(ops->suspend_noirq, error); 303 } 304 break; 305 case PM_EVENT_RESUME: 306 if (ops->resume_noirq) { 307 error = ops->resume_noirq(dev); 308 suspend_report_result(ops->resume_noirq, error); 309 } 310 break; 311 #endif /* CONFIG_SUSPEND */ 312 #ifdef CONFIG_HIBERNATE_CALLBACKS 313 case PM_EVENT_FREEZE: 314 case PM_EVENT_QUIESCE: 315 if (ops->freeze_noirq) { 316 error = ops->freeze_noirq(dev); 317 suspend_report_result(ops->freeze_noirq, error); 318 } 319 break; 320 case PM_EVENT_HIBERNATE: 321 if (ops->poweroff_noirq) { 322 error = ops->poweroff_noirq(dev); 323 suspend_report_result(ops->poweroff_noirq, error); 324 } 325 break; 326 case PM_EVENT_THAW: 327 case PM_EVENT_RECOVER: 328 if (ops->thaw_noirq) { 329 error = ops->thaw_noirq(dev); 330 suspend_report_result(ops->thaw_noirq, error); 331 } 332 break; 333 case PM_EVENT_RESTORE: 334 if (ops->restore_noirq) { 335 error = ops->restore_noirq(dev); 336 suspend_report_result(ops->restore_noirq, error); 337 } 338 break; 339 #endif /* CONFIG_HIBERNATE_CALLBACKS */ 340 default: 341 error = -EINVAL; 342 } 343 344 if (initcall_debug) { 345 rettime = ktime_get(); 346 delta = ktime_sub(rettime, calltime); 347 printk("initcall %s_i+ returned %d after %Ld usecs\n", 348 dev_name(dev), error, 349 (unsigned long long)ktime_to_ns(delta) >> 10); 350 } 351 352 return error; 353 } 354 355 static char *pm_verb(int event) 356 { 357 switch (event) { 358 case PM_EVENT_SUSPEND: 359 return "suspend"; 360 case PM_EVENT_RESUME: 361 return "resume"; 362 case PM_EVENT_FREEZE: 363 return "freeze"; 364 case PM_EVENT_QUIESCE: 365 return "quiesce"; 366 case PM_EVENT_HIBERNATE: 367 return "hibernate"; 368 case PM_EVENT_THAW: 369 return "thaw"; 370 case PM_EVENT_RESTORE: 371 return "restore"; 372 case PM_EVENT_RECOVER: 373 return "recover"; 374 default: 375 return "(unknown PM event)"; 376 } 377 } 378 379 static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info) 380 { 381 dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event), 382 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ? 383 ", may wakeup" : ""); 384 } 385 386 static void pm_dev_err(struct device *dev, pm_message_t state, char *info, 387 int error) 388 { 389 printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n", 390 dev_name(dev), pm_verb(state.event), info, error); 391 } 392 393 static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) 394 { 395 ktime_t calltime; 396 u64 usecs64; 397 int usecs; 398 399 calltime = ktime_get(); 400 usecs64 = ktime_to_ns(ktime_sub(calltime, starttime)); 401 do_div(usecs64, NSEC_PER_USEC); 402 usecs = usecs64; 403 if (usecs == 0) 404 usecs = 1; 405 pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n", 406 info ?: "", info ? " " : "", pm_verb(state.event), 407 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); 408 } 409 410 /*------------------------- Resume routines -------------------------*/ 411 412 /** 413 * device_resume_noirq - Execute an "early resume" callback for given device. 414 * @dev: Device to handle. 415 * @state: PM transition of the system being carried out. 416 * 417 * The driver of @dev will not receive interrupts while this function is being 418 * executed. 419 */ 420 static int device_resume_noirq(struct device *dev, pm_message_t state) 421 { 422 int error = 0; 423 424 TRACE_DEVICE(dev); 425 TRACE_RESUME(0); 426 427 if (dev->pwr_domain) { 428 pm_dev_dbg(dev, state, "EARLY power domain "); 429 pm_noirq_op(dev, &dev->pwr_domain->ops, state); 430 } 431 432 if (dev->type && dev->type->pm) { 433 pm_dev_dbg(dev, state, "EARLY type "); 434 error = pm_noirq_op(dev, dev->type->pm, state); 435 } else if (dev->class && dev->class->pm) { 436 pm_dev_dbg(dev, state, "EARLY class "); 437 error = pm_noirq_op(dev, dev->class->pm, state); 438 } else if (dev->bus && dev->bus->pm) { 439 pm_dev_dbg(dev, state, "EARLY "); 440 error = pm_noirq_op(dev, dev->bus->pm, state); 441 } 442 443 TRACE_RESUME(error); 444 return error; 445 } 446 447 /** 448 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices. 449 * @state: PM transition of the system being carried out. 450 * 451 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and 452 * enable device drivers to receive interrupts. 453 */ 454 void dpm_resume_noirq(pm_message_t state) 455 { 456 ktime_t starttime = ktime_get(); 457 458 mutex_lock(&dpm_list_mtx); 459 while (!list_empty(&dpm_noirq_list)) { 460 struct device *dev = to_device(dpm_noirq_list.next); 461 int error; 462 463 get_device(dev); 464 list_move_tail(&dev->power.entry, &dpm_suspended_list); 465 mutex_unlock(&dpm_list_mtx); 466 467 error = device_resume_noirq(dev, state); 468 if (error) 469 pm_dev_err(dev, state, " early", error); 470 471 mutex_lock(&dpm_list_mtx); 472 put_device(dev); 473 } 474 mutex_unlock(&dpm_list_mtx); 475 dpm_show_time(starttime, state, "early"); 476 resume_device_irqs(); 477 } 478 EXPORT_SYMBOL_GPL(dpm_resume_noirq); 479 480 /** 481 * legacy_resume - Execute a legacy (bus or class) resume callback for device. 482 * @dev: Device to resume. 483 * @cb: Resume callback to execute. 484 */ 485 static int legacy_resume(struct device *dev, int (*cb)(struct device *dev)) 486 { 487 int error; 488 ktime_t calltime; 489 490 calltime = initcall_debug_start(dev); 491 492 error = cb(dev); 493 suspend_report_result(cb, error); 494 495 initcall_debug_report(dev, calltime, error); 496 497 return error; 498 } 499 500 /** 501 * device_resume - Execute "resume" callbacks for given device. 502 * @dev: Device to handle. 503 * @state: PM transition of the system being carried out. 504 * @async: If true, the device is being resumed asynchronously. 505 */ 506 static int device_resume(struct device *dev, pm_message_t state, bool async) 507 { 508 int error = 0; 509 510 TRACE_DEVICE(dev); 511 TRACE_RESUME(0); 512 513 dpm_wait(dev->parent, async); 514 device_lock(dev); 515 516 dev->power.in_suspend = false; 517 518 if (dev->pwr_domain) { 519 pm_dev_dbg(dev, state, "power domain "); 520 pm_op(dev, &dev->pwr_domain->ops, state); 521 } 522 523 if (dev->type && dev->type->pm) { 524 pm_dev_dbg(dev, state, "type "); 525 error = pm_op(dev, dev->type->pm, state); 526 goto End; 527 } 528 529 if (dev->class) { 530 if (dev->class->pm) { 531 pm_dev_dbg(dev, state, "class "); 532 error = pm_op(dev, dev->class->pm, state); 533 goto End; 534 } else if (dev->class->resume) { 535 pm_dev_dbg(dev, state, "legacy class "); 536 error = legacy_resume(dev, dev->class->resume); 537 goto End; 538 } 539 } 540 541 if (dev->bus) { 542 if (dev->bus->pm) { 543 pm_dev_dbg(dev, state, ""); 544 error = pm_op(dev, dev->bus->pm, state); 545 } else if (dev->bus->resume) { 546 pm_dev_dbg(dev, state, "legacy "); 547 error = legacy_resume(dev, dev->bus->resume); 548 } 549 } 550 551 End: 552 device_unlock(dev); 553 complete_all(&dev->power.completion); 554 555 TRACE_RESUME(error); 556 return error; 557 } 558 559 static void async_resume(void *data, async_cookie_t cookie) 560 { 561 struct device *dev = (struct device *)data; 562 int error; 563 564 error = device_resume(dev, pm_transition, true); 565 if (error) 566 pm_dev_err(dev, pm_transition, " async", error); 567 put_device(dev); 568 } 569 570 static bool is_async(struct device *dev) 571 { 572 return dev->power.async_suspend && pm_async_enabled 573 && !pm_trace_is_enabled(); 574 } 575 576 /** 577 * dpm_resume - Execute "resume" callbacks for non-sysdev devices. 578 * @state: PM transition of the system being carried out. 579 * 580 * Execute the appropriate "resume" callback for all devices whose status 581 * indicates that they are suspended. 582 */ 583 static void dpm_resume(pm_message_t state) 584 { 585 struct device *dev; 586 ktime_t starttime = ktime_get(); 587 588 mutex_lock(&dpm_list_mtx); 589 pm_transition = state; 590 async_error = 0; 591 592 list_for_each_entry(dev, &dpm_suspended_list, power.entry) { 593 INIT_COMPLETION(dev->power.completion); 594 if (is_async(dev)) { 595 get_device(dev); 596 async_schedule(async_resume, dev); 597 } 598 } 599 600 while (!list_empty(&dpm_suspended_list)) { 601 dev = to_device(dpm_suspended_list.next); 602 get_device(dev); 603 if (!is_async(dev)) { 604 int error; 605 606 mutex_unlock(&dpm_list_mtx); 607 608 error = device_resume(dev, state, false); 609 if (error) 610 pm_dev_err(dev, state, "", error); 611 612 mutex_lock(&dpm_list_mtx); 613 } 614 if (!list_empty(&dev->power.entry)) 615 list_move_tail(&dev->power.entry, &dpm_prepared_list); 616 put_device(dev); 617 } 618 mutex_unlock(&dpm_list_mtx); 619 async_synchronize_full(); 620 dpm_show_time(starttime, state, NULL); 621 } 622 623 /** 624 * device_complete - Complete a PM transition for given device. 625 * @dev: Device to handle. 626 * @state: PM transition of the system being carried out. 627 */ 628 static void device_complete(struct device *dev, pm_message_t state) 629 { 630 device_lock(dev); 631 632 if (dev->pwr_domain && dev->pwr_domain->ops.complete) { 633 pm_dev_dbg(dev, state, "completing power domain "); 634 dev->pwr_domain->ops.complete(dev); 635 } 636 637 if (dev->type && dev->type->pm) { 638 pm_dev_dbg(dev, state, "completing type "); 639 if (dev->type->pm->complete) 640 dev->type->pm->complete(dev); 641 } else if (dev->class && dev->class->pm) { 642 pm_dev_dbg(dev, state, "completing class "); 643 if (dev->class->pm->complete) 644 dev->class->pm->complete(dev); 645 } else if (dev->bus && dev->bus->pm) { 646 pm_dev_dbg(dev, state, "completing "); 647 if (dev->bus->pm->complete) 648 dev->bus->pm->complete(dev); 649 } 650 651 device_unlock(dev); 652 } 653 654 /** 655 * dpm_complete - Complete a PM transition for all non-sysdev devices. 656 * @state: PM transition of the system being carried out. 657 * 658 * Execute the ->complete() callbacks for all devices whose PM status is not 659 * DPM_ON (this allows new devices to be registered). 660 */ 661 static void dpm_complete(pm_message_t state) 662 { 663 struct list_head list; 664 665 INIT_LIST_HEAD(&list); 666 mutex_lock(&dpm_list_mtx); 667 while (!list_empty(&dpm_prepared_list)) { 668 struct device *dev = to_device(dpm_prepared_list.prev); 669 670 get_device(dev); 671 dev->power.in_suspend = false; 672 list_move(&dev->power.entry, &list); 673 mutex_unlock(&dpm_list_mtx); 674 675 device_complete(dev, state); 676 677 mutex_lock(&dpm_list_mtx); 678 put_device(dev); 679 } 680 list_splice(&list, &dpm_list); 681 mutex_unlock(&dpm_list_mtx); 682 } 683 684 /** 685 * dpm_resume_end - Execute "resume" callbacks and complete system transition. 686 * @state: PM transition of the system being carried out. 687 * 688 * Execute "resume" callbacks for all devices and complete the PM transition of 689 * the system. 690 */ 691 void dpm_resume_end(pm_message_t state) 692 { 693 might_sleep(); 694 dpm_resume(state); 695 dpm_complete(state); 696 } 697 EXPORT_SYMBOL_GPL(dpm_resume_end); 698 699 700 /*------------------------- Suspend routines -------------------------*/ 701 702 /** 703 * resume_event - Return a "resume" message for given "suspend" sleep state. 704 * @sleep_state: PM message representing a sleep state. 705 * 706 * Return a PM message representing the resume event corresponding to given 707 * sleep state. 708 */ 709 static pm_message_t resume_event(pm_message_t sleep_state) 710 { 711 switch (sleep_state.event) { 712 case PM_EVENT_SUSPEND: 713 return PMSG_RESUME; 714 case PM_EVENT_FREEZE: 715 case PM_EVENT_QUIESCE: 716 return PMSG_RECOVER; 717 case PM_EVENT_HIBERNATE: 718 return PMSG_RESTORE; 719 } 720 return PMSG_ON; 721 } 722 723 /** 724 * device_suspend_noirq - Execute a "late suspend" callback for given device. 725 * @dev: Device to handle. 726 * @state: PM transition of the system being carried out. 727 * 728 * The driver of @dev will not receive interrupts while this function is being 729 * executed. 730 */ 731 static int device_suspend_noirq(struct device *dev, pm_message_t state) 732 { 733 int error; 734 735 if (dev->type && dev->type->pm) { 736 pm_dev_dbg(dev, state, "LATE type "); 737 error = pm_noirq_op(dev, dev->type->pm, state); 738 if (error) 739 return error; 740 } else if (dev->class && dev->class->pm) { 741 pm_dev_dbg(dev, state, "LATE class "); 742 error = pm_noirq_op(dev, dev->class->pm, state); 743 if (error) 744 return error; 745 } else if (dev->bus && dev->bus->pm) { 746 pm_dev_dbg(dev, state, "LATE "); 747 error = pm_noirq_op(dev, dev->bus->pm, state); 748 if (error) 749 return error; 750 } 751 752 if (dev->pwr_domain) { 753 pm_dev_dbg(dev, state, "LATE power domain "); 754 pm_noirq_op(dev, &dev->pwr_domain->ops, state); 755 } 756 757 return 0; 758 } 759 760 /** 761 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices. 762 * @state: PM transition of the system being carried out. 763 * 764 * Prevent device drivers from receiving interrupts and call the "noirq" suspend 765 * handlers for all non-sysdev devices. 766 */ 767 int dpm_suspend_noirq(pm_message_t state) 768 { 769 ktime_t starttime = ktime_get(); 770 int error = 0; 771 772 suspend_device_irqs(); 773 mutex_lock(&dpm_list_mtx); 774 while (!list_empty(&dpm_suspended_list)) { 775 struct device *dev = to_device(dpm_suspended_list.prev); 776 777 get_device(dev); 778 mutex_unlock(&dpm_list_mtx); 779 780 error = device_suspend_noirq(dev, state); 781 782 mutex_lock(&dpm_list_mtx); 783 if (error) { 784 pm_dev_err(dev, state, " late", error); 785 put_device(dev); 786 break; 787 } 788 if (!list_empty(&dev->power.entry)) 789 list_move(&dev->power.entry, &dpm_noirq_list); 790 put_device(dev); 791 } 792 mutex_unlock(&dpm_list_mtx); 793 if (error) 794 dpm_resume_noirq(resume_event(state)); 795 else 796 dpm_show_time(starttime, state, "late"); 797 return error; 798 } 799 EXPORT_SYMBOL_GPL(dpm_suspend_noirq); 800 801 /** 802 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. 803 * @dev: Device to suspend. 804 * @state: PM transition of the system being carried out. 805 * @cb: Suspend callback to execute. 806 */ 807 static int legacy_suspend(struct device *dev, pm_message_t state, 808 int (*cb)(struct device *dev, pm_message_t state)) 809 { 810 int error; 811 ktime_t calltime; 812 813 calltime = initcall_debug_start(dev); 814 815 error = cb(dev, state); 816 suspend_report_result(cb, error); 817 818 initcall_debug_report(dev, calltime, error); 819 820 return error; 821 } 822 823 /** 824 * device_suspend - Execute "suspend" callbacks for given device. 825 * @dev: Device to handle. 826 * @state: PM transition of the system being carried out. 827 * @async: If true, the device is being suspended asynchronously. 828 */ 829 static int __device_suspend(struct device *dev, pm_message_t state, bool async) 830 { 831 int error = 0; 832 833 dpm_wait_for_children(dev, async); 834 device_lock(dev); 835 836 if (async_error) 837 goto End; 838 839 if (pm_wakeup_pending()) { 840 async_error = -EBUSY; 841 goto End; 842 } 843 844 if (dev->type && dev->type->pm) { 845 pm_dev_dbg(dev, state, "type "); 846 error = pm_op(dev, dev->type->pm, state); 847 goto Domain; 848 } 849 850 if (dev->class) { 851 if (dev->class->pm) { 852 pm_dev_dbg(dev, state, "class "); 853 error = pm_op(dev, dev->class->pm, state); 854 goto Domain; 855 } else if (dev->class->suspend) { 856 pm_dev_dbg(dev, state, "legacy class "); 857 error = legacy_suspend(dev, state, dev->class->suspend); 858 goto Domain; 859 } 860 } 861 862 if (dev->bus) { 863 if (dev->bus->pm) { 864 pm_dev_dbg(dev, state, ""); 865 error = pm_op(dev, dev->bus->pm, state); 866 } else if (dev->bus->suspend) { 867 pm_dev_dbg(dev, state, "legacy "); 868 error = legacy_suspend(dev, state, dev->bus->suspend); 869 } 870 } 871 872 Domain: 873 if (!error && dev->pwr_domain) { 874 pm_dev_dbg(dev, state, "power domain "); 875 pm_op(dev, &dev->pwr_domain->ops, state); 876 } 877 878 End: 879 device_unlock(dev); 880 complete_all(&dev->power.completion); 881 882 if (error) 883 async_error = error; 884 885 return error; 886 } 887 888 static void async_suspend(void *data, async_cookie_t cookie) 889 { 890 struct device *dev = (struct device *)data; 891 int error; 892 893 error = __device_suspend(dev, pm_transition, true); 894 if (error) 895 pm_dev_err(dev, pm_transition, " async", error); 896 897 put_device(dev); 898 } 899 900 static int device_suspend(struct device *dev) 901 { 902 INIT_COMPLETION(dev->power.completion); 903 904 if (pm_async_enabled && dev->power.async_suspend) { 905 get_device(dev); 906 async_schedule(async_suspend, dev); 907 return 0; 908 } 909 910 return __device_suspend(dev, pm_transition, false); 911 } 912 913 /** 914 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. 915 * @state: PM transition of the system being carried out. 916 */ 917 static int dpm_suspend(pm_message_t state) 918 { 919 ktime_t starttime = ktime_get(); 920 int error = 0; 921 922 mutex_lock(&dpm_list_mtx); 923 pm_transition = state; 924 async_error = 0; 925 while (!list_empty(&dpm_prepared_list)) { 926 struct device *dev = to_device(dpm_prepared_list.prev); 927 928 get_device(dev); 929 mutex_unlock(&dpm_list_mtx); 930 931 error = device_suspend(dev); 932 933 mutex_lock(&dpm_list_mtx); 934 if (error) { 935 pm_dev_err(dev, state, "", error); 936 put_device(dev); 937 break; 938 } 939 if (!list_empty(&dev->power.entry)) 940 list_move(&dev->power.entry, &dpm_suspended_list); 941 put_device(dev); 942 if (async_error) 943 break; 944 } 945 mutex_unlock(&dpm_list_mtx); 946 async_synchronize_full(); 947 if (!error) 948 error = async_error; 949 if (!error) 950 dpm_show_time(starttime, state, NULL); 951 return error; 952 } 953 954 /** 955 * device_prepare - Prepare a device for system power transition. 956 * @dev: Device to handle. 957 * @state: PM transition of the system being carried out. 958 * 959 * Execute the ->prepare() callback(s) for given device. No new children of the 960 * device may be registered after this function has returned. 961 */ 962 static int device_prepare(struct device *dev, pm_message_t state) 963 { 964 int error = 0; 965 966 device_lock(dev); 967 968 if (dev->type && dev->type->pm) { 969 pm_dev_dbg(dev, state, "preparing type "); 970 if (dev->type->pm->prepare) 971 error = dev->type->pm->prepare(dev); 972 suspend_report_result(dev->type->pm->prepare, error); 973 if (error) 974 goto End; 975 } else if (dev->class && dev->class->pm) { 976 pm_dev_dbg(dev, state, "preparing class "); 977 if (dev->class->pm->prepare) 978 error = dev->class->pm->prepare(dev); 979 suspend_report_result(dev->class->pm->prepare, error); 980 if (error) 981 goto End; 982 } else if (dev->bus && dev->bus->pm) { 983 pm_dev_dbg(dev, state, "preparing "); 984 if (dev->bus->pm->prepare) 985 error = dev->bus->pm->prepare(dev); 986 suspend_report_result(dev->bus->pm->prepare, error); 987 if (error) 988 goto End; 989 } 990 991 if (dev->pwr_domain && dev->pwr_domain->ops.prepare) { 992 pm_dev_dbg(dev, state, "preparing power domain "); 993 dev->pwr_domain->ops.prepare(dev); 994 } 995 996 End: 997 device_unlock(dev); 998 999 return error; 1000 } 1001 1002 /** 1003 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition. 1004 * @state: PM transition of the system being carried out. 1005 * 1006 * Execute the ->prepare() callback(s) for all devices. 1007 */ 1008 static int dpm_prepare(pm_message_t state) 1009 { 1010 int error = 0; 1011 1012 mutex_lock(&dpm_list_mtx); 1013 while (!list_empty(&dpm_list)) { 1014 struct device *dev = to_device(dpm_list.next); 1015 1016 get_device(dev); 1017 mutex_unlock(&dpm_list_mtx); 1018 1019 pm_runtime_get_noresume(dev); 1020 if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) 1021 pm_wakeup_event(dev, 0); 1022 1023 pm_runtime_put_sync(dev); 1024 error = pm_wakeup_pending() ? 1025 -EBUSY : device_prepare(dev, state); 1026 1027 mutex_lock(&dpm_list_mtx); 1028 if (error) { 1029 if (error == -EAGAIN) { 1030 put_device(dev); 1031 error = 0; 1032 continue; 1033 } 1034 printk(KERN_INFO "PM: Device %s not prepared " 1035 "for power transition: code %d\n", 1036 dev_name(dev), error); 1037 put_device(dev); 1038 break; 1039 } 1040 dev->power.in_suspend = true; 1041 if (!list_empty(&dev->power.entry)) 1042 list_move_tail(&dev->power.entry, &dpm_prepared_list); 1043 put_device(dev); 1044 } 1045 mutex_unlock(&dpm_list_mtx); 1046 return error; 1047 } 1048 1049 /** 1050 * dpm_suspend_start - Prepare devices for PM transition and suspend them. 1051 * @state: PM transition of the system being carried out. 1052 * 1053 * Prepare all non-sysdev devices for system PM transition and execute "suspend" 1054 * callbacks for them. 1055 */ 1056 int dpm_suspend_start(pm_message_t state) 1057 { 1058 int error; 1059 1060 might_sleep(); 1061 error = dpm_prepare(state); 1062 if (!error) 1063 error = dpm_suspend(state); 1064 return error; 1065 } 1066 EXPORT_SYMBOL_GPL(dpm_suspend_start); 1067 1068 void __suspend_report_result(const char *function, void *fn, int ret) 1069 { 1070 if (ret) 1071 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret); 1072 } 1073 EXPORT_SYMBOL_GPL(__suspend_report_result); 1074 1075 /** 1076 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete. 1077 * @dev: Device to wait for. 1078 * @subordinate: Device that needs to wait for @dev. 1079 */ 1080 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev) 1081 { 1082 dpm_wait(dev, subordinate->power.async_suspend); 1083 return async_error; 1084 } 1085 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev); 1086