/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device semaphore held,
 * we must never try to acquire a device semaphore while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

/*
 * Set once the preparation of devices for a PM transition has started, reset
 * before starting to resume devices.  Protected by dpm_list_mtx.
 */
static bool transition_started;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.status = DPM_ON;
	init_completion(&dev->power.completion);
	pm_runtime_init(dev);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent) {
		if (dev->parent->power.status >= DPM_SUSPENDING)
			dev_warn(dev, "parent %s should not be sleeping\n",
				 dev_name(dev->parent));
	} else if (transition_started) {
		/*
		 * We refuse to register parentless devices while a PM
		 * transition is in progress in order to avoid leaving them
		 * unhandled down the road.
		 */
		dev_WARN(dev, "Parentless device registered during a PM transition\n");
	}

	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling %s+ @ %i\n",
			dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
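/*
 * NOTE: these completion-based helpers provide the ordering guarantees for
 * asynchronous transitions: device_resume() waits for the device's parent
 * before running any callbacks and __device_suspend() waits for all of the
 * device's children, so a device is never resumed before its parent or
 * suspended before its children, regardless of how its callbacks are
 * scheduled.
 */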
/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}
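/*
 * For illustration only: pm_op() dispatches to the dev_pm_ops callbacks a
 * subsystem or driver provides.  A driver would typically declare them along
 * these lines ("foo" names are made up for this sketch; the struct
 * dev_pm_ops fields match the cases handled above):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		return 0;	// quiesce the device, save state
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return 0;	// restore state, reactivate the device
 *	}
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend = foo_suspend,
 *		.resume	 = foo_resume,
 *	};
 */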
/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
		       const struct dev_pm_ops *ops,
		       pm_message_t state)
{
	int error = 0;
	ktime_t calltime, delta, rettime;

	if (initcall_debug) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
		       int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
	       kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	s64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
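/*
 * Debugging note: the per-callback timing messages emitted by
 * initcall_debug_start(), initcall_debug_report() and the initcall_debug
 * branches of pm_noirq_op() are enabled by booting with the standard
 * "initcall_debug" kernel command line parameter; dpm_show_time() reports
 * the total duration of each phase unconditionally.
 */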
/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	list_for_each_entry(dev, &dpm_list, power.entry)
		if (dev->power.status > DPM_OFF) {
			int error;

			dev->power.status = DPM_OFF;
			error = device_resume_noirq(dev, state);
			if (error)
				pm_dev_err(dev, state, " early", error);
		}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
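/*
 * For illustration only: legacy_resume() (and legacy_suspend() below) cover
 * buses and classes that still use the pre-dev_pm_ops callbacks, e.g. a
 * hypothetical bus declared as:
 *
 *	static int foo_bus_suspend(struct device *dev, pm_message_t state);
 *	static int foo_bus_resume(struct device *dev);
 *
 *	static struct bus_type foo_bus_type = {
 *		.name	 = "foo",
 *		.suspend = foo_bus_suspend,
 *		.resume	 = foo_bus_resume,
 *	};
 *
 * New code should provide a dev_pm_ops instead, which takes precedence in
 * device_resume() and __device_suspend().
 */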
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	down(&dev->sem);

	dev->power.status = DPM_RESUMING;

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
		}
	}
 End:
	up(&dev->sem);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);
	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/*
 * A device is handled asynchronously only if that was requested via its
 * power.async_suspend flag, async PM is enabled globally and PM tracing is
 * not active (the RTC-based PM trace mechanism is only meaningful when
 * devices are handled in a strictly repeatable, synchronous order).
 */
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
	struct list_head list;
	struct device *dev;
	ktime_t starttime = ktime_get();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/* First pass: kick off the resume of all async devices ... */
	list_for_each_entry(dev, &dpm_list, power.entry) {
		if (dev->power.status < DPM_OFF)
			continue;

		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	/* ... then resume the remaining devices synchronously, in order. */
	while (!list_empty(&dpm_list)) {
		dev = to_device(dpm_list.next);
		get_device(dev);
		if (dev->power.status >= DPM_OFF && !is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);

			mutex_lock(&dpm_list_mtx);
			if (error)
				pm_dev_err(dev, state, "", error);
		} else if (dev->power.status == DPM_SUSPENDING) {
			/* Allow new children of the device to be registered. */
			dev->power.status = DPM_RESUMING;
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	down(&dev->sem);

	if (dev->class && dev->class->pm && dev->class->pm->complete) {
		pm_dev_dbg(dev, state, "completing class ");
		dev->class->pm->complete(dev);
	}

	if (dev->type && dev->type->pm && dev->type->pm->complete) {
		pm_dev_dbg(dev, state, "completing type ");
		dev->type->pm->complete(dev);
	}

	if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
		pm_dev_dbg(dev, state, "completing ");
		dev->bus->pm->complete(dev);
	}

	up(&dev->sem);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		if (dev->power.status > DPM_ON) {
			dev->power.status = DPM_ON;
			mutex_unlock(&dpm_list_mtx);

			device_complete(dev, state);
			pm_runtime_put_sync(dev);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	might_sleep();
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}
	return error;
}
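/*
 * For illustration only: the "noirq" callbacks dispatched above live in the
 * same dev_pm_ops structure as the ordinary ones, e.g. (hypothetical names):
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend	= foo_suspend,
 *		.resume		= foo_resume,
 *		.suspend_noirq	= foo_suspend_noirq,
 *		.resume_noirq	= foo_resume_noirq,
 *	};
 *
 * They run after suspend_device_irqs() (resp. before resume_device_irqs()),
 * i.e. while the driver's interrupt handlers will not be invoked.
 */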
/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
		error = device_suspend_noirq(dev, state);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			break;
		}
		dev->power.status = DPM_OFF_IRQ;
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

static int async_error;

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait_for_children(dev, async);
	down(&dev->sem);

	if (async_error)
		goto End;

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

	if (!error)
		dev->power.status = DPM_OFF;

 End:
	up(&dev->sem);
	complete_all(&dev->power.completion);

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		pm_dev_err(dev, pm_transition, " async", error);
		async_error = error;
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}
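/*
 * Illustrative note: a driver opts a device in to asynchronous suspend/resume
 * by setting its power.async_suspend flag before a transition starts
 * (typically via a helper in include/linux/device.h such as
 * device_enable_async_suspend(); the exact helper is an assumption of this
 * sketch), which boils down to:
 *
 *	dev->power.async_suspend = true;
 *
 * The flag only takes effect while pm_async_enabled is set, which user space
 * controls through /sys/power/pm_async.
 */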
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
	struct list_head list;
	ktime_t starttime = ktime_get();
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
		if (async_error)
			break;
	}
	list_splice(&list, dpm_list.prev);
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of
 * the device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	down(&dev->sem);

	if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing ");
		error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm && dev->type->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing type ");
		error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm && dev->class->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing class ");
		error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
	}
 End:
	up(&dev->sem);

	return error;
}
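/*
 * For illustration only (hypothetical "foo" names): ->prepare() runs before
 * any ->suspend() callbacks, and once it has returned the subsystem must not
 * register new children of the device:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		foo_stop_scanning(dev);	// assumed helper: quiesce discovery
 *		return 0;
 *	}
 *
 * Note that dpm_prepare() below treats -EAGAIN from ->prepare() as
 * non-fatal: the device is put back into the DPM_ON state and the error is
 * dropped rather than aborting the whole transition.
 */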
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
	struct list_head list;
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = true;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		dev->power.status = DPM_PREPARING;
		mutex_unlock(&dpm_list_mtx);

		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
			/* Wake-up requested during system sleep transition. */
			pm_runtime_put_sync(dev);
			error = -EBUSY;
		} else {
			error = device_prepare(dev, state);
		}

		mutex_lock(&dpm_list_mtx);
		if (error) {
			dev->power.status = DPM_ON;
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_ERR "PM: Failed to prepare device %s "
			       "for power transition: error %d\n",
			       kobject_name(&dev->kobj), error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_SUSPENDING;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute
 * "suspend" callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	might_sleep();
	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
void device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
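/*
 * Usage sketch for device_pm_wait_for_dev() (hypothetical names): a driver
 * whose device depends on another device that is not one of its ancestors
 * can enforce the ordering explicitly from its own callbacks, e.g.:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		// Make sure the supplier has finished resuming first.
 *		device_pm_wait_for_dev(dev, foo->supplier);
 *		return foo_hw_reinit(foo);	// assumed helper
 *	}
 *
 * The parent/child ordering, by contrast, is handled automatically by
 * dpm_wait() and dpm_wait_for_children() above.
 */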