/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

/*
 * Set once the preparation of devices for a PM transition has started, reset
 * before starting to resume devices. Protected by dpm_list_mtx.
 */
static bool transition_started;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.status = DPM_ON;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup_count = 0;
	pm_runtime_init(dev);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent) {
		if (dev->parent->power.status >= DPM_SUSPENDING)
			dev_warn(dev, "parent %s should not be sleeping\n",
				 dev_name(dev->parent));
	} else if (transition_started) {
		/*
		 * We refuse to register parentless devices while a PM
		 * transition is in progress in order to avoid leaving them
		 * unhandled down the road.
		 */
		dev_WARN(dev, "Parentless device registered during a PM transition\n");
	}

	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling %s+ @ %i\n",
			dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
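/*
 * Note: during suspend each device waits for its children to finish first
 * (dpm_wait_for_children() called from __device_suspend()), while during
 * resume the ordering is reversed and each device waits for its parent
 * (dpm_wait() called from device_resume()).
 */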
/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
		       const struct dev_pm_ops *ops,
		       pm_message_t state)
{
	int error = 0;
	ktime_t calltime, delta, rettime;

	if (initcall_debug) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
		       int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
	       kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	s64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/
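/*
 * Resuming is done in two phases: dpm_resume_noirq() runs the
 * ->resume_noirq() callbacks while device interrupts are still disabled,
 * and dpm_resume() then runs the regular ->resume() callbacks.  dpm_list
 * is walked head to tail, so parents are handled before their children.
 */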
/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	}

End:
	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	list_for_each_entry(dev, &dpm_list, power.entry)
		if (dev->power.status > DPM_OFF) {
			int error;

			dev->power.status = DPM_OFF;
			error = device_resume_noirq(dev, state);
			if (error)
				pm_dev_err(dev, state, " early", error);
		}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
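/*
 * The "legacy" paths below invoke the old-style bus ->resume()/->suspend()
 * and class ->resume()/->suspend() callbacks, which predate struct
 * dev_pm_ops; they are kept for subsystems that have not been converted.
 */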
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	dev->power.status = DPM_RESUMING;

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
		}
	}

End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);
	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
	struct list_head list;
	struct device *dev;
	ktime_t starttime = ktime_get();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	list_for_each_entry(dev, &dpm_list, power.entry) {
		if (dev->power.status < DPM_OFF)
			continue;

		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_list)) {
		dev = to_device(dpm_list.next);
		get_device(dev);
		if (dev->power.status >= DPM_OFF && !is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);

			mutex_lock(&dpm_list_mtx);
			if (error)
				pm_dev_err(dev, state, "", error);
		} else if (dev->power.status == DPM_SUSPENDING) {
			/* Allow new children of the device to be registered */
			dev->power.status = DPM_RESUMING;
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}
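/*
 * Illustrative sketch (assumed driver code, not part of this file): a driver
 * opts a device into the asynchronous path checked by is_async() and
 * device_suspend() by setting power.async_suspend, typically at probe time
 * via the device_enable_async_suspend() helper from <linux/pm.h>:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		device_enable_async_suspend(&pdev->dev);
 *		return 0;
 *	}
 *
 * Here foo_probe() is hypothetical.  Ordering with respect to parents and
 * children is still preserved through the power.completion waits above.
 */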
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->class && dev->class->pm && dev->class->pm->complete) {
		pm_dev_dbg(dev, state, "completing class ");
		dev->class->pm->complete(dev);
	}

	if (dev->type && dev->type->pm && dev->type->pm->complete) {
		pm_dev_dbg(dev, state, "completing type ");
		dev->type->pm->complete(dev);
	}

	if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
		pm_dev_dbg(dev, state, "completing ");
		dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		if (dev->power.status > DPM_ON) {
			dev->power.status = DPM_ON;
			mutex_unlock(&dpm_list_mtx);

			device_complete(dev, state);
			pm_runtime_put_sync(dev);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	might_sleep();
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
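/*
 * Suspend proper mirrors resume and is likewise done in two phases:
 * dpm_suspend() runs the regular ->suspend() callbacks first, and
 * dpm_suspend_noirq() then runs the ->suspend_noirq() callbacks with device
 * interrupts disabled.  dpm_list is walked tail to head, so children are
 * handled before their parents.
 */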
/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

End:
	return error;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
		error = device_suspend_noirq(dev, state);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			break;
		}
		dev->power.status = DPM_OFF_IRQ;
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
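/*
 * Error returned by the first asynchronous suspend that failed, if any;
 * once set, __device_suspend() and dpm_suspend() bail out early.
 */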
static int async_error;

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait_for_children(dev, async);
	device_lock(dev);

	if (async_error)
		goto End;

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

	if (!error)
		dev->power.status = DPM_OFF;

End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		pm_dev_err(dev, pm_transition, " async", error);
		async_error = error;
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
	struct list_head list;
	ktime_t starttime = ktime_get();
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
		if (async_error)
			break;
	}
	list_splice(&list, dpm_list.prev);
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}
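/*
 * Note: dpm_prepare() takes a runtime PM reference on every device
 * (pm_runtime_get_noresume()) for the duration of the transition; the
 * matching pm_runtime_put_sync() is done in dpm_complete().
 */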
/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing ");
		error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm && dev->type->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing type ");
		error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm && dev->class->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing class ");
		error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
	}
End:
	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
	struct list_head list;
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = true;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		dev->power.status = DPM_PREPARING;
		mutex_unlock(&dpm_list_mtx);

		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
			/* Wake-up requested during system sleep transition. */
			pm_runtime_put_sync(dev);
			error = -EBUSY;
		} else {
			error = device_prepare(dev, state);
		}

		mutex_lock(&dpm_list_mtx);
		if (error) {
			dev->power.status = DPM_ON;
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_ERR "PM: Failed to prepare device %s "
				"for power transition: error %d\n",
				kobject_name(&dev->kobj), error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_SUSPENDING;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute
 * "suspend" callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	might_sleep();
	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
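/*
 * suspend_report_result(), used throughout this file, is a macro from
 * <linux/pm.h> that passes __func__ to __suspend_report_result() above, so
 * failure reports identify the caller of the failing callback.
 */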
/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
void device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
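/*
 * Illustrative example (assumed driver code, not part of this file): with
 * asynchronous suspend/resume enabled, a device that depends on another
 * device outside its ancestry can enforce the extra ordering from its own
 * resume callback, waiting until the supplier has finished resuming:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		device_pm_wait_for_dev(dev, foo->supplier);
 *		return foo_hw_init(foo);
 *	}
 *
 * Here foo, foo->supplier and foo_hw_init() are hypothetical.
 */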