/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

/*
 * Set once the preparation of devices for a PM transition has started, reset
 * before starting to resume devices.  Protected by dpm_list_mtx.
 */
static bool transition_started;

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.status = DPM_ON;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent) {
		if (dev->parent->power.status >= DPM_SUSPENDING)
			dev_warn(dev, "parent %s should not be sleeping\n",
				 dev_name(dev->parent));
	} else if (transition_started) {
		/*
		 * We refuse to register parentless devices while a PM
		 * transition is in progress in order to avoid leaving them
		 * unhandled down the road.
		 */
		dev_WARN(dev, "Parentless device registered during a PM transition\n");
	}

	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
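/*
 * Illustrative sketch (not driver-core code): because device_pm_add()
 * appends at the tail and children are always registered after their
 * parents, a hypothetical registration sequence
 *
 *	device_register(parent);	// dpm_list: parent
 *	device_register(child);		// dpm_list: parent, child
 *
 * yields a list the suspend path can walk backwards (children first) and
 * the resume path forwards (parents first).
 */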
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling %s+ @ %i\n",
			dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
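/*
 * Illustrative note: initcall_debug_start() and initcall_debug_report()
 * above only log when the kernel is booted with the "initcall_debug"
 * command-line parameter.  With it set, each device produces a pair of
 * lines roughly like (name and timing below are made up):
 *
 *	calling sda+ @ 2817
 *	call sda+ returned 0 after 1464 usecs
 */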
/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}
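/*
 * Sketch of the kind of dev_pm_ops object pm_op() dispatches on; the
 * foo_*() callbacks are hypothetical:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend = foo_suspend,
 *		.resume = foo_resume,
 *	};
 *
 * With this structure installed as dev->bus->pm, a PM_EVENT_SUSPEND
 * transition calls foo_suspend(dev) and PM_EVENT_RESUME calls
 * foo_resume(dev), each followed by suspend_report_result().
 */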
/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
		       const struct dev_pm_ops *ops,
		       pm_message_t state)
{
	int error = 0;
	ktime_t calltime, delta, rettime;

	if (initcall_debug) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
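/*
 * Illustrative example of dpm_show_time() output (values made up): with a
 * NULL info string, a 52417 us phase is reported as
 *
 *	PM: suspend of devices complete after 52.417 msecs
 *
 * while the noirq phases pass "late"/"early", giving e.g.
 *
 *	PM: late suspend of devices complete after 1.203 msecs
 */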
/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	}

End:
	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	list_for_each_entry(dev, &dpm_list, power.entry)
		if (dev->power.status > DPM_OFF) {
			int error;

			dev->power.status = DPM_OFF;
			error = device_resume_noirq(dev, state);
			if (error)
				pm_dev_err(dev, state, " early", error);
		}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
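/*
 * Hypothetical sketch of a "noirq" resume callback like those
 * device_resume_noirq() dispatches above; it runs while the device's
 * interrupt handlers are still kept from running (resume_device_irqs() is
 * called only after the whole noirq phase), so it must not wait on its own
 * IRQ:
 *
 *	static int foo_resume_noirq(struct device *dev)
 *	{
 *		// Restore registers by polling, not from the IRQ handler.
 *		return 0;
 *	}
 */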
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	dev->power.status = DPM_RESUMING;

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
		}
	}
End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);
	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
	struct list_head list;
	struct device *dev;
	ktime_t starttime = ktime_get();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_list, power.entry) {
		if (dev->power.status < DPM_OFF)
			continue;

		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_list)) {
		dev = to_device(dpm_list.next);
		get_device(dev);
		if (dev->power.status >= DPM_OFF && !is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);

			mutex_lock(&dpm_list_mtx);
			if (error)
				pm_dev_err(dev, state, "", error);
		} else if (dev->power.status == DPM_SUSPENDING) {
			/* Allow new children of the device to be registered */
			dev->power.status = DPM_RESUMING;
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}
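/*
 * Illustrative sketch: a driver opts a device into the asynchronous path
 * tested by is_async() by setting the async_suspend flag before a
 * transition starts, typically at probe time:
 *
 *	dev->power.async_suspend = true;
 *
 * (a device_enable_async_suspend() helper exists for this in this era's
 * <linux/pm.h>, though its exact signature has varied between kernel
 * versions).  The flag only matters while pm_async_enabled is set -- the
 * /sys/power/pm_async switch -- and PM tracing is disabled.
 */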
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->class && dev->class->pm && dev->class->pm->complete) {
		pm_dev_dbg(dev, state, "completing class ");
		dev->class->pm->complete(dev);
	}

	if (dev->type && dev->type->pm && dev->type->pm->complete) {
		pm_dev_dbg(dev, state, "completing type ");
		dev->type->pm->complete(dev);
	}

	if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
		pm_dev_dbg(dev, state, "completing ");
		dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		if (dev->power.status > DPM_ON) {
			dev->power.status = DPM_ON;
			mutex_unlock(&dpm_list_mtx);

			device_complete(dev, state);
			pm_runtime_put_sync(dev);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	might_sleep();
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
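/*
 * Hypothetical sketch of a ->complete() callback as invoked by
 * device_complete() above; it balances ->prepare() and runs with the device
 * lock held, after the device has been resumed:
 *
 *	static void foo_complete(struct device *dev)
 *	{
 *		// Undo whatever ->prepare() did, e.g. allow the creation
 *		// of new child devices again.
 *	}
 */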
/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

End:
	return error;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
		error = device_suspend_noirq(dev, state);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			break;
		}
		dev->power.status = DPM_OFF_IRQ;
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
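/*
 * Sketch of the legacy callback shape handled by legacy_suspend(); names
 * are hypothetical:
 *
 *	static int foo_bus_suspend(struct device *dev, pm_message_t state)
 *	{
 *		return 0;
 *	}
 *
 * Unlike dev_pm_ops callbacks, legacy ones receive the pm_message_t
 * directly instead of having pm_op()/pm_noirq_op() decode state.event into
 * a per-event method.
 */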
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait_for_children(dev, async);
	device_lock(dev);

	if (async_error)
		goto End;

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

	if (!error)
		dev->power.status = DPM_OFF;

End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	if (error)
		async_error = error;

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
	struct list_head list;
	ktime_t starttime = ktime_get();
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
		if (async_error)
			break;
	}
	list_splice(&list, dpm_list.prev);
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}
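/*
 * Illustrative note on ordering: __device_suspend() begins with
 * dpm_wait_for_children() and device_resume() begins with
 * dpm_wait(dev->parent, ...), so even when some devices are handled
 * asynchronously the invariants hold: a parent is suspended only after all
 * of its children, and a child is resumed only after its parent.
 */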
/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of
 * the device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing ");
		error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm && dev->type->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing type ");
		error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm && dev->class->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing class ");
		error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
	}
End:
	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
	struct list_head list;
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = true;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		dev->power.status = DPM_PREPARING;
		mutex_unlock(&dpm_list_mtx);

		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
			/* Wake-up requested during system sleep transition. */
			pm_runtime_put_sync(dev);
			error = -EBUSY;
		} else {
			error = device_prepare(dev, state);
		}

		mutex_lock(&dpm_list_mtx);
		if (error) {
			dev->power.status = DPM_ON;
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_ERR "PM: Failed to prepare device %s "
				"for power transition: error %d\n",
				kobject_name(&dev->kobj), error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_SUSPENDING;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute
 * "suspend" callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	might_sleep();
	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
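/*
 * Illustrative example: callers go through the suspend_report_result()
 * macro from <linux/pm.h>, which supplies __func__ as the first argument:
 *
 *	error = ops->suspend(dev);
 *	suspend_report_result(ops->suspend, error);
 *
 * On failure this prints something like (symbol made up):
 *
 *	pm_op(): foo_suspend+0x0/0x40 returns -16
 *
 * because the %pF format resolves the callback pointer to a symbol name.
 */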
/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
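/*
 * Hypothetical usage sketch for device_pm_wait_for_dev(): a driver whose
 * callbacks depend on a device that is not its parent can synchronize with
 * it explicitly ("companion" and foo_restore_state() are made up):
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error = device_pm_wait_for_dev(dev, foo->companion);
 *
 *		if (error)
 *			return error;
 *		return foo_restore_state(foo);
 *	}
 *
 * Since the function returns async_error, a failure in any asynchronous
 * callback executed so far propagates to the waiter.
 */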