/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	pm_callback_t cb;
	const struct dev_pm_ops *ops;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = *(pm_callback_t *)((void *)ops + cb_offset);
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

	return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	unsigned long delta;

	delta = now - dev->power.accounting_timestamp;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
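
/*
 * Illustrative sketch (not part of this file): a driver using the
 * autosuspend machinery above typically refreshes power.last_busy when an
 * I/O operation completes and then drops its usage count with an
 * autosuspend put:
 *
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 *
 * pm_runtime_mark_last_busy() updates power.last_busy, which pushes the
 * expiration time computed by pm_runtime_autosuspend_expiration() into
 * the future.
 */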

static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/*
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device or network
 * device drivers for solving the deadlock problem during runtime
 * resume/suspend:
 *
 * If memory allocation with GFP_KERNEL is called inside the runtime
 * resume/suspend callback of any one of the device's ancestors (or the
 * block device itself), a deadlock may be triggered inside the
 * memory allocation, since it might not complete until the block
 * device becomes active and the involved page I/O finishes.  This
 * situation was first pointed out by Alan Stern.  Network devices
 * are involved in iSCSI kinds of situations.
 *
 * The lock of dev_hotplug_mutex is held in the function for handling
 * hotplug races, because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * No need to enable ancestors any more if the flag
		 * has already been set on this device.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * clear flag of the parent device only if all the
		 * children don't set the flag because ancestor's
		 * flag was set by any one of the descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
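
/*
 * Illustrative sketch (not part of this file): a block or network driver
 * would bracket the device's registered lifetime, e.g. in a hypothetical
 * probe/remove pair:
 *
 *	ret = device_add(dev);
 *	...
 *	pm_runtime_set_memalloc_noio(dev, true);
 *	...
 *	pm_runtime_set_memalloc_noio(dev, false);
 *	device_del(dev);
 *
 * so that runtime PM callbacks of the device and its ancestors run with
 * GFP_NOIO allocation semantics (see rpm_callback() below).
 */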

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_read_value(dev) < 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (dev->power.irq_safe)
		spin_unlock(&dev->power.lock);
	else
		spin_unlock_irq(&dev->power.lock);

	retval = cb(dev);

	if (dev->power.irq_safe)
		spin_lock(&dev->power.lock);
	else
		spin_lock_irq(&dev->power.lock);

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	if (callback)
		retval = __rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
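
/*
 * Illustrative sketch (not part of this file): a driver's ->runtime_idle()
 * callback can veto the follow-up suspend by returning a nonzero value, or
 * let rpm_idle() fall through to rpm_suspend() by returning 0, e.g. for a
 * hypothetical driver:
 *
 *	static int foo_runtime_idle(struct device *dev)
 *	{
 *		if (foo_hw_busy(dev))	(hypothetical check)
 *			return -EBUSY;	(stay active)
 *		return 0;		(allow the suspend)
 *	}
 */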

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * A deadlock might be caused if memory allocation with
		 * GFP_KERNEL happens inside the runtime_suspend and
		 * runtime_resume callbacks of one block device's
		 * ancestor or the block device itself.  A network
		 * device might be thought of as part of an iSCSI block
		 * device, so a network device and its ancestors should
		 * be marked as memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}
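
/*
 * Illustrative sketch (not part of this file): the value returned by a
 * driver callback propagates back through rpm_callback().  For a suspend,
 * -EAGAIN or -EBUSY means "try again later" and is handled specially on the
 * failure path of rpm_suspend() below, e.g. for a hypothetical driver:
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		if (foo_dma_pending(dev))	(hypothetical check)
 *			return -EBUSY;		(abort, retry later)
 *		foo_hw_powerdown(dev);		(hypothetical)
 *		return 0;
 *	}
 */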

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeds and a deferred resume was requested while
 * the callback was running, carry out that resume; otherwise send an idle
 * notification for the device's parent (unless the parent's
 * power.ignore_children flag or the device's power.irq_safe flag is set).
 * If ->runtime_suspend() failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq(dev);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;

 fail:
	dev_pm_disable_wake_irq(dev);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
	    && dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's runtime PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq(dev);
	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
		dev_pm_enable_wake_irq(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		pm_runtime_mark_last_busy(dev);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}
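
/*
 * Illustrative sketch (not part of this file): the canonical way for a
 * driver to reach rpm_resume() is a get/put pair around an I/O operation,
 * e.g. in a hypothetical transfer routine:
 *
 *	ret = pm_runtime_get_sync(dev);		(resume dev, counting usage)
 *	if (ret < 0) {
 *		pm_runtime_put_noidle(dev);	(drop the count taken above)
 *		return ret;
 *	}
 *	foo_do_transfer(dev);			(hypothetical)
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */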

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
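
/*
 * Illustrative sketch (not part of this file): a driver that expects its
 * device to stay idle for a while can request a delayed suspend instead of
 * suspending synchronously:
 *
 *	ret = pm_schedule_suspend(dev, 500);	(request a suspend in ~500 ms)
 *
 * The request fails with -EAGAIN while the usage count is above zero (see
 * rpm_check_suspend_allowed()), and a later resume cancels the timer via
 * pm_runtime_deactivate_timer().
 */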

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
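
/*
 * Illustrative note (an assumption based on include/linux/pm_runtime.h, not
 * part of this file): the three entry points above back the short-hand
 * helpers drivers normally call, for example:
 *
 *	pm_runtime_idle(dev)     -> __pm_runtime_idle(dev, 0)
 *	pm_runtime_put(dev)      -> __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_suspend(dev)  -> __pm_runtime_suspend(dev, 0)
 *	pm_runtime_get_sync(dev) -> __pm_runtime_resume(dev, RPM_GET_PUT)
 */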

/**
 * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
 * @dev: Device to handle.
 *
 * Return -EINVAL if runtime PM is disabled for the device.
 *
 * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
 * and the runtime PM usage counter is nonzero, increment the counter and
 * return 1.  Otherwise return 0 without changing the counter.
 */
int pm_runtime_get_if_in_use(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = dev->power.disable_depth > 0 ? -EINVAL :
		dev->power.runtime_status == RPM_ACTIVE
			&& atomic_inc_not_zero(&dev->power.usage_count);
	spin_unlock_irqrestore(&dev->power.lock, flags);
	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
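
/*
 * Illustrative sketch (not part of this file): pm_runtime_get_if_in_use()
 * suits hot paths that should touch the hardware only when the device is
 * already powered and in use, without waiting for a resume:
 *
 *	if (pm_runtime_get_if_in_use(dev) <= 0)
 *		return;			(suspended, idle or disabled; skip)
 *	foo_poll_counters(dev);		(hypothetical register access)
 *	pm_runtime_put(dev);
 */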

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
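
/*
 * Illustrative sketch (not part of this file): drivers normally reach this
 * function through the pm_runtime_set_active()/pm_runtime_set_suspended()
 * wrappers, typically at probe time while runtime PM is still disabled:
 *
 *	foo_hw_power_on(dev);		(hypothetical)
 *	pm_runtime_set_active(dev);	(status now matches the hardware)
 *	pm_runtime_enable(dev);
 */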

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
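
/*
 * Illustrative sketch (not part of this file): the usual counterpart of
 * pm_runtime_enable() in a driver's ->remove() path is the
 * pm_runtime_disable() wrapper, which calls __pm_runtime_disable() with
 * @check_resume set, e.g. for a hypothetical driver:
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		pm_runtime_disable(&pdev->dev);
 *		foo_hw_power_off(&pdev->dev);	(hypothetical)
 *		return 0;
 *	}
 */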

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
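
/*
 * Illustrative sketch (not part of this file): a driver whose callbacks are
 * cheap and atomic can declare itself irq-safe at probe time, after which
 * synchronous runtime PM calls become legal in interrupt context:
 *
 *	pm_runtime_irq_safe(dev);	(in probe)
 *	...
 *	pm_runtime_get_sync(dev);	(now allowed in an irq handler)
 *	foo_write_reg(dev, ...);	(hypothetical)
 *	pm_runtime_put(dev);
 *
 * The trade-off is that the parent is kept active permanently.
 */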

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}
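
/*
 * Illustrative sketch (not part of this file): the typical probe-time
 * autosuspend setup built on the two setters above:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);	(2 s of idleness)
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 *
 * pm_runtime_use_autosuspend() is the pm_runtime.h wrapper around
 * __pm_runtime_use_autosuspend(dev, true).
 */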

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
	if (!pm_runtime_enabled(dev)) {
		if (dev->power.runtime_status == RPM_ACTIVE)
			pm_runtime_set_suspended(dev);
		if (dev->power.irq_safe) {
			spin_lock_irq(&dev->power.lock);
			dev->power.irq_safe = 0;
			spin_unlock_irq(&dev->power.lock);
			if (dev->parent)
				pm_runtime_put(dev->parent);
		}
	}
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);
	pm_runtime_reinit(dev);
}

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we can safely check the device's runtime PM status
 * and, if it is active, invoke its ->runtime_suspend callback to bring it
 * into suspend state.  Keep runtime PM disabled to preserve the state unless
 * we encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to
 * make sure the device is put into a low power state.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	if (!callback) {
		ret = -ENOSYS;
		goto err;
	}

	ret = callback(dev);
	if (ret)
		goto err;

	pm_runtime_set_suspended(dev);
	return 0;
 err:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state.
 * @dev: Device to resume.
 *
 * Before this function is invoked, the device is expected to have been
 * brought into a low power state by a call to pm_runtime_force_suspend().
 * Here we reverse those actions: we bring the device back to full power,
 * update its runtime PM status and re-enable runtime PM.
 *
 * Typically this function may be invoked from a system resume callback to
 * make sure the device is put into full power state.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	if (!callback) {
		ret = -ENOSYS;
		goto out;
	}

	ret = pm_runtime_set_active(dev);
	if (ret)
		goto out;

	ret = callback(dev);
	if (ret) {
		pm_runtime_set_suspended(dev);
		goto out;
	}

	pm_runtime_mark_last_busy(dev);
 out:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
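
/*
 * Illustrative sketch (not part of this file): drivers without special
 * system-sleep needs can route their system PM callbacks straight to the
 * two helpers above, e.g. in a hypothetical driver:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
 *				   NULL)
 *	};
 */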