// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 */
#include <linux/sched/mm.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/rculist.h>
#include <trace/events/rpm.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	pm_callback_t cb;
	const struct dev_pm_ops *ops;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = *(pm_callback_t *)((void *)ops + cb_offset);
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

	return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
static void update_pm_runtime_accounting(struct device *dev)
{
	u64 now, last, delta;

	if (dev->power.disable_depth > 0)
		return;

	last = dev->power.accounting_timestamp;

	now = ktime_get_mono_fast_ns();
	dev->power.accounting_timestamp = now;

	/*
	 * Because ktime_get_mono_fast_ns() is not monotonic during
	 * timekeeping updates, ensure that 'now' is after the last saved
	 * timestamp.
	 */
	if (now < last)
		return;

	delta = now - last;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_time += delta;
	else
		dev->power.active_time += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
{
	u64 time;
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	update_pm_runtime_accounting(dev);
	time = suspended ? dev->power.suspended_time : dev->power.active_time;

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return time;
}

u64 pm_runtime_active_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, false);
}

u64 pm_runtime_suspended_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, true);
}
EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		hrtimer_try_to_cancel(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/*
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	u64 expires;

	if (!dev->power.use_autosuspend)
		return 0;

	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		return 0;

	expires = READ_ONCE(dev->power.last_busy);
	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
	if (expires > ktime_get_mono_fast_ns())
		return expires;	/* Expires in the future */

	return 0;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);

static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/*
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device or network
 * device drivers, to solve the deadlock problem during runtime
 * resume/suspend:
 *
 * If a memory allocation with GFP_KERNEL is made inside the runtime
 * resume/suspend callback of any one of the device's ancestors (or of the
 * block device itself), a deadlock may be triggered inside the
 * memory allocation, since it might not complete until the block
 * device becomes active and the involved page I/O finishes.  This
 * situation was first pointed out by Alan Stern.  Network devices
 * are involved in iSCSI kinds of situations.
 *
 * The lock of dev_hotplug_mutex is held in the function for handling
 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * No need to enable the ancestors any more if the device
		 * has already been enabled.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * Clear the flag of the parent device only if none of its
		 * children have the flag set, because an ancestor's flag
		 * may have been set by any one of its descendants.
		 */
		if (!dev || (!enable &&
		    device_for_each_child(dev, NULL, dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count))
		retval = -EAGAIN;
	else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume &&
	    dev->power.runtime_status == RPM_SUSPENDING) ||
	    (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_resume_latency(dev) == 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

static int rpm_get_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held()) {
		int retval;

		if (!(link->flags & DL_FLAG_PM_RUNTIME))
			continue;

		retval = pm_runtime_get_sync(link->supplier);
		/* Ignore suppliers with disabled runtime PM. */
		if (retval < 0 && retval != -EACCES) {
			pm_runtime_put_noidle(link->supplier);
			return retval;
		}
		refcount_inc(&link->rpm_active);
	}
	return 0;
}

/**
 * pm_runtime_release_supplier - Drop references to device link's supplier.
 * @link: Target device link.
 *
 * Drop all runtime PM references associated with @link to its supplier device.
 */
void pm_runtime_release_supplier(struct device_link *link)
{
	struct device *supplier = link->supplier;

	/*
	 * The additional power.usage_count check is a safety net in case
	 * the rpm_active refcount becomes saturated, in which case
	 * refcount_dec_not_one() would return true forever, but it is not
	 * strictly necessary.
	 */
	while (refcount_dec_not_one(&link->rpm_active) &&
	       atomic_read(&supplier->power.usage_count) > 0)
		pm_runtime_put_noidle(supplier);
}

static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held()) {
		pm_runtime_release_supplier(link);
		if (try_to_suspend)
			pm_request_idle(link->supplier);
	}
}

static void rpm_put_suppliers(struct device *dev)
{
	__rpm_put_suppliers(dev, true);
}

static void rpm_suspend_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		pm_request_idle(link->supplier);

	device_links_read_unlock(idx);
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval = 0, idx;
	bool use_links = dev->power.links_count > 0;

	if (dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);
	} else {
		spin_unlock_irq(&dev->power.lock);

		/*
		 * Resume suppliers if necessary.
		 *
		 * The device's runtime PM status cannot change until this
		 * routine returns, so it is safe to read the status outside of
		 * the lock.
		 */
		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
			idx = device_links_read_lock();

			retval = rpm_get_suppliers(dev);
			if (retval) {
				rpm_put_suppliers(dev);
				goto fail;
			}

			device_links_read_unlock(idx);
		}
	}

	if (cb)
		retval = cb(dev);

	if (dev->power.irq_safe) {
		spin_lock(&dev->power.lock);
	} else {
		/*
		 * If the device is suspending and the callback has returned
		 * success, drop the usage counters of the suppliers that have
		 * been reference counted on its resume.
		 *
		 * Do that if resume fails too.
		 */
		if (use_links &&
		    ((dev->power.runtime_status == RPM_SUSPENDING && !retval) ||
		    (dev->power.runtime_status == RPM_RESUMING && retval))) {
			idx = device_links_read_lock();

			__rpm_put_suppliers(dev, false);

fail:
			device_links_read_unlock(idx);
		}

		spin_lock_irq(&dev->power.lock);
	}

	return retval;
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * A deadlock might occur if a memory allocation with
		 * GFP_KERNEL happens inside the runtime_suspend or
		 * runtime_resume callbacks of a block device's
		 * ancestor or of the block device itself.  A network
		 * device may be regarded as part of an iSCSI block
		 * device, so the network device and its ancestors
		 * should be marked memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle() callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;

	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	/* If no callback assume success. */
	if (!callback || dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	if (dev->power.irq_safe)
		spin_unlock(&dev->power.lock);
	else
		spin_unlock_irq(&dev->power.lock);

	retval = callback(dev);

	if (dev->power.irq_safe)
		spin_lock(&dev->power.lock);
	else
		spin_lock_irq(&dev->power.lock);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If the callback
 * succeeds and a deferred resume was requested while it was running, carry
 * out that resume; otherwise send an idle notification to the device's
 * parent (provided that both parent->power.ignore_children and
 * dev->power.irq_safe are unset).  If ->runtime_suspend() fails with -EAGAIN
 * or -EBUSY, and if the RPM_AUTO flag is set and the next autosuspend-delay
 * expiration time is in the future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		goto out;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;

	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) {
		u64 expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires &&
			    dev->power.timer_expires <= expires)) {
				/*
				 * We add a slack of 25% to gather wakeups
				 * without sacrificing the granularity.
				 */
				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
						    (NSEC_PER_MSEC >> 2);

				dev->power.timer_expires = expires;
				hrtimer_start_range_ns(&dev->power.suspend_timer,
						       ns_to_ktime(expires),
						       slack,
						       HRTIMER_MODE_ABS);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

	dev_pm_enable_wake_irq_complete(dev);

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	if (dev->power.irq_safe)
		goto out;

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}
	/* Maybe the suppliers are now able to suspend. */
	if (dev->power.links_count > 0) {
		spin_unlock_irq(&dev->power.lock);

		rpm_suspend_suppliers(dev);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;

 fail:
	dev_pm_disable_wake_irq_check(dev, true);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
	} else if (dev->power.disable_depth > 0) {
		if (dev->power.runtime_status == RPM_ACTIVE &&
		    dev->power.last_status == RPM_ACTIVE)
			retval = 1;
		else
			retval = -EACCES;
	}
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING ||
	    dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING) {
				dev->power.deferred_resume = true;
				if (rpmflags & RPM_NOWAIT)
					retval = -EINPROGRESS;
			} else {
				retval = -EINPROGRESS;
			}
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING &&
			    dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0 ||
		    dev->parent->power.ignore_children ||
		    dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;

		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * Resume the parent if it has runtime PM enabled and has not
		 * been set to ignore its children.
		 */
		if (!parent->power.disable_depth &&
		    !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;

		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev, false);
	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
		dev_pm_enable_wake_irq_check(dev, false);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		pm_runtime_mark_last_busy(dev);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @timer: hrtimer used by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
{
	struct device *dev = container_of(timer, struct device, power.suspend_timer);
	unsigned long flags;
	u64 expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/*
	 * If 'expires' is after the current time, we've been called
	 * too early.
	 */
	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return HRTIMER_NORESTART;
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	u64 expires;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
	dev->power.timer_expires = expires;
	dev->power.timer_autosuspends = 0;
	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);

static int rpm_drop_usage_count(struct device *dev)
{
	int ret;

	ret = atomic_sub_return(1, &dev->power.usage_count);
	if (ret >= 0)
		return ret;

	/*
	 * Because rpm_resume() does not check the usage counter, it will resume
	 * the device even if the usage counter is 0 or negative, so it is
	 * sufficient to increment the usage counter here to reverse the change
	 * made above.
	 */
	atomic_inc(&dev->power.usage_count);
	dev_warn(dev, "Runtime PM usage count underflow!\n");
	return -EINVAL;
}

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero (if it becomes negative, log a
 * warning, increment it, and return an error).  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		retval = rpm_drop_usage_count(dev);
		if (retval < 0) {
			return retval;
		} else if (retval > 0) {
			trace_rpm_usage(dev, rpmflags);
			return 0;
		}
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero (if it becomes negative, log a
 * warning, increment it, and return an error).  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		retval = rpm_drop_usage_count(dev);
		if (retval < 0) {
			return retval;
		} else if (retval > 0) {
			trace_rpm_usage(dev, rpmflags);
			return 0;
		}
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
			dev->power.runtime_status != RPM_ACTIVE);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);

/**
 * pm_runtime_get_if_active - Conditionally bump up device usage counter.
 * @dev: Device to handle.
 * @ign_usage_count: Whether or not to look at the current usage counter value.
 *
 * Return -EINVAL if runtime PM is disabled for @dev.
 *
 * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
 * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
 * zero, increment the usage counter of @dev and return 1.  Otherwise, return 0
 * without changing the usage counter.
 *
 * If @ign_usage_count is %true, this function can be used to prevent suspending
 * the device when its runtime PM status is %RPM_ACTIVE.
 *
 * If @ign_usage_count is %false, this function can be used to prevent
 * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
 * runtime PM usage counter is not zero.
 *
 * The caller is responsible for decrementing the runtime PM usage counter of
 * @dev after this function has returned a positive value for it.
 */
int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (dev->power.disable_depth > 0) {
		retval = -EINVAL;
	} else if (dev->power.runtime_status != RPM_ACTIVE) {
		retval = 0;
	} else if (ign_usage_count) {
		retval = 1;
		atomic_inc(&dev->power.usage_count);
	} else {
		retval = atomic_inc_not_zero(&dev->power.usage_count);
	}
	trace_rpm_usage(dev, 0);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
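
/*
 * Illustrative sketch (assumed driver code, not part of this file): drivers
 * normally reach the entry points above through the pm_runtime_get_sync()
 * and pm_runtime_put() helpers around I/O.  The foo_dev/foo_do_io names
 * below are hypothetical.
 *
 *	static int foo_xfer(struct foo_dev *foo)
 *	{
 *		int ret;
 *
 *		ret = pm_runtime_get_sync(foo->dev);	// resume and bump the usage count
 *		if (ret < 0) {
 *			pm_runtime_put_noidle(foo->dev);	// balance the count on failure
 *			return ret;
 *		}
 *
 *		ret = foo_do_io(foo);			// device is guaranteed active here
 *
 *		pm_runtime_put(foo->dev);		// drop the count and queue an idle check
 *		return ret;
 *	}
 */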

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 *
 * If @dev has any suppliers (as reflected by device links to them), and @status
 * is RPM_ACTIVE, they will be activated upfront and if the activation of one
 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
 * of the @status value) and the suppliers will be deactivated on exit.  The
 * error returned by the failing supplier activation will be returned in that
 * case.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	bool notify_parent = false;
	unsigned long flags;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	/*
	 * Prevent PM-runtime from being enabled for the device or return an
	 * error if it is enabled already and working.
	 */
	if (dev->power.runtime_error || dev->power.disable_depth)
		dev->power.disable_depth++;
	else
		error = -EAGAIN;

	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (error)
		return error;

	/*
	 * If the new status is RPM_ACTIVE, the suppliers can be activated
	 * upfront regardless of the current status, because next time
	 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
	 * involved will be dropped down to one anyway.
	 */
	if (status == RPM_ACTIVE) {
		int idx = device_links_read_lock();

		error = rpm_get_suppliers(dev);
		if (error)
			status = RPM_SUSPENDED;

		device_links_read_unlock(idx);
	}

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.runtime_status == status || !parent)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		atomic_add_unless(&parent->power.child_count, -1, 0);
		notify_parent = !parent->power.ignore_children;
	} else {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth &&
		    !parent->power.ignore_children &&
		    parent->power.runtime_status != RPM_ACTIVE) {
			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
				dev_name(dev),
				dev_name(parent));
			error = -EBUSY;
		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
			atomic_inc(&parent->power.child_count);
		}

		spin_unlock(&parent->power.lock);

		if (error) {
			status = RPM_SUSPENDED;
			goto out;
		}
	}

 out_set:
	__update_runtime_status(dev, status);
	if (!error)
		dev->power.runtime_error = 0;

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	if (status == RPM_SUSPENDED) {
		int idx = device_links_read_lock();

		rpm_put_suppliers(dev);

		device_links_read_unlock(idx);
	}

	pm_runtime_enable(dev);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING ||
	    dev->power.runtime_status == RPM_RESUMING ||
	    dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and,
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending &&
	    dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	/* Update time accounting before disabling PM-runtime. */
	update_pm_runtime_accounting(dev);

	if (!dev->power.disable_depth++) {
		__pm_runtime_barrier(dev);
		dev->power.last_status = dev->power.runtime_status;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.disable_depth) {
		dev_warn(dev, "Unbalanced %s!\n", __func__);
		goto out;
	}

	if (--dev->power.disable_depth > 0)
		goto out;

	dev->power.last_status = RPM_INVALID;
	dev->power.accounting_timestamp = ktime_get_mono_fast_ns();

	if (dev->power.runtime_status == RPM_SUSPENDED &&
	    !dev->power.ignore_children &&
	    atomic_read(&dev->power.child_count) > 0)
		dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

static void pm_runtime_disable_action(void *data)
{
	pm_runtime_dont_use_autosuspend(data);
	pm_runtime_disable(data);
}

/**
 * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
 *
 * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
 * you at driver exit time if needed.
 *
 * @dev: Device to handle.
 */
int devm_pm_runtime_enable(struct device *dev)
{
	pm_runtime_enable(dev);

	return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	int ret;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	ret = rpm_drop_usage_count(dev);
	if (ret == 0)
		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
	else if (ret > 0)
		trace_rpm_usage(dev, RPM_AUTO | RPM_ASYNC);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
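
/*
 * Illustrative sketch (assumed driver code, not part of this file): a typical
 * probe() pairs devm_pm_runtime_enable() above with an initial status set.
 * The foo_probe() name and the assumption that firmware left the hardware
 * powered on are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		pm_runtime_set_active(&pdev->dev);	// hardware assumed active at probe time
 *		ret = devm_pm_runtime_enable(&pdev->dev);
 *		if (ret)
 *			return ret;			// disable is undone automatically on unbind
 *
 *		return 0;
 *	}
 */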

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle.
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);

	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		} else {
			trace_rpm_usage(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
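
/*
 * Illustrative sketch (assumed driver code, not part of this file): enabling
 * autosuspend with the helpers above and refreshing last_busy on activity.
 * The foo_do_io() name and the 2000 ms delay are hypothetical.
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);	// suspend 2 s after last use
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *
 *	// in the I/O path:
 *	pm_runtime_get_sync(dev);
 *	foo_do_io(dev);
 *	pm_runtime_mark_last_busy(dev);			// restart the autosuspend delay
 *	pm_runtime_put_autosuspend(dev);		// may schedule an RPM_AUTO suspend
 */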

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.last_status = RPM_INVALID;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.needs_force_resume = 0;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	dev->power.suspend_timer.function = pm_suspend_timer_fn;

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
	if (!pm_runtime_enabled(dev)) {
		if (dev->power.runtime_status == RPM_ACTIVE)
			pm_runtime_set_suspended(dev);
		if (dev->power.irq_safe) {
			spin_lock_irq(&dev->power.lock);
			dev->power.irq_safe = 0;
			spin_unlock_irq(&dev->power.lock);
			if (dev->parent)
				pm_runtime_put(dev->parent);
		}
	}
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);
	pm_runtime_reinit(dev);
}

/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_get_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		if (link->flags & DL_FLAG_PM_RUNTIME) {
			link->supplier_preactivated = true;
			pm_runtime_get_sync(link->supplier);
		}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_put_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		if (link->supplier_preactivated) {
			link->supplier_preactivated = false;
			pm_runtime_put(link->supplier);
		}

	device_links_read_unlock(idx);
}

void pm_runtime_new_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.links_count++;
	spin_unlock_irq(&dev->power.lock);
}

static void pm_runtime_drop_link_count(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	WARN_ON(dev->power.links_count == 0);
	dev->power.links_count--;
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_runtime_drop_link - Prepare for device link removal.
 * @link: Device link going away.
 *
 * Drop the link count of the consumer end of @link and decrement the supplier
 * device's runtime PM usage counter as many times as needed to drop all of the
 * PM-runtime references to it from the consumer.
 */
void pm_runtime_drop_link(struct device_link *link)
{
	if (!(link->flags & DL_FLAG_PM_RUNTIME))
		return;

	pm_runtime_drop_link_count(link->consumer);
	pm_runtime_release_supplier(link);
	pm_request_idle(link->supplier);
}

static bool pm_runtime_need_not_resume(struct device *dev)
{
	return atomic_read(&dev->power.usage_count) <= 1 &&
		(atomic_read(&dev->power.child_count) == 0 ||
		 dev->power.ignore_children);
}

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we can safely check the device's runtime PM status and
 * if it is active, invoke its ->runtime_suspend callback to suspend it and
 * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
 * usage and children counters don't indicate that the device was in use before
 * the system-wide transition under way, decrement its parent's children counter
 * (if there is a parent).  Keep runtime PM disabled to preserve the state
 * unless we encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to make
 * sure the device is put into low power state and it should only be used during
 * system-wide PM transitions to sleep states.  It assumes that the analogous
 * pm_runtime_force_resume() will be used to resume the device.
 *
 * Do not use with DPM_FLAG_SMART_SUSPEND as this can lead to an inconsistent
 * state where this function has called the ->runtime_suspend callback but the
 * PM core marks the driver as runtime active.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	ret = callback ? callback(dev) : 0;
	if (ret)
		goto err;

	dev_pm_enable_wake_irq_complete(dev);

	/*
	 * If the device can stay in suspend after the system-wide transition
	 * to the working state that will follow, drop the children counter of
	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
	 * function will be called again for it in the meantime.
	 */
	if (pm_runtime_need_not_resume(dev)) {
		pm_runtime_set_suspended(dev);
	} else {
		__update_runtime_status(dev, RPM_SUSPENDED);
		dev->power.needs_force_resume = 1;
	}

	return 0;

err:
	dev_pm_disable_wake_irq_check(dev, true);
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Prior to invoking this function we expect the user to have brought the device
 * into low power state by a call to pm_runtime_force_suspend().  Here we reverse
 * those actions and bring the device into full power, if it is expected to be
 * used on system resume.  In the other case, we defer the resume to be managed
 * via runtime PM.
 *
 * Typically this function may be invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
		goto out;

	/*
	 * The value of the parent's children counter is correct already, so
	 * just update the status of the device.
	 */
	__update_runtime_status(dev, RPM_ACTIVE);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev, false);
	ret = callback ? callback(dev) : 0;
	if (ret) {
		pm_runtime_set_suspended(dev);
		dev_pm_enable_wake_irq_check(dev, false);
		goto out;
	}

	pm_runtime_mark_last_busy(dev);
out:
	dev->power.needs_force_resume = 0;
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
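
/*
 * Illustrative sketch (assumed driver code, not part of this file): the pair
 * above is normally wired into a driver's system sleep callbacks, so that a
 * device that is already runtime suspended is handled consistently across
 * suspend/resume.  The foo_pm_ops and foo_runtime_* names are hypothetical.
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
 *	};
 */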