/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/jiffies.h>

static int __pm_runtime_resume(struct device *dev, bool from_wq);
static int __pm_request_idle(struct device *dev);
static int __pm_request_resume(struct device *dev);

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * __pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_runtime_idle(struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.request_pending) {
		/*
		 * If an idle notification request is pending, cancel it. Any
		 * other pending request takes precedence over us.
		 */
		if (dev->power.request == RPM_REQ_IDLE) {
			dev->power.request = RPM_REQ_NONE;
		} else if (dev->power.request != RPM_REQ_NONE) {
			retval = -EAGAIN;
			goto out;
		}
	}

	dev->power.idle_notification = true;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->bus->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	} else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->type->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->class->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	return retval;
}

/**
 * pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 */
int pm_runtime_idle(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_idle(dev);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_idle);
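
/*
 * Illustrative sketch, not part of the original file: a bus type will
 * usually respond to the idle notification above by trying to suspend the
 * device.  "foo_bus_runtime_idle" is a hypothetical callback name; it would
 * be plugged into the bus type's struct dev_pm_ops as .runtime_idle:
 *
 *	static int foo_bus_runtime_idle(struct device *dev)
 *	{
 *		return pm_runtime_suspend(dev);
 *	}
 *
 * If the suspend fails with -EAGAIN or -EBUSY, the device simply stays
 * active and another idle notification can be sent later.
 */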
/**
 * __pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be suspended and run the ->runtime_suspend()
 * callback provided by its bus type.  If another suspend has been started
 * earlier, wait for it to finish.  If an idle notification or suspend request
 * is pending or scheduled, cancel it.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
int __pm_runtime_suspend(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	bool notify = false;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	/* Pending resume requests take precedence over us. */
	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		retval = -EAGAIN;
		goto out;
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.disable_depth > 0
	    || atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (from_wq) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	dev->power.runtime_status = RPM_SUSPENDING;
	dev->power.deferred_resume = false;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->type && dev->type->pm
	    && dev->type->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->type->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->class->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		dev->power.runtime_status = RPM_ACTIVE;
		if (retval == -EAGAIN || retval == -EBUSY) {
			if (dev->power.timer_expires == 0)
				notify = true;
			dev->power.runtime_error = 0;
		} else {
			pm_runtime_cancel_pending(dev);
		}
	} else {
		dev->power.runtime_status = RPM_SUSPENDED;
		pm_runtime_deactivate_timer(dev);

		if (dev->parent) {
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		__pm_runtime_resume(dev, false);
		retval = -EAGAIN;
		goto out;
	}

	if (notify)
		__pm_runtime_idle(dev);

	if (parent && !parent->power.ignore_children) {
		spin_unlock_irq(&dev->power.lock);

		pm_request_idle(parent);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);

	return retval;
}
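
/*
 * Illustrative sketch, not part of the original file: as the error handling
 * above shows, a ->runtime_suspend() callback may return -EBUSY or -EAGAIN
 * to veto a suspend without putting the device into the error state; any
 * other nonzero value is stored in power.runtime_error and blocks further
 * run-time PM.  All "foo_*" names below are assumptions for the example:
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct foo_chip *chip = dev_get_drvdata(dev);
 *
 *		if (foo_chip_busy(chip))
 *			return -EBUSY;
 *
 *		foo_chip_save_state(chip);
 *		foo_chip_power_off(chip);
 *		return 0;
 *	}
 */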
/**
 * pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 */
int pm_runtime_suspend(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_suspend(dev, false);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_suspend);

/**
 * __pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be woken up and run the ->runtime_resume() callback
 * provided by its bus type.  If another resume has been started earlier, wait
 * for it to finish.  If there's a suspend running in parallel with this
 * function, wait for it to finish and resume the device.  Cancel any
 * scheduled or pending requests.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
int __pm_runtime_resume(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_resume()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (from_wq) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's resume counter and resume it if
		 * necessary.
		 */
		parent = dev->parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's run-time PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			__pm_runtime_resume(parent, false);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}

	dev->power.runtime_status = RPM_RESUMING;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->type && dev->type->pm
	    && dev->type->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->type->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->class->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		dev->power.runtime_status = RPM_SUSPENDED;
		pm_runtime_cancel_pending(dev);
	} else {
		dev->power.runtime_status = RPM_ACTIVE;
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		__pm_request_idle(dev);

 out:
	if (parent) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval);

	return retval;
}
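
/*
 * Illustrative sketch, not part of the original file: the matching
 * ->runtime_resume() callback restores power and state.  A nonzero return
 * value ends up in power.runtime_error, after which every run-time PM entry
 * point returns -EINVAL until the status is reset via
 * __pm_runtime_set_status().  The "foo_*" names are assumptions:
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		struct foo_chip *chip = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = foo_chip_power_on(chip);
 *		if (error)
 *			return error;
 *
 *		foo_chip_restore_state(chip);
 *		return 0;
 *	}
 */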
/**
 * pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 */
int pm_runtime_resume(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_resume(dev, false);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_resume);

/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		__pm_runtime_idle(dev);
		break;
	case RPM_REQ_SUSPEND:
		__pm_runtime_suspend(dev, true);
		break;
	case RPM_REQ_RESUME:
		__pm_runtime_resume(dev, true);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}
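
/*
 * Illustrative sketch, not part of the original file: because the
 * pm_request_*() helpers only take dev->power.lock with interrupts disabled
 * and defer the actual callback to pm_wq via pm_runtime_work() above, they
 * may be used in atomic context, e.g. to resume a device from its interrupt
 * handler.  The "foo_*" names are assumptions:
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct foo_chip *chip = dev_id;
 *
 *		if (!foo_chip_wake_event(chip))
 *			return IRQ_NONE;
 *
 *		pm_request_resume(chip->dev);
 *		return IRQ_HANDLED;
 *	}
 */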
/**
 * __pm_request_idle - Submit an idle notification request for given device.
 * @dev: Device to handle.
 *
 * Check if the device's run-time PM status is correct for suspending the
 * device and queue up a request to run __pm_runtime_idle() for it.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_idle(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status == RPM_SUSPENDED
	    || dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		return retval;

	if (dev->power.request_pending) {
		/* Any requests other than RPM_REQ_IDLE take precedence. */
		if (dev->power.request == RPM_REQ_NONE)
			dev->power.request = RPM_REQ_IDLE;
		else if (dev->power.request != RPM_REQ_IDLE)
			retval = -EAGAIN;
		return retval;
	}

	dev->power.request = RPM_REQ_IDLE;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return retval;
}

/**
 * pm_request_idle - Submit an idle notification request for given device.
 * @dev: Device to handle.
 */
int pm_request_idle(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = __pm_request_idle(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_request_idle);

/**
 * __pm_request_suspend - Submit a suspend request for given device.
 * @dev: Device to suspend.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_suspend(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EINPROGRESS;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval < 0)
		return retval;

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but we can
		 * overtake any other pending request.
		 */
		if (dev->power.request == RPM_REQ_RESUME)
			retval = -EAGAIN;
		else if (dev->power.request != RPM_REQ_SUSPEND)
			dev->power.request = retval ?
						RPM_REQ_NONE : RPM_REQ_SUSPEND;
		return retval;
	} else if (retval) {
		return retval;
	}

	dev->power.request = RPM_REQ_SUSPEND;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return 0;
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and execute __pm_request_suspend() in that case.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		__pm_request_suspend(dev);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval = 0;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	if (!delay) {
		retval = __pm_request_suspend(dev);
		goto out;
	}

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but any
		 * other pending requests have to be canceled.
		 */
		if (dev->power.request == RPM_REQ_RESUME) {
			retval = -EAGAIN;
			goto out;
		}
		dev->power.request = RPM_REQ_NONE;
	}

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	if (!dev->power.timer_expires)
		dev->power.timer_expires = 1;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);

/**
 * __pm_request_resume - Submit a resume request for given device.
 * @dev: Device to resume.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_resume(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.runtime_status == RPM_RESUMING)
		retval = -EINPROGRESS;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval < 0)
		return retval;

	pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		dev->power.deferred_resume = true;
		return retval;
	}
	if (dev->power.request_pending) {
		/* If a non-resume request is pending, we can overtake it. */
		dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
		return retval;
	}
	if (retval)
		return retval;

	dev->power.request = RPM_REQ_RESUME;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return retval;
}

/**
 * pm_request_resume - Submit a resume request for given device.
 * @dev: Device to resume.
 */
int pm_request_resume(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = __pm_request_resume(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_request_resume);
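
/*
 * Illustrative sketch, not part of the original file: a driver that wants
 * its device powered down only after a period of inactivity can rearm the
 * suspend timer whenever a request completes; calling pm_schedule_suspend()
 * again simply replaces any previously scheduled expiration time.  The
 * "foo_*" names and the 5 second delay are assumptions:
 *
 *	#define FOO_IDLE_DELAY_MS	5000
 *
 *	static void foo_request_done(struct foo_chip *chip)
 *	{
 *		foo_chip_complete_request(chip);
 *		pm_schedule_suspend(chip->dev, FOO_IDLE_DELAY_MS);
 *	}
 */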
/**
 * __pm_runtime_get - Reference count a device and wake it up, if necessary.
 * @dev: Device to handle.
 * @sync: If set and the device is suspended, resume it synchronously.
 *
 * Increment the usage count of the device and resume it or submit a resume
 * request for it, depending on the value of @sync.
 */
int __pm_runtime_get(struct device *dev, bool sync)
{
	int retval;

	atomic_inc(&dev->power.usage_count);
	retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_get);

/**
 * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
 * @dev: Device to handle.
 * @sync: If the device's bus type is to be notified, do that synchronously.
 *
 * Decrement the usage count of the device and if it reaches zero, carry out a
 * synchronous idle notification or submit an idle notification request for it,
 * depending on the value of @sync.
 */
int __pm_runtime_put(struct device *dev, bool sync)
{
	int retval = 0;

	if (atomic_dec_and_test(&dev->power.usage_count))
		retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_put);

/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error
 * field and the device parent's counter of unsuspended children is modified
 * to reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It is always possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has run-time PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	dev->power.runtime_status = status;
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
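
/*
 * Illustrative sketch, not part of the original file: pm_runtime_init()
 * below starts every device out as RPM_SUSPENDED with run-time PM disabled,
 * so a driver whose device is actually powered up at probe time typically
 * declares that state and then enables run-time PM.  This assumes the
 * pm_runtime_set_active() inline from <linux/pm_runtime.h>, which wraps
 * __pm_runtime_set_status(dev, RPM_ACTIVE); "foo_probe" and foo_chip_init()
 * are hypothetical:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		int error = foo_chip_init(dev);
 *
 *		if (error)
 *			return error;
 *
 *		pm_runtime_set_active(dev);
 *		pm_runtime_enable(dev);
 *		return 0;
 *	}
 */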
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device
 * up.  Next, make sure that all pending requests for the device have been
 * flushed from pm_wq and wait for all run-time PM operations involving the
 * device in progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		__pm_runtime_resume(dev, false);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
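
/*
 * Illustrative sketch, not part of the original file: code that is about to
 * take direct control of a device, such as a system sleep transition, can
 * use pm_runtime_barrier() to flush run-time PM activity first.  Note that
 * the usage count is only held for the duration of the call, so this is a
 * flush, not a permanent blocker; __pm_runtime_disable() serves the latter
 * purpose.  "foo_prepare" is a hypothetical name:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		if (pm_runtime_barrier(dev))
 *			dev_dbg(dev, "pending resume request flushed\n");
 *		return 0;
 *	}
 */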
/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because
	 * that means there probably is some I/O to process and disabling
	 * run-time PM shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		__pm_runtime_resume(dev, false);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block run-time PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	__pm_runtime_resume(dev, false);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock run-time PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		__pm_runtime_idle(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);
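
/*
 * Illustrative sketch, not part of the original file: pm_runtime_forbid()
 * and pm_runtime_allow() lend themselves to backing a user-space switch,
 * e.g. a sysfs attribute mapping "on" and "auto" onto them.  The "foo"
 * attribute below is an assumption, not an existing interface:
 *
 *	static ssize_t foo_control_store(struct device *dev,
 *					 struct device_attribute *attr,
 *					 const char *buf, size_t n)
 *	{
 *		if (sysfs_streq(buf, "auto"))
 *			pm_runtime_allow(dev);
 *		else if (sysfs_streq(buf, "on"))
 *			pm_runtime_forbid(dev);
 *		else
 *			return -EINVAL;
 *		return n;
 *	}
 */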
/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	spin_lock_init(&dev->power.lock);

	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
}