// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/power/main.c - PM subsystem core functionality.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 */

#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/pm-trace.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>

#include "power.h"

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended. To avoid races with the suspend/hibernate code,
 * they should always be called with system_transition_mutex held
 * (gfp_allowed_mask also should only be modified with system_transition_mutex
 * held, unless the suspend/hibernate code is guaranteed not to run in parallel
 * with that modification).
 */
static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

unsigned int lock_system_sleep(void)
{
	unsigned int flags = current->flags;
	current->flags |= PF_NOFREEZE;
	mutex_lock(&system_transition_mutex);
	return flags;
}
EXPORT_SYMBOL_GPL(lock_system_sleep);

void unlock_system_sleep(unsigned int flags)
{
	/*
	 * Don't use freezer_count() because we don't want the call to
	 * try_to_freeze() here.
	 *
	 * Reason:
	 * Fundamentally, we just don't need it, because the freezing condition
	 * doesn't come into effect until we release the
	 * system_transition_mutex lock, since the freezer always works with
	 * system_transition_mutex held.
	 *
	 * More importantly, in the case of hibernation,
	 * unlock_system_sleep() gets called in snapshot_read() and
	 * snapshot_write() when the freezing condition is still in effect.
	 * Which means, if we use try_to_freeze() here, it would make them
	 * enter the refrigerator, thus causing hibernation to lock up.
	 */
	if (!(flags & PF_NOFREEZE))
		current->flags &= ~PF_NOFREEZE;
	mutex_unlock(&system_transition_mutex);
}
EXPORT_SYMBOL_GPL(unlock_system_sleep);

void ksys_sync_helper(void)
{
	ktime_t start;
	long elapsed_msecs;

	start = ktime_get();
	ksys_sync();
	elapsed_msecs = ktime_to_ms(ktime_sub(ktime_get(), start));
	pr_info("Filesystems sync: %ld.%03ld seconds\n",
		elapsed_msecs / MSEC_PER_SEC, elapsed_msecs % MSEC_PER_SEC);
}
EXPORT_SYMBOL_GPL(ksys_sync_helper);

/* Routines for PM-transition notifications */

static BLOCKING_NOTIFIER_HEAD(pm_chain_head);

int register_pm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(register_pm_notifier);

int unregister_pm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(unregister_pm_notifier);

void pm_report_hw_sleep_time(u64 t)
{
	suspend_stats.last_hw_sleep = t;
	suspend_stats.total_hw_sleep += t;
}
EXPORT_SYMBOL_GPL(pm_report_hw_sleep_time);

void pm_report_max_hw_sleep(u64 t)
{
	suspend_stats.max_hw_sleep = t;
}
EXPORT_SYMBOL_GPL(pm_report_max_hw_sleep);

int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down)
{
	int ret;

	ret = blocking_notifier_call_chain_robust(&pm_chain_head, val_up, val_down, NULL);

	return notifier_to_errno(ret);
}

int pm_notifier_call_chain(unsigned long val)
{
	return blocking_notifier_call_chain(&pm_chain_head, val, NULL);
}

/* If set, devices may be suspended and resumed asynchronously. */
int pm_async_enabled = 1;

static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%d\n", pm_async_enabled);
}

static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr,
			      const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	if (val > 1)
		return -EINVAL;

	pm_async_enabled = val;
	return n;
}

power_attr(pm_async);
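
/*
 * Example: pm_async is exposed as /sys/power/pm_async, so asynchronous
 * suspend/resume of devices can be turned off or back on from user space,
 * for instance:
 *
 *	# echo 0 > /sys/power/pm_async
 *	# echo 1 > /sys/power/pm_async
 */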

#ifdef CONFIG_SUSPEND
static ssize_t mem_sleep_show(struct kobject *kobj, struct kobj_attribute *attr,
			      char *buf)
{
	char *s = buf;
	suspend_state_t i;

	for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++) {
		if (i >= PM_SUSPEND_MEM && cxl_mem_active())
			continue;
		if (mem_sleep_states[i]) {
			const char *label = mem_sleep_states[i];

			if (mem_sleep_current == i)
				s += sprintf(s, "[%s] ", label);
			else
				s += sprintf(s, "%s ", label);
		}
	}

	/* Convert the last space to a newline if needed. */
	if (s != buf)
		*(s-1) = '\n';

	return (s - buf);
}

static suspend_state_t decode_suspend_state(const char *buf, size_t n)
{
	suspend_state_t state;
	char *p;
	int len;

	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {
		const char *label = mem_sleep_states[state];

		if (label && len == strlen(label) && !strncmp(buf, label, len))
			return state;
	}

	return PM_SUSPEND_ON;
}

static ssize_t mem_sleep_store(struct kobject *kobj, struct kobj_attribute *attr,
			       const char *buf, size_t n)
{
	suspend_state_t state;
	int error;

	error = pm_autosleep_lock();
	if (error)
		return error;

	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	state = decode_suspend_state(buf, n);
	if (state < PM_SUSPEND_MAX && state > PM_SUSPEND_ON)
		mem_sleep_current = state;
	else
		error = -EINVAL;

 out:
	pm_autosleep_unlock();
	return error ? error : n;
}

power_attr(mem_sleep);
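
/*
 * Example: /sys/power/mem_sleep lists the available suspend-to-RAM variants
 * with the current selection in brackets, and writing one of the labels
 * selects what a subsequent "mem" write to /sys/power/state will do.
 * A typical session might look like:
 *
 *	# cat /sys/power/mem_sleep
 *	[s2idle] deep
 *	# echo deep > /sys/power/mem_sleep
 *
 * The set of labels shown depends on the platform.
 */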

/*
 * sync_on_suspend: invoke ksys_sync_helper() before suspend.
 *
 * show() returns whether ksys_sync_helper() is invoked before suspend.
 * store() accepts 0 or 1. 0 disables ksys_sync_helper() and 1 enables it.
 */
bool sync_on_suspend_enabled = !IS_ENABLED(CONFIG_SUSPEND_SKIP_SYNC);

static ssize_t sync_on_suspend_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", sync_on_suspend_enabled);
}

static ssize_t sync_on_suspend_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	if (val > 1)
		return -EINVAL;

	sync_on_suspend_enabled = !!val;
	return n;
}

power_attr(sync_on_suspend);
#endif /* CONFIG_SUSPEND */

#ifdef CONFIG_PM_SLEEP_DEBUG
int pm_test_level = TEST_NONE;

static const char * const pm_tests[__TEST_AFTER_LAST] = {
	[TEST_NONE] = "none",
	[TEST_CORE] = "core",
	[TEST_CPUS] = "processors",
	[TEST_PLATFORM] = "platform",
	[TEST_DEVICES] = "devices",
	[TEST_FREEZER] = "freezer",
};

static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	char *s = buf;
	int level;

	for (level = TEST_FIRST; level <= TEST_MAX; level++)
		if (pm_tests[level]) {
			if (level == pm_test_level)
				s += sprintf(s, "[%s] ", pm_tests[level]);
			else
				s += sprintf(s, "%s ", pm_tests[level]);
		}

	if (s != buf)
		/* convert the last space to a newline */
		*(s-1) = '\n';

	return (s - buf);
}

static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t n)
{
	unsigned int sleep_flags;
	const char * const *s;
	int error = -EINVAL;
	int level;
	char *p;
	int len;

	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	sleep_flags = lock_system_sleep();

	level = TEST_FIRST;
	for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
		if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) {
			pm_test_level = level;
			error = 0;
			break;
		}

	unlock_system_sleep(sleep_flags);

	return error ? error : n;
}

power_attr(pm_test);
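
/*
 * Example: the pm_test levels above allow a suspend to be exercised only
 * partially, which is useful for debugging.  A session might look like:
 *
 *	# cat /sys/power/pm_test
 *	[none] core processors platform devices freezer
 *	# echo devices > /sys/power/pm_test
 *	# echo mem > /sys/power/state
 *
 * With "devices" selected, the transition stops after suspending devices,
 * waits a few seconds and resumes them instead of entering the sleep state
 * for real (see Documentation/power/basic-pm-debugging.rst).
 */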
#endif /* CONFIG_PM_SLEEP_DEBUG */

static char *suspend_step_name(enum suspend_stat_step step)
{
	switch (step) {
	case SUSPEND_FREEZE:
		return "freeze";
	case SUSPEND_PREPARE:
		return "prepare";
	case SUSPEND_SUSPEND:
		return "suspend";
	case SUSPEND_SUSPEND_NOIRQ:
		return "suspend_noirq";
	case SUSPEND_RESUME_NOIRQ:
		return "resume_noirq";
	case SUSPEND_RESUME:
		return "resume";
	default:
		return "";
	}
}

#define suspend_attr(_name, format_str)				\
static ssize_t _name##_show(struct kobject *kobj,		\
		struct kobj_attribute *attr, char *buf)		\
{								\
	return sprintf(buf, format_str, suspend_stats._name);	\
}								\
static struct kobj_attribute _name = __ATTR_RO(_name)

suspend_attr(success, "%d\n");
suspend_attr(fail, "%d\n");
suspend_attr(failed_freeze, "%d\n");
suspend_attr(failed_prepare, "%d\n");
suspend_attr(failed_suspend, "%d\n");
suspend_attr(failed_suspend_late, "%d\n");
suspend_attr(failed_suspend_noirq, "%d\n");
suspend_attr(failed_resume, "%d\n");
suspend_attr(failed_resume_early, "%d\n");
suspend_attr(failed_resume_noirq, "%d\n");
suspend_attr(last_hw_sleep, "%llu\n");
suspend_attr(total_hw_sleep, "%llu\n");
suspend_attr(max_hw_sleep, "%llu\n");

static ssize_t last_failed_dev_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	int index;
	char *last_failed_dev = NULL;

	index = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
	index %= REC_FAILED_NUM;
	last_failed_dev = suspend_stats.failed_devs[index];

	return sprintf(buf, "%s\n", last_failed_dev);
}
static struct kobj_attribute last_failed_dev = __ATTR_RO(last_failed_dev);

static ssize_t last_failed_errno_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	int index;
	int last_failed_errno;

	index = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
	index %= REC_FAILED_NUM;
	last_failed_errno = suspend_stats.errno[index];

	return sprintf(buf, "%d\n", last_failed_errno);
}
static struct kobj_attribute last_failed_errno = __ATTR_RO(last_failed_errno);

static ssize_t last_failed_step_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	int index;
	enum suspend_stat_step step;
	char *last_failed_step = NULL;

	index = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
	index %= REC_FAILED_NUM;
	step = suspend_stats.failed_steps[index];
	last_failed_step = suspend_step_name(step);

	return sprintf(buf, "%s\n", last_failed_step);
}
static struct kobj_attribute last_failed_step = __ATTR_RO(last_failed_step);

static struct attribute *suspend_attrs[] = {
	&success.attr,
	&fail.attr,
	&failed_freeze.attr,
	&failed_prepare.attr,
	&failed_suspend.attr,
	&failed_suspend_late.attr,
	&failed_suspend_noirq.attr,
	&failed_resume.attr,
	&failed_resume_early.attr,
	&failed_resume_noirq.attr,
	&last_failed_dev.attr,
	&last_failed_errno.attr,
	&last_failed_step.attr,
	&last_hw_sleep.attr,
	&total_hw_sleep.attr,
	&max_hw_sleep.attr,
	NULL,
};

static umode_t suspend_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	if (attr != &last_hw_sleep.attr &&
	    attr != &total_hw_sleep.attr &&
	    attr != &max_hw_sleep.attr)
		return 0444;

#ifdef CONFIG_ACPI
	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
		return 0444;
#endif
	return 0;
}

static const struct attribute_group suspend_attr_group = {
	.name = "suspend_stats",
	.attrs = suspend_attrs,
	.is_visible = suspend_attr_is_visible,
};
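
/*
 * Example: the group above appears as /sys/power/suspend_stats/, so the
 * counters can be read directly, for instance:
 *
 *	# cat /sys/power/suspend_stats/success
 *	# cat /sys/power/suspend_stats/last_failed_dev
 *
 * The *_hw_sleep entries are only visible on ACPI systems with the Low
 * Power S0 Idle flag set (see suspend_attr_is_visible() above).
 */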

#ifdef CONFIG_DEBUG_FS
static int suspend_stats_show(struct seq_file *s, void *unused)
{
	int i, index, last_dev, last_errno, last_step;

	last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
	last_dev %= REC_FAILED_NUM;
	last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
	last_errno %= REC_FAILED_NUM;
	last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
	last_step %= REC_FAILED_NUM;
	seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
			"%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
			"success", suspend_stats.success,
			"fail", suspend_stats.fail,
			"failed_freeze", suspend_stats.failed_freeze,
			"failed_prepare", suspend_stats.failed_prepare,
			"failed_suspend", suspend_stats.failed_suspend,
			"failed_suspend_late",
				suspend_stats.failed_suspend_late,
			"failed_suspend_noirq",
				suspend_stats.failed_suspend_noirq,
			"failed_resume", suspend_stats.failed_resume,
			"failed_resume_early",
				suspend_stats.failed_resume_early,
			"failed_resume_noirq",
				suspend_stats.failed_resume_noirq);
	seq_printf(s, "failures:\n  last_failed_dev:\t%-s\n",
			suspend_stats.failed_devs[last_dev]);
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_dev + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-s\n",
			suspend_stats.failed_devs[index]);
	}
	seq_printf(s, "  last_failed_errno:\t%-d\n",
			suspend_stats.errno[last_errno]);
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_errno + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-d\n",
			suspend_stats.errno[index]);
	}
	seq_printf(s, "  last_failed_step:\t%-s\n",
			suspend_step_name(
				suspend_stats.failed_steps[last_step]));
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_step + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-s\n",
			suspend_step_name(
				suspend_stats.failed_steps[index]));
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(suspend_stats);

static int __init pm_debugfs_init(void)
{
	debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO,
			NULL, NULL, &suspend_stats_fops);
	return 0;
}

late_initcall(pm_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_SLEEP_DEBUG
/*
 * pm_print_times: print time taken by devices to suspend and resume.
 *
 * show() returns whether printing of suspend and resume times is enabled.
 * store() accepts 0 or 1. 0 disables printing and 1 enables it.
 */
bool pm_print_times_enabled;

static ssize_t pm_print_times_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pm_print_times_enabled);
}

static ssize_t pm_print_times_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	if (val > 1)
		return -EINVAL;

	pm_print_times_enabled = !!val;
	return n;
}

power_attr(pm_print_times);

static inline void pm_print_times_init(void)
{
	pm_print_times_enabled = !!initcall_debug;
}

static ssize_t pm_wakeup_irq_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	if (!pm_wakeup_irq())
		return -ENODATA;

	return sprintf(buf, "%u\n", pm_wakeup_irq());
}

power_attr_ro(pm_wakeup_irq);

bool pm_debug_messages_on __read_mostly;

bool pm_debug_messages_should_print(void)
{
	return pm_debug_messages_on && pm_suspend_target_state != PM_SUSPEND_ON;
}
EXPORT_SYMBOL_GPL(pm_debug_messages_should_print);

static ssize_t pm_debug_messages_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pm_debug_messages_on);
}

static ssize_t pm_debug_messages_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	if (val > 1)
		return -EINVAL;

	pm_debug_messages_on = !!val;
	return n;
}

power_attr(pm_debug_messages);

static int __init pm_debug_messages_setup(char *str)
{
	pm_debug_messages_on = true;
	return 1;
}
__setup("pm_debug_messages", pm_debug_messages_setup);

#else /* !CONFIG_PM_SLEEP_DEBUG */
static inline void pm_print_times_init(void) {}
#endif /* CONFIG_PM_SLEEP_DEBUG */

struct kobject *power_kobj;

/*
 * state - control system sleep states.
 *
 * show() returns available sleep state labels, which may be "mem", "standby",
 * "freeze" and "disk" (hibernation).
 * See Documentation/admin-guide/pm/sleep-states.rst for a description of
 * what they mean.
 *
 * store() accepts one of those strings, translates it into the proper
 * enumerated value, and initiates a suspend transition.
 */
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	char *s = buf;
#ifdef CONFIG_SUSPEND
	suspend_state_t i;

	for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
		if (pm_states[i])
			s += sprintf(s,"%s ", pm_states[i]);

#endif
	if (hibernation_available())
		s += sprintf(s, "disk ");
	if (s != buf)
		/* convert the last space to a newline */
		*(s-1) = '\n';
	return (s - buf);
}

static suspend_state_t decode_state(const char *buf, size_t n)
{
#ifdef CONFIG_SUSPEND
	suspend_state_t state;
#endif
	char *p;
	int len;

	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	/* Check hibernation first. */
	if (len == 4 && str_has_prefix(buf, "disk"))
		return PM_SUSPEND_MAX;

#ifdef CONFIG_SUSPEND
	for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {
		const char *label = pm_states[state];

		if (label && len == strlen(label) && !strncmp(buf, label, len))
			return state;
	}
#endif

	return PM_SUSPEND_ON;
}

static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t n)
{
	suspend_state_t state;
	int error;

	error = pm_autosleep_lock();
	if (error)
		return error;

	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	state = decode_state(buf, n);
	if (state < PM_SUSPEND_MAX) {
		if (state == PM_SUSPEND_MEM)
			state = mem_sleep_current;

		error = pm_suspend(state);
	} else if (state == PM_SUSPEND_MAX) {
		error = hibernate();
	} else {
		error = -EINVAL;
	}

 out:
	pm_autosleep_unlock();
	return error ? error : n;
}

power_attr(state);
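
/*
 * Example: /sys/power/state is the main user space entry point for starting
 * a sleep transition.  On a typical system this might look like:
 *
 *	# cat /sys/power/state
 *	freeze mem disk
 *	# echo mem > /sys/power/state		(suspend, as selected by mem_sleep)
 *	# echo disk > /sys/power/state		(hibernate)
 *
 * The labels actually listed depend on the kernel configuration and on what
 * the platform supports.
 */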

#ifdef CONFIG_PM_SLEEP
/*
 * The 'wakeup_count' attribute, along with the functions defined in
 * drivers/base/power/wakeup.c, provides a means by which wakeup events can be
 * handled in a non-racy way.
 *
 * If a wakeup event occurs when the system is in a sleep state, it simply is
 * woken up. In turn, if an event that would wake the system up from a sleep
 * state occurs when it is undergoing a transition to that sleep state, the
 * transition should be aborted. Moreover, if such an event occurs when the
 * system is in the working state, an attempt to start a transition to the
 * given sleep state should fail during a certain period after the detection
 * of the event. Using the 'state' attribute alone is not sufficient to satisfy
 * these requirements, because a wakeup event may occur exactly when 'state'
 * is being written to and may be delivered to user space right before it is
 * frozen, so the event will remain only partially processed until the system is
 * woken up by another event. In particular, it won't cause the transition to
 * a sleep state to be aborted.
 *
 * This difficulty may be overcome if user space uses 'wakeup_count' before
 * writing to 'state'. It first should read from 'wakeup_count' and store
 * the read value. Then, after carrying out its own preparations for the system
 * transition to a sleep state, it should write the stored value to
 * 'wakeup_count'. If that fails, at least one wakeup event has occurred since
 * 'wakeup_count' was read and 'state' should not be written to. Otherwise, it
 * is allowed to write to 'state', but the transition will be aborted if there
 * are any wakeup events detected after 'wakeup_count' was written to.
 */

static ssize_t wakeup_count_show(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 char *buf)
{
	unsigned int val;

	return pm_get_wakeup_count(&val, true) ?
		sprintf(buf, "%u\n", val) : -EINTR;
}

static ssize_t wakeup_count_store(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  const char *buf, size_t n)
{
	unsigned int val;
	int error;

	error = pm_autosleep_lock();
	if (error)
		return error;

	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	error = -EINVAL;
	if (sscanf(buf, "%u", &val) == 1) {
		if (pm_save_wakeup_count(val))
			error = n;
		else
			pm_print_active_wakeup_sources();
	}

 out:
	pm_autosleep_unlock();
	return error;
}

power_attr(wakeup_count);
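
/*
 * Example of the protocol described above, as a suspend daemon might use it:
 *
 *	# count=$(cat /sys/power/wakeup_count)
 *	  ... finish user space suspend preparations ...
 *	# echo "$count" > /sys/power/wakeup_count	(fails if wakeup events occurred)
 *	# echo mem > /sys/power/state
 *
 * If the write to wakeup_count fails, the suspend attempt should be
 * abandoned and retried later.
 */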

#ifdef CONFIG_PM_AUTOSLEEP
static ssize_t autosleep_show(struct kobject *kobj,
			      struct kobj_attribute *attr,
			      char *buf)
{
	suspend_state_t state = pm_autosleep_state();

	if (state == PM_SUSPEND_ON)
		return sprintf(buf, "off\n");

#ifdef CONFIG_SUSPEND
	if (state < PM_SUSPEND_MAX)
		return sprintf(buf, "%s\n", pm_states[state] ?
					pm_states[state] : "error");
#endif
#ifdef CONFIG_HIBERNATION
	return sprintf(buf, "disk\n");
#else
	return sprintf(buf, "error");
#endif
}

static ssize_t autosleep_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t n)
{
	suspend_state_t state = decode_state(buf, n);
	int error;

	if (state == PM_SUSPEND_ON
	    && strcmp(buf, "off") && strcmp(buf, "off\n"))
		return -EINVAL;

	if (state == PM_SUSPEND_MEM)
		state = mem_sleep_current;

	error = pm_autosleep_set_state(state);
	return error ? error : n;
}

power_attr(autosleep);
#endif /* CONFIG_PM_AUTOSLEEP */

#ifdef CONFIG_PM_WAKELOCKS
static ssize_t wake_lock_show(struct kobject *kobj,
			      struct kobj_attribute *attr,
			      char *buf)
{
	return pm_show_wakelocks(buf, true);
}

static ssize_t wake_lock_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t n)
{
	int error = pm_wake_lock(buf);
	return error ? error : n;
}

power_attr(wake_lock);

static ssize_t wake_unlock_show(struct kobject *kobj,
				struct kobj_attribute *attr,
				char *buf)
{
	return pm_show_wakelocks(buf, false);
}

static ssize_t wake_unlock_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t n)
{
	int error = pm_wake_unlock(buf);
	return error ? error : n;
}

power_attr(wake_unlock);
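
/*
 * Example: user space wakelocks are managed through the two files above,
 * for instance:
 *
 *	# echo mylock > /sys/power/wake_lock		(acquire "mylock")
 *	# echo mylock > /sys/power/wake_unlock		(release "mylock")
 *
 * A lock name may also be followed by a timeout; see
 * Documentation/ABI/testing/sysfs-power for the exact format.
 */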

#endif /* CONFIG_PM_WAKELOCKS */
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_TRACE
int pm_trace_enabled;

static ssize_t pm_trace_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%d\n", pm_trace_enabled);
}

static ssize_t
pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr,
	       const char *buf, size_t n)
{
	int val;

	if (sscanf(buf, "%d", &val) == 1) {
		pm_trace_enabled = !!val;
		if (pm_trace_enabled) {
			pr_warn("PM: Enabling pm_trace changes system date and time during resume.\n"
				"PM: Correct system time has to be restored manually after resume.\n");
		}
		return n;
	}
	return -EINVAL;
}

power_attr(pm_trace);

static ssize_t pm_trace_dev_match_show(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       char *buf)
{
	return show_trace_dev_match(buf, PAGE_SIZE);
}

power_attr_ro(pm_trace_dev_match);

#endif /* CONFIG_PM_TRACE */

#ifdef CONFIG_FREEZER
static ssize_t pm_freeze_timeout_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", freeze_timeout_msecs);
}

static ssize_t pm_freeze_timeout_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	freeze_timeout_msecs = val;
	return n;
}

power_attr(pm_freeze_timeout);
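
/*
 * Example: /sys/power/pm_freeze_timeout holds the time, in milliseconds,
 * that the freezer waits for tasks to freeze before giving up, e.g.:
 *
 *	# cat /sys/power/pm_freeze_timeout
 *	20000
 *	# echo 10000 > /sys/power/pm_freeze_timeout
 */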

#endif	/* CONFIG_FREEZER*/

static struct attribute * g[] = {
	&state_attr.attr,
#ifdef CONFIG_PM_TRACE
	&pm_trace_attr.attr,
	&pm_trace_dev_match_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP
	&pm_async_attr.attr,
	&wakeup_count_attr.attr,
#ifdef CONFIG_SUSPEND
	&mem_sleep_attr.attr,
	&sync_on_suspend_attr.attr,
#endif
#ifdef CONFIG_PM_AUTOSLEEP
	&autosleep_attr.attr,
#endif
#ifdef CONFIG_PM_WAKELOCKS
	&wake_lock_attr.attr,
	&wake_unlock_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP_DEBUG
	&pm_test_attr.attr,
	&pm_print_times_attr.attr,
	&pm_wakeup_irq_attr.attr,
	&pm_debug_messages_attr.attr,
#endif
#endif
#ifdef CONFIG_FREEZER
	&pm_freeze_timeout_attr.attr,
#endif
	NULL,
};

static const struct attribute_group attr_group = {
	.attrs = g,
};

static const struct attribute_group *attr_groups[] = {
	&attr_group,
#ifdef CONFIG_PM_SLEEP
	&suspend_attr_group,
#endif
	NULL,
};

struct workqueue_struct *pm_wq;
EXPORT_SYMBOL_GPL(pm_wq);

static int __init pm_start_workqueue(void)
{
	pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);

	return pm_wq ? 0 : -ENOMEM;
}

static int __init pm_init(void)
{
	int error = pm_start_workqueue();
	if (error)
		return error;
	hibernate_image_size_init();
	hibernate_reserved_size_init();
	pm_states_init();
	power_kobj = kobject_create_and_add("power", NULL);
	if (!power_kobj)
		return -ENOMEM;
	error = sysfs_create_groups(power_kobj, attr_groups);
	if (error)
		return error;
	pm_print_times_init();
	return pm_autosleep_init();
}

core_initcall(pm_init);