1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * kernel/power/main.c - PM subsystem core functionality. 4 * 5 * Copyright (c) 2003 Patrick Mochel 6 * Copyright (c) 2003 Open Source Development Lab 7 */ 8 9 #include <linux/acpi.h> 10 #include <linux/export.h> 11 #include <linux/kobject.h> 12 #include <linux/string.h> 13 #include <linux/pm-trace.h> 14 #include <linux/workqueue.h> 15 #include <linux/debugfs.h> 16 #include <linux/seq_file.h> 17 #include <linux/suspend.h> 18 #include <linux/syscalls.h> 19 #include <linux/pm_runtime.h> 20 21 #include "power.h" 22 23 #ifdef CONFIG_PM_SLEEP 24 /* 25 * The following functions are used by the suspend/hibernate code to temporarily 26 * change gfp_allowed_mask in order to avoid using I/O during memory allocations 27 * while devices are suspended. To avoid races with the suspend/hibernate code, 28 * they should always be called with system_transition_mutex held 29 * (gfp_allowed_mask also should only be modified with system_transition_mutex 30 * held, unless the suspend/hibernate code is guaranteed not to run in parallel 31 * with that modification). 32 */ 33 static gfp_t saved_gfp_mask; 34 35 void pm_restore_gfp_mask(void) 36 { 37 WARN_ON(!mutex_is_locked(&system_transition_mutex)); 38 if (saved_gfp_mask) { 39 gfp_allowed_mask = saved_gfp_mask; 40 saved_gfp_mask = 0; 41 } 42 } 43 44 void pm_restrict_gfp_mask(void) 45 { 46 WARN_ON(!mutex_is_locked(&system_transition_mutex)); 47 WARN_ON(saved_gfp_mask); 48 saved_gfp_mask = gfp_allowed_mask; 49 gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS); 50 } 51 52 unsigned int lock_system_sleep(void) 53 { 54 unsigned int flags = current->flags; 55 current->flags |= PF_NOFREEZE; 56 mutex_lock(&system_transition_mutex); 57 return flags; 58 } 59 EXPORT_SYMBOL_GPL(lock_system_sleep); 60 61 void unlock_system_sleep(unsigned int flags) 62 { 63 if (!(flags & PF_NOFREEZE)) 64 current->flags &= ~PF_NOFREEZE; 65 mutex_unlock(&system_transition_mutex); 66 } 67 EXPORT_SYMBOL_GPL(unlock_system_sleep); 68 69 void ksys_sync_helper(void) 70 { 71 ktime_t start; 72 long elapsed_msecs; 73 74 start = ktime_get(); 75 ksys_sync(); 76 elapsed_msecs = ktime_to_ms(ktime_sub(ktime_get(), start)); 77 pr_info("Filesystems sync: %ld.%03ld seconds\n", 78 elapsed_msecs / MSEC_PER_SEC, elapsed_msecs % MSEC_PER_SEC); 79 } 80 EXPORT_SYMBOL_GPL(ksys_sync_helper); 81 82 /* Routines for PM-transition notifications */ 83 84 static BLOCKING_NOTIFIER_HEAD(pm_chain_head); 85 86 int register_pm_notifier(struct notifier_block *nb) 87 { 88 return blocking_notifier_chain_register(&pm_chain_head, nb); 89 } 90 EXPORT_SYMBOL_GPL(register_pm_notifier); 91 92 int unregister_pm_notifier(struct notifier_block *nb) 93 { 94 return blocking_notifier_chain_unregister(&pm_chain_head, nb); 95 } 96 EXPORT_SYMBOL_GPL(unregister_pm_notifier); 97 98 int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down) 99 { 100 int ret; 101 102 ret = blocking_notifier_call_chain_robust(&pm_chain_head, val_up, val_down, NULL); 103 104 return notifier_to_errno(ret); 105 } 106 107 int pm_notifier_call_chain(unsigned long val) 108 { 109 return blocking_notifier_call_chain(&pm_chain_head, val, NULL); 110 } 111 112 /* If set, devices may be suspended and resumed asynchronously. */ 113 int pm_async_enabled = 1; 114 115 static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr, 116 char *buf) 117 { 118 return sprintf(buf, "%d\n", pm_async_enabled); 119 } 120 121 static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr, 122 const char *buf, size_t n) 123 { 124 unsigned long val; 125 126 if (kstrtoul(buf, 10, &val)) 127 return -EINVAL; 128 129 if (val > 1) 130 return -EINVAL; 131 132 pm_async_enabled = val; 133 return n; 134 } 135 136 power_attr(pm_async); 137 138 #ifdef CONFIG_SUSPEND 139 static ssize_t mem_sleep_show(struct kobject *kobj, struct kobj_attribute *attr, 140 char *buf) 141 { 142 char *s = buf; 143 suspend_state_t i; 144 145 for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++) { 146 if (i >= PM_SUSPEND_MEM && cxl_mem_active()) 147 continue; 148 if (mem_sleep_states[i]) { 149 const char *label = mem_sleep_states[i]; 150 151 if (mem_sleep_current == i) 152 s += sprintf(s, "[%s] ", label); 153 else 154 s += sprintf(s, "%s ", label); 155 } 156 } 157 158 /* Convert the last space to a newline if needed. */ 159 if (s != buf) 160 *(s-1) = '\n'; 161 162 return (s - buf); 163 } 164 165 static suspend_state_t decode_suspend_state(const char *buf, size_t n) 166 { 167 suspend_state_t state; 168 char *p; 169 int len; 170 171 p = memchr(buf, '\n', n); 172 len = p ? p - buf : n; 173 174 for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) { 175 const char *label = mem_sleep_states[state]; 176 177 if (label && len == strlen(label) && !strncmp(buf, label, len)) 178 return state; 179 } 180 181 return PM_SUSPEND_ON; 182 } 183 184 static ssize_t mem_sleep_store(struct kobject *kobj, struct kobj_attribute *attr, 185 const char *buf, size_t n) 186 { 187 suspend_state_t state; 188 int error; 189 190 error = pm_autosleep_lock(); 191 if (error) 192 return error; 193 194 if (pm_autosleep_state() > PM_SUSPEND_ON) { 195 error = -EBUSY; 196 goto out; 197 } 198 199 state = decode_suspend_state(buf, n); 200 if (state < PM_SUSPEND_MAX && state > PM_SUSPEND_ON) 201 mem_sleep_current = state; 202 else 203 error = -EINVAL; 204 205 out: 206 pm_autosleep_unlock(); 207 return error ? error : n; 208 } 209 210 power_attr(mem_sleep); 211 212 /* 213 * sync_on_suspend: invoke ksys_sync_helper() before suspend. 214 * 215 * show() returns whether ksys_sync_helper() is invoked before suspend. 216 * store() accepts 0 or 1. 0 disables ksys_sync_helper() and 1 enables it. 217 */ 218 bool sync_on_suspend_enabled = !IS_ENABLED(CONFIG_SUSPEND_SKIP_SYNC); 219 220 static ssize_t sync_on_suspend_show(struct kobject *kobj, 221 struct kobj_attribute *attr, char *buf) 222 { 223 return sprintf(buf, "%d\n", sync_on_suspend_enabled); 224 } 225 226 static ssize_t sync_on_suspend_store(struct kobject *kobj, 227 struct kobj_attribute *attr, 228 const char *buf, size_t n) 229 { 230 unsigned long val; 231 232 if (kstrtoul(buf, 10, &val)) 233 return -EINVAL; 234 235 if (val > 1) 236 return -EINVAL; 237 238 sync_on_suspend_enabled = !!val; 239 return n; 240 } 241 242 power_attr(sync_on_suspend); 243 #endif /* CONFIG_SUSPEND */ 244 245 #ifdef CONFIG_PM_SLEEP_DEBUG 246 int pm_test_level = TEST_NONE; 247 248 static const char * const pm_tests[__TEST_AFTER_LAST] = { 249 [TEST_NONE] = "none", 250 [TEST_CORE] = "core", 251 [TEST_CPUS] = "processors", 252 [TEST_PLATFORM] = "platform", 253 [TEST_DEVICES] = "devices", 254 [TEST_FREEZER] = "freezer", 255 }; 256 257 static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr, 258 char *buf) 259 { 260 char *s = buf; 261 int level; 262 263 for (level = TEST_FIRST; level <= TEST_MAX; level++) 264 if (pm_tests[level]) { 265 if (level == pm_test_level) 266 s += sprintf(s, "[%s] ", pm_tests[level]); 267 else 268 s += sprintf(s, "%s ", pm_tests[level]); 269 } 270 271 if (s != buf) 272 /* convert the last space to a newline */ 273 *(s-1) = '\n'; 274 275 return (s - buf); 276 } 277 278 static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr, 279 const char *buf, size_t n) 280 { 281 unsigned int sleep_flags; 282 const char * const *s; 283 int error = -EINVAL; 284 int level; 285 char *p; 286 int len; 287 288 p = memchr(buf, '\n', n); 289 len = p ? p - buf : n; 290 291 sleep_flags = lock_system_sleep(); 292 293 level = TEST_FIRST; 294 for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++) 295 if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) { 296 pm_test_level = level; 297 error = 0; 298 break; 299 } 300 301 unlock_system_sleep(sleep_flags); 302 303 return error ? error : n; 304 } 305 306 power_attr(pm_test); 307 #endif /* CONFIG_PM_SLEEP_DEBUG */ 308 309 #define SUSPEND_NR_STEPS SUSPEND_RESUME 310 #define REC_FAILED_NUM 2 311 312 struct suspend_stats { 313 unsigned int step_failures[SUSPEND_NR_STEPS]; 314 unsigned int success; 315 unsigned int fail; 316 int last_failed_dev; 317 char failed_devs[REC_FAILED_NUM][40]; 318 int last_failed_errno; 319 int errno[REC_FAILED_NUM]; 320 int last_failed_step; 321 u64 last_hw_sleep; 322 u64 total_hw_sleep; 323 u64 max_hw_sleep; 324 enum suspend_stat_step failed_steps[REC_FAILED_NUM]; 325 }; 326 327 static struct suspend_stats suspend_stats; 328 static DEFINE_MUTEX(suspend_stats_lock); 329 330 void dpm_save_failed_dev(const char *name) 331 { 332 mutex_lock(&suspend_stats_lock); 333 334 strscpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev], 335 name, sizeof(suspend_stats.failed_devs[0])); 336 suspend_stats.last_failed_dev++; 337 suspend_stats.last_failed_dev %= REC_FAILED_NUM; 338 339 mutex_unlock(&suspend_stats_lock); 340 } 341 342 void dpm_save_failed_step(enum suspend_stat_step step) 343 { 344 suspend_stats.step_failures[step-1]++; 345 suspend_stats.failed_steps[suspend_stats.last_failed_step] = step; 346 suspend_stats.last_failed_step++; 347 suspend_stats.last_failed_step %= REC_FAILED_NUM; 348 } 349 350 void dpm_save_errno(int err) 351 { 352 if (!err) { 353 suspend_stats.success++; 354 return; 355 } 356 357 suspend_stats.fail++; 358 359 suspend_stats.errno[suspend_stats.last_failed_errno] = err; 360 suspend_stats.last_failed_errno++; 361 suspend_stats.last_failed_errno %= REC_FAILED_NUM; 362 } 363 364 void pm_report_hw_sleep_time(u64 t) 365 { 366 suspend_stats.last_hw_sleep = t; 367 suspend_stats.total_hw_sleep += t; 368 } 369 EXPORT_SYMBOL_GPL(pm_report_hw_sleep_time); 370 371 void pm_report_max_hw_sleep(u64 t) 372 { 373 suspend_stats.max_hw_sleep = t; 374 } 375 EXPORT_SYMBOL_GPL(pm_report_max_hw_sleep); 376 377 static const char * const suspend_step_names[] = { 378 [SUSPEND_WORKING] = "", 379 [SUSPEND_FREEZE] = "freeze", 380 [SUSPEND_PREPARE] = "prepare", 381 [SUSPEND_SUSPEND] = "suspend", 382 [SUSPEND_SUSPEND_LATE] = "suspend_late", 383 [SUSPEND_SUSPEND_NOIRQ] = "suspend_noirq", 384 [SUSPEND_RESUME_NOIRQ] = "resume_noirq", 385 [SUSPEND_RESUME_EARLY] = "resume_early", 386 [SUSPEND_RESUME] = "resume", 387 }; 388 389 #define suspend_attr(_name, format_str) \ 390 static ssize_t _name##_show(struct kobject *kobj, \ 391 struct kobj_attribute *attr, char *buf) \ 392 { \ 393 return sprintf(buf, format_str, suspend_stats._name); \ 394 } \ 395 static struct kobj_attribute _name = __ATTR_RO(_name) 396 397 suspend_attr(success, "%u\n"); 398 suspend_attr(fail, "%u\n"); 399 suspend_attr(last_hw_sleep, "%llu\n"); 400 suspend_attr(total_hw_sleep, "%llu\n"); 401 suspend_attr(max_hw_sleep, "%llu\n"); 402 403 #define suspend_step_attr(_name, step) \ 404 static ssize_t _name##_show(struct kobject *kobj, \ 405 struct kobj_attribute *attr, char *buf) \ 406 { \ 407 return sprintf(buf, "%u\n", \ 408 suspend_stats.step_failures[step-1]); \ 409 } \ 410 static struct kobj_attribute _name = __ATTR_RO(_name) 411 412 suspend_step_attr(failed_freeze, SUSPEND_FREEZE); 413 suspend_step_attr(failed_prepare, SUSPEND_PREPARE); 414 suspend_step_attr(failed_suspend, SUSPEND_SUSPEND); 415 suspend_step_attr(failed_suspend_late, SUSPEND_SUSPEND_LATE); 416 suspend_step_attr(failed_suspend_noirq, SUSPEND_SUSPEND_NOIRQ); 417 suspend_step_attr(failed_resume, SUSPEND_RESUME); 418 suspend_step_attr(failed_resume_early, SUSPEND_RESUME_EARLY); 419 suspend_step_attr(failed_resume_noirq, SUSPEND_RESUME_NOIRQ); 420 421 static ssize_t last_failed_dev_show(struct kobject *kobj, 422 struct kobj_attribute *attr, char *buf) 423 { 424 int index; 425 char *last_failed_dev = NULL; 426 427 index = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1; 428 index %= REC_FAILED_NUM; 429 last_failed_dev = suspend_stats.failed_devs[index]; 430 431 return sprintf(buf, "%s\n", last_failed_dev); 432 } 433 static struct kobj_attribute last_failed_dev = __ATTR_RO(last_failed_dev); 434 435 static ssize_t last_failed_errno_show(struct kobject *kobj, 436 struct kobj_attribute *attr, char *buf) 437 { 438 int index; 439 int last_failed_errno; 440 441 index = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1; 442 index %= REC_FAILED_NUM; 443 last_failed_errno = suspend_stats.errno[index]; 444 445 return sprintf(buf, "%d\n", last_failed_errno); 446 } 447 static struct kobj_attribute last_failed_errno = __ATTR_RO(last_failed_errno); 448 449 static ssize_t last_failed_step_show(struct kobject *kobj, 450 struct kobj_attribute *attr, char *buf) 451 { 452 enum suspend_stat_step step; 453 int index; 454 455 index = suspend_stats.last_failed_step + REC_FAILED_NUM - 1; 456 index %= REC_FAILED_NUM; 457 step = suspend_stats.failed_steps[index]; 458 459 return sprintf(buf, "%s\n", suspend_step_names[step]); 460 } 461 static struct kobj_attribute last_failed_step = __ATTR_RO(last_failed_step); 462 463 static struct attribute *suspend_attrs[] = { 464 &success.attr, 465 &fail.attr, 466 &failed_freeze.attr, 467 &failed_prepare.attr, 468 &failed_suspend.attr, 469 &failed_suspend_late.attr, 470 &failed_suspend_noirq.attr, 471 &failed_resume.attr, 472 &failed_resume_early.attr, 473 &failed_resume_noirq.attr, 474 &last_failed_dev.attr, 475 &last_failed_errno.attr, 476 &last_failed_step.attr, 477 &last_hw_sleep.attr, 478 &total_hw_sleep.attr, 479 &max_hw_sleep.attr, 480 NULL, 481 }; 482 483 static umode_t suspend_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx) 484 { 485 if (attr != &last_hw_sleep.attr && 486 attr != &total_hw_sleep.attr && 487 attr != &max_hw_sleep.attr) 488 return 0444; 489 490 #ifdef CONFIG_ACPI 491 if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) 492 return 0444; 493 #endif 494 return 0; 495 } 496 497 static const struct attribute_group suspend_attr_group = { 498 .name = "suspend_stats", 499 .attrs = suspend_attrs, 500 .is_visible = suspend_attr_is_visible, 501 }; 502 503 #ifdef CONFIG_DEBUG_FS 504 static int suspend_stats_show(struct seq_file *s, void *unused) 505 { 506 int i, index, last_dev, last_errno, last_step; 507 enum suspend_stat_step step; 508 509 last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1; 510 last_dev %= REC_FAILED_NUM; 511 last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1; 512 last_errno %= REC_FAILED_NUM; 513 last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1; 514 last_step %= REC_FAILED_NUM; 515 516 seq_printf(s, "success: %u\nfail: %u\n", 517 suspend_stats.success, suspend_stats.fail); 518 519 for (step = SUSPEND_FREEZE; step <= SUSPEND_NR_STEPS; step++) 520 seq_printf(s, "failed_%s: %u\n", suspend_step_names[step], 521 suspend_stats.step_failures[step-1]); 522 523 seq_printf(s, "failures:\n last_failed_dev:\t%-s\n", 524 suspend_stats.failed_devs[last_dev]); 525 for (i = 1; i < REC_FAILED_NUM; i++) { 526 index = last_dev + REC_FAILED_NUM - i; 527 index %= REC_FAILED_NUM; 528 seq_printf(s, "\t\t\t%-s\n", suspend_stats.failed_devs[index]); 529 } 530 seq_printf(s, " last_failed_errno:\t%-d\n", 531 suspend_stats.errno[last_errno]); 532 for (i = 1; i < REC_FAILED_NUM; i++) { 533 index = last_errno + REC_FAILED_NUM - i; 534 index %= REC_FAILED_NUM; 535 seq_printf(s, "\t\t\t%-d\n", suspend_stats.errno[index]); 536 } 537 seq_printf(s, " last_failed_step:\t%-s\n", 538 suspend_step_names[suspend_stats.failed_steps[last_step]]); 539 for (i = 1; i < REC_FAILED_NUM; i++) { 540 index = last_step + REC_FAILED_NUM - i; 541 index %= REC_FAILED_NUM; 542 seq_printf(s, "\t\t\t%-s\n", 543 suspend_step_names[suspend_stats.failed_steps[index]]); 544 } 545 546 return 0; 547 } 548 DEFINE_SHOW_ATTRIBUTE(suspend_stats); 549 550 static int __init pm_debugfs_init(void) 551 { 552 debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO, 553 NULL, NULL, &suspend_stats_fops); 554 return 0; 555 } 556 557 late_initcall(pm_debugfs_init); 558 #endif /* CONFIG_DEBUG_FS */ 559 560 #endif /* CONFIG_PM_SLEEP */ 561 562 #ifdef CONFIG_PM_SLEEP_DEBUG 563 /* 564 * pm_print_times: print time taken by devices to suspend and resume. 565 * 566 * show() returns whether printing of suspend and resume times is enabled. 567 * store() accepts 0 or 1. 0 disables printing and 1 enables it. 568 */ 569 bool pm_print_times_enabled; 570 571 static ssize_t pm_print_times_show(struct kobject *kobj, 572 struct kobj_attribute *attr, char *buf) 573 { 574 return sprintf(buf, "%d\n", pm_print_times_enabled); 575 } 576 577 static ssize_t pm_print_times_store(struct kobject *kobj, 578 struct kobj_attribute *attr, 579 const char *buf, size_t n) 580 { 581 unsigned long val; 582 583 if (kstrtoul(buf, 10, &val)) 584 return -EINVAL; 585 586 if (val > 1) 587 return -EINVAL; 588 589 pm_print_times_enabled = !!val; 590 return n; 591 } 592 593 power_attr(pm_print_times); 594 595 static inline void pm_print_times_init(void) 596 { 597 pm_print_times_enabled = !!initcall_debug; 598 } 599 600 static ssize_t pm_wakeup_irq_show(struct kobject *kobj, 601 struct kobj_attribute *attr, 602 char *buf) 603 { 604 if (!pm_wakeup_irq()) 605 return -ENODATA; 606 607 return sprintf(buf, "%u\n", pm_wakeup_irq()); 608 } 609 610 power_attr_ro(pm_wakeup_irq); 611 612 bool pm_debug_messages_on __read_mostly; 613 614 bool pm_debug_messages_should_print(void) 615 { 616 return pm_debug_messages_on && pm_suspend_target_state != PM_SUSPEND_ON; 617 } 618 EXPORT_SYMBOL_GPL(pm_debug_messages_should_print); 619 620 static ssize_t pm_debug_messages_show(struct kobject *kobj, 621 struct kobj_attribute *attr, char *buf) 622 { 623 return sprintf(buf, "%d\n", pm_debug_messages_on); 624 } 625 626 static ssize_t pm_debug_messages_store(struct kobject *kobj, 627 struct kobj_attribute *attr, 628 const char *buf, size_t n) 629 { 630 unsigned long val; 631 632 if (kstrtoul(buf, 10, &val)) 633 return -EINVAL; 634 635 if (val > 1) 636 return -EINVAL; 637 638 pm_debug_messages_on = !!val; 639 return n; 640 } 641 642 power_attr(pm_debug_messages); 643 644 static int __init pm_debug_messages_setup(char *str) 645 { 646 pm_debug_messages_on = true; 647 return 1; 648 } 649 __setup("pm_debug_messages", pm_debug_messages_setup); 650 651 #else /* !CONFIG_PM_SLEEP_DEBUG */ 652 static inline void pm_print_times_init(void) {} 653 #endif /* CONFIG_PM_SLEEP_DEBUG */ 654 655 struct kobject *power_kobj; 656 657 /* 658 * state - control system sleep states. 659 * 660 * show() returns available sleep state labels, which may be "mem", "standby", 661 * "freeze" and "disk" (hibernation). 662 * See Documentation/admin-guide/pm/sleep-states.rst for a description of 663 * what they mean. 664 * 665 * store() accepts one of those strings, translates it into the proper 666 * enumerated value, and initiates a suspend transition. 667 */ 668 static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr, 669 char *buf) 670 { 671 char *s = buf; 672 #ifdef CONFIG_SUSPEND 673 suspend_state_t i; 674 675 for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++) 676 if (pm_states[i]) 677 s += sprintf(s,"%s ", pm_states[i]); 678 679 #endif 680 if (hibernation_available()) 681 s += sprintf(s, "disk "); 682 if (s != buf) 683 /* convert the last space to a newline */ 684 *(s-1) = '\n'; 685 return (s - buf); 686 } 687 688 static suspend_state_t decode_state(const char *buf, size_t n) 689 { 690 #ifdef CONFIG_SUSPEND 691 suspend_state_t state; 692 #endif 693 char *p; 694 int len; 695 696 p = memchr(buf, '\n', n); 697 len = p ? p - buf : n; 698 699 /* Check hibernation first. */ 700 if (len == 4 && str_has_prefix(buf, "disk")) 701 return PM_SUSPEND_MAX; 702 703 #ifdef CONFIG_SUSPEND 704 for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) { 705 const char *label = pm_states[state]; 706 707 if (label && len == strlen(label) && !strncmp(buf, label, len)) 708 return state; 709 } 710 #endif 711 712 return PM_SUSPEND_ON; 713 } 714 715 static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, 716 const char *buf, size_t n) 717 { 718 suspend_state_t state; 719 int error; 720 721 error = pm_autosleep_lock(); 722 if (error) 723 return error; 724 725 if (pm_autosleep_state() > PM_SUSPEND_ON) { 726 error = -EBUSY; 727 goto out; 728 } 729 730 state = decode_state(buf, n); 731 if (state < PM_SUSPEND_MAX) { 732 if (state == PM_SUSPEND_MEM) 733 state = mem_sleep_current; 734 735 error = pm_suspend(state); 736 } else if (state == PM_SUSPEND_MAX) { 737 error = hibernate(); 738 } else { 739 error = -EINVAL; 740 } 741 742 out: 743 pm_autosleep_unlock(); 744 return error ? error : n; 745 } 746 747 power_attr(state); 748 749 #ifdef CONFIG_PM_SLEEP 750 /* 751 * The 'wakeup_count' attribute, along with the functions defined in 752 * drivers/base/power/wakeup.c, provides a means by which wakeup events can be 753 * handled in a non-racy way. 754 * 755 * If a wakeup event occurs when the system is in a sleep state, it simply is 756 * woken up. In turn, if an event that would wake the system up from a sleep 757 * state occurs when it is undergoing a transition to that sleep state, the 758 * transition should be aborted. Moreover, if such an event occurs when the 759 * system is in the working state, an attempt to start a transition to the 760 * given sleep state should fail during certain period after the detection of 761 * the event. Using the 'state' attribute alone is not sufficient to satisfy 762 * these requirements, because a wakeup event may occur exactly when 'state' 763 * is being written to and may be delivered to user space right before it is 764 * frozen, so the event will remain only partially processed until the system is 765 * woken up by another event. In particular, it won't cause the transition to 766 * a sleep state to be aborted. 767 * 768 * This difficulty may be overcome if user space uses 'wakeup_count' before 769 * writing to 'state'. It first should read from 'wakeup_count' and store 770 * the read value. Then, after carrying out its own preparations for the system 771 * transition to a sleep state, it should write the stored value to 772 * 'wakeup_count'. If that fails, at least one wakeup event has occurred since 773 * 'wakeup_count' was read and 'state' should not be written to. Otherwise, it 774 * is allowed to write to 'state', but the transition will be aborted if there 775 * are any wakeup events detected after 'wakeup_count' was written to. 776 */ 777 778 static ssize_t wakeup_count_show(struct kobject *kobj, 779 struct kobj_attribute *attr, 780 char *buf) 781 { 782 unsigned int val; 783 784 return pm_get_wakeup_count(&val, true) ? 785 sprintf(buf, "%u\n", val) : -EINTR; 786 } 787 788 static ssize_t wakeup_count_store(struct kobject *kobj, 789 struct kobj_attribute *attr, 790 const char *buf, size_t n) 791 { 792 unsigned int val; 793 int error; 794 795 error = pm_autosleep_lock(); 796 if (error) 797 return error; 798 799 if (pm_autosleep_state() > PM_SUSPEND_ON) { 800 error = -EBUSY; 801 goto out; 802 } 803 804 error = -EINVAL; 805 if (sscanf(buf, "%u", &val) == 1) { 806 if (pm_save_wakeup_count(val)) 807 error = n; 808 else 809 pm_print_active_wakeup_sources(); 810 } 811 812 out: 813 pm_autosleep_unlock(); 814 return error; 815 } 816 817 power_attr(wakeup_count); 818 819 #ifdef CONFIG_PM_AUTOSLEEP 820 static ssize_t autosleep_show(struct kobject *kobj, 821 struct kobj_attribute *attr, 822 char *buf) 823 { 824 suspend_state_t state = pm_autosleep_state(); 825 826 if (state == PM_SUSPEND_ON) 827 return sprintf(buf, "off\n"); 828 829 #ifdef CONFIG_SUSPEND 830 if (state < PM_SUSPEND_MAX) 831 return sprintf(buf, "%s\n", pm_states[state] ? 832 pm_states[state] : "error"); 833 #endif 834 #ifdef CONFIG_HIBERNATION 835 return sprintf(buf, "disk\n"); 836 #else 837 return sprintf(buf, "error"); 838 #endif 839 } 840 841 static ssize_t autosleep_store(struct kobject *kobj, 842 struct kobj_attribute *attr, 843 const char *buf, size_t n) 844 { 845 suspend_state_t state = decode_state(buf, n); 846 int error; 847 848 if (state == PM_SUSPEND_ON 849 && strcmp(buf, "off") && strcmp(buf, "off\n")) 850 return -EINVAL; 851 852 if (state == PM_SUSPEND_MEM) 853 state = mem_sleep_current; 854 855 error = pm_autosleep_set_state(state); 856 return error ? error : n; 857 } 858 859 power_attr(autosleep); 860 #endif /* CONFIG_PM_AUTOSLEEP */ 861 862 #ifdef CONFIG_PM_WAKELOCKS 863 static ssize_t wake_lock_show(struct kobject *kobj, 864 struct kobj_attribute *attr, 865 char *buf) 866 { 867 return pm_show_wakelocks(buf, true); 868 } 869 870 static ssize_t wake_lock_store(struct kobject *kobj, 871 struct kobj_attribute *attr, 872 const char *buf, size_t n) 873 { 874 int error = pm_wake_lock(buf); 875 return error ? error : n; 876 } 877 878 power_attr(wake_lock); 879 880 static ssize_t wake_unlock_show(struct kobject *kobj, 881 struct kobj_attribute *attr, 882 char *buf) 883 { 884 return pm_show_wakelocks(buf, false); 885 } 886 887 static ssize_t wake_unlock_store(struct kobject *kobj, 888 struct kobj_attribute *attr, 889 const char *buf, size_t n) 890 { 891 int error = pm_wake_unlock(buf); 892 return error ? error : n; 893 } 894 895 power_attr(wake_unlock); 896 897 #endif /* CONFIG_PM_WAKELOCKS */ 898 #endif /* CONFIG_PM_SLEEP */ 899 900 #ifdef CONFIG_PM_TRACE 901 int pm_trace_enabled; 902 903 static ssize_t pm_trace_show(struct kobject *kobj, struct kobj_attribute *attr, 904 char *buf) 905 { 906 return sprintf(buf, "%d\n", pm_trace_enabled); 907 } 908 909 static ssize_t 910 pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr, 911 const char *buf, size_t n) 912 { 913 int val; 914 915 if (sscanf(buf, "%d", &val) == 1) { 916 pm_trace_enabled = !!val; 917 if (pm_trace_enabled) { 918 pr_warn("PM: Enabling pm_trace changes system date and time during resume.\n" 919 "PM: Correct system time has to be restored manually after resume.\n"); 920 } 921 return n; 922 } 923 return -EINVAL; 924 } 925 926 power_attr(pm_trace); 927 928 static ssize_t pm_trace_dev_match_show(struct kobject *kobj, 929 struct kobj_attribute *attr, 930 char *buf) 931 { 932 return show_trace_dev_match(buf, PAGE_SIZE); 933 } 934 935 power_attr_ro(pm_trace_dev_match); 936 937 #endif /* CONFIG_PM_TRACE */ 938 939 #ifdef CONFIG_FREEZER 940 static ssize_t pm_freeze_timeout_show(struct kobject *kobj, 941 struct kobj_attribute *attr, char *buf) 942 { 943 return sprintf(buf, "%u\n", freeze_timeout_msecs); 944 } 945 946 static ssize_t pm_freeze_timeout_store(struct kobject *kobj, 947 struct kobj_attribute *attr, 948 const char *buf, size_t n) 949 { 950 unsigned long val; 951 952 if (kstrtoul(buf, 10, &val)) 953 return -EINVAL; 954 955 freeze_timeout_msecs = val; 956 return n; 957 } 958 959 power_attr(pm_freeze_timeout); 960 961 #endif /* CONFIG_FREEZER*/ 962 963 static struct attribute * g[] = { 964 &state_attr.attr, 965 #ifdef CONFIG_PM_TRACE 966 &pm_trace_attr.attr, 967 &pm_trace_dev_match_attr.attr, 968 #endif 969 #ifdef CONFIG_PM_SLEEP 970 &pm_async_attr.attr, 971 &wakeup_count_attr.attr, 972 #ifdef CONFIG_SUSPEND 973 &mem_sleep_attr.attr, 974 &sync_on_suspend_attr.attr, 975 #endif 976 #ifdef CONFIG_PM_AUTOSLEEP 977 &autosleep_attr.attr, 978 #endif 979 #ifdef CONFIG_PM_WAKELOCKS 980 &wake_lock_attr.attr, 981 &wake_unlock_attr.attr, 982 #endif 983 #ifdef CONFIG_PM_SLEEP_DEBUG 984 &pm_test_attr.attr, 985 &pm_print_times_attr.attr, 986 &pm_wakeup_irq_attr.attr, 987 &pm_debug_messages_attr.attr, 988 #endif 989 #endif 990 #ifdef CONFIG_FREEZER 991 &pm_freeze_timeout_attr.attr, 992 #endif 993 NULL, 994 }; 995 996 static const struct attribute_group attr_group = { 997 .attrs = g, 998 }; 999 1000 static const struct attribute_group *attr_groups[] = { 1001 &attr_group, 1002 #ifdef CONFIG_PM_SLEEP 1003 &suspend_attr_group, 1004 #endif 1005 NULL, 1006 }; 1007 1008 struct workqueue_struct *pm_wq; 1009 EXPORT_SYMBOL_GPL(pm_wq); 1010 1011 static int __init pm_start_workqueue(void) 1012 { 1013 pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0); 1014 1015 return pm_wq ? 0 : -ENOMEM; 1016 } 1017 1018 static int __init pm_init(void) 1019 { 1020 int error = pm_start_workqueue(); 1021 if (error) 1022 return error; 1023 hibernate_image_size_init(); 1024 hibernate_reserved_size_init(); 1025 pm_states_init(); 1026 power_kobj = kobject_create_and_add("power", NULL); 1027 if (!power_kobj) 1028 return -ENOMEM; 1029 error = sysfs_create_groups(power_kobj, attr_groups); 1030 if (error) 1031 return error; 1032 pm_print_times_init(); 1033 return pm_autosleep_init(); 1034 } 1035 1036 core_initcall(pm_init); 1037