1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * kernel/power/main.c - PM subsystem core functionality. 4 * 5 * Copyright (c) 2003 Patrick Mochel 6 * Copyright (c) 2003 Open Source Development Lab 7 */ 8 9 #include <linux/acpi.h> 10 #include <linux/export.h> 11 #include <linux/init.h> 12 #include <linux/kobject.h> 13 #include <linux/string.h> 14 #include <linux/pm-trace.h> 15 #include <linux/workqueue.h> 16 #include <linux/debugfs.h> 17 #include <linux/seq_file.h> 18 #include <linux/suspend.h> 19 #include <linux/syscalls.h> 20 #include <linux/pm_runtime.h> 21 22 #include "power.h" 23 24 #ifdef CONFIG_PM_SLEEP 25 /* 26 * The following functions are used by the suspend/hibernate code to temporarily 27 * change gfp_allowed_mask in order to avoid using I/O during memory allocations 28 * while devices are suspended. To avoid races with the suspend/hibernate code, 29 * they should always be called with system_transition_mutex held 30 * (gfp_allowed_mask also should only be modified with system_transition_mutex 31 * held, unless the suspend/hibernate code is guaranteed not to run in parallel 32 * with that modification). 33 */ 34 static unsigned int saved_gfp_count; 35 static gfp_t saved_gfp_mask; 36 37 void pm_restore_gfp_mask(void) 38 { 39 WARN_ON(!mutex_is_locked(&system_transition_mutex)); 40 41 if (WARN_ON(!saved_gfp_count) || --saved_gfp_count) 42 return; 43 44 gfp_allowed_mask = saved_gfp_mask; 45 saved_gfp_mask = 0; 46 47 pm_pr_dbg("GFP mask restored\n"); 48 } 49 50 void pm_restrict_gfp_mask(void) 51 { 52 WARN_ON(!mutex_is_locked(&system_transition_mutex)); 53 54 if (saved_gfp_count++) { 55 WARN_ON((saved_gfp_mask & ~(__GFP_IO | __GFP_FS)) != gfp_allowed_mask); 56 return; 57 } 58 59 saved_gfp_mask = gfp_allowed_mask; 60 gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS); 61 62 pm_pr_dbg("GFP mask restricted\n"); 63 } 64 65 unsigned int lock_system_sleep(void) 66 { 67 unsigned int flags = current->flags; 68 current->flags |= PF_NOFREEZE; 69 mutex_lock(&system_transition_mutex); 70 return flags; 71 } 72 EXPORT_SYMBOL_GPL(lock_system_sleep); 73 74 void unlock_system_sleep(unsigned int flags) 75 { 76 if (!(flags & PF_NOFREEZE)) 77 current->flags &= ~PF_NOFREEZE; 78 mutex_unlock(&system_transition_mutex); 79 } 80 EXPORT_SYMBOL_GPL(unlock_system_sleep); 81 82 void ksys_sync_helper(void) 83 { 84 ktime_t start; 85 long elapsed_msecs; 86 87 start = ktime_get(); 88 ksys_sync(); 89 elapsed_msecs = ktime_to_ms(ktime_sub(ktime_get(), start)); 90 pr_info("Filesystems sync: %ld.%03ld seconds\n", 91 elapsed_msecs / MSEC_PER_SEC, elapsed_msecs % MSEC_PER_SEC); 92 } 93 EXPORT_SYMBOL_GPL(ksys_sync_helper); 94 95 /* Routines for PM-transition notifications */ 96 97 static BLOCKING_NOTIFIER_HEAD(pm_chain_head); 98 99 int register_pm_notifier(struct notifier_block *nb) 100 { 101 return blocking_notifier_chain_register(&pm_chain_head, nb); 102 } 103 EXPORT_SYMBOL_GPL(register_pm_notifier); 104 105 int unregister_pm_notifier(struct notifier_block *nb) 106 { 107 return blocking_notifier_chain_unregister(&pm_chain_head, nb); 108 } 109 EXPORT_SYMBOL_GPL(unregister_pm_notifier); 110 111 int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down) 112 { 113 int ret; 114 115 ret = blocking_notifier_call_chain_robust(&pm_chain_head, val_up, val_down, NULL); 116 117 return notifier_to_errno(ret); 118 } 119 120 int pm_notifier_call_chain(unsigned long val) 121 { 122 return blocking_notifier_call_chain(&pm_chain_head, val, NULL); 123 } 124 125 /* If set, devices may be suspended and resumed asynchronously. */ 126 int pm_async_enabled = 1; 127 128 static int __init pm_async_setup(char *str) 129 { 130 if (!strcmp(str, "off")) 131 pm_async_enabled = 0; 132 return 1; 133 } 134 __setup("pm_async=", pm_async_setup); 135 136 static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr, 137 char *buf) 138 { 139 return sysfs_emit(buf, "%d\n", pm_async_enabled); 140 } 141 142 static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr, 143 const char *buf, size_t n) 144 { 145 unsigned long val; 146 147 if (kstrtoul(buf, 10, &val)) 148 return -EINVAL; 149 150 if (val > 1) 151 return -EINVAL; 152 153 pm_async_enabled = val; 154 return n; 155 } 156 157 power_attr(pm_async); 158 159 #ifdef CONFIG_SUSPEND 160 static ssize_t mem_sleep_show(struct kobject *kobj, struct kobj_attribute *attr, 161 char *buf) 162 { 163 ssize_t count = 0; 164 suspend_state_t i; 165 166 for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++) { 167 if (i >= PM_SUSPEND_MEM && cxl_mem_active()) 168 continue; 169 if (mem_sleep_states[i]) { 170 const char *label = mem_sleep_states[i]; 171 172 if (mem_sleep_current == i) 173 count += sysfs_emit_at(buf, count, "[%s] ", label); 174 else 175 count += sysfs_emit_at(buf, count, "%s ", label); 176 } 177 } 178 179 /* Convert the last space to a newline if needed. */ 180 if (count > 0) 181 buf[count - 1] = '\n'; 182 183 return count; 184 } 185 186 static suspend_state_t decode_suspend_state(const char *buf, size_t n) 187 { 188 suspend_state_t state; 189 char *p; 190 int len; 191 192 p = memchr(buf, '\n', n); 193 len = p ? p - buf : n; 194 195 for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) { 196 const char *label = mem_sleep_states[state]; 197 198 if (label && len == strlen(label) && !strncmp(buf, label, len)) 199 return state; 200 } 201 202 return PM_SUSPEND_ON; 203 } 204 205 static ssize_t mem_sleep_store(struct kobject *kobj, struct kobj_attribute *attr, 206 const char *buf, size_t n) 207 { 208 suspend_state_t state; 209 int error; 210 211 error = pm_autosleep_lock(); 212 if (error) 213 return error; 214 215 if (pm_autosleep_state() > PM_SUSPEND_ON) { 216 error = -EBUSY; 217 goto out; 218 } 219 220 state = decode_suspend_state(buf, n); 221 if (state < PM_SUSPEND_MAX && state > PM_SUSPEND_ON) 222 mem_sleep_current = state; 223 else 224 error = -EINVAL; 225 226 out: 227 pm_autosleep_unlock(); 228 return error ? error : n; 229 } 230 231 power_attr(mem_sleep); 232 233 /* 234 * sync_on_suspend: invoke ksys_sync_helper() before suspend. 235 * 236 * show() returns whether ksys_sync_helper() is invoked before suspend. 237 * store() accepts 0 or 1. 0 disables ksys_sync_helper() and 1 enables it. 238 */ 239 bool sync_on_suspend_enabled = !IS_ENABLED(CONFIG_SUSPEND_SKIP_SYNC); 240 241 static ssize_t sync_on_suspend_show(struct kobject *kobj, 242 struct kobj_attribute *attr, char *buf) 243 { 244 return sysfs_emit(buf, "%d\n", sync_on_suspend_enabled); 245 } 246 247 static ssize_t sync_on_suspend_store(struct kobject *kobj, 248 struct kobj_attribute *attr, 249 const char *buf, size_t n) 250 { 251 unsigned long val; 252 253 if (kstrtoul(buf, 10, &val)) 254 return -EINVAL; 255 256 if (val > 1) 257 return -EINVAL; 258 259 sync_on_suspend_enabled = !!val; 260 return n; 261 } 262 263 power_attr(sync_on_suspend); 264 #endif /* CONFIG_SUSPEND */ 265 266 #ifdef CONFIG_PM_SLEEP_DEBUG 267 int pm_test_level = TEST_NONE; 268 269 static const char * const pm_tests[__TEST_AFTER_LAST] = { 270 [TEST_NONE] = "none", 271 [TEST_CORE] = "core", 272 [TEST_CPUS] = "processors", 273 [TEST_PLATFORM] = "platform", 274 [TEST_DEVICES] = "devices", 275 [TEST_FREEZER] = "freezer", 276 }; 277 278 static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr, 279 char *buf) 280 { 281 ssize_t count = 0; 282 int level; 283 284 for (level = TEST_FIRST; level <= TEST_MAX; level++) 285 if (pm_tests[level]) { 286 if (level == pm_test_level) 287 count += sysfs_emit_at(buf, count, "[%s] ", pm_tests[level]); 288 else 289 count += sysfs_emit_at(buf, count, "%s ", pm_tests[level]); 290 } 291 292 /* Convert the last space to a newline if needed. */ 293 if (count > 0) 294 buf[count - 1] = '\n'; 295 296 return count; 297 } 298 299 static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr, 300 const char *buf, size_t n) 301 { 302 unsigned int sleep_flags; 303 const char * const *s; 304 int error = -EINVAL; 305 int level; 306 char *p; 307 int len; 308 309 p = memchr(buf, '\n', n); 310 len = p ? p - buf : n; 311 312 sleep_flags = lock_system_sleep(); 313 314 level = TEST_FIRST; 315 for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++) 316 if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) { 317 pm_test_level = level; 318 error = 0; 319 break; 320 } 321 322 unlock_system_sleep(sleep_flags); 323 324 return error ? error : n; 325 } 326 327 power_attr(pm_test); 328 #endif /* CONFIG_PM_SLEEP_DEBUG */ 329 330 #define SUSPEND_NR_STEPS SUSPEND_RESUME 331 #define REC_FAILED_NUM 2 332 333 struct suspend_stats { 334 unsigned int step_failures[SUSPEND_NR_STEPS]; 335 unsigned int success; 336 unsigned int fail; 337 int last_failed_dev; 338 char failed_devs[REC_FAILED_NUM][40]; 339 int last_failed_errno; 340 int errno[REC_FAILED_NUM]; 341 int last_failed_step; 342 u64 last_hw_sleep; 343 u64 total_hw_sleep; 344 u64 max_hw_sleep; 345 enum suspend_stat_step failed_steps[REC_FAILED_NUM]; 346 }; 347 348 static struct suspend_stats suspend_stats; 349 static DEFINE_MUTEX(suspend_stats_lock); 350 351 void dpm_save_failed_dev(const char *name) 352 { 353 mutex_lock(&suspend_stats_lock); 354 355 strscpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev], 356 name, sizeof(suspend_stats.failed_devs[0])); 357 suspend_stats.last_failed_dev++; 358 suspend_stats.last_failed_dev %= REC_FAILED_NUM; 359 360 mutex_unlock(&suspend_stats_lock); 361 } 362 363 void dpm_save_failed_step(enum suspend_stat_step step) 364 { 365 suspend_stats.step_failures[step-1]++; 366 suspend_stats.failed_steps[suspend_stats.last_failed_step] = step; 367 suspend_stats.last_failed_step++; 368 suspend_stats.last_failed_step %= REC_FAILED_NUM; 369 } 370 371 void dpm_save_errno(int err) 372 { 373 if (!err) { 374 suspend_stats.success++; 375 return; 376 } 377 378 suspend_stats.fail++; 379 380 suspend_stats.errno[suspend_stats.last_failed_errno] = err; 381 suspend_stats.last_failed_errno++; 382 suspend_stats.last_failed_errno %= REC_FAILED_NUM; 383 } 384 385 void pm_report_hw_sleep_time(u64 t) 386 { 387 suspend_stats.last_hw_sleep = t; 388 suspend_stats.total_hw_sleep += t; 389 } 390 EXPORT_SYMBOL_GPL(pm_report_hw_sleep_time); 391 392 void pm_report_max_hw_sleep(u64 t) 393 { 394 suspend_stats.max_hw_sleep = t; 395 } 396 EXPORT_SYMBOL_GPL(pm_report_max_hw_sleep); 397 398 static const char * const suspend_step_names[] = { 399 [SUSPEND_WORKING] = "", 400 [SUSPEND_FREEZE] = "freeze", 401 [SUSPEND_PREPARE] = "prepare", 402 [SUSPEND_SUSPEND] = "suspend", 403 [SUSPEND_SUSPEND_LATE] = "suspend_late", 404 [SUSPEND_SUSPEND_NOIRQ] = "suspend_noirq", 405 [SUSPEND_RESUME_NOIRQ] = "resume_noirq", 406 [SUSPEND_RESUME_EARLY] = "resume_early", 407 [SUSPEND_RESUME] = "resume", 408 }; 409 410 #define suspend_attr(_name, format_str) \ 411 static ssize_t _name##_show(struct kobject *kobj, \ 412 struct kobj_attribute *attr, char *buf) \ 413 { \ 414 return sysfs_emit(buf, format_str, suspend_stats._name);\ 415 } \ 416 static struct kobj_attribute _name = __ATTR_RO(_name) 417 418 suspend_attr(success, "%u\n"); 419 suspend_attr(fail, "%u\n"); 420 suspend_attr(last_hw_sleep, "%llu\n"); 421 suspend_attr(total_hw_sleep, "%llu\n"); 422 suspend_attr(max_hw_sleep, "%llu\n"); 423 424 #define suspend_step_attr(_name, step) \ 425 static ssize_t _name##_show(struct kobject *kobj, \ 426 struct kobj_attribute *attr, char *buf) \ 427 { \ 428 return sysfs_emit(buf, "%u\n", \ 429 suspend_stats.step_failures[step-1]); \ 430 } \ 431 static struct kobj_attribute _name = __ATTR_RO(_name) 432 433 suspend_step_attr(failed_freeze, SUSPEND_FREEZE); 434 suspend_step_attr(failed_prepare, SUSPEND_PREPARE); 435 suspend_step_attr(failed_suspend, SUSPEND_SUSPEND); 436 suspend_step_attr(failed_suspend_late, SUSPEND_SUSPEND_LATE); 437 suspend_step_attr(failed_suspend_noirq, SUSPEND_SUSPEND_NOIRQ); 438 suspend_step_attr(failed_resume, SUSPEND_RESUME); 439 suspend_step_attr(failed_resume_early, SUSPEND_RESUME_EARLY); 440 suspend_step_attr(failed_resume_noirq, SUSPEND_RESUME_NOIRQ); 441 442 static ssize_t last_failed_dev_show(struct kobject *kobj, 443 struct kobj_attribute *attr, char *buf) 444 { 445 int index; 446 char *last_failed_dev = NULL; 447 448 index = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1; 449 index %= REC_FAILED_NUM; 450 last_failed_dev = suspend_stats.failed_devs[index]; 451 452 return sysfs_emit(buf, "%s\n", last_failed_dev); 453 } 454 static struct kobj_attribute last_failed_dev = __ATTR_RO(last_failed_dev); 455 456 static ssize_t last_failed_errno_show(struct kobject *kobj, 457 struct kobj_attribute *attr, char *buf) 458 { 459 int index; 460 int last_failed_errno; 461 462 index = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1; 463 index %= REC_FAILED_NUM; 464 last_failed_errno = suspend_stats.errno[index]; 465 466 return sysfs_emit(buf, "%d\n", last_failed_errno); 467 } 468 static struct kobj_attribute last_failed_errno = __ATTR_RO(last_failed_errno); 469 470 static ssize_t last_failed_step_show(struct kobject *kobj, 471 struct kobj_attribute *attr, char *buf) 472 { 473 enum suspend_stat_step step; 474 int index; 475 476 index = suspend_stats.last_failed_step + REC_FAILED_NUM - 1; 477 index %= REC_FAILED_NUM; 478 step = suspend_stats.failed_steps[index]; 479 480 return sysfs_emit(buf, "%s\n", suspend_step_names[step]); 481 } 482 static struct kobj_attribute last_failed_step = __ATTR_RO(last_failed_step); 483 484 static struct attribute *suspend_attrs[] = { 485 &success.attr, 486 &fail.attr, 487 &failed_freeze.attr, 488 &failed_prepare.attr, 489 &failed_suspend.attr, 490 &failed_suspend_late.attr, 491 &failed_suspend_noirq.attr, 492 &failed_resume.attr, 493 &failed_resume_early.attr, 494 &failed_resume_noirq.attr, 495 &last_failed_dev.attr, 496 &last_failed_errno.attr, 497 &last_failed_step.attr, 498 &last_hw_sleep.attr, 499 &total_hw_sleep.attr, 500 &max_hw_sleep.attr, 501 NULL, 502 }; 503 504 static umode_t suspend_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx) 505 { 506 if (attr != &last_hw_sleep.attr && 507 attr != &total_hw_sleep.attr && 508 attr != &max_hw_sleep.attr) 509 return 0444; 510 511 #ifdef CONFIG_ACPI 512 if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) 513 return 0444; 514 #endif 515 return 0; 516 } 517 518 static const struct attribute_group suspend_attr_group = { 519 .name = "suspend_stats", 520 .attrs = suspend_attrs, 521 .is_visible = suspend_attr_is_visible, 522 }; 523 524 #ifdef CONFIG_DEBUG_FS 525 static int suspend_stats_show(struct seq_file *s, void *unused) 526 { 527 int i, index, last_dev, last_errno, last_step; 528 enum suspend_stat_step step; 529 530 last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1; 531 last_dev %= REC_FAILED_NUM; 532 last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1; 533 last_errno %= REC_FAILED_NUM; 534 last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1; 535 last_step %= REC_FAILED_NUM; 536 537 seq_printf(s, "success: %u\nfail: %u\n", 538 suspend_stats.success, suspend_stats.fail); 539 540 for (step = SUSPEND_FREEZE; step <= SUSPEND_NR_STEPS; step++) 541 seq_printf(s, "failed_%s: %u\n", suspend_step_names[step], 542 suspend_stats.step_failures[step-1]); 543 544 seq_printf(s, "failures:\n last_failed_dev:\t%-s\n", 545 suspend_stats.failed_devs[last_dev]); 546 for (i = 1; i < REC_FAILED_NUM; i++) { 547 index = last_dev + REC_FAILED_NUM - i; 548 index %= REC_FAILED_NUM; 549 seq_printf(s, "\t\t\t%-s\n", suspend_stats.failed_devs[index]); 550 } 551 seq_printf(s, " last_failed_errno:\t%-d\n", 552 suspend_stats.errno[last_errno]); 553 for (i = 1; i < REC_FAILED_NUM; i++) { 554 index = last_errno + REC_FAILED_NUM - i; 555 index %= REC_FAILED_NUM; 556 seq_printf(s, "\t\t\t%-d\n", suspend_stats.errno[index]); 557 } 558 seq_printf(s, " last_failed_step:\t%-s\n", 559 suspend_step_names[suspend_stats.failed_steps[last_step]]); 560 for (i = 1; i < REC_FAILED_NUM; i++) { 561 index = last_step + REC_FAILED_NUM - i; 562 index %= REC_FAILED_NUM; 563 seq_printf(s, "\t\t\t%-s\n", 564 suspend_step_names[suspend_stats.failed_steps[index]]); 565 } 566 567 return 0; 568 } 569 DEFINE_SHOW_ATTRIBUTE(suspend_stats); 570 571 static int __init pm_debugfs_init(void) 572 { 573 debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO, 574 NULL, NULL, &suspend_stats_fops); 575 return 0; 576 } 577 578 late_initcall(pm_debugfs_init); 579 #endif /* CONFIG_DEBUG_FS */ 580 581 bool pm_sleep_transition_in_progress(void) 582 { 583 return pm_suspend_in_progress() || hibernation_in_progress(); 584 } 585 #endif /* CONFIG_PM_SLEEP */ 586 587 #ifdef CONFIG_PM_SLEEP_DEBUG 588 /* 589 * pm_print_times: print time taken by devices to suspend and resume. 590 * 591 * show() returns whether printing of suspend and resume times is enabled. 592 * store() accepts 0 or 1. 0 disables printing and 1 enables it. 593 */ 594 bool pm_print_times_enabled; 595 596 static ssize_t pm_print_times_show(struct kobject *kobj, 597 struct kobj_attribute *attr, char *buf) 598 { 599 return sysfs_emit(buf, "%d\n", pm_print_times_enabled); 600 } 601 602 static ssize_t pm_print_times_store(struct kobject *kobj, 603 struct kobj_attribute *attr, 604 const char *buf, size_t n) 605 { 606 unsigned long val; 607 608 if (kstrtoul(buf, 10, &val)) 609 return -EINVAL; 610 611 if (val > 1) 612 return -EINVAL; 613 614 pm_print_times_enabled = !!val; 615 return n; 616 } 617 618 power_attr(pm_print_times); 619 620 static inline void pm_print_times_init(void) 621 { 622 pm_print_times_enabled = initcall_debug; 623 } 624 625 static ssize_t pm_wakeup_irq_show(struct kobject *kobj, 626 struct kobj_attribute *attr, 627 char *buf) 628 { 629 if (!pm_wakeup_irq()) 630 return -ENODATA; 631 632 return sysfs_emit(buf, "%u\n", pm_wakeup_irq()); 633 } 634 635 power_attr_ro(pm_wakeup_irq); 636 637 bool pm_debug_messages_on __read_mostly; 638 639 bool pm_debug_messages_should_print(void) 640 { 641 return pm_debug_messages_on && pm_sleep_transition_in_progress(); 642 } 643 EXPORT_SYMBOL_GPL(pm_debug_messages_should_print); 644 645 static ssize_t pm_debug_messages_show(struct kobject *kobj, 646 struct kobj_attribute *attr, char *buf) 647 { 648 return sysfs_emit(buf, "%d\n", pm_debug_messages_on); 649 } 650 651 static ssize_t pm_debug_messages_store(struct kobject *kobj, 652 struct kobj_attribute *attr, 653 const char *buf, size_t n) 654 { 655 unsigned long val; 656 657 if (kstrtoul(buf, 10, &val)) 658 return -EINVAL; 659 660 if (val > 1) 661 return -EINVAL; 662 663 pm_debug_messages_on = !!val; 664 return n; 665 } 666 667 power_attr(pm_debug_messages); 668 669 static int __init pm_debug_messages_setup(char *str) 670 { 671 pm_debug_messages_on = true; 672 return 1; 673 } 674 __setup("pm_debug_messages", pm_debug_messages_setup); 675 676 #else /* !CONFIG_PM_SLEEP_DEBUG */ 677 static inline void pm_print_times_init(void) {} 678 #endif /* CONFIG_PM_SLEEP_DEBUG */ 679 680 struct kobject *power_kobj; 681 682 /* 683 * state - control system sleep states. 684 * 685 * show() returns available sleep state labels, which may be "mem", "standby", 686 * "freeze" and "disk" (hibernation). 687 * See Documentation/admin-guide/pm/sleep-states.rst for a description of 688 * what they mean. 689 * 690 * store() accepts one of those strings, translates it into the proper 691 * enumerated value, and initiates a suspend transition. 692 */ 693 static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr, 694 char *buf) 695 { 696 ssize_t count = 0; 697 #ifdef CONFIG_SUSPEND 698 suspend_state_t i; 699 700 for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++) 701 if (pm_states[i]) 702 count += sysfs_emit_at(buf, count, "%s ", pm_states[i]); 703 704 #endif 705 if (hibernation_available()) 706 count += sysfs_emit_at(buf, count, "disk "); 707 708 /* Convert the last space to a newline if needed. */ 709 if (count > 0) 710 buf[count - 1] = '\n'; 711 712 return count; 713 } 714 715 static suspend_state_t decode_state(const char *buf, size_t n) 716 { 717 #ifdef CONFIG_SUSPEND 718 suspend_state_t state; 719 #endif 720 char *p; 721 int len; 722 723 p = memchr(buf, '\n', n); 724 len = p ? p - buf : n; 725 726 /* Check hibernation first. */ 727 if (len == 4 && str_has_prefix(buf, "disk")) 728 return PM_SUSPEND_MAX; 729 730 #ifdef CONFIG_SUSPEND 731 for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) { 732 const char *label = pm_states[state]; 733 734 if (label && len == strlen(label) && !strncmp(buf, label, len)) 735 return state; 736 } 737 #endif 738 739 return PM_SUSPEND_ON; 740 } 741 742 static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, 743 const char *buf, size_t n) 744 { 745 suspend_state_t state; 746 int error; 747 748 error = pm_autosleep_lock(); 749 if (error) 750 return error; 751 752 if (pm_autosleep_state() > PM_SUSPEND_ON) { 753 error = -EBUSY; 754 goto out; 755 } 756 757 state = decode_state(buf, n); 758 if (state < PM_SUSPEND_MAX) { 759 if (state == PM_SUSPEND_MEM) 760 state = mem_sleep_current; 761 762 error = pm_suspend(state); 763 } else if (state == PM_SUSPEND_MAX) { 764 error = hibernate(); 765 } else { 766 error = -EINVAL; 767 } 768 769 out: 770 pm_autosleep_unlock(); 771 return error ? error : n; 772 } 773 774 power_attr(state); 775 776 #ifdef CONFIG_PM_SLEEP 777 /* 778 * The 'wakeup_count' attribute, along with the functions defined in 779 * drivers/base/power/wakeup.c, provides a means by which wakeup events can be 780 * handled in a non-racy way. 781 * 782 * If a wakeup event occurs when the system is in a sleep state, it simply is 783 * woken up. In turn, if an event that would wake the system up from a sleep 784 * state occurs when it is undergoing a transition to that sleep state, the 785 * transition should be aborted. Moreover, if such an event occurs when the 786 * system is in the working state, an attempt to start a transition to the 787 * given sleep state should fail during certain period after the detection of 788 * the event. Using the 'state' attribute alone is not sufficient to satisfy 789 * these requirements, because a wakeup event may occur exactly when 'state' 790 * is being written to and may be delivered to user space right before it is 791 * frozen, so the event will remain only partially processed until the system is 792 * woken up by another event. In particular, it won't cause the transition to 793 * a sleep state to be aborted. 794 * 795 * This difficulty may be overcome if user space uses 'wakeup_count' before 796 * writing to 'state'. It first should read from 'wakeup_count' and store 797 * the read value. Then, after carrying out its own preparations for the system 798 * transition to a sleep state, it should write the stored value to 799 * 'wakeup_count'. If that fails, at least one wakeup event has occurred since 800 * 'wakeup_count' was read and 'state' should not be written to. Otherwise, it 801 * is allowed to write to 'state', but the transition will be aborted if there 802 * are any wakeup events detected after 'wakeup_count' was written to. 803 */ 804 805 static ssize_t wakeup_count_show(struct kobject *kobj, 806 struct kobj_attribute *attr, 807 char *buf) 808 { 809 unsigned int val; 810 811 return pm_get_wakeup_count(&val, true) ? 812 sysfs_emit(buf, "%u\n", val) : -EINTR; 813 } 814 815 static ssize_t wakeup_count_store(struct kobject *kobj, 816 struct kobj_attribute *attr, 817 const char *buf, size_t n) 818 { 819 unsigned int val; 820 int error; 821 822 error = pm_autosleep_lock(); 823 if (error) 824 return error; 825 826 if (pm_autosleep_state() > PM_SUSPEND_ON) { 827 error = -EBUSY; 828 goto out; 829 } 830 831 error = -EINVAL; 832 if (sscanf(buf, "%u", &val) == 1) { 833 if (pm_save_wakeup_count(val)) 834 error = n; 835 else 836 pm_print_active_wakeup_sources(); 837 } 838 839 out: 840 pm_autosleep_unlock(); 841 return error; 842 } 843 844 power_attr(wakeup_count); 845 846 #ifdef CONFIG_PM_AUTOSLEEP 847 static ssize_t autosleep_show(struct kobject *kobj, 848 struct kobj_attribute *attr, 849 char *buf) 850 { 851 suspend_state_t state = pm_autosleep_state(); 852 853 if (state == PM_SUSPEND_ON) 854 return sysfs_emit(buf, "off\n"); 855 856 #ifdef CONFIG_SUSPEND 857 if (state < PM_SUSPEND_MAX) 858 return sysfs_emit(buf, "%s\n", pm_states[state] ? 859 pm_states[state] : "error"); 860 #endif 861 #ifdef CONFIG_HIBERNATION 862 return sysfs_emit(buf, "disk\n"); 863 #else 864 return sysfs_emit(buf, "error\n"); 865 #endif 866 } 867 868 static ssize_t autosleep_store(struct kobject *kobj, 869 struct kobj_attribute *attr, 870 const char *buf, size_t n) 871 { 872 suspend_state_t state = decode_state(buf, n); 873 int error; 874 875 if (state == PM_SUSPEND_ON 876 && strcmp(buf, "off") && strcmp(buf, "off\n")) 877 return -EINVAL; 878 879 if (state == PM_SUSPEND_MEM) 880 state = mem_sleep_current; 881 882 error = pm_autosleep_set_state(state); 883 return error ? error : n; 884 } 885 886 power_attr(autosleep); 887 #endif /* CONFIG_PM_AUTOSLEEP */ 888 889 #ifdef CONFIG_PM_WAKELOCKS 890 static ssize_t wake_lock_show(struct kobject *kobj, 891 struct kobj_attribute *attr, 892 char *buf) 893 { 894 return pm_show_wakelocks(buf, true); 895 } 896 897 static ssize_t wake_lock_store(struct kobject *kobj, 898 struct kobj_attribute *attr, 899 const char *buf, size_t n) 900 { 901 int error = pm_wake_lock(buf); 902 return error ? error : n; 903 } 904 905 power_attr(wake_lock); 906 907 static ssize_t wake_unlock_show(struct kobject *kobj, 908 struct kobj_attribute *attr, 909 char *buf) 910 { 911 return pm_show_wakelocks(buf, false); 912 } 913 914 static ssize_t wake_unlock_store(struct kobject *kobj, 915 struct kobj_attribute *attr, 916 const char *buf, size_t n) 917 { 918 int error = pm_wake_unlock(buf); 919 return error ? error : n; 920 } 921 922 power_attr(wake_unlock); 923 924 #endif /* CONFIG_PM_WAKELOCKS */ 925 #endif /* CONFIG_PM_SLEEP */ 926 927 #ifdef CONFIG_PM_TRACE 928 int pm_trace_enabled; 929 930 static ssize_t pm_trace_show(struct kobject *kobj, struct kobj_attribute *attr, 931 char *buf) 932 { 933 return sysfs_emit(buf, "%d\n", pm_trace_enabled); 934 } 935 936 static ssize_t 937 pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr, 938 const char *buf, size_t n) 939 { 940 int val; 941 942 if (sscanf(buf, "%d", &val) == 1) { 943 pm_trace_enabled = !!val; 944 if (pm_trace_enabled) { 945 pr_warn("PM: Enabling pm_trace changes system date and time during resume.\n" 946 "PM: Correct system time has to be restored manually after resume.\n"); 947 } 948 return n; 949 } 950 return -EINVAL; 951 } 952 953 power_attr(pm_trace); 954 955 static ssize_t pm_trace_dev_match_show(struct kobject *kobj, 956 struct kobj_attribute *attr, 957 char *buf) 958 { 959 return show_trace_dev_match(buf, PAGE_SIZE); 960 } 961 962 power_attr_ro(pm_trace_dev_match); 963 964 #endif /* CONFIG_PM_TRACE */ 965 966 #ifdef CONFIG_FREEZER 967 static ssize_t pm_freeze_timeout_show(struct kobject *kobj, 968 struct kobj_attribute *attr, char *buf) 969 { 970 return sysfs_emit(buf, "%u\n", freeze_timeout_msecs); 971 } 972 973 static ssize_t pm_freeze_timeout_store(struct kobject *kobj, 974 struct kobj_attribute *attr, 975 const char *buf, size_t n) 976 { 977 unsigned long val; 978 979 if (kstrtoul(buf, 10, &val)) 980 return -EINVAL; 981 982 freeze_timeout_msecs = val; 983 return n; 984 } 985 986 power_attr(pm_freeze_timeout); 987 988 #endif /* CONFIG_FREEZER*/ 989 990 #if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION) 991 bool filesystem_freeze_enabled = false; 992 993 static ssize_t freeze_filesystems_show(struct kobject *kobj, 994 struct kobj_attribute *attr, char *buf) 995 { 996 return sysfs_emit(buf, "%d\n", filesystem_freeze_enabled); 997 } 998 999 static ssize_t freeze_filesystems_store(struct kobject *kobj, 1000 struct kobj_attribute *attr, 1001 const char *buf, size_t n) 1002 { 1003 unsigned long val; 1004 1005 if (kstrtoul(buf, 10, &val)) 1006 return -EINVAL; 1007 1008 if (val > 1) 1009 return -EINVAL; 1010 1011 filesystem_freeze_enabled = !!val; 1012 return n; 1013 } 1014 1015 power_attr(freeze_filesystems); 1016 #endif /* CONFIG_SUSPEND || CONFIG_HIBERNATION */ 1017 1018 static struct attribute * g[] = { 1019 &state_attr.attr, 1020 #ifdef CONFIG_PM_TRACE 1021 &pm_trace_attr.attr, 1022 &pm_trace_dev_match_attr.attr, 1023 #endif 1024 #ifdef CONFIG_PM_SLEEP 1025 &pm_async_attr.attr, 1026 &wakeup_count_attr.attr, 1027 #ifdef CONFIG_SUSPEND 1028 &mem_sleep_attr.attr, 1029 &sync_on_suspend_attr.attr, 1030 #endif 1031 #ifdef CONFIG_PM_AUTOSLEEP 1032 &autosleep_attr.attr, 1033 #endif 1034 #ifdef CONFIG_PM_WAKELOCKS 1035 &wake_lock_attr.attr, 1036 &wake_unlock_attr.attr, 1037 #endif 1038 #ifdef CONFIG_PM_SLEEP_DEBUG 1039 &pm_test_attr.attr, 1040 &pm_print_times_attr.attr, 1041 &pm_wakeup_irq_attr.attr, 1042 &pm_debug_messages_attr.attr, 1043 #endif 1044 #endif 1045 #ifdef CONFIG_FREEZER 1046 &pm_freeze_timeout_attr.attr, 1047 #endif 1048 #if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION) 1049 &freeze_filesystems_attr.attr, 1050 #endif 1051 NULL, 1052 }; 1053 1054 static const struct attribute_group attr_group = { 1055 .attrs = g, 1056 }; 1057 1058 static const struct attribute_group *attr_groups[] = { 1059 &attr_group, 1060 #ifdef CONFIG_PM_SLEEP 1061 &suspend_attr_group, 1062 #endif 1063 NULL, 1064 }; 1065 1066 struct workqueue_struct *pm_wq; 1067 EXPORT_SYMBOL_GPL(pm_wq); 1068 1069 static int __init pm_start_workqueue(void) 1070 { 1071 pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0); 1072 1073 return pm_wq ? 0 : -ENOMEM; 1074 } 1075 1076 static int __init pm_init(void) 1077 { 1078 int error = pm_start_workqueue(); 1079 if (error) 1080 return error; 1081 hibernate_image_size_init(); 1082 hibernate_reserved_size_init(); 1083 pm_states_init(); 1084 power_kobj = kobject_create_and_add("power", NULL); 1085 if (!power_kobj) 1086 return -ENOMEM; 1087 error = sysfs_create_groups(power_kobj, attr_groups); 1088 if (error) 1089 return error; 1090 pm_print_times_init(); 1091 return pm_autosleep_init(); 1092 } 1093 1094 core_initcall(pm_init); 1095