xref: /linux/kernel/power/main.c (revision 7fc2cd2e4b398c57c9cf961cfea05eadbf34c05c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * kernel/power/main.c - PM subsystem core functionality.
4  *
5  * Copyright (c) 2003 Patrick Mochel
6  * Copyright (c) 2003 Open Source Development Lab
7  */
8 
9 #include <linux/acpi.h>
10 #include <linux/export.h>
11 #include <linux/init.h>
12 #include <linux/kobject.h>
13 #include <linux/string.h>
14 #include <linux/pm-trace.h>
15 #include <linux/workqueue.h>
16 #include <linux/debugfs.h>
17 #include <linux/seq_file.h>
18 #include <linux/suspend.h>
19 #include <linux/syscalls.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/atomic.h>
22 #include <linux/wait.h>
23 
24 #include "power.h"
25 
26 #ifdef CONFIG_PM_SLEEP
27 /*
28  * The following functions are used by the suspend/hibernate code to temporarily
29  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
30  * while devices are suspended.  To avoid races with the suspend/hibernate code,
31  * they should always be called with system_transition_mutex held
32  * (gfp_allowed_mask also should only be modified with system_transition_mutex
33  * held, unless the suspend/hibernate code is guaranteed not to run in parallel
34  * with that modification).
35  */
/* Nesting depth of pm_restrict_gfp_mask() calls not yet undone. */
static unsigned int saved_gfp_count;
/* gfp_allowed_mask value saved by the outermost pm_restrict_gfp_mask() call. */
static gfp_t saved_gfp_mask;
38 
39 void pm_restore_gfp_mask(void)
40 {
41 	WARN_ON(!mutex_is_locked(&system_transition_mutex));
42 
43 	if (WARN_ON(!saved_gfp_count) || --saved_gfp_count)
44 		return;
45 
46 	gfp_allowed_mask = saved_gfp_mask;
47 	saved_gfp_mask = 0;
48 
49 	pm_pr_dbg("GFP mask restored\n");
50 }
51 
52 void pm_restrict_gfp_mask(void)
53 {
54 	WARN_ON(!mutex_is_locked(&system_transition_mutex));
55 
56 	if (saved_gfp_count++) {
57 		WARN_ON((saved_gfp_mask & ~(__GFP_IO | __GFP_FS)) != gfp_allowed_mask);
58 		return;
59 	}
60 
61 	saved_gfp_mask = gfp_allowed_mask;
62 	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
63 
64 	pm_pr_dbg("GFP mask restricted\n");
65 }
66 
67 unsigned int lock_system_sleep(void)
68 {
69 	unsigned int flags = current->flags;
70 	current->flags |= PF_NOFREEZE;
71 	mutex_lock(&system_transition_mutex);
72 	return flags;
73 }
74 EXPORT_SYMBOL_GPL(lock_system_sleep);
75 
76 void unlock_system_sleep(unsigned int flags)
77 {
78 	if (!(flags & PF_NOFREEZE))
79 		current->flags &= ~PF_NOFREEZE;
80 	mutex_unlock(&system_transition_mutex);
81 }
82 EXPORT_SYMBOL_GPL(unlock_system_sleep);
83 
84 void ksys_sync_helper(void)
85 {
86 	ktime_t start;
87 	long elapsed_msecs;
88 
89 	start = ktime_get();
90 	ksys_sync();
91 	elapsed_msecs = ktime_to_ms(ktime_sub(ktime_get(), start));
92 	pr_info("Filesystems sync: %ld.%03ld seconds\n",
93 		elapsed_msecs / MSEC_PER_SEC, elapsed_msecs % MSEC_PER_SEC);
94 }
95 EXPORT_SYMBOL_GPL(ksys_sync_helper);
96 
97 #if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION)
/* Wakeup events handling resolution while syncing file systems in jiffies */
#define PM_FS_SYNC_WAKEUP_RESOLUTION	5

/* Number of fs syncs queued by pm_sleep_fs_sync() but not yet finished. */
static atomic_t pm_fs_sync_count = ATOMIC_INIT(0);
/* Ordered workqueue for pm_fs_sync_work; allocated in pm_start_workqueues(). */
static struct workqueue_struct *pm_fs_sync_wq;
/* pm_sleep_fs_sync() waits here until pm_fs_sync_count drops to zero. */
static DECLARE_WAIT_QUEUE_HEAD(pm_fs_sync_wait);
104 
105 static bool pm_fs_sync_completed(void)
106 {
107 	return atomic_read(&pm_fs_sync_count) == 0;
108 }
109 
/* Workqueue callback: sync file systems, then wake any waiters when done. */
static void pm_fs_sync_work_fn(struct work_struct *work)
{
	ksys_sync_helper();

	/* Wake pm_sleep_fs_sync() waiters once the last queued sync finishes. */
	if (atomic_dec_and_test(&pm_fs_sync_count))
		wake_up(&pm_fs_sync_wait);
}
static DECLARE_WORK(pm_fs_sync_work, pm_fs_sync_work_fn);
118 
/**
 * pm_sleep_fs_sync() - Sync file systems in an interruptible way
 *
 * Return: 0 on successful file system sync, or -EBUSY if the file system sync
 * was aborted.
 */
int pm_sleep_fs_sync(void)
{
	/* Discard wakeup events recorded so far; new ones abort the sync wait. */
	pm_wakeup_clear(0);

	/*
	 * Take back-to-back sleeps into account by queuing a subsequent fs sync
	 * only if the previous fs sync is running or is not queued. Multiple fs
	 * syncs increase the likelihood of saving the latest files immediately
	 * before sleep.
	 */
	if (!work_pending(&pm_fs_sync_work)) {
		atomic_inc(&pm_fs_sync_count);
		queue_work(pm_fs_sync_wq, &pm_fs_sync_work);
	}

	/*
	 * Poll for wakeup events at PM_FS_SYNC_WAKEUP_RESOLUTION granularity
	 * while waiting for the queued sync(s) to complete.
	 */
	while (!pm_fs_sync_completed()) {
		if (pm_wakeup_pending())
			return -EBUSY;

		wait_event_timeout(pm_fs_sync_wait, pm_fs_sync_completed(),
				   PM_FS_SYNC_WAKEUP_RESOLUTION);
	}

	return 0;
}
150 #endif /* CONFIG_SUSPEND || CONFIG_HIBERNATION */
151 
152 /* Routines for PM-transition notifications */
153 
154 static BLOCKING_NOTIFIER_HEAD(pm_chain_head);
155 
/**
 * register_pm_notifier - Add a callback to the PM-transition notifier chain.
 * @nb: Notifier block to register.
 *
 * Return: 0 on success or a negative error code.
 */
int register_pm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(register_pm_notifier);
161 
/**
 * unregister_pm_notifier - Remove a callback from the PM notifier chain.
 * @nb: Notifier block previously registered with register_pm_notifier().
 *
 * Return: 0 on success or a negative error code.
 */
int unregister_pm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(unregister_pm_notifier);
167 
168 int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down)
169 {
170 	int ret;
171 
172 	ret = blocking_notifier_call_chain_robust(&pm_chain_head, val_up, val_down, NULL);
173 
174 	return notifier_to_errno(ret);
175 }
176 
/* Notify PM listeners of @val; returns the raw notifier chain result. */
int pm_notifier_call_chain(unsigned long val)
{
	return blocking_notifier_call_chain(&pm_chain_head, val, NULL);
}
181 
182 /* If set, devices may be suspended and resumed asynchronously. */
183 int pm_async_enabled = 1;
184 
185 static int __init pm_async_setup(char *str)
186 {
187 	if (!strcmp(str, "off"))
188 		pm_async_enabled = 0;
189 	return 1;
190 }
191 __setup("pm_async=", pm_async_setup);
192 
/* Report whether asynchronous device suspend/resume is enabled. */
static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return sysfs_emit(buf, "%d\n", pm_async_enabled);
}
198 
199 static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr,
200 			      const char *buf, size_t n)
201 {
202 	unsigned long val;
203 
204 	if (kstrtoul(buf, 10, &val))
205 		return -EINVAL;
206 
207 	if (val > 1)
208 		return -EINVAL;
209 
210 	pm_async_enabled = val;
211 	return n;
212 }
213 
214 power_attr(pm_async);
215 
216 #ifdef CONFIG_SUSPEND
217 static ssize_t mem_sleep_show(struct kobject *kobj, struct kobj_attribute *attr,
218 			      char *buf)
219 {
220 	ssize_t count = 0;
221 	suspend_state_t i;
222 
223 	for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++) {
224 		if (i >= PM_SUSPEND_MEM && cxl_mem_active())
225 			continue;
226 		if (mem_sleep_states[i]) {
227 			const char *label = mem_sleep_states[i];
228 
229 			if (mem_sleep_current == i)
230 				count += sysfs_emit_at(buf, count, "[%s] ", label);
231 			else
232 				count += sysfs_emit_at(buf, count, "%s ", label);
233 		}
234 	}
235 
236 	/* Convert the last space to a newline if needed. */
237 	if (count > 0)
238 		buf[count - 1] = '\n';
239 
240 	return count;
241 }
242 
243 static suspend_state_t decode_suspend_state(const char *buf, size_t n)
244 {
245 	suspend_state_t state;
246 	char *p;
247 	int len;
248 
249 	p = memchr(buf, '\n', n);
250 	len = p ? p - buf : n;
251 
252 	for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {
253 		const char *label = mem_sleep_states[state];
254 
255 		if (label && len == strlen(label) && !strncmp(buf, label, len))
256 			return state;
257 	}
258 
259 	return PM_SUSPEND_ON;
260 }
261 
/* Select the suspend mode that "mem" maps to. */
static ssize_t mem_sleep_store(struct kobject *kobj, struct kobj_attribute *attr,
			       const char *buf, size_t n)
{
	suspend_state_t state;
	int error;

	error = pm_autosleep_lock();
	if (error)
		return error;

	/* Changing the mode is not allowed while autosleep is in effect. */
	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	state = decode_suspend_state(buf, n);
	/* Only real suspend states (neither "on" nor hibernation) are valid. */
	if (state < PM_SUSPEND_MAX && state > PM_SUSPEND_ON)
		mem_sleep_current = state;
	else
		error = -EINVAL;

 out:
	pm_autosleep_unlock();
	return error ? error : n;
}

power_attr(mem_sleep);
289 
290 /*
291  * sync_on_suspend: Sync file systems before suspend.
292  *
293  * show() returns whether file systems sync before suspend is enabled.
294  * store() accepts 0 or 1.  0 disables file systems sync and 1 enables it.
295  */
296 bool sync_on_suspend_enabled = !IS_ENABLED(CONFIG_SUSPEND_SKIP_SYNC);
297 
/* Report whether file systems are synced before suspend. */
static ssize_t sync_on_suspend_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", sync_on_suspend_enabled);
}
303 
304 static ssize_t sync_on_suspend_store(struct kobject *kobj,
305 				    struct kobj_attribute *attr,
306 				    const char *buf, size_t n)
307 {
308 	unsigned long val;
309 
310 	if (kstrtoul(buf, 10, &val))
311 		return -EINVAL;
312 
313 	if (val > 1)
314 		return -EINVAL;
315 
316 	sync_on_suspend_enabled = !!val;
317 	return n;
318 }
319 
320 power_attr(sync_on_suspend);
321 #endif /* CONFIG_SUSPEND */
322 
323 #ifdef CONFIG_PM_SLEEP_DEBUG
/* Current pm_test level; TEST_NONE means no test mode is in effect. */
int pm_test_level = TEST_NONE;

/* Labels exposed via /sys/power/pm_test, indexed by test level. */
static const char * const pm_tests[__TEST_AFTER_LAST] = {
	[TEST_NONE] = "none",
	[TEST_CORE] = "core",
	[TEST_CPUS] = "processors",
	[TEST_PLATFORM] = "platform",
	[TEST_DEVICES] = "devices",
	[TEST_FREEZER] = "freezer",
};
334 
335 static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr,
336 				char *buf)
337 {
338 	ssize_t count = 0;
339 	int level;
340 
341 	for (level = TEST_FIRST; level <= TEST_MAX; level++)
342 		if (pm_tests[level]) {
343 			if (level == pm_test_level)
344 				count += sysfs_emit_at(buf, count, "[%s] ", pm_tests[level]);
345 			else
346 				count += sysfs_emit_at(buf, count, "%s ", pm_tests[level]);
347 		}
348 
349 	/* Convert the last space to a newline if needed. */
350 	if (count > 0)
351 		buf[count - 1] = '\n';
352 
353 	return count;
354 }
355 
356 static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
357 				const char *buf, size_t n)
358 {
359 	unsigned int sleep_flags;
360 	const char * const *s;
361 	int error = -EINVAL;
362 	int level;
363 	char *p;
364 	int len;
365 
366 	p = memchr(buf, '\n', n);
367 	len = p ? p - buf : n;
368 
369 	sleep_flags = lock_system_sleep();
370 
371 	level = TEST_FIRST;
372 	for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
373 		if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) {
374 			pm_test_level = level;
375 			error = 0;
376 			break;
377 		}
378 
379 	unlock_system_sleep(sleep_flags);
380 
381 	return error ? error : n;
382 }
383 
384 power_attr(pm_test);
385 #endif /* CONFIG_PM_SLEEP_DEBUG */
386 
#define SUSPEND_NR_STEPS	SUSPEND_RESUME
#define REC_FAILED_NUM		2

/*
 * Cumulative suspend statistics.  The last_failed_* fields are cursors into
 * the corresponding REC_FAILED_NUM-entry ring buffers of recent failures,
 * pointing at the next slot to be overwritten.
 */
struct suspend_stats {
	unsigned int step_failures[SUSPEND_NR_STEPS];	/* indexed by step - 1 */
	unsigned int success;		/* bumped by dpm_save_errno(0) */
	unsigned int fail;		/* bumped by dpm_save_errno(err != 0) */
	int last_failed_dev;		/* ring cursor for failed_devs[] */
	char failed_devs[REC_FAILED_NUM][40];
	int last_failed_errno;		/* ring cursor for errno[] */
	int errno[REC_FAILED_NUM];
	int last_failed_step;		/* ring cursor for failed_steps[] */
	u64 last_hw_sleep;		/* set by pm_report_hw_sleep_time() */
	u64 total_hw_sleep;		/* accumulated by pm_report_hw_sleep_time() */
	u64 max_hw_sleep;		/* set by pm_report_max_hw_sleep() */
	enum suspend_stat_step failed_steps[REC_FAILED_NUM];
};

static struct suspend_stats suspend_stats;
/* Serializes updates to failed_devs[] (see dpm_save_failed_dev()). */
static DEFINE_MUTEX(suspend_stats_lock);
407 
408 void dpm_save_failed_dev(const char *name)
409 {
410 	mutex_lock(&suspend_stats_lock);
411 
412 	strscpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev],
413 		name, sizeof(suspend_stats.failed_devs[0]));
414 	suspend_stats.last_failed_dev++;
415 	suspend_stats.last_failed_dev %= REC_FAILED_NUM;
416 
417 	mutex_unlock(&suspend_stats_lock);
418 }
419 
420 void dpm_save_failed_step(enum suspend_stat_step step)
421 {
422 	suspend_stats.step_failures[step-1]++;
423 	suspend_stats.failed_steps[suspend_stats.last_failed_step] = step;
424 	suspend_stats.last_failed_step++;
425 	suspend_stats.last_failed_step %= REC_FAILED_NUM;
426 }
427 
428 void dpm_save_errno(int err)
429 {
430 	if (!err) {
431 		suspend_stats.success++;
432 		return;
433 	}
434 
435 	suspend_stats.fail++;
436 
437 	suspend_stats.errno[suspend_stats.last_failed_errno] = err;
438 	suspend_stats.last_failed_errno++;
439 	suspend_stats.last_failed_errno %= REC_FAILED_NUM;
440 }
441 
/* Record the hardware sleep duration @t of the last cycle and accumulate it. */
void pm_report_hw_sleep_time(u64 t)
{
	suspend_stats.last_hw_sleep = t;
	suspend_stats.total_hw_sleep += t;
}
EXPORT_SYMBOL_GPL(pm_report_hw_sleep_time);
448 
/* Record the reported maximum hardware sleep duration @t. */
void pm_report_max_hw_sleep(u64 t)
{
	suspend_stats.max_hw_sleep = t;
}
EXPORT_SYMBOL_GPL(pm_report_max_hw_sleep);
454 
/* Human-readable names for suspend_stat_step values, indexed by step. */
static const char * const suspend_step_names[] = {
	[SUSPEND_WORKING] = "",
	[SUSPEND_FREEZE] = "freeze",
	[SUSPEND_PREPARE] = "prepare",
	[SUSPEND_SUSPEND] = "suspend",
	[SUSPEND_SUSPEND_LATE] = "suspend_late",
	[SUSPEND_SUSPEND_NOIRQ] = "suspend_noirq",
	[SUSPEND_RESUME_NOIRQ] = "resume_noirq",
	[SUSPEND_RESUME_EARLY] = "resume_early",
	[SUSPEND_RESUME] = "resume",
};
466 
/* Generate a read-only sysfs attribute showing one suspend_stats field. */
#define suspend_attr(_name, format_str)				\
static ssize_t _name##_show(struct kobject *kobj,		\
		struct kobj_attribute *attr, char *buf)		\
{								\
	return sysfs_emit(buf, format_str, suspend_stats._name);\
}								\
static struct kobj_attribute _name = __ATTR_RO(_name)

suspend_attr(success, "%u\n");
suspend_attr(fail, "%u\n");
suspend_attr(last_hw_sleep, "%llu\n");
suspend_attr(total_hw_sleep, "%llu\n");
suspend_attr(max_hw_sleep, "%llu\n");
480 
/* Generate a read-only sysfs attribute for one per-step failure counter. */
#define suspend_step_attr(_name, step)		\
static ssize_t _name##_show(struct kobject *kobj,		\
		struct kobj_attribute *attr, char *buf)		\
{								\
	return sysfs_emit(buf, "%u\n",				\
		       suspend_stats.step_failures[step-1]);	\
}								\
static struct kobj_attribute _name = __ATTR_RO(_name)

suspend_step_attr(failed_freeze, SUSPEND_FREEZE);
suspend_step_attr(failed_prepare, SUSPEND_PREPARE);
suspend_step_attr(failed_suspend, SUSPEND_SUSPEND);
suspend_step_attr(failed_suspend_late, SUSPEND_SUSPEND_LATE);
suspend_step_attr(failed_suspend_noirq, SUSPEND_SUSPEND_NOIRQ);
suspend_step_attr(failed_resume, SUSPEND_RESUME);
suspend_step_attr(failed_resume_early, SUSPEND_RESUME_EARLY);
suspend_step_attr(failed_resume_noirq, SUSPEND_RESUME_NOIRQ);
498 
499 static ssize_t last_failed_dev_show(struct kobject *kobj,
500 		struct kobj_attribute *attr, char *buf)
501 {
502 	int index;
503 	char *last_failed_dev = NULL;
504 
505 	index = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
506 	index %= REC_FAILED_NUM;
507 	last_failed_dev = suspend_stats.failed_devs[index];
508 
509 	return sysfs_emit(buf, "%s\n", last_failed_dev);
510 }
511 static struct kobj_attribute last_failed_dev = __ATTR_RO(last_failed_dev);
512 
513 static ssize_t last_failed_errno_show(struct kobject *kobj,
514 		struct kobj_attribute *attr, char *buf)
515 {
516 	int index;
517 	int last_failed_errno;
518 
519 	index = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
520 	index %= REC_FAILED_NUM;
521 	last_failed_errno = suspend_stats.errno[index];
522 
523 	return sysfs_emit(buf, "%d\n", last_failed_errno);
524 }
525 static struct kobj_attribute last_failed_errno = __ATTR_RO(last_failed_errno);
526 
527 static ssize_t last_failed_step_show(struct kobject *kobj,
528 		struct kobj_attribute *attr, char *buf)
529 {
530 	enum suspend_stat_step step;
531 	int index;
532 
533 	index = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
534 	index %= REC_FAILED_NUM;
535 	step = suspend_stats.failed_steps[index];
536 
537 	return sysfs_emit(buf, "%s\n", suspend_step_names[step]);
538 }
539 static struct kobj_attribute last_failed_step = __ATTR_RO(last_failed_step);
540 
/* Members of the suspend_stats group; filtered by suspend_attr_is_visible(). */
static struct attribute *suspend_attrs[] = {
	&success.attr,
	&fail.attr,
	&failed_freeze.attr,
	&failed_prepare.attr,
	&failed_suspend.attr,
	&failed_suspend_late.attr,
	&failed_suspend_noirq.attr,
	&failed_resume.attr,
	&failed_resume_early.attr,
	&failed_resume_noirq.attr,
	&last_failed_dev.attr,
	&last_failed_errno.attr,
	&last_failed_step.attr,
	&last_hw_sleep.attr,
	&total_hw_sleep.attr,
	&max_hw_sleep.attr,
	NULL,
};
560 
/*
 * All attributes are world-readable except the *_hw_sleep ones, which are
 * exposed only when ACPI reports Low-Power S0 Idle support.
 */
static umode_t suspend_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	if (attr != &last_hw_sleep.attr &&
	    attr != &total_hw_sleep.attr &&
	    attr != &max_hw_sleep.attr)
		return 0444;

#ifdef CONFIG_ACPI
	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
		return 0444;
#endif
	/* Hide the hw_sleep attributes. */
	return 0;
}
574 
/* /sys/power/suspend_stats/ attribute group. */
static const struct attribute_group suspend_attr_group = {
	.name = "suspend_stats",
	.attrs = suspend_attrs,
	.is_visible = suspend_attr_is_visible,
};
580 
581 #ifdef CONFIG_DEBUG_FS
/* Dump the full suspend_stats contents to debugfs. */
static int suspend_stats_show(struct seq_file *s, void *unused)
{
	int i, index, last_dev, last_errno, last_step;
	enum suspend_stat_step step;

	/* Each last_failed_* cursor points at the next slot to fill, so the
	 * newest entry is one slot behind it. */
	last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
	last_dev %= REC_FAILED_NUM;
	last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
	last_errno %= REC_FAILED_NUM;
	last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
	last_step %= REC_FAILED_NUM;

	seq_printf(s, "success: %u\nfail: %u\n",
		   suspend_stats.success, suspend_stats.fail);

	/* Per-step failure counters (step_failures[] is indexed by step-1). */
	for (step = SUSPEND_FREEZE; step <= SUSPEND_NR_STEPS; step++)
		seq_printf(s, "failed_%s: %u\n", suspend_step_names[step],
			   suspend_stats.step_failures[step-1]);

	/* Recently failed devices, newest first. */
	seq_printf(s,	"failures:\n  last_failed_dev:\t%-s\n",
		   suspend_stats.failed_devs[last_dev]);
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_dev + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-s\n", suspend_stats.failed_devs[index]);
	}
	/* Recent failure errnos, newest first. */
	seq_printf(s,	"  last_failed_errno:\t%-d\n",
			suspend_stats.errno[last_errno]);
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_errno + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-d\n", suspend_stats.errno[index]);
	}
	/* Recently failed steps, newest first. */
	seq_printf(s,	"  last_failed_step:\t%-s\n",
		   suspend_step_names[suspend_stats.failed_steps[last_step]]);
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_step + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-s\n",
			   suspend_step_names[suspend_stats.failed_steps[index]]);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(suspend_stats);
627 
628 static int __init pm_debugfs_init(void)
629 {
630 	debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO,
631 			NULL, NULL, &suspend_stats_fops);
632 	return 0;
633 }
634 
635 late_initcall(pm_debugfs_init);
636 #endif /* CONFIG_DEBUG_FS */
637 
/* True while either a system suspend or a hibernation is in progress. */
bool pm_sleep_transition_in_progress(void)
{
	return pm_suspend_in_progress() || hibernation_in_progress();
}
642 #endif /* CONFIG_PM_SLEEP */
643 
644 #ifdef CONFIG_PM_SLEEP_DEBUG
645 /*
646  * pm_print_times: print time taken by devices to suspend and resume.
647  *
648  * show() returns whether printing of suspend and resume times is enabled.
649  * store() accepts 0 or 1.  0 disables printing and 1 enables it.
650  */
651 bool pm_print_times_enabled;
652 
/* Report whether per-device suspend/resume timing messages are enabled. */
static ssize_t pm_print_times_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", pm_print_times_enabled);
}
658 
659 static ssize_t pm_print_times_store(struct kobject *kobj,
660 				    struct kobj_attribute *attr,
661 				    const char *buf, size_t n)
662 {
663 	unsigned long val;
664 
665 	if (kstrtoul(buf, 10, &val))
666 		return -EINVAL;
667 
668 	if (val > 1)
669 		return -EINVAL;
670 
671 	pm_print_times_enabled = !!val;
672 	return n;
673 }
674 
675 power_attr(pm_print_times);
676 
/* Default pm_print_times to the initcall_debug setting at boot. */
static inline void pm_print_times_init(void)
{
	pm_print_times_enabled = initcall_debug;
}
681 
682 static ssize_t pm_wakeup_irq_show(struct kobject *kobj,
683 					struct kobj_attribute *attr,
684 					char *buf)
685 {
686 	if (!pm_wakeup_irq())
687 		return -ENODATA;
688 
689 	return sysfs_emit(buf, "%u\n", pm_wakeup_irq());
690 }
691 
692 power_attr_ro(pm_wakeup_irq);
693 
/* When true, PM debug messages may be printed (see below for the gate). */
bool pm_debug_messages_on __read_mostly;

/* Debug messages are printed only while a sleep transition is in progress. */
bool pm_debug_messages_should_print(void)
{
	return pm_debug_messages_on && pm_sleep_transition_in_progress();
}
EXPORT_SYMBOL_GPL(pm_debug_messages_should_print);
701 
/* Report whether PM debug messages are enabled. */
static ssize_t pm_debug_messages_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", pm_debug_messages_on);
}
707 
708 static ssize_t pm_debug_messages_store(struct kobject *kobj,
709 				       struct kobj_attribute *attr,
710 				       const char *buf, size_t n)
711 {
712 	unsigned long val;
713 
714 	if (kstrtoul(buf, 10, &val))
715 		return -EINVAL;
716 
717 	if (val > 1)
718 		return -EINVAL;
719 
720 	pm_debug_messages_on = !!val;
721 	return n;
722 }
723 
724 power_attr(pm_debug_messages);
725 
/* "pm_debug_messages" on the kernel command line enables the messages. */
static int __init pm_debug_messages_setup(char *str)
{
	pm_debug_messages_on = true;
	return 1;
}
__setup("pm_debug_messages", pm_debug_messages_setup);
732 
733 #else /* !CONFIG_PM_SLEEP_DEBUG */
734 static inline void pm_print_times_init(void) {}
735 #endif /* CONFIG_PM_SLEEP_DEBUG */
736 
737 struct kobject *power_kobj;
738 
739 /*
740  * state - control system sleep states.
741  *
742  * show() returns available sleep state labels, which may be "mem", "standby",
743  * "freeze" and "disk" (hibernation).
744  * See Documentation/admin-guide/pm/sleep-states.rst for a description of
745  * what they mean.
746  *
747  * store() accepts one of those strings, translates it into the proper
748  * enumerated value, and initiates a suspend transition.
749  */
750 static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
751 			  char *buf)
752 {
753 	ssize_t count = 0;
754 #ifdef CONFIG_SUSPEND
755 	suspend_state_t i;
756 
757 	for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
758 		if (pm_states[i])
759 			count += sysfs_emit_at(buf, count, "%s ", pm_states[i]);
760 
761 #endif
762 	if (hibernation_available())
763 		count += sysfs_emit_at(buf, count, "disk ");
764 
765 	/* Convert the last space to a newline if needed. */
766 	if (count > 0)
767 		buf[count - 1] = '\n';
768 
769 	return count;
770 }
771 
/*
 * Map a user-supplied sleep-state string to a suspend state: "disk" yields
 * PM_SUSPEND_MAX (hibernation), any pm_states[] label yields its state, and
 * anything else yields PM_SUSPEND_ON (treated as invalid by callers).
 */
static suspend_state_t decode_state(const char *buf, size_t n)
{
#ifdef CONFIG_SUSPEND
	suspend_state_t state;
#endif
	char *p;
	int len;

	/* Ignore an optional trailing newline. */
	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	/* Check hibernation first. */
	if (len == 4 && str_has_prefix(buf, "disk"))
		return PM_SUSPEND_MAX;

#ifdef CONFIG_SUSPEND
	for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {
		const char *label = pm_states[state];

		if (label && len == strlen(label) && !strncmp(buf, label, len))
			return state;
	}
#endif

	return PM_SUSPEND_ON;
}
798 
/* Initiate the sleep transition named in @buf. */
static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t n)
{
	suspend_state_t state;
	int error;

	error = pm_autosleep_lock();
	if (error)
		return error;

	/* Manual transitions are rejected while autosleep is in control. */
	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	state = decode_state(buf, n);
	if (state < PM_SUSPEND_MAX) {
		/* "mem" is an alias for the currently selected mem_sleep mode. */
		if (state == PM_SUSPEND_MEM)
			state = mem_sleep_current;

		error = pm_suspend(state);
	} else if (state == PM_SUSPEND_MAX) {
		/* PM_SUSPEND_MAX encodes "disk" (see decode_state()). */
		error = hibernate();
	} else {
		error = -EINVAL;
	}

 out:
	pm_autosleep_unlock();
	return error ? error : n;
}

power_attr(state);
832 
833 #ifdef CONFIG_PM_SLEEP
834 /*
835  * The 'wakeup_count' attribute, along with the functions defined in
836  * drivers/base/power/wakeup.c, provides a means by which wakeup events can be
837  * handled in a non-racy way.
838  *
839  * If a wakeup event occurs when the system is in a sleep state, it simply is
840  * woken up.  In turn, if an event that would wake the system up from a sleep
841  * state occurs when it is undergoing a transition to that sleep state, the
842  * transition should be aborted.  Moreover, if such an event occurs when the
843  * system is in the working state, an attempt to start a transition to the
844  * given sleep state should fail during certain period after the detection of
845  * the event.  Using the 'state' attribute alone is not sufficient to satisfy
846  * these requirements, because a wakeup event may occur exactly when 'state'
847  * is being written to and may be delivered to user space right before it is
848  * frozen, so the event will remain only partially processed until the system is
849  * woken up by another event.  In particular, it won't cause the transition to
850  * a sleep state to be aborted.
851  *
852  * This difficulty may be overcome if user space uses 'wakeup_count' before
853  * writing to 'state'.  It first should read from 'wakeup_count' and store
854  * the read value.  Then, after carrying out its own preparations for the system
855  * transition to a sleep state, it should write the stored value to
856  * 'wakeup_count'.  If that fails, at least one wakeup event has occurred since
857  * 'wakeup_count' was read and 'state' should not be written to.  Otherwise, it
858  * is allowed to write to 'state', but the transition will be aborted if there
859  * are any wakeup events detected after 'wakeup_count' was written to.
860  */
861 
862 static ssize_t wakeup_count_show(struct kobject *kobj,
863 				struct kobj_attribute *attr,
864 				char *buf)
865 {
866 	unsigned int val;
867 
868 	return pm_get_wakeup_count(&val, true) ?
869 		sysfs_emit(buf, "%u\n", val) : -EINTR;
870 }
871 
/* Arm the wakeup-event check for a following write to /sys/power/state. */
static ssize_t wakeup_count_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t n)
{
	unsigned int val;
	int error;

	error = pm_autosleep_lock();
	if (error)
		return error;

	/* Writing wakeup_count is not allowed while autosleep is enabled. */
	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	error = -EINVAL;
	if (sscanf(buf, "%u", &val) == 1) {
		/* Fails if wakeup events occurred since @val was read (see
		 * the comment block above). */
		if (pm_save_wakeup_count(val))
			error = n;
		else
			pm_print_active_wakeup_sources();
	}

 out:
	pm_autosleep_unlock();
	return error;
}

power_attr(wakeup_count);
902 
903 #ifdef CONFIG_PM_AUTOSLEEP
/* Report the current autosleep target state, or "off" when disabled. */
static ssize_t autosleep_show(struct kobject *kobj,
			      struct kobj_attribute *attr,
			      char *buf)
{
	suspend_state_t state = pm_autosleep_state();

	/* PM_SUSPEND_ON means autosleep is disabled. */
	if (state == PM_SUSPEND_ON)
		return sysfs_emit(buf, "off\n");

#ifdef CONFIG_SUSPEND
	if (state < PM_SUSPEND_MAX)
		return sysfs_emit(buf, "%s\n", pm_states[state] ?
					pm_states[state] : "error");
#endif
	/* Anything beyond the suspend range is hibernation, if available. */
#ifdef CONFIG_HIBERNATION
	return sysfs_emit(buf, "disk\n");
#else
	return sysfs_emit(buf, "error\n");
#endif
}
924 
925 static ssize_t autosleep_store(struct kobject *kobj,
926 			       struct kobj_attribute *attr,
927 			       const char *buf, size_t n)
928 {
929 	suspend_state_t state = decode_state(buf, n);
930 	int error;
931 
932 	if (state == PM_SUSPEND_ON
933 	    && strcmp(buf, "off") && strcmp(buf, "off\n"))
934 		return -EINVAL;
935 
936 	if (state == PM_SUSPEND_MEM)
937 		state = mem_sleep_current;
938 
939 	error = pm_autosleep_set_state(state);
940 	return error ? error : n;
941 }
942 
943 power_attr(autosleep);
944 #endif /* CONFIG_PM_AUTOSLEEP */
945 
946 #ifdef CONFIG_PM_WAKELOCKS
/* List the active wakelocks. */
static ssize_t wake_lock_show(struct kobject *kobj,
			      struct kobj_attribute *attr,
			      char *buf)
{
	return pm_show_wakelocks(buf, true);
}
953 
954 static ssize_t wake_lock_store(struct kobject *kobj,
955 			       struct kobj_attribute *attr,
956 			       const char *buf, size_t n)
957 {
958 	int error = pm_wake_lock(buf);
959 	return error ? error : n;
960 }
961 
962 power_attr(wake_lock);
963 
/* List the inactive wakelocks. */
static ssize_t wake_unlock_show(struct kobject *kobj,
				struct kobj_attribute *attr,
				char *buf)
{
	return pm_show_wakelocks(buf, false);
}
970 
971 static ssize_t wake_unlock_store(struct kobject *kobj,
972 				 struct kobj_attribute *attr,
973 				 const char *buf, size_t n)
974 {
975 	int error = pm_wake_unlock(buf);
976 	return error ? error : n;
977 }
978 
979 power_attr(wake_unlock);
980 
981 #endif /* CONFIG_PM_WAKELOCKS */
982 #endif /* CONFIG_PM_SLEEP */
983 
984 #ifdef CONFIG_PM_TRACE
985 int pm_trace_enabled;
986 
/* Report whether PM tracing via the RTC is enabled. */
static ssize_t pm_trace_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return sysfs_emit(buf, "%d\n", pm_trace_enabled);
}
992 
993 static ssize_t
994 pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr,
995 	       const char *buf, size_t n)
996 {
997 	int val;
998 
999 	if (sscanf(buf, "%d", &val) == 1) {
1000 		pm_trace_enabled = !!val;
1001 		if (pm_trace_enabled) {
1002 			pr_warn("PM: Enabling pm_trace changes system date and time during resume.\n"
1003 				"PM: Correct system time has to be restored manually after resume.\n");
1004 		}
1005 		return n;
1006 	}
1007 	return -EINVAL;
1008 }
1009 
1010 power_attr(pm_trace);
1011 
/* Delegate to show_trace_dev_match() to list devices matching the PM trace. */
static ssize_t pm_trace_dev_match_show(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       char *buf)
{
	return show_trace_dev_match(buf, PAGE_SIZE);
}

power_attr_ro(pm_trace_dev_match);
1020 
1021 #endif /* CONFIG_PM_TRACE */
1022 
1023 #ifdef CONFIG_FREEZER
/* Report the process-freezing timeout in milliseconds. */
static ssize_t pm_freeze_timeout_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", freeze_timeout_msecs);
}
1029 
1030 static ssize_t pm_freeze_timeout_store(struct kobject *kobj,
1031 				       struct kobj_attribute *attr,
1032 				       const char *buf, size_t n)
1033 {
1034 	unsigned long val;
1035 
1036 	if (kstrtoul(buf, 10, &val))
1037 		return -EINVAL;
1038 
1039 	freeze_timeout_msecs = val;
1040 	return n;
1041 }
1042 
1043 power_attr(pm_freeze_timeout);
1044 
1045 #endif	/* CONFIG_FREEZER*/
1046 
1047 #if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION)
1048 bool filesystem_freeze_enabled = false;
1049 
/* Report whether file systems are frozen during system sleep. */
static ssize_t freeze_filesystems_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", filesystem_freeze_enabled);
}
1055 
1056 static ssize_t freeze_filesystems_store(struct kobject *kobj,
1057 					struct kobj_attribute *attr,
1058 					const char *buf, size_t n)
1059 {
1060 	unsigned long val;
1061 
1062 	if (kstrtoul(buf, 10, &val))
1063 		return -EINVAL;
1064 
1065 	if (val > 1)
1066 		return -EINVAL;
1067 
1068 	filesystem_freeze_enabled = !!val;
1069 	return n;
1070 }
1071 
1072 power_attr(freeze_filesystems);
1073 #endif /* CONFIG_SUSPEND || CONFIG_HIBERNATION */
1074 
/* Attributes exposed directly under /sys/power. */
static struct attribute * g[] = {
	&state_attr.attr,
#ifdef CONFIG_PM_TRACE
	&pm_trace_attr.attr,
	&pm_trace_dev_match_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP
	&pm_async_attr.attr,
	&wakeup_count_attr.attr,
#ifdef CONFIG_SUSPEND
	&mem_sleep_attr.attr,
	&sync_on_suspend_attr.attr,
#endif
#ifdef CONFIG_PM_AUTOSLEEP
	&autosleep_attr.attr,
#endif
#ifdef CONFIG_PM_WAKELOCKS
	&wake_lock_attr.attr,
	&wake_unlock_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP_DEBUG
	&pm_test_attr.attr,
	&pm_print_times_attr.attr,
	&pm_wakeup_irq_attr.attr,
	&pm_debug_messages_attr.attr,
#endif
#endif
#ifdef CONFIG_FREEZER
	&pm_freeze_timeout_attr.attr,
#endif
#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION)
	&freeze_filesystems_attr.attr,
#endif
	NULL,
};

static const struct attribute_group attr_group = {
	.attrs = g,
};

/* Groups registered on power_kobj by pm_init(). */
static const struct attribute_group *attr_groups[] = {
	&attr_group,
#ifdef CONFIG_PM_SLEEP
	&suspend_attr_group,
#endif
	NULL,
};
1122 
/* Freezable, unbound workqueue for PM work items. */
struct workqueue_struct *pm_wq;
EXPORT_SYMBOL_GPL(pm_wq);

/* Allocate the PM workqueues; tears down pm_wq if the second one fails. */
static int __init pm_start_workqueues(void)
{
	pm_wq = alloc_workqueue("pm", WQ_FREEZABLE | WQ_UNBOUND, 0);
	if (!pm_wq)
		return -ENOMEM;

#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION)
	/* Ordered: at most one queued fs sync runs at any given time. */
	pm_fs_sync_wq = alloc_ordered_workqueue("pm_fs_sync", 0);
	if (!pm_fs_sync_wq) {
		destroy_workqueue(pm_wq);
		return -ENOMEM;
	}
#endif

	return 0;
}
1142 
/* Core initcall: set up PM workqueues, defaults, and the /sys/power tree. */
static int __init pm_init(void)
{
	int error = pm_start_workqueues();
	if (error)
		return error;
	hibernate_image_size_init();
	hibernate_reserved_size_init();
	pm_states_init();
	/* NOTE(review): power_kobj is not released on the error paths below;
	 * presumably acceptable for a core initcall — confirm before changing. */
	power_kobj = kobject_create_and_add("power", NULL);
	if (!power_kobj)
		return -ENOMEM;
	error = sysfs_create_groups(power_kobj, attr_groups);
	if (error)
		return error;
	pm_print_times_init();
	return pm_autosleep_init();
}

core_initcall(pm_init);
1162