1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * kernel/power/hibernate.c - Hibernation (a.k.a. suspend-to-disk) support.
4  *
5  * Copyright (c) 2003 Patrick Mochel
6  * Copyright (c) 2003 Open Source Development Lab
7  * Copyright (c) 2004 Pavel Machek <pavel@ucw.cz>
8  * Copyright (c) 2009 Rafael J. Wysocki, Novell Inc.
9  * Copyright (C) 2012 Bojan Smojver <bojan@rexursive.com>
10  */
11 
12 #define pr_fmt(fmt) "PM: hibernation: " fmt
13 
14 #include <crypto/acompress.h>
15 #include <linux/blkdev.h>
16 #include <linux/export.h>
17 #include <linux/suspend.h>
18 #include <linux/reboot.h>
19 #include <linux/string.h>
20 #include <linux/device.h>
21 #include <linux/async.h>
22 #include <linux/delay.h>
23 #include <linux/fs.h>
24 #include <linux/mount.h>
25 #include <linux/pm.h>
26 #include <linux/nmi.h>
27 #include <linux/console.h>
28 #include <linux/cpu.h>
29 #include <linux/freezer.h>
30 #include <linux/gfp.h>
31 #include <linux/syscore_ops.h>
32 #include <linux/ctype.h>
33 #include <linux/ktime.h>
34 #include <linux/security.h>
35 #include <linux/secretmem.h>
36 #include <trace/events/power.h>
37 
38 #include "power.h"
39 
40 
41 static int nocompress;
42 static int noresume;
43 static int nohibernate;
44 static int resume_wait;
45 static unsigned int resume_delay;
46 static char resume_file[256] = CONFIG_PM_STD_PARTITION;
47 dev_t swsusp_resume_device;
48 sector_t swsusp_resume_block;
49 __visible int in_suspend __nosavedata;
50 
51 static char hibernate_compressor[CRYPTO_MAX_ALG_NAME] = CONFIG_HIBERNATION_DEF_COMP;
52 
53 /*
54  * Compression/decompression algorithm to be used while saving/loading
55  * image to/from disk. This would later be used in 'kernel/power/swap.c'
56  * to allocate comp streams.
57  */
58 char hib_comp_algo[CRYPTO_MAX_ALG_NAME];
59 
60 enum {
61 	HIBERNATION_INVALID,
62 	HIBERNATION_PLATFORM,
63 	HIBERNATION_SHUTDOWN,
64 	HIBERNATION_REBOOT,
65 #ifdef CONFIG_SUSPEND
66 	HIBERNATION_SUSPEND,
67 #endif
68 	HIBERNATION_TEST_RESUME,
69 	/* keep last */
70 	__HIBERNATION_AFTER_LAST
71 };
72 #define HIBERNATION_MAX (__HIBERNATION_AFTER_LAST-1)
73 #define HIBERNATION_FIRST (HIBERNATION_INVALID + 1)
74 
75 static int hibernation_mode = HIBERNATION_SHUTDOWN;
76 
77 bool freezer_test_done;
78 
79 static const struct platform_hibernation_ops *hibernation_ops;
80 
81 static atomic_t hibernate_atomic = ATOMIC_INIT(1);
82 
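/*
 * hibernate_atomic is a one-slot reference guarding hibernation: only one
 * hibernation/snapshot user can hold it at a time.  hibernate_acquire()
 * returns false if it is already taken and hibernate_release() gives it back.
 */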
83 bool hibernate_acquire(void)
84 {
85 	return atomic_add_unless(&hibernate_atomic, -1, 0);
86 }
87 
88 void hibernate_release(void)
89 {
90 	atomic_inc(&hibernate_atomic);
91 }
92 
93 bool hibernation_in_progress(void)
94 {
95 	return !atomic_read(&hibernate_atomic);
96 }
97 
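/*
 * Hibernation is unavailable if it was disabled on the kernel command line,
 * if the kernel is locked down, or if secretmem or CXL memory is in use.
 */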
98 bool hibernation_available(void)
99 {
100 	return nohibernate == 0 &&
101 		!security_locked_down(LOCKDOWN_HIBERNATION) &&
102 		!secretmem_active() && !cxl_mem_active();
103 }
104 
105 /**
106  * hibernation_set_ops - Set the global hibernate operations.
107  * @ops: Hibernation operations to use in subsequent hibernation transitions.
108  */
109 void hibernation_set_ops(const struct platform_hibernation_ops *ops)
110 {
111 	unsigned int sleep_flags;
112 
113 	if (ops && !(ops->begin && ops->end && ops->pre_snapshot
114 	    && ops->prepare && ops->finish && ops->enter && ops->pre_restore
115 	    && ops->restore_cleanup && ops->leave)) {
116 		WARN_ON(1);
117 		return;
118 	}
119 
120 	sleep_flags = lock_system_sleep();
121 
122 	hibernation_ops = ops;
123 	if (ops)
124 		hibernation_mode = HIBERNATION_PLATFORM;
125 	else if (hibernation_mode == HIBERNATION_PLATFORM)
126 		hibernation_mode = HIBERNATION_SHUTDOWN;
127 
128 	unlock_system_sleep(sleep_flags);
129 }
130 EXPORT_SYMBOL_GPL(hibernation_set_ops);
131 
132 static bool entering_platform_hibernation;
133 
134 bool system_entering_hibernation(void)
135 {
136 	return entering_platform_hibernation;
137 }
138 EXPORT_SYMBOL(system_entering_hibernation);
139 
140 #ifdef CONFIG_PM_DEBUG
141 static unsigned int pm_test_delay = 5;
142 module_param(pm_test_delay, uint, 0644);
143 MODULE_PARM_DESC(pm_test_delay,
144 		 "Number of seconds to wait before resuming from hibernation test");
145 static void hibernation_debug_sleep(void)
146 {
147 	pr_info("hibernation debug: Waiting for %d second(s).\n",
148 		pm_test_delay);
149 	mdelay(pm_test_delay * 1000);
150 }
151 
152 static int hibernation_test(int level)
153 {
154 	if (pm_test_level == level) {
155 		hibernation_debug_sleep();
156 		return 1;
157 	}
158 	return 0;
159 }
160 #else /* !CONFIG_PM_DEBUG */
161 static int hibernation_test(int level) { return 0; }
162 #endif /* !CONFIG_PM_DEBUG */
163 
164 /**
165  * platform_begin - Call platform to start hibernation.
166  * @platform_mode: Whether or not to use the platform driver.
167  */
168 static int platform_begin(int platform_mode)
169 {
170 	return (platform_mode && hibernation_ops) ?
171 		hibernation_ops->begin(PMSG_FREEZE) : 0;
172 }
173 
174 /**
175  * platform_end - Call platform to finish transition to the working state.
176  * @platform_mode: Whether or not to use the platform driver.
177  */
178 static void platform_end(int platform_mode)
179 {
180 	if (platform_mode && hibernation_ops)
181 		hibernation_ops->end();
182 }
183 
184 /**
185  * platform_pre_snapshot - Call platform to prepare the machine for hibernation.
186  * @platform_mode: Whether or not to use the platform driver.
187  *
188  * Use the platform driver to prepare the system for creating a hibernate image,
189  * if so configured, and return an error code if that fails.
190  */
191 
192 static int platform_pre_snapshot(int platform_mode)
193 {
194 	return (platform_mode && hibernation_ops) ?
195 		hibernation_ops->pre_snapshot() : 0;
196 }
197 
198 /**
199  * platform_leave - Call platform to prepare a transition to the working state.
200  * @platform_mode: Whether or not to use the platform driver.
201  *
202  * Use the platform driver to prepare the machine for switching to the
203  * normal mode of operation.
204  *
205  * This routine is called on one CPU with interrupts disabled.
206  */
207 static void platform_leave(int platform_mode)
208 {
209 	if (platform_mode && hibernation_ops)
210 		hibernation_ops->leave();
211 }
212 
213 /**
214  * platform_finish - Call platform to switch the system to the working state.
215  * @platform_mode: Whether or not to use the platform driver.
216  *
217  * Use the platform driver to switch the machine to the normal mode of
218  * operation.
219  *
220  * This routine must be called after platform_prepare().
221  */
222 static void platform_finish(int platform_mode)
223 {
224 	if (platform_mode && hibernation_ops)
225 		hibernation_ops->finish();
226 }
227 
228 /**
229  * platform_pre_restore - Prepare for hibernate image restoration.
230  * @platform_mode: Whether or not to use the platform driver.
231  *
232  * Use the platform driver to prepare the system for resume from a hibernation
233  * image.
234  *
235  * If the restore fails after this function has been called,
236  * platform_restore_cleanup() must be called.
237  */
238 static int platform_pre_restore(int platform_mode)
239 {
240 	return (platform_mode && hibernation_ops) ?
241 		hibernation_ops->pre_restore() : 0;
242 }
243 
244 /**
245  * platform_restore_cleanup - Switch to the working state after failing restore.
246  * @platform_mode: Whether or not to use the platform driver.
247  *
248  * Use the platform driver to switch the system to the normal mode of operation
249  * after a failing restore.
250  *
251  * If platform_pre_restore() has been called before the failing restore, this
252  * function must be called too, regardless of the result of
253  * platform_pre_restore().
254  */
255 static void platform_restore_cleanup(int platform_mode)
256 {
257 	if (platform_mode && hibernation_ops)
258 		hibernation_ops->restore_cleanup();
259 }
260 
261 /**
262  * platform_recover - Recover from a failure to suspend devices.
263  * @platform_mode: Whether or not to use the platform driver.
264  */
265 static void platform_recover(int platform_mode)
266 {
267 	if (platform_mode && hibernation_ops && hibernation_ops->recover)
268 		hibernation_ops->recover();
269 }
270 
271 /**
272  * swsusp_show_speed - Print time elapsed between two events during hibernation.
273  * @start: Starting event.
274  * @stop: Final event.
275  * @nr_pages: Number of memory pages processed between @start and @stop.
276  * @msg: Additional diagnostic message to print.
277  */
278 void swsusp_show_speed(ktime_t start, ktime_t stop,
279 		      unsigned nr_pages, char *msg)
280 {
281 	ktime_t diff;
282 	u64 elapsed_centisecs64;
283 	unsigned int centisecs;
284 	unsigned int k;
285 	unsigned int kps;
286 
287 	diff = ktime_sub(stop, start);
288 	elapsed_centisecs64 = ktime_divns(diff, 10*NSEC_PER_MSEC);
289 	centisecs = elapsed_centisecs64;
290 	if (centisecs == 0)
291 		centisecs = 1;	/* avoid div-by-zero */
292 	k = nr_pages * (PAGE_SIZE / 1024);
293 	kps = (k * 100) / centisecs;
294 	pr_info("%s %u kbytes in %u.%02u seconds (%u.%02u MB/s)\n",
295 		msg, k, centisecs / 100, centisecs % 100, kps / 1000,
296 		(kps % 1000) / 10);
297 }
298 
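/*
 * Weak stub; architectures may override this to fix up SMT state (e.g. when
 * booted with "nosmt") after resuming from the hibernation image.
 */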
299 __weak int arch_resume_nosmt(void)
300 {
301 	return 0;
302 }
303 
304 /**
305  * create_image - Create a hibernation image.
306  * @platform_mode: Whether or not to use the platform driver.
307  *
308  * Execute device drivers' "late" and "noirq" freeze callbacks, create a
309  * hibernation image and run the drivers' "noirq" and "early" thaw callbacks.
310  *
311  * Control reappears in this routine after the subsequent restore.
312  */
313 static int create_image(int platform_mode)
314 {
315 	int error;
316 
317 	error = dpm_suspend_end(PMSG_FREEZE);
318 	if (error) {
319 		pr_err("Some devices failed to power down, aborting\n");
320 		return error;
321 	}
322 
323 	error = platform_pre_snapshot(platform_mode);
324 	if (error || hibernation_test(TEST_PLATFORM))
325 		goto Platform_finish;
326 
327 	error = pm_sleep_disable_secondary_cpus();
328 	if (error || hibernation_test(TEST_CPUS))
329 		goto Enable_cpus;
330 
331 	local_irq_disable();
332 
333 	system_state = SYSTEM_SUSPEND;
334 
335 	error = syscore_suspend();
336 	if (error) {
337 		pr_err("Some system devices failed to power down, aborting\n");
338 		goto Enable_irqs;
339 	}
340 
341 	if (hibernation_test(TEST_CORE) || pm_wakeup_pending())
342 		goto Power_up;
343 
344 	in_suspend = 1;
345 	save_processor_state();
346 	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true);
347 	error = swsusp_arch_suspend();
348 	/* Restore control flow magically appears here */
349 	restore_processor_state();
350 	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
351 	if (error)
352 		pr_err("Error %d creating image\n", error);
353 
354 	if (!in_suspend) {
355 		events_check_enabled = false;
356 		clear_or_poison_free_pages();
357 	}
358 
359 	platform_leave(platform_mode);
360 
361  Power_up:
362 	syscore_resume();
363 
364  Enable_irqs:
365 	system_state = SYSTEM_RUNNING;
366 	local_irq_enable();
367 
368  Enable_cpus:
369 	pm_sleep_enable_secondary_cpus();
370 
371 	/* Allow architectures to do nosmt-specific post-resume dances */
372 	if (!in_suspend)
373 		error = arch_resume_nosmt();
374 
375  Platform_finish:
376 	platform_finish(platform_mode);
377 
378 	dpm_resume_start(in_suspend ?
379 		(error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
380 
381 	return error;
382 }
383 
384 static void shrink_shmem_memory(void)
385 {
386 	struct sysinfo info;
387 	unsigned long nr_shmem_pages, nr_freed_pages;
388 
389 	si_meminfo(&info);
390 	nr_shmem_pages = info.sharedram; /* current page count used for shmem */
391 	/*
392 	 * The intent is to reclaim all shmem pages. Though shrink_all_memory() can
393 	 * only reclaim about half of them, it's enough for creating the hibernation
394 	 * image.
395 	 */
396 	nr_freed_pages = shrink_all_memory(nr_shmem_pages);
397 	pr_debug("requested to reclaim %lu shmem pages, actually freed %lu pages\n",
398 			nr_shmem_pages, nr_freed_pages);
399 }
400 
401 /**
402  * hibernation_snapshot - Quiesce devices and create a hibernation image.
403  * @platform_mode: If set, use platform driver to prepare for the transition.
404  *
405  * This routine must be called with system_transition_mutex held.
406  */
407 int hibernation_snapshot(int platform_mode)
408 {
409 	pm_message_t msg;
410 	int error;
411 
412 	pm_suspend_clear_flags();
413 	error = platform_begin(platform_mode);
414 	if (error)
415 		goto Close;
416 
417 	/* Preallocate image memory before shutting down devices. */
418 	error = hibernate_preallocate_memory();
419 	if (error)
420 		goto Close;
421 
422 	error = freeze_kernel_threads();
423 	if (error)
424 		goto Cleanup;
425 
426 	if (hibernation_test(TEST_FREEZER)) {
427 
428 		/*
429 		 * Indicate to the caller that we are returning due to a
430 		 * successful freezer test.
431 		 */
432 		freezer_test_done = true;
433 		goto Thaw;
434 	}
435 
436 	error = dpm_prepare(PMSG_FREEZE);
437 	if (error) {
438 		dpm_complete(PMSG_RECOVER);
439 		goto Thaw;
440 	}
441 
442 	/*
443 	 * Device drivers may move lots of data to shmem in dpm_prepare(). The shmem
444 	 * pages will use lots of system memory, causing hibernation image creation
445 	 * to fail due to insufficient free memory.
446 	 * This call forces the shmem pages to be flushed to the swap device and
447 	 * reclaims system memory so that image creation can succeed.
448 	 */
449 	shrink_shmem_memory();
450 
451 	console_suspend_all();
452 
453 	error = dpm_suspend(PMSG_FREEZE);
454 
455 	if (error || hibernation_test(TEST_DEVICES))
456 		platform_recover(platform_mode);
457 	else
458 		error = create_image(platform_mode);
459 
460 	/*
461 	 * In the case that we call create_image() above, the control
462 	 * returns here (1) after the image has been created or the
463 	 * image creation has failed and (2) after a successful restore.
464 	 */
465 
466 	/* We may need to release the preallocated image pages here. */
467 	if (error || !in_suspend)
468 		swsusp_free();
469 
470 	msg = in_suspend ? (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE;
471 	dpm_resume(msg);
472 
473 	if (error || !in_suspend)
474 		pm_restore_gfp_mask();
475 
476 	console_resume_all();
477 	dpm_complete(msg);
478 
479  Close:
480 	platform_end(platform_mode);
481 	return error;
482 
483  Thaw:
484 	thaw_kernel_threads();
485  Cleanup:
486 	swsusp_free();
487 	goto Close;
488 }
489 
490 int __weak hibernate_resume_nonboot_cpu_disable(void)
491 {
492 	return suspend_disable_secondary_cpus();
493 }
494 
495 /**
496  * resume_target_kernel - Restore system state from a hibernation image.
497  * @platform_mode: Whether or not to use the platform driver.
498  *
499  * Execute device drivers' "noirq" and "late" freeze callbacks, restore the
500  * contents of highmem that have not been restored yet from the image and run
501  * the low-level code that will restore the remaining contents of memory and
502  * switch to the just restored target kernel.
503  */
504 static int resume_target_kernel(bool platform_mode)
505 {
506 	int error;
507 
508 	error = dpm_suspend_end(PMSG_QUIESCE);
509 	if (error) {
510 		pr_err("Some devices failed to power down, aborting resume\n");
511 		return error;
512 	}
513 
514 	error = platform_pre_restore(platform_mode);
515 	if (error)
516 		goto Cleanup;
517 
518 	cpuidle_pause();
519 
520 	error = hibernate_resume_nonboot_cpu_disable();
521 	if (error)
522 		goto Enable_cpus;
523 
524 	local_irq_disable();
525 	system_state = SYSTEM_SUSPEND;
526 
527 	error = syscore_suspend();
528 	if (error)
529 		goto Enable_irqs;
530 
531 	save_processor_state();
532 	error = restore_highmem();
533 	if (!error) {
534 		error = swsusp_arch_resume();
535 		/*
536 		 * The code below is only ever reached in case of a failure.
537 		 * Otherwise, execution continues at the place where
538 		 * swsusp_arch_suspend() was called.
539 		 */
540 		BUG_ON(!error);
541 		/*
542 		 * This call to restore_highmem() reverts the changes made by
543 		 * the previous one.
544 		 */
545 		restore_highmem();
546 	}
547 	/*
548 	 * The only reason why swsusp_arch_resume() can fail is memory being
549 	 * very tight, so we have to free it as soon as we can to avoid
550 	 * subsequent failures.
551 	 */
552 	swsusp_free();
553 	restore_processor_state();
554 	touch_softlockup_watchdog();
555 
556 	syscore_resume();
557 
558  Enable_irqs:
559 	system_state = SYSTEM_RUNNING;
560 	local_irq_enable();
561 
562  Enable_cpus:
563 	pm_sleep_enable_secondary_cpus();
564 
565  Cleanup:
566 	platform_restore_cleanup(platform_mode);
567 
568 	dpm_resume_start(PMSG_RECOVER);
569 
570 	return error;
571 }
572 
573 /**
574  * hibernation_restore - Quiesce devices and restore from a hibernation image.
575  * @platform_mode: If set, use platform driver to prepare for the transition.
576  *
577  * This routine must be called with system_transition_mutex held.  If it is
578  * successful, control reappears in the restored target kernel in
579  * hibernation_snapshot().
580  */
581 int hibernation_restore(int platform_mode)
582 {
583 	int error;
584 
585 	pm_prepare_console();
586 	console_suspend_all();
587 	error = dpm_suspend_start(PMSG_QUIESCE);
588 	if (!error) {
589 		error = resume_target_kernel(platform_mode);
590 		/*
591 		 * The above should either succeed and jump to the new kernel,
592 		 * or return with an error. Otherwise things are just
593 		 * undefined, so let's be paranoid.
594 		 */
595 		BUG_ON(!error);
596 	}
597 	dpm_resume_end(PMSG_RECOVER);
598 	console_resume_all();
599 	pm_restore_console();
600 	return error;
601 }
602 
603 /**
604  * hibernation_platform_enter - Power off the system using the platform driver.
605  */
606 int hibernation_platform_enter(void)
607 {
608 	int error;
609 
610 	if (!hibernation_ops)
611 		return -ENOSYS;
612 
613 	/*
614 	 * We have cancelled the power transition by running
615 	 * hibernation_ops->finish() before saving the image, so we should let
616 	 * the firmware know that we're going to enter the sleep state after all
617 	 */
618 	error = hibernation_ops->begin(PMSG_HIBERNATE);
619 	if (error)
620 		goto Close;
621 
622 	entering_platform_hibernation = true;
623 	console_suspend_all();
624 	error = dpm_suspend_start(PMSG_HIBERNATE);
625 	if (error) {
626 		if (hibernation_ops->recover)
627 			hibernation_ops->recover();
628 		goto Resume_devices;
629 	}
630 
631 	error = dpm_suspend_end(PMSG_HIBERNATE);
632 	if (error)
633 		goto Resume_devices;
634 
635 	error = hibernation_ops->prepare();
636 	if (error)
637 		goto Platform_finish;
638 
639 	error = pm_sleep_disable_secondary_cpus();
640 	if (error)
641 		goto Enable_cpus;
642 
643 	local_irq_disable();
644 	system_state = SYSTEM_SUSPEND;
645 
646 	error = syscore_suspend();
647 	if (error)
648 		goto Enable_irqs;
649 
650 	if (pm_wakeup_pending()) {
651 		error = -EAGAIN;
652 		goto Power_up;
653 	}
654 
655 	hibernation_ops->enter();
656 	/* We should never get here */
657 	while (1);
658 
659  Power_up:
660 	syscore_resume();
661  Enable_irqs:
662 	system_state = SYSTEM_RUNNING;
663 	local_irq_enable();
664 
665  Enable_cpus:
666 	pm_sleep_enable_secondary_cpus();
667 
668  Platform_finish:
669 	hibernation_ops->finish();
670 
671 	dpm_resume_start(PMSG_RESTORE);
672 
673  Resume_devices:
674 	entering_platform_hibernation = false;
675 	dpm_resume_end(PMSG_RESTORE);
676 	console_resume_all();
677 
678  Close:
679 	hibernation_ops->end();
680 
681 	return error;
682 }
683 
684 /**
685  * power_down - Shut the machine down for hibernation.
686  *
687  * Use the platform driver, if configured, to put the system into the sleep
688  * state corresponding to hibernation, or try to power it off or reboot,
689  * depending on the value of hibernation_mode.
690  */
691 static void power_down(void)
692 {
693 	int error;
694 
695 #ifdef CONFIG_SUSPEND
696 	if (hibernation_mode == HIBERNATION_SUSPEND) {
697 		error = suspend_devices_and_enter(mem_sleep_current);
698 		if (error) {
699 			hibernation_mode = hibernation_ops ?
700 						HIBERNATION_PLATFORM :
701 						HIBERNATION_SHUTDOWN;
702 		} else {
703 			/* Restore swap signature. */
704 			error = swsusp_unmark();
705 			if (error)
706 				pr_err("Swap will be unusable! Try swapon -a.\n");
707 
708 			return;
709 		}
710 	}
711 #endif
712 
713 	switch (hibernation_mode) {
714 	case HIBERNATION_REBOOT:
715 		kernel_restart(NULL);
716 		break;
717 	case HIBERNATION_PLATFORM:
718 		error = hibernation_platform_enter();
719 		if (error == -EAGAIN || error == -EBUSY) {
720 			swsusp_unmark();
721 			events_check_enabled = false;
722 			pr_info("Wakeup event detected during hibernation, rolling back.\n");
723 			return;
724 		}
725 		fallthrough;
726 	case HIBERNATION_SHUTDOWN:
727 		if (kernel_can_power_off()) {
728 			entering_platform_hibernation = true;
729 			kernel_power_off();
730 			entering_platform_hibernation = false;
731 		}
732 		break;
733 	}
734 	kernel_halt();
735 	/*
736 	 * Valid image is on the disk, if we continue we risk serious data
737 	 * corruption after resume.
738 	 */
739 	pr_crit("Power down manually\n");
740 	while (1)
741 		cpu_relax();
742 }
743 
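/*
 * Read the hibernation image from the resume device and restore it.  A
 * successful restore does not return here; control continues in the restored
 * image kernel, so this function only returns on failure.
 */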
744 static int load_image_and_restore(void)
745 {
746 	int error;
747 	unsigned int flags;
748 
749 	pm_pr_dbg("Loading hibernation image.\n");
750 
751 	lock_device_hotplug();
752 	error = create_basic_memory_bitmaps();
753 	if (error) {
754 		swsusp_close();
755 		goto Unlock;
756 	}
757 
758 	error = swsusp_read(&flags);
759 	swsusp_close();
760 	if (!error)
761 		error = hibernation_restore(flags & SF_PLATFORM_MODE);
762 
763 	pr_err("Failed to load image, recovering.\n");
764 	swsusp_free();
765 	free_basic_memory_bitmaps();
766  Unlock:
767 	unlock_device_hotplug();
768 
769 	return error;
770 }
771 
772 #define COMPRESSION_ALGO_LZO "lzo"
773 #define COMPRESSION_ALGO_LZ4 "lz4"
774 
775 /**
776  * hibernate - Carry out system hibernation, including saving the image.
777  */
778 int hibernate(void)
779 {
780 	bool snapshot_test = false;
781 	unsigned int sleep_flags;
782 	int error;
783 
784 	if (!hibernation_available()) {
785 		pm_pr_dbg("Hibernation not available.\n");
786 		return -EPERM;
787 	}
788 
789 	/*
790 	 * Query for the compression algorithm support if compression is enabled.
791 	 */
792 	if (!nocompress) {
793 		strscpy(hib_comp_algo, hibernate_compressor);
794 		if (!crypto_has_acomp(hib_comp_algo, 0, CRYPTO_ALG_ASYNC)) {
795 			pr_err("%s compression is not available\n", hib_comp_algo);
796 			return -EOPNOTSUPP;
797 		}
798 	}
799 
800 	sleep_flags = lock_system_sleep();
801 	/* The snapshot device should not be opened while we're running */
802 	if (!hibernate_acquire()) {
803 		error = -EBUSY;
804 		goto Unlock;
805 	}
806 
807 	pr_info("hibernation entry\n");
808 	pm_prepare_console();
809 	error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION);
810 	if (error)
811 		goto Restore;
812 
813 	ksys_sync_helper();
814 	if (filesystem_freeze_enabled)
815 		filesystems_freeze();
816 
817 	error = freeze_processes();
818 	if (error)
819 		goto Exit;
820 
821 	lock_device_hotplug();
822 	/* Allocate memory management structures */
823 	error = create_basic_memory_bitmaps();
824 	if (error)
825 		goto Thaw;
826 
827 	error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
828 	if (error || freezer_test_done)
829 		goto Free_bitmaps;
830 
831 	if (in_suspend) {
832 		unsigned int flags = 0;
833 
834 		if (hibernation_mode == HIBERNATION_PLATFORM)
835 			flags |= SF_PLATFORM_MODE;
836 		if (nocompress) {
837 			flags |= SF_NOCOMPRESS_MODE;
838 		} else {
839 			flags |= SF_CRC32_MODE;
840 
841 			/*
842 			 * By default, LZO compression is enabled. Use SF_COMPRESSION_ALG_LZ4
843 			 * to override this behaviour and use LZ4.
844 			 *
845 			 * Refer to kernel/power/power.h for more details.
846 			 */
847 
848 			if (!strcmp(hib_comp_algo, COMPRESSION_ALGO_LZ4))
849 				flags |= SF_COMPRESSION_ALG_LZ4;
850 			else
851 				flags |= SF_COMPRESSION_ALG_LZO;
852 		}
853 
854 		pm_pr_dbg("Writing hibernation image.\n");
855 		error = swsusp_write(flags);
856 		swsusp_free();
857 		if (!error) {
858 			if (hibernation_mode == HIBERNATION_TEST_RESUME)
859 				snapshot_test = true;
860 			else
861 				power_down();
862 		}
863 		in_suspend = 0;
864 		pm_restore_gfp_mask();
865 	} else {
866 		pm_pr_dbg("Hibernation image restored successfully.\n");
867 	}
868 
869  Free_bitmaps:
870 	free_basic_memory_bitmaps();
871  Thaw:
872 	unlock_device_hotplug();
873 	if (snapshot_test) {
874 		pm_pr_dbg("Checking hibernation image\n");
875 		error = swsusp_check(false);
876 		if (!error)
877 			error = load_image_and_restore();
878 	}
879 	thaw_processes();
880 
881 	/* Don't bother checking whether freezer_test_done is true */
882 	freezer_test_done = false;
883  Exit:
884 	filesystems_thaw();
885 	pm_notifier_call_chain(PM_POST_HIBERNATION);
886  Restore:
887 	pm_restore_console();
888 	hibernate_release();
889  Unlock:
890 	unlock_system_sleep(sleep_flags);
891 	pr_info("hibernation exit\n");
892 
893 	return error;
894 }
895 
896 /**
897  * hibernate_quiet_exec - Execute a function with all devices frozen.
898  * @func: Function to execute.
899  * @data: Data pointer to pass to @func.
900  *
901  * Return the @func return value or an error code if it cannot be executed.
902  */
903 int hibernate_quiet_exec(int (*func)(void *data), void *data)
904 {
905 	unsigned int sleep_flags;
906 	int error;
907 
908 	sleep_flags = lock_system_sleep();
909 
910 	if (!hibernate_acquire()) {
911 		error = -EBUSY;
912 		goto unlock;
913 	}
914 
915 	pm_prepare_console();
916 
917 	error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION);
918 	if (error)
919 		goto restore;
920 
921 	if (filesystem_freeze_enabled)
922 		filesystems_freeze();
923 
924 	error = freeze_processes();
925 	if (error)
926 		goto exit;
927 
928 	lock_device_hotplug();
929 
930 	pm_suspend_clear_flags();
931 
932 	error = platform_begin(true);
933 	if (error)
934 		goto thaw;
935 
936 	error = freeze_kernel_threads();
937 	if (error)
938 		goto thaw;
939 
940 	error = dpm_prepare(PMSG_FREEZE);
941 	if (error)
942 		goto dpm_complete;
943 
944 	console_suspend_all();
945 
946 	error = dpm_suspend(PMSG_FREEZE);
947 	if (error)
948 		goto dpm_resume;
949 
950 	error = dpm_suspend_end(PMSG_FREEZE);
951 	if (error)
952 		goto dpm_resume;
953 
954 	error = platform_pre_snapshot(true);
955 	if (error)
956 		goto skip;
957 
958 	error = func(data);
959 
960 skip:
961 	platform_finish(true);
962 
963 	dpm_resume_start(PMSG_THAW);
964 
965 dpm_resume:
966 	dpm_resume(PMSG_THAW);
967 
968 	console_resume_all();
969 
970 dpm_complete:
971 	dpm_complete(PMSG_THAW);
972 
973 	thaw_kernel_threads();
974 
975 thaw:
976 	platform_end(true);
977 
978 	unlock_device_hotplug();
979 
980 	thaw_processes();
981 
982 exit:
983 	filesystems_thaw();
984 	pm_notifier_call_chain(PM_POST_HIBERNATION);
985 
986 restore:
987 	pm_restore_console();
988 
989 	hibernate_release();
990 
991 unlock:
992 	unlock_system_sleep(sleep_flags);
993 
994 	return error;
995 }
996 EXPORT_SYMBOL_GPL(hibernate_quiet_exec);
997 
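/*
 * Resolve resume_file (the resume= kernel parameter or the default resume
 * partition) into swsusp_resume_device, optionally waiting for the device
 * to appear.
 */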
998 static int __init find_resume_device(void)
999 {
1000 	if (!strlen(resume_file))
1001 		return -ENOENT;
1002 
1003 	pm_pr_dbg("Checking hibernation image partition %s\n", resume_file);
1004 
1005 	if (resume_delay) {
1006 		pr_info("Waiting %dsec before reading resume device ...\n",
1007 			resume_delay);
1008 		ssleep(resume_delay);
1009 	}
1010 
1011 	/* Check if the device is there */
1012 	if (!early_lookup_bdev(resume_file, &swsusp_resume_device))
1013 		return 0;
1014 
1015 	/*
1016 	 * Some device discovery might still be in progress; we need to wait for
1017 	 * this to finish.
1018 	 */
1019 	wait_for_device_probe();
1020 	if (resume_wait) {
1021 		while (early_lookup_bdev(resume_file, &swsusp_resume_device))
1022 			msleep(10);
1023 		async_synchronize_full();
1024 	}
1025 
1026 	return early_lookup_bdev(resume_file, &swsusp_resume_device);
1027 }
1028 
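/*
 * Try to resume from the image on swsusp_resume_device.  Called from the
 * late initcall below and from resume_store() when user space writes the
 * resume device to /sys/power/resume.
 */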
1029 static int software_resume(void)
1030 {
1031 	int error;
1032 
1033 	pm_pr_dbg("Hibernation image partition %d:%d present\n",
1034 		MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
1035 
1036 	pm_pr_dbg("Looking for hibernation image.\n");
1037 
1038 	mutex_lock(&system_transition_mutex);
1039 	error = swsusp_check(true);
1040 	if (error)
1041 		goto Unlock;
1042 
1043 	/*
1044 	 * Check if the hibernation image is compressed. If so, query for
1045 	 * the algorithm support.
1046 	 */
1047 	if (!(swsusp_header_flags & SF_NOCOMPRESS_MODE)) {
1048 		if (swsusp_header_flags & SF_COMPRESSION_ALG_LZ4)
1049 			strscpy(hib_comp_algo, COMPRESSION_ALGO_LZ4);
1050 		else
1051 			strscpy(hib_comp_algo, COMPRESSION_ALGO_LZO);
1052 		if (!crypto_has_acomp(hib_comp_algo, 0, CRYPTO_ALG_ASYNC)) {
1053 			pr_err("%s compression is not available\n", hib_comp_algo);
1054 			error = -EOPNOTSUPP;
1055 			goto Unlock;
1056 		}
1057 	}
1058 
1059 	/* The snapshot device should not be opened while we're running */
1060 	if (!hibernate_acquire()) {
1061 		error = -EBUSY;
1062 		swsusp_close();
1063 		goto Unlock;
1064 	}
1065 
1066 	pr_info("resume from hibernation\n");
1067 	pm_prepare_console();
1068 	error = pm_notifier_call_chain_robust(PM_RESTORE_PREPARE, PM_POST_RESTORE);
1069 	if (error)
1070 		goto Restore;
1071 
1072 	if (filesystem_freeze_enabled)
1073 		filesystems_freeze();
1074 
1075 	pm_pr_dbg("Preparing processes for hibernation restore.\n");
1076 	error = freeze_processes();
1077 	if (error) {
1078 		filesystems_thaw();
1079 		goto Close_Finish;
1080 	}
1081 
1082 	error = freeze_kernel_threads();
1083 	if (error) {
1084 		thaw_processes();
1085 		filesystems_thaw();
1086 		goto Close_Finish;
1087 	}
1088 
1089 	error = load_image_and_restore();
1090 	thaw_processes();
1091 	filesystems_thaw();
1092  Finish:
1093 	pm_notifier_call_chain(PM_POST_RESTORE);
1094  Restore:
1095 	pm_restore_console();
1096 	pr_info("resume failed (%d)\n", error);
1097 	hibernate_release();
1098 	/* For success case, the suspend path will release the lock */
1099  Unlock:
1100 	mutex_unlock(&system_transition_mutex);
1101 	pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
1102 	return error;
1103  Close_Finish:
1104 	swsusp_close();
1105 	goto Finish;
1106 }
1107 
1108 /**
1109  * software_resume_initcall - Resume from a saved hibernation image.
1110  *
1111  * This routine is called as a late initcall, when all devices have been
1112  * discovered and initialized already.
1113  *
1114  * The image reading code is called to see if there is a hibernation image
1115  * available for reading.  If that is the case, devices are quiesced and the
1116  * contents of memory is restored from the saved image.
1117  *
1118  * If this is successful, control reappears in the restored target kernel in
1119  * hibernation_snapshot() which returns to hibernate().  Otherwise, the routine
1120  * attempts to recover gracefully and make the kernel return to the normal mode
1121  * of operation.
1122  */
1123 static int __init software_resume_initcall(void)
1124 {
1125 	/*
1126 	 * If the user said "noresume", bail out early.
1127 	 */
1128 	if (noresume || !hibernation_available())
1129 		return 0;
1130 
1131 	if (!swsusp_resume_device) {
1132 		int error = find_resume_device();
1133 
1134 		if (error)
1135 			return error;
1136 	}
1137 
1138 	return software_resume();
1139 }
1140 late_initcall_sync(software_resume_initcall);
1141 
1142 
1143 static const char * const hibernation_modes[] = {
1144 	[HIBERNATION_PLATFORM]	= "platform",
1145 	[HIBERNATION_SHUTDOWN]	= "shutdown",
1146 	[HIBERNATION_REBOOT]	= "reboot",
1147 #ifdef CONFIG_SUSPEND
1148 	[HIBERNATION_SUSPEND]	= "suspend",
1149 #endif
1150 	[HIBERNATION_TEST_RESUME]	= "test_resume",
1151 };
1152 
1153 /*
1154  * /sys/power/disk - Control hibernation mode.
1155  *
1156  * Hibernation can be handled in several ways.  There are a few different ways
1157  * to put the system into the sleep state: using the platform driver (e.g. ACPI
1158  * or other hibernation_ops), powering it off or rebooting it (for testing
1159  * mostly).
1160  *
1161  * The sysfs file /sys/power/disk provides an interface for selecting the
1162  * hibernation mode to use.  Reading from this file causes the available modes
1163  * to be printed.  The following modes can be supported:
1164  *
1165  *	'platform'
1166  *	'shutdown'
1167  *	'reboot'
1168  *
1169  * If a platform hibernation driver is in use, 'platform' will be supported
1170  * and will be used by default.  Otherwise, 'shutdown' will be used by default.
1171  * The selected option (i.e. the one corresponding to the current value of
1172  * hibernation_mode) is enclosed by a square bracket.
1173  *
1174  * To select a given hibernation mode it is necessary to write the mode's
1175  * string representation (as returned by reading from /sys/power/disk) back
1176  * into /sys/power/disk.
1177  */
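/*
 * Example (assuming a platform driver is registered and CONFIG_SUSPEND is
 * enabled):
 *
 *	# cat /sys/power/disk
 *	[platform] shutdown reboot suspend test_resume
 *	# echo reboot > /sys/power/disk
 */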
1178 
1179 static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
1180 			 char *buf)
1181 {
1182 	ssize_t count = 0;
1183 	int i;
1184 
1185 	if (!hibernation_available())
1186 		return sysfs_emit(buf, "[disabled]\n");
1187 
1188 	for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
1189 		if (!hibernation_modes[i])
1190 			continue;
1191 		switch (i) {
1192 		case HIBERNATION_SHUTDOWN:
1193 		case HIBERNATION_REBOOT:
1194 #ifdef CONFIG_SUSPEND
1195 		case HIBERNATION_SUSPEND:
1196 #endif
1197 		case HIBERNATION_TEST_RESUME:
1198 			break;
1199 		case HIBERNATION_PLATFORM:
1200 			if (hibernation_ops)
1201 				break;
1202 			/* not a valid mode, continue with loop */
1203 			continue;
1204 		}
1205 		if (i == hibernation_mode)
1206 			count += sysfs_emit_at(buf, count, "[%s] ", hibernation_modes[i]);
1207 		else
1208 			count += sysfs_emit_at(buf, count, "%s ", hibernation_modes[i]);
1209 	}
1210 
1211 	/* Convert the last space to a newline if needed. */
1212 	if (count > 0)
1213 		buf[count - 1] = '\n';
1214 
1215 	return count;
1216 }
1217 
1218 static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
1219 			  const char *buf, size_t n)
1220 {
1221 	int mode = HIBERNATION_INVALID;
1222 	unsigned int sleep_flags;
1223 	int error = 0;
1224 	int len;
1225 	char *p;
1226 	int i;
1227 
1228 	if (!hibernation_available())
1229 		return -EPERM;
1230 
1231 	p = memchr(buf, '\n', n);
1232 	len = p ? p - buf : n;
1233 
1234 	sleep_flags = lock_system_sleep();
1235 	for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
1236 		if (len == strlen(hibernation_modes[i])
1237 		    && !strncmp(buf, hibernation_modes[i], len)) {
1238 			mode = i;
1239 			break;
1240 		}
1241 	}
1242 	if (mode != HIBERNATION_INVALID) {
1243 		switch (mode) {
1244 		case HIBERNATION_SHUTDOWN:
1245 		case HIBERNATION_REBOOT:
1246 #ifdef CONFIG_SUSPEND
1247 		case HIBERNATION_SUSPEND:
1248 #endif
1249 		case HIBERNATION_TEST_RESUME:
1250 			hibernation_mode = mode;
1251 			break;
1252 		case HIBERNATION_PLATFORM:
1253 			if (hibernation_ops)
1254 				hibernation_mode = mode;
1255 			else
1256 				error = -EINVAL;
1257 		}
1258 	} else
1259 		error = -EINVAL;
1260 
1261 	if (!error)
1262 		pm_pr_dbg("Hibernation mode set to '%s'\n",
1263 			       hibernation_modes[mode]);
1264 	unlock_system_sleep(sleep_flags);
1265 	return error ? error : n;
1266 }
1267 
1268 power_attr(disk);
1269 
1270 static ssize_t resume_show(struct kobject *kobj, struct kobj_attribute *attr,
1271 			   char *buf)
1272 {
1273 	return sysfs_emit(buf, "%d:%d\n", MAJOR(swsusp_resume_device),
1274 			  MINOR(swsusp_resume_device));
1275 }
1276 
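/*
 * Accepts a resume device written to /sys/power/resume as a device path
 * (looked up via lookup_bdev()), as "major:minor", or as a hexadecimal
 * device number, and then attempts to resume from it.
 */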
1277 static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
1278 			    const char *buf, size_t n)
1279 {
1280 	unsigned int sleep_flags;
1281 	int len = n;
1282 	char *name;
1283 	dev_t dev;
1284 	int error;
1285 
1286 	if (!hibernation_available())
1287 		return n;
1288 
1289 	if (len && buf[len-1] == '\n')
1290 		len--;
1291 	name = kstrndup(buf, len, GFP_KERNEL);
1292 	if (!name)
1293 		return -ENOMEM;
1294 
1295 	error = lookup_bdev(name, &dev);
1296 	if (error) {
1297 		unsigned maj, min, offset;
1298 		char *p, dummy;
1299 
1300 		error = 0;
1301 		if (sscanf(name, "%u:%u%c", &maj, &min, &dummy) == 2 ||
1302 		    sscanf(name, "%u:%u:%u:%c", &maj, &min, &offset,
1303 				&dummy) == 3) {
1304 			dev = MKDEV(maj, min);
1305 			if (maj != MAJOR(dev) || min != MINOR(dev))
1306 				error = -EINVAL;
1307 		} else {
1308 			dev = new_decode_dev(simple_strtoul(name, &p, 16));
1309 			if (*p)
1310 				error = -EINVAL;
1311 		}
1312 	}
1313 	kfree(name);
1314 	if (error)
1315 		return error;
1316 
1317 	sleep_flags = lock_system_sleep();
1318 	swsusp_resume_device = dev;
1319 	unlock_system_sleep(sleep_flags);
1320 
1321 	pm_pr_dbg("Configured hibernation resume from disk to %u\n",
1322 		  swsusp_resume_device);
1323 	noresume = 0;
1324 	software_resume();
1325 	return n;
1326 }
1327 
1328 power_attr(resume);
1329 
1330 static ssize_t resume_offset_show(struct kobject *kobj,
1331 				  struct kobj_attribute *attr, char *buf)
1332 {
1333 	return sysfs_emit(buf, "%llu\n", (unsigned long long)swsusp_resume_block);
1334 }
1335 
1336 static ssize_t resume_offset_store(struct kobject *kobj,
1337 				   struct kobj_attribute *attr, const char *buf,
1338 				   size_t n)
1339 {
1340 	unsigned long long offset;
1341 	int rc;
1342 
1343 	rc = kstrtoull(buf, 0, &offset);
1344 	if (rc)
1345 		return rc;
1346 	swsusp_resume_block = offset;
1347 
1348 	return n;
1349 }
1350 
1351 power_attr(resume_offset);
1352 
1353 static ssize_t image_size_show(struct kobject *kobj, struct kobj_attribute *attr,
1354 			       char *buf)
1355 {
1356 	return sysfs_emit(buf, "%lu\n", image_size);
1357 }
1358 
1359 static ssize_t image_size_store(struct kobject *kobj, struct kobj_attribute *attr,
1360 				const char *buf, size_t n)
1361 {
1362 	unsigned long size;
1363 
1364 	if (sscanf(buf, "%lu", &size) == 1) {
1365 		image_size = size;
1366 		return n;
1367 	}
1368 
1369 	return -EINVAL;
1370 }
1371 
1372 power_attr(image_size);
1373 
1374 static ssize_t reserved_size_show(struct kobject *kobj,
1375 				  struct kobj_attribute *attr, char *buf)
1376 {
1377 	return sysfs_emit(buf, "%lu\n", reserved_size);
1378 }
1379 
1380 static ssize_t reserved_size_store(struct kobject *kobj,
1381 				   struct kobj_attribute *attr,
1382 				   const char *buf, size_t n)
1383 {
1384 	unsigned long size;
1385 
1386 	if (sscanf(buf, "%lu", &size) == 1) {
1387 		reserved_size = size;
1388 		return n;
1389 	}
1390 
1391 	return -EINVAL;
1392 }
1393 
1394 power_attr(reserved_size);
1395 
1396 static struct attribute *g[] = {
1397 	&disk_attr.attr,
1398 	&resume_offset_attr.attr,
1399 	&resume_attr.attr,
1400 	&image_size_attr.attr,
1401 	&reserved_size_attr.attr,
1402 	NULL,
1403 };
1404 
1405 
1406 static const struct attribute_group attr_group = {
1407 	.attrs = g,
1408 };
1409 
1410 
1411 static int __init pm_disk_init(void)
1412 {
1413 	return sysfs_create_group(power_kobj, &attr_group);
1414 }
1415 
1416 core_initcall(pm_disk_init);
1417 
1418 
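/*
 * Handlers for the kernel command line options registered with __setup() at
 * the bottom of this file: resume=, resume_offset=, hibernate=, noresume,
 * resumewait, resumedelay= and nohibernate.
 */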
1419 static int __init resume_setup(char *str)
1420 {
1421 	if (noresume)
1422 		return 1;
1423 
1424 	strscpy(resume_file, str);
1425 	return 1;
1426 }
1427 
1428 static int __init resume_offset_setup(char *str)
1429 {
1430 	unsigned long long offset;
1431 
1432 	if (noresume)
1433 		return 1;
1434 
1435 	if (sscanf(str, "%llu", &offset) == 1)
1436 		swsusp_resume_block = offset;
1437 
1438 	return 1;
1439 }
1440 
1441 static int __init hibernate_setup(char *str)
1442 {
1443 	if (!strncmp(str, "noresume", 8)) {
1444 		noresume = 1;
1445 	} else if (!strncmp(str, "nocompress", 10)) {
1446 		nocompress = 1;
1447 	} else if (!strncmp(str, "no", 2)) {
1448 		noresume = 1;
1449 		nohibernate = 1;
1450 	} else if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)
1451 		   && !strncmp(str, "protect_image", 13)) {
1452 		enable_restore_image_protection();
1453 	}
1454 	return 1;
1455 }
1456 
1457 static int __init noresume_setup(char *str)
1458 {
1459 	noresume = 1;
1460 	return 1;
1461 }
1462 
1463 static int __init resumewait_setup(char *str)
1464 {
1465 	resume_wait = 1;
1466 	return 1;
1467 }
1468 
1469 static int __init resumedelay_setup(char *str)
1470 {
1471 	int rc = kstrtouint(str, 0, &resume_delay);
1472 
1473 	if (rc)
1474 		pr_warn("resumedelay: bad option string '%s'\n", str);
1475 	return 1;
1476 }
1477 
1478 static int __init nohibernate_setup(char *str)
1479 {
1480 	noresume = 1;
1481 	nohibernate = 1;
1482 	return 1;
1483 }
1484 
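/* Compression algorithms selectable via the "compressor" module parameter. */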
1485 static const char * const comp_alg_enabled[] = {
1486 #if IS_ENABLED(CONFIG_CRYPTO_LZO)
1487 	COMPRESSION_ALGO_LZO,
1488 #endif
1489 #if IS_ENABLED(CONFIG_CRYPTO_LZ4)
1490 	COMPRESSION_ALGO_LZ4,
1491 #endif
1492 };
1493 
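/*
 * Set the compression algorithm to use for the next hibernation.  Rejected
 * if a system transition is already in progress or if the algorithm is not
 * listed in comp_alg_enabled[].
 */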
1494 static int hibernate_compressor_param_set(const char *compressor,
1495 		const struct kernel_param *kp)
1496 {
1497 	int index, ret;
1498 
1499 	if (!mutex_trylock(&system_transition_mutex))
1500 		return -EBUSY;
1501 
1502 	index = sysfs_match_string(comp_alg_enabled, compressor);
1503 	if (index >= 0) {
1504 		ret = param_set_copystring(comp_alg_enabled[index], kp);
1505 		if (!ret)
1506 			strscpy(hib_comp_algo, comp_alg_enabled[index]);
1507 	} else {
1508 		ret = index;
1509 	}
1510 
1511 	mutex_unlock(&system_transition_mutex);
1512 
1513 	if (ret)
1514 		pr_debug("Cannot set specified compressor %s\n",
1515 			 compressor);
1516 
1517 	return ret;
1518 }
1519 
1520 static const struct kernel_param_ops hibernate_compressor_param_ops = {
1521 	.set    = hibernate_compressor_param_set,
1522 	.get    = param_get_string,
1523 };
1524 
1525 static struct kparam_string hibernate_compressor_param_string = {
1526 	.maxlen = sizeof(hibernate_compressor),
1527 	.string = hibernate_compressor,
1528 };
1529 
1530 module_param_cb(compressor, &hibernate_compressor_param_ops,
1531 		&hibernate_compressor_param_string, 0644);
1532 MODULE_PARM_DESC(compressor,
1533 		 "Compression algorithm to be used with hibernation");
1534 
1535 __setup("noresume", noresume_setup);
1536 __setup("resume_offset=", resume_offset_setup);
1537 __setup("resume=", resume_setup);
1538 __setup("hibernate=", hibernate_setup);
1539 __setup("resumewait", resumewait_setup);
1540 __setup("resumedelay=", resumedelay_setup);
1541 __setup("nohibernate", nohibernate_setup);
1542