xref: /linux/kernel/power/hibernate.c (revision a1228f048a314b9280784a2cbd757cac74705589)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * kernel/power/hibernate.c - Hibernation (a.k.a. suspend-to-disk) support.
4  *
5  * Copyright (c) 2003 Patrick Mochel
6  * Copyright (c) 2003 Open Source Development Lab
7  * Copyright (c) 2004 Pavel Machek <pavel@ucw.cz>
8  * Copyright (c) 2009 Rafael J. Wysocki, Novell Inc.
9  * Copyright (C) 2012 Bojan Smojver <bojan@rexursive.com>
10  */
11 
12 #define pr_fmt(fmt) "PM: hibernation: " fmt
13 
14 #include <crypto/acompress.h>
15 #include <linux/blkdev.h>
16 #include <linux/export.h>
17 #include <linux/suspend.h>
18 #include <linux/reboot.h>
19 #include <linux/string.h>
20 #include <linux/device.h>
21 #include <linux/async.h>
22 #include <linux/delay.h>
23 #include <linux/fs.h>
24 #include <linux/mount.h>
25 #include <linux/pm.h>
26 #include <linux/nmi.h>
27 #include <linux/console.h>
28 #include <linux/cpu.h>
29 #include <linux/freezer.h>
30 #include <linux/gfp.h>
31 #include <linux/syscore_ops.h>
32 #include <linux/ctype.h>
33 #include <linux/ktime.h>
34 #include <linux/security.h>
35 #include <linux/secretmem.h>
36 #include <trace/events/power.h>
37 
38 #include "power.h"
39 
40 
41 static int nocompress;
42 static int noresume;
43 static int nohibernate;
44 static int resume_wait;
45 static unsigned int resume_delay;
46 static char resume_file[256] = CONFIG_PM_STD_PARTITION;
47 dev_t swsusp_resume_device;
48 sector_t swsusp_resume_block;
49 __visible int in_suspend __nosavedata;
50 
51 static char hibernate_compressor[CRYPTO_MAX_ALG_NAME] = CONFIG_HIBERNATION_DEF_COMP;
52 
53 /*
54  * Compression/decompression algorithm to be used while saving/loading
55  * the image to/from disk. It is later used in 'kernel/power/swap.c'
56  * to allocate the compression streams.
57  */
58 char hib_comp_algo[CRYPTO_MAX_ALG_NAME];
59 
60 enum {
61 	HIBERNATION_INVALID,
62 	HIBERNATION_PLATFORM,
63 	HIBERNATION_SHUTDOWN,
64 	HIBERNATION_REBOOT,
65 #ifdef CONFIG_SUSPEND
66 	HIBERNATION_SUSPEND,
67 #endif
68 	HIBERNATION_TEST_RESUME,
69 	/* keep last */
70 	__HIBERNATION_AFTER_LAST
71 };
72 #define HIBERNATION_MAX (__HIBERNATION_AFTER_LAST-1)
73 #define HIBERNATION_FIRST (HIBERNATION_INVALID + 1)
74 
75 static int hibernation_mode = HIBERNATION_SHUTDOWN;
76 
77 bool freezer_test_done;
78 
79 static const struct platform_hibernation_ops *hibernation_ops;
80 
81 static atomic_t hibernate_atomic = ATOMIC_INIT(1);
82 
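/*
 * Exclusion guard for hibernation operations: hibernate_atomic starts at 1,
 * so only one hibernate_acquire() can succeed until hibernate_release() is
 * called.  hibernate() and software_resume() take it so that, for instance,
 * the snapshot device cannot be opened while they are running.
 */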
83 bool hibernate_acquire(void)
84 {
85 	return atomic_add_unless(&hibernate_atomic, -1, 0);
86 }
87 
88 void hibernate_release(void)
89 {
90 	atomic_inc(&hibernate_atomic);
91 }
92 
93 bool hibernation_in_progress(void)
94 {
95 	return !atomic_read(&hibernate_atomic);
96 }
97 
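/*
 * Hibernation is unavailable if it was disabled on the command line, if the
 * kernel is locked down, or if secretmem or CXL memory is in active use
 * (both are incompatible with writing memory out to the hibernation image).
 */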
98 bool hibernation_available(void)
99 {
100 	return nohibernate == 0 &&
101 		!security_locked_down(LOCKDOWN_HIBERNATION) &&
102 		!secretmem_active() && !cxl_mem_active();
103 }
104 
105 /**
106  * hibernation_set_ops - Set the global hibernate operations.
107  * @ops: Hibernation operations to use in subsequent hibernation transitions.
108  */
109 void hibernation_set_ops(const struct platform_hibernation_ops *ops)
110 {
111 	unsigned int sleep_flags;
112 
113 	if (ops && !(ops->begin && ops->end && ops->pre_snapshot
114 	    && ops->prepare && ops->finish && ops->enter && ops->pre_restore
115 	    && ops->restore_cleanup && ops->leave)) {
116 		WARN_ON(1);
117 		return;
118 	}
119 
120 	sleep_flags = lock_system_sleep();
121 
122 	hibernation_ops = ops;
123 	if (ops)
124 		hibernation_mode = HIBERNATION_PLATFORM;
125 	else if (hibernation_mode == HIBERNATION_PLATFORM)
126 		hibernation_mode = HIBERNATION_SHUTDOWN;
127 
128 	unlock_system_sleep(sleep_flags);
129 }
130 EXPORT_SYMBOL_GPL(hibernation_set_ops);
131 
132 static bool entering_platform_hibernation;
133 
134 bool system_entering_hibernation(void)
135 {
136 	return entering_platform_hibernation;
137 }
138 EXPORT_SYMBOL(system_entering_hibernation);
139 
140 #ifdef CONFIG_PM_DEBUG
141 static unsigned int pm_test_delay = 5;
142 module_param(pm_test_delay, uint, 0644);
143 MODULE_PARM_DESC(pm_test_delay,
144 		 "Number of seconds to wait before resuming from hibernation test");
145 static void hibernation_debug_sleep(void)
146 {
147 	pr_info("hibernation debug: Waiting for %d second(s).\n",
148 		pm_test_delay);
149 	mdelay(pm_test_delay * 1000);
150 }
151 
152 static int hibernation_test(int level)
153 {
154 	if (pm_test_level == level) {
155 		hibernation_debug_sleep();
156 		return 1;
157 	}
158 	return 0;
159 }
160 #else /* !CONFIG_PM_DEBUG */
161 static int hibernation_test(int level) { return 0; }
162 #endif /* !CONFIG_PM_DEBUG */
163 
164 /**
165  * platform_begin - Call platform to start hibernation.
166  * @platform_mode: Whether or not to use the platform driver.
167  */
168 static int platform_begin(int platform_mode)
169 {
170 	return (platform_mode && hibernation_ops) ?
171 		hibernation_ops->begin(PMSG_FREEZE) : 0;
172 }
173 
174 /**
175  * platform_end - Call platform to finish transition to the working state.
176  * @platform_mode: Whether or not to use the platform driver.
177  */
178 static void platform_end(int platform_mode)
179 {
180 	if (platform_mode && hibernation_ops)
181 		hibernation_ops->end();
182 }
183 
184 /**
185  * platform_pre_snapshot - Call platform to prepare the machine for hibernation.
186  * @platform_mode: Whether or not to use the platform driver.
187  *
188  * Use the platform driver to prepare the system for creating a hibernate image,
189  * if so configured, and return an error code if that fails.
190  */
191 
192 static int platform_pre_snapshot(int platform_mode)
193 {
194 	return (platform_mode && hibernation_ops) ?
195 		hibernation_ops->pre_snapshot() : 0;
196 }
197 
198 /**
199  * platform_leave - Call platform to prepare a transition to the working state.
200  * @platform_mode: Whether or not to use the platform driver.
201  *
202  * Use the platform driver to prepare the machine for switching to the
203  * normal mode of operation.
204  *
205  * This routine is called on one CPU with interrupts disabled.
206  */
207 static void platform_leave(int platform_mode)
208 {
209 	if (platform_mode && hibernation_ops)
210 		hibernation_ops->leave();
211 }
212 
213 /**
214  * platform_finish - Call platform to switch the system to the working state.
215  * @platform_mode: Whether or not to use the platform driver.
216  *
217  * Use the platform driver to switch the machine to the normal mode of
218  * operation.
219  *
220  * This routine must be called after platform_prepare().
221  */
222 static void platform_finish(int platform_mode)
223 {
224 	if (platform_mode && hibernation_ops)
225 		hibernation_ops->finish();
226 }
227 
228 /**
229  * platform_pre_restore - Prepare for hibernate image restoration.
230  * @platform_mode: Whether or not to use the platform driver.
231  *
232  * Use the platform driver to prepare the system for resume from a hibernation
233  * image.
234  *
235  * If the restore fails after this function has been called,
236  * platform_restore_cleanup() must be called.
237  */
238 static int platform_pre_restore(int platform_mode)
239 {
240 	return (platform_mode && hibernation_ops) ?
241 		hibernation_ops->pre_restore() : 0;
242 }
243 
244 /**
245  * platform_restore_cleanup - Switch to the working state after failing restore.
246  * @platform_mode: Whether or not to use the platform driver.
247  *
248  * Use the platform driver to switch the system to the normal mode of operation
249  * after a failing restore.
250  *
251  * If platform_pre_restore() has been called before the failing restore, this
252  * function must be called too, regardless of the result of
253  * platform_pre_restore().
254  */
255 static void platform_restore_cleanup(int platform_mode)
256 {
257 	if (platform_mode && hibernation_ops)
258 		hibernation_ops->restore_cleanup();
259 }
260 
261 /**
262  * platform_recover - Recover from a failure to suspend devices.
263  * @platform_mode: Whether or not to use the platform driver.
264  */
265 static void platform_recover(int platform_mode)
266 {
267 	if (platform_mode && hibernation_ops && hibernation_ops->recover)
268 		hibernation_ops->recover();
269 }
270 
271 /**
272  * swsusp_show_speed - Print time elapsed between two events during hibernation.
273  * @start: Starting event.
274  * @stop: Final event.
275  * @nr_pages: Number of memory pages processed between @start and @stop.
276  * @msg: Additional diagnostic message to print.
277  */
278 void swsusp_show_speed(ktime_t start, ktime_t stop,
279 		      unsigned nr_pages, char *msg)
280 {
281 	ktime_t diff;
282 	u64 elapsed_centisecs64;
283 	unsigned int centisecs;
284 	unsigned int k;
285 	unsigned int kps;
286 
287 	diff = ktime_sub(stop, start);
288 	elapsed_centisecs64 = ktime_divns(diff, 10*NSEC_PER_MSEC);
289 	centisecs = elapsed_centisecs64;
290 	if (centisecs == 0)
291 		centisecs = 1;	/* avoid div-by-zero */
292 	k = nr_pages * (PAGE_SIZE / 1024);
293 	kps = (k * 100) / centisecs;
294 	pr_info("%s %u kbytes in %u.%02u seconds (%u.%02u MB/s)\n",
295 		msg, k, centisecs / 100, centisecs % 100, kps / 1000,
296 		(kps % 1000) / 10);
297 }
298 
299 __weak int arch_resume_nosmt(void)
300 {
301 	return 0;
302 }
303 
304 /**
305  * create_image - Create a hibernation image.
306  * @platform_mode: Whether or not to use the platform driver.
307  *
308  * Execute device drivers' "late" and "noirq" freeze callbacks, create a
309  * hibernation image and run the drivers' "noirq" and "early" thaw callbacks.
310  *
311  * Control reappears in this routine after the subsequent restore.
312  */
313 static int create_image(int platform_mode)
314 {
315 	int error;
316 
317 	error = dpm_suspend_end(PMSG_FREEZE);
318 	if (error) {
319 		pr_err("Some devices failed to power down, aborting\n");
320 		return error;
321 	}
322 
323 	error = platform_pre_snapshot(platform_mode);
324 	if (error || hibernation_test(TEST_PLATFORM))
325 		goto Platform_finish;
326 
327 	error = pm_sleep_disable_secondary_cpus();
328 	if (error || hibernation_test(TEST_CPUS))
329 		goto Enable_cpus;
330 
331 	local_irq_disable();
332 
333 	system_state = SYSTEM_SUSPEND;
334 
335 	error = syscore_suspend();
336 	if (error) {
337 		pr_err("Some system devices failed to power down, aborting\n");
338 		goto Enable_irqs;
339 	}
340 
341 	if (hibernation_test(TEST_CORE) || pm_wakeup_pending())
342 		goto Power_up;
343 
344 	in_suspend = 1;
345 	save_processor_state();
346 	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true);
347 	error = swsusp_arch_suspend();
348 	/* Restore control flow magically appears here */
349 	restore_processor_state();
350 	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
351 	if (error)
352 		pr_err("Error %d creating image\n", error);
353 
354 	if (!in_suspend) {
355 		events_check_enabled = false;
356 		clear_or_poison_free_pages();
357 	}
358 
359 	platform_leave(platform_mode);
360 
361  Power_up:
362 	syscore_resume();
363 
364  Enable_irqs:
365 	system_state = SYSTEM_RUNNING;
366 	local_irq_enable();
367 
368  Enable_cpus:
369 	pm_sleep_enable_secondary_cpus();
370 
371 	/* Allow architectures to do nosmt-specific post-resume dances */
372 	if (!in_suspend)
373 		error = arch_resume_nosmt();
374 
375  Platform_finish:
376 	platform_finish(platform_mode);
377 
378 	dpm_resume_start(in_suspend ?
379 		(error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
380 
381 	return error;
382 }
383 
384 static void shrink_shmem_memory(void)
385 {
386 	struct sysinfo info;
387 	unsigned long nr_shmem_pages, nr_freed_pages;
388 
389 	si_meminfo(&info);
390 	nr_shmem_pages = info.sharedram; /* current page count used for shmem */
391 	/*
392 	 * The intent is to reclaim all shmem pages. Though shrink_all_memory() can
393 	 * only reclaim about half of them, it's enough for creating the hibernation
394 	 * image.
395 	 */
396 	nr_freed_pages = shrink_all_memory(nr_shmem_pages);
397 	pr_debug("requested to reclaim %lu shmem pages, actually freed %lu pages\n",
398 			nr_shmem_pages, nr_freed_pages);
399 }
400 
401 /**
402  * hibernation_snapshot - Quiesce devices and create a hibernation image.
403  * @platform_mode: If set, use platform driver to prepare for the transition.
404  *
405  * This routine must be called with system_transition_mutex held.
406  */
407 int hibernation_snapshot(int platform_mode)
408 {
409 	pm_message_t msg;
410 	int error;
411 
412 	pm_suspend_clear_flags();
413 	error = platform_begin(platform_mode);
414 	if (error)
415 		goto Close;
416 
417 	/* Preallocate image memory before shutting down devices. */
418 	error = hibernate_preallocate_memory();
419 	if (error)
420 		goto Close;
421 
422 	error = freeze_kernel_threads();
423 	if (error)
424 		goto Cleanup;
425 
426 	if (hibernation_test(TEST_FREEZER)) {
427 
428 		/*
429 		 * Indicate to the caller that we are returning due to a
430 		 * successful freezer test.
431 		 */
432 		freezer_test_done = true;
433 		goto Thaw;
434 	}
435 
436 	error = dpm_prepare(PMSG_FREEZE);
437 	if (error) {
438 		dpm_complete(PMSG_RECOVER);
439 		goto Thaw;
440 	}
441 
442 	/*
443 	 * Device drivers may move lots of data to shmem in dpm_prepare(). The shmem
444 	 * pages will use lots of system memory, causing hibernation image creation
445 	 * to fail due to insufficient free memory.
446 	 * This call forces the shmem pages to be flushed to the swap device and
447 	 * reclaims system memory so that image creation can succeed.
448 	 */
449 	shrink_shmem_memory();
450 
451 	console_suspend_all();
452 	pm_restrict_gfp_mask();
453 
454 	error = dpm_suspend(PMSG_FREEZE);
455 
456 	if (error || hibernation_test(TEST_DEVICES))
457 		platform_recover(platform_mode);
458 	else
459 		error = create_image(platform_mode);
460 
461 	/*
462 	 * In the case that we call create_image() above, the control
463 	 * returns here (1) after the image has been created or the
464 	 * image creation has failed and (2) after a successful restore.
465 	 */
466 
467 	/* We may need to release the preallocated image pages here. */
468 	if (error || !in_suspend)
469 		swsusp_free();
470 
471 	msg = in_suspend ? (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE;
472 	dpm_resume(msg);
473 
474 	if (error || !in_suspend)
475 		pm_restore_gfp_mask();
476 
477 	console_resume_all();
478 	dpm_complete(msg);
479 
480  Close:
481 	platform_end(platform_mode);
482 	return error;
483 
484  Thaw:
485 	thaw_kernel_threads();
486  Cleanup:
487 	swsusp_free();
488 	goto Close;
489 }
490 
491 int __weak hibernate_resume_nonboot_cpu_disable(void)
492 {
493 	return suspend_disable_secondary_cpus();
494 }
495 
496 /**
497  * resume_target_kernel - Restore system state from a hibernation image.
498  * @platform_mode: Whether or not to use the platform driver.
499  *
500  * Execute device drivers' "noirq" and "late" freeze callbacks, restore the
501  * contents of highmem that have not been restored yet from the image and run
502  * the low-level code that will restore the remaining contents of memory and
503  * switch to the just restored target kernel.
504  */
505 static int resume_target_kernel(bool platform_mode)
506 {
507 	int error;
508 
509 	error = dpm_suspend_end(PMSG_QUIESCE);
510 	if (error) {
511 		pr_err("Some devices failed to power down, aborting resume\n");
512 		return error;
513 	}
514 
515 	error = platform_pre_restore(platform_mode);
516 	if (error)
517 		goto Cleanup;
518 
519 	cpuidle_pause();
520 
521 	error = hibernate_resume_nonboot_cpu_disable();
522 	if (error)
523 		goto Enable_cpus;
524 
525 	local_irq_disable();
526 	system_state = SYSTEM_SUSPEND;
527 
528 	error = syscore_suspend();
529 	if (error)
530 		goto Enable_irqs;
531 
532 	save_processor_state();
533 	error = restore_highmem();
534 	if (!error) {
535 		error = swsusp_arch_resume();
536 		/*
537 		 * The code below is only ever reached in case of a failure.
538 		 * Otherwise, execution continues at the place where
539 		 * swsusp_arch_suspend() was called.
540 		 */
541 		BUG_ON(!error);
542 		/*
543 		 * This call to restore_highmem() reverts the changes made by
544 		 * the previous one.
545 		 */
546 		restore_highmem();
547 	}
548 	/*
549 	 * The only reason why swsusp_arch_resume() can fail is memory being
550 	 * very tight, so we have to free it as soon as we can to avoid
551 	 * subsequent failures.
552 	 */
553 	swsusp_free();
554 	restore_processor_state();
555 	touch_softlockup_watchdog();
556 
557 	syscore_resume();
558 
559  Enable_irqs:
560 	system_state = SYSTEM_RUNNING;
561 	local_irq_enable();
562 
563  Enable_cpus:
564 	pm_sleep_enable_secondary_cpus();
565 
566  Cleanup:
567 	platform_restore_cleanup(platform_mode);
568 
569 	dpm_resume_start(PMSG_RECOVER);
570 
571 	return error;
572 }
573 
574 /**
575  * hibernation_restore - Quiesce devices and restore from a hibernation image.
576  * @platform_mode: If set, use platform driver to prepare for the transition.
577  *
578  * This routine must be called with system_transition_mutex held.  If it is
579  * successful, control reappears in the restored target kernel in
580  * hibernation_snapshot().
581  */
582 int hibernation_restore(int platform_mode)
583 {
584 	int error;
585 
586 	pm_prepare_console();
587 	console_suspend_all();
588 	error = dpm_suspend_start(PMSG_QUIESCE);
589 	if (!error) {
590 		error = resume_target_kernel(platform_mode);
591 		/*
592 		 * The above should either succeed and jump to the new kernel,
593 		 * or return with an error. Otherwise things are just
594 		 * undefined, so let's be paranoid.
595 		 */
596 		BUG_ON(!error);
597 	}
598 	dpm_resume_end(PMSG_RECOVER);
599 	console_resume_all();
600 	pm_restore_console();
601 	return error;
602 }
603 
604 /**
605  * hibernation_platform_enter - Power off the system using the platform driver.
606  */
607 int hibernation_platform_enter(void)
608 {
609 	int error;
610 
611 	if (!hibernation_ops)
612 		return -ENOSYS;
613 
614 	/*
615 	 * We have cancelled the power transition by running
616 	 * hibernation_ops->finish() before saving the image, so we should let
617 	 * the firmware know that we're going to enter the sleep state after all.
618 	 */
619 	error = hibernation_ops->begin(PMSG_HIBERNATE);
620 	if (error)
621 		goto Close;
622 
623 	entering_platform_hibernation = true;
624 	console_suspend_all();
625 	error = dpm_suspend_start(PMSG_HIBERNATE);
626 	if (error) {
627 		if (hibernation_ops->recover)
628 			hibernation_ops->recover();
629 		goto Resume_devices;
630 	}
631 
632 	error = dpm_suspend_end(PMSG_HIBERNATE);
633 	if (error)
634 		goto Resume_devices;
635 
636 	error = hibernation_ops->prepare();
637 	if (error)
638 		goto Platform_finish;
639 
640 	error = pm_sleep_disable_secondary_cpus();
641 	if (error)
642 		goto Enable_cpus;
643 
644 	local_irq_disable();
645 	system_state = SYSTEM_SUSPEND;
646 
647 	error = syscore_suspend();
648 	if (error)
649 		goto Enable_irqs;
650 
651 	if (pm_wakeup_pending()) {
652 		error = -EAGAIN;
653 		goto Power_up;
654 	}
655 
656 	hibernation_ops->enter();
657 	/* We should never get here */
658 	while (1);
659 
660  Power_up:
661 	syscore_resume();
662  Enable_irqs:
663 	system_state = SYSTEM_RUNNING;
664 	local_irq_enable();
665 
666  Enable_cpus:
667 	pm_sleep_enable_secondary_cpus();
668 
669  Platform_finish:
670 	hibernation_ops->finish();
671 
672 	dpm_resume_start(PMSG_RESTORE);
673 
674  Resume_devices:
675 	entering_platform_hibernation = false;
676 	dpm_resume_end(PMSG_RESTORE);
677 	console_resume_all();
678 
679  Close:
680 	hibernation_ops->end();
681 
682 	return error;
683 }
684 
685 /**
686  * power_down - Shut the machine down for hibernation.
687  *
688  * Use the platform driver, if configured, to put the system into the sleep
689  * state corresponding to hibernation, or try to power it off or reboot,
690  * depending on the value of hibernation_mode.
691  */
692 static void power_down(void)
693 {
694 	int error;
695 
696 #ifdef CONFIG_SUSPEND
697 	if (hibernation_mode == HIBERNATION_SUSPEND) {
698 		error = suspend_devices_and_enter(mem_sleep_current);
699 		if (error) {
700 			hibernation_mode = hibernation_ops ?
701 						HIBERNATION_PLATFORM :
702 						HIBERNATION_SHUTDOWN;
703 		} else {
704 			/* Restore swap signature. */
705 			error = swsusp_unmark();
706 			if (error)
707 				pr_err("Swap will be unusable! Try swapon -a.\n");
708 
709 			return;
710 		}
711 	}
712 #endif
713 
714 	switch (hibernation_mode) {
715 	case HIBERNATION_REBOOT:
716 		kernel_restart(NULL);
717 		break;
718 	case HIBERNATION_PLATFORM:
719 		error = hibernation_platform_enter();
720 		if (error == -EAGAIN || error == -EBUSY) {
721 			swsusp_unmark();
722 			events_check_enabled = false;
723 			pr_info("Wakeup event detected during hibernation, rolling back.\n");
724 			return;
725 		}
726 		fallthrough;
727 	case HIBERNATION_SHUTDOWN:
728 		if (kernel_can_power_off()) {
729 			entering_platform_hibernation = true;
730 			kernel_power_off();
731 			entering_platform_hibernation = false;
732 		}
733 		break;
734 	}
735 	kernel_halt();
736 	/*
737 	 * A valid image is on the disk; if we continue, we risk serious data
738 	 * corruption after resume.
739 	 */
740 	pr_crit("Power down manually\n");
741 	while (1)
742 		cpu_relax();
743 }
744 
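/*
 * Read the image from the resume device and restore it.  On success this does
 * not return: control continues in the restored kernel (see
 * hibernation_restore()), so a return from this function always indicates a
 * failure to load or restore the image.
 */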
745 static int load_image_and_restore(void)
746 {
747 	int error;
748 	unsigned int flags;
749 
750 	pm_pr_dbg("Loading hibernation image.\n");
751 
752 	lock_device_hotplug();
753 	error = create_basic_memory_bitmaps();
754 	if (error) {
755 		swsusp_close();
756 		goto Unlock;
757 	}
758 
759 	error = swsusp_read(&flags);
760 	swsusp_close();
761 	if (!error)
762 		error = hibernation_restore(flags & SF_PLATFORM_MODE);
763 
764 	pr_err("Failed to load image, recovering.\n");
765 	swsusp_free();
766 	free_basic_memory_bitmaps();
767  Unlock:
768 	unlock_device_hotplug();
769 
770 	return error;
771 }
772 
773 #define COMPRESSION_ALGO_LZO "lzo"
774 #define COMPRESSION_ALGO_LZ4 "lz4"
775 
776 /**
777  * hibernate - Carry out system hibernation, including saving the image.
778  */
779 int hibernate(void)
780 {
781 	bool snapshot_test = false;
782 	unsigned int sleep_flags;
783 	int error;
784 
785 	if (!hibernation_available()) {
786 		pm_pr_dbg("Hibernation not available.\n");
787 		return -EPERM;
788 	}
789 
790 	/*
791 	 * Query for the compression algorithm support if compression is enabled.
792 	 */
793 	if (!nocompress) {
794 		strscpy(hib_comp_algo, hibernate_compressor);
795 		if (!crypto_has_acomp(hib_comp_algo, 0, CRYPTO_ALG_ASYNC)) {
796 			pr_err("%s compression is not available\n", hib_comp_algo);
797 			return -EOPNOTSUPP;
798 		}
799 	}
800 
801 	sleep_flags = lock_system_sleep();
802 	/* The snapshot device should not be opened while we're running */
803 	if (!hibernate_acquire()) {
804 		error = -EBUSY;
805 		goto Unlock;
806 	}
807 
808 	pr_info("hibernation entry\n");
809 	pm_prepare_console();
810 	error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION);
811 	if (error)
812 		goto Restore;
813 
814 	ksys_sync_helper();
815 	if (filesystem_freeze_enabled)
816 		filesystems_freeze();
817 
818 	error = freeze_processes();
819 	if (error)
820 		goto Exit;
821 
822 	lock_device_hotplug();
823 	/* Allocate memory management structures */
824 	error = create_basic_memory_bitmaps();
825 	if (error)
826 		goto Thaw;
827 
828 	error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
829 	if (error || freezer_test_done)
830 		goto Free_bitmaps;
831 
832 	if (in_suspend) {
833 		unsigned int flags = 0;
834 
835 		if (hibernation_mode == HIBERNATION_PLATFORM)
836 			flags |= SF_PLATFORM_MODE;
837 		if (nocompress) {
838 			flags |= SF_NOCOMPRESS_MODE;
839 		} else {
840 			flags |= SF_CRC32_MODE;
841 
842 			/*
843 			 * By default, LZO compression is enabled. Use SF_COMPRESSION_ALG_LZ4
844 			 * to override this behaviour and use LZ4.
845 			 *
846 			 * Refer to kernel/power/power.h for more details.
847 			 */
848 
849 			if (!strcmp(hib_comp_algo, COMPRESSION_ALGO_LZ4))
850 				flags |= SF_COMPRESSION_ALG_LZ4;
851 			else
852 				flags |= SF_COMPRESSION_ALG_LZO;
853 		}
854 
855 		pm_pr_dbg("Writing hibernation image.\n");
856 		error = swsusp_write(flags);
857 		swsusp_free();
858 		if (!error) {
859 			if (hibernation_mode == HIBERNATION_TEST_RESUME)
860 				snapshot_test = true;
861 			else
862 				power_down();
863 		}
864 		in_suspend = 0;
865 		pm_restore_gfp_mask();
866 	} else {
867 		pm_pr_dbg("Hibernation image restored successfully.\n");
868 	}
869 
870  Free_bitmaps:
871 	free_basic_memory_bitmaps();
872  Thaw:
873 	unlock_device_hotplug();
874 	if (snapshot_test) {
875 		pm_pr_dbg("Checking hibernation image\n");
876 		error = swsusp_check(false);
877 		if (!error)
878 			error = load_image_and_restore();
879 	}
880 	thaw_processes();
881 
882 	/* Don't bother checking whether freezer_test_done is true */
883 	freezer_test_done = false;
884  Exit:
885 	filesystems_thaw();
886 	pm_notifier_call_chain(PM_POST_HIBERNATION);
887  Restore:
888 	pm_restore_console();
889 	hibernate_release();
890  Unlock:
891 	unlock_system_sleep(sleep_flags);
892 	pr_info("hibernation exit\n");
893 
894 	return error;
895 }
896 
897 /**
898  * hibernate_quiet_exec - Execute a function with all devices frozen.
899  * @func: Function to execute.
900  * @data: Data pointer to pass to @func.
901  *
902  * Return the @func return value or an error code if it cannot be executed.
903  */
904 int hibernate_quiet_exec(int (*func)(void *data), void *data)
905 {
906 	unsigned int sleep_flags;
907 	int error;
908 
909 	sleep_flags = lock_system_sleep();
910 
911 	if (!hibernate_acquire()) {
912 		error = -EBUSY;
913 		goto unlock;
914 	}
915 
916 	pm_prepare_console();
917 
918 	error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION);
919 	if (error)
920 		goto restore;
921 
922 	if (filesystem_freeze_enabled)
923 		filesystems_freeze();
924 
925 	error = freeze_processes();
926 	if (error)
927 		goto exit;
928 
929 	lock_device_hotplug();
930 
931 	pm_suspend_clear_flags();
932 
933 	error = platform_begin(true);
934 	if (error)
935 		goto thaw;
936 
937 	error = freeze_kernel_threads();
938 	if (error)
939 		goto thaw;
940 
941 	error = dpm_prepare(PMSG_FREEZE);
942 	if (error)
943 		goto dpm_complete;
944 
945 	console_suspend_all();
946 
947 	error = dpm_suspend(PMSG_FREEZE);
948 	if (error)
949 		goto dpm_resume;
950 
951 	error = dpm_suspend_end(PMSG_FREEZE);
952 	if (error)
953 		goto dpm_resume;
954 
955 	error = platform_pre_snapshot(true);
956 	if (error)
957 		goto skip;
958 
959 	error = func(data);
960 
961 skip:
962 	platform_finish(true);
963 
964 	dpm_resume_start(PMSG_THAW);
965 
966 dpm_resume:
967 	dpm_resume(PMSG_THAW);
968 
969 	console_resume_all();
970 
971 dpm_complete:
972 	dpm_complete(PMSG_THAW);
973 
974 	thaw_kernel_threads();
975 
976 thaw:
977 	platform_end(true);
978 
979 	unlock_device_hotplug();
980 
981 	thaw_processes();
982 
983 exit:
984 	filesystems_thaw();
985 	pm_notifier_call_chain(PM_POST_HIBERNATION);
986 
987 restore:
988 	pm_restore_console();
989 
990 	hibernate_release();
991 
992 unlock:
993 	unlock_system_sleep(sleep_flags);
994 
995 	return error;
996 }
997 EXPORT_SYMBOL_GPL(hibernate_quiet_exec);
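/*
 * Illustrative use of hibernate_quiet_exec() (hypothetical caller and
 * callback names): a driver that needs the whole system quiesced while it
 * saves state could do
 *
 *	ret = hibernate_quiet_exec(my_save_state, my_dev);
 *
 * my_save_state(my_dev) then runs with user space frozen and devices
 * suspended, and its return value (or an error from one of the freeze or
 * suspend steps above) is returned to the caller.
 */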
998 
999 static int __init find_resume_device(void)
1000 {
1001 	if (!strlen(resume_file))
1002 		return -ENOENT;
1003 
1004 	pm_pr_dbg("Checking hibernation image partition %s\n", resume_file);
1005 
1006 	if (resume_delay) {
1007 		pr_info("Waiting %dsec before reading resume device ...\n",
1008 			resume_delay);
1009 		ssleep(resume_delay);
1010 	}
1011 
1012 	/* Check if the device is there */
1013 	if (!early_lookup_bdev(resume_file, &swsusp_resume_device))
1014 		return 0;
1015 
1016 	/*
1017 	 * Some device discovery might still be in progress; we need to wait for
1018 	 * this to finish.
1019 	 */
1020 	wait_for_device_probe();
1021 	if (resume_wait) {
1022 		while (early_lookup_bdev(resume_file, &swsusp_resume_device))
1023 			msleep(10);
1024 		async_synchronize_full();
1025 	}
1026 
1027 	return early_lookup_bdev(resume_file, &swsusp_resume_device);
1028 }
1029 
1030 static int software_resume(void)
1031 {
1032 	int error;
1033 
1034 	pm_pr_dbg("Hibernation image partition %d:%d present\n",
1035 		MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
1036 
1037 	pm_pr_dbg("Looking for hibernation image.\n");
1038 
1039 	mutex_lock(&system_transition_mutex);
1040 	error = swsusp_check(true);
1041 	if (error)
1042 		goto Unlock;
1043 
1044 	/*
1045 	 * Check if the hibernation image is compressed. If so, query for
1046 	 * the algorithm support.
1047 	 */
1048 	if (!(swsusp_header_flags & SF_NOCOMPRESS_MODE)) {
1049 		if (swsusp_header_flags & SF_COMPRESSION_ALG_LZ4)
1050 			strscpy(hib_comp_algo, COMPRESSION_ALGO_LZ4);
1051 		else
1052 			strscpy(hib_comp_algo, COMPRESSION_ALGO_LZO);
1053 		if (!crypto_has_acomp(hib_comp_algo, 0, CRYPTO_ALG_ASYNC)) {
1054 			pr_err("%s compression is not available\n", hib_comp_algo);
1055 			error = -EOPNOTSUPP;
1056 			goto Unlock;
1057 		}
1058 	}
1059 
1060 	/* The snapshot device should not be opened while we're running */
1061 	if (!hibernate_acquire()) {
1062 		error = -EBUSY;
1063 		swsusp_close();
1064 		goto Unlock;
1065 	}
1066 
1067 	pr_info("resume from hibernation\n");
1068 	pm_prepare_console();
1069 	error = pm_notifier_call_chain_robust(PM_RESTORE_PREPARE, PM_POST_RESTORE);
1070 	if (error)
1071 		goto Restore;
1072 
1073 	if (filesystem_freeze_enabled)
1074 		filesystems_freeze();
1075 
1076 	pm_pr_dbg("Preparing processes for hibernation restore.\n");
1077 	error = freeze_processes();
1078 	if (error) {
1079 		filesystems_thaw();
1080 		goto Close_Finish;
1081 	}
1082 
1083 	error = freeze_kernel_threads();
1084 	if (error) {
1085 		thaw_processes();
1086 		filesystems_thaw();
1087 		goto Close_Finish;
1088 	}
1089 
1090 	error = load_image_and_restore();
1091 	thaw_processes();
1092 	filesystems_thaw();
1093  Finish:
1094 	pm_notifier_call_chain(PM_POST_RESTORE);
1095  Restore:
1096 	pm_restore_console();
1097 	pr_info("resume failed (%d)\n", error);
1098 	hibernate_release();
1099 	/* For success case, the suspend path will release the lock */
1100  Unlock:
1101 	mutex_unlock(&system_transition_mutex);
1102 	pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
1103 	return error;
1104  Close_Finish:
1105 	swsusp_close();
1106 	goto Finish;
1107 }
1108 
1109 /**
1110  * software_resume_initcall - Resume from a saved hibernation image.
1111  *
1112  * This routine is called as a late initcall, when all devices have been
1113  * discovered and initialized already.
1114  *
1115  * The image reading code is called to see if there is a hibernation image
1116  * available for reading.  If that is the case, devices are quiesced and the
1117  * contents of memory is restored from the saved image.
1118  *
1119  * If this is successful, control reappears in the restored target kernel in
1120  * hibernation_snapshot() which returns to hibernate().  Otherwise, the routine
1121  * attempts to recover gracefully and make the kernel return to the normal mode
1122  * of operation.
1123  */
1124 static int __init software_resume_initcall(void)
1125 {
1126 	/*
1127 	 * If the user said "noresume", bail out early.
1128 	 */
1129 	if (noresume || !hibernation_available())
1130 		return 0;
1131 
1132 	if (!swsusp_resume_device) {
1133 		int error = find_resume_device();
1134 
1135 		if (error)
1136 			return error;
1137 	}
1138 
1139 	return software_resume();
1140 }
1141 late_initcall_sync(software_resume_initcall);
1142 
1143 
1144 static const char * const hibernation_modes[] = {
1145 	[HIBERNATION_PLATFORM]	= "platform",
1146 	[HIBERNATION_SHUTDOWN]	= "shutdown",
1147 	[HIBERNATION_REBOOT]	= "reboot",
1148 #ifdef CONFIG_SUSPEND
1149 	[HIBERNATION_SUSPEND]	= "suspend",
1150 #endif
1151 	[HIBERNATION_TEST_RESUME]	= "test_resume",
1152 };
1153 
1154 /*
1155  * /sys/power/disk - Control hibernation mode.
1156  *
1157  * Hibernation can be handled in several ways.  There are a few different ways
1158  * to put the system into the sleep state: using the platform driver (e.g. ACPI
1159  * or other hibernation_ops), powering it off or rebooting it (for testing
1160  * mostly).
1161  *
1162  * The sysfs file /sys/power/disk provides an interface for selecting the
1163  * hibernation mode to use.  Reading from this file causes the available modes
1164  * to be printed.  There are 3 modes that can be supported:
1165  *
1166  *	'platform'
1167  *	'shutdown'
1168  *	'reboot'
1169  *
1170  * If a platform hibernation driver is in use, 'platform' will be supported
1171  * and will be used by default.  Otherwise, 'shutdown' will be used by default.
1172  * The selected option (i.e. the one corresponding to the current value of
1173  * hibernation_mode) is enclosed by a square bracket.
1174  *
1175  * To select a given hibernation mode it is necessary to write the mode's
1176  * string representation (as returned by reading from /sys/power/disk) back
1177  * into /sys/power/disk.
1178  */
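/*
 * Example (illustrative shell session; the modes actually listed depend on
 * the kernel configuration and on whether a platform driver is registered):
 *
 *	# cat /sys/power/disk
 *	[platform] shutdown reboot suspend test_resume
 *	# echo reboot > /sys/power/disk
 */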
1179 
1180 static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
1181 			 char *buf)
1182 {
1183 	ssize_t count = 0;
1184 	int i;
1185 
1186 	if (!hibernation_available())
1187 		return sysfs_emit(buf, "[disabled]\n");
1188 
1189 	for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
1190 		if (!hibernation_modes[i])
1191 			continue;
1192 		switch (i) {
1193 		case HIBERNATION_SHUTDOWN:
1194 		case HIBERNATION_REBOOT:
1195 #ifdef CONFIG_SUSPEND
1196 		case HIBERNATION_SUSPEND:
1197 #endif
1198 		case HIBERNATION_TEST_RESUME:
1199 			break;
1200 		case HIBERNATION_PLATFORM:
1201 			if (hibernation_ops)
1202 				break;
1203 			/* not a valid mode, continue with loop */
1204 			continue;
1205 		}
1206 		if (i == hibernation_mode)
1207 			count += sysfs_emit_at(buf, count, "[%s] ", hibernation_modes[i]);
1208 		else
1209 			count += sysfs_emit_at(buf, count, "%s ", hibernation_modes[i]);
1210 	}
1211 
1212 	/* Convert the last space to a newline if needed. */
1213 	if (count > 0)
1214 		buf[count - 1] = '\n';
1215 
1216 	return count;
1217 }
1218 
1219 static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
1220 			  const char *buf, size_t n)
1221 {
1222 	int mode = HIBERNATION_INVALID;
1223 	unsigned int sleep_flags;
1224 	int error = 0;
1225 	int len;
1226 	char *p;
1227 	int i;
1228 
1229 	if (!hibernation_available())
1230 		return -EPERM;
1231 
1232 	p = memchr(buf, '\n', n);
1233 	len = p ? p - buf : n;
1234 
1235 	sleep_flags = lock_system_sleep();
1236 	for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
1237 		if (len == strlen(hibernation_modes[i])
1238 		    && !strncmp(buf, hibernation_modes[i], len)) {
1239 			mode = i;
1240 			break;
1241 		}
1242 	}
1243 	if (mode != HIBERNATION_INVALID) {
1244 		switch (mode) {
1245 		case HIBERNATION_SHUTDOWN:
1246 		case HIBERNATION_REBOOT:
1247 #ifdef CONFIG_SUSPEND
1248 		case HIBERNATION_SUSPEND:
1249 #endif
1250 		case HIBERNATION_TEST_RESUME:
1251 			hibernation_mode = mode;
1252 			break;
1253 		case HIBERNATION_PLATFORM:
1254 			if (hibernation_ops)
1255 				hibernation_mode = mode;
1256 			else
1257 				error = -EINVAL;
1258 		}
1259 	} else
1260 		error = -EINVAL;
1261 
1262 	if (!error)
1263 		pm_pr_dbg("Hibernation mode set to '%s'\n",
1264 			       hibernation_modes[mode]);
1265 	unlock_system_sleep(sleep_flags);
1266 	return error ? error : n;
1267 }
1268 
1269 power_attr(disk);
1270 
1271 static ssize_t resume_show(struct kobject *kobj, struct kobj_attribute *attr,
1272 			   char *buf)
1273 {
1274 	return sysfs_emit(buf, "%d:%d\n", MAJOR(swsusp_resume_device),
1275 			  MINOR(swsusp_resume_device));
1276 }
1277 
1278 static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
1279 			    const char *buf, size_t n)
1280 {
1281 	unsigned int sleep_flags;
1282 	int len = n;
1283 	char *name;
1284 	dev_t dev;
1285 	int error;
1286 
1287 	if (!hibernation_available())
1288 		return n;
1289 
1290 	if (len && buf[len-1] == '\n')
1291 		len--;
1292 	name = kstrndup(buf, len, GFP_KERNEL);
1293 	if (!name)
1294 		return -ENOMEM;
1295 
1296 	error = lookup_bdev(name, &dev);
1297 	if (error) {
1298 		unsigned maj, min, offset;
1299 		char *p, dummy;
1300 
1301 		error = 0;
1302 		if (sscanf(name, "%u:%u%c", &maj, &min, &dummy) == 2 ||
1303 		    sscanf(name, "%u:%u:%u:%c", &maj, &min, &offset,
1304 				&dummy) == 3) {
1305 			dev = MKDEV(maj, min);
1306 			if (maj != MAJOR(dev) || min != MINOR(dev))
1307 				error = -EINVAL;
1308 		} else {
1309 			dev = new_decode_dev(simple_strtoul(name, &p, 16));
1310 			if (*p)
1311 				error = -EINVAL;
1312 		}
1313 	}
1314 	kfree(name);
1315 	if (error)
1316 		return error;
1317 
1318 	sleep_flags = lock_system_sleep();
1319 	swsusp_resume_device = dev;
1320 	unlock_system_sleep(sleep_flags);
1321 
1322 	pm_pr_dbg("Configured hibernation resume from disk to %u\n",
1323 		  swsusp_resume_device);
1324 	noresume = 0;
1325 	software_resume();
1326 	return n;
1327 }
1328 
1329 power_attr(resume);
1330 
1331 static ssize_t resume_offset_show(struct kobject *kobj,
1332 				  struct kobj_attribute *attr, char *buf)
1333 {
1334 	return sysfs_emit(buf, "%llu\n", (unsigned long long)swsusp_resume_block);
1335 }
1336 
1337 static ssize_t resume_offset_store(struct kobject *kobj,
1338 				   struct kobj_attribute *attr, const char *buf,
1339 				   size_t n)
1340 {
1341 	unsigned long long offset;
1342 	int rc;
1343 
1344 	rc = kstrtoull(buf, 0, &offset);
1345 	if (rc)
1346 		return rc;
1347 	swsusp_resume_block = offset;
1348 
1349 	return n;
1350 }
1351 
1352 power_attr(resume_offset);
1353 
1354 static ssize_t image_size_show(struct kobject *kobj, struct kobj_attribute *attr,
1355 			       char *buf)
1356 {
1357 	return sysfs_emit(buf, "%lu\n", image_size);
1358 }
1359 
1360 static ssize_t image_size_store(struct kobject *kobj, struct kobj_attribute *attr,
1361 				const char *buf, size_t n)
1362 {
1363 	unsigned long size;
1364 
1365 	if (sscanf(buf, "%lu", &size) == 1) {
1366 		image_size = size;
1367 		return n;
1368 	}
1369 
1370 	return -EINVAL;
1371 }
1372 
1373 power_attr(image_size);
1374 
1375 static ssize_t reserved_size_show(struct kobject *kobj,
1376 				  struct kobj_attribute *attr, char *buf)
1377 {
1378 	return sysfs_emit(buf, "%lu\n", reserved_size);
1379 }
1380 
1381 static ssize_t reserved_size_store(struct kobject *kobj,
1382 				   struct kobj_attribute *attr,
1383 				   const char *buf, size_t n)
1384 {
1385 	unsigned long size;
1386 
1387 	if (sscanf(buf, "%lu", &size) == 1) {
1388 		reserved_size = size;
1389 		return n;
1390 	}
1391 
1392 	return -EINVAL;
1393 }
1394 
1395 power_attr(reserved_size);
1396 
1397 static struct attribute *g[] = {
1398 	&disk_attr.attr,
1399 	&resume_offset_attr.attr,
1400 	&resume_attr.attr,
1401 	&image_size_attr.attr,
1402 	&reserved_size_attr.attr,
1403 	NULL,
1404 };
1405 
1406 
1407 static const struct attribute_group attr_group = {
1408 	.attrs = g,
1409 };
1410 
1411 
1412 static int __init pm_disk_init(void)
1413 {
1414 	return sysfs_create_group(power_kobj, &attr_group);
1415 }
1416 
1417 core_initcall(pm_disk_init);
1418 
1419 
1420 static int __init resume_setup(char *str)
1421 {
1422 	if (noresume)
1423 		return 1;
1424 
1425 	strscpy(resume_file, str);
1426 	return 1;
1427 }
1428 
1429 static int __init resume_offset_setup(char *str)
1430 {
1431 	unsigned long long offset;
1432 
1433 	if (noresume)
1434 		return 1;
1435 
1436 	if (sscanf(str, "%llu", &offset) == 1)
1437 		swsusp_resume_block = offset;
1438 
1439 	return 1;
1440 }
1441 
1442 static int __init hibernate_setup(char *str)
1443 {
1444 	if (!strncmp(str, "noresume", 8)) {
1445 		noresume = 1;
1446 	} else if (!strncmp(str, "nocompress", 10)) {
1447 		nocompress = 1;
1448 	} else if (!strncmp(str, "no", 2)) {
1449 		noresume = 1;
1450 		nohibernate = 1;
1451 	} else if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)
1452 		   && !strncmp(str, "protect_image", 13)) {
1453 		enable_restore_image_protection();
1454 	}
1455 	return 1;
1456 }
1457 
1458 static int __init noresume_setup(char *str)
1459 {
1460 	noresume = 1;
1461 	return 1;
1462 }
1463 
1464 static int __init resumewait_setup(char *str)
1465 {
1466 	resume_wait = 1;
1467 	return 1;
1468 }
1469 
1470 static int __init resumedelay_setup(char *str)
1471 {
1472 	int rc = kstrtouint(str, 0, &resume_delay);
1473 
1474 	if (rc)
1475 		pr_warn("resumedelay: bad option string '%s'\n", str);
1476 	return 1;
1477 }
1478 
1479 static int __init nohibernate_setup(char *str)
1480 {
1481 	noresume = 1;
1482 	nohibernate = 1;
1483 	return 1;
1484 }
1485 
1486 static const char * const comp_alg_enabled[] = {
1487 #if IS_ENABLED(CONFIG_CRYPTO_LZO)
1488 	COMPRESSION_ALGO_LZO,
1489 #endif
1490 #if IS_ENABLED(CONFIG_CRYPTO_LZ4)
1491 	COMPRESSION_ALGO_LZ4,
1492 #endif
1493 };
1494 
1495 static int hibernate_compressor_param_set(const char *compressor,
1496 		const struct kernel_param *kp)
1497 {
1498 	int index, ret;
1499 
1500 	if (!mutex_trylock(&system_transition_mutex))
1501 		return -EBUSY;
1502 
1503 	index = sysfs_match_string(comp_alg_enabled, compressor);
1504 	if (index >= 0) {
1505 		ret = param_set_copystring(comp_alg_enabled[index], kp);
1506 		if (!ret)
1507 			strscpy(hib_comp_algo, comp_alg_enabled[index]);
1508 	} else {
1509 		ret = index;
1510 	}
1511 
1512 	mutex_unlock(&system_transition_mutex);
1513 
1514 	if (ret)
1515 		pr_debug("Cannot set specified compressor %s\n",
1516 			 compressor);
1517 
1518 	return ret;
1519 }
1520 
1521 static const struct kernel_param_ops hibernate_compressor_param_ops = {
1522 	.set    = hibernate_compressor_param_set,
1523 	.get    = param_get_string,
1524 };
1525 
1526 static struct kparam_string hibernate_compressor_param_string = {
1527 	.maxlen = sizeof(hibernate_compressor),
1528 	.string = hibernate_compressor,
1529 };
1530 
1531 module_param_cb(compressor, &hibernate_compressor_param_ops,
1532 		&hibernate_compressor_param_string, 0644);
1533 MODULE_PARM_DESC(compressor,
1534 		 "Compression algorithm to be used with hibernation");
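/*
 * With hibernation built in, this parameter is typically given on the kernel
 * command line, e.g. "hibernate.compressor=lz4" (assuming CONFIG_CRYPTO_LZ4
 * is enabled); as it is writable (0644), it can also be changed at runtime
 * via /sys/module/hibernate/parameters/compressor.
 */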
1535 
1536 __setup("noresume", noresume_setup);
1537 __setup("resume_offset=", resume_offset_setup);
1538 __setup("resume=", resume_setup);
1539 __setup("hibernate=", hibernate_setup);
1540 __setup("resumewait", resumewait_setup);
1541 __setup("resumedelay=", resumedelay_setup);
1542 __setup("nohibernate", nohibernate_setup);
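/*
 * Example command-line usage of the options registered above (device name and
 * values are illustrative only):
 *
 *	resume=/dev/sda2 resume_offset=34816 resumedelay=2
 *
 * "noresume" skips looking for an image entirely, and "hibernate=nocompress"
 * makes the image be written uncompressed.
 */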
1543