/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/suspend.h>
#include <linux/suspend_ioctls.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/crypto.h>

struct swsusp_info {
	struct new_utsname	uts;
	u32			version_code;
	unsigned long		num_physpages;
	int			cpus;
	unsigned long		image_pages;
	unsigned long		pages;
	unsigned long		size;
} __aligned(PAGE_SIZE);

#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __init hibernate_reserved_size_init(void);
extern void __init hibernate_image_size_init(void);

#ifdef CONFIG_ARCH_HIBERNATION_HEADER
/* Maximum size of architecture specific data in a hibernation header */
#define MAX_ARCH_HEADER_SIZE	(sizeof(struct new_utsname) + 4)

static inline int init_header_complete(struct swsusp_info *info)
{
	return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE);
}

static inline const char *check_image_kernel(struct swsusp_info *info)
{
	return arch_hibernation_header_restore(info) ?
			"architecture specific data" : NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
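
/*
 * Illustrative sketch, not part of this header: an architecture that
 * selects CONFIG_ARCH_HIBERNATION_HEADER supplies the two hooks used by
 * the helpers above.  The structure, magic value and resume_entry_point
 * symbol below are hypothetical; real implementations live under arch/
 * (for example arch/x86/power/hibernate.c) and typically record data
 * needed to jump back into the image, which is then validated on restore.
 *
 *	struct arch_hibernate_hdr {
 *		unsigned long	magic;
 *		unsigned long	jump_address;
 *	};
 *
 *	int arch_hibernation_header_save(void *addr, unsigned int max_size)
 *	{
 *		struct arch_hibernate_hdr *hdr = addr;
 *
 *		if (max_size < sizeof(*hdr))
 *			return -EOVERFLOW;
 *		hdr->magic = ARCH_HDR_MAGIC;
 *		hdr->jump_address = (unsigned long)resume_entry_point;
 *		return 0;
 *	}
 *
 *	int arch_hibernation_header_restore(void *addr)
 *	{
 *		struct arch_hibernate_hdr *hdr = addr;
 *
 *		return hdr->magic == ARCH_HDR_MAGIC ? 0 : -EINVAL;
 *	}
 */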

/*
 * Keep some memory free so that I/O operations can succeed without paging
 * [Might this be more than 4 MB?]
 */
#define PAGES_FOR_IO	((4096 * 1024) >> PAGE_SHIFT)

/*
 * Keep 1 MB of memory free so that device drivers can allocate some pages in
 * their .suspend() routines without breaking the suspend to disk.
 */
#define SPARE_PAGES	((1024 * 1024) >> PAGE_SHIFT)
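
/*
 * Worked example, assuming the common PAGE_SHIFT of 12 (4 KiB pages):
 * PAGES_FOR_IO is (4096 * 1024) >> 12 = 1024 pages (4 MB) and SPARE_PAGES
 * is (1024 * 1024) >> 12 = 256 pages (1 MB).  On architectures with larger
 * pages the page counts shrink while the reserved amount stays the same.
 */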

asmlinkage int swsusp_save(void);

/* kernel/power/hibernate.c */
extern bool freezer_test_done;
extern char hib_comp_algo[CRYPTO_MAX_ALG_NAME];

/* kernel/power/swap.c */
extern unsigned int swsusp_header_flags;

extern int hibernation_snapshot(int platform_mode);
extern int hibernation_restore(int platform_mode);
extern int hibernation_platform_enter(void);

#ifdef CONFIG_STRICT_KERNEL_RWX
/* kernel/power/snapshot.c */
extern void enable_restore_image_protection(void);
#else
static inline void enable_restore_image_protection(void) {}
#endif /* CONFIG_STRICT_KERNEL_RWX */

#else /* !CONFIG_HIBERNATION */

static inline void hibernate_reserved_size_init(void) {}
static inline void hibernate_image_size_init(void) {}
#endif /* !CONFIG_HIBERNATION */

#define power_attr(_name) \
static struct kobj_attribute _name##_attr = {	\
	.attr	= {				\
		.name = __stringify(_name),	\
		.mode = 0644,			\
	},					\
	.show	= _name##_show,			\
	.store	= _name##_store,		\
}

#define power_attr_ro(_name) \
static struct kobj_attribute _name##_attr = {	\
	.attr	= {				\
		.name = __stringify(_name),	\
		.mode = S_IRUGO,		\
	},					\
	.show	= _name##_show,			\
}
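
/*
 * Usage sketch, illustrative only: a writable attribute "foo" under
 * /sys/power is built from a pair of foo_show()/foo_store() handlers
 * followed by power_attr(foo); the "foo" names and the foo_enabled
 * variable below are hypothetical.
 *
 *	static unsigned int foo_enabled;
 *
 *	static ssize_t foo_show(struct kobject *kobj,
 *				struct kobj_attribute *attr, char *buf)
 *	{
 *		return sysfs_emit(buf, "%u\n", foo_enabled);
 *	}
 *
 *	static ssize_t foo_store(struct kobject *kobj,
 *				 struct kobj_attribute *attr,
 *				 const char *buf, size_t n)
 *	{
 *		if (kstrtouint(buf, 10, &foo_enabled))
 *			return -EINVAL;
 *		return n;
 *	}
 *	power_attr(foo);
 *
 * The resulting foo_attr is then listed in an attribute group that
 * kernel/power/main.c registers on the /sys/power kobject.
 */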

/* Preferred image size in bytes (default 500 MB) */
extern unsigned long image_size;
/* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
extern unsigned long reserved_size;
extern int in_suspend;
extern dev_t swsusp_resume_device;
extern sector_t swsusp_resume_block;

extern int create_basic_memory_bitmaps(void);
extern void free_basic_memory_bitmaps(void);
extern int hibernate_preallocate_memory(void);

extern void clear_or_poison_free_pages(void);

/**
 *	Auxiliary structure used for reading the snapshot image data and
 *	metadata from, and writing them to, the list of page backup entries
 *	(PBEs), which is the main data structure of swsusp.
 *
 *	Using struct snapshot_handle we can transfer the image, including its
 *	metadata, as a continuous sequence of bytes with the help of
 *	snapshot_read_next() and snapshot_write_next().
 *
 *	The code that writes the image to storage or transfers it to
 *	user space is required to use snapshot_read_next() for this
 *	purpose and it should not make any assumptions regarding the internal
 *	structure of the image.  Similarly, the code that reads the image from
 *	storage or transfers it from user space is required to use
 *	snapshot_write_next().
 *
 *	This may allow us to change the internal structure of the image
 *	in the future with considerably less effort.
 */

struct snapshot_handle {
	unsigned int	cur;	/* number of the block of PAGE_SIZE bytes the
				 * next operation will refer to (i.e. current)
				 */
	void		*buffer;	/* address of the block to read from
					 * or write to
					 */
	int		sync_read;	/* Set to one to notify the caller of
					 * snapshot_write_next() that it may
					 * need to call wait_on_bio_chain()
					 */
};

/*
 * This macro returns the address from/to which the caller of
 * snapshot_read_next()/snapshot_write_next() is allowed to
 * read/write data after the function returns.
 */
#define data_of(handle)	((handle).buffer)

extern unsigned int snapshot_additional_pages(struct zone *zone);
extern unsigned long snapshot_get_image_size(void);
extern int snapshot_read_next(struct snapshot_handle *handle);
extern int snapshot_write_next(struct snapshot_handle *handle);
int snapshot_write_finalize(struct snapshot_handle *handle);
extern int snapshot_image_loaded(struct snapshot_handle *handle);
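
/*
 * Read-side usage sketch, illustrative only: the image is pulled one
 * PAGE_SIZE chunk at a time; a positive return value from
 * snapshot_read_next() means data_of() points to the next chunk, 0 means
 * the whole image has been transferred, and a negative value is an error.
 * The write_page() helper below is hypothetical, standing in for whatever
 * actually stores or transfers the chunk (swap, user space, ...).
 *
 *	struct snapshot_handle snapshot;
 *	int error;
 *
 *	memset(&snapshot, 0, sizeof(struct snapshot_handle));
 *	while ((error = snapshot_read_next(&snapshot)) > 0) {
 *		error = write_page(data_of(snapshot), PAGE_SIZE);
 *		if (error)
 *			break;
 *	}
 */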

extern bool hibernate_acquire(void);
extern void hibernate_release(void);

extern sector_t alloc_swapdev_block(int swap);
extern void free_all_swap_pages(int swap);
extern int swsusp_swap_in_use(void);

/*
 * Flags that can be passed from the hibernating kernel to the "boot" kernel
 * in the image header.
 */
#define SF_COMPRESSION_ALG_LZO	0 /* dummy, details given below */
#define SF_PLATFORM_MODE	1
#define SF_NOCOMPRESS_MODE	2
#define SF_CRC32_MODE		4
#define SF_HW_SIG		8

/*
 * Bit to indicate the compression algorithm to be used (for LZ4). The same
 * can be checked while saving/loading the image to/from disk to use the
 * corresponding algorithm.
 *
 * By default, LZO compression is enabled if SF_CRC32_MODE is set. Use
 * SF_COMPRESSION_ALG_LZ4 to override this behaviour and use LZ4.
 *
 * SF_CRC32_MODE, SF_COMPRESSION_ALG_LZO (dummy) -> Compression, LZO
 * SF_CRC32_MODE, SF_COMPRESSION_ALG_LZ4 -> Compression, LZ4
 */
#define SF_COMPRESSION_ALG_LZ4	16
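
/*
 * Illustrative decode sketch: on resume, the flags read back from the image
 * header select whether and how the image is compressed, consistent with
 * the rules above.  The "flags" variable is hypothetical; the real
 * selection logic lives in kernel/power/swap.c and kernel/power/hibernate.c.
 *
 *	unsigned int flags;	(as read back from the swsusp header)
 *
 *	if (flags & SF_NOCOMPRESS_MODE) {
 *		... the image is stored uncompressed ...
 *	} else if (flags & SF_CRC32_MODE) {
 *		strscpy(hib_comp_algo,
 *			(flags & SF_COMPRESSION_ALG_LZ4) ? "lz4" : "lzo",
 *			sizeof(hib_comp_algo));
 *	}
 */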

/* kernel/power/hibernate.c */
int swsusp_check(bool exclusive);
extern void swsusp_free(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
void swsusp_close(void);
#ifdef CONFIG_SUSPEND
extern int swsusp_unmark(void);
#else
static inline int swsusp_unmark(void) { return 0; }
#endif

struct __kernel_old_timeval;
/* kernel/power/swsusp.c */
extern void swsusp_show_speed(ktime_t, ktime_t, unsigned int, char *);

#ifdef CONFIG_SUSPEND
/* kernel/power/suspend.c */
extern const char * const pm_labels[];
extern const char *pm_states[];
extern const char *mem_sleep_states[];

extern int suspend_devices_and_enter(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
#define mem_sleep_current	PM_SUSPEND_ON

static inline int suspend_devices_and_enter(suspend_state_t state)
{
	return -ENOSYS;
}
#endif /* !CONFIG_SUSPEND */

#ifdef CONFIG_PM_TEST_SUSPEND
/* kernel/power/suspend_test.c */
extern void suspend_test_start(void);
extern void suspend_test_finish(const char *label);
#else /* !CONFIG_PM_TEST_SUSPEND */
static inline void suspend_test_start(void) {}
static inline void suspend_test_finish(const char *label) {}
#endif /* !CONFIG_PM_TEST_SUSPEND */

#ifdef CONFIG_PM_SLEEP
/* kernel/power/main.c */
extern int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down);
extern int pm_notifier_call_chain(unsigned long val);
void pm_restrict_gfp_mask(void);
void pm_restore_gfp_mask(void);
#else
static inline void pm_restrict_gfp_mask(void) {}
static inline void pm_restore_gfp_mask(void) {}
#endif

#ifdef CONFIG_HIGHMEM
int restore_highmem(void);
#else
static inline unsigned int count_highmem_pages(void) { return 0; }
static inline int restore_highmem(void) { return 0; }
#endif

/*
 * Suspend test levels
 */
enum {
	/* keep first */
	TEST_NONE,
	TEST_CORE,
	TEST_CPUS,
	TEST_PLATFORM,
	TEST_DEVICES,
	TEST_FREEZER,
	/* keep last */
	__TEST_AFTER_LAST
};

#define TEST_FIRST	TEST_NONE
#define TEST_MAX	(__TEST_AFTER_LAST - 1)

#ifdef CONFIG_PM_SLEEP_DEBUG
extern int pm_test_level;
#else
#define pm_test_level	(TEST_NONE)
#endif
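
/*
 * Illustrative sketch only; the helper and label names below are
 * hypothetical: each step of the suspend sequence can compare
 * pm_test_level with the level it has just completed and unwind from
 * there when a test run was requested through /sys/power/pm_test, e.g.
 *
 *	static bool pm_test_stop_here(int level)
 *	{
 *		return pm_test_level == level;
 *	}
 *
 *	if (pm_test_stop_here(TEST_DEVICES))
 *		goto Resume_devices;
 */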

#ifdef CONFIG_SUSPEND_FREEZER
static inline int suspend_freeze_processes(void)
{
	int error;

	error = freeze_processes();
	/*
	 * freeze_processes() automatically thaws every task if freezing
	 * fails. So we need not do anything extra upon error.
	 */
	if (error)
		return error;

	error = freeze_kernel_threads();
	/*
	 * freeze_kernel_threads() thaws only kernel threads upon freezing
	 * failure. So we have to thaw the userspace tasks ourselves.
	 */
	if (error)
		thaw_processes();

	return error;
}

static inline void suspend_thaw_processes(void)
{
	thaw_processes();
}
#else
static inline int suspend_freeze_processes(void)
{
	return 0;
}

static inline void suspend_thaw_processes(void)
{
}
#endif
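
/*
 * Simplified ordering sketch (error handling trimmed; the real sequence
 * lives in enter_state() in kernel/power/suspend.c): tasks are frozen
 * before devices are suspended and thawed again once the system is back.
 *
 *	error = suspend_freeze_processes();
 *	if (!error) {
 *		error = suspend_devices_and_enter(state);
 *		suspend_thaw_processes();
 *	}
 */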

#ifdef CONFIG_PM_AUTOSLEEP

/* kernel/power/autosleep.c */
extern int pm_autosleep_init(void);
extern int pm_autosleep_lock(void);
extern void pm_autosleep_unlock(void);
extern suspend_state_t pm_autosleep_state(void);
extern int pm_autosleep_set_state(suspend_state_t state);

#else /* !CONFIG_PM_AUTOSLEEP */

static inline int pm_autosleep_init(void) { return 0; }
static inline int pm_autosleep_lock(void) { return 0; }
static inline void pm_autosleep_unlock(void) {}
static inline suspend_state_t pm_autosleep_state(void) { return PM_SUSPEND_ON; }

#endif /* !CONFIG_PM_AUTOSLEEP */

#ifdef CONFIG_PM_WAKELOCKS

/* kernel/power/wakelock.c */
extern ssize_t pm_show_wakelocks(char *buf, bool show_active);
extern int pm_wake_lock(const char *buf);
extern int pm_wake_unlock(const char *buf);

#endif /* CONFIG_PM_WAKELOCKS */

static inline int pm_sleep_disable_secondary_cpus(void)
{
	cpuidle_pause();
	return suspend_disable_secondary_cpus();
}

static inline void pm_sleep_enable_secondary_cpus(void)
{
	suspend_enable_secondary_cpus();
	cpuidle_resume();
}
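
/*
 * Pairing sketch, illustrative only: callers take the non-boot CPUs down
 * around the point where the system actually enters the sleep state and
 * bring them back up on the way out, e.g.
 *
 *	error = pm_sleep_disable_secondary_cpus();
 *	if (!error)
 *		error = enter_the_sleep_state();
 *	pm_sleep_enable_secondary_cpus();
 *
 * where enter_the_sleep_state() is a stand-in for the platform-specific
 * step, not a real function in this code.
 */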

void dpm_save_errno(int err);