xref: /linux/kernel/power/power.h (revision 765532c8aaac624b5f8687af6d319c6a1138a257)
#include <linux/suspend.h>
#include <linux/suspend_ioctls.h>
#include <linux/utsname.h>
#include <linux/freezer.h>

struct swsusp_info {
	struct new_utsname	uts;
	u32			version_code;
	unsigned long		num_physpages;
	int			cpus;
	unsigned long		image_pages;
	unsigned long		pages;
	unsigned long		size;
} __attribute__((aligned(PAGE_SIZE)));
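
/*
 * Illustrative sketch (not compiled): one way the generic fields above could
 * be filled in when the image header is created, roughly along the lines of
 * what snapshot.c does.  The helper name and the nr_copy_pages argument are
 * made up for the example; linux/version.h and linux/cpumask.h would also be
 * needed.
 */
#if 0
static void example_init_swsusp_info(struct swsusp_info *info,
				     unsigned long nr_copy_pages)
{
	memset(info, 0, sizeof(*info));
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->version_code = LINUX_VERSION_CODE;
	info->num_physpages = num_physpages;
	info->cpus = num_online_cpus();
	info->image_pages = nr_copy_pages;		/* data pages only */
	info->pages = snapshot_get_image_size();	/* data + metadata */
	info->size = info->pages << PAGE_SHIFT;		/* in bytes */
}
#endif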

#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __init hibernate_image_size_init(void);

#ifdef CONFIG_ARCH_HIBERNATION_HEADER
/* Maximum size of architecture specific data in a hibernation header */
#define MAX_ARCH_HEADER_SIZE	(sizeof(struct new_utsname) + 4)

extern int arch_hibernation_header_save(void *addr, unsigned int max_size);
extern int arch_hibernation_header_restore(void *addr);

static inline int init_header_complete(struct swsusp_info *info)
{
	return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE);
}

static inline char *check_image_kernel(struct swsusp_info *info)
{
	return arch_hibernation_header_restore(info) ?
			"architecture specific data" : NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

/*
 * Keep some memory free so that I/O operations can succeed without paging
 * [Might this be more than 4 MB?]
 */
#define PAGES_FOR_IO	((4096 * 1024) >> PAGE_SHIFT)

/*
 * Keep 1 MB of memory free so that device drivers can allocate some pages in
 * their .suspend() routines without breaking the suspend to disk.
 */
#define SPARE_PAGES	((1024 * 1024) >> PAGE_SHIFT)
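
/*
 * Worked example: with 4 KB pages (PAGE_SHIFT == 12) the definitions above
 * come out to PAGES_FOR_IO == (4096 * 1024) >> 12 == 1024 pages (4 MB) and
 * SPARE_PAGES == (1024 * 1024) >> 12 == 256 pages (1 MB).
 */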

/* kernel/power/hibernate.c */
extern int hibernation_snapshot(int platform_mode);
extern int hibernation_restore(int platform_mode);
extern int hibernation_platform_enter(void);

#else /* !CONFIG_HIBERNATION */

static inline void hibernate_image_size_init(void) {}
#endif /* !CONFIG_HIBERNATION */

extern int pfn_is_nosave(unsigned long);

#define power_attr(_name) \
static struct kobj_attribute _name##_attr = {	\
	.attr	= {				\
		.name = __stringify(_name),	\
		.mode = 0644,			\
	},					\
	.show	= _name##_show,			\
	.store	= _name##_store,		\
}
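
/*
 * Usage sketch (not compiled): power_attr(foo) expects foo_show() and
 * foo_store() to be defined first and expands to a 0644 kobj_attribute
 * named foo_attr, which can then be put into a sysfs attribute group.
 * "foo" and the values shown are made up for the example.
 */
#if 0
static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", 42);
}

static ssize_t foo_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t n)
{
	return n;	/* accept and ignore the input */
}

power_attr(foo);	/* defines: static struct kobj_attribute foo_attr */
#endif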

/* Preferred image size in bytes (default 500 MB) */
extern unsigned long image_size;
extern int in_suspend;
extern dev_t swsusp_resume_device;
extern sector_t swsusp_resume_block;

extern asmlinkage int swsusp_arch_suspend(void);
extern asmlinkage int swsusp_arch_resume(void);

extern int create_basic_memory_bitmaps(void);
extern void free_basic_memory_bitmaps(void);
extern int hibernate_preallocate_memory(void);

/**
 *	Auxiliary structure used for reading the snapshot image data and
 *	metadata from and writing them to the list of page backup entries
 *	(PBEs), which is the main data structure of swsusp.
 *
 *	Using struct snapshot_handle we can transfer the image, including its
 *	metadata, as a continuous sequence of bytes with the help of
 *	snapshot_read_next() and snapshot_write_next().
 *
 *	The code that writes the image to storage or transfers it to
 *	user space is required to use snapshot_read_next() for this
 *	purpose, and it should not make any assumptions regarding the internal
 *	structure of the image.  Similarly, the code that reads the image from
 *	storage or transfers it from user space is required to use
 *	snapshot_write_next().
 *
 *	This may allow us to change the internal structure of the image
 *	in the future with considerably less effort.
 */

struct snapshot_handle {
	unsigned int	cur;	/* number of the block of PAGE_SIZE bytes the
				 * next operation will refer to (ie. current)
				 */
	void		*buffer;	/* address of the block to read from
					 * or write to
					 */
	int		sync_read;	/* Set to one to notify the caller of
					 * snapshot_write_next() that it may
					 * need to call wait_on_bio_chain()
					 */
};

/* This macro returns the address from/to which the caller of
 * snapshot_read_next()/snapshot_write_next() is allowed to
 * read/write data after the function returns
 */
#define data_of(handle)	((handle).buffer)

extern unsigned int snapshot_additional_pages(struct zone *zone);
extern unsigned long snapshot_get_image_size(void);
extern int snapshot_read_next(struct snapshot_handle *handle);
extern int snapshot_write_next(struct snapshot_handle *handle);
extern void snapshot_write_finalize(struct snapshot_handle *handle);
extern int snapshot_image_loaded(struct snapshot_handle *handle);
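
/*
 * Illustrative sketch (not compiled): the loop below shows how code that
 * saves the image is expected to drive snapshot_read_next() and data_of();
 * swap.c and the user space interface follow this pattern.  The
 * example_store_page() callback is hypothetical.
 */
#if 0
static int example_save_image(struct snapshot_handle *handle,
			      unsigned int nr_pages)
{
	int ret;

	memset(handle, 0, sizeof(*handle));
	while (nr_pages--) {
		ret = snapshot_read_next(handle);
		if (ret <= 0)
			return ret;
		/* data_of(*handle) now points at PAGE_SIZE bytes of image data */
		ret = example_store_page(data_of(*handle));
		if (ret)
			return ret;
	}
	return 0;
}
#endif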

/* If unset, the snapshot device cannot be opened. */
extern atomic_t snapshot_device_available;

extern sector_t alloc_swapdev_block(int swap);
extern void free_all_swap_pages(int swap);
extern int swsusp_swap_in_use(void);

/*
 * Flags that can be passed from the hibernating kernel to the "boot" kernel
 * in the image header.
 */
#define SF_PLATFORM_MODE	1
#define SF_NOCOMPRESS_MODE	2

/* kernel/power/swap.c */
extern int swsusp_check(void);
extern void swsusp_free(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
extern void swsusp_close(fmode_t);
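
/*
 * Sketch (not compiled): on resume the image header flags come back through
 * swsusp_read() and the SF_* bits above select how to proceed, e.g. whether
 * to restore through the platform path.  The variable names are illustrative
 * only.
 */
#if 0
	unsigned int flags;
	int error;

	error = swsusp_read(&flags);
	if (!error)
		error = hibernation_restore(flags & SF_PLATFORM_MODE);
#endif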

/* kernel/power/block_io.c */
extern struct block_device *hib_resume_bdev;

extern int hib_bio_read_page(pgoff_t page_off, void *addr,
		struct bio **bio_chain);
extern int hib_bio_write_page(pgoff_t page_off, void *addr,
		struct bio **bio_chain);
extern int hib_wait_on_bio_chain(struct bio **bio_chain);
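
/*
 * Sketch (not compiled): several pages can be submitted asynchronously by
 * passing the same chain head to hib_bio_write_page() and then draining it
 * with hib_wait_on_bio_chain(); a NULL chain pointer makes the call
 * synchronous.  The offsets and buffers below are made up.
 */
#if 0
	struct bio *bio_chain = NULL;
	int error;

	error = hib_bio_write_page(offset, buf, &bio_chain);
	if (!error)
		error = hib_bio_write_page(offset + 1, buf2, &bio_chain);
	if (!error)
		error = hib_wait_on_bio_chain(&bio_chain);
#endif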

struct timeval;
/* kernel/power/swsusp.c */
extern void swsusp_show_speed(struct timeval *, struct timeval *,
				unsigned int, char *);

#ifdef CONFIG_SUSPEND
/* kernel/power/suspend.c */
extern const char *const pm_states[];

extern bool valid_state(suspend_state_t state);
extern int suspend_devices_and_enter(suspend_state_t state);
extern int enter_state(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
static inline int suspend_devices_and_enter(suspend_state_t state)
{
	return -ENOSYS;
}
static inline int enter_state(suspend_state_t state) { return -ENOSYS; }
static inline bool valid_state(suspend_state_t state) { return false; }
#endif /* !CONFIG_SUSPEND */
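
/*
 * Sketch (not compiled): how a state name such as "mem" might be mapped to a
 * suspend_state_t through pm_states[] and handed to enter_state(), similar
 * to what the sysfs store handler in main.c does.  The helper is
 * hypothetical.
 */
#if 0
static int example_enter_named_state(const char *name)
{
	suspend_state_t state;

	for (state = PM_SUSPEND_STANDBY; state < PM_SUSPEND_MAX; state++)
		if (pm_states[state] && !strcmp(pm_states[state], name))
			return enter_state(state);

	return -EINVAL;
}
#endif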

#ifdef CONFIG_PM_TEST_SUSPEND
/* kernel/power/suspend_test.c */
extern void suspend_test_start(void);
extern void suspend_test_finish(const char *label);
#else /* !CONFIG_PM_TEST_SUSPEND */
static inline void suspend_test_start(void) {}
static inline void suspend_test_finish(const char *label) {}
#endif /* !CONFIG_PM_TEST_SUSPEND */
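
/*
 * Sketch (not compiled): the test hooks bracket one phase of the suspend
 * sequence so that its duration can be reported, with the label naming the
 * phase; suspend.c uses them around the device suspend/resume steps.
 */
#if 0
	suspend_test_start();
	error = dpm_suspend_start(PMSG_SUSPEND);
	suspend_test_finish("suspend devices");
#endif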

#ifdef CONFIG_PM_SLEEP
/* kernel/power/main.c */
extern int pm_notifier_call_chain(unsigned long val);
#endif

#ifdef CONFIG_HIGHMEM
int restore_highmem(void);
#else
static inline unsigned int count_highmem_pages(void) { return 0; }
static inline int restore_highmem(void) { return 0; }
#endif

/*
 * Suspend test levels
 */
enum {
	/* keep first */
	TEST_NONE,
	TEST_CORE,
	TEST_CPUS,
	TEST_PLATFORM,
	TEST_DEVICES,
	TEST_FREEZER,
	/* keep last */
	__TEST_AFTER_LAST
};

#define TEST_FIRST	TEST_NONE
#define TEST_MAX	(__TEST_AFTER_LAST - 1)

extern int pm_test_level;
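
/*
 * Sketch (not compiled): pm_test_level selects how deep a test suspend goes.
 * A check along the lines of suspend.c's suspend_test() stops the sequence
 * once the requested level is reached.
 */
#if 0
static int example_suspend_test(int level)
{
	if (pm_test_level == level) {
		printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
		mdelay(5000);
		return 1;	/* caller unwinds from this point */
	}
	return 0;
}
#endif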

#ifdef CONFIG_SUSPEND_FREEZER
static inline int suspend_freeze_processes(void)
{
	return freeze_processes();
}

static inline void suspend_thaw_processes(void)
{
	thaw_processes();
}
#else
static inline int suspend_freeze_processes(void)
{
	return 0;
}

static inline void suspend_thaw_processes(void)
{
}
#endif
244