/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/suspend.h>
#include <linux/suspend_ioctls.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/crypto.h>

struct swsusp_info {
	struct new_utsname	uts;
	u32			version_code;
	unsigned long		num_physpages;
	int			cpus;
	unsigned long		image_pages;
	unsigned long		pages;
	unsigned long		size;
} __aligned(PAGE_SIZE);

#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION)
extern int pm_sleep_fs_sync(void);
extern bool filesystem_freeze_enabled;
#endif

#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __init hibernate_reserved_size_init(void);
extern void __init hibernate_image_size_init(void);

#ifdef CONFIG_ARCH_HIBERNATION_HEADER
/* Maximum size of architecture specific data in a hibernation header */
#define MAX_ARCH_HEADER_SIZE	(sizeof(struct new_utsname) + 4)

static inline int init_header_complete(struct swsusp_info *info)
{
	return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE);
}

static inline const char *check_image_kernel(struct swsusp_info *info)
{
	return arch_hibernation_header_restore(info) ?
			"architecture specific data" : NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

/*
 * Keep some memory free so that I/O operations can succeed without paging
 * [Might this be more than 4 MB?]
 */
#define PAGES_FOR_IO	((4096 * 1024) >> PAGE_SHIFT)

/*
 * Keep 1 MB of memory free so that device drivers can allocate some pages in
 * their .suspend() routines without breaking the suspend to disk.
 */
#define SPARE_PAGES	((1024 * 1024) >> PAGE_SHIFT)

asmlinkage int swsusp_save(void);

/* kernel/power/hibernate.c */
extern bool freezer_test_done;
extern char hib_comp_algo[CRYPTO_MAX_ALG_NAME];

/* kernel/power/swap.c */
extern unsigned int swsusp_header_flags;

extern int hibernation_snapshot(int platform_mode);
extern int hibernation_restore(int platform_mode);
extern int hibernation_platform_enter(void);

#ifdef CONFIG_STRICT_KERNEL_RWX
/* kernel/power/snapshot.c */
extern void enable_restore_image_protection(void);
#else
static inline void enable_restore_image_protection(void) {}
#endif /* CONFIG_STRICT_KERNEL_RWX */

extern bool hibernation_in_progress(void);

#else /* !CONFIG_HIBERNATION */

static inline void hibernate_reserved_size_init(void) {}
static inline void hibernate_image_size_init(void) {}

static inline bool hibernation_in_progress(void) { return false; }
#endif /* !CONFIG_HIBERNATION */

#define power_attr(_name) \
static struct kobj_attribute _name##_attr = {	\
	.attr	= {				\
		.name = __stringify(_name),	\
		.mode = 0644,			\
	},					\
	.show	= _name##_show,			\
	.store	= _name##_store,		\
}

#define power_attr_ro(_name) \
static struct kobj_attribute _name##_attr = {	\
	.attr	= {				\
		.name = __stringify(_name),	\
		.mode = S_IRUGO,		\
	},					\
	.show	= _name##_show,			\
}
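
/*
 * Illustrative sketch (editor's example, not part of the kernel sources):
 * how the power_attr()/power_attr_ro() helpers above are meant to be used.
 * The macros only emit the struct kobj_attribute; show/store callbacks with
 * matching names must already exist.  The attribute "foo" and the variable
 * foo_value below are hypothetical.
 *
 *	static int foo_value;
 *
 *	static ssize_t foo_show(struct kobject *kobj,
 *				struct kobj_attribute *attr, char *buf)
 *	{
 *		return sysfs_emit(buf, "%d\n", foo_value);
 *	}
 *
 *	static ssize_t foo_store(struct kobject *kobj,
 *				 struct kobj_attribute *attr,
 *				 const char *buf, size_t n)
 *	{
 *		int error = kstrtoint(buf, 10, &foo_value);
 *
 *		return error ? error : n;
 *	}
 *
 *	power_attr(foo);
 *
 * power_attr(foo) expands to a static struct kobj_attribute named foo_attr
 * with mode 0644; power_attr_ro(foo) would omit .store and use S_IRUGO.
 */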

/* Preferred image size in bytes (default 500 MB) */
extern unsigned long image_size;
/* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
extern unsigned long reserved_size;
extern int in_suspend;
extern dev_t swsusp_resume_device;
extern sector_t swsusp_resume_block;

extern int create_basic_memory_bitmaps(void);
extern void free_basic_memory_bitmaps(void);
extern int hibernate_preallocate_memory(void);

extern void clear_or_poison_free_pages(void);

/*
 * Auxiliary structure used for reading the snapshot image data and
 * metadata from and writing them to the list of page backup entries
 * (PBEs) which is the main data structure of swsusp.
 *
 * Using struct snapshot_handle we can transfer the image, including its
 * metadata, as a continuous sequence of bytes with the help of
 * snapshot_read_next() and snapshot_write_next().
 *
 * The code that writes the image to a storage or transfers it to
 * the user land is required to use snapshot_read_next() for this
 * purpose and it should not make any assumptions regarding the internal
 * structure of the image.  Similarly, the code that reads the image from
 * a storage or transfers it from the user land is required to use
 * snapshot_write_next().
 *
 * This may allow us to change the internal structure of the image
 * in the future with considerably less effort.
 */

struct snapshot_handle {
	unsigned int	cur;		/* number of the block of PAGE_SIZE bytes the
					 * next operation will refer to (ie. current)
					 */
	void		*buffer;	/* address of the block to read from
					 * or write to
					 */
	int		sync_read;	/* Set to one to notify the caller of
					 * snapshot_write_next() that it may
					 * need to call wait_on_bio_chain()
					 */
};

/* This macro returns the address from/to which the caller of
 * snapshot_read_next()/snapshot_write_next() is allowed to
 * read/write data after the function returns
 */
#define data_of(handle)	((handle).buffer)

extern unsigned int snapshot_additional_pages(struct zone *zone);
extern unsigned long snapshot_get_image_size(void);
extern int snapshot_read_next(struct snapshot_handle *handle);
extern int snapshot_write_next(struct snapshot_handle *handle);
int snapshot_write_finalize(struct snapshot_handle *handle);
extern int snapshot_image_loaded(struct snapshot_handle *handle);
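
/*
 * Illustrative sketch (editor's example, not part of the kernel sources):
 * the pattern a consumer is expected to follow when draining the image
 * through snapshot_read_next() and data_of().  snapshot_read_next() returns
 * the number of bytes made available (> 0), 0 once the whole image has been
 * transferred, or a negative error code.  The real users live in
 * kernel/power/swap.c and kernel/power/user.c; write_page() below is a
 * hypothetical output callback.
 *
 *	struct snapshot_handle handle;
 *	int error;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	for (;;) {
 *		error = snapshot_read_next(&handle);
 *		if (error <= 0)
 *			break;
 *		error = write_page(data_of(handle));
 *		if (error)
 *			break;
 *	}
 */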

extern bool hibernate_acquire(void);
extern void hibernate_release(void);

extern sector_t alloc_swapdev_block(int swap);
extern void free_all_swap_pages(int swap);
extern int swsusp_swap_in_use(void);

/*
 * Flags that can be passed from the hibernating kernel to the "boot" kernel
 * in the image header.
 */
#define SF_COMPRESSION_ALG_LZO	0 /* dummy, details given below */
#define SF_PLATFORM_MODE	1
#define SF_NOCOMPRESS_MODE	2
#define SF_CRC32_MODE		4
#define SF_HW_SIG		8

/*
 * Bit to indicate the compression algorithm to be used (LZ4).  The same bit
 * can be checked while saving/loading the image to/from disk to pick the
 * corresponding algorithm.
 *
 * By default, LZO compression is enabled if SF_CRC32_MODE is set.  Use
 * SF_COMPRESSION_ALG_LZ4 to override this behaviour and use LZ4.
 *
 * SF_CRC32_MODE, SF_COMPRESSION_ALG_LZO (dummy) -> Compression, LZO
 * SF_CRC32_MODE, SF_COMPRESSION_ALG_LZ4	 -> Compression, LZ4
 */
#define SF_COMPRESSION_ALG_LZ4	16
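
/*
 * Illustrative sketch (editor's example, not part of the kernel sources):
 * how an image loader might derive the decompression algorithm from the
 * header flags under the scheme described above, returning the algorithm
 * name or NULL for an uncompressed image.  The helper name is hypothetical;
 * the real checks are done in kernel/power/swap.c.
 *
 *	static const char *example_pick_decompressor(unsigned int flags)
 *	{
 *		if (flags & SF_NOCOMPRESS_MODE)
 *			return NULL;
 *		if (!(flags & SF_CRC32_MODE))
 *			return NULL;
 *		return (flags & SF_COMPRESSION_ALG_LZ4) ? "lz4" : "lzo";
 *	}
 */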

/* kernel/power/hibernate.c */
int swsusp_check(bool exclusive);
extern void swsusp_free(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
void swsusp_close(void);
#ifdef CONFIG_SUSPEND
extern int swsusp_unmark(void);
#else
static inline int swsusp_unmark(void) { return 0; }
#endif

struct __kernel_old_timeval;
/* kernel/power/swsusp.c */
extern void swsusp_show_speed(ktime_t, ktime_t, unsigned int, char *);

#ifdef CONFIG_SUSPEND
/* kernel/power/suspend.c */
extern const char * const pm_labels[];
extern const char *pm_states[];
extern const char *mem_sleep_states[];

extern int suspend_devices_and_enter(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
#define mem_sleep_current	PM_SUSPEND_ON

static inline int suspend_devices_and_enter(suspend_state_t state)
{
	return -ENOSYS;
}
#endif /* !CONFIG_SUSPEND */

#ifdef CONFIG_PM_TEST_SUSPEND
/* kernel/power/suspend_test.c */
extern void suspend_test_start(void);
extern void suspend_test_finish(const char *label);
#else /* !CONFIG_PM_TEST_SUSPEND */
static inline void suspend_test_start(void) {}
static inline void suspend_test_finish(const char *label) {}
#endif /* !CONFIG_PM_TEST_SUSPEND */

#ifdef CONFIG_PM_SLEEP
/* kernel/power/main.c */
extern int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down);
extern int pm_notifier_call_chain(unsigned long val);
#endif

#ifdef CONFIG_HIGHMEM
int restore_highmem(void);
#else
static inline unsigned int count_highmem_pages(void) { return 0; }
static inline int restore_highmem(void) { return 0; }
#endif

/*
 * Suspend test levels
 */
enum {
	/* keep first */
	TEST_NONE,
	TEST_CORE,
	TEST_CPUS,
	TEST_PLATFORM,
	TEST_DEVICES,
	TEST_FREEZER,
	/* keep last */
	__TEST_AFTER_LAST
};

#define TEST_FIRST	TEST_NONE
#define TEST_MAX	(__TEST_AFTER_LAST - 1)

#ifdef CONFIG_PM_SLEEP_DEBUG
extern int pm_test_level;
#else
#define pm_test_level	(TEST_NONE)
#endif

#ifdef CONFIG_SUSPEND_FREEZER
static inline int suspend_freeze_processes(void)
{
	int error;

	error = freeze_processes();
	/*
	 * freeze_processes() automatically thaws every task if freezing
	 * fails. So we need not do anything extra upon error.
	 */
	if (error)
		return error;

	error = freeze_kernel_threads();
	/*
	 * freeze_kernel_threads() thaws only kernel threads upon freezing
	 * failure. So we have to thaw the userspace tasks ourselves.
	 */
	if (error)
		thaw_processes();

	return error;
}

static inline void suspend_thaw_processes(void)
{
	thaw_processes();
}
#else
static inline int suspend_freeze_processes(void)
{
	return 0;
}

static inline void suspend_thaw_processes(void)
{
}
#endif

#ifdef CONFIG_PM_AUTOSLEEP

/* kernel/power/autosleep.c */
extern int pm_autosleep_init(void);
extern int pm_autosleep_lock(void);
extern void pm_autosleep_unlock(void);
extern suspend_state_t pm_autosleep_state(void);
extern int pm_autosleep_set_state(suspend_state_t state);

#else /* !CONFIG_PM_AUTOSLEEP */

static inline int pm_autosleep_init(void) { return 0; }
static inline int pm_autosleep_lock(void) { return 0; }
static inline void pm_autosleep_unlock(void) {}
static inline suspend_state_t pm_autosleep_state(void) { return PM_SUSPEND_ON; }

#endif /* !CONFIG_PM_AUTOSLEEP */

#ifdef CONFIG_PM_WAKELOCKS

/* kernel/power/wakelock.c */
extern ssize_t pm_show_wakelocks(char *buf, bool show_active);
extern int pm_wake_lock(const char *buf);
extern int pm_wake_unlock(const char *buf);

#endif /* CONFIG_PM_WAKELOCKS */

static inline int pm_sleep_disable_secondary_cpus(void)
{
	cpuidle_pause();
	return suspend_disable_secondary_cpus();
}

static inline void pm_sleep_enable_secondary_cpus(void)
{
	suspend_enable_secondary_cpus();
	cpuidle_resume();
}

void dpm_save_errno(int err);
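
/*
 * Illustrative sketch (editor's example, not part of the kernel sources):
 * pm_sleep_disable_secondary_cpus() and pm_sleep_enable_secondary_cpus()
 * are meant to bracket the single-CPU, interrupts-off portion of the
 * suspend/hibernate sequence, roughly following the pattern used in
 * kernel/power/hibernate.c:
 *
 *	error = pm_sleep_disable_secondary_cpus();
 *	if (error)
 *		goto Enable_cpus;
 *
 *	local_irq_disable();
 *	(single-CPU critical work, e.g. syscore_suspend() and swsusp_save())
 *	local_irq_enable();
 *
 *  Enable_cpus:
 *	pm_sleep_enable_secondary_cpus();
 */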