xref: /linux/kernel/power/snapshot.c (revision 9c5968db9e625019a0ee5226c7eebef5519d366a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * linux/kernel/power/snapshot.c
4  *
5  * This file provides system snapshot/restore functionality for swsusp.
6  *
7  * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
8  * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
9  */
10 
11 #define pr_fmt(fmt) "PM: hibernation: " fmt
12 
13 #include <linux/version.h>
14 #include <linux/module.h>
15 #include <linux/mm.h>
16 #include <linux/suspend.h>
17 #include <linux/delay.h>
18 #include <linux/bitops.h>
19 #include <linux/spinlock.h>
20 #include <linux/kernel.h>
21 #include <linux/pm.h>
22 #include <linux/device.h>
23 #include <linux/init.h>
24 #include <linux/memblock.h>
25 #include <linux/nmi.h>
26 #include <linux/syscalls.h>
27 #include <linux/console.h>
28 #include <linux/highmem.h>
29 #include <linux/list.h>
30 #include <linux/slab.h>
31 #include <linux/compiler.h>
32 #include <linux/ktime.h>
33 #include <linux/set_memory.h>
34 
35 #include <linux/uaccess.h>
36 #include <asm/mmu_context.h>
37 #include <asm/tlbflush.h>
38 #include <asm/io.h>
39 
40 #include "power.h"
41 
42 #if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
43 static bool hibernate_restore_protection;
44 static bool hibernate_restore_protection_active;
45 
46 void enable_restore_image_protection(void)
47 {
48 	hibernate_restore_protection = true;
49 }
50 
51 static inline void hibernate_restore_protection_begin(void)
52 {
53 	hibernate_restore_protection_active = hibernate_restore_protection;
54 }
55 
56 static inline void hibernate_restore_protection_end(void)
57 {
58 	hibernate_restore_protection_active = false;
59 }
60 
61 static inline int __must_check hibernate_restore_protect_page(void *page_address)
62 {
63 	if (hibernate_restore_protection_active)
64 		return set_memory_ro((unsigned long)page_address, 1);
65 	return 0;
66 }
67 
68 static inline int hibernate_restore_unprotect_page(void *page_address)
69 {
70 	if (hibernate_restore_protection_active)
71 		return set_memory_rw((unsigned long)page_address, 1);
72 	return 0;
73 }
74 #else
75 static inline void hibernate_restore_protection_begin(void) {}
76 static inline void hibernate_restore_protection_end(void) {}
77 static inline int __must_check hibernate_restore_protect_page(void *page_address) {return 0; }
78 static inline int hibernate_restore_unprotect_page(void *page_address) {return 0; }
79 #endif /* CONFIG_STRICT_KERNEL_RWX  && CONFIG_ARCH_HAS_SET_MEMORY */
80 
81 
82 /*
83  * The calls to set_direct_map_*() should not fail because remapping a page
84  * here means that we only update protection bits in an existing PTE.
85  * It is still worth to have a warning here if something changes and this
86  * will no longer be the case.
87  */
88 static inline void hibernate_map_page(struct page *page)
89 {
90 	if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
91 		int ret = set_direct_map_default_noflush(page);
92 
93 		if (ret)
94 			pr_warn_once("Failed to remap page\n");
95 	} else {
96 		debug_pagealloc_map_pages(page, 1);
97 	}
98 }
99 
100 static inline void hibernate_unmap_page(struct page *page)
101 {
102 	if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
103 		unsigned long addr = (unsigned long)page_address(page);
104 		int ret  = set_direct_map_invalid_noflush(page);
105 
106 		if (ret)
107 			pr_warn_once("Failed to remap page\n");
108 
109 		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
110 	} else {
111 		debug_pagealloc_unmap_pages(page, 1);
112 	}
113 }
114 
115 static int swsusp_page_is_free(struct page *);
116 static void swsusp_set_page_forbidden(struct page *);
117 static void swsusp_unset_page_forbidden(struct page *);
118 
119 /*
120  * Number of bytes to reserve for memory allocations made by device drivers
121  * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
122  * cause image creation to fail (tunable via /sys/power/reserved_size).
123  */
124 unsigned long reserved_size;
125 
126 void __init hibernate_reserved_size_init(void)
127 {
128 	reserved_size = SPARE_PAGES * PAGE_SIZE;
129 }
130 
131 /*
132  * Preferred image size in bytes (tunable via /sys/power/image_size).
133  * When it is set to N, swsusp will do its best to ensure the image
134  * size will not exceed N bytes, but if that is impossible, it will
135  * try to create the smallest image possible.
136  */
137 unsigned long image_size;
138 
139 void __init hibernate_image_size_init(void)
140 {
141 	image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
142 }
143 
144 /*
145  * List of PBEs needed for restoring the pages that were allocated before
146  * the suspend and included in the suspend image, but have also been
147  * allocated by the "resume" kernel, so their contents cannot be written
148  * directly to their "original" page frames.
149  */
150 struct pbe *restore_pblist;
151 
152 /* struct linked_page is used to build chains of pages */
153 
154 #define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))
155 
156 struct linked_page {
157 	struct linked_page *next;
158 	char data[LINKED_PAGE_DATA_SIZE];
159 } __packed;
160 
161 /*
162  * List of "safe" pages (ie. pages that were not used by the image kernel
163  * before hibernation) that may be used as temporary storage for image kernel
164  * memory contents.
165  */
166 static struct linked_page *safe_pages_list;
167 
168 /* Pointer to an auxiliary buffer (1 page) */
169 static void *buffer;
170 
171 #define PG_ANY		0
172 #define PG_SAFE		1
173 #define PG_UNSAFE_CLEAR	1
174 #define PG_UNSAFE_KEEP	0
175 
176 static unsigned int allocated_unsafe_pages;
177 
178 /**
179  * get_image_page - Allocate a page for a hibernation image.
180  * @gfp_mask: GFP mask for the allocation.
181  * @safe_needed: Get pages that were not used before hibernation (restore only)
182  *
183  * During image restoration, for storing the PBE list and the image data, we can
184  * only use memory pages that do not conflict with the pages used before
185  * hibernation.  The "unsafe" pages have PageNosaveFree set and we count them
186  * using allocated_unsafe_pages.
187  *
188  * Each allocated image page is marked as PageNosave and PageNosaveFree so that
189  * swsusp_free() can release it.
190  */
191 static void *get_image_page(gfp_t gfp_mask, int safe_needed)
192 {
193 	void *res;
194 
195 	res = (void *)get_zeroed_page(gfp_mask);
196 	if (safe_needed)
197 		while (res && swsusp_page_is_free(virt_to_page(res))) {
198 			/* The page is unsafe, mark it for swsusp_free() */
199 			swsusp_set_page_forbidden(virt_to_page(res));
200 			allocated_unsafe_pages++;
201 			res = (void *)get_zeroed_page(gfp_mask);
202 		}
203 	if (res) {
204 		swsusp_set_page_forbidden(virt_to_page(res));
205 		swsusp_set_page_free(virt_to_page(res));
206 	}
207 	return res;
208 }
209 
210 static void *__get_safe_page(gfp_t gfp_mask)
211 {
212 	if (safe_pages_list) {
213 		void *ret = safe_pages_list;
214 
215 		safe_pages_list = safe_pages_list->next;
216 		memset(ret, 0, PAGE_SIZE);
217 		return ret;
218 	}
219 	return get_image_page(gfp_mask, PG_SAFE);
220 }
221 
222 unsigned long get_safe_page(gfp_t gfp_mask)
223 {
224 	return (unsigned long)__get_safe_page(gfp_mask);
225 }
226 
227 static struct page *alloc_image_page(gfp_t gfp_mask)
228 {
229 	struct page *page;
230 
231 	page = alloc_page(gfp_mask);
232 	if (page) {
233 		swsusp_set_page_forbidden(page);
234 		swsusp_set_page_free(page);
235 	}
236 	return page;
237 }
238 
239 static void recycle_safe_page(void *page_address)
240 {
241 	struct linked_page *lp = page_address;
242 
243 	lp->next = safe_pages_list;
244 	safe_pages_list = lp;
245 }
246 
247 /**
248  * free_image_page - Free a page allocated for hibernation image.
249  * @addr: Address of the page to free.
250  * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
251  *
252  * The page to free should have been allocated by get_image_page() (page flags
253  * set by it are affected).
254  */
255 static inline void free_image_page(void *addr, int clear_nosave_free)
256 {
257 	struct page *page;
258 
259 	BUG_ON(!virt_addr_valid(addr));
260 
261 	page = virt_to_page(addr);
262 
263 	swsusp_unset_page_forbidden(page);
264 	if (clear_nosave_free)
265 		swsusp_unset_page_free(page);
266 
267 	__free_page(page);
268 }
269 
270 static inline void free_list_of_pages(struct linked_page *list,
271 				      int clear_page_nosave)
272 {
273 	while (list) {
274 		struct linked_page *lp = list->next;
275 
276 		free_image_page(list, clear_page_nosave);
277 		list = lp;
278 	}
279 }
280 
281 /*
282  * struct chain_allocator is used for allocating small objects out of
283  * a linked list of pages called 'the chain'.
284  *
285  * The chain grows each time when there is no room for a new object in
286  * the current page.  The allocated objects cannot be freed individually.
287  * It is only possible to free them all at once, by freeing the entire
288  * chain.
289  *
290  * NOTE: The chain allocator may be inefficient if the allocated objects
291  * are not much smaller than PAGE_SIZE.
292  */
293 struct chain_allocator {
294 	struct linked_page *chain;	/* the chain */
295 	unsigned int used_space;	/* total size of objects allocated out
296 					   of the current page */
297 	gfp_t gfp_mask;		/* mask for allocating pages */
298 	int safe_needed;	/* if set, only "safe" pages are allocated */
299 };
300 
301 static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
302 		       int safe_needed)
303 {
304 	ca->chain = NULL;
305 	ca->used_space = LINKED_PAGE_DATA_SIZE;
306 	ca->gfp_mask = gfp_mask;
307 	ca->safe_needed = safe_needed;
308 }
309 
310 static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
311 {
312 	void *ret;
313 
314 	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
315 		struct linked_page *lp;
316 
317 		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
318 					get_image_page(ca->gfp_mask, PG_ANY);
319 		if (!lp)
320 			return NULL;
321 
322 		lp->next = ca->chain;
323 		ca->chain = lp;
324 		ca->used_space = 0;
325 	}
326 	ret = ca->chain->data + ca->used_space;
327 	ca->used_space += size;
328 	return ret;
329 }
330 
331 /*
332  * Data types related to memory bitmaps.
333  *
334  * Memory bitmap is a structure consisting of many linked lists of
335  * objects.  The main list's elements are of type struct zone_bitmap
336  * and each of them corresponds to one zone.  For each zone bitmap
337  * object there is a list of objects of type struct bm_block that
338  * represent each blocks of bitmap in which information is stored.
339  *
340  * struct memory_bitmap contains a pointer to the main list of zone
341  * bitmap objects, a struct bm_position used for browsing the bitmap,
342  * and a pointer to the list of pages used for allocating all of the
343  * zone bitmap objects and bitmap block objects.
344  *
345  * NOTE: It has to be possible to lay out the bitmap in memory
346  * using only allocations of order 0.  Additionally, the bitmap is
347  * designed to work with arbitrary number of zones (this is over the
348  * top for now, but let's avoid making unnecessary assumptions ;-).
349  *
350  * struct zone_bitmap contains a pointer to a list of bitmap block
351  * objects and a pointer to the bitmap block object that has been
352  * most recently used for setting bits.  Additionally, it contains the
353  * PFNs that correspond to the start and end of the represented zone.
354  *
355  * struct bm_block contains a pointer to the memory page in which
356  * information is stored (in the form of a block of bitmap)
357  * It also contains the pfns that correspond to the start and end of
358  * the represented memory area.
359  *
360  * The memory bitmap is organized as a radix tree to guarantee fast random
361  * access to the bits. There is one radix tree for each zone (as returned
362  * from create_mem_extents).
363  *
364  * One radix tree is represented by one struct mem_zone_bm_rtree. There are
365  * two linked lists for the nodes of the tree, one for the inner nodes and
366  * one for the leave nodes. The linked leave nodes are used for fast linear
367  * access of the memory bitmap.
368  *
369  * The struct rtree_node represents one node of the radix tree.
370  */
371 
372 #define BM_END_OF_MAP	(~0UL)
373 
374 #define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
375 #define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
376 #define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
377 
378 /*
379  * struct rtree_node is a wrapper struct to link the nodes
380  * of the rtree together for easy linear iteration over
381  * bits and easy freeing
382  */
383 struct rtree_node {
384 	struct list_head list;
385 	unsigned long *data;
386 };
387 
388 /*
389  * struct mem_zone_bm_rtree represents a bitmap used for one
390  * populated memory zone.
391  */
392 struct mem_zone_bm_rtree {
393 	struct list_head list;		/* Link Zones together         */
394 	struct list_head nodes;		/* Radix Tree inner nodes      */
395 	struct list_head leaves;	/* Radix Tree leaves           */
396 	unsigned long start_pfn;	/* Zone start page frame       */
397 	unsigned long end_pfn;		/* Zone end page frame + 1     */
398 	struct rtree_node *rtree;	/* Radix Tree Root             */
399 	int levels;			/* Number of Radix Tree Levels */
400 	unsigned int blocks;		/* Number of Bitmap Blocks     */
401 };
402 
403 /* struct bm_position is used for browsing memory bitmaps */
404 
405 struct bm_position {
406 	struct mem_zone_bm_rtree *zone;
407 	struct rtree_node *node;
408 	unsigned long node_pfn;
409 	unsigned long cur_pfn;
410 	int node_bit;
411 };
412 
413 struct memory_bitmap {
414 	struct list_head zones;
415 	struct linked_page *p_list;	/* list of pages used to store zone
416 					   bitmap objects and bitmap block
417 					   objects */
418 	struct bm_position cur;	/* most recently used bit position */
419 };
420 
421 /* Functions that operate on memory bitmaps */
422 
423 #define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
424 #if BITS_PER_LONG == 32
425 #define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
426 #else
427 #define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
428 #endif
429 #define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
430 
431 /**
432  * alloc_rtree_node - Allocate a new node and add it to the radix tree.
433  * @gfp_mask: GFP mask for the allocation.
434  * @safe_needed: Get pages not used before hibernation (restore only)
435  * @ca: Pointer to a linked list of pages ("a chain") to allocate from
436  * @list: Radix Tree node to add.
437  *
438  * This function is used to allocate inner nodes as well as the
439  * leave nodes of the radix tree. It also adds the node to the
440  * corresponding linked list passed in by the *list parameter.
441  */
442 static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
443 					   struct chain_allocator *ca,
444 					   struct list_head *list)
445 {
446 	struct rtree_node *node;
447 
448 	node = chain_alloc(ca, sizeof(struct rtree_node));
449 	if (!node)
450 		return NULL;
451 
452 	node->data = get_image_page(gfp_mask, safe_needed);
453 	if (!node->data)
454 		return NULL;
455 
456 	list_add_tail(&node->list, list);
457 
458 	return node;
459 }
460 
461 /**
462  * add_rtree_block - Add a new leave node to the radix tree.
463  *
464  * The leave nodes need to be allocated in order to keep the leaves
465  * linked list in order. This is guaranteed by the zone->blocks
466  * counter.
467  */
468 static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
469 			   int safe_needed, struct chain_allocator *ca)
470 {
471 	struct rtree_node *node, *block, **dst;
472 	unsigned int levels_needed, block_nr;
473 	int i;
474 
475 	block_nr = zone->blocks;
476 	levels_needed = 0;
477 
478 	/* How many levels do we need for this block nr? */
479 	while (block_nr) {
480 		levels_needed += 1;
481 		block_nr >>= BM_RTREE_LEVEL_SHIFT;
482 	}
483 
484 	/* Make sure the rtree has enough levels */
485 	for (i = zone->levels; i < levels_needed; i++) {
486 		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
487 					&zone->nodes);
488 		if (!node)
489 			return -ENOMEM;
490 
491 		node->data[0] = (unsigned long)zone->rtree;
492 		zone->rtree = node;
493 		zone->levels += 1;
494 	}
495 
496 	/* Allocate new block */
497 	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
498 	if (!block)
499 		return -ENOMEM;
500 
501 	/* Now walk the rtree to insert the block */
502 	node = zone->rtree;
503 	dst = &zone->rtree;
504 	block_nr = zone->blocks;
505 	for (i = zone->levels; i > 0; i--) {
506 		int index;
507 
508 		if (!node) {
509 			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
510 						&zone->nodes);
511 			if (!node)
512 				return -ENOMEM;
513 			*dst = node;
514 		}
515 
516 		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
517 		index &= BM_RTREE_LEVEL_MASK;
518 		dst = (struct rtree_node **)&((*dst)->data[index]);
519 		node = *dst;
520 	}
521 
522 	zone->blocks += 1;
523 	*dst = block;
524 
525 	return 0;
526 }
527 
528 static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
529 			       int clear_nosave_free);
530 
531 /**
532  * create_zone_bm_rtree - Create a radix tree for one zone.
533  *
534  * Allocated the mem_zone_bm_rtree structure and initializes it.
535  * This function also allocated and builds the radix tree for the
536  * zone.
537  */
538 static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
539 						      int safe_needed,
540 						      struct chain_allocator *ca,
541 						      unsigned long start,
542 						      unsigned long end)
543 {
544 	struct mem_zone_bm_rtree *zone;
545 	unsigned int i, nr_blocks;
546 	unsigned long pages;
547 
548 	pages = end - start;
549 	zone  = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
550 	if (!zone)
551 		return NULL;
552 
553 	INIT_LIST_HEAD(&zone->nodes);
554 	INIT_LIST_HEAD(&zone->leaves);
555 	zone->start_pfn = start;
556 	zone->end_pfn = end;
557 	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
558 
559 	for (i = 0; i < nr_blocks; i++) {
560 		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
561 			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
562 			return NULL;
563 		}
564 	}
565 
566 	return zone;
567 }
568 
569 /**
570  * free_zone_bm_rtree - Free the memory of the radix tree.
571  *
572  * Free all node pages of the radix tree. The mem_zone_bm_rtree
573  * structure itself is not freed here nor are the rtree_node
574  * structs.
575  */
576 static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
577 			       int clear_nosave_free)
578 {
579 	struct rtree_node *node;
580 
581 	list_for_each_entry(node, &zone->nodes, list)
582 		free_image_page(node->data, clear_nosave_free);
583 
584 	list_for_each_entry(node, &zone->leaves, list)
585 		free_image_page(node->data, clear_nosave_free);
586 }
587 
588 static void memory_bm_position_reset(struct memory_bitmap *bm)
589 {
590 	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
591 				  list);
592 	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
593 				  struct rtree_node, list);
594 	bm->cur.node_pfn = 0;
595 	bm->cur.cur_pfn = BM_END_OF_MAP;
596 	bm->cur.node_bit = 0;
597 }
598 
599 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
600 
601 struct mem_extent {
602 	struct list_head hook;
603 	unsigned long start;
604 	unsigned long end;
605 };
606 
607 /**
608  * free_mem_extents - Free a list of memory extents.
609  * @list: List of extents to free.
610  */
611 static void free_mem_extents(struct list_head *list)
612 {
613 	struct mem_extent *ext, *aux;
614 
615 	list_for_each_entry_safe(ext, aux, list, hook) {
616 		list_del(&ext->hook);
617 		kfree(ext);
618 	}
619 }
620 
621 /**
622  * create_mem_extents - Create a list of memory extents.
623  * @list: List to put the extents into.
624  * @gfp_mask: Mask to use for memory allocations.
625  *
626  * The extents represent contiguous ranges of PFNs.
627  */
628 static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
629 {
630 	struct zone *zone;
631 
632 	INIT_LIST_HEAD(list);
633 
634 	for_each_populated_zone(zone) {
635 		unsigned long zone_start, zone_end;
636 		struct mem_extent *ext, *cur, *aux;
637 
638 		zone_start = zone->zone_start_pfn;
639 		zone_end = zone_end_pfn(zone);
640 
641 		list_for_each_entry(ext, list, hook)
642 			if (zone_start <= ext->end)
643 				break;
644 
645 		if (&ext->hook == list || zone_end < ext->start) {
646 			/* New extent is necessary */
647 			struct mem_extent *new_ext;
648 
649 			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
650 			if (!new_ext) {
651 				free_mem_extents(list);
652 				return -ENOMEM;
653 			}
654 			new_ext->start = zone_start;
655 			new_ext->end = zone_end;
656 			list_add_tail(&new_ext->hook, &ext->hook);
657 			continue;
658 		}
659 
660 		/* Merge this zone's range of PFNs with the existing one */
661 		if (zone_start < ext->start)
662 			ext->start = zone_start;
663 		if (zone_end > ext->end)
664 			ext->end = zone_end;
665 
666 		/* More merging may be possible */
667 		cur = ext;
668 		list_for_each_entry_safe_continue(cur, aux, list, hook) {
669 			if (zone_end < cur->start)
670 				break;
671 			if (zone_end < cur->end)
672 				ext->end = cur->end;
673 			list_del(&cur->hook);
674 			kfree(cur);
675 		}
676 	}
677 
678 	return 0;
679 }
680 
681 /**
682  * memory_bm_create - Allocate memory for a memory bitmap.
683  */
684 static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
685 			    int safe_needed)
686 {
687 	struct chain_allocator ca;
688 	struct list_head mem_extents;
689 	struct mem_extent *ext;
690 	int error;
691 
692 	chain_init(&ca, gfp_mask, safe_needed);
693 	INIT_LIST_HEAD(&bm->zones);
694 
695 	error = create_mem_extents(&mem_extents, gfp_mask);
696 	if (error)
697 		return error;
698 
699 	list_for_each_entry(ext, &mem_extents, hook) {
700 		struct mem_zone_bm_rtree *zone;
701 
702 		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
703 					    ext->start, ext->end);
704 		if (!zone) {
705 			error = -ENOMEM;
706 			goto Error;
707 		}
708 		list_add_tail(&zone->list, &bm->zones);
709 	}
710 
711 	bm->p_list = ca.chain;
712 	memory_bm_position_reset(bm);
713  Exit:
714 	free_mem_extents(&mem_extents);
715 	return error;
716 
717  Error:
718 	bm->p_list = ca.chain;
719 	memory_bm_free(bm, PG_UNSAFE_CLEAR);
720 	goto Exit;
721 }
722 
723 /**
724  * memory_bm_free - Free memory occupied by the memory bitmap.
725  * @bm: Memory bitmap.
726  */
727 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
728 {
729 	struct mem_zone_bm_rtree *zone;
730 
731 	list_for_each_entry(zone, &bm->zones, list)
732 		free_zone_bm_rtree(zone, clear_nosave_free);
733 
734 	free_list_of_pages(bm->p_list, clear_nosave_free);
735 
736 	INIT_LIST_HEAD(&bm->zones);
737 }
738 
739 /**
740  * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
741  *
742  * Find the bit in memory bitmap @bm that corresponds to the given PFN.
743  * The cur.zone, cur.block and cur.node_pfn members of @bm are updated.
744  *
745  * Walk the radix tree to find the page containing the bit that represents @pfn
746  * and return the position of the bit in @addr and @bit_nr.
747  */
748 static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
749 			      void **addr, unsigned int *bit_nr)
750 {
751 	struct mem_zone_bm_rtree *curr, *zone;
752 	struct rtree_node *node;
753 	int i, block_nr;
754 
755 	zone = bm->cur.zone;
756 
757 	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
758 		goto zone_found;
759 
760 	zone = NULL;
761 
762 	/* Find the right zone */
763 	list_for_each_entry(curr, &bm->zones, list) {
764 		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
765 			zone = curr;
766 			break;
767 		}
768 	}
769 
770 	if (!zone)
771 		return -EFAULT;
772 
773 zone_found:
774 	/*
775 	 * We have found the zone. Now walk the radix tree to find the leaf node
776 	 * for our PFN.
777 	 */
778 
779 	/*
780 	 * If the zone we wish to scan is the current zone and the
781 	 * pfn falls into the current node then we do not need to walk
782 	 * the tree.
783 	 */
784 	node = bm->cur.node;
785 	if (zone == bm->cur.zone &&
786 	    ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
787 		goto node_found;
788 
789 	node      = zone->rtree;
790 	block_nr  = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
791 
792 	for (i = zone->levels; i > 0; i--) {
793 		int index;
794 
795 		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
796 		index &= BM_RTREE_LEVEL_MASK;
797 		BUG_ON(node->data[index] == 0);
798 		node = (struct rtree_node *)node->data[index];
799 	}
800 
801 node_found:
802 	/* Update last position */
803 	bm->cur.zone = zone;
804 	bm->cur.node = node;
805 	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
806 	bm->cur.cur_pfn = pfn;
807 
808 	/* Set return values */
809 	*addr = node->data;
810 	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
811 
812 	return 0;
813 }
814 
815 static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
816 {
817 	void *addr;
818 	unsigned int bit;
819 	int error;
820 
821 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
822 	BUG_ON(error);
823 	set_bit(bit, addr);
824 }
825 
826 static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
827 {
828 	void *addr;
829 	unsigned int bit;
830 	int error;
831 
832 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
833 	if (!error)
834 		set_bit(bit, addr);
835 
836 	return error;
837 }
838 
839 static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
840 {
841 	void *addr;
842 	unsigned int bit;
843 	int error;
844 
845 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
846 	BUG_ON(error);
847 	clear_bit(bit, addr);
848 }
849 
850 static void memory_bm_clear_current(struct memory_bitmap *bm)
851 {
852 	int bit;
853 
854 	bit = max(bm->cur.node_bit - 1, 0);
855 	clear_bit(bit, bm->cur.node->data);
856 }
857 
858 static unsigned long memory_bm_get_current(struct memory_bitmap *bm)
859 {
860 	return bm->cur.cur_pfn;
861 }
862 
863 static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
864 {
865 	void *addr;
866 	unsigned int bit;
867 	int error;
868 
869 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
870 	BUG_ON(error);
871 	return test_bit(bit, addr);
872 }
873 
874 static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
875 {
876 	void *addr;
877 	unsigned int bit;
878 
879 	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
880 }
881 
882 /*
883  * rtree_next_node - Jump to the next leaf node.
884  *
885  * Set the position to the beginning of the next node in the
886  * memory bitmap. This is either the next node in the current
887  * zone's radix tree or the first node in the radix tree of the
888  * next zone.
889  *
890  * Return true if there is a next node, false otherwise.
891  */
892 static bool rtree_next_node(struct memory_bitmap *bm)
893 {
894 	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
895 		bm->cur.node = list_entry(bm->cur.node->list.next,
896 					  struct rtree_node, list);
897 		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
898 		bm->cur.node_bit  = 0;
899 		touch_softlockup_watchdog();
900 		return true;
901 	}
902 
903 	/* No more nodes, goto next zone */
904 	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
905 		bm->cur.zone = list_entry(bm->cur.zone->list.next,
906 				  struct mem_zone_bm_rtree, list);
907 		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
908 					  struct rtree_node, list);
909 		bm->cur.node_pfn = 0;
910 		bm->cur.node_bit = 0;
911 		return true;
912 	}
913 
914 	/* No more zones */
915 	return false;
916 }
917 
918 /**
919  * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
920  * @bm: Memory bitmap.
921  *
922  * Starting from the last returned position this function searches for the next
923  * set bit in @bm and returns the PFN represented by it.  If no more bits are
924  * set, BM_END_OF_MAP is returned.
925  *
926  * It is required to run memory_bm_position_reset() before the first call to
927  * this function for the given memory bitmap.
928  */
929 static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
930 {
931 	unsigned long bits, pfn, pages;
932 	int bit;
933 
934 	do {
935 		pages	  = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
936 		bits      = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
937 		bit	  = find_next_bit(bm->cur.node->data, bits,
938 					  bm->cur.node_bit);
939 		if (bit < bits) {
940 			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
941 			bm->cur.node_bit = bit + 1;
942 			bm->cur.cur_pfn = pfn;
943 			return pfn;
944 		}
945 	} while (rtree_next_node(bm));
946 
947 	bm->cur.cur_pfn = BM_END_OF_MAP;
948 	return BM_END_OF_MAP;
949 }
950 
951 /*
952  * This structure represents a range of page frames the contents of which
953  * should not be saved during hibernation.
954  */
955 struct nosave_region {
956 	struct list_head list;
957 	unsigned long start_pfn;
958 	unsigned long end_pfn;
959 };
960 
961 static LIST_HEAD(nosave_regions);
962 
963 static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
964 {
965 	struct rtree_node *node;
966 
967 	list_for_each_entry(node, &zone->nodes, list)
968 		recycle_safe_page(node->data);
969 
970 	list_for_each_entry(node, &zone->leaves, list)
971 		recycle_safe_page(node->data);
972 }
973 
974 static void memory_bm_recycle(struct memory_bitmap *bm)
975 {
976 	struct mem_zone_bm_rtree *zone;
977 	struct linked_page *p_list;
978 
979 	list_for_each_entry(zone, &bm->zones, list)
980 		recycle_zone_bm_rtree(zone);
981 
982 	p_list = bm->p_list;
983 	while (p_list) {
984 		struct linked_page *lp = p_list;
985 
986 		p_list = lp->next;
987 		recycle_safe_page(lp);
988 	}
989 }
990 
991 /**
992  * register_nosave_region - Register a region of unsaveable memory.
993  *
994  * Register a range of page frames the contents of which should not be saved
995  * during hibernation (to be used in the early initialization code).
996  */
997 void __init register_nosave_region(unsigned long start_pfn, unsigned long end_pfn)
998 {
999 	struct nosave_region *region;
1000 
1001 	if (start_pfn >= end_pfn)
1002 		return;
1003 
1004 	if (!list_empty(&nosave_regions)) {
1005 		/* Try to extend the previous region (they should be sorted) */
1006 		region = list_entry(nosave_regions.prev,
1007 					struct nosave_region, list);
1008 		if (region->end_pfn == start_pfn) {
1009 			region->end_pfn = end_pfn;
1010 			goto Report;
1011 		}
1012 	}
1013 	/* This allocation cannot fail */
1014 	region = memblock_alloc_or_panic(sizeof(struct nosave_region),
1015 				SMP_CACHE_BYTES);
1016 	region->start_pfn = start_pfn;
1017 	region->end_pfn = end_pfn;
1018 	list_add_tail(&region->list, &nosave_regions);
1019  Report:
1020 	pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
1021 		(unsigned long long) start_pfn << PAGE_SHIFT,
1022 		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
1023 }
1024 
1025 /*
1026  * Set bits in this map correspond to the page frames the contents of which
1027  * should not be saved during the suspend.
1028  */
1029 static struct memory_bitmap *forbidden_pages_map;
1030 
1031 /* Set bits in this map correspond to free page frames. */
1032 static struct memory_bitmap *free_pages_map;
1033 
1034 /*
1035  * Each page frame allocated for creating the image is marked by setting the
1036  * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
1037  */
1038 
1039 void swsusp_set_page_free(struct page *page)
1040 {
1041 	if (free_pages_map)
1042 		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
1043 }
1044 
1045 static int swsusp_page_is_free(struct page *page)
1046 {
1047 	return free_pages_map ?
1048 		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
1049 }
1050 
1051 void swsusp_unset_page_free(struct page *page)
1052 {
1053 	if (free_pages_map)
1054 		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
1055 }
1056 
1057 static void swsusp_set_page_forbidden(struct page *page)
1058 {
1059 	if (forbidden_pages_map)
1060 		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
1061 }
1062 
1063 int swsusp_page_is_forbidden(struct page *page)
1064 {
1065 	return forbidden_pages_map ?
1066 		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
1067 }
1068 
1069 static void swsusp_unset_page_forbidden(struct page *page)
1070 {
1071 	if (forbidden_pages_map)
1072 		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
1073 }
1074 
1075 /**
1076  * mark_nosave_pages - Mark pages that should not be saved.
1077  * @bm: Memory bitmap.
1078  *
1079  * Set the bits in @bm that correspond to the page frames the contents of which
1080  * should not be saved.
1081  */
1082 static void mark_nosave_pages(struct memory_bitmap *bm)
1083 {
1084 	struct nosave_region *region;
1085 
1086 	if (list_empty(&nosave_regions))
1087 		return;
1088 
1089 	list_for_each_entry(region, &nosave_regions, list) {
1090 		unsigned long pfn;
1091 
1092 		pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
1093 			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
1094 			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
1095 				- 1);
1096 
1097 		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
1098 			if (pfn_valid(pfn)) {
1099 				/*
1100 				 * It is safe to ignore the result of
1101 				 * mem_bm_set_bit_check() here, since we won't
1102 				 * touch the PFNs for which the error is
1103 				 * returned anyway.
1104 				 */
1105 				mem_bm_set_bit_check(bm, pfn);
1106 			}
1107 	}
1108 }
1109 
1110 /**
1111  * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
1112  *
1113  * Create bitmaps needed for marking page frames that should not be saved and
1114  * free page frames.  The forbidden_pages_map and free_pages_map pointers are
1115  * only modified if everything goes well, because we don't want the bits to be
1116  * touched before both bitmaps are set up.
1117  */
1118 int create_basic_memory_bitmaps(void)
1119 {
1120 	struct memory_bitmap *bm1, *bm2;
1121 	int error;
1122 
1123 	if (forbidden_pages_map && free_pages_map)
1124 		return 0;
1125 	else
1126 		BUG_ON(forbidden_pages_map || free_pages_map);
1127 
1128 	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
1129 	if (!bm1)
1130 		return -ENOMEM;
1131 
1132 	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
1133 	if (error)
1134 		goto Free_first_object;
1135 
1136 	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
1137 	if (!bm2)
1138 		goto Free_first_bitmap;
1139 
1140 	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
1141 	if (error)
1142 		goto Free_second_object;
1143 
1144 	forbidden_pages_map = bm1;
1145 	free_pages_map = bm2;
1146 	mark_nosave_pages(forbidden_pages_map);
1147 
1148 	pr_debug("Basic memory bitmaps created\n");
1149 
1150 	return 0;
1151 
1152  Free_second_object:
1153 	kfree(bm2);
1154  Free_first_bitmap:
1155 	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1156  Free_first_object:
1157 	kfree(bm1);
1158 	return -ENOMEM;
1159 }
1160 
1161 /**
1162  * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
1163  *
1164  * Free memory bitmaps allocated by create_basic_memory_bitmaps().  The
1165  * auxiliary pointers are necessary so that the bitmaps themselves are not
1166  * referred to while they are being freed.
1167  */
1168 void free_basic_memory_bitmaps(void)
1169 {
1170 	struct memory_bitmap *bm1, *bm2;
1171 
1172 	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
1173 		return;
1174 
1175 	bm1 = forbidden_pages_map;
1176 	bm2 = free_pages_map;
1177 	forbidden_pages_map = NULL;
1178 	free_pages_map = NULL;
1179 	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1180 	kfree(bm1);
1181 	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
1182 	kfree(bm2);
1183 
1184 	pr_debug("Basic memory bitmaps freed\n");
1185 }
1186 
1187 static void clear_or_poison_free_page(struct page *page)
1188 {
1189 	if (page_poisoning_enabled_static())
1190 		__kernel_poison_pages(page, 1);
1191 	else if (want_init_on_free())
1192 		clear_highpage(page);
1193 }
1194 
1195 void clear_or_poison_free_pages(void)
1196 {
1197 	struct memory_bitmap *bm = free_pages_map;
1198 	unsigned long pfn;
1199 
1200 	if (WARN_ON(!(free_pages_map)))
1201 		return;
1202 
1203 	if (page_poisoning_enabled() || want_init_on_free()) {
1204 		memory_bm_position_reset(bm);
1205 		pfn = memory_bm_next_pfn(bm);
1206 		while (pfn != BM_END_OF_MAP) {
1207 			if (pfn_valid(pfn))
1208 				clear_or_poison_free_page(pfn_to_page(pfn));
1209 
1210 			pfn = memory_bm_next_pfn(bm);
1211 		}
1212 		memory_bm_position_reset(bm);
1213 		pr_info("free pages cleared after restore\n");
1214 	}
1215 }
1216 
1217 /**
1218  * snapshot_additional_pages - Estimate the number of extra pages needed.
1219  * @zone: Memory zone to carry out the computation for.
1220  *
1221  * Estimate the number of additional pages needed for setting up a hibernation
1222  * image data structures for @zone (usually, the returned value is greater than
1223  * the exact number).
1224  */
1225 unsigned int snapshot_additional_pages(struct zone *zone)
1226 {
1227 	unsigned int rtree, nodes;
1228 
1229 	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
1230 	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
1231 			      LINKED_PAGE_DATA_SIZE);
1232 	while (nodes > 1) {
1233 		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
1234 		rtree += nodes;
1235 	}
1236 
1237 	return 2 * rtree;
1238 }
1239 
1240 /*
1241  * Touch the watchdog for every WD_PAGE_COUNT pages.
1242  */
1243 #define WD_PAGE_COUNT	(128*1024)
1244 
1245 static void mark_free_pages(struct zone *zone)
1246 {
1247 	unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
1248 	unsigned long flags;
1249 	unsigned int order, t;
1250 	struct page *page;
1251 
1252 	if (zone_is_empty(zone))
1253 		return;
1254 
1255 	spin_lock_irqsave(&zone->lock, flags);
1256 
1257 	max_zone_pfn = zone_end_pfn(zone);
1258 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1259 		if (pfn_valid(pfn)) {
1260 			page = pfn_to_page(pfn);
1261 
1262 			if (!--page_count) {
1263 				touch_nmi_watchdog();
1264 				page_count = WD_PAGE_COUNT;
1265 			}
1266 
1267 			if (page_zone(page) != zone)
1268 				continue;
1269 
1270 			if (!swsusp_page_is_forbidden(page))
1271 				swsusp_unset_page_free(page);
1272 		}
1273 
1274 	for_each_migratetype_order(order, t) {
1275 		list_for_each_entry(page,
1276 				&zone->free_area[order].free_list[t], buddy_list) {
1277 			unsigned long i;
1278 
1279 			pfn = page_to_pfn(page);
1280 			for (i = 0; i < (1UL << order); i++) {
1281 				if (!--page_count) {
1282 					touch_nmi_watchdog();
1283 					page_count = WD_PAGE_COUNT;
1284 				}
1285 				swsusp_set_page_free(pfn_to_page(pfn + i));
1286 			}
1287 		}
1288 	}
1289 	spin_unlock_irqrestore(&zone->lock, flags);
1290 }
1291 
1292 #ifdef CONFIG_HIGHMEM
1293 /**
1294  * count_free_highmem_pages - Compute the total number of free highmem pages.
1295  *
1296  * The returned number is system-wide.
1297  */
1298 static unsigned int count_free_highmem_pages(void)
1299 {
1300 	struct zone *zone;
1301 	unsigned int cnt = 0;
1302 
1303 	for_each_populated_zone(zone)
1304 		if (is_highmem(zone))
1305 			cnt += zone_page_state(zone, NR_FREE_PAGES);
1306 
1307 	return cnt;
1308 }
1309 
1310 /**
1311  * saveable_highmem_page - Check if a highmem page is saveable.
1312  *
1313  * Determine whether a highmem page should be included in a hibernation image.
1314  *
1315  * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
1316  * and it isn't part of a free chunk of pages.
1317  */
1318 static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
1319 {
1320 	struct page *page;
1321 
1322 	if (!pfn_valid(pfn))
1323 		return NULL;
1324 
1325 	page = pfn_to_online_page(pfn);
1326 	if (!page || page_zone(page) != zone)
1327 		return NULL;
1328 
1329 	BUG_ON(!PageHighMem(page));
1330 
1331 	if (swsusp_page_is_forbidden(page) ||  swsusp_page_is_free(page))
1332 		return NULL;
1333 
1334 	if (PageReserved(page) || PageOffline(page))
1335 		return NULL;
1336 
1337 	if (page_is_guard(page))
1338 		return NULL;
1339 
1340 	return page;
1341 }
1342 
1343 /**
1344  * count_highmem_pages - Compute the total number of saveable highmem pages.
1345  */
1346 static unsigned int count_highmem_pages(void)
1347 {
1348 	struct zone *zone;
1349 	unsigned int n = 0;
1350 
1351 	for_each_populated_zone(zone) {
1352 		unsigned long pfn, max_zone_pfn;
1353 
1354 		if (!is_highmem(zone))
1355 			continue;
1356 
1357 		mark_free_pages(zone);
1358 		max_zone_pfn = zone_end_pfn(zone);
1359 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1360 			if (saveable_highmem_page(zone, pfn))
1361 				n++;
1362 	}
1363 	return n;
1364 }
1365 #endif /* CONFIG_HIGHMEM */
1366 
1367 /**
1368  * saveable_page - Check if the given page is saveable.
1369  *
1370  * Determine whether a non-highmem page should be included in a hibernation
1371  * image.
1372  *
1373  * We should save the page if it isn't Nosave, and is not in the range
1374  * of pages statically defined as 'unsaveable', and it isn't part of
1375  * a free chunk of pages.
1376  */
1377 static struct page *saveable_page(struct zone *zone, unsigned long pfn)
1378 {
1379 	struct page *page;
1380 
1381 	if (!pfn_valid(pfn))
1382 		return NULL;
1383 
1384 	page = pfn_to_online_page(pfn);
1385 	if (!page || page_zone(page) != zone)
1386 		return NULL;
1387 
1388 	BUG_ON(PageHighMem(page));
1389 
1390 	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
1391 		return NULL;
1392 
1393 	if (PageOffline(page))
1394 		return NULL;
1395 
1396 	if (PageReserved(page)
1397 	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
1398 		return NULL;
1399 
1400 	if (page_is_guard(page))
1401 		return NULL;
1402 
1403 	return page;
1404 }
1405 
1406 /**
1407  * count_data_pages - Compute the total number of saveable non-highmem pages.
1408  */
1409 static unsigned int count_data_pages(void)
1410 {
1411 	struct zone *zone;
1412 	unsigned long pfn, max_zone_pfn;
1413 	unsigned int n = 0;
1414 
1415 	for_each_populated_zone(zone) {
1416 		if (is_highmem(zone))
1417 			continue;
1418 
1419 		mark_free_pages(zone);
1420 		max_zone_pfn = zone_end_pfn(zone);
1421 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1422 			if (saveable_page(zone, pfn))
1423 				n++;
1424 	}
1425 	return n;
1426 }
1427 
1428 /*
1429  * This is needed, because copy_page and memcpy are not usable for copying
1430  * task structs. Returns true if the page was filled with only zeros,
1431  * otherwise false.
1432  */
1433 static inline bool do_copy_page(long *dst, long *src)
1434 {
1435 	long z = 0;
1436 	int n;
1437 
1438 	for (n = PAGE_SIZE / sizeof(long); n; n--) {
1439 		z |= *src;
1440 		*dst++ = *src++;
1441 	}
1442 	return !z;
1443 }
1444 
1445 /**
1446  * safe_copy_page - Copy a page in a safe way.
1447  *
1448  * Check if the page we are going to copy is marked as present in the kernel
1449  * page tables. This always is the case if CONFIG_DEBUG_PAGEALLOC or
1450  * CONFIG_ARCH_HAS_SET_DIRECT_MAP is not set. In that case kernel_page_present()
1451  * always returns 'true'. Returns true if the page was entirely composed of
1452  * zeros, otherwise it will return false.
1453  */
1454 static bool safe_copy_page(void *dst, struct page *s_page)
1455 {
1456 	bool zeros_only;
1457 
1458 	if (kernel_page_present(s_page)) {
1459 		zeros_only = do_copy_page(dst, page_address(s_page));
1460 	} else {
1461 		hibernate_map_page(s_page);
1462 		zeros_only = do_copy_page(dst, page_address(s_page));
1463 		hibernate_unmap_page(s_page);
1464 	}
1465 	return zeros_only;
1466 }
1467 
1468 #ifdef CONFIG_HIGHMEM
1469 static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
1470 {
1471 	return is_highmem(zone) ?
1472 		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
1473 }
1474 
1475 static bool copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1476 {
1477 	struct page *s_page, *d_page;
1478 	void *src, *dst;
1479 	bool zeros_only;
1480 
1481 	s_page = pfn_to_page(src_pfn);
1482 	d_page = pfn_to_page(dst_pfn);
1483 	if (PageHighMem(s_page)) {
1484 		src = kmap_local_page(s_page);
1485 		dst = kmap_local_page(d_page);
1486 		zeros_only = do_copy_page(dst, src);
1487 		kunmap_local(dst);
1488 		kunmap_local(src);
1489 	} else {
1490 		if (PageHighMem(d_page)) {
1491 			/*
1492 			 * The page pointed to by src may contain some kernel
1493 			 * data modified by kmap_atomic()
1494 			 */
1495 			zeros_only = safe_copy_page(buffer, s_page);
1496 			dst = kmap_local_page(d_page);
1497 			copy_page(dst, buffer);
1498 			kunmap_local(dst);
1499 		} else {
1500 			zeros_only = safe_copy_page(page_address(d_page), s_page);
1501 		}
1502 	}
1503 	return zeros_only;
1504 }
1505 #else
1506 #define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)
1507 
1508 static inline int copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1509 {
1510 	return safe_copy_page(page_address(pfn_to_page(dst_pfn)),
1511 				pfn_to_page(src_pfn));
1512 }
1513 #endif /* CONFIG_HIGHMEM */
1514 
1515 /*
1516  * Copy data pages will copy all pages into pages pulled from the copy_bm.
1517  * If a page was entirely filled with zeros it will be marked in the zero_bm.
1518  *
1519  * Returns the number of pages copied.
1520  */
1521 static unsigned long copy_data_pages(struct memory_bitmap *copy_bm,
1522 			    struct memory_bitmap *orig_bm,
1523 			    struct memory_bitmap *zero_bm)
1524 {
1525 	unsigned long copied_pages = 0;
1526 	struct zone *zone;
1527 	unsigned long pfn, copy_pfn;
1528 
1529 	for_each_populated_zone(zone) {
1530 		unsigned long max_zone_pfn;
1531 
1532 		mark_free_pages(zone);
1533 		max_zone_pfn = zone_end_pfn(zone);
1534 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1535 			if (page_is_saveable(zone, pfn))
1536 				memory_bm_set_bit(orig_bm, pfn);
1537 	}
1538 	memory_bm_position_reset(orig_bm);
1539 	memory_bm_position_reset(copy_bm);
1540 	copy_pfn = memory_bm_next_pfn(copy_bm);
1541 	for(;;) {
1542 		pfn = memory_bm_next_pfn(orig_bm);
1543 		if (unlikely(pfn == BM_END_OF_MAP))
1544 			break;
1545 		if (copy_data_page(copy_pfn, pfn)) {
1546 			memory_bm_set_bit(zero_bm, pfn);
1547 			/* Use this copy_pfn for a page that is not full of zeros */
1548 			continue;
1549 		}
1550 		copied_pages++;
1551 		copy_pfn = memory_bm_next_pfn(copy_bm);
1552 	}
1553 	return copied_pages;
1554 }
1555 
1556 /* Total number of image pages */
1557 static unsigned int nr_copy_pages;
1558 /* Number of pages needed for saving the original pfns of the image pages */
1559 static unsigned int nr_meta_pages;
1560 /* Number of zero pages */
1561 static unsigned int nr_zero_pages;
1562 
1563 /*
1564  * Numbers of normal and highmem page frames allocated for hibernation image
1565  * before suspending devices.
1566  */
1567 static unsigned int alloc_normal, alloc_highmem;
1568 /*
1569  * Memory bitmap used for marking saveable pages (during hibernation) or
1570  * hibernation image pages (during restore)
1571  */
1572 static struct memory_bitmap orig_bm;
1573 /*
1574  * Memory bitmap used during hibernation for marking allocated page frames that
1575  * will contain copies of saveable pages.  During restore it is initially used
1576  * for marking hibernation image pages, but then the set bits from it are
1577  * duplicated in @orig_bm and it is released.  On highmem systems it is next
1578  * used for marking "safe" highmem pages, but it has to be reinitialized for
1579  * this purpose.
1580  */
1581 static struct memory_bitmap copy_bm;
1582 
1583 /* Memory bitmap which tracks which saveable pages were zero filled. */
1584 static struct memory_bitmap zero_bm;
1585 
1586 /**
1587  * swsusp_free - Free pages allocated for hibernation image.
1588  *
1589  * Image pages are allocated before snapshot creation, so they need to be
1590  * released after resume.
1591  */
1592 void swsusp_free(void)
1593 {
1594 	unsigned long fb_pfn, fr_pfn;
1595 
1596 	if (!forbidden_pages_map || !free_pages_map)
1597 		goto out;
1598 
1599 	memory_bm_position_reset(forbidden_pages_map);
1600 	memory_bm_position_reset(free_pages_map);
1601 
1602 loop:
1603 	fr_pfn = memory_bm_next_pfn(free_pages_map);
1604 	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1605 
1606 	/*
1607 	 * Find the next bit set in both bitmaps. This is guaranteed to
1608 	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
1609 	 */
1610 	do {
1611 		if (fb_pfn < fr_pfn)
1612 			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1613 		if (fr_pfn < fb_pfn)
1614 			fr_pfn = memory_bm_next_pfn(free_pages_map);
1615 	} while (fb_pfn != fr_pfn);
1616 
1617 	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
1618 		struct page *page = pfn_to_page(fr_pfn);
1619 
1620 		memory_bm_clear_current(forbidden_pages_map);
1621 		memory_bm_clear_current(free_pages_map);
1622 		hibernate_restore_unprotect_page(page_address(page));
1623 		__free_page(page);
1624 		goto loop;
1625 	}
1626 
1627 out:
1628 	nr_copy_pages = 0;
1629 	nr_meta_pages = 0;
1630 	nr_zero_pages = 0;
1631 	restore_pblist = NULL;
1632 	buffer = NULL;
1633 	alloc_normal = 0;
1634 	alloc_highmem = 0;
1635 	hibernate_restore_protection_end();
1636 }
1637 
1638 /* Helper functions used for the shrinking of memory. */
1639 
1640 #define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)
1641 
1642 /**
1643  * preallocate_image_pages - Allocate a number of pages for hibernation image.
1644  * @nr_pages: Number of page frames to allocate.
1645  * @mask: GFP flags to use for the allocation.
1646  *
1647  * Return value: Number of page frames actually allocated
1648  */
1649 static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
1650 {
1651 	unsigned long nr_alloc = 0;
1652 
1653 	while (nr_pages > 0) {
1654 		struct page *page;
1655 
1656 		page = alloc_image_page(mask);
1657 		if (!page)
1658 			break;
1659 		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
1660 		if (PageHighMem(page))
1661 			alloc_highmem++;
1662 		else
1663 			alloc_normal++;
1664 		nr_pages--;
1665 		nr_alloc++;
1666 	}
1667 
1668 	return nr_alloc;
1669 }
1670 
1671 static unsigned long preallocate_image_memory(unsigned long nr_pages,
1672 					      unsigned long avail_normal)
1673 {
1674 	unsigned long alloc;
1675 
1676 	if (avail_normal <= alloc_normal)
1677 		return 0;
1678 
1679 	alloc = avail_normal - alloc_normal;
1680 	if (nr_pages < alloc)
1681 		alloc = nr_pages;
1682 
1683 	return preallocate_image_pages(alloc, GFP_IMAGE);
1684 }
1685 
1686 #ifdef CONFIG_HIGHMEM
1687 static unsigned long preallocate_image_highmem(unsigned long nr_pages)
1688 {
1689 	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
1690 }
1691 
1692 /**
1693  *  __fraction - Compute (an approximation of) x * (multiplier / base).
1694  */
1695 static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
1696 {
1697 	return div64_u64(x * multiplier, base);
1698 }
1699 
1700 static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1701 						  unsigned long highmem,
1702 						  unsigned long total)
1703 {
1704 	unsigned long alloc = __fraction(nr_pages, highmem, total);
1705 
1706 	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
1707 }
1708 #else /* CONFIG_HIGHMEM */
1709 static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
1710 {
1711 	return 0;
1712 }
1713 
1714 static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1715 							 unsigned long highmem,
1716 							 unsigned long total)
1717 {
1718 	return 0;
1719 }
1720 #endif /* CONFIG_HIGHMEM */
1721 
1722 /**
1723  * free_unnecessary_pages - Release preallocated pages not needed for the image.
1724  */
1725 static unsigned long free_unnecessary_pages(void)
1726 {
1727 	unsigned long save, to_free_normal, to_free_highmem, free;
1728 
1729 	save = count_data_pages();
1730 	if (alloc_normal >= save) {
1731 		to_free_normal = alloc_normal - save;
1732 		save = 0;
1733 	} else {
1734 		to_free_normal = 0;
1735 		save -= alloc_normal;
1736 	}
1737 	save += count_highmem_pages();
1738 	if (alloc_highmem >= save) {
1739 		to_free_highmem = alloc_highmem - save;
1740 	} else {
1741 		to_free_highmem = 0;
1742 		save -= alloc_highmem;
1743 		if (to_free_normal > save)
1744 			to_free_normal -= save;
1745 		else
1746 			to_free_normal = 0;
1747 	}
1748 	free = to_free_normal + to_free_highmem;
1749 
1750 	memory_bm_position_reset(&copy_bm);
1751 
1752 	while (to_free_normal > 0 || to_free_highmem > 0) {
1753 		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
1754 		struct page *page = pfn_to_page(pfn);
1755 
1756 		if (PageHighMem(page)) {
1757 			if (!to_free_highmem)
1758 				continue;
1759 			to_free_highmem--;
1760 			alloc_highmem--;
1761 		} else {
1762 			if (!to_free_normal)
1763 				continue;
1764 			to_free_normal--;
1765 			alloc_normal--;
1766 		}
1767 		memory_bm_clear_bit(&copy_bm, pfn);
1768 		swsusp_unset_page_forbidden(page);
1769 		swsusp_unset_page_free(page);
1770 		__free_page(page);
1771 	}
1772 
1773 	return free;
1774 }
1775 
1776 /**
1777  * minimum_image_size - Estimate the minimum acceptable size of an image.
1778  * @saveable: Number of saveable pages in the system.
1779  *
1780  * We want to avoid attempting to free too much memory too hard, so estimate the
1781  * minimum acceptable size of a hibernation image to use as the lower limit for
1782  * preallocating memory.
1783  *
1784  * We assume that the minimum image size should be proportional to
1785  *
1786  * [number of saveable pages] - [number of pages that can be freed in theory]
1787  *
1788  * where the second term is the sum of (1) reclaimable slab pages, (2) active
1789  * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
1790  */
1791 static unsigned long minimum_image_size(unsigned long saveable)
1792 {
1793 	unsigned long size;
1794 
1795 	size = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B)
1796 		+ global_node_page_state(NR_ACTIVE_ANON)
1797 		+ global_node_page_state(NR_INACTIVE_ANON)
1798 		+ global_node_page_state(NR_ACTIVE_FILE)
1799 		+ global_node_page_state(NR_INACTIVE_FILE);
1800 
1801 	return saveable <= size ? 0 : saveable - size;
1802 }
1803 
1804 /**
1805  * hibernate_preallocate_memory - Preallocate memory for hibernation image.
1806  *
1807  * To create a hibernation image it is necessary to make a copy of every page
1808  * frame in use.  We also need a number of page frames to be free during
1809  * hibernation for allocations made while saving the image and for device
1810  * drivers, in case they need to allocate memory from their hibernation
1811  * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
1812  * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
1813  * /sys/power/reserved_size, respectively).  To make this happen, we compute the
1814  * total number of available page frames and allocate at least
1815  *
1816  * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2
1817  *  - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
1818  *
1819  * of them, which corresponds to the maximum size of a hibernation image.
1820  *
1821  * If image_size is set below the number following from the above formula,
1822  * the preallocation of memory is continued until the total number of saveable
1823  * pages in the system is below the requested image size or the minimum
1824  * acceptable image size returned by minimum_image_size(), whichever is greater.
1825  */
1826 int hibernate_preallocate_memory(void)
1827 {
1828 	struct zone *zone;
1829 	unsigned long saveable, size, max_size, count, highmem, pages = 0;
1830 	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
1831 	ktime_t start, stop;
1832 	int error;
1833 
1834 	pr_info("Preallocating image memory\n");
1835 	start = ktime_get();
1836 
1837 	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
1838 	if (error) {
1839 		pr_err("Cannot allocate original bitmap\n");
1840 		goto err_out;
1841 	}
1842 
1843 	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
1844 	if (error) {
1845 		pr_err("Cannot allocate copy bitmap\n");
1846 		goto err_out;
1847 	}
1848 
1849 	error = memory_bm_create(&zero_bm, GFP_IMAGE, PG_ANY);
1850 	if (error) {
1851 		pr_err("Cannot allocate zero bitmap\n");
1852 		goto err_out;
1853 	}
1854 
1855 	alloc_normal = 0;
1856 	alloc_highmem = 0;
1857 	nr_zero_pages = 0;
1858 
1859 	/* Count the number of saveable data pages. */
1860 	save_highmem = count_highmem_pages();
1861 	saveable = count_data_pages();
1862 
1863 	/*
1864 	 * Compute the total number of page frames we can use (count) and the
1865 	 * number of pages needed for image metadata (size).
1866 	 */
1867 	count = saveable;
1868 	saveable += save_highmem;
1869 	highmem = save_highmem;
1870 	size = 0;
1871 	for_each_populated_zone(zone) {
1872 		size += snapshot_additional_pages(zone);
1873 		if (is_highmem(zone))
1874 			highmem += zone_page_state(zone, NR_FREE_PAGES);
1875 		else
1876 			count += zone_page_state(zone, NR_FREE_PAGES);
1877 	}
1878 	avail_normal = count;
1879 	count += highmem;
1880 	count -= totalreserve_pages;
1881 
1882 	/* Compute the maximum number of saveable pages to leave in memory. */
1883 	max_size = (count - (size + PAGES_FOR_IO)) / 2
1884 			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
1885 	/* Compute the desired number of image pages specified by image_size. */
1886 	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1887 	if (size > max_size)
1888 		size = max_size;
1889 	/*
1890 	 * If the desired number of image pages is at least as large as the
1891 	 * current number of saveable pages in memory, allocate page frames for
1892 	 * the image and we're done.
1893 	 */
1894 	if (size >= saveable) {
1895 		pages = preallocate_image_highmem(save_highmem);
1896 		pages += preallocate_image_memory(saveable - pages, avail_normal);
1897 		goto out;
1898 	}
1899 
1900 	/* Estimate the minimum size of the image. */
1901 	pages = minimum_image_size(saveable);
1902 	/*
1903 	 * To avoid excessive pressure on the normal zone, leave room in it to
1904 	 * accommodate an image of the minimum size (unless it's already too
1905 	 * small, in which case don't preallocate pages from it at all).
1906 	 */
1907 	if (avail_normal > pages)
1908 		avail_normal -= pages;
1909 	else
1910 		avail_normal = 0;
1911 	if (size < pages)
1912 		size = min_t(unsigned long, pages, max_size);
1913 
1914 	/*
1915 	 * Let the memory management subsystem know that we're going to need a
1916 	 * large number of page frames to allocate and make it free some memory.
1917 	 * NOTE: If this is not done, performance will be hurt badly in some
1918 	 * test cases.
1919 	 */
1920 	shrink_all_memory(saveable - size);
1921 
1922 	/*
1923 	 * The number of saveable pages in memory was too high, so apply some
1924 	 * pressure to decrease it.  First, make room for the largest possible
1925 	 * image and fail if that doesn't work.  Next, try to decrease the size
1926 	 * of the image as much as indicated by 'size' using allocations from
1927 	 * highmem and non-highmem zones separately.
1928 	 */
1929 	pages_highmem = preallocate_image_highmem(highmem / 2);
1930 	alloc = count - max_size;
1931 	if (alloc > pages_highmem)
1932 		alloc -= pages_highmem;
1933 	else
1934 		alloc = 0;
1935 	pages = preallocate_image_memory(alloc, avail_normal);
1936 	if (pages < alloc) {
1937 		/* We have exhausted non-highmem pages, try highmem. */
1938 		alloc -= pages;
1939 		pages += pages_highmem;
1940 		pages_highmem = preallocate_image_highmem(alloc);
1941 		if (pages_highmem < alloc) {
1942 			pr_err("Image allocation is %lu pages short\n",
1943 				alloc - pages_highmem);
1944 			goto err_out;
1945 		}
1946 		pages += pages_highmem;
1947 		/*
1948 		 * size is the desired number of saveable pages to leave in
1949 		 * memory, so try to preallocate (all memory - size) pages.
1950 		 */
1951 		alloc = (count - pages) - size;
1952 		pages += preallocate_image_highmem(alloc);
1953 	} else {
1954 		/*
1955 		 * There are approximately max_size saveable pages at this point
1956 		 * and we want to reduce this number down to size.
1957 		 */
1958 		alloc = max_size - size;
1959 		size = preallocate_highmem_fraction(alloc, highmem, count);
1960 		pages_highmem += size;
1961 		alloc -= size;
1962 		size = preallocate_image_memory(alloc, avail_normal);
1963 		pages_highmem += preallocate_image_highmem(alloc - size);
1964 		pages += pages_highmem + size;
1965 	}
1966 
1967 	/*
1968 	 * We only need as many page frames for the image as there are saveable
1969 	 * pages in memory, but we have allocated more.  Release the excessive
1970 	 * ones now.
1971 	 */
1972 	pages -= free_unnecessary_pages();
1973 
1974  out:
1975 	stop = ktime_get();
1976 	pr_info("Allocated %lu pages for snapshot\n", pages);
1977 	swsusp_show_speed(start, stop, pages, "Allocated");
1978 
1979 	return 0;
1980 
1981  err_out:
1982 	swsusp_free();
1983 	return -ENOMEM;
1984 }
1985 
1986 #ifdef CONFIG_HIGHMEM
1987 /**
1988  * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
1989  *
1990  * Compute the number of non-highmem pages that will be necessary for creating
1991  * copies of highmem pages.
1992  */
1993 static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1994 {
1995 	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1996 
1997 	if (free_highmem >= nr_highmem)
1998 		nr_highmem = 0;
1999 	else
2000 		nr_highmem -= free_highmem;
2001 
2002 	return nr_highmem;
2003 }
2004 #else
2005 static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
2006 #endif /* CONFIG_HIGHMEM */
2007 
2008 /**
2009  * enough_free_mem - Check if there is enough free memory for the image.
2010  */
2011 static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
2012 {
2013 	struct zone *zone;
2014 	unsigned int free = alloc_normal;
2015 
2016 	for_each_populated_zone(zone)
2017 		if (!is_highmem(zone))
2018 			free += zone_page_state(zone, NR_FREE_PAGES);
2019 
2020 	nr_pages += count_pages_for_highmem(nr_highmem);
2021 	pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
2022 		 nr_pages, PAGES_FOR_IO, free);
2023 
2024 	return free > nr_pages + PAGES_FOR_IO;
2025 }
2026 
2027 #ifdef CONFIG_HIGHMEM
2028 /**
2029  * get_highmem_buffer - Allocate a buffer for highmem pages.
2030  *
2031  * If there are some highmem pages in the hibernation image, we may need a
2032  * buffer to copy them and/or load their data.
2033  */
2034 static inline int get_highmem_buffer(int safe_needed)
2035 {
2036 	buffer = get_image_page(GFP_ATOMIC, safe_needed);
2037 	return buffer ? 0 : -ENOMEM;
2038 }
2039 
2040 /**
2041  * alloc_highmem_pages - Allocate some highmem pages for the image.
2042  *
2043  * Try to allocate as many pages as needed, but if the number of free highmem
2044  * pages is less than that, allocate them all.
2045  */
2046 static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
2047 					       unsigned int nr_highmem)
2048 {
2049 	unsigned int to_alloc = count_free_highmem_pages();
2050 
2051 	if (to_alloc > nr_highmem)
2052 		to_alloc = nr_highmem;
2053 
2054 	nr_highmem -= to_alloc;
2055 	while (to_alloc-- > 0) {
2056 		struct page *page;
2057 
2058 		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
2059 		memory_bm_set_bit(bm, page_to_pfn(page));
2060 	}
2061 	return nr_highmem;
2062 }
2063 #else
2064 static inline int get_highmem_buffer(int safe_needed) { return 0; }
2065 
2066 static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
2067 					       unsigned int n) { return 0; }
2068 #endif /* CONFIG_HIGHMEM */
2069 
2070 /**
2071  * swsusp_alloc - Allocate memory for hibernation image.
2072  *
2073  * We first try to allocate as many highmem pages as there are
2074  * saveable highmem pages in the system.  If that fails, we allocate
2075  * non-highmem pages for the copies of the remaining highmem ones.
2076  *
2077  * In this approach it is likely that the copies of highmem pages will
2078  * also be located in the high memory, because of the way in which
2079  * copy_data_pages() works.
2080  */
2081 static int swsusp_alloc(struct memory_bitmap *copy_bm,
2082 			unsigned int nr_pages, unsigned int nr_highmem)
2083 {
2084 	if (nr_highmem > 0) {
2085 		if (get_highmem_buffer(PG_ANY))
2086 			goto err_out;
2087 		if (nr_highmem > alloc_highmem) {
2088 			nr_highmem -= alloc_highmem;
2089 			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
2090 		}
2091 	}
2092 	if (nr_pages > alloc_normal) {
2093 		nr_pages -= alloc_normal;
2094 		while (nr_pages-- > 0) {
2095 			struct page *page;
2096 
2097 			page = alloc_image_page(GFP_ATOMIC);
2098 			if (!page)
2099 				goto err_out;
2100 			memory_bm_set_bit(copy_bm, page_to_pfn(page));
2101 		}
2102 	}
2103 
2104 	return 0;
2105 
2106  err_out:
2107 	swsusp_free();
2108 	return -ENOMEM;
2109 }
2110 
2111 asmlinkage __visible int swsusp_save(void)
2112 {
2113 	unsigned int nr_pages, nr_highmem;
2114 
2115 	pr_info("Creating image:\n");
2116 
2117 	drain_local_pages(NULL);
2118 	nr_pages = count_data_pages();
2119 	nr_highmem = count_highmem_pages();
2120 	pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);
2121 
2122 	if (!enough_free_mem(nr_pages, nr_highmem)) {
2123 		pr_err("Not enough free memory\n");
2124 		return -ENOMEM;
2125 	}
2126 
2127 	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
2128 		pr_err("Memory allocation failed\n");
2129 		return -ENOMEM;
2130 	}
2131 
2132 	/*
2133 	 * During allocating of suspend pagedir, new cold pages may appear.
2134 	 * Kill them.
2135 	 */
2136 	drain_local_pages(NULL);
2137 	nr_copy_pages = copy_data_pages(&copy_bm, &orig_bm, &zero_bm);
2138 
2139 	/*
2140 	 * End of critical section. From now on, we can write to memory,
2141 	 * but we should not touch disk. This specially means we must _not_
2142 	 * touch swap space! Except we must write out our image of course.
2143 	 */
2144 	nr_pages += nr_highmem;
2145 	/* We don't actually copy the zero pages */
2146 	nr_zero_pages = nr_pages - nr_copy_pages;
2147 	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
2148 
2149 	pr_info("Image created (%d pages copied, %d zero pages)\n", nr_copy_pages, nr_zero_pages);
2150 
2151 	return 0;
2152 }
2153 
2154 #ifndef CONFIG_ARCH_HIBERNATION_HEADER
2155 static int init_header_complete(struct swsusp_info *info)
2156 {
2157 	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
2158 	info->version_code = LINUX_VERSION_CODE;
2159 	return 0;
2160 }
2161 
2162 static const char *check_image_kernel(struct swsusp_info *info)
2163 {
2164 	if (info->version_code != LINUX_VERSION_CODE)
2165 		return "kernel version";
2166 	if (strcmp(info->uts.sysname,init_utsname()->sysname))
2167 		return "system type";
2168 	if (strcmp(info->uts.release,init_utsname()->release))
2169 		return "kernel release";
2170 	if (strcmp(info->uts.version,init_utsname()->version))
2171 		return "version";
2172 	if (strcmp(info->uts.machine,init_utsname()->machine))
2173 		return "machine";
2174 	return NULL;
2175 }
2176 #endif /* CONFIG_ARCH_HIBERNATION_HEADER */
2177 
2178 unsigned long snapshot_get_image_size(void)
2179 {
2180 	return nr_copy_pages + nr_meta_pages + 1;
2181 }
2182 
2183 static int init_header(struct swsusp_info *info)
2184 {
2185 	memset(info, 0, sizeof(struct swsusp_info));
2186 	info->num_physpages = get_num_physpages();
2187 	info->image_pages = nr_copy_pages;
2188 	info->pages = snapshot_get_image_size();
2189 	info->size = info->pages;
2190 	info->size <<= PAGE_SHIFT;
2191 	return init_header_complete(info);
2192 }
2193 
2194 #define ENCODED_PFN_ZERO_FLAG ((unsigned long)1 << (BITS_PER_LONG - 1))
2195 #define ENCODED_PFN_MASK (~ENCODED_PFN_ZERO_FLAG)
2196 
2197 /**
2198  * pack_pfns - Prepare PFNs for saving.
2199  * @bm: Memory bitmap.
2200  * @buf: Memory buffer to store the PFNs in.
2201  * @zero_bm: Memory bitmap containing PFNs of zero pages.
2202  *
2203  * PFNs corresponding to set bits in @bm are stored in the area of memory
2204  * pointed to by @buf (1 page at a time). Pages which were filled with only
2205  * zeros will have the highest bit set in the packed format to distinguish
2206  * them from PFNs which will be contained in the image file.
2207  */
2208 static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm,
2209 		struct memory_bitmap *zero_bm)
2210 {
2211 	int j;
2212 
2213 	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2214 		buf[j] = memory_bm_next_pfn(bm);
2215 		if (unlikely(buf[j] == BM_END_OF_MAP))
2216 			break;
2217 		if (memory_bm_test_bit(zero_bm, buf[j]))
2218 			buf[j] |= ENCODED_PFN_ZERO_FLAG;
2219 	}
2220 }
2221 
2222 /**
2223  * snapshot_read_next - Get the address to read the next image page from.
2224  * @handle: Snapshot handle to be used for the reading.
2225  *
2226  * On the first call, @handle should point to a zeroed snapshot_handle
2227  * structure.  The structure gets populated then and a pointer to it should be
2228  * passed to this function every next time.
2229  *
2230  * On success, the function returns a positive number.  Then, the caller
2231  * is allowed to read up to the returned number of bytes from the memory
2232  * location computed by the data_of() macro.
2233  *
2234  * The function returns 0 to indicate the end of the data stream condition,
2235  * and negative numbers are returned on errors.  If that happens, the structure
2236  * pointed to by @handle is not updated and should not be used any more.
2237  */
2238 int snapshot_read_next(struct snapshot_handle *handle)
2239 {
2240 	if (handle->cur > nr_meta_pages + nr_copy_pages)
2241 		return 0;
2242 
2243 	if (!buffer) {
2244 		/* This makes the buffer be freed by swsusp_free() */
2245 		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2246 		if (!buffer)
2247 			return -ENOMEM;
2248 	}
2249 	if (!handle->cur) {
2250 		int error;
2251 
2252 		error = init_header((struct swsusp_info *)buffer);
2253 		if (error)
2254 			return error;
2255 		handle->buffer = buffer;
2256 		memory_bm_position_reset(&orig_bm);
2257 		memory_bm_position_reset(&copy_bm);
2258 	} else if (handle->cur <= nr_meta_pages) {
2259 		clear_page(buffer);
2260 		pack_pfns(buffer, &orig_bm, &zero_bm);
2261 	} else {
2262 		struct page *page;
2263 
2264 		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
2265 		if (PageHighMem(page)) {
2266 			/*
2267 			 * Highmem pages are copied to the buffer,
2268 			 * because we can't return with a kmapped
2269 			 * highmem page (we may not be called again).
2270 			 */
2271 			void *kaddr;
2272 
2273 			kaddr = kmap_atomic(page);
2274 			copy_page(buffer, kaddr);
2275 			kunmap_atomic(kaddr);
2276 			handle->buffer = buffer;
2277 		} else {
2278 			handle->buffer = page_address(page);
2279 		}
2280 	}
2281 	handle->cur++;
2282 	return PAGE_SIZE;
2283 }
2284 
2285 static void duplicate_memory_bitmap(struct memory_bitmap *dst,
2286 				    struct memory_bitmap *src)
2287 {
2288 	unsigned long pfn;
2289 
2290 	memory_bm_position_reset(src);
2291 	pfn = memory_bm_next_pfn(src);
2292 	while (pfn != BM_END_OF_MAP) {
2293 		memory_bm_set_bit(dst, pfn);
2294 		pfn = memory_bm_next_pfn(src);
2295 	}
2296 }
2297 
2298 /**
2299  * mark_unsafe_pages - Mark pages that were used before hibernation.
2300  *
2301  * Mark the pages that cannot be used for storing the image during restoration,
2302  * because they conflict with the pages that had been used before hibernation.
2303  */
2304 static void mark_unsafe_pages(struct memory_bitmap *bm)
2305 {
2306 	unsigned long pfn;
2307 
2308 	/* Clear the "free"/"unsafe" bit for all PFNs */
2309 	memory_bm_position_reset(free_pages_map);
2310 	pfn = memory_bm_next_pfn(free_pages_map);
2311 	while (pfn != BM_END_OF_MAP) {
2312 		memory_bm_clear_current(free_pages_map);
2313 		pfn = memory_bm_next_pfn(free_pages_map);
2314 	}
2315 
2316 	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
2317 	duplicate_memory_bitmap(free_pages_map, bm);
2318 
2319 	allocated_unsafe_pages = 0;
2320 }
2321 
2322 static int check_header(struct swsusp_info *info)
2323 {
2324 	const char *reason;
2325 
2326 	reason = check_image_kernel(info);
2327 	if (!reason && info->num_physpages != get_num_physpages())
2328 		reason = "memory size";
2329 	if (reason) {
2330 		pr_err("Image mismatch: %s\n", reason);
2331 		return -EPERM;
2332 	}
2333 	return 0;
2334 }
2335 
2336 /**
2337  * load_header - Check the image header and copy the data from it.
2338  */
2339 static int load_header(struct swsusp_info *info)
2340 {
2341 	int error;
2342 
2343 	restore_pblist = NULL;
2344 	error = check_header(info);
2345 	if (!error) {
2346 		nr_copy_pages = info->image_pages;
2347 		nr_meta_pages = info->pages - info->image_pages - 1;
2348 	}
2349 	return error;
2350 }
2351 
2352 /**
2353  * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
2354  * @bm: Memory bitmap.
2355  * @buf: Area of memory containing the PFNs.
2356  * @zero_bm: Memory bitmap with the zero PFNs marked.
2357  *
2358  * For each element of the array pointed to by @buf (1 page at a time), set the
2359  * corresponding bit in @bm. If the page was originally populated with only
2360  * zeros then a corresponding bit will also be set in @zero_bm.
2361  */
2362 static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm,
2363 		struct memory_bitmap *zero_bm)
2364 {
2365 	unsigned long decoded_pfn;
2366         bool zero;
2367 	int j;
2368 
2369 	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2370 		if (unlikely(buf[j] == BM_END_OF_MAP))
2371 			break;
2372 
2373 		zero = !!(buf[j] & ENCODED_PFN_ZERO_FLAG);
2374 		decoded_pfn = buf[j] & ENCODED_PFN_MASK;
2375 		if (pfn_valid(decoded_pfn) && memory_bm_pfn_present(bm, decoded_pfn)) {
2376 			memory_bm_set_bit(bm, decoded_pfn);
2377 			if (zero) {
2378 				memory_bm_set_bit(zero_bm, decoded_pfn);
2379 				nr_zero_pages++;
2380 			}
2381 		} else {
2382 			if (!pfn_valid(decoded_pfn))
2383 				pr_err(FW_BUG "Memory map mismatch at 0x%llx after hibernation\n",
2384 				       (unsigned long long)PFN_PHYS(decoded_pfn));
2385 			return -EFAULT;
2386 		}
2387 	}
2388 
2389 	return 0;
2390 }
2391 
2392 #ifdef CONFIG_HIGHMEM
2393 /*
2394  * struct highmem_pbe is used for creating the list of highmem pages that
2395  * should be restored atomically during the resume from disk, because the page
2396  * frames they have occupied before the suspend are in use.
2397  */
2398 struct highmem_pbe {
2399 	struct page *copy_page;	/* data is here now */
2400 	struct page *orig_page;	/* data was here before the suspend */
2401 	struct highmem_pbe *next;
2402 };
2403 
2404 /*
2405  * List of highmem PBEs needed for restoring the highmem pages that were
2406  * allocated before the suspend and included in the suspend image, but have
2407  * also been allocated by the "resume" kernel, so their contents cannot be
2408  * written directly to their "original" page frames.
2409  */
2410 static struct highmem_pbe *highmem_pblist;
2411 
2412 /**
2413  * count_highmem_image_pages - Compute the number of highmem pages in the image.
2414  * @bm: Memory bitmap.
2415  *
2416  * The bits in @bm that correspond to image pages are assumed to be set.
2417  */
2418 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2419 {
2420 	unsigned long pfn;
2421 	unsigned int cnt = 0;
2422 
2423 	memory_bm_position_reset(bm);
2424 	pfn = memory_bm_next_pfn(bm);
2425 	while (pfn != BM_END_OF_MAP) {
2426 		if (PageHighMem(pfn_to_page(pfn)))
2427 			cnt++;
2428 
2429 		pfn = memory_bm_next_pfn(bm);
2430 	}
2431 	return cnt;
2432 }
2433 
2434 static unsigned int safe_highmem_pages;
2435 
2436 static struct memory_bitmap *safe_highmem_bm;
2437 
2438 /**
2439  * prepare_highmem_image - Allocate memory for loading highmem data from image.
2440  * @bm: Pointer to an uninitialized memory bitmap structure.
2441  * @nr_highmem_p: Pointer to the number of highmem image pages.
2442  *
2443  * Try to allocate as many highmem pages as there are highmem image pages
2444  * (@nr_highmem_p points to the variable containing the number of highmem image
2445  * pages).  The pages that are "safe" (ie. will not be overwritten when the
2446  * hibernation image is restored entirely) have the corresponding bits set in
2447  * @bm (it must be uninitialized).
2448  *
2449  * NOTE: This function should not be called if there are no highmem image pages.
2450  */
2451 static int prepare_highmem_image(struct memory_bitmap *bm,
2452 				 unsigned int *nr_highmem_p)
2453 {
2454 	unsigned int to_alloc;
2455 
2456 	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2457 		return -ENOMEM;
2458 
2459 	if (get_highmem_buffer(PG_SAFE))
2460 		return -ENOMEM;
2461 
2462 	to_alloc = count_free_highmem_pages();
2463 	if (to_alloc > *nr_highmem_p)
2464 		to_alloc = *nr_highmem_p;
2465 	else
2466 		*nr_highmem_p = to_alloc;
2467 
2468 	safe_highmem_pages = 0;
2469 	while (to_alloc-- > 0) {
2470 		struct page *page;
2471 
2472 		page = alloc_page(__GFP_HIGHMEM);
2473 		if (!swsusp_page_is_free(page)) {
2474 			/* The page is "safe", set its bit the bitmap */
2475 			memory_bm_set_bit(bm, page_to_pfn(page));
2476 			safe_highmem_pages++;
2477 		}
2478 		/* Mark the page as allocated */
2479 		swsusp_set_page_forbidden(page);
2480 		swsusp_set_page_free(page);
2481 	}
2482 	memory_bm_position_reset(bm);
2483 	safe_highmem_bm = bm;
2484 	return 0;
2485 }
2486 
2487 static struct page *last_highmem_page;
2488 
2489 /**
2490  * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
2491  *
2492  * For a given highmem image page get a buffer that suspend_write_next() should
2493  * return to its caller to write to.
2494  *
2495  * If the page is to be saved to its "original" page frame or a copy of
2496  * the page is to be made in the highmem, @buffer is returned.  Otherwise,
2497  * the copy of the page is to be made in normal memory, so the address of
2498  * the copy is returned.
2499  *
2500  * If @buffer is returned, the caller of suspend_write_next() will write
2501  * the page's contents to @buffer, so they will have to be copied to the
2502  * right location on the next call to suspend_write_next() and it is done
2503  * with the help of copy_last_highmem_page().  For this purpose, if
2504  * @buffer is returned, @last_highmem_page is set to the page to which
2505  * the data will have to be copied from @buffer.
2506  */
2507 static void *get_highmem_page_buffer(struct page *page,
2508 				     struct chain_allocator *ca)
2509 {
2510 	struct highmem_pbe *pbe;
2511 	void *kaddr;
2512 
2513 	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2514 		/*
2515 		 * We have allocated the "original" page frame and we can
2516 		 * use it directly to store the loaded page.
2517 		 */
2518 		last_highmem_page = page;
2519 		return buffer;
2520 	}
2521 	/*
2522 	 * The "original" page frame has not been allocated and we have to
2523 	 * use a "safe" page frame to store the loaded page.
2524 	 */
2525 	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2526 	if (!pbe) {
2527 		swsusp_free();
2528 		return ERR_PTR(-ENOMEM);
2529 	}
2530 	pbe->orig_page = page;
2531 	if (safe_highmem_pages > 0) {
2532 		struct page *tmp;
2533 
2534 		/* Copy of the page will be stored in high memory */
2535 		kaddr = buffer;
2536 		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2537 		safe_highmem_pages--;
2538 		last_highmem_page = tmp;
2539 		pbe->copy_page = tmp;
2540 	} else {
2541 		/* Copy of the page will be stored in normal memory */
2542 		kaddr = __get_safe_page(ca->gfp_mask);
2543 		if (!kaddr)
2544 			return ERR_PTR(-ENOMEM);
2545 		pbe->copy_page = virt_to_page(kaddr);
2546 	}
2547 	pbe->next = highmem_pblist;
2548 	highmem_pblist = pbe;
2549 	return kaddr;
2550 }
2551 
2552 /**
2553  * copy_last_highmem_page - Copy most the most recent highmem image page.
2554  *
2555  * Copy the contents of a highmem image from @buffer, where the caller of
2556  * snapshot_write_next() has stored them, to the right location represented by
2557  * @last_highmem_page .
2558  */
2559 static void copy_last_highmem_page(void)
2560 {
2561 	if (last_highmem_page) {
2562 		void *dst;
2563 
2564 		dst = kmap_atomic(last_highmem_page);
2565 		copy_page(dst, buffer);
2566 		kunmap_atomic(dst);
2567 		last_highmem_page = NULL;
2568 	}
2569 }
2570 
2571 static inline int last_highmem_page_copied(void)
2572 {
2573 	return !last_highmem_page;
2574 }
2575 
2576 static inline void free_highmem_data(void)
2577 {
2578 	if (safe_highmem_bm)
2579 		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2580 
2581 	if (buffer)
2582 		free_image_page(buffer, PG_UNSAFE_CLEAR);
2583 }
2584 #else
2585 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2586 
2587 static inline int prepare_highmem_image(struct memory_bitmap *bm,
2588 					unsigned int *nr_highmem_p) { return 0; }
2589 
2590 static inline void *get_highmem_page_buffer(struct page *page,
2591 					    struct chain_allocator *ca)
2592 {
2593 	return ERR_PTR(-EINVAL);
2594 }
2595 
2596 static inline void copy_last_highmem_page(void) {}
2597 static inline int last_highmem_page_copied(void) { return 1; }
2598 static inline void free_highmem_data(void) {}
2599 #endif /* CONFIG_HIGHMEM */
2600 
2601 #define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
2602 
2603 /**
2604  * prepare_image - Make room for loading hibernation image.
2605  * @new_bm: Uninitialized memory bitmap structure.
2606  * @bm: Memory bitmap with unsafe pages marked.
2607  * @zero_bm: Memory bitmap containing the zero pages.
2608  *
2609  * Use @bm to mark the pages that will be overwritten in the process of
2610  * restoring the system memory state from the suspend image ("unsafe" pages)
2611  * and allocate memory for the image.
2612  *
2613  * The idea is to allocate a new memory bitmap first and then allocate
2614  * as many pages as needed for image data, but without specifying what those
2615  * pages will be used for just yet.  Instead, we mark them all as allocated and
2616  * create a lists of "safe" pages to be used later.  On systems with high
2617  * memory a list of "safe" highmem pages is created too.
2618  *
2619  * Because it was not known which pages were unsafe when @zero_bm was created,
2620  * make a copy of it and recreate it within safe pages.
2621  */
2622 static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm,
2623 		struct memory_bitmap *zero_bm)
2624 {
2625 	unsigned int nr_pages, nr_highmem;
2626 	struct memory_bitmap tmp;
2627 	struct linked_page *lp;
2628 	int error;
2629 
2630 	/* If there is no highmem, the buffer will not be necessary */
2631 	free_image_page(buffer, PG_UNSAFE_CLEAR);
2632 	buffer = NULL;
2633 
2634 	nr_highmem = count_highmem_image_pages(bm);
2635 	mark_unsafe_pages(bm);
2636 
2637 	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2638 	if (error)
2639 		goto Free;
2640 
2641 	duplicate_memory_bitmap(new_bm, bm);
2642 	memory_bm_free(bm, PG_UNSAFE_KEEP);
2643 
2644 	/* Make a copy of zero_bm so it can be created in safe pages */
2645 	error = memory_bm_create(&tmp, GFP_ATOMIC, PG_SAFE);
2646 	if (error)
2647 		goto Free;
2648 
2649 	duplicate_memory_bitmap(&tmp, zero_bm);
2650 	memory_bm_free(zero_bm, PG_UNSAFE_KEEP);
2651 
2652 	/* Recreate zero_bm in safe pages */
2653 	error = memory_bm_create(zero_bm, GFP_ATOMIC, PG_SAFE);
2654 	if (error)
2655 		goto Free;
2656 
2657 	duplicate_memory_bitmap(zero_bm, &tmp);
2658 	memory_bm_free(&tmp, PG_UNSAFE_CLEAR);
2659 	/* At this point zero_bm is in safe pages and it can be used for restoring. */
2660 
2661 	if (nr_highmem > 0) {
2662 		error = prepare_highmem_image(bm, &nr_highmem);
2663 		if (error)
2664 			goto Free;
2665 	}
2666 	/*
2667 	 * Reserve some safe pages for potential later use.
2668 	 *
2669 	 * NOTE: This way we make sure there will be enough safe pages for the
2670 	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
2671 	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2672 	 *
2673 	 * nr_copy_pages cannot be less than allocated_unsafe_pages too.
2674 	 */
2675 	nr_pages = (nr_zero_pages + nr_copy_pages) - nr_highmem - allocated_unsafe_pages;
2676 	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2677 	while (nr_pages > 0) {
2678 		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2679 		if (!lp) {
2680 			error = -ENOMEM;
2681 			goto Free;
2682 		}
2683 		lp->next = safe_pages_list;
2684 		safe_pages_list = lp;
2685 		nr_pages--;
2686 	}
2687 	/* Preallocate memory for the image */
2688 	nr_pages = (nr_zero_pages + nr_copy_pages) - nr_highmem - allocated_unsafe_pages;
2689 	while (nr_pages > 0) {
2690 		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2691 		if (!lp) {
2692 			error = -ENOMEM;
2693 			goto Free;
2694 		}
2695 		if (!swsusp_page_is_free(virt_to_page(lp))) {
2696 			/* The page is "safe", add it to the list */
2697 			lp->next = safe_pages_list;
2698 			safe_pages_list = lp;
2699 		}
2700 		/* Mark the page as allocated */
2701 		swsusp_set_page_forbidden(virt_to_page(lp));
2702 		swsusp_set_page_free(virt_to_page(lp));
2703 		nr_pages--;
2704 	}
2705 	return 0;
2706 
2707  Free:
2708 	swsusp_free();
2709 	return error;
2710 }
2711 
2712 /**
2713  * get_buffer - Get the address to store the next image data page.
2714  *
2715  * Get the address that snapshot_write_next() should return to its caller to
2716  * write to.
2717  */
2718 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2719 {
2720 	struct pbe *pbe;
2721 	struct page *page;
2722 	unsigned long pfn = memory_bm_next_pfn(bm);
2723 
2724 	if (pfn == BM_END_OF_MAP)
2725 		return ERR_PTR(-EFAULT);
2726 
2727 	page = pfn_to_page(pfn);
2728 	if (PageHighMem(page))
2729 		return get_highmem_page_buffer(page, ca);
2730 
2731 	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2732 		/*
2733 		 * We have allocated the "original" page frame and we can
2734 		 * use it directly to store the loaded page.
2735 		 */
2736 		return page_address(page);
2737 
2738 	/*
2739 	 * The "original" page frame has not been allocated and we have to
2740 	 * use a "safe" page frame to store the loaded page.
2741 	 */
2742 	pbe = chain_alloc(ca, sizeof(struct pbe));
2743 	if (!pbe) {
2744 		swsusp_free();
2745 		return ERR_PTR(-ENOMEM);
2746 	}
2747 	pbe->orig_address = page_address(page);
2748 	pbe->address = __get_safe_page(ca->gfp_mask);
2749 	if (!pbe->address)
2750 		return ERR_PTR(-ENOMEM);
2751 	pbe->next = restore_pblist;
2752 	restore_pblist = pbe;
2753 	return pbe->address;
2754 }
2755 
2756 /**
2757  * snapshot_write_next - Get the address to store the next image page.
2758  * @handle: Snapshot handle structure to guide the writing.
2759  *
2760  * On the first call, @handle should point to a zeroed snapshot_handle
2761  * structure.  The structure gets populated then and a pointer to it should be
2762  * passed to this function every next time.
2763  *
2764  * On success, the function returns a positive number.  Then, the caller
2765  * is allowed to write up to the returned number of bytes to the memory
2766  * location computed by the data_of() macro.
2767  *
2768  * The function returns 0 to indicate the "end of file" condition.  Negative
2769  * numbers are returned on errors, in which cases the structure pointed to by
2770  * @handle is not updated and should not be used any more.
2771  */
2772 int snapshot_write_next(struct snapshot_handle *handle)
2773 {
2774 	static struct chain_allocator ca;
2775 	int error;
2776 
2777 next:
2778 	/* Check if we have already loaded the entire image */
2779 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages)
2780 		return 0;
2781 
2782 	if (!handle->cur) {
2783 		if (!buffer)
2784 			/* This makes the buffer be freed by swsusp_free() */
2785 			buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2786 
2787 		if (!buffer)
2788 			return -ENOMEM;
2789 
2790 		handle->buffer = buffer;
2791 	} else if (handle->cur == 1) {
2792 		error = load_header(buffer);
2793 		if (error)
2794 			return error;
2795 
2796 		safe_pages_list = NULL;
2797 
2798 		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2799 		if (error)
2800 			return error;
2801 
2802 		error = memory_bm_create(&zero_bm, GFP_ATOMIC, PG_ANY);
2803 		if (error)
2804 			return error;
2805 
2806 		nr_zero_pages = 0;
2807 
2808 		hibernate_restore_protection_begin();
2809 	} else if (handle->cur <= nr_meta_pages + 1) {
2810 		error = unpack_orig_pfns(buffer, &copy_bm, &zero_bm);
2811 		if (error)
2812 			return error;
2813 
2814 		if (handle->cur == nr_meta_pages + 1) {
2815 			error = prepare_image(&orig_bm, &copy_bm, &zero_bm);
2816 			if (error)
2817 				return error;
2818 
2819 			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2820 			memory_bm_position_reset(&orig_bm);
2821 			memory_bm_position_reset(&zero_bm);
2822 			restore_pblist = NULL;
2823 			handle->buffer = get_buffer(&orig_bm, &ca);
2824 			if (IS_ERR(handle->buffer))
2825 				return PTR_ERR(handle->buffer);
2826 		}
2827 	} else {
2828 		copy_last_highmem_page();
2829 		error = hibernate_restore_protect_page(handle->buffer);
2830 		if (error)
2831 			return error;
2832 		handle->buffer = get_buffer(&orig_bm, &ca);
2833 		if (IS_ERR(handle->buffer))
2834 			return PTR_ERR(handle->buffer);
2835 	}
2836 	handle->sync_read = (handle->buffer == buffer);
2837 	handle->cur++;
2838 
2839 	/* Zero pages were not included in the image, memset it and move on. */
2840 	if (handle->cur > nr_meta_pages + 1 &&
2841 	    memory_bm_test_bit(&zero_bm, memory_bm_get_current(&orig_bm))) {
2842 		memset(handle->buffer, 0, PAGE_SIZE);
2843 		goto next;
2844 	}
2845 
2846 	return PAGE_SIZE;
2847 }
2848 
2849 /**
2850  * snapshot_write_finalize - Complete the loading of a hibernation image.
2851  *
2852  * Must be called after the last call to snapshot_write_next() in case the last
2853  * page in the image happens to be a highmem page and its contents should be
2854  * stored in highmem.  Additionally, it recycles bitmap memory that's not
2855  * necessary any more.
2856  */
2857 int snapshot_write_finalize(struct snapshot_handle *handle)
2858 {
2859 	int error;
2860 
2861 	copy_last_highmem_page();
2862 	error = hibernate_restore_protect_page(handle->buffer);
2863 	/* Do that only if we have loaded the image entirely */
2864 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages) {
2865 		memory_bm_recycle(&orig_bm);
2866 		free_highmem_data();
2867 	}
2868 	return error;
2869 }
2870 
2871 int snapshot_image_loaded(struct snapshot_handle *handle)
2872 {
2873 	return !(!nr_copy_pages || !last_highmem_page_copied() ||
2874 			handle->cur <= nr_meta_pages + nr_copy_pages + nr_zero_pages);
2875 }
2876 
2877 #ifdef CONFIG_HIGHMEM
2878 /* Assumes that @buf is ready and points to a "safe" page */
2879 static inline void swap_two_pages_data(struct page *p1, struct page *p2,
2880 				       void *buf)
2881 {
2882 	void *kaddr1, *kaddr2;
2883 
2884 	kaddr1 = kmap_atomic(p1);
2885 	kaddr2 = kmap_atomic(p2);
2886 	copy_page(buf, kaddr1);
2887 	copy_page(kaddr1, kaddr2);
2888 	copy_page(kaddr2, buf);
2889 	kunmap_atomic(kaddr2);
2890 	kunmap_atomic(kaddr1);
2891 }
2892 
2893 /**
2894  * restore_highmem - Put highmem image pages into their original locations.
2895  *
2896  * For each highmem page that was in use before hibernation and is included in
2897  * the image, and also has been allocated by the "restore" kernel, swap its
2898  * current contents with the previous (ie. "before hibernation") ones.
2899  *
2900  * If the restore eventually fails, we can call this function once again and
2901  * restore the highmem state as seen by the restore kernel.
2902  */
2903 int restore_highmem(void)
2904 {
2905 	struct highmem_pbe *pbe = highmem_pblist;
2906 	void *buf;
2907 
2908 	if (!pbe)
2909 		return 0;
2910 
2911 	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2912 	if (!buf)
2913 		return -ENOMEM;
2914 
2915 	while (pbe) {
2916 		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2917 		pbe = pbe->next;
2918 	}
2919 	free_image_page(buf, PG_UNSAFE_CLEAR);
2920 	return 0;
2921 }
2922 #endif /* CONFIG_HIGHMEM */
2923