/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 *
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
}

/* List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

/**
 *	@safe_needed - on resume, for storing the PBE list and the image,
 *	we can only use memory pages that do not conflict with the pages
 *	used before suspend.  The unsafe pages have PageNosaveFree set
 *	and we count them using unsafe_pages.
 *
 *	Each allocated image page is marked as PageNosave and PageNosaveFree
 *	so that swsusp_free() can release it.
 */

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;

static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

/**
 *	free_image_page - free page represented by @addr, allocated with
 *	get_image_page (page flags set by it must be cleared)
 */

static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;

static inline void
free_list_of_pages(struct linked_page *list, int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}

/**
 *	struct chain_allocator is used for allocating small objects out of
 *	a linked list of pages called 'the chain'.
 *
 *	The chain grows each time when there is no room for a new object in
 *	the current page.  The allocated objects cannot be freed individually.
 *	It is only possible to free them all at once, by freeing the entire
 *	chain.
 *
 *	NOTE: The chain allocator may be inefficient if the allocated objects
 *	are not much smaller than PAGE_SIZE.
 */

struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					 * of the current page
					 */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void
chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}
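
/*
 * Example (illustrative sketch, not part of the original file): with 4 KiB
 * pages and 8-byte pointers, LINKED_PAGE_DATA_SIZE is 4096 - 8 = 4088 bytes
 * per chain link, so one link can hold e.g. 170 24-byte rtree_node objects.
 * A typical caller looks like:
 *
 *	struct chain_allocator ca;
 *	struct rtree_node *node;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	node = chain_alloc(&ca, sizeof(struct rtree_node));
 *	...
 *	free_list_of_pages(ca.chain, PG_UNSAFE_KEEP);
 *
 * Note that chain_init() deliberately sets used_space to
 * LINKED_PAGE_DATA_SIZE, so the first chain_alloc() call always grabs the
 * first page.
 */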

/**
 *	Data types related to memory bitmaps.
 *
 *	Memory bitmap is a structure consisting of many linked lists of
 *	objects.  struct memory_bitmap contains a list of zone bitmap
 *	objects (struct mem_zone_bm_rtree, one for each populated memory
 *	zone), a struct bm_position used for browsing the bitmap, and a
 *	pointer to the list of pages used for allocating all of the zone
 *	bitmap objects and bitmap block objects.
 *
 *	NOTE: It has to be possible to lay out the bitmap in memory
 *	using only allocations of order 0.  Additionally, the bitmap is
 *	designed to work with an arbitrary number of zones (this is over the
 *	top for now, but let's avoid making unnecessary assumptions ;-).
 *
 *	The memory bitmap is organized as a radix tree to guarantee fast
 *	random access to the bits.  There is one radix tree for each zone (as
 *	returned from create_mem_extents).
 *
 *	One radix tree is represented by one struct mem_zone_bm_rtree.  There
 *	are two linked lists for the nodes of the tree, one for the inner
 *	nodes and one for the leaf nodes.  The linked leaf nodes are used for
 *	fast linear access of the memory bitmap.
 *
 *	The struct rtree_node represents one node of the radix tree.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)

/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
	struct list_head list;
	unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link Zones together         */
	struct list_head nodes;		/* Radix Tree inner nodes      */
	struct list_head leaves;	/* Radix Tree leaves           */
	unsigned long start_pfn;	/* Zone start page frame       */
	unsigned long end_pfn;		/* Zone end page frame + 1     */
	struct rtree_node *rtree;	/* Radix Tree Root             */
	int levels;			/* Number of Radix Tree Levels */
	unsigned int blocks;		/* Number of Bitmap Blocks     */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	int node_bit;
};

struct memory_bitmap {
	struct list_head zones;
	struct linked_page *p_list;	/* list of pages used to store zone
					 * bitmap objects and bitmap block
					 * objects
					 */
	struct bm_position cur;	/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
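
/*
 * Worked example (illustrative): with 4 KiB pages on a 64-bit system,
 * BM_BITS_PER_BLOCK = 4096 * 8 = 32768, so one leaf node covers 32768 page
 * frames (128 MiB of RAM).  An inner node is also one page and holds
 * BM_ENTRIES_PER_LEVEL = 4096 / 8 = 512 pointers, so BM_RTREE_LEVEL_SHIFT
 * is 12 - 3 = 9 and every tree level consumes 9 bits of the block number.
 * A zone spanning 1 TiB needs 2^40 / 2^12 / 2^15 = 8192 leaf blocks and
 * therefore a two-level tree (8192 <= 512 * 512).
 */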

/*
 *	alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 *	This function is used to allocate inner nodes as well as the
 *	leaf nodes of the radix tree. It also adds the node to the
 *	corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
					   struct chain_allocator *ca,
					   struct list_head *list)
{
	struct rtree_node *node;

	node = chain_alloc(ca, sizeof(struct rtree_node));
	if (!node)
		return NULL;

	node->data = get_image_page(gfp_mask, safe_needed);
	if (!node->data)
		return NULL;

	list_add_tail(&node->list, list);

	return node;
}

/*
 *	add_rtree_block - Add a new leaf node to the radix tree
 *
 *	The leaf nodes need to be allocated in order to keep the leaves
 *	linked list in order. This is guaranteed by the zone->blocks
 *	counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}

static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free);

/*
 *	create_zone_bm_rtree - create a radix tree for one zone
 *
 *	Allocates the mem_zone_bm_rtree structure and initializes it.
 *	This function also allocates and builds the radix tree for the
 *	zone.
 */
static struct mem_zone_bm_rtree *
create_zone_bm_rtree(gfp_t gfp_mask, int safe_needed,
		     struct chain_allocator *ca,
		     unsigned long start, unsigned long end)
{
	struct mem_zone_bm_rtree *zone;
	unsigned int i, nr_blocks;
	unsigned long pages;

	pages = end - start;
	zone  = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
	if (!zone)
		return NULL;

	INIT_LIST_HEAD(&zone->nodes);
	INIT_LIST_HEAD(&zone->leaves);
	zone->start_pfn = start;
	zone->end_pfn = end;
	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	for (i = 0; i < nr_blocks; i++) {
		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
			return NULL;
		}
	}

	return zone;
}

/*
 *	free_zone_bm_rtree - Free the memory of the radix tree
 *
 *	Free all node pages of the radix tree. The mem_zone_bm_rtree
 *	structure itself is not freed here nor are the rtree_node
 *	structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		free_image_page(node->data, clear_nosave_free);

	list_for_each_entry(node, &zone->leaves, list)
		free_image_page(node->data, clear_nosave_free);
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
				  list);
	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
				  struct rtree_node, list);
	bm->cur.node_pfn = 0;
	bm->cur.node_bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 *	free_mem_extents - free a list of memory extents
 *	@list - list of extents to empty
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}

/**
 *	create_mem_extents - create a list of memory extents representing
 *	                     contiguous ranges of PFNs
 *	@list - list to put the extents into
 *	@gfp_mask - mask to use for memory allocations
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone_end_pfn(zone);

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}

/**
 *	memory_bm_create - allocate memory for a memory bitmap
 */
static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->zones);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct mem_zone_bm_rtree *zone;

		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
					    ext->start, ext->end);
		if (!zone) {
			error = -ENOMEM;
			goto Error;
		}
		list_add_tail(&zone->list, &bm->zones);
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}

/**
 *	memory_bm_free - free memory occupied by the memory bitmap @bm
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->zones);
}

/**
 *	memory_bm_find_bit - Find the bit for pfn in the memory
 *			     bitmap
 *
 *	Find the bit in the bitmap @bm that corresponds to given pfn.
 *	The cur.zone, cur.node and cur.node_pfn members of @bm are
 *	updated.
 *	It walks the radix tree to find the page which contains the bit for
 *	pfn and returns the page address in *addr and the bit's index within
 *	that page in *bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have a zone. Now walk the radix tree to find the leaf
	 * node for our pfn.
	 */

	node = bm->cur.node;
	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node      = zone->rtree;
	block_nr  = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}
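
/*
 * Example lookup (illustrative): assume 4 KiB pages, a zone with
 * start_pfn == 0 and pfn == 100000.  Then block_nr = 100000 >> 15 = 3, so
 * the bit lives in the fourth leaf node.  With a two-level tree the walk
 * computes index = (3 >> 9) & 511 = 0 at the top level and
 * index = 3 & 511 = 3 at the leaf level, and finally
 * *bit_nr = 100000 & 32767 = 1696 within that leaf's data page.
 */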

static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);

	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/*
 *	rtree_next_node - Jumps to the next leaf node
 *
 *	Sets the position to the beginning of the next node in the
 *	memory bitmap. This is either the next node in the current
 *	zone's radix tree or the first node in the radix tree of the
 *	next zone.
 *
 *	Returns true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
	bm->cur.node = list_entry(bm->cur.node->list.next,
				  struct rtree_node, list);
	if (&bm->cur.node->list != &bm->cur.zone->leaves) {
		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
		bm->cur.node_bit  = 0;
		touch_softlockup_watchdog();
		return true;
	}

	/* No more nodes, go to the next zone */
	bm->cur.zone = list_entry(bm->cur.zone->list.next,
				  struct mem_zone_bm_rtree, list);
	if (&bm->cur.zone->list != &bm->zones) {
		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
					  struct rtree_node, list);
		bm->cur.node_pfn = 0;
		bm->cur.node_bit = 0;
		return true;
	}

	/* No more zones */
	return false;
}

/**
 *	memory_bm_next_pfn - Find the next set bit in the bitmap @bm
 *
 *	Starting from the last returned position this function searches
 *	for the next set bit in the memory bitmap and returns its
 *	number. If no more bits are set, BM_END_OF_MAP is returned.
 *
 *	It is required to run memory_bm_position_reset() before the
 *	first call to this function.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	unsigned long bits, pfn, pages;
	int bit;

	do {
		pages	  = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
		bits      = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
		bit	  = find_next_bit(bm->cur.node->data, bits,
					  bm->cur.node_bit);
		if (bit < bits) {
			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
			bm->cur.node_bit = bit + 1;
			return pfn;
		}
	} while (rtree_next_node(bm));

	return BM_END_OF_MAP;
}
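
/*
 * Typical iteration idiom (illustrative sketch; do_something() is a
 * placeholder for the caller's per-page work):
 *
 *	unsigned long pfn;
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *	     pfn = memory_bm_next_pfn(bm))
 *		do_something(pfn);
 *
 * See copy_data_pages() and pack_pfns() below for real users of this
 * pattern.
 */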

/**
 *	This structure represents a range of page frames the contents of which
 *	should not be saved during the suspend.
 */

struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

/**
 *	register_nosave_region - register a range of page frames the contents
 *	of which should not be saved during the suspend (to be used in the early
 *	initialization code)
 */

void __init
__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
			 int use_kmalloc)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
					struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* during init, this shouldn't fail */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else
		/* This allocation cannot fail */
		region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}
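
/*
 * Usage sketch (illustrative): architecture code normally calls this via
 * the register_nosave_region()/register_nosave_region_late() wrappers in
 * <linux/suspend.h>, e.g. to keep a firmware-owned range out of the image:
 *
 *	register_nosave_region(PFN_DOWN(fw_start), PFN_UP(fw_end));
 *
 * where fw_start and fw_end are hypothetical physical addresses bounding
 * a region that must be neither saved nor restored.
 */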

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 *	mark_nosave_pages - set bits corresponding to the page frames the
 *	contents of which should not be saved in a given bitmap.
 */

static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}

static bool is_nosave_page(unsigned long pfn)
{
	struct nosave_region *region;

	list_for_each_entry(region, &nosave_regions, list) {
		if (pfn >= region->start_pfn && pfn < region->end_pfn) {
			pr_err("PM: %#010llx in e820 nosave region: "
			       "[mem %#010llx-%#010llx]\n",
			       (unsigned long long) pfn << PAGE_SHIFT,
			       (unsigned long long) region->start_pfn << PAGE_SHIFT,
			       ((unsigned long long) region->end_pfn << PAGE_SHIFT)
					- 1);
			return true;
		}
	}

	return false;
}

/**
 *	create_basic_memory_bitmaps - create bitmaps needed for marking page
 *	frames that should not be saved and free page frames.  The pointers
 *	forbidden_pages_map and free_pages_map are only modified if everything
 *	goes well, because we don't want the bits to be used before both bitmaps
 *	are set up.
 */

int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	if (forbidden_pages_map && free_pages_map)
		return 0;
	else
		BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("PM: Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}

/**
 *	free_basic_memory_bitmaps - free memory bitmaps allocated by
 *	create_basic_memory_bitmaps().  The auxiliary pointers are necessary
 *	so that the bitmaps themselves are not referred to while they are being
 *	freed.
 */

void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("PM: Basic memory bitmaps freed\n");
}

/**
 *	snapshot_additional_pages - estimate the number of additional pages
 *	that will be needed for setting up the suspend image data structures
 *	for given zone (usually the returned value is greater than the exact
 *	number)
 */

unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
			      LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	return 2 * rtree;
}
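
/*
 * Worked example (illustrative): a zone spanning 1 GiB with 4 KiB pages has
 * spanned_pages = 262144, so the bitmap needs DIV_ROUND_UP(262144, 32768)
 * = 8 leaf pages, one more page for the rtree_node bookkeeping (8 structs
 * fit easily in one linked page), and DIV_ROUND_UP(8, 512) = 1 inner node:
 * 10 pages per bitmap.  The result is doubled because two bitmaps (orig_bm
 * and copy_bm) are created, giving an estimate of 20 pages.
 */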

#ifdef CONFIG_HIGHMEM
/**
 *	count_free_highmem_pages - compute the total number of free highmem
 *	pages, system-wide.
 */

static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 *	saveable_highmem_page - Determine whether a highmem page should be
 *	included in the suspend image.
 *
 *	We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 *	and it isn't a part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
	    PageReserved(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 *	count_highmem_pages - compute the total number of saveable highmem
 *	pages.
 */

static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif /* CONFIG_HIGHMEM */

/**
 *	saveable_page - Determine whether a non-highmem page should be included
 *	in the suspend image.
 *
 *	We should save the page if it isn't Nosave, and is not in the range
 *	of pages statically defined as 'unsaveable', and it isn't a part of
 *	a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 *	count_data_pages - compute the total number of saveable non-highmem
 *	pages.
 */

static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}

/* This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}


/**
 *	safe_copy_page - check if the page we are going to copy is marked as
 *		present in the kernel page tables (this always is the case if
 *		CONFIG_DEBUG_PAGEALLOC is not set and in that case
 *		kernel_page_present() always returns 'true').
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}


#ifdef CONFIG_HIGHMEM
static inline struct page *
page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/* Page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
				pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

static void
copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for (;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages.  During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released.  On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/**
 *	swsusp_free - free pages allocated for the suspend.
 *
 *	Suspend pages are allocated before the atomic copy is made, so we
 *	need to release them after the resume.
 */

void swsusp_free(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	for_each_populated_zone(zone) {
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				if (swsusp_page_is_forbidden(page) &&
				    swsusp_page_is_free(page)) {
					swsusp_unset_page_forbidden(page);
					swsusp_unset_page_free(page);
					__free_page(page);
				}
			}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages,
					      unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;

	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;

	return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 *  __fraction - Compute (an approximation of) x * (multiplier / base)
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	x *= multiplier;
	do_div(x, base);
	return (unsigned long)x;
}
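
/*
 * Example (illustrative): __fraction(1000, 1, 3) yields 333, i.e. the
 * result is truncated, not rounded.  preallocate_highmem_fraction() below
 * uses it to split a request proportionally: with highmem = 3 GiB out of
 * total = 4 GiB, roughly three quarters of the requested pages are taken
 * from highmem.
 */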
1424 
1425 static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1426 						unsigned long highmem,
1427 						unsigned long total)
1428 {
1429 	unsigned long alloc = __fraction(nr_pages, highmem, total);
1430 
1431 	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
1432 }
1433 #else /* CONFIG_HIGHMEM */
1434 static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
1435 {
1436 	return 0;
1437 }
1438 
1439 static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1440 						unsigned long highmem,
1441 						unsigned long total)
1442 {
1443 	return 0;
1444 }
1445 #endif /* CONFIG_HIGHMEM */
1446 
1447 /**
1448  * free_unnecessary_pages - Release preallocated pages not needed for the image
1449  */
1450 static void free_unnecessary_pages(void)
1451 {
1452 	unsigned long save, to_free_normal, to_free_highmem;
1453 
1454 	save = count_data_pages();
1455 	if (alloc_normal >= save) {
1456 		to_free_normal = alloc_normal - save;
1457 		save = 0;
1458 	} else {
1459 		to_free_normal = 0;
1460 		save -= alloc_normal;
1461 	}
1462 	save += count_highmem_pages();
1463 	if (alloc_highmem >= save) {
1464 		to_free_highmem = alloc_highmem - save;
1465 	} else {
1466 		to_free_highmem = 0;
1467 		save -= alloc_highmem;
1468 		if (to_free_normal > save)
1469 			to_free_normal -= save;
1470 		else
1471 			to_free_normal = 0;
1472 	}
1473 
1474 	memory_bm_position_reset(&copy_bm);
1475 
1476 	while (to_free_normal > 0 || to_free_highmem > 0) {
1477 		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
1478 		struct page *page = pfn_to_page(pfn);
1479 
1480 		if (PageHighMem(page)) {
1481 			if (!to_free_highmem)
1482 				continue;
1483 			to_free_highmem--;
1484 			alloc_highmem--;
1485 		} else {
1486 			if (!to_free_normal)
1487 				continue;
1488 			to_free_normal--;
1489 			alloc_normal--;
1490 		}
1491 		memory_bm_clear_bit(&copy_bm, pfn);
1492 		swsusp_unset_page_forbidden(page);
1493 		swsusp_unset_page_free(page);
1494 		__free_page(page);
1495 	}
1496 }
1497 
1498 /**
1499  * minimum_image_size - Estimate the minimum acceptable size of an image
1500  * @saveable: Number of saveable pages in the system.
1501  *
1502  * We want to avoid attempting to free too much memory too hard, so estimate the
1503  * minimum acceptable size of a hibernation image to use as the lower limit for
1504  * preallocating memory.
1505  *
1506  * We assume that the minimum image size should be proportional to
1507  *
1508  * [number of saveable pages] - [number of pages that can be freed in theory]
1509  *
1510  * where the second term is the sum of (1) reclaimable slab pages, (2) active
1511  * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
1512  * minus mapped file pages.
1513  */
1514 static unsigned long minimum_image_size(unsigned long saveable)
1515 {
1516 	unsigned long size;
1517 
1518 	size = global_page_state(NR_SLAB_RECLAIMABLE)
1519 		+ global_page_state(NR_ACTIVE_ANON)
1520 		+ global_page_state(NR_INACTIVE_ANON)
1521 		+ global_page_state(NR_ACTIVE_FILE)
1522 		+ global_page_state(NR_INACTIVE_FILE)
1523 		- global_page_state(NR_FILE_MAPPED);
1524 
1525 	return saveable <= size ? 0 : saveable - size;
1526 }
1527 
1528 /**
1529  * hibernate_preallocate_memory - Preallocate memory for hibernation image
1530  *
1531  * To create a hibernation image it is necessary to make a copy of every page
1532  * frame in use.  We also need a number of page frames to be free during
1533  * hibernation for allocations made while saving the image and for device
1534  * drivers, in case they need to allocate memory from their hibernation
1535  * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
1536  * estimate) and reserverd_size divided by PAGE_SIZE (which is tunable through
1537  * /sys/power/reserved_size, respectively).  To make this happen, we compute the
1538  * total number of available page frames and allocate at least
1539  *
1540  * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
1541  *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
1542  *
1543  * of them, which corresponds to the maximum size of a hibernation image.
1544  *
1545  * If image_size is set below the number following from the above formula,
1546  * the preallocation of memory is continued until the total number of saveable
1547  * pages in the system is below the requested image size or the minimum
1548  * acceptable image size returned by minimum_image_size(), whichever is greater.
1549  */
1550 int hibernate_preallocate_memory(void)
1551 {
1552 	struct zone *zone;
1553 	unsigned long saveable, size, max_size, count, highmem, pages = 0;
1554 	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
1555 	struct timeval start, stop;
1556 	int error;
1557 
1558 	printk(KERN_INFO "PM: Preallocating image memory... ");
1559 	do_gettimeofday(&start);
1560 
1561 	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
1562 	if (error)
1563 		goto err_out;
1564 
1565 	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
1566 	if (error)
1567 		goto err_out;
1568 
1569 	alloc_normal = 0;
1570 	alloc_highmem = 0;
1571 
1572 	/* Count the number of saveable data pages. */
1573 	save_highmem = count_highmem_pages();
1574 	saveable = count_data_pages();
1575 
1576 	/*
1577 	 * Compute the total number of page frames we can use (count) and the
1578 	 * number of pages needed for image metadata (size).
1579 	 */
1580 	count = saveable;
1581 	saveable += save_highmem;
1582 	highmem = save_highmem;
1583 	size = 0;
1584 	for_each_populated_zone(zone) {
1585 		size += snapshot_additional_pages(zone);
1586 		if (is_highmem(zone))
1587 			highmem += zone_page_state(zone, NR_FREE_PAGES);
1588 		else
1589 			count += zone_page_state(zone, NR_FREE_PAGES);
1590 	}
1591 	avail_normal = count;
1592 	count += highmem;
1593 	count -= totalreserve_pages;
1594 
1595 	/* Add number of pages required for page keys (s390 only). */
1596 	size += page_key_additional_pages(saveable);
1597 
1598 	/* Compute the maximum number of saveable pages to leave in memory. */
1599 	max_size = (count - (size + PAGES_FOR_IO)) / 2
1600 			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
1601 	/* Compute the desired number of image pages specified by image_size. */
1602 	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1603 	if (size > max_size)
1604 		size = max_size;
1605 	/*
1606 	 * If the desired number of image pages is at least as large as the
1607 	 * current number of saveable pages in memory, allocate page frames for
1608 	 * the image and we're done.
1609 	 */
1610 	if (size >= saveable) {
1611 		pages = preallocate_image_highmem(save_highmem);
1612 		pages += preallocate_image_memory(saveable - pages, avail_normal);
1613 		goto out;
1614 	}
1615 
1616 	/* Estimate the minimum size of the image. */
1617 	pages = minimum_image_size(saveable);
1618 	/*
1619 	 * To avoid excessive pressure on the normal zone, leave room in it to
1620 	 * accommodate an image of the minimum size (unless it's already too
1621 	 * small, in which case don't preallocate pages from it at all).
1622 	 */
1623 	if (avail_normal > pages)
1624 		avail_normal -= pages;
1625 	else
1626 		avail_normal = 0;
1627 	if (size < pages)
1628 		size = min_t(unsigned long, pages, max_size);
1629 
1630 	/*
1631 	 * Let the memory management subsystem know that we're going to need a
1632 	 * large number of page frames to allocate and make it free some memory.
1633 	 * NOTE: If this is not done, performance will be hurt badly in some
1634 	 * test cases.
1635 	 */
1636 	shrink_all_memory(saveable - size);
1637 
1638 	/*
1639 	 * The number of saveable pages in memory was too high, so apply some
1640 	 * pressure to decrease it.  First, make room for the largest possible
1641 	 * image and fail if that doesn't work.  Next, try to decrease the size
1642 	 * of the image as much as indicated by 'size' using allocations from
1643 	 * highmem and non-highmem zones separately.
1644 	 */
1645 	pages_highmem = preallocate_image_highmem(highmem / 2);
1646 	alloc = count - max_size;
1647 	if (alloc > pages_highmem)
1648 		alloc -= pages_highmem;
1649 	else
1650 		alloc = 0;
1651 	pages = preallocate_image_memory(alloc, avail_normal);
1652 	if (pages < alloc) {
1653 		/* We have exhausted non-highmem pages, try highmem. */
1654 		alloc -= pages;
1655 		pages += pages_highmem;
1656 		pages_highmem = preallocate_image_highmem(alloc);
1657 		if (pages_highmem < alloc)
1658 			goto err_out;
1659 		pages += pages_highmem;
1660 		/*
1661 		 * size is the desired number of saveable pages to leave in
1662 		 * memory, so try to preallocate (all memory - size) pages.
1663 		 */
1664 		alloc = (count - pages) - size;
1665 		pages += preallocate_image_highmem(alloc);
1666 	} else {
1667 		/*
1668 		 * There are approximately max_size saveable pages at this point
1669 		 * and we want to reduce this number down to size.
1670 		 */
1671 		alloc = max_size - size;
1672 		size = preallocate_highmem_fraction(alloc, highmem, count);
1673 		pages_highmem += size;
1674 		alloc -= size;
1675 		size = preallocate_image_memory(alloc, avail_normal);
1676 		pages_highmem += preallocate_image_highmem(alloc - size);
1677 		pages += pages_highmem + size;
1678 	}
1679 
1680 	/*
1681 	 * We only need as many page frames for the image as there are saveable
1682 	 * pages in memory, but we have allocated more.  Release the excessive
1683 	 * ones now.
1684 	 */
1685 	free_unnecessary_pages();
1686 
1687  out:
1688 	do_gettimeofday(&stop);
1689 	printk(KERN_CONT "done (allocated %lu pages)\n", pages);
1690 	swsusp_show_speed(&start, &stop, pages, "Allocated");
1691 
1692 	return 0;
1693 
1694  err_out:
1695 	printk(KERN_CONT "\n");
1696 	swsusp_free();
1697 	return -ENOMEM;
1698 }
1699 
1700 #ifdef CONFIG_HIGHMEM
1701 /**
1702   *	count_pages_for_highmem - compute the number of non-highmem pages
1703   *	that will be necessary for creating copies of highmem pages.
1704   */
1705 
1706 static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1707 {
1708 	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1709 
1710 	if (free_highmem >= nr_highmem)
1711 		nr_highmem = 0;
1712 	else
1713 		nr_highmem -= free_highmem;
1714 
1715 	return nr_highmem;
1716 }
1717 #else
1718 static unsigned int
1719 count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1720 #endif /* CONFIG_HIGHMEM */
1721 
1722 /**
1723  *	enough_free_mem - Make sure we have enough free memory for the
1724  *	snapshot image.
1725  */
1726 
1727 static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1728 {
1729 	struct zone *zone;
1730 	unsigned int free = alloc_normal;
1731 
1732 	for_each_populated_zone(zone)
1733 		if (!is_highmem(zone))
1734 			free += zone_page_state(zone, NR_FREE_PAGES);
1735 
1736 	nr_pages += count_pages_for_highmem(nr_highmem);
1737 	pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
1738 		nr_pages, PAGES_FOR_IO, free);
1739 
1740 	return free > nr_pages + PAGES_FOR_IO;
1741 }
1742 
1743 #ifdef CONFIG_HIGHMEM
1744 /**
1745  *	get_highmem_buffer - if there are some highmem pages in the suspend
1746  *	image, we may need the buffer to copy them and/or load their data.
1747  */
1748 
1749 static inline int get_highmem_buffer(int safe_needed)
1750 {
1751 	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
1752 	return buffer ? 0 : -ENOMEM;
1753 }
1754 
1755 /**
1756  *	alloc_highmem_image_pages - allocate some highmem pages for the image.
1757  *	Try to allocate as many pages as needed, but if the number of free
1758  *	highmem pages is lesser than that, allocate them all.
1759  */
1760 
1761 static inline unsigned int
1762 alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
1763 {
1764 	unsigned int to_alloc = count_free_highmem_pages();
1765 
1766 	if (to_alloc > nr_highmem)
1767 		to_alloc = nr_highmem;
1768 
1769 	nr_highmem -= to_alloc;
1770 	while (to_alloc-- > 0) {
1771 		struct page *page;
1772 
1773 		page = alloc_image_page(__GFP_HIGHMEM);
1774 		memory_bm_set_bit(bm, page_to_pfn(page));
1775 	}
1776 	return nr_highmem;
1777 }
1778 #else
1779 static inline int get_highmem_buffer(int safe_needed) { return 0; }
1780 
1781 static inline unsigned int
1782 alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
1783 #endif /* CONFIG_HIGHMEM */
1784 
1785 /**
1786  *	swsusp_alloc - allocate memory for the suspend image
1787  *
1788  *	We first try to allocate as many highmem pages as there are
1789  *	saveable highmem pages in the system.  If that fails, we allocate
1790  *	non-highmem pages for the copies of the remaining highmem ones.
1791  *
1792  *	In this approach it is likely that the copies of highmem pages will
1793  *	also be located in the high memory, because of the way in which
1794  *	copy_data_pages() works.
1795  */
1796 
1797 static int
1798 swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
1799 		unsigned int nr_pages, unsigned int nr_highmem)
1800 {
1801 	if (nr_highmem > 0) {
1802 		if (get_highmem_buffer(PG_ANY))
1803 			goto err_out;
1804 		if (nr_highmem > alloc_highmem) {
1805 			nr_highmem -= alloc_highmem;
1806 			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1807 		}
1808 	}
1809 	if (nr_pages > alloc_normal) {
1810 		nr_pages -= alloc_normal;
1811 		while (nr_pages-- > 0) {
1812 			struct page *page;
1813 
1814 			page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
1815 			if (!page)
1816 				goto err_out;
1817 			memory_bm_set_bit(copy_bm, page_to_pfn(page));
1818 		}
1819 	}
1820 
1821 	return 0;
1822 
1823  err_out:
1824 	swsusp_free();
1825 	return -ENOMEM;
1826 }
1827 
1828 asmlinkage __visible int swsusp_save(void)
1829 {
1830 	unsigned int nr_pages, nr_highmem;
1831 
1832 	printk(KERN_INFO "PM: Creating hibernation image:\n");
1833 
1834 	drain_local_pages(NULL);
1835 	nr_pages = count_data_pages();
1836 	nr_highmem = count_highmem_pages();
1837 	printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
1838 
1839 	if (!enough_free_mem(nr_pages, nr_highmem)) {
1840 		printk(KERN_ERR "PM: Not enough free memory\n");
1841 		return -ENOMEM;
1842 	}
1843 
1844 	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
1845 		printk(KERN_ERR "PM: Memory allocation failed\n");
1846 		return -ENOMEM;
1847 	}
1848 
1849 	/* During allocating of suspend pagedir, new cold pages may appear.
1850 	 * Kill them.
1851 	 */
1852 	drain_local_pages(NULL);
1853 	copy_data_pages(&copy_bm, &orig_bm);
1854 
1855 	/*
1856 	 * End of critical section.  From now on, we can write to memory,
1857 	 * but we must not touch the disk.  In particular, this means we
1858 	 * must _not_ touch swap space, except to write out our image.
1859 	 */
1860 
1861 	nr_pages += nr_highmem;
1862 	nr_copy_pages = nr_pages;
1863 	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
1864 
1865 	printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
1866 		nr_pages);
1867 
1868 	return 0;
1869 }
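
/*
 * Example (editorial addition, not in the original file): the meta-page
 * arithmetic above, assuming 4 KiB pages and 8-byte longs (e.g. x86-64).
 * Each meta page packed by pack_pfns() holds PAGE_SIZE / sizeof(long) =
 * 512 PFNs, so an image with 100000 copied pages needs
 * DIV_ROUND_UP(100000 * 8, 4096) = 196 meta pages for its PFN list.
 */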
1870 
1871 #ifndef CONFIG_ARCH_HIBERNATION_HEADER
1872 static int init_header_complete(struct swsusp_info *info)
1873 {
1874 	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
1875 	info->version_code = LINUX_VERSION_CODE;
1876 	return 0;
1877 }
1878 
1879 static char *check_image_kernel(struct swsusp_info *info)
1880 {
1881 	if (info->version_code != LINUX_VERSION_CODE)
1882 		return "kernel version";
1883 	if (strcmp(info->uts.sysname, init_utsname()->sysname))
1884 		return "system type";
1885 	if (strcmp(info->uts.release, init_utsname()->release))
1886 		return "kernel release";
1887 	if (strcmp(info->uts.version, init_utsname()->version))
1888 		return "version";
1889 	if (strcmp(info->uts.machine, init_utsname()->machine))
1890 		return "machine";
1891 	return NULL;
1892 }
1893 #endif /* CONFIG_ARCH_HIBERNATION_HEADER */
1894 
1895 unsigned long snapshot_get_image_size(void)
1896 {
1897 	return nr_copy_pages + nr_meta_pages + 1;
1898 }
1899 
1900 static int init_header(struct swsusp_info *info)
1901 {
1902 	memset(info, 0, sizeof(struct swsusp_info));
1903 	info->num_physpages = get_num_physpages();
1904 	info->image_pages = nr_copy_pages;
1905 	info->pages = snapshot_get_image_size();
1906 	info->size = info->pages;
1907 	info->size <<= PAGE_SHIFT;
1908 	return init_header_complete(info);
1909 }
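
/*
 * Example (editorial addition, continuing the numbers used after
 * swsusp_save() above): for 100000 copied pages and 196 meta pages,
 * info->pages = 100000 + 196 + 1 = 100197, where the "+ 1" is the
 * header page itself, and info->size = 100197 << PAGE_SHIFT, i.e.
 * roughly 391 MiB with 4 KiB pages.
 */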
1910 
1911 /**
1912  *	pack_pfns - store the pfns corresponding to the set bits found in
1913  *	the bitmap @bm in the array @buf[] (1 page at a time)
1914  */
1915 
1916 static inline void
1917 pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
1918 {
1919 	int j;
1920 
1921 	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
1922 		buf[j] = memory_bm_next_pfn(bm);
1923 		if (unlikely(buf[j] == BM_END_OF_MAP))
1924 			break;
1925 		/* Save page key for data page (s390 only). */
1926 		page_key_read(buf + j);
1927 	}
1928 }
1929 
1930 /**
1931  *	snapshot_read_next - used for reading the system memory snapshot.
1932  *
1933  *	On the first call, @handle should point to a zeroed
1934  *	snapshot_handle structure.  The structure gets updated and a pointer
1935  *	to it should be passed to this function on each subsequent call.
1936  *
1937  *	On success the function returns a positive number.  Then, the caller
1938  *	is allowed to read up to the returned number of bytes from the memory
1939  *	location computed by the data_of() macro.
1940  *
1941  *	The function returns 0 to indicate the end of data stream condition,
1942  *	and a negative number is returned on error.  In such cases the
1943  *	structure pointed to by @handle is not updated and should not be used
1944  *	any more.
1945  */
1946 
1947 int snapshot_read_next(struct snapshot_handle *handle)
1948 {
1949 	if (handle->cur > nr_meta_pages + nr_copy_pages)
1950 		return 0;
1951 
1952 	if (!buffer) {
1953 		/* This causes the buffer to be freed by swsusp_free() */
1954 		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
1955 		if (!buffer)
1956 			return -ENOMEM;
1957 	}
1958 	if (!handle->cur) {
1959 		int error;
1960 
1961 		error = init_header((struct swsusp_info *)buffer);
1962 		if (error)
1963 			return error;
1964 		handle->buffer = buffer;
1965 		memory_bm_position_reset(&orig_bm);
1966 		memory_bm_position_reset(&copy_bm);
1967 	} else if (handle->cur <= nr_meta_pages) {
1968 		clear_page(buffer);
1969 		pack_pfns(buffer, &orig_bm);
1970 	} else {
1971 		struct page *page;
1972 
1973 		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
1974 		if (PageHighMem(page)) {
1975 			/* Highmem pages are copied to the buffer,
1976 			 * because we can't return with a kmapped
1977 			 * highmem page (we may not be called again).
1978 			 */
1979 			void *kaddr;
1980 
1981 			kaddr = kmap_atomic(page);
1982 			copy_page(buffer, kaddr);
1983 			kunmap_atomic(kaddr);
1984 			handle->buffer = buffer;
1985 		} else {
1986 			handle->buffer = page_address(page);
1987 		}
1988 	}
1989 	handle->cur++;
1990 	return PAGE_SIZE;
1991 }
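
/*
 * Usage sketch (editorial addition, not in the original file), modelled
 * on the way the swap-writing code drives this interface.  write_page()
 * is a hypothetical sink for one page of data; data_of() is the real
 * macro from power.h that yields handle->buffer.
 */
static int example_save_image(struct snapshot_handle *snapshot)
{
	int ret;

	/* The first call requires a zeroed handle */
	memset(snapshot, 0, sizeof(*snapshot));
	for (;;) {
		ret = snapshot_read_next(snapshot);
		if (ret <= 0)
			break;	/* 0 means end of data, < 0 means error */
		ret = write_page(data_of(*snapshot));	/* hypothetical sink */
		if (ret)
			break;
	}
	return ret;
}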
1992 
1993 /**
1994  *	mark_unsafe_pages - mark the pages that cannot be used for storing
1995  *	the image during resume, because they conflict with the pages that
1996  *	had been used before suspend
1997  */
1998 
1999 static int mark_unsafe_pages(struct memory_bitmap *bm)
2000 {
2001 	struct zone *zone;
2002 	unsigned long pfn, max_zone_pfn;
2003 
2004 	/* Clear page flags */
2005 	for_each_populated_zone(zone) {
2006 		max_zone_pfn = zone_end_pfn(zone);
2007 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
2008 			if (pfn_valid(pfn))
2009 				swsusp_unset_page_free(pfn_to_page(pfn));
2010 	}
2011 
2012 	/* Mark pages that correspond to the "original" pfns as "unsafe" */
2013 	memory_bm_position_reset(bm);
2014 	do {
2015 		pfn = memory_bm_next_pfn(bm);
2016 		if (likely(pfn != BM_END_OF_MAP)) {
2017 			if (likely(pfn_valid(pfn)) && !is_nosave_page(pfn))
2018 				swsusp_set_page_free(pfn_to_page(pfn));
2019 			else
2020 				return -EFAULT;
2021 		}
2022 	} while (pfn != BM_END_OF_MAP);
2023 
2024 	allocated_unsafe_pages = 0;
2025 
2026 	return 0;
2027 }
2028 
2029 static void
2030 duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
2031 {
2032 	unsigned long pfn;
2033 
2034 	memory_bm_position_reset(src);
2035 	pfn = memory_bm_next_pfn(src);
2036 	while (pfn != BM_END_OF_MAP) {
2037 		memory_bm_set_bit(dst, pfn);
2038 		pfn = memory_bm_next_pfn(src);
2039 	}
2040 }
2041 
2042 static int check_header(struct swsusp_info *info)
2043 {
2044 	char *reason;
2045 
2046 	reason = check_image_kernel(info);
2047 	if (!reason && info->num_physpages != get_num_physpages())
2048 		reason = "memory size";
2049 	if (reason) {
2050 		printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
2051 		return -EPERM;
2052 	}
2053 	return 0;
2054 }
2055 
2056 /**
2057  *	load_header - check the image header and copy data from it
2058  */
2059 
2060 static int
2061 load_header(struct swsusp_info *info)
2062 {
2063 	int error;
2064 
2065 	restore_pblist = NULL;
2066 	error = check_header(info);
2067 	if (!error) {
2068 		nr_copy_pages = info->image_pages;
2069 		nr_meta_pages = info->pages - info->image_pages - 1;
2070 	}
2071 	return error;
2072 }
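
/*
 * Example (editorial addition): this inverts the init_header()
 * arithmetic, e.g. a header with pages == 100197 and image_pages ==
 * 100000 yields nr_meta_pages = 100197 - 100000 - 1 = 196, matching
 * the example after swsusp_save() above.
 */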
2073 
2074 /**
2075  *	unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
2076  *	the corresponding bit in the memory bitmap @bm
2077  */
2078 static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
2079 {
2080 	int j;
2081 
2082 	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2083 		if (unlikely(buf[j] == BM_END_OF_MAP))
2084 			break;
2085 
2086 		/* Extract and buffer page key for data page (s390 only). */
2087 		page_key_memorize(buf + j);
2088 
2089 		if (memory_bm_pfn_present(bm, buf[j]))
2090 			memory_bm_set_bit(bm, buf[j]);
2091 		else
2092 			return -EFAULT;
2093 	}
2094 
2095 	return 0;
2096 }
2097 
2098 /* List of "safe" pages that may be used to store data loaded from the suspend
2099  * image
2100  */
2101 static struct linked_page *safe_pages_list;
2102 
2103 #ifdef CONFIG_HIGHMEM
2104 /* struct highmem_pbe is used for creating the list of highmem pages that
2105  * should be restored atomically during the resume from disk, because the page
2106  * frames they have occupied before the suspend are in use.
2107  */
2108 struct highmem_pbe {
2109 	struct page *copy_page;	/* data is here now */
2110 	struct page *orig_page;	/* data was here before the suspend */
2111 	struct highmem_pbe *next;
2112 };
2113 
2114 /* List of highmem PBEs needed for restoring the highmem pages that were
2115  * allocated before the suspend and included in the suspend image, but have
2116  * also been allocated by the "resume" kernel, so their contents cannot be
2117  * written directly to their "original" page frames.
2118  */
2119 static struct highmem_pbe *highmem_pblist;
2120 
2121 /**
2122  *	count_highmem_image_pages - compute the number of highmem pages in the
2123  *	suspend image.  The bits in the memory bitmap @bm that correspond to the
2124  *	image pages are assumed to be set.
2125  */
2126 
2127 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2128 {
2129 	unsigned long pfn;
2130 	unsigned int cnt = 0;
2131 
2132 	memory_bm_position_reset(bm);
2133 	pfn = memory_bm_next_pfn(bm);
2134 	while (pfn != BM_END_OF_MAP) {
2135 		if (PageHighMem(pfn_to_page(pfn)))
2136 			cnt++;
2137 
2138 		pfn = memory_bm_next_pfn(bm);
2139 	}
2140 	return cnt;
2141 }
2142 
2143 /**
2144  *	prepare_highmem_image - try to allocate as many highmem pages as
2145  *	there are highmem image pages (@nr_highmem_p points to the variable
2146  *	containing the number of highmem image pages).  The pages that are
2147  *	"safe" (ie. will not be overwritten when the suspend image is
2148  *	restored) have the corresponding bits set in @bm (it must be
2149  *	unitialized).
2150  *
2151  *	NOTE: This function should not be called if there are no highmem
2152  *	image pages.
2153  */
2154 
2155 static unsigned int safe_highmem_pages;
2156 
2157 static struct memory_bitmap *safe_highmem_bm;
2158 
2159 static int
2160 prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
2161 {
2162 	unsigned int to_alloc;
2163 
2164 	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2165 		return -ENOMEM;
2166 
2167 	if (get_highmem_buffer(PG_SAFE))
2168 		return -ENOMEM;
2169 
2170 	to_alloc = count_free_highmem_pages();
2171 	if (to_alloc > *nr_highmem_p)
2172 		to_alloc = *nr_highmem_p;
2173 	else
2174 		*nr_highmem_p = to_alloc;
2175 
2176 	safe_highmem_pages = 0;
2177 	while (to_alloc-- > 0) {
2178 		struct page *page;
2179 
2180 		page = alloc_page(__GFP_HIGHMEM);
2181 		if (!swsusp_page_is_free(page)) {
2182 			/* The page is "safe", set its bit the bitmap */
2183 			memory_bm_set_bit(bm, page_to_pfn(page));
2184 			safe_highmem_pages++;
2185 		}
2186 		/* Mark the page as allocated */
2187 		swsusp_set_page_forbidden(page);
2188 		swsusp_set_page_free(page);
2189 	}
2190 	memory_bm_position_reset(bm);
2191 	safe_highmem_bm = bm;
2192 	return 0;
2193 }
2194 
2195 /**
2196  *	get_highmem_page_buffer - for given highmem image page find the buffer
2197  *	that suspend_write_next() should set for its caller to write to.
2198  *
2199  *	If the page is to be saved to its "original" page frame or a copy of
2200  *	the page is to be made in the highmem, @buffer is returned.  Otherwise,
2201  *	the copy of the page is to be made in normal memory, so the address of
2202  *	the copy is returned.
2203  *
2204  *	If @buffer is returned, the caller of suspend_write_next() will write
2205  *	the page's contents to @buffer, so they will have to be copied to the
2206  *	right location on the next call to suspend_write_next() and it is done
2207  *	with the help of copy_last_highmem_page().  For this purpose, if
2208  *	@buffer is returned, @last_highmem page is set to the page to which
2209  *	the data will have to be copied from @buffer.
2210  */
2211 
2212 static struct page *last_highmem_page;
2213 
2214 static void *
2215 get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
2216 {
2217 	struct highmem_pbe *pbe;
2218 	void *kaddr;
2219 
2220 	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2221 		/* We have allocated the "original" page frame and we can
2222 		 * use it directly to store the loaded page.
2223 		 */
2224 		last_highmem_page = page;
2225 		return buffer;
2226 	}
2227 	/* The "original" page frame has not been allocated and we have to
2228 	 * use a "safe" page frame to store the loaded page.
2229 	 */
2230 	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2231 	if (!pbe) {
2232 		swsusp_free();
2233 		return ERR_PTR(-ENOMEM);
2234 	}
2235 	pbe->orig_page = page;
2236 	if (safe_highmem_pages > 0) {
2237 		struct page *tmp;
2238 
2239 		/* Copy of the page will be stored in high memory */
2240 		kaddr = buffer;
2241 		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2242 		safe_highmem_pages--;
2243 		last_highmem_page = tmp;
2244 		pbe->copy_page = tmp;
2245 	} else {
2246 		/* Copy of the page will be stored in normal memory */
2247 		kaddr = safe_pages_list;
2248 		safe_pages_list = safe_pages_list->next;
2249 		pbe->copy_page = virt_to_page(kaddr);
2250 	}
2251 	pbe->next = highmem_pblist;
2252 	highmem_pblist = pbe;
2253 	return kaddr;
2254 }
2255 
2256 /**
2257  *	copy_last_highmem_page - copy the contents of a highmem image page from
2258  *	@buffer, where the caller of snapshot_write_next() has placed them,
2259  *	to the right location represented by @last_highmem_page.
2260  */
2261 
2262 static void copy_last_highmem_page(void)
2263 {
2264 	if (last_highmem_page) {
2265 		void *dst;
2266 
2267 		dst = kmap_atomic(last_highmem_page);
2268 		copy_page(dst, buffer);
2269 		kunmap_atomic(dst);
2270 		last_highmem_page = NULL;
2271 	}
2272 }
2273 
2274 static inline int last_highmem_page_copied(void)
2275 {
2276 	return !last_highmem_page;
2277 }
2278 
2279 static inline void free_highmem_data(void)
2280 {
2281 	if (safe_highmem_bm)
2282 		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2283 
2284 	if (buffer)
2285 		free_image_page(buffer, PG_UNSAFE_CLEAR);
2286 }
2287 #else
2288 static inline int get_safe_write_buffer(void) { return 0; }
2289 
2290 static unsigned int
2291 count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2292 
2293 static inline int
2294 prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
2295 {
2296 	return 0;
2297 }
2298 
2299 static inline void *
2300 get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
2301 {
2302 	return ERR_PTR(-EINVAL);
2303 }
2304 
2305 static inline void copy_last_highmem_page(void) {}
2306 static inline int last_highmem_page_copied(void) { return 1; }
2307 static inline void free_highmem_data(void) {}
2308 #endif /* CONFIG_HIGHMEM */
2309 
2310 /**
2311  *	prepare_image - use the memory bitmap @bm to mark the pages that will
2312  *	be overwritten in the process of restoring the system memory state
2313  *	from the suspend image ("unsafe" pages) and allocate memory for the
2314  *	image.
2315  *
2316  *	The idea is to allocate a new memory bitmap first and then allocate
2317  *	as many pages as needed for the image data, but not to assign these
2318  *	pages to specific tasks initially.  Instead, we just mark them as
2319  *	allocated and create a list of "safe" pages that will be used
2320  *	later.  On systems with high memory a list of "safe" highmem pages is
2321  *	also created.
2322  */
2323 
2324 #define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
2325 
2326 static int
2327 prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2328 {
2329 	unsigned int nr_pages, nr_highmem;
2330 	struct linked_page *sp_list, *lp;
2331 	int error;
2332 
2333 	/* If there is no highmem, the buffer will not be necessary */
2334 	free_image_page(buffer, PG_UNSAFE_CLEAR);
2335 	buffer = NULL;
2336 
2337 	nr_highmem = count_highmem_image_pages(bm);
2338 	error = mark_unsafe_pages(bm);
2339 	if (error)
2340 		goto Free;
2341 
2342 	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2343 	if (error)
2344 		goto Free;
2345 
2346 	duplicate_memory_bitmap(new_bm, bm);
2347 	memory_bm_free(bm, PG_UNSAFE_KEEP);
2348 	if (nr_highmem > 0) {
2349 		error = prepare_highmem_image(bm, &nr_highmem);
2350 		if (error)
2351 			goto Free;
2352 	}
2353 	/* Reserve some safe pages for potential later use.
2354 	 *
2355 	 * NOTE: This way we make sure there will be enough safe pages for the
2356 	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
2357 	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2358 	 */
2359 	sp_list = NULL;
2360 	/* nr_copy_pages cannot be less than allocated_unsafe_pages */
2361 	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2362 	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2363 	while (nr_pages > 0) {
2364 		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2365 		if (!lp) {
2366 			error = -ENOMEM;
2367 			goto Free;
2368 		}
2369 		lp->next = sp_list;
2370 		sp_list = lp;
2371 		nr_pages--;
2372 	}
2373 	/* Preallocate memory for the image */
2374 	safe_pages_list = NULL;
2375 	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2376 	while (nr_pages > 0) {
2377 		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2378 		if (!lp) {
2379 			error = -ENOMEM;
2380 			goto Free;
2381 		}
2382 		if (!swsusp_page_is_free(virt_to_page(lp))) {
2383 			/* The page is "safe", add it to the list */
2384 			lp->next = safe_pages_list;
2385 			safe_pages_list = lp;
2386 		}
2387 		/* Mark the page as allocated */
2388 		swsusp_set_page_forbidden(virt_to_page(lp));
2389 		swsusp_set_page_free(virt_to_page(lp));
2390 		nr_pages--;
2391 	}
2392 	/* Free the reserved safe pages so that chain_alloc() can use them */
2393 	while (sp_list) {
2394 		lp = sp_list->next;
2395 		free_image_page(sp_list, PG_UNSAFE_CLEAR);
2396 		sp_list = lp;
2397 	}
2398 	return 0;
2399 
2400  Free:
2401 	swsusp_free();
2402 	return error;
2403 }
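
/*
 * Example (editorial addition): the reservation arithmetic above,
 * assuming 4 KiB pages and 64-bit pointers.  sizeof(struct pbe) is 24
 * bytes (three pointers) and LINKED_PAGE_DATA_SIZE is PAGE_SIZE minus
 * one "next" pointer, i.e. 4088 bytes, so PBES_PER_LINKED_PAGE = 170
 * and an image needing, say, 34000 non-highmem PBEs reserves
 * DIV_ROUND_UP(34000, 170) = 200 safe pages for chain_alloc().
 */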
2404 
2405 /**
2406  *	get_buffer - compute the address that snapshot_write_next() should
2407  *	set for its caller to write to.
2408  */
2409 
2410 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2411 {
2412 	struct pbe *pbe;
2413 	struct page *page;
2414 	unsigned long pfn = memory_bm_next_pfn(bm);
2415 
2416 	if (pfn == BM_END_OF_MAP)
2417 		return ERR_PTR(-EFAULT);
2418 
2419 	page = pfn_to_page(pfn);
2420 	if (PageHighMem(page))
2421 		return get_highmem_page_buffer(page, ca);
2422 
2423 	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2424 		/* We have allocated the "original" page frame and we can
2425 		 * use it directly to store the loaded page.
2426 		 */
2427 		return page_address(page);
2428 
2429 	/* The "original" page frame has not been allocated and we have to
2430 	 * use a "safe" page frame to store the loaded page.
2431 	 */
2432 	pbe = chain_alloc(ca, sizeof(struct pbe));
2433 	if (!pbe) {
2434 		swsusp_free();
2435 		return ERR_PTR(-ENOMEM);
2436 	}
2437 	pbe->orig_address = page_address(page);
2438 	pbe->address = safe_pages_list;
2439 	safe_pages_list = safe_pages_list->next;
2440 	pbe->next = restore_pblist;
2441 	restore_pblist = pbe;
2442 	return pbe->address;
2443 }
2444 
2445 /**
2446  *	snapshot_write_next - used for writing the system memory snapshot.
2447  *
2448  *	On the first call, @handle should point to a zeroed
2449  *	snapshot_handle structure.  The structure gets updated and a pointer
2450  *	to it should be passed to this function on each subsequent call.
2451  *
2452  *	On success the function returns a positive number.  Then, the caller
2453  *	is allowed to write up to the returned number of bytes to the memory
2454  *	location computed by the data_of() macro.
2455  *
2456  *	The function returns 0 to indicate the "end of file" condition,
2457  *	and a negative number is returned on error.  In such cases the
2458  *	structure pointed to by @handle is not updated and should not be used
2459  *	any more.
2460  */
2461 
2462 int snapshot_write_next(struct snapshot_handle *handle)
2463 {
2464 	static struct chain_allocator ca;
2465 	int error = 0;
2466 
2467 	/* Check if we have already loaded the entire image */
2468 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2469 		return 0;
2470 
2471 	handle->sync_read = 1;
2472 
2473 	if (!handle->cur) {
2474 		if (!buffer)
2475 			/* This causes the buffer to be freed by swsusp_free() */
2476 			buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2477 
2478 		if (!buffer)
2479 			return -ENOMEM;
2480 
2481 		handle->buffer = buffer;
2482 	} else if (handle->cur == 1) {
2483 		error = load_header(buffer);
2484 		if (error)
2485 			return error;
2486 
2487 		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2488 		if (error)
2489 			return error;
2490 
2491 		/* Allocate buffer for page keys. */
2492 		error = page_key_alloc(nr_copy_pages);
2493 		if (error)
2494 			return error;
2495 
2496 	} else if (handle->cur <= nr_meta_pages + 1) {
2497 		error = unpack_orig_pfns(buffer, &copy_bm);
2498 		if (error)
2499 			return error;
2500 
2501 		if (handle->cur == nr_meta_pages + 1) {
2502 			error = prepare_image(&orig_bm, &copy_bm);
2503 			if (error)
2504 				return error;
2505 
2506 			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2507 			memory_bm_position_reset(&orig_bm);
2508 			restore_pblist = NULL;
2509 			handle->buffer = get_buffer(&orig_bm, &ca);
2510 			handle->sync_read = 0;
2511 			if (IS_ERR(handle->buffer))
2512 				return PTR_ERR(handle->buffer);
2513 		}
2514 	} else {
2515 		copy_last_highmem_page();
2516 		/* Restore page key for data page (s390 only). */
2517 		page_key_write(handle->buffer);
2518 		handle->buffer = get_buffer(&orig_bm, &ca);
2519 		if (IS_ERR(handle->buffer))
2520 			return PTR_ERR(handle->buffer);
2521 		if (handle->buffer != buffer)
2522 			handle->sync_read = 0;
2523 	}
2524 	handle->cur++;
2525 	return PAGE_SIZE;
2526 }
2527 
2528 /**
2529  *	snapshot_write_finalize - must be called after the last call to
2530  *	snapshot_write_next() in case the last page in the image happens
2531  *	to be a highmem page and its contents should be stored in
2532  *	highmem.  Additionally, it releases the memory that will not be
2533  *	used any more.
2534  */
2535 
2536 void snapshot_write_finalize(struct snapshot_handle *handle)
2537 {
2538 	copy_last_highmem_page();
2539 	/* Restore page key for data page (s390 only). */
2540 	page_key_write(handle->buffer);
2541 	page_key_free();
2542 	/* Free only if we have loaded the image entirely */
2543 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2544 		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
2545 		free_highmem_data();
2546 	}
2547 }
2548 
2549 int snapshot_image_loaded(struct snapshot_handle *handle)
2550 {
2551 	return !(!nr_copy_pages || !last_highmem_page_copied() ||
2552 			handle->cur <= nr_meta_pages + nr_copy_pages);
2553 }
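
/*
 * Usage sketch (editorial addition, not in the original file), modelled
 * on the way the swap-reading code drives the write-side interface
 * above.  read_page() is a hypothetical source that fills one page at
 * data_of(*snapshot); the snapshot_* calls are the real ones.
 */
static int example_load_image(struct snapshot_handle *snapshot)
{
	int ret;

	/* The first call requires a zeroed handle */
	memset(snapshot, 0, sizeof(*snapshot));
	for (;;) {
		ret = snapshot_write_next(snapshot);
		if (ret <= 0)
			break;	/* 0 means the image is complete, < 0 means error */
		ret = read_page(data_of(*snapshot));	/* hypothetical source */
		if (ret)
			break;
	}
	snapshot_write_finalize(snapshot);
	if (!ret && !snapshot_image_loaded(snapshot))
		ret = -ENODATA;	/* all pages fed, yet the image is incomplete */
	return ret;
}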
2554 
2555 #ifdef CONFIG_HIGHMEM
2556 /* Assumes that @buf is ready and points to a "safe" page */
2557 static inline void
2558 swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
2559 {
2560 	void *kaddr1, *kaddr2;
2561 
2562 	kaddr1 = kmap_atomic(p1);
2563 	kaddr2 = kmap_atomic(p2);
2564 	copy_page(buf, kaddr1);
2565 	copy_page(kaddr1, kaddr2);
2566 	copy_page(kaddr2, buf);
2567 	kunmap_atomic(kaddr2);
2568 	kunmap_atomic(kaddr1);
2569 }
2570 
2571 /**
2572  *	restore_highmem - for each highmem page that was allocated before
2573  *	the suspend and included in the suspend image, and has also been
2574  *	allocated by the "resume" kernel, swap its current (ie. "before
2575  *	resume") contents with the previous (ie. "before suspend") ones.
2576  *
2577  *	If the resume eventually fails, we can call this function once
2578  *	again and restore the "before resume" highmem state.
2579  */
2580 
2581 int restore_highmem(void)
2582 {
2583 	struct highmem_pbe *pbe = highmem_pblist;
2584 	void *buf;
2585 
2586 	if (!pbe)
2587 		return 0;
2588 
2589 	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2590 	if (!buf)
2591 		return -ENOMEM;
2592 
2593 	while (pbe) {
2594 		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2595 		pbe = pbe->next;
2596 	}
2597 	free_image_page(buf, PG_UNSAFE_CLEAR);
2598 	return 0;
2599 }
2600 #endif /* CONFIG_HIGHMEM */
2601