/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 *
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size = 500 * 1024 * 1024;

/* List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

/**
 *	@safe_needed - on resume, for storing the PBE list and the image,
 *	we can only use memory pages that do not conflict with the pages
 *	used before suspend.  The unsafe pages have PageNosaveFree set
 *	and we count them using unsafe_pages.
 *
 *	Each allocated image page is marked as PageNosave and PageNosaveFree
 *	so that swsusp_free() can release it.
 */

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;

static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

/**
 *	free_image_page - free page represented by @addr, allocated with
 *	get_image_page (page flags set by it must be cleared)
 */

static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __attribute__((packed));

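/*
 * Illustrative note (a sketch assuming 4 KiB pages and 64-bit pointers):
 * LINKED_PAGE_DATA_SIZE is 4096 - 8 = 4088 bytes, so each linked_page
 * carries 4088 bytes of payload in addition to the 'next' pointer.
 */
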
static inline void
free_list_of_pages(struct linked_page *list, int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}

/**
 *	struct chain_allocator is used for allocating small objects out of
 *	a linked list of pages called 'the chain'.
 *
 *	The chain grows each time there is no room for a new object in
 *	the current page.  The allocated objects cannot be freed individually.
 *	It is only possible to free them all at once, by freeing the entire
 *	chain.
 *
 *	NOTE: The chain allocator may be inefficient if the allocated objects
 *	are not much smaller than PAGE_SIZE.
 */

struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					 * of the current page
					 */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void
chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}

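/*
 * A minimal usage sketch of the chain allocator ('struct foo' is a
 * hypothetical small object type, not defined in this file):
 *
 *	struct chain_allocator ca;
 *	struct foo *obj;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	obj = chain_alloc(&ca, sizeof(*obj));
 *	if (!obj)
 *		return -ENOMEM;
 *	...
 *	free_list_of_pages(ca.chain, PG_UNSAFE_KEEP);
 */
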
/**
 *	Data types related to memory bitmaps.
 *
 *	A memory bitmap is a structure consisting of a linked list of
 *	objects of type struct bm_block, each of which represents one
 *	block of the bitmap in which information is stored.
 *
 *	struct memory_bitmap contains the list of bitmap block objects,
 *	a struct bm_position used for browsing the bitmap, and a pointer
 *	to the list of pages used for allocating the bitmap block objects.
 *
 *	NOTE: It has to be possible to lay out the bitmap in memory
 *	using only allocations of order 0.  Additionally, the bitmap is
 *	designed to work with an arbitrary number of zones (this is over
 *	the top for now, but let's avoid making unnecessary assumptions ;-).
 *
 *	struct bm_block contains a pointer to the memory page in which
 *	information is stored (in the form of a block of bitmap).  It also
 *	contains the pfns that correspond to the start and end of the
 *	represented memory area.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)

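/*
 * For illustration, assuming 4 KiB pages: BM_BITS_PER_BLOCK is 32768, so a
 * single bm_block can represent up to 32768 page frames (128 MiB of memory).
 */
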
struct bm_block {
	struct list_head hook;	/* hook into a list of bitmap blocks */
	unsigned long start_pfn;	/* pfn represented by the first bit */
	unsigned long end_pfn;	/* pfn represented by the last bit plus 1 */
	unsigned long *data;	/* bitmap representing pages */
};

static inline unsigned long bm_block_bits(struct bm_block *bb)
{
	return bb->end_pfn - bb->start_pfn;
}

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct bm_block *block;
	int bit;
};

struct memory_bitmap {
	struct list_head blocks;	/* list of bitmap blocks */
	struct linked_page *p_list;	/* list of pages used to store the
					 * bitmap block objects
					 */
	struct bm_position cur;	/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);
	bm->cur.bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

/**
 *	create_bm_block_list - create a list of block bitmap objects
 *	@pages - number of pages to track
 *	@list - list to put the allocated blocks into
 *	@ca - chain allocator to be used for allocating memory
 */
static int create_bm_block_list(unsigned long pages,
				struct list_head *list,
				struct chain_allocator *ca)
{
	unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	while (nr_blocks-- > 0) {
		struct bm_block *bb;

		bb = chain_alloc(ca, sizeof(struct bm_block));
		if (!bb)
			return -ENOMEM;
		list_add(&bb->hook, list);
	}

	return 0;
}

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 *	free_mem_extents - free a list of memory extents
 *	@list - list of extents to empty
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}

/**
 *	create_mem_extents - create a list of memory extents representing
 *	                     contiguous ranges of PFNs
 *	@list - list to put the extents into
 *	@gfp_mask - mask to use for memory allocations
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone->zone_start_pfn + zone->spanned_pages;

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}

/**
 *	memory_bm_create - allocate memory for a memory bitmap
 */
static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->blocks);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct bm_block *bb;
		unsigned long pfn = ext->start;
		unsigned long pages = ext->end - ext->start;

		bb = list_entry(bm->blocks.prev, struct bm_block, hook);

		error = create_bm_block_list(pages, bm->blocks.prev, &ca);
		if (error)
			goto Error;

		list_for_each_entry_continue(bb, &bm->blocks, hook) {
			bb->data = get_image_page(gfp_mask, safe_needed);
			if (!bb->data) {
				error = -ENOMEM;
				goto Error;
			}

			bb->start_pfn = pfn;
			if (pages >= BM_BITS_PER_BLOCK) {
				pfn += BM_BITS_PER_BLOCK;
				pages -= BM_BITS_PER_BLOCK;
			} else {
				/* This is executed only once in the loop */
				pfn += pages;
			}
			bb->end_pfn = pfn;
		}
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}

/**
 *	memory_bm_free - free memory occupied by the memory bitmap @bm
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct bm_block *bb;

	list_for_each_entry(bb, &bm->blocks, hook)
		if (bb->data)
			free_image_page(bb->data, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->blocks);
}

/**
 *	memory_bm_find_bit - find the bit in the bitmap @bm that corresponds
 *	to the given pfn.  The cur member of @bm is updated to point to the
 *	block containing that bit.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
				void **addr, unsigned int *bit_nr)
{
	struct bm_block *bb;

	/*
	 * Check if the pfn corresponds to the current bitmap block and find
	 * the block where it fits if this is not the case.
	 */
	bb = bm->cur.block;
	if (pfn < bb->start_pfn)
		list_for_each_entry_continue_reverse(bb, &bm->blocks, hook)
			if (pfn >= bb->start_pfn)
				break;

	if (pfn >= bb->end_pfn)
		list_for_each_entry_continue(bb, &bm->blocks, hook)
			if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
				break;

	if (&bb->hook == &bm->blocks)
		return -EFAULT;

	/* The block has been found */
	bm->cur.block = bb;
	pfn -= bb->start_pfn;
	bm->cur.bit = pfn + 1;
	*bit_nr = pfn;
	*addr = bb->data;
	return 0;
}

static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);
	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/**
 *	memory_bm_next_pfn - find the pfn that corresponds to the next set bit
 *	in the bitmap @bm.  If the pfn cannot be found, BM_END_OF_MAP is
 *	returned.
 *
 *	It is required to run memory_bm_position_reset() before the first call to
 *	this function.
 */

static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	struct bm_block *bb;
	int bit;

	bb = bm->cur.block;
	do {
		bit = bm->cur.bit;
		bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
		if (bit < bm_block_bits(bb))
			goto Return_pfn;

		bb = list_entry(bb->hook.next, struct bm_block, hook);
		bm->cur.block = bb;
		bm->cur.bit = 0;
	} while (&bb->hook != &bm->blocks);

	memory_bm_position_reset(bm);
	return BM_END_OF_MAP;

 Return_pfn:
	bm->cur.bit = bit + 1;
	return bb->start_pfn + bit;
}

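/*
 * A minimal sketch of how the bitmap API above is typically used: walk all
 * set bits of a bitmap (memory_bm_position_reset() is required before the
 * first memory_bm_next_pfn() call):
 *
 *	static void walk_bitmap(struct memory_bitmap *bm)
 *	{
 *		unsigned long pfn;
 *
 *		memory_bm_position_reset(bm);
 *		for (;;) {
 *			pfn = memory_bm_next_pfn(bm);
 *			if (pfn == BM_END_OF_MAP)
 *				break;
 *			...use pfn here...
 *		}
 *	}
 */
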
/**
 *	This structure represents a range of page frames the contents of which
 *	should not be saved during the suspend.
 */

struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

/**
 *	register_nosave_region - register a range of page frames the contents
 *	of which should not be saved during the suspend (to be used in the early
 *	initialization code)
 */

void __init
__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
			 int use_kmalloc)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
					struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* during init, this shouldn't fail */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else
		/* This allocation cannot fail */
		region = alloc_bootmem(sizeof(struct nosave_region));
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	printk(KERN_INFO "PM: Registered nosave memory: %016lx - %016lx\n",
		start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}

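/*
 * A sketch of typical use from early architecture setup code, assuming the
 * register_nosave_region() wrapper declared in <linux/suspend.h>; fw_start
 * and fw_end are hypothetical physical addresses of a firmware area:
 *
 *	register_nosave_region(PFN_DOWN(fw_start), PFN_UP(fw_end));
 */
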
/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 *	mark_nosave_pages - set the bits in a given bitmap that correspond
 *	to the page frames the contents of which should not be saved.
 */

static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("PM: Marking nosave pages: %016lx - %016lx\n",
				region->start_pfn << PAGE_SHIFT,
				region->end_pfn << PAGE_SHIFT);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}

/**
 *	create_basic_memory_bitmaps - create bitmaps needed for marking page
 *	frames that should not be saved and free page frames.  The pointers
 *	forbidden_pages_map and free_pages_map are only modified if everything
 *	goes well, because we don't want the bits to be used before both bitmaps
 *	are set up.
 */

int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("PM: Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}

/**
 *	free_basic_memory_bitmaps - free memory bitmaps allocated by
 *	create_basic_memory_bitmaps().  The auxiliary pointers are necessary
 *	so that the bitmaps themselves are not referred to while they are being
 *	freed.
 */

void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	BUG_ON(!(forbidden_pages_map && free_pages_map));

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("PM: Basic memory bitmaps freed\n");
}

/**
 *	snapshot_additional_pages - estimate the number of additional pages
 *	that will be needed for setting up the suspend image data structures
 *	for a given zone (usually the returned value is greater than the
 *	exact number)
 */

unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int res;

	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
	return 2 * res;
}

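/*
 * Worked example (a sketch assuming 4 KiB pages and 64-bit pointers): for a
 * zone spanning 262144 pages (1 GiB), res = DIV_ROUND_UP(262144, 32768) = 8
 * bitmap pages, plus DIV_ROUND_UP(8 * sizeof(struct bm_block), 4096) = 1
 * page for the block objects; doubled for the two bitmaps, 18 pages total.
 */
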
#ifdef CONFIG_HIGHMEM
/**
 *	count_free_highmem_pages - compute the total number of free highmem
 *	pages, system-wide.
 */

static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 *	saveable_highmem_page - Determine whether a highmem page should be
 *	included in the suspend image.
 *
 *	We should save the page if it isn't Nosave, NosaveFree or Reserved,
 *	and it isn't a part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
	    PageReserved(page))
		return NULL;

	return page;
}

/**
 *	count_highmem_pages - compute the total number of saveable highmem
 *	pages.
 */

static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif /* CONFIG_HIGHMEM */

/**
 *	saveable_page - Determine whether a non-highmem page should be included
 *	in the suspend image.
 *
 *	We should save the page if it isn't Nosave, and is not in the range
 *	of pages statically defined as 'unsaveable', and it isn't a part of
 *	a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	return page;
}

/**
 *	count_data_pages - compute the total number of saveable non-highmem
 *	pages.
 */

static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}

/* This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}


/**
 *	safe_copy_page - check if the page we are going to copy is marked as
 *		present in the kernel page tables (this always is the case if
 *		CONFIG_DEBUG_PAGEALLOC is not set and in that case
 *		kernel_page_present() always returns 'true').
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}


#ifdef CONFIG_HIGHMEM
static inline struct page *
page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page, KM_USER0);
		dst = kmap_atomic(d_page, KM_USER1);
		do_copy_page(dst, src);
		kunmap_atomic(src, KM_USER0);
		kunmap_atomic(dst, KM_USER1);
	} else {
		if (PageHighMem(d_page)) {
			/* Page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page, KM_USER0);
			memcpy(dst, buffer, PAGE_SIZE);
			kunmap_atomic(dst, KM_USER0);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
				pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

static void
copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for (;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages.  During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released.  On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/**
 *	swsusp_free - free pages allocated for the suspend.
 *
 *	Suspend pages are allocated before the atomic copy is made, so we
 *	need to release them after the resume.
 */

void swsusp_free(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	for_each_populated_zone(zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				if (swsusp_page_is_forbidden(page) &&
				    swsusp_page_is_free(page)) {
					swsusp_unset_page_forbidden(page);
					swsusp_unset_page_free(page);
					__free_page(page);
				}
			}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 *  __fraction - Compute (an approximation of) x * (multiplier / base)
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	x *= multiplier;
	do_div(x, base);
	return (unsigned long)x;
}

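/*
 * For instance, __fraction(1000, 275, 1024) evaluates to 268: 1000 * 275 =
 * 275000, and the integer division by do_div() gives 275000 / 1024 = 268.
 */
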
static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						unsigned long highmem,
						unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						unsigned long highmem,
						unsigned long total)
{
	return 0;
}
#endif /* CONFIG_HIGHMEM */

/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image
 */
static void free_unnecessary_pages(void)
{
	unsigned long save_highmem, to_free_normal, to_free_highmem;

	to_free_normal = alloc_normal - count_data_pages();
	save_highmem = count_highmem_pages();
	if (alloc_highmem > save_highmem) {
		to_free_highmem = alloc_highmem - save_highmem;
	} else {
		to_free_highmem = 0;
		to_free_normal -= save_highmem - alloc_highmem;
	}

	memory_bm_position_reset(&copy_bm);

	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}
}

/**
 * minimum_image_size - Estimate the minimum acceptable size of an image
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so estimate the
 * minimum acceptable size of a hibernation image to use as the lower limit for
 * preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
 * minus mapped file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_page_state(NR_SLAB_RECLAIMABLE)
		+ global_page_state(NR_ACTIVE_ANON)
		+ global_page_state(NR_INACTIVE_ANON)
		+ global_page_state(NR_ACTIVE_FILE)
		+ global_page_state(NR_INACTIVE_FILE)
		- global_page_state(NR_FILE_MAPPED);

	return saveable <= size ? 0 : saveable - size;
}

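/*
 * A numeric sketch with made-up values: if 'saveable' is 500000 pages and
 * the freeable sum above (slab + anon LRU + file LRU - mapped) is 350000
 * pages, the minimum image size estimate is 150000 pages; if the sum meets
 * or exceeds 'saveable', the estimate is 0.
 */
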
/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use.  We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO and SPARE_PAGES,
 * respectively, both of which are rough estimates).  To make this happen, we
 * compute the total number of available page frames and allocate at least
 *
 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2 + 2 * SPARE_PAGES
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is greater.
 */
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem;
	struct timeval start, stop;
	int error;

	printk(KERN_INFO "PM: Preallocating image memory... ");
	do_gettimeofday(&start);

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	alloc_normal = 0;
	alloc_highmem = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and the
	 * number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	count += highmem;
	count -= totalreserve_pages;

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2 - 2 * SPARE_PAGES;
	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
	if (size > max_size)
		size = max_size;
	/*
	 * If the maximum is not less than the current number of saveable pages
	 * in memory, allocate page frames for the image and we're done.
	 */
	if (size >= saveable) {
		pages = preallocate_image_highmem(save_highmem);
		pages += preallocate_image_memory(saveable - pages);
		goto out;
	}

	/* Estimate the minimum size of the image. */
	pages = minimum_image_size(saveable);
	if (size < pages)
		size = min_t(unsigned long, pages, max_size);

	/*
	 * Let the memory management subsystem know that we're going to need a
	 * large number of page frames to allocate and make it free some memory.
	 * NOTE: If this is not done, performance will be hurt badly in some
	 * test cases.
	 */
	shrink_all_memory(saveable - size);

	/*
	 * The number of saveable pages in memory was too high, so apply some
	 * pressure to decrease it.  First, make room for the largest possible
	 * image and fail if that doesn't work.  Next, try to decrease the size
	 * of the image as much as indicated by 'size' using allocations from
	 * highmem and non-highmem zones separately.
	 */
	pages_highmem = preallocate_image_highmem(highmem / 2);
	alloc = (count - max_size) - pages_highmem;
	pages = preallocate_image_memory(alloc);
	if (pages < alloc)
		goto err_out;
	size = max_size - size;
	alloc = size;
	size = preallocate_highmem_fraction(size, highmem, count);
	pages_highmem += size;
	alloc -= size;
	pages += preallocate_image_memory(alloc);
	pages += pages_highmem;

	/*
	 * We only need as many page frames for the image as there are saveable
	 * pages in memory, but we have allocated more.  Release the excessive
	 * ones now.
	 */
	free_unnecessary_pages();

 out:
	do_gettimeofday(&stop);
	printk(KERN_CONT "done (allocated %lu pages)\n", pages);
	swsusp_show_speed(&start, &stop, pages, "Allocated");

	return 0;

 err_out:
	printk(KERN_CONT "\n");
	swsusp_free();
	return -ENOMEM;
}

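/*
 * Rough numeric sketch (made-up values): with count = 1000000 usable page
 * frames and size + PAGES_FOR_IO = 10000 pages, max_size is 495000 - 2 *
 * SPARE_PAGES, i.e. roughly half of the page frames may keep saveable
 * pages while at least the other half is preallocated for the image.
 */
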
#ifdef CONFIG_HIGHMEM
/**
 *	count_pages_for_highmem - compute the number of non-highmem pages
 *	that will be necessary for creating copies of highmem pages.
 */

static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;

	if (free_highmem >= nr_highmem)
		nr_highmem = 0;
	else
		nr_highmem -= free_highmem;

	return nr_highmem;
}
#else
static unsigned int
count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 *	enough_free_mem - Make sure we have enough free memory for the
 *	snapshot image.
 */

static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = alloc_normal;

	for_each_populated_zone(zone)
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);

	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
		nr_pages, PAGES_FOR_IO, free);

	return free > nr_pages + PAGES_FOR_IO;
}

#ifdef CONFIG_HIGHMEM
/**
 *	get_highmem_buffer - if there are some highmem pages in the suspend
 *	image, we may need the buffer to copy them and/or load their data.
 */

static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
	return buffer ? 0 : -ENOMEM;
}

/**
 *	alloc_highmem_pages - allocate some highmem pages for the image.
 *	Try to allocate as many pages as needed, but if the number of free
 *	highmem pages is smaller than that, allocate them all.
 */

static inline unsigned int
alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int
alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 *	swsusp_alloc - allocate memory for the suspend image
 *
 *	We first try to allocate as many highmem pages as there are
 *	saveable highmem pages in the system.  If that fails, we allocate
 *	non-highmem pages for the copies of the remaining highmem ones.
 *
 *	In this approach it is likely that the copies of highmem pages will
 *	also be located in the high memory, because of the way in which
 *	copy_data_pages() works.
 */

static int
swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
		unsigned int nr_pages, unsigned int nr_highmem)
{
	int error = 0;

	if (nr_highmem > 0) {
		error = get_highmem_buffer(PG_ANY);
		if (error)
			goto err_out;
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return error;
}

asmlinkage int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	printk(KERN_INFO "PM: Creating hibernation image:\n");

	drain_local_pages(NULL);
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		printk(KERN_ERR "PM: Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
		printk(KERN_ERR "PM: Memory allocation failed\n");
		return -ENOMEM;
	}
	/* During the allocation of the suspend pagedir, new cold pages may
	 * appear.  Kill them.
	 */
	drain_local_pages(NULL);
	copy_data_pages(&copy_bm, &orig_bm);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This especially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */

	nr_pages += nr_highmem;
	nr_copy_pages = nr_pages;
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
		nr_pages);

	return 0;
}

#ifndef CONFIG_ARCH_HIBERNATION_HEADER
static int init_header_complete(struct swsusp_info *info)
{
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->version_code = LINUX_VERSION_CODE;
	return 0;
}

static char *check_image_kernel(struct swsusp_info *info)
{
	if (info->version_code != LINUX_VERSION_CODE)
		return "kernel version";
	if (strcmp(info->uts.sysname, init_utsname()->sysname))
		return "system type";
	if (strcmp(info->uts.release, init_utsname()->release))
		return "kernel release";
	if (strcmp(info->uts.version, init_utsname()->version))
		return "version";
	if (strcmp(info->uts.machine, init_utsname()->machine))
		return "machine";
	return NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

unsigned long snapshot_get_image_size(void)
{
	return nr_copy_pages + nr_meta_pages + 1;
}

static int init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->num_physpages = num_physpages;
	info->image_pages = nr_copy_pages;
	info->pages = snapshot_get_image_size();
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
	return init_header_complete(info);
}

/**
 *	pack_pfns - pfns corresponding to the set bits found in the bitmap @bm
 *	are stored in the array @buf[] (1 page at a time)
 */

static inline void
pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
	}
}

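/*
 * Layout note (assuming 64-bit longs and 4 KiB pages): each metadata page
 * packed by pack_pfns() holds PAGE_SIZE / sizeof(long) = 512 pfns, so an
 * image of nr_copy_pages pages needs DIV_ROUND_UP(nr_copy_pages, 512)
 * metadata pages, matching the nr_meta_pages computation in swsusp_save().
 */
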
/**
 *	snapshot_read_next - used for reading the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function on each subsequent call.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to read from the snapshot.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to read up to the returned number of bytes from the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the read would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the end of the data stream
 *	condition, and a negative number is returned on error.  In such
 *	cases the structure pointed to by @handle is not updated and should
 *	not be used any more.
 */

int snapshot_read_next(struct snapshot_handle *handle, size_t count)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset) {
		int error;

		error = init_header((struct swsusp_info *)buffer);
		if (error)
			return error;
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	}
	if (handle->prev < handle->cur) {
		if (handle->cur <= nr_meta_pages) {
			memset(buffer, 0, PAGE_SIZE);
			pack_pfns(buffer, &orig_bm);
		} else {
			struct page *page;

			page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
			if (PageHighMem(page)) {
				/* Highmem pages are copied to the buffer,
				 * because we can't return with a kmapped
				 * highmem page (we may not be called again).
				 */
				void *kaddr;

				kaddr = kmap_atomic(page, KM_USER0);
				memcpy(buffer, kaddr, PAGE_SIZE);
				kunmap_atomic(kaddr, KM_USER0);
				handle->buffer = buffer;
			} else {
				handle->buffer = page_address(page);
			}
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else {
		handle->cur_offset += count;
	}
	handle->offset += count;
	return count;
}

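/*
 * A minimal sketch of the calling convention described above; 'emit_chunk'
 * is a hypothetical routine that consumes the bytes made available by each
 * call:
 *
 *	struct snapshot_handle handle;
 *	int n;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((n = snapshot_read_next(&handle, PAGE_SIZE)) > 0)
 *		emit_chunk(data_of(handle), n);
 *	return n;	(0 at the end of the data stream, negative on error)
 */
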
/**
 *	mark_unsafe_pages - mark the pages that cannot be used for storing
 *	the image during resume, because they conflict with the pages that
 *	had been used before suspend
 */

static int mark_unsafe_pages(struct memory_bitmap *bm)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	/* Clear page flags */
	for_each_populated_zone(zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn))
				swsusp_unset_page_free(pfn_to_page(pfn));
	}

	/* Mark pages that correspond to the "original" pfns as "unsafe" */
	memory_bm_position_reset(bm);
	do {
		pfn = memory_bm_next_pfn(bm);
		if (likely(pfn != BM_END_OF_MAP)) {
			if (likely(pfn_valid(pfn)))
				swsusp_set_page_free(pfn_to_page(pfn));
			else
				return -EFAULT;
		}
	} while (pfn != BM_END_OF_MAP);

	allocated_unsafe_pages = 0;

	return 0;
}

static void
duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}

static int check_header(struct swsusp_info *info)
{
	char *reason;

	reason = check_image_kernel(info);
	if (!reason && info->num_physpages != num_physpages)
		reason = "memory size";
	if (reason) {
		printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/**
 *	load_header - check the image header and copy the data from it
 */

static int
load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}

/**
 *	unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
 *	the corresponding bit in the memory bitmap @bm
 */
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		if (memory_bm_pfn_present(bm, buf[j]))
			memory_bm_set_bit(bm, buf[j]);
		else
			return -EFAULT;
	}

	return 0;
}

/* List of "safe" pages that may be used to store data loaded from the suspend
 * image
 */
static struct linked_page *safe_pages_list;

#ifdef CONFIG_HIGHMEM
/* struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the page
 * frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
	struct page *copy_page;	/* data is here now */
	struct page *orig_page;	/* data was here before the suspend */
	struct highmem_pbe *next;
};

/* List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;

/**
 *	count_highmem_image_pages - compute the number of highmem pages in the
 *	suspend image.  The bits in the memory bitmap @bm that correspond to the
 *	image pages are assumed to be set.
 */

static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;
	unsigned int cnt = 0;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (PageHighMem(pfn_to_page(pfn)))
			cnt++;

		pfn = memory_bm_next_pfn(bm);
	}
	return cnt;
}

/**
 *	prepare_highmem_image - try to allocate as many highmem pages as
 *	there are highmem image pages (@nr_highmem_p points to the variable
 *	containing the number of highmem image pages).  The pages that are
 *	"safe" (i.e. will not be overwritten when the suspend image is
 *	restored) have the corresponding bits set in @bm (it must be
 *	uninitialized).
 *
 *	NOTE: This function should not be called if there are no highmem
 *	image pages.
 */

static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;

static int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit in the bitmap */
1867 			memory_bm_set_bit(bm, page_to_pfn(page));
1868 			safe_highmem_pages++;
1869 		}
1870 		/* Mark the page as allocated */
1871 		swsusp_set_page_forbidden(page);
1872 		swsusp_set_page_free(page);
1873 	}
1874 	memory_bm_position_reset(bm);
1875 	safe_highmem_bm = bm;
1876 	return 0;
1877 }
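/*
 * Editor's note: to_alloc above ends up as
 * min(count_free_highmem_pages(), *nr_highmem_p), and *nr_highmem_p is
 * written back when it has to be clamped.  For example, with 300 free
 * highmem pages and 500 highmem image pages only 300 pages are allocated,
 * and prepare_image() below learns from *nr_highmem_p that the remaining
 * 200 copies have to be placed in normal memory instead.
 */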
1878 
1879 /**
1880  *	get_highmem_page_buffer - for given highmem image page find the buffer
1881  *	that suspend_write_next() should set for its caller to write to.
1882  *
1883  *	If the page is to be saved to its "original" page frame or a copy of
1884  *	the page is to be made in highmem, @buffer is returned.  Otherwise,
1885  *	the copy of the page is to be made in normal memory, so the address of
1886  *	the copy is returned.
1887  *
1888  *	If @buffer is returned, the caller of snapshot_write_next() will write
1889  *	the page's contents to @buffer, so they will have to be copied to the
1890  *	right location on the next call to snapshot_write_next(), which is done
1891  *	with the help of copy_last_highmem_page().  For this purpose, if
1892  *	@buffer is returned, @last_highmem_page is set to the page to which
1893  *	the data will have to be copied from @buffer.
1894  */
1895 
1896 static struct page *last_highmem_page;
1897 
1898 static void *
1899 get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
1900 {
1901 	struct highmem_pbe *pbe;
1902 	void *kaddr;
1903 
1904 	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
1905 		/* We have allocated the "original" page frame and we can
1906 		 * use it directly to store the loaded page.
1907 		 */
1908 		last_highmem_page = page;
1909 		return buffer;
1910 	}
1911 	/* The "original" page frame has not been allocated and we have to
1912 	 * use a "safe" page frame to store the loaded page.
1913 	 */
1914 	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
1915 	if (!pbe) {
1916 		swsusp_free();
1917 		return ERR_PTR(-ENOMEM);
1918 	}
1919 	pbe->orig_page = page;
1920 	if (safe_highmem_pages > 0) {
1921 		struct page *tmp;
1922 
1923 		/* Copy of the page will be stored in high memory */
1924 		kaddr = buffer;
1925 		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
1926 		safe_highmem_pages--;
1927 		last_highmem_page = tmp;
1928 		pbe->copy_page = tmp;
1929 	} else {
1930 		/* Copy of the page will be stored in normal memory */
1931 		kaddr = safe_pages_list;
1932 		safe_pages_list = safe_pages_list->next;
1933 		pbe->copy_page = virt_to_page(kaddr);
1934 	}
1935 	pbe->next = highmem_pblist;
1936 	highmem_pblist = pbe;
1937 	return kaddr;
1938 }
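/*
 * Editor's note: this is the highmem counterpart of get_buffer() below.  The
 * extra indirection through @buffer exists because a highmem page has no
 * permanent kernel mapping, so the caller cannot write to it directly; the
 * data takes a detour through the lowmem @buffer and is kmapped into place
 * by copy_last_highmem_page() at the start of the next snapshot_write_next()
 * call.
 */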
1939 
1940 /**
1941  *	copy_last_highmem_page - copy the contents of a highmem image page
1942  *	from @buffer, where the caller of snapshot_write_next() has placed
1943  *	them, to the right location represented by @last_highmem_page.
1944  */
1945 
1946 static void copy_last_highmem_page(void)
1947 {
1948 	if (last_highmem_page) {
1949 		void *dst;
1950 
1951 		dst = kmap_atomic(last_highmem_page, KM_USER0);
1952 		memcpy(dst, buffer, PAGE_SIZE);
1953 		kunmap_atomic(dst, KM_USER0);
1954 		last_highmem_page = NULL;
1955 	}
1956 }
1957 
1958 static inline int last_highmem_page_copied(void)
1959 {
1960 	return !last_highmem_page;
1961 }
1962 
1963 static inline void free_highmem_data(void)
1964 {
1965 	if (safe_highmem_bm)
1966 		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
1967 
1968 	if (buffer)
1969 		free_image_page(buffer, PG_UNSAFE_CLEAR);
1970 }
1971 #else
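/*
 * Stub versions for !CONFIG_HIGHMEM builds, so that the code below can avoid
 * #ifdefs.  (get_safe_write_buffer() appears to be an unused leftover; it
 * has no CONFIG_HIGHMEM counterpart above.)
 */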
1972 static inline int get_safe_write_buffer(void) { return 0; }
1973 
1974 static unsigned int
1975 count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
1976 
1977 static inline int
1978 prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
1979 {
1980 	return 0;
1981 }
1982 
1983 static inline void *
1984 get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
1985 {
1986 	return ERR_PTR(-EINVAL);
1987 }
1988 
1989 static inline void copy_last_highmem_page(void) {}
1990 static inline int last_highmem_page_copied(void) { return 1; }
1991 static inline void free_highmem_data(void) {}
1992 #endif /* CONFIG_HIGHMEM */
1993 
1994 /**
1995  *	prepare_image - use the memory bitmap @bm to mark the pages that will
1996  *	be overwritten in the process of restoring the system memory state
1997  *	from the suspend image ("unsafe" pages) and allocate memory for the
1998  *	image.
1999  *
2000  *	The idea is to allocate a new memory bitmap first and then allocate
2001  *	as many pages as needed for the image data, but not to assign these
2002  *	pages to specific tasks initially.  Instead, we just mark them as
2003  *	allocated and create a list of "safe" pages that will be used
2004  *	later.  On systems with high memory, a list of "safe" highmem pages is
2005  *	also created.
2006  */
2007 
2008 #define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
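/*
 * Editor's worked example: assuming LINKED_PAGE_DATA_SIZE is
 * PAGE_SIZE - sizeof(void *) (4088 bytes with 4 KiB pages on a 64-bit
 * machine) and struct pbe holds three pointer-sized fields (24 bytes),
 * PBES_PER_LINKED_PAGE = 4088 / 24 = 170, with the division truncating.
 */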
2009 
2010 static int
2011 prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2012 {
2013 	unsigned int nr_pages, nr_highmem;
2014 	struct linked_page *sp_list, *lp;
2015 	int error;
2016 
2017 	/* If there is no highmem, the buffer will not be necessary */
2018 	free_image_page(buffer, PG_UNSAFE_CLEAR);
2019 	buffer = NULL;
2020 
2021 	nr_highmem = count_highmem_image_pages(bm);
2022 	error = mark_unsafe_pages(bm);
2023 	if (error)
2024 		goto Free;
2025 
2026 	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2027 	if (error)
2028 		goto Free;
2029 
2030 	duplicate_memory_bitmap(new_bm, bm);
2031 	memory_bm_free(bm, PG_UNSAFE_KEEP);
2032 	if (nr_highmem > 0) {
2033 		error = prepare_highmem_image(bm, &nr_highmem);
2034 		if (error)
2035 			goto Free;
2036 	}
2037 	/* Reserve some safe pages for potential later use.
2038 	 *
2039 	 * NOTE: This way we make sure there will be enough safe pages for the
2040 	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
2041 	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2042 	 */
2043 	sp_list = NULL;
2044 	/* nr_copy_pages cannot be less than allocated_unsafe_pages */
2045 	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2046 	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2047 	while (nr_pages > 0) {
2048 		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2049 		if (!lp) {
2050 			error = -ENOMEM;
2051 			goto Free;
2052 		}
2053 		lp->next = sp_list;
2054 		sp_list = lp;
2055 		nr_pages--;
2056 	}
2057 	/* Preallocate memory for the image */
2058 	safe_pages_list = NULL;
2059 	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2060 	while (nr_pages > 0) {
2061 		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2062 		if (!lp) {
2063 			error = -ENOMEM;
2064 			goto Free;
2065 		}
2066 		if (!swsusp_page_is_free(virt_to_page(lp))) {
2067 			/* The page is "safe", add it to the list */
2068 			lp->next = safe_pages_list;
2069 			safe_pages_list = lp;
2070 		}
2071 		/* Mark the page as allocated */
2072 		swsusp_set_page_forbidden(virt_to_page(lp));
2073 		swsusp_set_page_free(virt_to_page(lp));
2074 		nr_pages--;
2075 	}
2076 	/* Free the reserved safe pages so that chain_alloc() can use them */
2077 	while (sp_list) {
2078 		lp = sp_list->next;
2079 		free_image_page(sp_list, PG_UNSAFE_CLEAR);
2080 		sp_list = lp;
2081 	}
2082 	return 0;
2083 
2084  Free:
2085 	swsusp_free();
2086 	return error;
2087 }
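/*
 * Editor's worked example for the reservation above: an image with
 * nr_copy_pages = 1000, nr_highmem = 0 and allocated_unsafe_pages = 100
 * needs at most 900 PBEs, i.e. DIV_ROUND_UP(900, 170) = 6 linked pages
 * (taking PBES_PER_LINKED_PAGE = 170 as computed above).  Those 6 safe
 * pages are reserved first, the 900 data pages are allocated next, and the
 * reserve is then released so that the chain_alloc() calls made later by
 * get_buffer() find enough safe pages.
 */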
2088 
2089 /**
2090  *	get_buffer - compute the address that snapshot_write_next() should
2091  *	set for its caller to write to.
2092  */
2093 
2094 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2095 {
2096 	struct pbe *pbe;
2097 	struct page *page;
2098 	unsigned long pfn = memory_bm_next_pfn(bm);
2099 
2100 	if (pfn == BM_END_OF_MAP)
2101 		return ERR_PTR(-EFAULT);
2102 
2103 	page = pfn_to_page(pfn);
2104 	if (PageHighMem(page))
2105 		return get_highmem_page_buffer(page, ca);
2106 
2107 	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2108 		/* We have allocated the "original" page frame and we can
2109 		 * use it directly to store the loaded page.
2110 		 */
2111 		return page_address(page);
2112 
2113 	/* The "original" page frame has not been allocated and we have to
2114 	 * use a "safe" page frame to store the loaded page.
2115 	 */
2116 	pbe = chain_alloc(ca, sizeof(struct pbe));
2117 	if (!pbe) {
2118 		swsusp_free();
2119 		return ERR_PTR(-ENOMEM);
2120 	}
2121 	pbe->orig_address = page_address(page);
2122 	pbe->address = safe_pages_list;
2123 	safe_pages_list = safe_pages_list->next;
2124 	pbe->next = restore_pblist;
2125 	restore_pblist = pbe;
2126 	return pbe->address;
2127 }
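/*
 * Illustrative sketch (editor's addition, compiled out): once the image has
 * been loaded completely, the restore_pblist built above is walked and every
 * copy parked in a "safe" frame is moved into its original frame.  The real
 * loop lives in the architecture-specific resume code; schematically:
 */
#if 0
static void restore_from_pblist_sketch(void)
{
	struct pbe *p;

	for (p = restore_pblist; p; p = p->next)
		copy_page(p->orig_address, p->address);
}
#endif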
2128 
2129 /**
2130  *	snapshot_write_next - used for writing the system memory snapshot.
2131  *
2132  *	On the first call, @handle should point to a zeroed
2133  *	snapshot_handle structure.  The structure gets updated and a pointer
2134  *	to it should be passed to this function on each subsequent call.
2135  *
2136  *	The @count parameter should contain the number of bytes the caller
2137  *	wants to write to the image.  It must not be zero.
2138  *
2139  *	On success the function returns a positive number.  Then, the caller
2140  *	is allowed to write up to the returned number of bytes to the memory
2141  *	location computed by the data_of() macro.  The number returned
2142  *	may be smaller than @count, but this only happens if the write would
2143  *	cross a page boundary otherwise.
2144  *
2145  *	The function returns 0 to indicate the "end of file" condition,
2146  *	and a negative number is returned on error.  In such cases the
2147  *	structure pointed to by @handle is not updated and should not be used
2148  *	any more.
2149  */
2150 
2151 int snapshot_write_next(struct snapshot_handle *handle, size_t count)
2152 {
2153 	static struct chain_allocator ca;
2154 	int error = 0;
2155 
2156 	/* Check if we have already loaded the entire image */
2157 	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
2158 		return 0;
2159 
2160 	if (handle->offset == 0) {
2161 		if (!buffer)
2162 			/* This makes the buffer be freed by swsusp_free() */
2163 			buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2164 
2165 		if (!buffer)
2166 			return -ENOMEM;
2167 
2168 		handle->buffer = buffer;
2169 	}
2170 	handle->sync_read = 1;
2171 	if (handle->prev < handle->cur) {
2172 		if (handle->prev == 0) {
2173 			error = load_header(buffer);
2174 			if (error)
2175 				return error;
2176 
2177 			error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2178 			if (error)
2179 				return error;
2180 
2181 		} else if (handle->prev <= nr_meta_pages) {
2182 			error = unpack_orig_pfns(buffer, &copy_bm);
2183 			if (error)
2184 				return error;
2185 
2186 			if (handle->prev == nr_meta_pages) {
2187 				error = prepare_image(&orig_bm, &copy_bm);
2188 				if (error)
2189 					return error;
2190 
2191 				chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2192 				memory_bm_position_reset(&orig_bm);
2193 				restore_pblist = NULL;
2194 				handle->buffer = get_buffer(&orig_bm, &ca);
2195 				handle->sync_read = 0;
2196 				if (IS_ERR(handle->buffer))
2197 					return PTR_ERR(handle->buffer);
2198 			}
2199 		} else {
2200 			copy_last_highmem_page();
2201 			handle->buffer = get_buffer(&orig_bm, &ca);
2202 			if (IS_ERR(handle->buffer))
2203 				return PTR_ERR(handle->buffer);
2204 			if (handle->buffer != buffer)
2205 				handle->sync_read = 0;
2206 		}
2207 		handle->prev = handle->cur;
2208 	}
2209 	handle->buf_offset = handle->cur_offset;
2210 	if (handle->cur_offset + count >= PAGE_SIZE) {
2211 		count = PAGE_SIZE - handle->cur_offset;
2212 		handle->cur_offset = 0;
2213 		handle->cur++;
2214 	} else {
2215 		handle->cur_offset += count;
2216 	}
2217 	handle->offset += count;
2218 	return count;
2219 }
2220 
2221 /**
2222  *	snapshot_write_finalize - must be called after the last call to
2223  *	snapshot_write_next() in case the last page in the image happens
2224  *	to be a highmem page and its contents should be stored in
2225  *	highmem.  Additionally, it releases the memory that will not be
2226  *	used any more.
2227  */
2228 
2229 void snapshot_write_finalize(struct snapshot_handle *handle)
2230 {
2231 	copy_last_highmem_page();
2232 	/* Free only if we have loaded the image entirely */
2233 	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages) {
2234 		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
2235 		free_highmem_data();
2236 	}
2237 }
2238 
2239 int snapshot_image_loaded(struct snapshot_handle *handle)
2240 {
2241 	return !(!nr_copy_pages || !last_highmem_page_copied() ||
2242 			handle->cur <= nr_meta_pages + nr_copy_pages);
2243 }
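/*
 * Illustrative sketch (editor's addition, compiled out): a typical consumer
 * of the three routines above loads the image one page at a time.  The
 * read_page() helper is a hypothetical stand-in for the real I/O (cf. the
 * swap reader in kernel/power/swap.c):
 */
#if 0
static int load_image_sketch(struct snapshot_handle *handle)
{
	int ret;

	for (;;) {
		ret = snapshot_write_next(handle, PAGE_SIZE);
		if (ret <= 0)
			break;	/* 0 means "end of image", < 0 an error */
		ret = read_page(data_of(*handle), ret);
		if (ret)
			break;
	}
	if (!ret) {
		snapshot_write_finalize(handle);
		if (!snapshot_image_loaded(handle))
			ret = -ENODATA;
	}
	return ret;
}
#endif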
2244 
2245 #ifdef CONFIG_HIGHMEM
2246 /* Assumes that @buf is ready and points to a "safe" page */
2247 static inline void
2248 swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
2249 {
2250 	void *kaddr1, *kaddr2;
2251 
2252 	kaddr1 = kmap_atomic(p1, KM_USER0);
2253 	kaddr2 = kmap_atomic(p2, KM_USER1);
2254 	memcpy(buf, kaddr1, PAGE_SIZE);
2255 	memcpy(kaddr1, kaddr2, PAGE_SIZE);
2256 	memcpy(kaddr2, buf, PAGE_SIZE);
2257 	kunmap_atomic(kaddr1, KM_USER0);
2258 	kunmap_atomic(kaddr2, KM_USER1);
2259 }
2260 
2261 /**
2262  *	restore_highmem - for each highmem page that was allocated before
2263  *	the suspend and included in the suspend image, and has also been
2264  *	allocated by the "resume" kernel, swap its current (i.e. "before
2265  *	resume") contents with the previous (i.e. "before suspend") ones.
2266  *
2267  *	If the resume eventually fails, we can call this function once
2268  *	again and restore the "before resume" highmem state.
2269  */
2270 
2271 int restore_highmem(void)
2272 {
2273 	struct highmem_pbe *pbe = highmem_pblist;
2274 	void *buf;
2275 
2276 	if (!pbe)
2277 		return 0;
2278 
2279 	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2280 	if (!buf)
2281 		return -ENOMEM;
2282 
2283 	while (pbe) {
2284 		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2285 		pbe = pbe->next;
2286 	}
2287 	free_image_page(buf, PG_UNSAFE_CLEAR);
2288 	return 0;
2289 }
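/*
 * Editor's note: the swap performed here is its own inverse, which is what
 * the "call this function once again" remark above relies on.  Hypothetical
 * call-site sketch (jump_to_image_kernel() is a stand-in, compiled out):
 */
#if 0
static int resume_sketch(void)
{
	int error;

	error = restore_highmem();
	if (error)
		return error;
	error = jump_to_image_kernel();	/* returns only on failure */
	restore_highmem();	/* put the "before resume" contents back */
	return error;
}
#endif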
2290 #endif /* CONFIG_HIGHMEM */
2291