xref: /linux/kernel/power/snapshot.c (revision c537b994505099b7197e7d3125b942ecbcc51eb6)
1 /*
2  * linux/kernel/power/snapshot.c
3  *
4  * This file provides system snapshot/restore functionality for swsusp.
5  *
6  * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
7  * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
8  *
9  * This file is released under the GPLv2.
10  *
11  */
12 
13 #include <linux/version.h>
14 #include <linux/module.h>
15 #include <linux/mm.h>
16 #include <linux/suspend.h>
17 #include <linux/smp_lock.h>
18 #include <linux/delay.h>
19 #include <linux/bitops.h>
20 #include <linux/spinlock.h>
21 #include <linux/kernel.h>
22 #include <linux/pm.h>
23 #include <linux/device.h>
24 #include <linux/bootmem.h>
25 #include <linux/syscalls.h>
26 #include <linux/console.h>
27 #include <linux/highmem.h>
28 
29 #include <asm/uaccess.h>
30 #include <asm/mmu_context.h>
31 #include <asm/pgtable.h>
32 #include <asm/tlbflush.h>
33 #include <asm/io.h>
34 
35 #include "power.h"
36 
37 /* List of PBEs needed for restoring the pages that were allocated before
38  * the suspend and included in the suspend image, but have also been
39  * allocated by the "resume" kernel, so their contents cannot be written
40  * directly to their "original" page frames.
41  */
42 struct pbe *restore_pblist;
43 
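/*
 * Illustrative sketch (not part of this file): after the image has been
 * loaded, the architecture-specific resume code consumes this list
 * roughly as follows, moving every "safe" copy back into its original
 * page frame:
 *
 *	struct pbe *pbe;
 *
 *	for (pbe = restore_pblist; pbe; pbe = pbe->next)
 *		copy_page(pbe->orig_address, pbe->address);
 */
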
44 /* Pointer to an auxiliary buffer (1 page) */
45 static void *buffer;
46 
47 /**
48  *	@safe_needed - on resume, for storing the PBE list and the image,
49  *	we can only use memory pages that do not conflict with the pages
50  *	used before suspend.  The unsafe pages have PageNosaveFree set
51  *	and we count them using allocated_unsafe_pages.
52  *
53  *	Each allocated image page is marked as PageNosave and PageNosaveFree
54  *	so that swsusp_free() can release it.
55  */
56 
57 #define PG_ANY		0
58 #define PG_SAFE		1
59 #define PG_UNSAFE_CLEAR	1
60 #define PG_UNSAFE_KEEP	0
61 
62 static unsigned int allocated_unsafe_pages;
63 
64 static void *get_image_page(gfp_t gfp_mask, int safe_needed)
65 {
66 	void *res;
67 
68 	res = (void *)get_zeroed_page(gfp_mask);
69 	if (safe_needed)
70 		while (res && PageNosaveFree(virt_to_page(res))) {
71 			/* The page is unsafe, mark it for swsusp_free() */
72 			SetPageNosave(virt_to_page(res));
73 			allocated_unsafe_pages++;
74 			res = (void *)get_zeroed_page(gfp_mask);
75 		}
76 	if (res) {
77 		SetPageNosave(virt_to_page(res));
78 		SetPageNosaveFree(virt_to_page(res));
79 	}
80 	return res;
81 }
82 
83 unsigned long get_safe_page(gfp_t gfp_mask)
84 {
85 	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
86 }
87 
88 static struct page *alloc_image_page(gfp_t gfp_mask)
89 {
90 	struct page *page;
91 
92 	page = alloc_page(gfp_mask);
93 	if (page) {
94 		SetPageNosave(page);
95 		SetPageNosaveFree(page);
96 	}
97 	return page;
98 }
99 
100 /**
101  *	free_image_page - free page represented by @addr, allocated with
102  *	get_image_page (page flags set by it must be cleared)
103  */
104 
105 static inline void free_image_page(void *addr, int clear_nosave_free)
106 {
107 	struct page *page;
108 
109 	BUG_ON(!virt_addr_valid(addr));
110 
111 	page = virt_to_page(addr);
112 
113 	ClearPageNosave(page);
114 	if (clear_nosave_free)
115 		ClearPageNosaveFree(page);
116 
117 	__free_page(page);
118 }
119 
120 /* struct linked_page is used to build chains of pages */
121 
122 #define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))
123 
124 struct linked_page {
125 	struct linked_page *next;
126 	char data[LINKED_PAGE_DATA_SIZE];
127 } __attribute__((packed));
128 
129 static inline void
130 free_list_of_pages(struct linked_page *list, int clear_page_nosave)
131 {
132 	while (list) {
133 		struct linked_page *lp = list->next;
134 
135 		free_image_page(list, clear_page_nosave);
136 		list = lp;
137 	}
138 }
139 
140 /**
141  *	struct chain_allocator is used for allocating small objects out of
142  *	a linked list of pages called 'the chain'.
143  *
144  *	The chain grows each time there is no room for a new object in
145  *	the current page.  The allocated objects cannot be freed individually.
146  *	It is only possible to free them all at once, by freeing the entire
147  *	chain.
148  *
149  *	NOTE: The chain allocator may be inefficient if the allocated objects
150  *	are not much smaller than PAGE_SIZE.
151  */
152 
153 struct chain_allocator {
154 	struct linked_page *chain;	/* the chain */
155 	unsigned int used_space;	/* total size of objects allocated out
156 					 * of the current page
157 					 */
158 	gfp_t gfp_mask;		/* mask for allocating pages */
159 	int safe_needed;	/* if set, only "safe" pages are allocated */
160 };
161 
162 static void
163 chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
164 {
165 	ca->chain = NULL;
166 	ca->used_space = LINKED_PAGE_DATA_SIZE;
167 	ca->gfp_mask = gfp_mask;
168 	ca->safe_needed = safe_needed;
169 }
170 
171 static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
172 {
173 	void *ret;
174 
175 	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
176 		struct linked_page *lp;
177 
178 		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
179 		if (!lp)
180 			return NULL;
181 
182 		lp->next = ca->chain;
183 		ca->chain = lp;
184 		ca->used_space = 0;
185 	}
186 	ret = ca->chain->data + ca->used_space;
187 	ca->used_space += size;
188 	return ret;
189 }
190 
191 static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
192 {
193 	free_list_of_pages(ca->chain, clear_page_nosave);
194 	memset(ca, 0, sizeof(struct chain_allocator));
195 }
196 
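/*
 * Usage sketch for the chain allocator (illustrative only; a struct pbe
 * is used as the example object, but any sufficiently small object works):
 *
 *	struct chain_allocator ca;
 *	struct pbe *p;
 *
 *	chain_init(&ca, GFP_ATOMIC, PG_SAFE);
 *	p = chain_alloc(&ca, sizeof(struct pbe));
 *	if (!p)
 *		return -ENOMEM;
 *	(... use p ...)
 *	chain_free(&ca, PG_UNSAFE_CLEAR);	(frees all objects at once)
 */
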
197 /**
198  *	Data types related to memory bitmaps.
199  *
200  *	Memory bitmap is a structure consisting of many linked lists of
201  *	objects.  The main list's elements are of type struct zone_bitmap
202  *	and each of them corresponds to one zone.  For each zone bitmap
203  *	object there is a list of objects of type struct bm_block that
204  *	represent the blocks of bit chunks in which information is
205  *	stored.
206  *
207  *	struct memory_bitmap contains a pointer to the main list of zone
208  *	bitmap objects, a struct bm_position used for browsing the bitmap,
209  *	and a pointer to the list of pages used for allocating all of the
210  *	zone bitmap objects and bitmap block objects.
211  *
212  *	NOTE: It has to be possible to lay out the bitmap in memory
213  *	using only allocations of order 0.  Additionally, the bitmap is
214  *	designed to work with an arbitrary number of zones (this is over the
215  *	top for now, but let's avoid making unnecessary assumptions ;-).
216  *
217  *	struct zone_bitmap contains a pointer to a list of bitmap block
218  *	objects and a pointer to the bitmap block object that has been
219  *	most recently used for setting bits.  Additionally, it contains the
220  *	pfns that correspond to the start and end of the represented zone.
221  *
222  *	struct bm_block contains a pointer to the memory page in which
223  *	information is stored (in the form of a block of bit chunks
224  *	of type unsigned long each).  It also contains the pfns that
225  *	correspond to the start and end of the represented memory area and
226  *	the number of bit chunks in the block.
227  *
228  *	NOTE: Memory bitmaps are used for two types of operations only:
229  *	"set a bit" and "find the next bit set".  Moreover, the searching
230  *	is always carried out after all of the "set a bit" operations
231  *	on a given bitmap.
232  */
233 
234 #define BM_END_OF_MAP	(~0UL)
235 
236 #define BM_CHUNKS_PER_BLOCK	(PAGE_SIZE / sizeof(long))
237 #define BM_BITS_PER_CHUNK	(sizeof(long) << 3)
238 #define BM_BITS_PER_BLOCK	(PAGE_SIZE << 3)
239 
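/*
 * Geometry example (illustrative, assuming PAGE_SIZE == 4096 and 64-bit
 * longs): BM_BITS_PER_CHUNK == 64, BM_CHUNKS_PER_BLOCK == 512 and
 * BM_BITS_PER_BLOCK == 32768, so a single bm_block covers 32768 page
 * frames.  Within a block, the bit of a given pfn is located the way
 * memory_bm_set_bit() below does it:
 *
 *	offset = pfn - bb->start_pfn;
 *	chunk  = offset / BM_BITS_PER_CHUNK;
 *	bit    = offset % BM_BITS_PER_CHUNK;
 */
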
240 struct bm_block {
241 	struct bm_block *next;		/* next element of the list */
242 	unsigned long start_pfn;	/* pfn represented by the first bit */
243 	unsigned long end_pfn;	/* pfn represented by the last bit plus 1 */
244 	unsigned int size;	/* number of bit chunks */
245 	unsigned long *data;	/* chunks of bits representing pages */
246 };
247 
248 struct zone_bitmap {
249 	struct zone_bitmap *next;	/* next element of the list */
250 	unsigned long start_pfn;	/* minimal pfn in this zone */
251 	unsigned long end_pfn;		/* maximal pfn in this zone plus 1 */
252 	struct bm_block *bm_blocks;	/* list of bitmap blocks */
253 	struct bm_block *cur_block;	/* recently used bitmap block */
254 };
255 
256 /* struct bm_position is used for browsing memory bitmaps */
257 
258 struct bm_position {
259 	struct zone_bitmap *zone_bm;
260 	struct bm_block *block;
261 	int chunk;
262 	int bit;
263 };
264 
265 struct memory_bitmap {
266 	struct zone_bitmap *zone_bm_list;	/* list of zone bitmaps */
267 	struct linked_page *p_list;	/* list of pages used to store zone
268 					 * bitmap objects and bitmap block
269 					 * objects
270 					 */
271 	struct bm_position cur;	/* most recently used bit position */
272 };
273 
274 /* Functions that operate on memory bitmaps */
275 
276 static inline void memory_bm_reset_chunk(struct memory_bitmap *bm)
277 {
278 	bm->cur.chunk = 0;
279 	bm->cur.bit = -1;
280 }
281 
282 static void memory_bm_position_reset(struct memory_bitmap *bm)
283 {
284 	struct zone_bitmap *zone_bm;
285 
286 	zone_bm = bm->zone_bm_list;
287 	bm->cur.zone_bm = zone_bm;
288 	bm->cur.block = zone_bm->bm_blocks;
289 	memory_bm_reset_chunk(bm);
290 }
291 
292 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
293 
294 /**
295  *	create_bm_block_list - create a list of bitmap block objects
296  */
297 
298 static inline struct bm_block *
299 create_bm_block_list(unsigned int nr_blocks, struct chain_allocator *ca)
300 {
301 	struct bm_block *bblist = NULL;
302 
303 	while (nr_blocks-- > 0) {
304 		struct bm_block *bb;
305 
306 		bb = chain_alloc(ca, sizeof(struct bm_block));
307 		if (!bb)
308 			return NULL;
309 
310 		bb->next = bblist;
311 		bblist = bb;
312 	}
313 	return bblist;
314 }
315 
316 /**
317  *	create_zone_bm_list - create a list of zone bitmap objects
318  */
319 
320 static inline struct zone_bitmap *
321 create_zone_bm_list(unsigned int nr_zones, struct chain_allocator *ca)
322 {
323 	struct zone_bitmap *zbmlist = NULL;
324 
325 	while (nr_zones-- > 0) {
326 		struct zone_bitmap *zbm;
327 
328 		zbm = chain_alloc(ca, sizeof(struct zone_bitmap));
329 		if (!zbm)
330 			return NULL;
331 
332 		zbm->next = zbmlist;
333 		zbmlist = zbm;
334 	}
335 	return zbmlist;
336 }
337 
338 /**
339  *	memory_bm_create - allocate memory for a memory bitmap
340  */
341 
342 static int
343 memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
344 {
345 	struct chain_allocator ca;
346 	struct zone *zone;
347 	struct zone_bitmap *zone_bm;
348 	struct bm_block *bb;
349 	unsigned int nr;
350 
351 	chain_init(&ca, gfp_mask, safe_needed);
352 
353 	/* Compute the number of zones */
354 	nr = 0;
355 	for_each_zone(zone)
356 		if (populated_zone(zone))
357 			nr++;
358 
359 	/* Allocate the list of zones bitmap objects */
360 	zone_bm = create_zone_bm_list(nr, &ca);
361 	bm->zone_bm_list = zone_bm;
362 	if (!zone_bm) {
363 		chain_free(&ca, PG_UNSAFE_CLEAR);
364 		return -ENOMEM;
365 	}
366 
367 	/* Initialize the zone bitmap objects */
368 	for_each_zone(zone) {
369 		unsigned long pfn;
370 
371 		if (!populated_zone(zone))
372 			continue;
373 
374 		zone_bm->start_pfn = zone->zone_start_pfn;
375 		zone_bm->end_pfn = zone->zone_start_pfn + zone->spanned_pages;
376 		/* Allocate the list of bitmap block objects */
377 		nr = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
378 		bb = create_bm_block_list(nr, &ca);
379 		zone_bm->bm_blocks = bb;
380 		zone_bm->cur_block = bb;
381 		if (!bb)
382 			goto Free;
383 
384 		nr = zone->spanned_pages;
385 		pfn = zone->zone_start_pfn;
386 		/* Initialize the bitmap block objects */
387 		while (bb) {
388 			unsigned long *ptr;
389 
390 			ptr = get_image_page(gfp_mask, safe_needed);
391 			bb->data = ptr;
392 			if (!ptr)
393 				goto Free;
394 
395 			bb->start_pfn = pfn;
396 			if (nr >= BM_BITS_PER_BLOCK) {
397 				pfn += BM_BITS_PER_BLOCK;
398 				bb->size = BM_CHUNKS_PER_BLOCK;
399 				nr -= BM_BITS_PER_BLOCK;
400 			} else {
401 				/* This is executed only once in the loop */
402 				pfn += nr;
403 				bb->size = DIV_ROUND_UP(nr, BM_BITS_PER_CHUNK);
404 			}
405 			bb->end_pfn = pfn;
406 			bb = bb->next;
407 		}
408 		zone_bm = zone_bm->next;
409 	}
410 	bm->p_list = ca.chain;
411 	memory_bm_position_reset(bm);
412 	return 0;
413 
414  Free:
415 	bm->p_list = ca.chain;
416 	memory_bm_free(bm, PG_UNSAFE_CLEAR);
417 	return -ENOMEM;
418 }
419 
420 /**
421  *	memory_bm_free - free memory occupied by the memory bitmap @bm
422  */
423 
424 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
425 {
426 	struct zone_bitmap *zone_bm;
427 
428 	/* Free the list of bit blocks for each zone_bitmap object */
429 	zone_bm = bm->zone_bm_list;
430 	while (zone_bm) {
431 		struct bm_block *bb;
432 
433 		bb = zone_bm->bm_blocks;
434 		while (bb) {
435 			if (bb->data)
436 				free_image_page(bb->data, clear_nosave_free);
437 			bb = bb->next;
438 		}
439 		zone_bm = zone_bm->next;
440 	}
441 	free_list_of_pages(bm->p_list, clear_nosave_free);
442 	bm->zone_bm_list = NULL;
443 }
444 
445 /**
446  *	memory_bm_set_bit - set the bit in the bitmap @bm that corresponds
447  *	to the given pfn.  The cur.zone_bm member of @bm and the cur_block
448  *	member of @bm->cur.zone_bm are updated.
449  *
450  *	If the bit cannot be set, the function returns -EINVAL.
451  */
452 
453 static int
454 memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
455 {
456 	struct zone_bitmap *zone_bm;
457 	struct bm_block *bb;
458 
459 	/* Check if the pfn is from the current zone */
460 	zone_bm = bm->cur.zone_bm;
461 	if (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
462 		zone_bm = bm->zone_bm_list;
463 		/* We don't assume that the zones are sorted by pfns */
464 		while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
465 			zone_bm = zone_bm->next;
466 			if (unlikely(!zone_bm))
467 				return -EINVAL;
468 		}
469 		bm->cur.zone_bm = zone_bm;
470 	}
471 	/* Check if the pfn corresponds to the current bitmap block */
472 	bb = zone_bm->cur_block;
473 	if (pfn < bb->start_pfn)
474 		bb = zone_bm->bm_blocks;
475 
476 	while (pfn >= bb->end_pfn) {
477 		bb = bb->next;
478 		if (unlikely(!bb))
479 			return -EINVAL;
480 	}
481 	zone_bm->cur_block = bb;
482 	pfn -= bb->start_pfn;
483 	set_bit(pfn % BM_BITS_PER_CHUNK, bb->data + pfn / BM_BITS_PER_CHUNK);
484 	return 0;
485 }
486 
487 /* Two auxiliary functions for memory_bm_next_pfn */
488 
489 /* Find the next set bit in the given chunk, if there is one */
490 
491 static inline int next_bit_in_chunk(int bit, unsigned long *chunk_p)
492 {
493 	bit++;
494 	while (bit < BM_BITS_PER_CHUNK) {
495 		if (test_bit(bit, chunk_p))
496 			return bit;
497 
498 		bit++;
499 	}
500 	return -1;
501 }
502 
503 /* Find a chunk containing some set bits in a given block of bits */
504 
505 static inline int next_chunk_in_block(int n, struct bm_block *bb)
506 {
507 	n++;
508 	while (n < bb->size) {
509 		if (bb->data[n])
510 			return n;
511 
512 		n++;
513 	}
514 	return -1;
515 }
516 
517 /**
518  *	memory_bm_next_pfn - find the pfn that corresponds to the next set bit
519  *	in the bitmap @bm.  If the pfn cannot be found, BM_END_OF_MAP is
520  *	returned.
521  *
522  *	It is required to run memory_bm_position_reset() before the first call to
523  *	this function.
524  */
525 
526 static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
527 {
528 	struct zone_bitmap *zone_bm;
529 	struct bm_block *bb;
530 	int chunk;
531 	int bit;
532 
533 	do {
534 		bb = bm->cur.block;
535 		do {
536 			chunk = bm->cur.chunk;
537 			bit = bm->cur.bit;
538 			do {
539 				bit = next_bit_in_chunk(bit, bb->data + chunk);
540 				if (bit >= 0)
541 					goto Return_pfn;
542 
543 				chunk = next_chunk_in_block(chunk, bb);
544 				bit = -1;
545 			} while (chunk >= 0);
546 			bb = bb->next;
547 			bm->cur.block = bb;
548 			memory_bm_reset_chunk(bm);
549 		} while (bb);
550 		zone_bm = bm->cur.zone_bm->next;
551 		if (zone_bm) {
552 			bm->cur.zone_bm = zone_bm;
553 			bm->cur.block = zone_bm->bm_blocks;
554 			memory_bm_reset_chunk(bm);
555 		}
556 	} while (zone_bm);
557 	memory_bm_position_reset(bm);
558 	return BM_END_OF_MAP;
559 
560  Return_pfn:
561 	bm->cur.chunk = chunk;
562 	bm->cur.bit = bit;
563 	return bb->start_pfn + chunk * BM_BITS_PER_CHUNK + bit;
564 }
565 
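/*
 * Typical iteration pattern (illustrative): once all of the interesting
 * bits have been set, a bitmap is walked the way duplicate_memory_bitmap()
 * and copy_data_pages() do it:
 *
 *	unsigned long pfn;
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *			pfn = memory_bm_next_pfn(bm))
 *		(... process pfn ...)
 */
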
566 /**
567  *	snapshot_additional_pages - estimate the number of additional pages
568  *	needed for setting up the suspend image data structures for the
569  *	given zone (usually the returned value is greater than the exact number)
570  */
571 
572 unsigned int snapshot_additional_pages(struct zone *zone)
573 {
574 	unsigned int res;
575 
576 	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
577 	res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
578 	return 2 * res;
579 }
580 
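/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096, 64-bit longs
 * and a zone spanning 1 GB, ie. 262144 page frames): one bitmap needs
 * DIV_ROUND_UP(262144, 32768) == 8 pages of bitmap data plus 1 page for
 * the 8 bm_block structures, and the result is doubled because two
 * bitmaps (orig_bm and copy_bm) are created, so the estimate is 18 pages.
 */
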
581 #ifdef CONFIG_HIGHMEM
582 /**
583  *	count_free_highmem_pages - compute the total number of free highmem
584  *	pages, system-wide.
585  */
586 
587 static unsigned int count_free_highmem_pages(void)
588 {
589 	struct zone *zone;
590 	unsigned int cnt = 0;
591 
592 	for_each_zone(zone)
593 		if (populated_zone(zone) && is_highmem(zone))
594 			cnt += zone_page_state(zone, NR_FREE_PAGES);
595 
596 	return cnt;
597 }
598 
599 /**
600  *	saveable_highmem_page - Determine whether a highmem page should be
601  *	included in the suspend image.
602  *
603  *	We should save the page if it isn't Nosave, NosaveFree, or Reserved,
604  *	and isn't part of a free chunk of pages.
605  */
606 
607 static struct page *saveable_highmem_page(unsigned long pfn)
608 {
609 	struct page *page;
610 
611 	if (!pfn_valid(pfn))
612 		return NULL;
613 
614 	page = pfn_to_page(pfn);
615 
616 	BUG_ON(!PageHighMem(page));
617 
618 	if (PageNosave(page) || PageReserved(page) || PageNosaveFree(page))
619 		return NULL;
620 
621 	return page;
622 }
623 
624 /**
625  *	count_highmem_pages - compute the total number of saveable highmem
626  *	pages.
627  */
628 
629 unsigned int count_highmem_pages(void)
630 {
631 	struct zone *zone;
632 	unsigned int n = 0;
633 
634 	for_each_zone(zone) {
635 		unsigned long pfn, max_zone_pfn;
636 
637 		if (!is_highmem(zone))
638 			continue;
639 
640 		mark_free_pages(zone);
641 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
642 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
643 			if (saveable_highmem_page(pfn))
644 				n++;
645 	}
646 	return n;
647 }
648 #else
649 static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; }
650 static inline unsigned int count_highmem_pages(void) { return 0; }
651 #endif /* CONFIG_HIGHMEM */
652 
653 /**
654  *	pfn_is_nosave - check if given pfn is in the 'nosave' section
655  */
656 
657 static inline int pfn_is_nosave(unsigned long pfn)
658 {
659 	unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
660 	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
661 	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
662 }
663 
664 /**
665  *	saveable - Determine whether a non-highmem page should be included in
666  *	the suspend image.
667  *
668  *	We should save the page if it isn't Nosave, is not in the range
669  *	of pages statically defined as 'unsaveable', and isn't part of
670  *	a free chunk of pages.
671  */
672 
673 static struct page *saveable_page(unsigned long pfn)
674 {
675 	struct page *page;
676 
677 	if (!pfn_valid(pfn))
678 		return NULL;
679 
680 	page = pfn_to_page(pfn);
681 
682 	BUG_ON(PageHighMem(page));
683 
684 	if (PageNosave(page) || PageNosaveFree(page))
685 		return NULL;
686 
687 	if (PageReserved(page) && pfn_is_nosave(pfn))
688 		return NULL;
689 
690 	return page;
691 }
692 
693 /**
694  *	count_data_pages - compute the total number of saveable non-highmem
695  *	pages.
696  */
697 
698 unsigned int count_data_pages(void)
699 {
700 	struct zone *zone;
701 	unsigned long pfn, max_zone_pfn;
702 	unsigned int n = 0;
703 
704 	for_each_zone(zone) {
705 		if (is_highmem(zone))
706 			continue;
707 
708 		mark_free_pages(zone);
709 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
710 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
711 			if (saveable_page(pfn))
712 				n++;
713 	}
714 	return n;
715 }
716 
717 /* This is needed because copy_page and memcpy are not usable for copying
718  * task structs.
719  */
720 static inline void do_copy_page(long *dst, long *src)
721 {
722 	int n;
723 
724 	for (n = PAGE_SIZE / sizeof(long); n; n--)
725 		*dst++ = *src++;
726 }
727 
728 #ifdef CONFIG_HIGHMEM
729 static inline struct page *
730 page_is_saveable(struct zone *zone, unsigned long pfn)
731 {
732 	return is_highmem(zone) ?
733 			saveable_highmem_page(pfn) : saveable_page(pfn);
734 }
735 
736 static inline void
737 copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
738 {
739 	struct page *s_page, *d_page;
740 	void *src, *dst;
741 
742 	s_page = pfn_to_page(src_pfn);
743 	d_page = pfn_to_page(dst_pfn);
744 	if (PageHighMem(s_page)) {
745 		src = kmap_atomic(s_page, KM_USER0);
746 		dst = kmap_atomic(d_page, KM_USER1);
747 		do_copy_page(dst, src);
748 		kunmap_atomic(src, KM_USER0);
749 		kunmap_atomic(dst, KM_USER1);
750 	} else {
751 		src = page_address(s_page);
752 		if (PageHighMem(d_page)) {
753 			/* Page pointed to by src may contain some kernel
754 			 * data modified by kmap_atomic()
755 			 */
756 			do_copy_page(buffer, src);
757 			dst = kmap_atomic(pfn_to_page(dst_pfn), KM_USER0);
758 			memcpy(dst, buffer, PAGE_SIZE);
759 			kunmap_atomic(dst, KM_USER0);
760 		} else {
761 			dst = page_address(d_page);
762 			do_copy_page(dst, src);
763 		}
764 	}
765 }
766 #else
767 #define page_is_saveable(zone, pfn)	saveable_page(pfn)
768 
769 static inline void
770 copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
771 {
772 	do_copy_page(page_address(pfn_to_page(dst_pfn)),
773 			page_address(pfn_to_page(src_pfn)));
774 }
775 #endif /* CONFIG_HIGHMEM */
776 
777 static void
778 copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
779 {
780 	struct zone *zone;
781 	unsigned long pfn;
782 
783 	for_each_zone(zone) {
784 		unsigned long max_zone_pfn;
785 
786 		mark_free_pages(zone);
787 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
788 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
789 			if (page_is_saveable(zone, pfn))
790 				memory_bm_set_bit(orig_bm, pfn);
791 	}
792 	memory_bm_position_reset(orig_bm);
793 	memory_bm_position_reset(copy_bm);
794 	do {
795 		pfn = memory_bm_next_pfn(orig_bm);
796 		if (likely(pfn != BM_END_OF_MAP))
797 			copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
798 	} while (pfn != BM_END_OF_MAP);
799 }
800 
801 /* Total number of image pages */
802 static unsigned int nr_copy_pages;
803 /* Number of pages needed for saving the original pfns of the image pages */
804 static unsigned int nr_meta_pages;
805 
806 /**
807  *	swsusp_free - free pages allocated for the suspend.
808  *
809  *	Suspend pages are allocated before the atomic copy is made, so we
810  *	need to release them after the resume.
811  */
812 
813 void swsusp_free(void)
814 {
815 	struct zone *zone;
816 	unsigned long pfn, max_zone_pfn;
817 
818 	for_each_zone(zone) {
819 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
820 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
821 			if (pfn_valid(pfn)) {
822 				struct page *page = pfn_to_page(pfn);
823 
824 				if (PageNosave(page) && PageNosaveFree(page)) {
825 					ClearPageNosave(page);
826 					ClearPageNosaveFree(page);
827 					__free_page(page);
828 				}
829 			}
830 	}
831 	nr_copy_pages = 0;
832 	nr_meta_pages = 0;
833 	restore_pblist = NULL;
834 	buffer = NULL;
835 }
836 
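/*
 * Page flag conventions used above (summary, derived from the code in
 * this file):
 *
 *	PageNosave && PageNosaveFree:	page allocated by this code;
 *					released by swsusp_free()
 *	!PageNosave && PageNosaveFree:	free page during suspend (set by
 *					mark_free_pages()) or "unsafe" page
 *					frame during resume (set by
 *					mark_unsafe_pages())
 */
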
837 #ifdef CONFIG_HIGHMEM
838 /**
839  *	count_pages_for_highmem - compute the number of non-highmem pages
840  *	that will be necessary for creating copies of highmem pages.
841  */
842 
843 static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
844 {
845 	unsigned int free_highmem = count_free_highmem_pages();
846 
847 	if (free_highmem >= nr_highmem)
848 		nr_highmem = 0;
849 	else
850 		nr_highmem -= free_highmem;
851 
852 	return nr_highmem;
853 }
854 #else
855 static unsigned int
856 count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
857 #endif /* CONFIG_HIGHMEM */
858 
859 /**
860  *	enough_free_mem - Make sure we have enough free memory for the
861  *	snapshot image.
862  */
863 
864 static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
865 {
866 	struct zone *zone;
867 	unsigned int free = 0, meta = 0;
868 
869 	for_each_zone(zone) {
870 		meta += snapshot_additional_pages(zone);
871 		if (!is_highmem(zone))
872 			free += zone_page_state(zone, NR_FREE_PAGES);
873 	}
874 
875 	nr_pages += count_pages_for_highmem(nr_highmem);
876 	pr_debug("swsusp: Normal pages needed: %u + %u + %u, available pages: %u\n",
877 		nr_pages, PAGES_FOR_IO, meta, free);
878 
879 	return free > nr_pages + PAGES_FOR_IO + meta;
880 }
881 
882 #ifdef CONFIG_HIGHMEM
883 /**
884  *	get_highmem_buffer - if there are some highmem pages in the suspend
885  *	image, we may need the buffer to copy them and/or load their data.
886  */
887 
888 static inline int get_highmem_buffer(int safe_needed)
889 {
890 	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
891 	return buffer ? 0 : -ENOMEM;
892 }
893 
894 /**
895  *	alloc_highmem_image_pages - allocate some highmem pages for the image.
896  *	Try to allocate as many pages as needed, but if the number of free
897  *	highmem pages is smaller than that, allocate them all.
898  */
899 
900 static inline unsigned int
901 alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
902 {
903 	unsigned int to_alloc = count_free_highmem_pages();
904 
905 	if (to_alloc > nr_highmem)
906 		to_alloc = nr_highmem;
907 
908 	nr_highmem -= to_alloc;
909 	while (to_alloc-- > 0) {
910 		struct page *page;
911 
912 		page = alloc_image_page(__GFP_HIGHMEM);
913 		memory_bm_set_bit(bm, page_to_pfn(page));
914 	}
915 	return nr_highmem;
916 }
917 #else
918 static inline int get_highmem_buffer(int safe_needed) { return 0; }
919 
920 static inline unsigned int
921 alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
922 #endif /* CONFIG_HIGHMEM */
923 
924 /**
925  *	swsusp_alloc - allocate memory for the suspend image
926  *
927  *	We first try to allocate as many highmem pages as there are
928  *	saveable highmem pages in the system.  If that fails, we allocate
929  *	non-highmem pages for the copies of the remaining highmem ones.
930  *
931  *	In this approach it is likely that the copies of highmem pages will
932  *	also be located in the high memory, because of the way in which
933  *	copy_data_pages() works.
934  */
935 
936 static int
937 swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
938 		unsigned int nr_pages, unsigned int nr_highmem)
939 {
940 	int error;
941 
942 	error = memory_bm_create(orig_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
943 	if (error)
944 		goto Free;
945 
946 	error = memory_bm_create(copy_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
947 	if (error)
948 		goto Free;
949 
950 	if (nr_highmem > 0) {
951 		error = get_highmem_buffer(PG_ANY);
952 		if (error)
953 			goto Free;
954 
955 		nr_pages += alloc_highmem_image_pages(copy_bm, nr_highmem);
956 	}
957 	while (nr_pages-- > 0) {
958 		struct page *page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
959 
960 		if (!page)
961 			goto Free;
962 
963 		memory_bm_set_bit(copy_bm, page_to_pfn(page));
964 	}
965 	return 0;
966 
967  Free:
968 	swsusp_free();
969 	return -ENOMEM;
970 }
971 
972 /* Memory bitmap used for marking saveable pages (during suspend) or the
973  * suspend image pages (during resume)
974  */
975 static struct memory_bitmap orig_bm;
976 /* Memory bitmap used on suspend for marking allocated pages that will contain
977  * the copies of saveable pages.  During resume it is initially used for
978  * marking the suspend image pages, but then its set bits are duplicated in
979  * @orig_bm and it is released.  Next, on systems with high memory, it may be
980  * used for marking "safe" highmem pages, but it has to be reinitialized for
981  * this purpose.
982  */
983 static struct memory_bitmap copy_bm;
984 
985 asmlinkage int swsusp_save(void)
986 {
987 	unsigned int nr_pages, nr_highmem;
988 
989 	printk("swsusp: critical section begin\n");
990 
991 	drain_local_pages();
992 	nr_pages = count_data_pages();
993 	nr_highmem = count_highmem_pages();
994 	printk("swsusp: Need to copy %u pages\n", nr_pages + nr_highmem);
995 
996 	if (!enough_free_mem(nr_pages, nr_highmem)) {
997 		printk(KERN_ERR "swsusp: Not enough free memory\n");
998 		return -ENOMEM;
999 	}
1000 
1001 	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
1002 		printk(KERN_ERR "swsusp: Memory allocation failed\n");
1003 		return -ENOMEM;
1004 	}
1005 
1006 	/* While the suspend pagedir is being allocated, new cold pages may
1007 	 * appear.  Kill them.
1008 	 */
1009 	drain_local_pages();
1010 	copy_data_pages(&copy_bm, &orig_bm);
1011 
1012 	/*
1013 	 * End of critical section. From now on, we can write to memory,
1014 	 * but we should not touch disk. This especially means we must _not_
1015 	 * touch swap space! Except we must write out our image of course.
1016 	 */
1017 
1018 	nr_pages += nr_highmem;
1019 	nr_copy_pages = nr_pages;
1020 	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
1021 
1022 	printk("swsusp: critical section: done (%u pages copied)\n", nr_pages);
1023 
1024 	return 0;
1025 }
1026 
1027 static void init_header(struct swsusp_info *info)
1028 {
1029 	memset(info, 0, sizeof(struct swsusp_info));
1030 	info->version_code = LINUX_VERSION_CODE;
1031 	info->num_physpages = num_physpages;
1032 	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
1033 	info->cpus = num_online_cpus();
1034 	info->image_pages = nr_copy_pages;
1035 	info->pages = nr_copy_pages + nr_meta_pages + 1;
1036 	info->size = info->pages;
1037 	info->size <<= PAGE_SHIFT;
1038 }
1039 
1040 /**
1041  *	pack_pfns - pfns corresponding to the set bits found in the bitmap @bm
1042  *	are stored in the array @buf[] (1 page at a time)
1043  */
1044 
1045 static inline void
1046 pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
1047 {
1048 	int j;
1049 
1050 	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
1051 		buf[j] = memory_bm_next_pfn(bm);
1052 		if (unlikely(buf[j] == BM_END_OF_MAP))
1053 			break;
1054 	}
1055 }
1056 
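/*
 * The resulting image stream thus consists of (one page per "slot", with
 * the counts computed in swsusp_save() and init_header()):
 *
 *	page 0:				the struct swsusp_info header
 *	pages 1 .. nr_meta_pages:	arrays of pfns built by pack_pfns()
 *	the remaining nr_copy_pages:	the saved page copies themselves
 */
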
1057 /**
1058  *	snapshot_read_next - used for reading the system memory snapshot.
1059  *
1060  *	On the first call to it @handle should point to a zeroed
1061  *	snapshot_handle structure.  The structure gets updated and a pointer
1062  *	to it should be passed to this function every next time.
1063  *
1064  *	The @count parameter should contain the number of bytes the caller
1065  *	wants to read from the snapshot.  It must not be zero.
1066  *
1067  *	On success the function returns a positive number.  Then, the caller
1068  *	is allowed to read up to the returned number of bytes from the memory
1069  *	location computed by the data_of() macro.  The number returned
1070  *	may be smaller than @count, but this only happens if the read would
1071  *	cross a page boundary otherwise.
1072  *
1073  *	The function returns 0 to indicate the end of data stream condition,
1074  *	and a negative number is returned on error.  In such cases the
1075  *	structure pointed to by @handle is not updated and should not be used
1076  *	any more.
1077  */
1078 
1079 int snapshot_read_next(struct snapshot_handle *handle, size_t count)
1080 {
1081 	if (handle->cur > nr_meta_pages + nr_copy_pages)
1082 		return 0;
1083 
1084 	if (!buffer) {
1085 		/* This ensures the buffer will be freed by swsusp_free() */
1086 		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
1087 		if (!buffer)
1088 			return -ENOMEM;
1089 	}
1090 	if (!handle->offset) {
1091 		init_header((struct swsusp_info *)buffer);
1092 		handle->buffer = buffer;
1093 		memory_bm_position_reset(&orig_bm);
1094 		memory_bm_position_reset(&copy_bm);
1095 	}
1096 	if (handle->prev < handle->cur) {
1097 		if (handle->cur <= nr_meta_pages) {
1098 			memset(buffer, 0, PAGE_SIZE);
1099 			pack_pfns(buffer, &orig_bm);
1100 		} else {
1101 			struct page *page;
1102 
1103 			page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
1104 			if (PageHighMem(page)) {
1105 				/* Highmem pages are copied to the buffer,
1106 				 * because we can't return with a kmapped
1107 				 * highmem page (we may not be called again).
1108 				 */
1109 				void *kaddr;
1110 
1111 				kaddr = kmap_atomic(page, KM_USER0);
1112 				memcpy(buffer, kaddr, PAGE_SIZE);
1113 				kunmap_atomic(kaddr, KM_USER0);
1114 				handle->buffer = buffer;
1115 			} else {
1116 				handle->buffer = page_address(page);
1117 			}
1118 		}
1119 		handle->prev = handle->cur;
1120 	}
1121 	handle->buf_offset = handle->cur_offset;
1122 	if (handle->cur_offset + count >= PAGE_SIZE) {
1123 		count = PAGE_SIZE - handle->cur_offset;
1124 		handle->cur_offset = 0;
1125 		handle->cur++;
1126 	} else {
1127 		handle->cur_offset += count;
1128 	}
1129 	handle->offset += count;
1130 	return count;
1131 }
1132 
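/*
 * Caller-side sketch (illustrative; write_out() is a placeholder for the
 * actual output routine): reading the entire image through this interface:
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	for (;;) {
 *		ret = snapshot_read_next(&handle, PAGE_SIZE);
 *		if (ret <= 0)
 *			break;	(0 means end of image, < 0 means error)
 *		write_out(data_of(handle), ret);
 *	}
 */
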
1133 /**
1134  *	mark_unsafe_pages - mark the pages that cannot be used for storing
1135  *	the image during resume, because they conflict with the pages that
1136  *	had been used before suspend
1137  */
1138 
1139 static int mark_unsafe_pages(struct memory_bitmap *bm)
1140 {
1141 	struct zone *zone;
1142 	unsigned long pfn, max_zone_pfn;
1143 
1144 	/* Clear page flags */
1145 	for_each_zone(zone) {
1146 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1147 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1148 			if (pfn_valid(pfn))
1149 				ClearPageNosaveFree(pfn_to_page(pfn));
1150 	}
1151 
1152 	/* Mark pages that correspond to the "original" pfns as "unsafe" */
1153 	memory_bm_position_reset(bm);
1154 	do {
1155 		pfn = memory_bm_next_pfn(bm);
1156 		if (likely(pfn != BM_END_OF_MAP)) {
1157 			if (likely(pfn_valid(pfn)))
1158 				SetPageNosaveFree(pfn_to_page(pfn));
1159 			else
1160 				return -EFAULT;
1161 		}
1162 	} while (pfn != BM_END_OF_MAP);
1163 
1164 	allocated_unsafe_pages = 0;
1165 
1166 	return 0;
1167 }
1168 
1169 static void
1170 duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
1171 {
1172 	unsigned long pfn;
1173 
1174 	memory_bm_position_reset(src);
1175 	pfn = memory_bm_next_pfn(src);
1176 	while (pfn != BM_END_OF_MAP) {
1177 		memory_bm_set_bit(dst, pfn);
1178 		pfn = memory_bm_next_pfn(src);
1179 	}
1180 }
1181 
1182 static inline int check_header(struct swsusp_info *info)
1183 {
1184 	char *reason = NULL;
1185 
1186 	if (info->version_code != LINUX_VERSION_CODE)
1187 		reason = "kernel version";
1188 	if (info->num_physpages != num_physpages)
1189 		reason = "memory size";
1190 	if (strcmp(info->uts.sysname, init_utsname()->sysname))
1191 		reason = "system type";
1192 	if (strcmp(info->uts.release, init_utsname()->release))
1193 		reason = "kernel release";
1194 	if (strcmp(info->uts.version, init_utsname()->version))
1195 		reason = "version";
1196 	if (strcmp(info->uts.machine, init_utsname()->machine))
1197 		reason = "machine";
1198 	if (reason) {
1199 		printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
1200 		return -EPERM;
1201 	}
1202 	return 0;
1203 }
1204 
1205 /**
1206  *	load_header - check the image header and copy data from it
1207  */
1208 
1209 static int
1210 load_header(struct swsusp_info *info)
1211 {
1212 	int error;
1213 
1214 	restore_pblist = NULL;
1215 	error = check_header(info);
1216 	if (!error) {
1217 		nr_copy_pages = info->image_pages;
1218 		nr_meta_pages = info->pages - info->image_pages - 1;
1219 	}
1220 	return error;
1221 }
1222 
1223 /**
1224  *	unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
1225  *	the corresponding bit in the memory bitmap @bm
1226  */
1227 
1228 static inline void
1229 unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
1230 {
1231 	int j;
1232 
1233 	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
1234 		if (unlikely(buf[j] == BM_END_OF_MAP))
1235 			break;
1236 
1237 		memory_bm_set_bit(bm, buf[j]);
1238 	}
1239 }
1240 
1241 /* List of "safe" pages that may be used to store data loaded from the suspend
1242  * image
1243  */
1244 static struct linked_page *safe_pages_list;
1245 
1246 #ifdef CONFIG_HIGHMEM
1247 /* struct highmem_pbe is used for creating the list of highmem pages that
1248  * should be restored atomically during the resume from disk, because the page
1249  * frames they have occupied before the suspend are in use.
1250  */
1251 struct highmem_pbe {
1252 	struct page *copy_page;	/* data is here now */
1253 	struct page *orig_page;	/* data was here before the suspend */
1254 	struct highmem_pbe *next;
1255 };
1256 
1257 /* List of highmem PBEs needed for restoring the highmem pages that were
1258  * allocated before the suspend and included in the suspend image, but have
1259  * also been allocated by the "resume" kernel, so their contents cannot be
1260  * written directly to their "original" page frames.
1261  */
1262 static struct highmem_pbe *highmem_pblist;
1263 
1264 /**
1265  *	count_highmem_image_pages - compute the number of highmem pages in the
1266  *	suspend image.  The bits in the memory bitmap @bm that correspond to the
1267  *	image pages are assumed to be set.
1268  */
1269 
1270 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
1271 {
1272 	unsigned long pfn;
1273 	unsigned int cnt = 0;
1274 
1275 	memory_bm_position_reset(bm);
1276 	pfn = memory_bm_next_pfn(bm);
1277 	while (pfn != BM_END_OF_MAP) {
1278 		if (PageHighMem(pfn_to_page(pfn)))
1279 			cnt++;
1280 
1281 		pfn = memory_bm_next_pfn(bm);
1282 	}
1283 	return cnt;
1284 }
1285 
1286 /**
1287  *	prepare_highmem_image - try to allocate as many highmem pages as
1288  *	there are highmem image pages (@nr_highmem_p points to the variable
1289  *	containing the number of highmem image pages).  The pages that are
1290  *	"safe" (i.e. will not be overwritten when the suspend image is
1291  *	restored) have the corresponding bits set in @bm (it must be
1292  *	uninitialized).
1293  *
1294  *	NOTE: This function should not be called if there are no highmem
1295  *	image pages.
1296  */
1297 
1298 static unsigned int safe_highmem_pages;
1299 
1300 static struct memory_bitmap *safe_highmem_bm;
1301 
1302 static int
1303 prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
1304 {
1305 	unsigned int to_alloc;
1306 
1307 	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
1308 		return -ENOMEM;
1309 
1310 	if (get_highmem_buffer(PG_SAFE))
1311 		return -ENOMEM;
1312 
1313 	to_alloc = count_free_highmem_pages();
1314 	if (to_alloc > *nr_highmem_p)
1315 		to_alloc = *nr_highmem_p;
1316 	else
1317 		*nr_highmem_p = to_alloc;
1318 
1319 	safe_highmem_pages = 0;
1320 	while (to_alloc-- > 0) {
1321 		struct page *page;
1322 
1323 		page = alloc_page(__GFP_HIGHMEM);
1324 		if (!PageNosaveFree(page)) {
1325 			/* The page is "safe", set its bit in the bitmap */
1326 			memory_bm_set_bit(bm, page_to_pfn(page));
1327 			safe_highmem_pages++;
1328 		}
1329 		/* Mark the page as allocated */
1330 		SetPageNosave(page);
1331 		SetPageNosaveFree(page);
1332 	}
1333 	memory_bm_position_reset(bm);
1334 	safe_highmem_bm = bm;
1335 	return 0;
1336 }
1337 
1338 /**
1339  *	get_highmem_page_buffer - for a given highmem image page find the
1340  *	buffer that snapshot_write_next() should set for its caller to write to.
1341  *
1342  *	If the page is to be saved to its "original" page frame or a copy of
1343  *	the page is to be made in the highmem, @buffer is returned.  Otherwise,
1344  *	the copy of the page is to be made in normal memory, so the address of
1345  *	the copy is returned.
1346  *
1347  *	If @buffer is returned, the caller of snapshot_write_next() will write
1348  *	the page's contents to @buffer, so they will have to be copied to the
1349  *	right location on the next call to snapshot_write_next() and it is done
1350  *	with the help of copy_last_highmem_page().  For this purpose, if
1351  *	@buffer is returned, @last_highmem_page is set to the page to which
1352  *	the data will have to be copied from @buffer.
1353  */
1354 
1355 static struct page *last_highmem_page;
1356 
1357 static void *
1358 get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
1359 {
1360 	struct highmem_pbe *pbe;
1361 	void *kaddr;
1362 
1363 	if (PageNosave(page) && PageNosaveFree(page)) {
1364 		/* We have allocated the "original" page frame and we can
1365 		 * use it directly to store the loaded page.
1366 		 */
1367 		last_highmem_page = page;
1368 		return buffer;
1369 	}
1370 	/* The "original" page frame has not been allocated and we have to
1371 	 * use a "safe" page frame to store the loaded page.
1372 	 */
1373 	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
1374 	if (!pbe) {
1375 		swsusp_free();
1376 		return NULL;
1377 	}
1378 	pbe->orig_page = page;
1379 	if (safe_highmem_pages > 0) {
1380 		struct page *tmp;
1381 
1382 		/* Copy of the page will be stored in high memory */
1383 		kaddr = buffer;
1384 		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
1385 		safe_highmem_pages--;
1386 		last_highmem_page = tmp;
1387 		pbe->copy_page = tmp;
1388 	} else {
1389 		/* Copy of the page will be stored in normal memory */
1390 		kaddr = safe_pages_list;
1391 		safe_pages_list = safe_pages_list->next;
1392 		pbe->copy_page = virt_to_page(kaddr);
1393 	}
1394 	pbe->next = highmem_pblist;
1395 	highmem_pblist = pbe;
1396 	return kaddr;
1397 }
1398 
1399 /**
1400  *	copy_last_highmem_page - copy the contents of a highmem image page
1401  *	from @buffer, where the caller of snapshot_write_next() has placed
1402  *	them, to the right location represented by @last_highmem_page.
1403  */
1404 
1405 static void copy_last_highmem_page(void)
1406 {
1407 	if (last_highmem_page) {
1408 		void *dst;
1409 
1410 		dst = kmap_atomic(last_highmem_page, KM_USER0);
1411 		memcpy(dst, buffer, PAGE_SIZE);
1412 		kunmap_atomic(dst, KM_USER0);
1413 		last_highmem_page = NULL;
1414 	}
1415 }
1416 
1417 static inline int last_highmem_page_copied(void)
1418 {
1419 	return !last_highmem_page;
1420 }
1421 
1422 static inline void free_highmem_data(void)
1423 {
1424 	if (safe_highmem_bm)
1425 		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
1426 
1427 	if (buffer)
1428 		free_image_page(buffer, PG_UNSAFE_CLEAR);
1429 }
1430 #else
1431 static inline int get_safe_write_buffer(void) { return 0; }
1432 
1433 static unsigned int
1434 count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
1435 
1436 static inline int
1437 prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
1438 {
1439 	return 0;
1440 }
1441 
1442 static inline void *
1443 get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
1444 {
1445 	return NULL;
1446 }
1447 
1448 static inline void copy_last_highmem_page(void) {}
1449 static inline int last_highmem_page_copied(void) { return 1; }
1450 static inline void free_highmem_data(void) {}
1451 #endif /* CONFIG_HIGHMEM */
1452 
1453 /**
1454  *	prepare_image - use the memory bitmap @bm to mark the pages that will
1455  *	be overwritten in the process of restoring the system memory state
1456  *	from the suspend image ("unsafe" pages) and allocate memory for the
1457  *	image.
1458  *
1459  *	The idea is to allocate a new memory bitmap first and then allocate
1460  *	as many pages as needed for the image data, but not to decide which
1461  *	image page each of them will hold at this point.  Instead, we just mark
1462  *	them as allocated and create a list of "safe" pages that will be used
1463  *	later.  On systems with high memory a list of "safe" highmem pages is
1464  *	also created.
1465  */
1466 
1467 #define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
1468 
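/*
 * Illustrative arithmetic (assuming PAGE_SIZE == 4096 and 64-bit
 * pointers): LINKED_PAGE_DATA_SIZE == 4088 and sizeof(struct pbe) == 24,
 * so PBES_PER_LINKED_PAGE == 170, ie. each "safe" page reserved below can
 * back up to 170 PBEs handed out by chain_alloc() in get_buffer().
 */
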
1469 static int
1470 prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
1471 {
1472 	unsigned int nr_pages, nr_highmem;
1473 	struct linked_page *sp_list, *lp;
1474 	int error;
1475 
1476 	/* If there is no highmem, the buffer will not be necessary */
1477 	free_image_page(buffer, PG_UNSAFE_CLEAR);
1478 	buffer = NULL;
1479 
1480 	nr_highmem = count_highmem_image_pages(bm);
1481 	error = mark_unsafe_pages(bm);
1482 	if (error)
1483 		goto Free;
1484 
1485 	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
1486 	if (error)
1487 		goto Free;
1488 
1489 	duplicate_memory_bitmap(new_bm, bm);
1490 	memory_bm_free(bm, PG_UNSAFE_KEEP);
1491 	if (nr_highmem > 0) {
1492 		error = prepare_highmem_image(bm, &nr_highmem);
1493 		if (error)
1494 			goto Free;
1495 	}
1496 	/* Reserve some safe pages for potential later use.
1497 	 *
1498 	 * NOTE: This way we make sure there will be enough safe pages for the
1499 	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
1500 	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
1501 	 */
1502 	sp_list = NULL;
1503 	/* nr_copy_pages cannot be smaller than allocated_unsafe_pages */
1504 	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
1505 	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
1506 	while (nr_pages > 0) {
1507 		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
1508 		if (!lp) {
1509 			error = -ENOMEM;
1510 			goto Free;
1511 		}
1512 		lp->next = sp_list;
1513 		sp_list = lp;
1514 		nr_pages--;
1515 	}
1516 	/* Preallocate memory for the image */
1517 	safe_pages_list = NULL;
1518 	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
1519 	while (nr_pages > 0) {
1520 		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
1521 		if (!lp) {
1522 			error = -ENOMEM;
1523 			goto Free;
1524 		}
1525 		if (!PageNosaveFree(virt_to_page(lp))) {
1526 			/* The page is "safe", add it to the list */
1527 			lp->next = safe_pages_list;
1528 			safe_pages_list = lp;
1529 		}
1530 		/* Mark the page as allocated */
1531 		SetPageNosave(virt_to_page(lp));
1532 		SetPageNosaveFree(virt_to_page(lp));
1533 		nr_pages--;
1534 	}
1535 	/* Free the reserved safe pages so that chain_alloc() can use them */
1536 	while (sp_list) {
1537 		lp = sp_list->next;
1538 		free_image_page(sp_list, PG_UNSAFE_CLEAR);
1539 		sp_list = lp;
1540 	}
1541 	return 0;
1542 
1543  Free:
1544 	swsusp_free();
1545 	return error;
1546 }
1547 
1548 /**
1549  *	get_buffer - compute the address that snapshot_write_next() should
1550  *	set for its caller to write to.
1551  */
1552 
1553 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
1554 {
1555 	struct pbe *pbe;
1556 	struct page *page = pfn_to_page(memory_bm_next_pfn(bm));
1557 
1558 	if (PageHighMem(page))
1559 		return get_highmem_page_buffer(page, ca);
1560 
1561 	if (PageNosave(page) && PageNosaveFree(page))
1562 		/* We have allocated the "original" page frame and we can
1563 		 * use it directly to store the loaded page.
1564 		 */
1565 		return page_address(page);
1566 
1567 	/* The "original" page frame has not been allocated and we have to
1568 	 * use a "safe" page frame to store the loaded page.
1569 	 */
1570 	pbe = chain_alloc(ca, sizeof(struct pbe));
1571 	if (!pbe) {
1572 		swsusp_free();
1573 		return NULL;
1574 	}
1575 	pbe->orig_address = page_address(page);
1576 	pbe->address = safe_pages_list;
1577 	safe_pages_list = safe_pages_list->next;
1578 	pbe->next = restore_pblist;
1579 	restore_pblist = pbe;
1580 	return pbe->address;
1581 }
1582 
1583 /**
1584  *	snapshot_write_next - used for writing the system memory snapshot.
1585  *
1586  *	On the first call to it @handle should point to a zeroed
1587  *	snapshot_handle structure.  The structure gets updated and a pointer
1588  *	to it should be passed to this function every next time.
1589  *
1590  *	The @count parameter should contain the number of bytes the caller
1591  *	wants to write to the image.  It must not be zero.
1592  *
1593  *	On success the function returns a positive number.  Then, the caller
1594  *	is allowed to write up to the returned number of bytes to the memory
1595  *	location computed by the data_of() macro.  The number returned
1596  *	may be smaller than @count, but this only happens if the write would
1597  *	cross a page boundary otherwise.
1598  *
1599  *	The function returns 0 to indicate the "end of file" condition,
1600  *	and a negative number is returned on error.  In such cases the
1601  *	structure pointed to by @handle is not updated and should not be used
1602  *	any more.
1603  */
1604 
1605 int snapshot_write_next(struct snapshot_handle *handle, size_t count)
1606 {
1607 	static struct chain_allocator ca;
1608 	int error = 0;
1609 
1610 	/* Check if we have already loaded the entire image */
1611 	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
1612 		return 0;
1613 
1614 	if (handle->offset == 0) {
1615 		if (!buffer)
1616 			/* This ensures the buffer will be freed by swsusp_free() */
1617 			buffer = get_image_page(GFP_ATOMIC, PG_ANY);
1618 
1619 		if (!buffer)
1620 			return -ENOMEM;
1621 
1622 		handle->buffer = buffer;
1623 	}
1624 	handle->sync_read = 1;
1625 	if (handle->prev < handle->cur) {
1626 		if (handle->prev == 0) {
1627 			error = load_header(buffer);
1628 			if (error)
1629 				return error;
1630 
1631 			error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
1632 			if (error)
1633 				return error;
1634 
1635 		} else if (handle->prev <= nr_meta_pages) {
1636 			unpack_orig_pfns(buffer, &copy_bm);
1637 			if (handle->prev == nr_meta_pages) {
1638 				error = prepare_image(&orig_bm, &copy_bm);
1639 				if (error)
1640 					return error;
1641 
1642 				chain_init(&ca, GFP_ATOMIC, PG_SAFE);
1643 				memory_bm_position_reset(&orig_bm);
1644 				restore_pblist = NULL;
1645 				handle->buffer = get_buffer(&orig_bm, &ca);
1646 				handle->sync_read = 0;
1647 				if (!handle->buffer)
1648 					return -ENOMEM;
1649 			}
1650 		} else {
1651 			copy_last_highmem_page();
1652 			handle->buffer = get_buffer(&orig_bm, &ca);
1653 			if (handle->buffer != buffer)
1654 				handle->sync_read = 0;
1655 		}
1656 		handle->prev = handle->cur;
1657 	}
1658 	handle->buf_offset = handle->cur_offset;
1659 	if (handle->cur_offset + count >= PAGE_SIZE) {
1660 		count = PAGE_SIZE - handle->cur_offset;
1661 		handle->cur_offset = 0;
1662 		handle->cur++;
1663 	} else {
1664 		handle->cur_offset += count;
1665 	}
1666 	handle->offset += count;
1667 	return count;
1668 }
1669 
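/*
 * Caller-side sketch (illustrative; read_in() is a placeholder for the
 * actual input routine): feeding an image back through this interface:
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	for (;;) {
 *		ret = snapshot_write_next(&handle, PAGE_SIZE);
 *		if (ret <= 0)
 *			break;
 *		read_in(data_of(handle), ret);
 *	}
 *	snapshot_write_finalize(&handle);
 *	if (!snapshot_image_loaded(&handle))
 *		(the image is incomplete: fail the resume)
 */
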
1670 /**
1671  *	snapshot_write_finalize - must be called after the last call to
1672  *	snapshot_write_next() in case the last page in the image happens
1673  *	to be a highmem page and its contents should be stored in the
1674  *	highmem.  Additionally, it releases the memory that will not be
1675  *	used any more.
1676  */
1677 
1678 void snapshot_write_finalize(struct snapshot_handle *handle)
1679 {
1680 	copy_last_highmem_page();
1681 	/* Free only if we have loaded the image entirely */
1682 	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages) {
1683 		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
1684 		free_highmem_data();
1685 	}
1686 }
1687 
1688 int snapshot_image_loaded(struct snapshot_handle *handle)
1689 {
1690 	return !(!nr_copy_pages || !last_highmem_page_copied() ||
1691 			handle->cur <= nr_meta_pages + nr_copy_pages);
1692 }
1693 
1694 #ifdef CONFIG_HIGHMEM
1695 /* Assumes that @buf is ready and points to a "safe" page */
1696 static inline void
1697 swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
1698 {
1699 	void *kaddr1, *kaddr2;
1700 
1701 	kaddr1 = kmap_atomic(p1, KM_USER0);
1702 	kaddr2 = kmap_atomic(p2, KM_USER1);
1703 	memcpy(buf, kaddr1, PAGE_SIZE);
1704 	memcpy(kaddr1, kaddr2, PAGE_SIZE);
1705 	memcpy(kaddr2, buf, PAGE_SIZE);
1706 	kunmap_atomic(kaddr1, KM_USER0);
1707 	kunmap_atomic(kaddr2, KM_USER1);
1708 }
1709 
1710 /**
1711  *	restore_highmem - for each highmem page that was allocated before
1712  *	the suspend and included in the suspend image, and has also been
1713  *	allocated by the "resume" kernel, swap its current (i.e. "before
1714  *	resume") contents with the previous (i.e. "before suspend") ones.
1715  *
1716  *	If the resume eventually fails, we can call this function once
1717  *	again and restore the "before resume" highmem state.
1718  */
1719 
1720 int restore_highmem(void)
1721 {
1722 	struct highmem_pbe *pbe = highmem_pblist;
1723 	void *buf;
1724 
1725 	if (!pbe)
1726 		return 0;
1727 
1728 	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
1729 	if (!buf)
1730 		return -ENOMEM;
1731 
1732 	while (pbe) {
1733 		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
1734 		pbe = pbe->next;
1735 	}
1736 	free_image_page(buf, PG_UNSAFE_CLEAR);
1737 	return 0;
1738 }
1739 #endif /* CONFIG_HIGHMEM */
1740