xref: /linux/kernel/power/snapshot.c (revision f3d9478b2ce468c3115b02ecae7e975990697f15)
1 /*
2  * linux/kernel/power/snapshot.c
3  *
4  * This file provides system snapshot/restore functionality.
5  *
6  * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
7  *
8  * This file is released under the GPLv2, and is based on swsusp.c.
9  *
10  */
11 
12 
13 #include <linux/version.h>
14 #include <linux/module.h>
15 #include <linux/mm.h>
16 #include <linux/suspend.h>
17 #include <linux/smp_lock.h>
18 #include <linux/delay.h>
19 #include <linux/bitops.h>
20 #include <linux/spinlock.h>
21 #include <linux/kernel.h>
22 #include <linux/pm.h>
23 #include <linux/device.h>
24 #include <linux/bootmem.h>
25 #include <linux/syscalls.h>
26 #include <linux/console.h>
27 #include <linux/highmem.h>
28 
29 #include <asm/uaccess.h>
30 #include <asm/mmu_context.h>
31 #include <asm/pgtable.h>
32 #include <asm/tlbflush.h>
33 #include <asm/io.h>
34 
35 #include "power.h"
36 
37 struct pbe *pagedir_nosave;
38 static unsigned int nr_copy_pages;
39 static unsigned int nr_meta_pages;
40 static unsigned long *buffer;
41 
42 #ifdef CONFIG_HIGHMEM
43 unsigned int count_highmem_pages(void)
44 {
45 	struct zone *zone;
46 	unsigned long zone_pfn;
47 	unsigned int n = 0;
48 
49 	for_each_zone (zone)
50 		if (is_highmem(zone)) {
51 			mark_free_pages(zone);
52 			for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
53 				struct page *page;
54 				unsigned long pfn = zone_pfn + zone->zone_start_pfn;
55 				if (!pfn_valid(pfn))
56 					continue;
57 				page = pfn_to_page(pfn);
58 				if (PageReserved(page))
59 					continue;
60 				if (PageNosaveFree(page))
61 					continue;
62 				n++;
63 			}
64 		}
65 	return n;
66 }
67 
68 struct highmem_page {
69 	char *data;
70 	struct page *page;
71 	struct highmem_page *next;
72 };
73 
74 static struct highmem_page *highmem_copy;
75 
76 static int save_highmem_zone(struct zone *zone)
77 {
78 	unsigned long zone_pfn;
79 	mark_free_pages(zone);
80 	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
81 		struct page *page;
82 		struct highmem_page *save;
83 		void *kaddr;
84 		unsigned long pfn = zone_pfn + zone->zone_start_pfn;
85 
86 		if (!(pfn%10000))
87 			printk(".");
88 		if (!pfn_valid(pfn))
89 			continue;
90 		page = pfn_to_page(pfn);
91 		/*
92 		 * This condition results from rvmalloc() sans vmalloc_32()
93 		 * and architectural memory reservations. This should be
94 		 * corrected eventually when the cases giving rise to this
95 		 * are better understood.
96 		 */
97 		if (PageReserved(page))
98 			continue;
99 		BUG_ON(PageNosave(page));
100 		if (PageNosaveFree(page))
101 			continue;
102 		save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
103 		if (!save)
104 			return -ENOMEM;
105 		save->next = highmem_copy;
106 		save->page = page;
107 		save->data = (void *) get_zeroed_page(GFP_ATOMIC);
108 		if (!save->data) {
109 			kfree(save);
110 			return -ENOMEM;
111 		}
112 		kaddr = kmap_atomic(page, KM_USER0);
113 		memcpy(save->data, kaddr, PAGE_SIZE);
114 		kunmap_atomic(kaddr, KM_USER0);
115 		highmem_copy = save;
116 	}
117 	return 0;
118 }
119 
120 int save_highmem(void)
121 {
122 	struct zone *zone;
123 	int res = 0;
124 
125 	pr_debug("swsusp: Saving Highmem");
126 	drain_local_pages();
127 	for_each_zone (zone) {
128 		if (is_highmem(zone))
129 			res = save_highmem_zone(zone);
130 		if (res)
131 			return res;
132 	}
133 	printk("\n");
134 	return 0;
135 }
136 
137 int restore_highmem(void)
138 {
139 	printk("swsusp: Restoring Highmem\n");
140 	while (highmem_copy) {
141 		struct highmem_page *save = highmem_copy;
142 		void *kaddr;
143 		highmem_copy = save->next;
144 
145 		kaddr = kmap_atomic(save->page, KM_USER0);
146 		memcpy(kaddr, save->data, PAGE_SIZE);
147 		kunmap_atomic(kaddr, KM_USER0);
148 		free_page((long) save->data);
149 		kfree(save);
150 	}
151 	return 0;
152 }
153 #endif
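
/*
 *	Summary of the CONFIG_HIGHMEM code above: highmem pages cannot be
 *	addressed through the lowmem PBE list, so each saveable highmem page
 *	is copied into a freshly allocated lowmem page and chained onto the
 *	highmem_copy list by save_highmem(), then copied back through a
 *	temporary kmap_atomic() mapping by restore_highmem() on resume.  The
 *	dots printed every 10000 page frames are only a coarse progress
 *	indicator.
 */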
154 
155 static int pfn_is_nosave(unsigned long pfn)
156 {
157 	unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
158 	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
159 	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
160 }
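
/*
 *	__nosave_begin and __nosave_end are linker-script symbols bounding the
 *	architecture's "nosave" data section, so the test above simply checks
 *	whether @pfn falls inside that section.  For example (assuming 4 KB
 *	pages), if the section ran from physical address 0x2000 up to 0x2800,
 *	PAGE_ALIGN() would round the end up to 0x3000 and only pfn 2 would be
 *	reported as nosave; the range is end-exclusive.
 */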
161 
162 /**
163  *	saveable - Determine whether a page should be cloned (saved) or not.
164  *	@zone:	The zone to which the page belongs
165  *	@zone_pfn:	Zone-relative page frame number of the page
166  *
167  *	We save a page if it is reserved but outside the statically defined
168  *	'unsaveable' range, or if it is not reserved and not part of a free chunk.
169  */
170 
171 static int saveable(struct zone *zone, unsigned long *zone_pfn)
172 {
173 	unsigned long pfn = *zone_pfn + zone->zone_start_pfn;
174 	struct page *page;
175 
176 	if (!pfn_valid(pfn))
177 		return 0;
178 
179 	page = pfn_to_page(pfn);
180 	BUG_ON(PageReserved(page) && PageNosave(page));
181 	if (PageNosave(page))
182 		return 0;
183 	if (PageReserved(page) && pfn_is_nosave(pfn))
184 		return 0;
185 	if (PageNosaveFree(page))
186 		return 0;
187 
188 	return 1;
189 }
190 
191 unsigned int count_data_pages(void)
192 {
193 	struct zone *zone;
194 	unsigned long zone_pfn;
195 	unsigned int n = 0;
196 
197 	for_each_zone (zone) {
198 		if (is_highmem(zone))
199 			continue;
200 		mark_free_pages(zone);
201 		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
202 			n += saveable(zone, &zone_pfn);
203 	}
204 	return n;
205 }
206 
207 static void copy_data_pages(struct pbe *pblist)
208 {
209 	struct zone *zone;
210 	unsigned long zone_pfn;
211 	struct pbe *pbe, *p;
212 
213 	pbe = pblist;
214 	for_each_zone (zone) {
215 		if (is_highmem(zone))
216 			continue;
217 		mark_free_pages(zone);
218 		/* This is necessary for swsusp_free() */
219 		for_each_pb_page (p, pblist)
220 			SetPageNosaveFree(virt_to_page(p));
221 		for_each_pbe (p, pblist)
222 			SetPageNosaveFree(virt_to_page(p->address));
223 		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
224 			if (saveable(zone, &zone_pfn)) {
225 				struct page *page;
226 				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
227 				BUG_ON(!pbe);
228 				pbe->orig_address = (unsigned long)page_address(page);
229 				/* copy_page is not usable for copying task structs. */
230 				memcpy((void *)pbe->address, (void *)pbe->orig_address, PAGE_SIZE);
231 				pbe = pbe->next;
232 			}
233 		}
234 	}
235 	BUG_ON(pbe);
236 }
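
/*
 *	mark_free_pages() rebuilds the zone's PG_nosave_free markings (it
 *	clears the flag on every page before setting it on the pages that are
 *	actually free), so the flag that alloc_image_page() set on the pagedir
 *	and data pages is lost at that point.  Re-marking those pages above
 *	("This is necessary for swsusp_free()") keeps the PG_nosave +
 *	PG_nosave_free combination intact, which is exactly what swsusp_free()
 *	later uses to find and release every page belonging to the image.
 */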
237 
238 
239 /**
240  *	free_pagedir - free pages allocated with alloc_pagedir()
241  */
242 
243 static void free_pagedir(struct pbe *pblist, int clear_nosave_free)
244 {
245 	struct pbe *pbe;
246 
247 	while (pblist) {
248 		pbe = (pblist + PB_PAGE_SKIP)->next;
249 		ClearPageNosave(virt_to_page(pblist));
250 		if (clear_nosave_free)
251 			ClearPageNosaveFree(virt_to_page(pblist));
252 		free_page((unsigned long)pblist);
253 		pblist = pbe;
254 	}
255 }
256 
257 /**
258  *	fill_pb_page - Create a list of PBEs on a given memory page
259  */
260 
261 static inline void fill_pb_page(struct pbe *pbpage)
262 {
263 	struct pbe *p;
264 
265 	p = pbpage;
266 	pbpage += PB_PAGE_SKIP;
267 	do
268 		p->next = p + 1;
269 	while (++p < pbpage);
270 }
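
/*
 *	Each page of the pagedir is treated as an array of PBES_PER_PAGE
 *	struct pbe entries.  fill_pb_page() links entries 0..PB_PAGE_SKIP-1 to
 *	their in-page successor; the last entry (index PB_PAGE_SKIP) is left
 *	for the caller, which either points it at the first entry of the next
 *	page (alloc_pagedir()) or terminates the list (create_pbe_list()).
 *	As a rough example only: with 4 KB pages and a 12-byte struct pbe
 *	(three 32-bit words), PBES_PER_PAGE would be 341 and PB_PAGE_SKIP 340.
 */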
271 
272 /**
273  *	create_pbe_list - Create a list of PBEs on top of a given chain
274  *	of memory pages allocated with alloc_pagedir()
275  */
276 
277 static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
278 {
279 	struct pbe *pbpage, *p;
280 	unsigned int num = PBES_PER_PAGE;
281 
282 	for_each_pb_page (pbpage, pblist) {
283 		if (num >= nr_pages)
284 			break;
285 
286 		fill_pb_page(pbpage);
287 		num += PBES_PER_PAGE;
288 	}
289 	if (pbpage) {
290 		for (num -= PBES_PER_PAGE - 1, p = pbpage; num < nr_pages; p++, num++)
291 			p->next = p + 1;
292 		p->next = NULL;
293 	}
294 }
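
/*
 *	In other words, every page that lies entirely within the first
 *	@nr_pages entries is wired up by fill_pb_page(), and the final,
 *	possibly partially used page is linked only as far as needed before
 *	the NULL terminator is written.  For instance, with nr_pages equal to
 *	PBES_PER_PAGE + 2 the first page is filled completely and only the
 *	first two entries of the second page are linked.
 */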
295 
296 /**
297  *	On resume it is necessary to keep track of, and eventually free, the unsafe
298  *	pages that have been allocated, because they are needed for I/O
299  *	(on x86-64 we likely will "eat" these pages once again while
300  *	creating the temporary page translation tables)
301  */
302 
303 struct eaten_page {
304 	struct eaten_page *next;
305 	char padding[PAGE_SIZE - sizeof(void *)];
306 };
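
/*
 *	The padding member makes sizeof(struct eaten_page) exactly PAGE_SIZE,
 *	so an "eaten" page stores its own list linkage: the page itself serves
 *	as the list node and no additional memory has to be allocated just to
 *	remember which pages must be released later.
 */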
307 
308 static struct eaten_page *eaten_pages = NULL;
309 
310 static void release_eaten_pages(void)
311 {
312 	struct eaten_page *p, *q;
313 
314 	p = eaten_pages;
315 	while (p) {
316 		q = p->next;
317 		/* We don't want swsusp_free() to free this page again */
318 		ClearPageNosave(virt_to_page(p));
319 		free_page((unsigned long)p);
320 		p = q;
321 	}
322 	eaten_pages = NULL;
323 }
324 
325 /**
326  *	@safe_needed - on resume, for storing the PBE list and the image,
327  *	we can only use memory pages that do not conflict with the pages
328  *	that were in use before the suspend.
329  *
330  *	The unsafe pages are marked with the PG_nosave_free flag.
331  *
332  *	Allocated but unusable (i.e. eaten) memory pages should be marked
333  *	so that swsusp_free() can release them
334  */
335 
336 static inline void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
337 {
338 	void *res;
339 
340 	if (safe_needed)
341 		do {
342 			res = (void *)get_zeroed_page(gfp_mask);
343 			if (res && PageNosaveFree(virt_to_page(res))) {
344 				/* This is for swsusp_free() */
345 				SetPageNosave(virt_to_page(res));
346 				((struct eaten_page *)res)->next = eaten_pages;
347 				eaten_pages = res;
348 			}
349 		} while (res && PageNosaveFree(virt_to_page(res)));
350 	else
351 		res = (void *)get_zeroed_page(gfp_mask);
352 	if (res) {
353 		SetPageNosave(virt_to_page(res));
354 		SetPageNosaveFree(virt_to_page(res));
355 	}
356 	return res;
357 }
358 
359 unsigned long get_safe_page(gfp_t gfp_mask)
360 {
361 	return (unsigned long)alloc_image_page(gfp_mask, 1);
362 }
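
/*
 *	get_safe_page() is the exported wrapper around alloc_image_page() with
 *	safe_needed set: it keeps allocating until it obtains a frame that is
 *	not marked PG_nosave_free, i.e. not one of the frames the image data
 *	will have to be copied back into, and parks the rejected frames on the
 *	eaten_pages list so release_eaten_pages() or swsusp_free() can return
 *	them.  It is meant for code running during resume, such as the
 *	architecture code that builds temporary page translation tables, which
 *	must not clobber the image.
 */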
363 
364 /**
365  *	alloc_pagedir - Allocate the page directory.
366  *
367  *	First, determine exactly how many pages we need and
368  *	allocate them.
369  *
370  *	We arrange the pages in a chain: each page is an array of PBES_PER_PAGE
371  *	struct pbe elements (pbes) and the last element in the page points
372  *	to the next page.
373  *
374  *	On each page we set up a list of struct pbe elements.
375  */
376 
377 struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed)
378 {
379 	unsigned int num;
380 	struct pbe *pblist, *pbe;
381 
382 	if (!nr_pages)
383 		return NULL;
384 
385 	pblist = alloc_image_page(gfp_mask, safe_needed);
386 	/* FIXME: rewrite this ugly loop */
387 	for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages;
388         		pbe = pbe->next, num += PBES_PER_PAGE) {
389 		pbe += PB_PAGE_SKIP;
390 		pbe->next = alloc_image_page(gfp_mask, safe_needed);
391 	}
392 	if (!pbe) { /* get_zeroed_page() failed */
393 		free_pagedir(pblist, 1);
394 		pblist = NULL;
395         } else
396 		create_pbe_list(pblist, nr_pages);
397 	return pblist;
398 }
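
/*
 *	The chain built above occupies the smallest number of pages that can
 *	hold nr_pages entries, i.e. (nr_pages + PBES_PER_PAGE - 1) /
 *	PBES_PER_PAGE, the same expression used by enough_free_mem() and
 *	swsusp_save() below.  As an illustration only (4 KB pages, 341 PBEs
 *	per page as in the example above), describing a 128 MB image of 32768
 *	data pages takes 97 pagedir pages, i.e. under 400 KB of overhead.
 */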
399 
400 /**
401  * Free the pages we allocated for suspend. Suspend pages are allocated
402  * before the atomic copy, so we need to free them after resume.
403  */
404 
405 void swsusp_free(void)
406 {
407 	struct zone *zone;
408 	unsigned long zone_pfn;
409 
410 	for_each_zone(zone) {
411 		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
412 			if (pfn_valid(zone_pfn + zone->zone_start_pfn)) {
413 				struct page *page;
414 				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
415 				if (PageNosave(page) && PageNosaveFree(page)) {
416 					ClearPageNosave(page);
417 					ClearPageNosaveFree(page);
418 					free_page((long) page_address(page));
419 				}
420 			}
421 	}
422 	nr_copy_pages = 0;
423 	nr_meta_pages = 0;
424 	pagedir_nosave = NULL;
425 	buffer = NULL;
426 }
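
/*
 *	swsusp_free() relies purely on page flags: every page obtained through
 *	alloc_image_page() carries both PG_nosave and PG_nosave_free, so the
 *	scan above releases the copied data pages, the pagedir pages and the
 *	transfer buffer alike, without having to walk the PBE list (which may
 *	be gone or only partially constructed when an error path gets here).
 */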
427 
428 
429 /**
430  *	enough_free_mem - Make sure we have enough free memory to snapshot.
431  *
432  *	Returns TRUE or FALSE after checking the number of available
433  *	free pages.
434  */
435 
436 static int enough_free_mem(unsigned int nr_pages)
437 {
438 	struct zone *zone;
439 	unsigned int n = 0;
440 
441 	for_each_zone (zone)
442 		if (!is_highmem(zone))
443 			n += zone->free_pages;
444 	pr_debug("swsusp: available memory: %u pages\n", n);
445 	return n > (nr_pages + PAGES_FOR_IO +
446 		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
447 }
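
/*
 *	The budget checked above is one free page per page to be copied, plus
 *	the pages needed for the PBE chain that describes them, plus
 *	PAGES_FOR_IO held back for the drivers and the I/O needed to write the
 *	image out.  Purely as an illustration, assuming 4 KB pages and 341
 *	PBEs per page, snapshotting 32768 pages requires at least
 *	32768 + 97 + PAGES_FOR_IO free lowmem pages.
 */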
448 
449 static int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed)
450 {
451 	struct pbe *p;
452 
453 	for_each_pbe (p, pblist) {
454 		p->address = (unsigned long)alloc_image_page(gfp_mask, safe_needed);
455 		if (!p->address)
456 			return -ENOMEM;
457 	}
458 	return 0;
459 }
460 
461 static struct pbe *swsusp_alloc(unsigned int nr_pages)
462 {
463 	struct pbe *pblist;
464 
465 	if (!(pblist = alloc_pagedir(nr_pages, GFP_ATOMIC | __GFP_COLD, 0))) {
466 		printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
467 		return NULL;
468 	}
469 
470 	if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, 0)) {
471 		printk(KERN_ERR "suspend: Allocating image pages failed.\n");
472 		swsusp_free();
473 		return NULL;
474 	}
475 
476 	return pblist;
477 }
478 
479 asmlinkage int swsusp_save(void)
480 {
481 	unsigned int nr_pages;
482 
483 	pr_debug("swsusp: critical section:\n");
484 
485 	drain_local_pages();
486 	nr_pages = count_data_pages();
487 	printk("swsusp: Need to copy %u pages\n", nr_pages);
488 
489 	pr_debug("swsusp: pages needed: %u + %lu + %u, free: %u\n",
490 		 nr_pages,
491 		 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE,
492 		 PAGES_FOR_IO, nr_free_pages());
493 
494 	if (!enough_free_mem(nr_pages)) {
495 		printk(KERN_ERR "swsusp: Not enough free memory\n");
496 		return -ENOMEM;
497 	}
498 
499 	pagedir_nosave = swsusp_alloc(nr_pages);
500 	if (!pagedir_nosave)
501 		return -ENOMEM;
502 
503 	/* While allocating the suspend pagedir, new cold pages may appear.
504 	 * Kill them.
505 	 */
506 	drain_local_pages();
507 	copy_data_pages(pagedir_nosave);
508 
509 	/*
510 	 * End of critical section. From now on, we can write to memory,
511 	 * but we should not touch the disk. In particular, this means we must _not_
512 	 * touch swap space! Except we must write out our image of course.
513 	 */
514 
515 	nr_copy_pages = nr_pages;
516 	nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT;
517 
518 	printk("swsusp: critical section: done (%u pages copied)\n", nr_pages);
519 	return 0;
520 }
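
/*
 *	The nr_meta_pages value computed above follows from the fact that each
 *	meta page stores PAGE_SIZE / sizeof(long) original page addresses (see
 *	pack_orig_addresses() below).  For example, assuming 4 KB pages and
 *	4-byte longs, one meta page covers 1024 data pages, so an image of
 *	32768 data pages is described by 32 meta pages.
 */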
521 
522 static void init_header(struct swsusp_info *info)
523 {
524 	memset(info, 0, sizeof(struct swsusp_info));
525 	info->version_code = LINUX_VERSION_CODE;
526 	info->num_physpages = num_physpages;
527 	memcpy(&info->uts, &system_utsname, sizeof(system_utsname));
528 	info->cpus = num_online_cpus();
529 	info->image_pages = nr_copy_pages;
530 	info->pages = nr_copy_pages + nr_meta_pages + 1;
531 	info->size = info->pages;
532 	info->size <<= PAGE_SHIFT;
533 }
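
/*
 *	The resulting image stream is therefore one header page (this
 *	swsusp_info structure), followed by nr_meta_pages pages of original
 *	addresses, followed by nr_copy_pages pages of data; that is the order
 *	in which snapshot_read_next() hands pages out and snapshot_write_next()
 *	expects them back.  Continuing the example above (32768 data pages and
 *	32 meta pages), info->pages would be 32801 and info->size about 128 MB.
 */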
534 
535 /**
536  *	pack_orig_addresses - store the .orig_address fields of the PBEs from
537  *	the list starting at @pbe in the array @buf[] (one page)
538  */
539 
540 static inline struct pbe *pack_orig_addresses(unsigned long *buf, struct pbe *pbe)
541 {
542 	int j;
543 
544 	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
545 		buf[j] = pbe->orig_address;
546 		pbe = pbe->next;
547 	}
548 	if (!pbe)
549 		for (; j < PAGE_SIZE / sizeof(long); j++)
550 			buf[j] = 0;
551 	return pbe;
552 }
553 
554 /**
555  *	snapshot_read_next - used for reading the system memory snapshot.
556  *
557  *	On the first call, @handle should point to a zeroed
558  *	snapshot_handle structure.  The structure gets updated and a pointer
559  *	to it should be passed to this function on each subsequent call.
560  *
561  *	The @count parameter should contain the number of bytes the caller
562  *	wants to read from the snapshot.  It must not be zero.
563  *
564  *	On success the function returns a positive number.  Then, the caller
565  *	is allowed to read up to the returned number of bytes from the memory
566  *	location computed by the data_of() macro.  The number returned
567  *	may be smaller than @count, but this only happens if the read would
568  *	cross a page boundary otherwise.
569  *
570  *	The function returns 0 to indicate the end of data stream condition,
571  *	and a negative number is returned on error.  In such cases the
572  *	structure pointed to by @handle is not updated and should not be used
573  *	any more.
574  */
575 
576 int snapshot_read_next(struct snapshot_handle *handle, size_t count)
577 {
578 	if (handle->page > nr_meta_pages + nr_copy_pages)
579 		return 0;
580 	if (!buffer) {
581 		/* This makes the buffer be freed by swsusp_free() */
582 		buffer = alloc_image_page(GFP_ATOMIC, 0);
583 		if (!buffer)
584 			return -ENOMEM;
585 	}
586 	if (!handle->offset) {
587 		init_header((struct swsusp_info *)buffer);
588 		handle->buffer = buffer;
589 		handle->pbe = pagedir_nosave;
590 	}
591 	if (handle->prev < handle->page) {
592 		if (handle->page <= nr_meta_pages) {
593 			handle->pbe = pack_orig_addresses(buffer, handle->pbe);
594 			if (!handle->pbe)
595 				handle->pbe = pagedir_nosave;
596 		} else {
597 			handle->buffer = (void *)handle->pbe->address;
598 			handle->pbe = handle->pbe->next;
599 		}
600 		handle->prev = handle->page;
601 	}
602 	handle->buf_offset = handle->page_offset;
603 	if (handle->page_offset + count >= PAGE_SIZE) {
604 		count = PAGE_SIZE - handle->page_offset;
605 		handle->page_offset = 0;
606 		handle->page++;
607 	} else {
608 		handle->page_offset += count;
609 	}
610 	handle->offset += count;
611 	return count;
612 }
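
/*
 *	For illustration, a consumer of this interface (such as the code that
 *	writes the image to swap) is expected to drive it roughly as sketched
 *	below; write_page() stands in for whatever routine actually stores a
 *	chunk and is purely hypothetical, and error handling beyond the return
 *	value is omitted:
 *
 *		struct snapshot_handle handle;
 *		int ret;
 *
 *		memset(&handle, 0, sizeof(handle));
 *		while ((ret = snapshot_read_next(&handle, PAGE_SIZE)) > 0)
 *			write_page(data_of(handle), ret);
 *		return ret;	(0 = end of data, negative = error)
 */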
613 
614 /**
615  *	mark_unsafe_pages - mark the pages that cannot be used for storing
616  *	the image during resume, because they conflict with the pages that
617  *	had been used before suspend
618  */
619 
620 static int mark_unsafe_pages(struct pbe *pblist)
621 {
622 	struct zone *zone;
623 	unsigned long zone_pfn;
624 	struct pbe *p;
625 
626 	if (!pblist) /* a sanity check */
627 		return -EINVAL;
628 
629 	/* Clear page flags */
630 	for_each_zone (zone) {
631 		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
632 			if (pfn_valid(zone_pfn + zone->zone_start_pfn))
633 				ClearPageNosaveFree(pfn_to_page(zone_pfn +
634 					zone->zone_start_pfn));
635 	}
636 
637 	/* Mark orig addresses */
638 	for_each_pbe (p, pblist) {
639 		if (virt_addr_valid(p->orig_address))
640 			SetPageNosaveFree(virt_to_page(p->orig_address));
641 		else
642 			return -EFAULT;
643 	}
644 
645 	return 0;
646 }
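
/*
 *	After mark_unsafe_pages() the PG_nosave_free bit means "this frame
 *	will be overwritten when the image is restored".  Allocations made
 *	through alloc_image_page()/get_safe_page() with safe_needed set refuse
 *	such frames, which guarantees that the pages holding the incoming
 *	image data never collide with the frames that data ultimately has to
 *	be copied back into.
 */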
647 
648 static void copy_page_backup_list(struct pbe *dst, struct pbe *src)
649 {
650 	/* We assume both lists contain the same number of elements */
651 	while (src) {
652 		dst->orig_address = src->orig_address;
653 		dst = dst->next;
654 		src = src->next;
655 	}
656 }
657 
658 static int check_header(struct swsusp_info *info)
659 {
660 	char *reason = NULL;
661 
662 	if (info->version_code != LINUX_VERSION_CODE)
663 		reason = "kernel version";
664 	if (info->num_physpages != num_physpages)
665 		reason = "memory size";
666 	if (strcmp(info->uts.sysname,system_utsname.sysname))
667 		reason = "system type";
668 	if (strcmp(info->uts.release,system_utsname.release))
669 		reason = "kernel release";
670 	if (strcmp(info->uts.version,system_utsname.version))
671 		reason = "version";
672 	if (strcmp(info->uts.machine,system_utsname.machine))
673 		reason = "machine";
674 	if (reason) {
675 		printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
676 		return -EPERM;
677 	}
678 	return 0;
679 }
680 
681 /**
682  *	load_header - check the image header and copy the data from it
683  */
684 
685 static int load_header(struct snapshot_handle *handle,
686                               struct swsusp_info *info)
687 {
688 	int error;
689 	struct pbe *pblist;
690 
691 	error = check_header(info);
692 	if (!error) {
693 		pblist = alloc_pagedir(info->image_pages, GFP_ATOMIC, 0);
694 		if (!pblist)
695 			return -ENOMEM;
696 		pagedir_nosave = pblist;
697 		handle->pbe = pblist;
698 		nr_copy_pages = info->image_pages;
699 		nr_meta_pages = info->pages - info->image_pages - 1;
700 	}
701 	return error;
702 }
703 
704 /**
705  *	unpack_orig_addresses - copy the elements of @buf[] (1 page) to
706  *	the PBEs in the list starting at @pbe
707  */
708 
709 static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
710                                                 struct pbe *pbe)
711 {
712 	int j;
713 
714 	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
715 		pbe->orig_address = buf[j];
716 		pbe = pbe->next;
717 	}
718 	return pbe;
719 }
720 
721 /**
722  *	create_image - use the metadata contained in the PBE list pointed to
723  *	by pagedir_nosave to mark the pages that will be overwritten while
724  *	the system memory state is restored from the image, and then
725  *	allocate memory for the image itself while avoiding those
726  *	marked pages
727  */
728 
729 static int create_image(struct snapshot_handle *handle)
730 {
731 	int error = 0;
732 	struct pbe *p, *pblist;
733 
734 	p = pagedir_nosave;
735 	error = mark_unsafe_pages(p);
736 	if (!error) {
737 		pblist = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 1);
738 		if (pblist)
739 			copy_page_backup_list(pblist, p);
740 		free_pagedir(p, 0);
741 		if (!pblist)
742 			error = -ENOMEM;
743 	}
744 	if (!error)
745 		error = alloc_data_pages(pblist, GFP_ATOMIC, 1);
746 	if (!error) {
747 		release_eaten_pages();
748 		pagedir_nosave = pblist;
749 	} else {
750 		pagedir_nosave = NULL;
751 		handle->pbe = NULL;
752 		nr_copy_pages = 0;
753 		nr_meta_pages = 0;
754 	}
755 	return error;
756 }
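
/*
 *	create_image() is the point where the restore path switches from
 *	"unsafe" to "safe" storage: the pagedir built while the header and
 *	meta pages were being streamed in may sit anywhere in memory, so once
 *	all original addresses are known it is copied into a freshly allocated
 *	pagedir (and data pages) that avoid the marked frames, the old chain
 *	is freed, and any pages "eaten" along the way are released.
 */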
757 
758 /**
759  *	snapshot_write_next - used for writing the system memory snapshot.
760  *
761  *	On the first call, @handle should point to a zeroed
762  *	snapshot_handle structure.  The structure gets updated and a pointer
763  *	to it should be passed to this function on each subsequent call.
764  *
765  *	The @count parameter should contain the number of bytes the caller
766  *	wants to write to the image.  It must not be zero.
767  *
768  *	On success the function returns a positive number.  Then, the caller
769  *	is allowed to write up to the returned number of bytes to the memory
770  *	location computed by the data_of() macro.  The number returned
771  *	may be smaller than @count, but this only happens if the write would
772  *	cross a page boundary otherwise.
773  *
774  *	The function returns 0 to indicate the "end of file" condition,
775  *	and a negative number is returned on error.  In such cases the
776  *	structure pointed to by @handle is not updated and should not be used
777  *	any more.
778  */
779 
780 int snapshot_write_next(struct snapshot_handle *handle, size_t count)
781 {
782 	int error = 0;
783 
784 	if (handle->prev && handle->page > nr_meta_pages + nr_copy_pages)
785 		return 0;
786 	if (!buffer) {
787 		/* This makes the buffer be freed by swsusp_free() */
788 		buffer = alloc_image_page(GFP_ATOMIC, 0);
789 		if (!buffer)
790 			return -ENOMEM;
791 	}
792 	if (!handle->offset)
793 		handle->buffer = buffer;
794 	if (handle->prev < handle->page) {
795 		if (!handle->prev) {
796 			error = load_header(handle, (struct swsusp_info *)buffer);
797 			if (error)
798 				return error;
799 		} else if (handle->prev <= nr_meta_pages) {
800 			handle->pbe = unpack_orig_addresses(buffer, handle->pbe);
801 			if (!handle->pbe) {
802 				error = create_image(handle);
803 				if (error)
804 					return error;
805 				handle->pbe = pagedir_nosave;
806 				handle->buffer = (void *)handle->pbe->address;
807 			}
808 		} else {
809 			handle->pbe = handle->pbe->next;
810 			handle->buffer = (void *)handle->pbe->address;
811 		}
812 		handle->prev = handle->page;
813 	}
814 	handle->buf_offset = handle->page_offset;
815 	if (handle->page_offset + count >= PAGE_SIZE) {
816 		count = PAGE_SIZE - handle->page_offset;
817 		handle->page_offset = 0;
818 		handle->page++;
819 	} else {
820 		handle->page_offset += count;
821 	}
822 	handle->offset += count;
823 	return count;
824 }
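
/*
 *	The load side mirrors snapshot_read_next(): the caller repeatedly asks
 *	for a destination buffer and fills it with the next chunk of the image
 *	stream.  A rough sketch, with read_page() as a hypothetical input
 *	routine and error handling trimmed:
 *
 *		struct snapshot_handle handle;
 *		int ret;
 *
 *		memset(&handle, 0, sizeof(handle));
 *		while ((ret = snapshot_write_next(&handle, PAGE_SIZE)) > 0)
 *			read_page(data_of(handle), ret);
 *
 *	Once the loop ends, snapshot_image_loaded() below tells the caller
 *	whether a complete image was actually fed in.
 */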
825 
826 int snapshot_image_loaded(struct snapshot_handle *handle)
827 {
828 	return !(!handle->pbe || handle->pbe->next || !nr_copy_pages ||
829 		handle->page <= nr_meta_pages + nr_copy_pages);
830 }
831