xref: /linux/kernel/power/snapshot.c (revision de2fe5e07d58424bc286fff3fd3c1b0bf933cd58)
/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides the system snapshot/restore functionality.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
 *
 * This file is released under the GPLv2, and is based on swsusp.c.
 *
 */


#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

struct pbe *pagedir_nosave;
static unsigned int nr_copy_pages;
static unsigned int nr_meta_pages;
static unsigned long *buffer;

#ifdef CONFIG_HIGHMEM
unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone)
		if (is_highmem(zone)) {
			mark_free_pages(zone);
			for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
				struct page *page;
				unsigned long pfn = zone_pfn + zone->zone_start_pfn;
				if (!pfn_valid(pfn))
					continue;
				page = pfn_to_page(pfn);
				if (PageReserved(page))
					continue;
				if (PageNosaveFree(page))
					continue;
				n++;
			}
		}
	return n;
}

struct highmem_page {
	char *data;
	struct page *page;
	struct highmem_page *next;
};

static struct highmem_page *highmem_copy;

static int save_highmem_zone(struct zone *zone)
{
	unsigned long zone_pfn;
	mark_free_pages(zone);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
		struct page *page;
		struct highmem_page *save;
		void *kaddr;
		unsigned long pfn = zone_pfn + zone->zone_start_pfn;

		if (!(pfn%10000))
			printk(".");
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		/*
		 * This condition results from rvmalloc() sans vmalloc_32()
		 * and architectural memory reservations. This should be
		 * corrected eventually when the cases giving rise to this
		 * are better understood.
		 */
		if (PageReserved(page))
			continue;
		BUG_ON(PageNosave(page));
		if (PageNosaveFree(page))
			continue;
		save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
		if (!save)
			return -ENOMEM;
		save->next = highmem_copy;
		save->page = page;
		save->data = (void *) get_zeroed_page(GFP_ATOMIC);
		if (!save->data) {
			kfree(save);
			return -ENOMEM;
		}
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(save->data, kaddr, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		highmem_copy = save;
	}
	return 0;
}

int save_highmem(void)
{
	struct zone *zone;
	int res = 0;

	pr_debug("swsusp: Saving Highmem");
	drain_local_pages();
	for_each_zone (zone) {
		if (is_highmem(zone))
			res = save_highmem_zone(zone);
		if (res)
			return res;
	}
	printk("\n");
	return 0;
}

int restore_highmem(void)
{
	printk("swsusp: Restoring Highmem\n");
	while (highmem_copy) {
		struct highmem_page *save = highmem_copy;
		void *kaddr;
		highmem_copy = save->next;

		kaddr = kmap_atomic(save->page, KM_USER0);
		memcpy(kaddr, save->data, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		free_page((long) save->data);
		kfree(save);
	}
	return 0;
}
#endif
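
/*
 * Illustrative sketch (not part of the build): the intended pairing of the
 * CONFIG_HIGHMEM helpers above.  save_highmem() runs on the suspend path to
 * stash every saveable highmem page in the highmem_copy list before the image
 * is written out, and restore_highmem() runs on the resume path to copy the
 * stashed data back.  The actual call sites live in the suspend core, outside
 * this file; the function names below are hypothetical.
 */
#if 0
/* Suspend side: capture highmem before the image is written out. */
static int example_suspend_side_highmem(void)
{
	return save_highmem();
}

/* Resume side: replay the captured highmem pages after the image is back. */
static int example_resume_side_highmem(void)
{
	return restore_highmem();
}
#endif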

static int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
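
/*
 * Illustrative sketch (not part of the build): data that must survive the
 * atomic copy untouched is placed in the linker-provided nosave section,
 * which is exactly the [__nosave_begin, __nosave_end) range tested above.
 * This assumes the __nosavedata section attribute; the variable name is
 * hypothetical.
 */
#if 0
static int example_resume_flag __nosavedata;	/* excluded from the image */
#endif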

/**
 *	saveable - Determine whether a page should be cloned or not.
 *	@zone:		The zone the page belongs to
 *	@zone_pfn:	The page frame number relative to the start of @zone
 *
 *	We save a page if it is Reserved and not in the range of pages
 *	statically defined as 'unsaveable', or if it is not Reserved and
 *	not part of a free chunk of pages.
 */

static int saveable(struct zone *zone, unsigned long *zone_pfn)
{
	unsigned long pfn = *zone_pfn + zone->zone_start_pfn;
	struct page *page;

	if (!pfn_valid(pfn))
		return 0;

	page = pfn_to_page(pfn);
	BUG_ON(PageReserved(page) && PageNosave(page));
	if (PageNosave(page))
		return 0;
	if (PageReserved(page) && pfn_is_nosave(pfn))
		return 0;
	if (PageNosaveFree(page))
		return 0;

	return 1;
}

unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			n += saveable(zone, &zone_pfn);
	}
	return n;
}

static void copy_data_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long zone_pfn;
	struct pbe *pbe, *p;

	pbe = pblist;
	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		/* This is necessary for swsusp_free() */
		for_each_pb_page (p, pblist)
			SetPageNosaveFree(virt_to_page(p));
		for_each_pbe (p, pblist)
			SetPageNosaveFree(virt_to_page(p->address));
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
			if (saveable(zone, &zone_pfn)) {
				struct page *page;
				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
				BUG_ON(!pbe);
				pbe->orig_address = (unsigned long)page_address(page);
				/* copy_page is not usable for copying task structs. */
				memcpy((void *)pbe->address, (void *)pbe->orig_address, PAGE_SIZE);
				pbe = pbe->next;
			}
		}
	}
	BUG_ON(pbe);
}


/**
 *	free_pagedir - free pages allocated with alloc_pagedir()
 */

static void free_pagedir(struct pbe *pblist)
{
	struct pbe *pbe;

	while (pblist) {
		pbe = (pblist + PB_PAGE_SKIP)->next;
		ClearPageNosave(virt_to_page(pblist));
		ClearPageNosaveFree(virt_to_page(pblist));
		free_page((unsigned long)pblist);
		pblist = pbe;
	}
}

/**
 *	fill_pb_page - Create a list of PBEs on a given memory page
 */

static inline void fill_pb_page(struct pbe *pbpage)
{
	struct pbe *p;

	p = pbpage;
	pbpage += PB_PAGE_SKIP;
	do
		p->next = p + 1;
	while (++p < pbpage);
}

/**
 *	create_pbe_list - Create a list of PBEs on top of a given chain
 *	of memory pages allocated with alloc_pagedir()
 */

static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
{
	struct pbe *pbpage, *p;
	unsigned int num = PBES_PER_PAGE;

	for_each_pb_page (pbpage, pblist) {
		if (num >= nr_pages)
			break;

		fill_pb_page(pbpage);
		num += PBES_PER_PAGE;
	}
	if (pbpage) {
		for (num -= PBES_PER_PAGE - 1, p = pbpage; num < nr_pages; p++, num++)
			p->next = p + 1;
		p->next = NULL;
	}
}

/**
 *	On resume it is necessary to trace and eventually free the unsafe
 *	pages that have been allocated, because they are needed for I/O
 *	(on x86-64 we likely will "eat" these pages once again while
 *	creating the temporary page translation tables)
 */

struct eaten_page {
	struct eaten_page *next;
	char padding[PAGE_SIZE - sizeof(void *)];
};

static struct eaten_page *eaten_pages = NULL;

static void release_eaten_pages(void)
{
	struct eaten_page *p, *q;

	p = eaten_pages;
	while (p) {
		q = p->next;
		/* We don't want swsusp_free() to free this page again */
		ClearPageNosave(virt_to_page(p));
		free_page((unsigned long)p);
		p = q;
	}
	eaten_pages = NULL;
}

/**
 *	@safe_needed - on resume, for storing the PBE list and the image,
 *	we can only use memory pages that do not conflict with the pages
 *	that were in use before suspend.
 *
 *	The unsafe pages are marked with the PG_nosave_free flag.
 *
 *	Allocated but unusable (i.e. eaten) memory pages should be marked
 *	so that swsusp_free() can release them.
 */

static inline void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	if (safe_needed)
		do {
			res = (void *)get_zeroed_page(gfp_mask);
			if (res && PageNosaveFree(virt_to_page(res))) {
				/* This is for swsusp_free() */
				SetPageNosave(virt_to_page(res));
				((struct eaten_page *)res)->next = eaten_pages;
				eaten_pages = res;
			}
		} while (res && PageNosaveFree(virt_to_page(res)));
	else
		res = (void *)get_zeroed_page(gfp_mask);
	if (res) {
		SetPageNosave(virt_to_page(res));
		SetPageNosaveFree(virt_to_page(res));
	}
	return res;
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)alloc_image_page(gfp_mask, 1);
}
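
/*
 * Illustrative sketch (not part of the build): the intended consumer of
 * get_safe_page() is resume-side code that needs pages guaranteed not to
 * collide with the image, e.g. the x86-64 temporary page translation tables
 * mentioned above.  Rejected ("eaten") pages stay tracked so that
 * release_eaten_pages()/swsusp_free() can return them.  The function name is
 * hypothetical.
 */
#if 0
static unsigned long *example_alloc_safe_pagetable(void)
{
	/* Never clashes with pages that belong to the saved image. */
	return (unsigned long *)get_safe_page(GFP_ATOMIC);
}
#endif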

/**
 *	alloc_pagedir - Allocate the page directory.
 *
 *	First, determine exactly how many pages we need and
 *	allocate them.
 *
 *	We arrange the pages in a chain: each page is an array of PBES_PER_PAGE
 *	struct pbe elements (pbes) and the last element in the page points
 *	to the next page.
 *
 *	On each page we set up a list of struct pbe elements.
 */

struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed)
{
	unsigned int num;
	struct pbe *pblist, *pbe;

	if (!nr_pages)
		return NULL;

	pblist = alloc_image_page(gfp_mask, safe_needed);
	/* FIXME: rewrite this ugly loop */
	for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages;
			pbe = pbe->next, num += PBES_PER_PAGE) {
		pbe += PB_PAGE_SKIP;
		pbe->next = alloc_image_page(gfp_mask, safe_needed);
	}
	if (!pbe) { /* get_zeroed_page() failed */
		free_pagedir(pblist);
		pblist = NULL;
	} else
		create_pbe_list(pblist, nr_pages);
	return pblist;
}
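
/*
 * Illustrative sketch (not part of the build) of walking the two levels of
 * the structure built above, using the for_each_pb_page()/for_each_pbe()
 * helpers from "power.h": the former is assumed to visit one struct pbe per
 * backing page of the pagedir, the latter every individual entry.  The
 * function name is hypothetical.
 */
#if 0
static unsigned int example_count_pbes(struct pbe *pblist)
{
	struct pbe *p;
	unsigned int pages = 0, entries = 0;

	for_each_pb_page (p, pblist)
		pages++;	/* backing pages of the pagedir chain */
	for_each_pbe (p, pblist)
		entries++;	/* individual page backup entries */
	pr_debug("pagedir: %u pages, %u entries\n", pages, entries);
	return entries;
}
#endif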

/**
 * Free pages we allocated for suspend. Suspend pages are allocated
 * before the atomic copy, so we need to free them after resume.
 */

void swsusp_free(void)
{
	struct zone *zone;
	unsigned long zone_pfn;

	for_each_zone(zone) {
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			if (pfn_valid(zone_pfn + zone->zone_start_pfn)) {
				struct page *page;
				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
				if (PageNosave(page) && PageNosaveFree(page)) {
					ClearPageNosave(page);
					ClearPageNosaveFree(page);
					free_page((long) page_address(page));
				}
			}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	pagedir_nosave = NULL;
	buffer = NULL;
}


/**
 *	enough_free_mem - Make sure we have enough free memory for the
 *	snapshot.
 *
 *	Returns TRUE or FALSE after checking the number of available
 *	free pages.
 */

static int enough_free_mem(unsigned int nr_pages)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_zone (zone)
		if (!is_highmem(zone))
			n += zone->free_pages;
	pr_debug("swsusp: available memory: %u pages\n", n);
	return n > (nr_pages + PAGES_FOR_IO +
		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
}
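
/*
 * Worked example for the check above (the numbers are an illustrative
 * assumption, not taken from this file): the snapshot needs nr_pages pages
 * for the copies themselves, (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE
 * pages for the pagedir entries, and PAGES_FOR_IO of headroom for writing
 * the image out.  With 4 KB pages and a three-word struct pbe,
 * PBES_PER_PAGE is a few hundred, so the pagedir overhead stays well below
 * one percent of nr_pages.
 */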

static int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed)
{
	struct pbe *p;

	for_each_pbe (p, pblist) {
		p->address = (unsigned long)alloc_image_page(gfp_mask, safe_needed);
		if (!p->address)
			return -ENOMEM;
	}
	return 0;
}

static struct pbe *swsusp_alloc(unsigned int nr_pages)
{
	struct pbe *pblist;

	if (!(pblist = alloc_pagedir(nr_pages, GFP_ATOMIC | __GFP_COLD, 0))) {
		printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
		return NULL;
	}

	if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, 0)) {
		printk(KERN_ERR "suspend: Allocating image pages failed.\n");
		swsusp_free();
		return NULL;
	}

	return pblist;
}

asmlinkage int swsusp_save(void)
{
	unsigned int nr_pages;

	pr_debug("swsusp: critical section:\n");

	drain_local_pages();
	nr_pages = count_data_pages();
	printk("swsusp: Need to copy %u pages\n", nr_pages);

	pr_debug("swsusp: pages needed: %u + %lu + %u, free: %u\n",
		 nr_pages,
		 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE,
		 PAGES_FOR_IO, nr_free_pages());

	if (!enough_free_mem(nr_pages)) {
		printk(KERN_ERR "swsusp: Not enough free memory\n");
		return -ENOMEM;
	}

	pagedir_nosave = swsusp_alloc(nr_pages);
	if (!pagedir_nosave)
		return -ENOMEM;

	/* While the suspend pagedir was being allocated, new cold pages may
	 * have appeared.  Kill them.
	 */
	drain_local_pages();
	copy_data_pages(pagedir_nosave);

	/*
	 * End of critical section.  From now on, we can write to memory,
	 * but we should not touch disk.  In particular, we must _not_
	 * touch swap space!  Except, of course, that we must write out
	 * our image.
	 */

	nr_copy_pages = nr_pages;
	nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printk("swsusp: critical section: done (%u pages copied)\n", nr_pages);
	return 0;
}

static void init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->version_code = LINUX_VERSION_CODE;
	info->num_physpages = num_physpages;
	memcpy(&info->uts, &system_utsname, sizeof(system_utsname));
	info->cpus = num_online_cpus();
	info->image_pages = nr_copy_pages;
	info->pages = nr_copy_pages + nr_meta_pages + 1;
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
}
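
/*
 * Sizing sketch for the fields set above (a summary, not new behaviour):
 * for N = nr_copy_pages data pages, nr_meta_pages comes out as
 * (N * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT, i.e. one page of packed
 * orig_address values per PAGE_SIZE/sizeof(long) data pages; info->pages is
 * N + nr_meta_pages + 1, where the final "+ 1" is this header page itself,
 * and info->size is simply info->pages converted to bytes.
 */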

/**
 *	pack_orig_addresses - the .orig_address fields of the PBEs from the
 *	list starting at @pbe are stored in the array @buf[] (1 page)
 */

static inline struct pbe *pack_orig_addresses(unsigned long *buf, struct pbe *pbe)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		buf[j] = pbe->orig_address;
		pbe = pbe->next;
	}
	if (!pbe)
		for (; j < PAGE_SIZE / sizeof(long); j++)
			buf[j] = 0;
	return pbe;
}

/**
 *	snapshot_read_next - used for reading the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function on every subsequent call.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to read from the snapshot.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to read up to the returned number of bytes from the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the read would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the end of the data stream,
 *	and a negative number is returned on error.  In such cases the
 *	structure pointed to by @handle is not updated and should not be used
 *	any more.
 */

int snapshot_read_next(struct snapshot_handle *handle, size_t count)
{
	if (handle->page > nr_meta_pages + nr_copy_pages)
		return 0;
	if (!buffer) {
		/* This makes the buffer get freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, 0);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset) {
		init_header((struct swsusp_info *)buffer);
		handle->buffer = buffer;
		handle->pbe = pagedir_nosave;
	}
	if (handle->prev < handle->page) {
		if (handle->page <= nr_meta_pages) {
			handle->pbe = pack_orig_addresses(buffer, handle->pbe);
			if (!handle->pbe)
				handle->pbe = pagedir_nosave;
		} else {
			handle->buffer = (void *)handle->pbe->address;
			handle->pbe = handle->pbe->next;
		}
		handle->prev = handle->page;
	}
	handle->buf_offset = handle->page_offset;
	if (handle->page_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->page_offset;
		handle->page_offset = 0;
		handle->page++;
	} else {
		handle->page_offset += count;
	}
	handle->offset += count;
	return count;
}
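
/*
 * Illustrative sketch (not part of the build) of a consumer of the interface
 * documented above: stream the whole image out via data_of(), as the swap
 * writer is expected to do.  example_emit() and the wrapper function are
 * hypothetical names standing in for the real sink.
 */
#if 0
extern int example_emit(void *buf, int len);	/* hypothetical sink */

static int example_stream_image_out(void)
{
	struct snapshot_handle handle;
	int n;

	memset(&handle, 0, sizeof(handle));
	while ((n = snapshot_read_next(&handle, PAGE_SIZE)) > 0)
		example_emit(data_of(handle), n);
	return n;	/* 0 at the end of the image, negative on error */
}
#endif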

/**
 *	mark_unsafe_pages - mark the pages that cannot be used for storing
 *	the image during resume, because they conflict with the pages that
 *	had been used before suspend
 */

static int mark_unsafe_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long zone_pfn;
	struct pbe *p;

	if (!pblist) /* a sanity check */
		return -EINVAL;

	/* Clear page flags */
	for_each_zone (zone) {
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			if (pfn_valid(zone_pfn + zone->zone_start_pfn))
				ClearPageNosaveFree(pfn_to_page(zone_pfn +
					zone->zone_start_pfn));
	}

	/* Mark orig addresses */
	for_each_pbe (p, pblist) {
		if (virt_addr_valid(p->orig_address))
			SetPageNosaveFree(virt_to_page(p->orig_address));
		else
			return -EFAULT;
	}

	return 0;
}

static void copy_page_backup_list(struct pbe *dst, struct pbe *src)
{
	/* We assume both lists contain the same number of elements */
	while (src) {
		dst->orig_address = src->orig_address;
		dst = dst->next;
		src = src->next;
	}
}

static int check_header(struct swsusp_info *info)
{
	char *reason = NULL;

	if (info->version_code != LINUX_VERSION_CODE)
		reason = "kernel version";
	if (info->num_physpages != num_physpages)
		reason = "memory size";
	if (strcmp(info->uts.sysname,system_utsname.sysname))
		reason = "system type";
	if (strcmp(info->uts.release,system_utsname.release))
		reason = "kernel release";
	if (strcmp(info->uts.version,system_utsname.version))
		reason = "version";
	if (strcmp(info->uts.machine,system_utsname.machine))
		reason = "machine";
	if (reason) {
		printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/**
 *	load_header - check the image header and copy the data from it
 */

static int load_header(struct snapshot_handle *handle,
                              struct swsusp_info *info)
{
	int error;
	struct pbe *pblist;

	error = check_header(info);
	if (!error) {
		pblist = alloc_pagedir(info->image_pages, GFP_ATOMIC, 0);
		if (!pblist)
			return -ENOMEM;
		pagedir_nosave = pblist;
		handle->pbe = pblist;
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}

/**
 *	unpack_orig_addresses - copy the elements of @buf[] (1 page) to
 *	the PBEs in the list starting at @pbe
 */

static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
                                                struct pbe *pbe)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		pbe->orig_address = buf[j];
		pbe = pbe->next;
	}
	return pbe;
}

/**
 *	create_image - use the metadata contained in the PBE list pointed to
 *	by pagedir_nosave to mark the pages that will be overwritten while
 *	restoring the system memory state from the image, and allocate
 *	memory for the image while avoiding those pages
 */

static int create_image(struct snapshot_handle *handle)
{
	int error = 0;
	struct pbe *p, *pblist;

	p = pagedir_nosave;
	error = mark_unsafe_pages(p);
	if (!error) {
		pblist = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 1);
		if (pblist)
			copy_page_backup_list(pblist, p);
		free_pagedir(p);
		if (!pblist)
			error = -ENOMEM;
	}
	if (!error)
		error = alloc_data_pages(pblist, GFP_ATOMIC, 1);
	if (!error) {
		release_eaten_pages();
		pagedir_nosave = pblist;
	} else {
		pagedir_nosave = NULL;
		handle->pbe = NULL;
		nr_copy_pages = 0;
		nr_meta_pages = 0;
	}
	return error;
}

/**
 *	snapshot_write_next - used for writing the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function on every subsequent call.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to write to the image.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to write up to the returned number of bytes to the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the write would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the "end of file" condition,
 *	and a negative number is returned on error.  In such cases the
 *	structure pointed to by @handle is not updated and should not be used
 *	any more.
 */

int snapshot_write_next(struct snapshot_handle *handle, size_t count)
{
	int error = 0;

	if (handle->prev && handle->page > nr_meta_pages + nr_copy_pages)
		return 0;
	if (!buffer) {
		/* This makes the buffer get freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, 0);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset)
		handle->buffer = buffer;
	if (handle->prev < handle->page) {
		if (!handle->prev) {
			error = load_header(handle, (struct swsusp_info *)buffer);
			if (error)
				return error;
		} else if (handle->prev <= nr_meta_pages) {
			handle->pbe = unpack_orig_addresses(buffer, handle->pbe);
			if (!handle->pbe) {
				error = create_image(handle);
				if (error)
					return error;
				handle->pbe = pagedir_nosave;
				handle->buffer = (void *)handle->pbe->address;
			}
		} else {
			handle->pbe = handle->pbe->next;
			handle->buffer = (void *)handle->pbe->address;
		}
		handle->prev = handle->page;
	}
	handle->buf_offset = handle->page_offset;
	if (handle->page_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->page_offset;
		handle->page_offset = 0;
		handle->page++;
	} else {
		handle->page_offset += count;
	}
	handle->offset += count;
	return count;
}
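
/*
 * Illustrative sketch (not part of the build) of the resume-side
 * counterpart: feed the image back through the handle via data_of(), then
 * verify completeness with snapshot_image_loaded().  example_fill() and the
 * wrapper function are hypothetical names standing in for the real reader.
 */
#if 0
extern int example_fill(void *buf, int len);	/* hypothetical source */

static int example_stream_image_in(void)
{
	struct snapshot_handle handle;
	int n;

	memset(&handle, 0, sizeof(handle));
	while ((n = snapshot_write_next(&handle, PAGE_SIZE)) > 0) {
		if (example_fill(data_of(handle), n))
			return -EIO;
	}
	if (n < 0)
		return n;
	return snapshot_image_loaded(&handle) ? 0 : -ENODATA;
}
#endif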

int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!handle->pbe || handle->pbe->next || !nr_copy_pages ||
		handle->page <= nr_meta_pages + nr_copy_pages);
}
