/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
 *
 * This file is released under the GPLv2, and is based on swsusp.c.
 *
 */


#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

struct pbe *pagedir_nosave;
static unsigned int nr_copy_pages;
static unsigned int nr_meta_pages;
static unsigned long *buffer;

#ifdef CONFIG_HIGHMEM
unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone)
		if (is_highmem(zone)) {
			mark_free_pages(zone);
			for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
				struct page *page;
				unsigned long pfn = zone_pfn + zone->zone_start_pfn;
				if (!pfn_valid(pfn))
					continue;
				page = pfn_to_page(pfn);
				if (PageReserved(page))
					continue;
				if (PageNosaveFree(page))
					continue;
				n++;
			}
		}
	return n;
}

struct highmem_page {
	char *data;
	struct page *page;
	struct highmem_page *next;
};

static struct highmem_page *highmem_copy;

static int save_highmem_zone(struct zone *zone)
{
	unsigned long zone_pfn;
	mark_free_pages(zone);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
		struct page *page;
		struct highmem_page *save;
		void *kaddr;
		unsigned long pfn = zone_pfn + zone->zone_start_pfn;

		if (!(pfn%10000))
			printk(".");
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		/*
		 * This condition results from rvmalloc() sans vmalloc_32()
		 * and architectural memory reservations. This should be
		 * corrected eventually when the cases giving rise to this
		 * are better understood.
		 */
		if (PageReserved(page))
			continue;
		BUG_ON(PageNosave(page));
		if (PageNosaveFree(page))
			continue;
		save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
		if (!save)
			return -ENOMEM;
		save->next = highmem_copy;
		save->page = page;
		save->data = (void *) get_zeroed_page(GFP_ATOMIC);
		if (!save->data) {
			kfree(save);
			return -ENOMEM;
		}
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(save->data, kaddr, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		highmem_copy = save;
	}
	return 0;
}

int save_highmem(void)
{
	struct zone *zone;
	int res = 0;

	pr_debug("swsusp: Saving Highmem");
	drain_local_pages();
	for_each_zone (zone) {
		if (is_highmem(zone))
			res = save_highmem_zone(zone);
		if (res)
			return res;
	}
	printk("\n");
	return 0;
}

int restore_highmem(void)
{
	printk("swsusp: Restoring Highmem\n");
	while (highmem_copy) {
		struct highmem_page *save = highmem_copy;
		void *kaddr;
		highmem_copy = save->next;

		kaddr = kmap_atomic(save->page, KM_USER0);
		memcpy(kaddr, save->data, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		free_page((long) save->data);
		kfree(save);
	}
	return 0;
}
#else
static inline unsigned int count_highmem_pages(void) {return 0;}
static inline int save_highmem(void) {return 0;}
static inline int restore_highmem(void) {return 0;}
#endif

static int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
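
/*
 * Illustrative note (a sketch, not code from this file): the range
 * checked above is the kernel's .data.nosave section, delimited by
 * __nosave_begin/__nosave_end.  Data lands there when it is tagged
 * with the __nosavedata attribute, for example:
 *
 *	static int in_suspend __nosavedata;
 *
 * Pages in that range are never copied into the image nor overwritten
 * on restore, so such variables keep the values the boot kernel gave
 * them across a resume.
 */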

/**
 *	saveable - Determine whether a page should be cloned or not.
 *	@zone:		zone the page belongs to
 *	@zone_pfn:	offset of the page's pfn within @zone
 *
 *	We save a page if it's Reserved and not in the range of pages
 *	statically defined as 'unsaveable', or if it isn't Reserved and
 *	isn't part of a free chunk of pages.
 */

static int saveable(struct zone *zone, unsigned long *zone_pfn)
{
	unsigned long pfn = *zone_pfn + zone->zone_start_pfn;
	struct page *page;

	if (!pfn_valid(pfn))
		return 0;

	page = pfn_to_page(pfn);
	BUG_ON(PageReserved(page) && PageNosave(page));
	if (PageNosave(page))
		return 0;
	if (PageReserved(page) && pfn_is_nosave(pfn))
		return 0;
	if (PageNosaveFree(page))
		return 0;

	return 1;
}

unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			n += saveable(zone, &zone_pfn);
	}
	return n;
}

static void copy_data_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long zone_pfn;
	struct pbe *pbe, *p;

	pbe = pblist;
	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		/* This is necessary for swsusp_free() */
		for_each_pb_page (p, pblist)
			SetPageNosaveFree(virt_to_page(p));
		for_each_pbe (p, pblist)
			SetPageNosaveFree(virt_to_page(p->address));
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
			if (saveable(zone, &zone_pfn)) {
				struct page *page;
				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
				BUG_ON(!pbe);
				pbe->orig_address = (unsigned long)page_address(page);
				/* copy_page is not usable for copying task structs. */
				memcpy((void *)pbe->address, (void *)pbe->orig_address, PAGE_SIZE);
				pbe = pbe->next;
			}
		}
	}
	BUG_ON(pbe);
}


/**
 *	free_pagedir - free pages allocated with alloc_pagedir()
 */

static void free_pagedir(struct pbe *pblist, int clear_nosave_free)
{
	struct pbe *pbe;

	while (pblist) {
		pbe = (pblist + PB_PAGE_SKIP)->next;
		ClearPageNosave(virt_to_page(pblist));
		if (clear_nosave_free)
			ClearPageNosaveFree(virt_to_page(pblist));
		free_page((unsigned long)pblist);
		pblist = pbe;
	}
}

/**
 *	fill_pb_page - Create a list of PBEs on a given memory page
 */

static inline void fill_pb_page(struct pbe *pbpage)
{
	struct pbe *p;

	p = pbpage;
	pbpage += PB_PAGE_SKIP;
	do
		p->next = p + 1;
	while (++p < pbpage);
}

/**
 *	create_pbe_list - Create a list of PBEs on top of a given chain
 *	of memory pages allocated with alloc_pagedir()
 */

static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
{
	struct pbe *pbpage, *p;
	unsigned int num = PBES_PER_PAGE;

	for_each_pb_page (pbpage, pblist) {
		if (num >= nr_pages)
			break;

		fill_pb_page(pbpage);
		num += PBES_PER_PAGE;
	}
	if (pbpage) {
		for (num -= PBES_PER_PAGE - 1, p = pbpage; num < nr_pages; p++, num++)
			p->next = p + 1;
		p->next = NULL;
	}
}
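
/*
 * Illustrative sketch of the resulting layout (sizes are assumptions
 * for 4 KiB pages with a three-word struct pbe, giving a PBES_PER_PAGE
 * of a few hundred):
 *
 *	page 0:	[pbe 0][pbe 1] ... [pbe PB_PAGE_SKIP] --next--> page 1
 *	page 1:	[pbe 0][pbe 1] ... [pbe PB_PAGE_SKIP] --next--> page 2
 *	...
 *	last:	[pbe 0] ... [last pbe used], ->next == NULL
 *
 * Callers that want the logical list rather than the page chain walk it
 * with for_each_pbe(), which follows the page links transparently, e.g.
 * (process_page() is a hypothetical helper):
 *
 *	struct pbe *p;
 *
 *	for_each_pbe (p, pblist)
 *		process_page((void *)p->address);
 */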

static unsigned int unsafe_pages;

/**
 *	@safe_needed - on resume, for storing the PBE list and the image,
 *	we can only use memory pages that do not conflict with the pages
 *	used before suspend.
 *
 *	The unsafe pages are marked with the PG_nosave_free flag
 *	and we count them using unsafe_pages
 */

static inline void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && PageNosaveFree(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			SetPageNosave(virt_to_page(res));
			unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		SetPageNosave(virt_to_page(res));
		SetPageNosaveFree(virt_to_page(res));
	}
	return res;
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)alloc_image_page(gfp_mask, 1);
}

/**
 *	alloc_pagedir - Allocate the page directory.
 *
 *	First, determine exactly how many pages we need and
 *	allocate them.
 *
 *	We arrange the pages in a chain: each page is an array of PBES_PER_PAGE
 *	struct pbe elements (pbes) and the last element in the page points
 *	to the next page.
 *
 *	On each page we set up a list of struct pbe elements.
 */

static struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask,
				 int safe_needed)
{
	unsigned int num;
	struct pbe *pblist, *pbe;

	if (!nr_pages)
		return NULL;

	pblist = alloc_image_page(gfp_mask, safe_needed);
	/* FIXME: rewrite this ugly loop */
	for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages;
			pbe = pbe->next, num += PBES_PER_PAGE) {
		pbe += PB_PAGE_SKIP;
		pbe->next = alloc_image_page(gfp_mask, safe_needed);
	}
	if (!pbe) { /* get_zeroed_page() failed */
		free_pagedir(pblist, 1);
		pblist = NULL;
	} else
		create_pbe_list(pblist, nr_pages);
	return pblist;
}
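
/*
 * Illustrative arithmetic (assuming a 32-bit box with 4 KiB pages and
 * a 12-byte struct pbe, so PBES_PER_PAGE == 341): alloc_pagedir() grabs
 * one page per PBES_PER_PAGE image pages, i.e.
 *
 *	pagedir_pages = (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE;
 *
 * so an image of 30000 pages needs (30000 + 340) / 341 == 88 pagedir
 * pages on top of the 30000 data pages, which is also the middle term
 * of the budget checked by enough_free_mem() below.
 */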

/**
 *	Free pages we allocated for suspend. Suspend pages are allocated
 *	before the atomic copy, so we need to free them after resume.
 */

void swsusp_free(void)
{
	struct zone *zone;
	unsigned long zone_pfn;

	for_each_zone(zone) {
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			if (pfn_valid(zone_pfn + zone->zone_start_pfn)) {
				struct page *page;
				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
				if (PageNosave(page) && PageNosaveFree(page)) {
					ClearPageNosave(page);
					ClearPageNosaveFree(page);
					free_page((long) page_address(page));
				}
			}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	pagedir_nosave = NULL;
	buffer = NULL;
}


/**
 *	enough_free_mem - Make sure we have enough free memory to snapshot.
 *
 *	Returns TRUE or FALSE after checking the number of available
 *	free pages.
 */

static int enough_free_mem(unsigned int nr_pages)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_zone (zone)
		if (!is_highmem(zone))
			n += zone->free_pages;
	pr_debug("swsusp: available memory: %u pages\n", n);
	return n > (nr_pages + PAGES_FOR_IO +
		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
}

static int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed)
{
	struct pbe *p;

	for_each_pbe (p, pblist) {
		p->address = (unsigned long)alloc_image_page(gfp_mask, safe_needed);
		if (!p->address)
			return -ENOMEM;
	}
	return 0;
}

static struct pbe *swsusp_alloc(unsigned int nr_pages)
{
	struct pbe *pblist;

	if (!(pblist = alloc_pagedir(nr_pages, GFP_ATOMIC | __GFP_COLD, 0))) {
		printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
		return NULL;
	}

	if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, 0)) {
		printk(KERN_ERR "suspend: Allocating image pages failed.\n");
		swsusp_free();
		return NULL;
	}

	return pblist;
}

asmlinkage int swsusp_save(void)
{
	unsigned int nr_pages;

	pr_debug("swsusp: critical section: \n");

	drain_local_pages();
	nr_pages = count_data_pages();
	printk("swsusp: Need to copy %u pages\n", nr_pages);

	pr_debug("swsusp: pages needed: %u + %lu + %u, free: %u\n",
		 nr_pages,
		 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE,
		 PAGES_FOR_IO, nr_free_pages());

	if (!enough_free_mem(nr_pages)) {
		printk(KERN_ERR "swsusp: Not enough free memory\n");
		return -ENOMEM;
	}

	pagedir_nosave = swsusp_alloc(nr_pages);
	if (!pagedir_nosave)
		return -ENOMEM;

	/* While allocating the suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages();
	copy_data_pages(pagedir_nosave);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This especially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */

	nr_copy_pages = nr_pages;
	nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printk("swsusp: critical section: done (%d pages copied)\n", nr_pages);
	return 0;
}

static void init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->version_code = LINUX_VERSION_CODE;
	info->num_physpages = num_physpages;
	memcpy(&info->uts, &system_utsname, sizeof(system_utsname));
	info->cpus = num_online_cpus();
	info->image_pages = nr_copy_pages;
	info->pages = nr_copy_pages + nr_meta_pages + 1;
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
}

/**
 *	pack_orig_addresses - the .orig_address fields of the PBEs from the
 *	list starting at @pbe are stored in the array @buf[] (1 page)
 */

static inline struct pbe *pack_orig_addresses(unsigned long *buf, struct pbe *pbe)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		buf[j] = pbe->orig_address;
		pbe = pbe->next;
	}
	if (!pbe)
		for (; j < PAGE_SIZE / sizeof(long); j++)
			buf[j] = 0;
	return pbe;
}
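
/*
 * Illustrative arithmetic (assuming 4 KiB pages and 4-byte longs): one
 * metadata page filled by pack_orig_addresses() holds
 * PAGE_SIZE / sizeof(long) == 1024 original addresses, so an image of
 * nr_copy_pages == 30000 is described by
 *
 *	nr_meta_pages = (30000 * 4 + 4095) >> 12 == 30
 *
 * metadata pages, which is why init_header() sets info->pages to
 * nr_copy_pages + nr_meta_pages + 1 (the extra page is the header
 * itself).
 */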

/**
 *	snapshot_read_next - used for reading the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure. The structure gets updated and a pointer
 *	to it should be passed to this function on each subsequent call.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to read from the snapshot. It must not be zero.
 *
 *	On success the function returns a positive number. Then, the caller
 *	is allowed to read up to the returned number of bytes from the memory
 *	location computed by the data_of() macro. The number returned
 *	may be smaller than @count, but this only happens if the read would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the end of the data stream,
 *	and a negative number is returned on error. In such cases the
 *	structure pointed to by @handle is not updated and should not be used
 *	any more.
 */

int snapshot_read_next(struct snapshot_handle *handle, size_t count)
{
	if (handle->page > nr_meta_pages + nr_copy_pages)
		return 0;
	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, 0);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset) {
		init_header((struct swsusp_info *)buffer);
		handle->buffer = buffer;
		handle->pbe = pagedir_nosave;
	}
	if (handle->prev < handle->page) {
		if (handle->page <= nr_meta_pages) {
			handle->pbe = pack_orig_addresses(buffer, handle->pbe);
			if (!handle->pbe)
				handle->pbe = pagedir_nosave;
		} else {
			handle->buffer = (void *)handle->pbe->address;
			handle->pbe = handle->pbe->next;
		}
		handle->prev = handle->page;
	}
	handle->buf_offset = handle->page_offset;
	if (handle->page_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->page_offset;
		handle->page_offset = 0;
		handle->page++;
	} else {
		handle->page_offset += count;
	}
	handle->offset += count;
	return count;
}
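
/*
 * Illustrative sketch of a reader (hypothetical caller with error
 * handling elided; write_page() is an assumed helper that pushes a
 * chunk to storage, not part of this file):
 *
 *	struct snapshot_handle handle;
 *	int res;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((res = snapshot_read_next(&handle, PAGE_SIZE)) > 0) {
 *		if (write_page(data_of(handle), res))
 *			break;
 *	}
 *
 * res == 0 at loop exit means the entire image has been read out.
 */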

/**
 *	mark_unsafe_pages - mark the pages that cannot be used for storing
 *	the image during resume, because they conflict with the pages that
 *	had been used before suspend
 */

static int mark_unsafe_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long zone_pfn;
	struct pbe *p;

	if (!pblist) /* a sanity check */
		return -EINVAL;

	/* Clear page flags */
	for_each_zone (zone) {
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			if (pfn_valid(zone_pfn + zone->zone_start_pfn))
				ClearPageNosaveFree(pfn_to_page(zone_pfn +
					zone->zone_start_pfn));
	}

	/* Mark orig addresses */
	for_each_pbe (p, pblist) {
		if (virt_addr_valid(p->orig_address))
			SetPageNosaveFree(virt_to_page(p->orig_address));
		else
			return -EFAULT;
	}

	unsafe_pages = 0;

	return 0;
}

static void copy_page_backup_list(struct pbe *dst, struct pbe *src)
{
	/* We assume both lists contain the same number of elements */
	while (src) {
		dst->orig_address = src->orig_address;
		dst = dst->next;
		src = src->next;
	}
}

static int check_header(struct swsusp_info *info)
{
	char *reason = NULL;

	if (info->version_code != LINUX_VERSION_CODE)
		reason = "kernel version";
	if (info->num_physpages != num_physpages)
		reason = "memory size";
	if (strcmp(info->uts.sysname, system_utsname.sysname))
		reason = "system type";
	if (strcmp(info->uts.release, system_utsname.release))
		reason = "kernel release";
	if (strcmp(info->uts.version, system_utsname.version))
		reason = "version";
	if (strcmp(info->uts.machine, system_utsname.machine))
		reason = "machine";
	if (reason) {
		printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/**
 *	load_header - check the image header and copy the data from it
 */

static int load_header(struct snapshot_handle *handle,
			struct swsusp_info *info)
{
	int error;
	struct pbe *pblist;

	error = check_header(info);
	if (!error) {
		pblist = alloc_pagedir(info->image_pages, GFP_ATOMIC, 0);
		if (!pblist)
			return -ENOMEM;
		pagedir_nosave = pblist;
		handle->pbe = pblist;
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}

/**
 *	unpack_orig_addresses - copy the elements of @buf[] (1 page) to
 *	the PBEs in the list starting at @pbe
 */

static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
						struct pbe *pbe)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		pbe->orig_address = buf[j];
		pbe = pbe->next;
	}
	return pbe;
}

/**
 *	prepare_image - use metadata contained in the PBE list
 *	pointed to by pagedir_nosave to mark the pages that will
 *	be overwritten in the process of restoring the system
 *	memory state from the image ("unsafe" pages) and allocate
 *	memory for the image
 *
 *	The idea is to allocate the PBE list first and then
 *	allocate as many pages as needed for the image data,
 *	but not to assign these pages to the PBEs initially.
 *	Instead, we just mark them as allocated and create a list
 *	of "safe" pages, which will be used later.
 */

struct safe_page {
	struct safe_page *next;
	char padding[PAGE_SIZE - sizeof(void *)];
};

static struct safe_page *safe_pages;

static int prepare_image(struct snapshot_handle *handle)
{
	int error = 0;
	unsigned int nr_pages = nr_copy_pages;
	struct pbe *p, *pblist = NULL;

	p = pagedir_nosave;
	error = mark_unsafe_pages(p);
	if (!error) {
		pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, 1);
		if (pblist)
			copy_page_backup_list(pblist, p);
		free_pagedir(p, 0);
		if (!pblist)
			error = -ENOMEM;
	}
	safe_pages = NULL;
	if (!error && nr_pages > unsafe_pages) {
		nr_pages -= unsafe_pages;
		while (nr_pages--) {
			struct safe_page *ptr;

			ptr = (struct safe_page *)get_zeroed_page(GFP_ATOMIC);
			if (!ptr) {
				error = -ENOMEM;
				break;
			}
			if (!PageNosaveFree(virt_to_page(ptr))) {
				/* The page is "safe", add it to the list */
				ptr->next = safe_pages;
				safe_pages = ptr;
			}
			/* Mark the page as allocated */
			SetPageNosave(virt_to_page(ptr));
			SetPageNosaveFree(virt_to_page(ptr));
		}
	}
	if (!error) {
		pagedir_nosave = pblist;
	} else {
		handle->pbe = NULL;
		swsusp_free();
	}
	return error;
}
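
/*
 * Illustrative accounting (the numbers below are assumptions): suppose
 * the image holds nr_copy_pages == 1000 and the safe_needed allocations
 * above ended up grabbing unsafe_pages == 200 of the image's original
 * page frames. Those 200 frames are now owned by us, so their image
 * pages can later be loaded straight into place; prepare_image()
 * therefore reserves only 1000 - 200 == 800 spare "safe" pages, which
 * get_buffer() below hands out one at a time.
 */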

static void *get_buffer(struct snapshot_handle *handle)
{
	struct pbe *pbe = handle->pbe, *last = handle->last_pbe;
	struct page *page = virt_to_page(pbe->orig_address);

	if (PageNosave(page) && PageNosaveFree(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the read page
		 */
		pbe->address = 0;
		if (last && last->next)
			last->next = NULL;
		return (void *)pbe->orig_address;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the read page
	 */
	pbe->address = (unsigned long)safe_pages;
	safe_pages = safe_pages->next;
	if (last)
		last->next = pbe;
	handle->last_pbe = pbe;
	return (void *)pbe->address;
}

/**
 *	snapshot_write_next - used for writing the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure. The structure gets updated and a pointer
 *	to it should be passed to this function on each subsequent call.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to write to the image. It must not be zero.
 *
 *	On success the function returns a positive number. Then, the caller
 *	is allowed to write up to the returned number of bytes to the memory
 *	location computed by the data_of() macro. The number returned
 *	may be smaller than @count, but this only happens if the write would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the "end of file" condition,
 *	and a negative number is returned on error. In such cases the
 *	structure pointed to by @handle is not updated and should not be used
 *	any more.
 */

int snapshot_write_next(struct snapshot_handle *handle, size_t count)
{
	int error = 0;

	if (handle->prev && handle->page > nr_meta_pages + nr_copy_pages)
		return 0;
	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, 0);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset)
		handle->buffer = buffer;
	if (handle->prev < handle->page) {
		if (!handle->prev) {
			error = load_header(handle, (struct swsusp_info *)buffer);
			if (error)
				return error;
		} else if (handle->prev <= nr_meta_pages) {
			handle->pbe = unpack_orig_addresses(buffer, handle->pbe);
			if (!handle->pbe) {
				error = prepare_image(handle);
				if (error)
					return error;
				handle->pbe = pagedir_nosave;
				handle->last_pbe = NULL;
				handle->buffer = get_buffer(handle);
			}
		} else {
			handle->pbe = handle->pbe->next;
			handle->buffer = get_buffer(handle);
		}
		handle->prev = handle->page;
	}
	handle->buf_offset = handle->page_offset;
	if (handle->page_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->page_offset;
		handle->page_offset = 0;
		handle->page++;
	} else {
		handle->page_offset += count;
	}
	handle->offset += count;
	return count;
}

int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!handle->pbe || handle->pbe->next || !nr_copy_pages ||
		handle->page <= nr_meta_pages + nr_copy_pages);
}
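
/*
 * Illustrative sketch of a writer (hypothetical caller with error
 * handling elided; read_page() is an assumed helper that pulls the
 * next chunk from storage, not part of this file): restore feeds the
 * image back in with the mirror of the read loop, then checks that a
 * complete image arrived:
 *
 *	struct snapshot_handle handle;
 *	int res;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((res = snapshot_write_next(&handle, PAGE_SIZE)) > 0) {
 *		if (read_page(data_of(handle), res))
 *			break;
 *	}
 *	if (res >= 0 && !snapshot_image_loaded(&handle))
 *		res = -ENODATA;
 */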