/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
 *
 * This file is released under the GPLv2, and is based on swsusp.c.
 *
 */


#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

struct pbe *pagedir_nosave;
static unsigned int nr_copy_pages;
static unsigned int nr_meta_pages;
static unsigned long *buffer;

#ifdef CONFIG_HIGHMEM
unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone)
		if (is_highmem(zone)) {
			mark_free_pages(zone);
			for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
				struct page *page;
				unsigned long pfn = zone_pfn + zone->zone_start_pfn;
				if (!pfn_valid(pfn))
					continue;
				page = pfn_to_page(pfn);
				if (PageReserved(page))
					continue;
				if (PageNosaveFree(page))
					continue;
				n++;
			}
		}
	return n;
}

struct highmem_page {
	char *data;
	struct page *page;
	struct highmem_page *next;
};

static struct highmem_page *highmem_copy;
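/**
 *	save_highmem_zone - Save the saveable highmem pages of a zone.
 *
 *	Every valid highmem page that is neither reserved nor marked free is
 *	copied into a newly allocated lowmem page and chained onto the
 *	highmem_copy list, from which restore_highmem() copies it back.
 */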
static int save_highmem_zone(struct zone *zone)
{
	unsigned long zone_pfn;
	mark_free_pages(zone);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
		struct page *page;
		struct highmem_page *save;
		void *kaddr;
		unsigned long pfn = zone_pfn + zone->zone_start_pfn;

		if (!(pfn%10000))
			printk(".");
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		/*
		 * This condition results from rvmalloc() sans vmalloc_32()
		 * and architectural memory reservations. This should be
		 * corrected eventually when the cases giving rise to this
		 * are better understood.
		 */
		if (PageReserved(page))
			continue;
		BUG_ON(PageNosave(page));
		if (PageNosaveFree(page))
			continue;
		save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
		if (!save)
			return -ENOMEM;
		save->next = highmem_copy;
		save->page = page;
		save->data = (void *) get_zeroed_page(GFP_ATOMIC);
		if (!save->data) {
			kfree(save);
			return -ENOMEM;
		}
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(save->data, kaddr, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		highmem_copy = save;
	}
	return 0;
}

int save_highmem(void)
{
	struct zone *zone;
	int res = 0;

	pr_debug("swsusp: Saving Highmem");
	drain_local_pages();
	for_each_zone (zone) {
		if (is_highmem(zone))
			res = save_highmem_zone(zone);
		if (res)
			return res;
	}
	printk("\n");
	return 0;
}

int restore_highmem(void)
{
	printk("swsusp: Restoring Highmem\n");
	while (highmem_copy) {
		struct highmem_page *save = highmem_copy;
		void *kaddr;
		highmem_copy = save->next;

		kaddr = kmap_atomic(save->page, KM_USER0);
		memcpy(kaddr, save->data, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		free_page((long) save->data);
		kfree(save);
	}
	return 0;
}
#else
static inline unsigned int count_highmem_pages(void) {return 0;}
static inline int save_highmem(void) {return 0;}
static inline int restore_highmem(void) {return 0;}
#endif

static int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
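/*
 * Editorial note (not in the original source): the [__nosave_begin,
 * __nosave_end) range tested above covers the kernel's .data.nosave
 * section.  Data that must not be overwritten by the restored image can
 * typically be placed there with the __nosavedata attribute, e.g.:
 *
 *	static int pm_scratch __nosavedata;	(hypothetical example)
 */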
/**
 *	saveable - Determine whether a page should be cloned or not.
 *	@zone:		The zone the page belongs to.
 *	@zone_pfn:	The page's offset within @zone.
 *
 *	We save a page if it's Reserved, and not in the range of pages
 *	statically defined as 'unsaveable', or if it isn't reserved, and
 *	isn't part of a free chunk of pages.
 */

static int saveable(struct zone *zone, unsigned long *zone_pfn)
{
	unsigned long pfn = *zone_pfn + zone->zone_start_pfn;
	struct page *page;

	if (!pfn_valid(pfn))
		return 0;

	page = pfn_to_page(pfn);
	BUG_ON(PageReserved(page) && PageNosave(page));
	if (PageNosave(page))
		return 0;
	if (PageReserved(page) && pfn_is_nosave(pfn))
		return 0;
	if (PageNosaveFree(page))
		return 0;

	return 1;
}

unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			n += saveable(zone, &zone_pfn);
	}
	return n;
}

static void copy_data_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long zone_pfn;
	struct pbe *pbe, *p;

	pbe = pblist;
	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		/* This is necessary for swsusp_free() */
		for_each_pb_page (p, pblist)
			SetPageNosaveFree(virt_to_page(p));
		for_each_pbe (p, pblist)
			SetPageNosaveFree(virt_to_page(p->address));
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
			if (saveable(zone, &zone_pfn)) {
				struct page *page;
				long *src, *dst;
				int n;

				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
				BUG_ON(!pbe);
				pbe->orig_address = (unsigned long)page_address(page);
				/* copy_page and memcpy are not usable for copying task structs. */
				dst = (long *)pbe->address;
				src = (long *)pbe->orig_address;
				for (n = PAGE_SIZE / sizeof(long); n; n--)
					*dst++ = *src++;
				pbe = pbe->next;
			}
		}
	}
	BUG_ON(pbe);
}


/**
 *	free_pagedir - free pages allocated with alloc_pagedir()
 */

static void free_pagedir(struct pbe *pblist, int clear_nosave_free)
{
	struct pbe *pbe;

	while (pblist) {
		pbe = (pblist + PB_PAGE_SKIP)->next;
		ClearPageNosave(virt_to_page(pblist));
		if (clear_nosave_free)
			ClearPageNosaveFree(virt_to_page(pblist));
		free_page((unsigned long)pblist);
		pblist = pbe;
	}
}

/**
 *	fill_pb_page - Create a list of PBEs on a given memory page
 */

static inline void fill_pb_page(struct pbe *pbpage)
{
	struct pbe *p;

	p = pbpage;
	pbpage += PB_PAGE_SKIP;
	do
		p->next = p + 1;
	while (++p < pbpage);
}

/**
 *	create_pbe_list - Create a list of PBEs on top of a given chain
 *	of memory pages allocated with alloc_pagedir()
 */

static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
{
	struct pbe *pbpage, *p;
	unsigned int num = PBES_PER_PAGE;

	for_each_pb_page (pbpage, pblist) {
		if (num >= nr_pages)
			break;

		fill_pb_page(pbpage);
		num += PBES_PER_PAGE;
	}
	if (pbpage) {
		for (num -= PBES_PER_PAGE - 1, p = pbpage; num < nr_pages; p++, num++)
			p->next = p + 1;
		p->next = NULL;
	}
}
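/*
 * Editorial note (not in the original source): fill_pb_page() and
 * create_pbe_list() above, together with alloc_pagedir() below, build the
 * pagedir as a chain of pages, each holding PBES_PER_PAGE struct pbe slots.
 * Within a page every slot's ->next points to the following slot, and the
 * last slot's ->next points to the first slot of the next page:
 *
 *	page A: pbe[0] -> pbe[1] -> ... -> pbe[PB_PAGE_SKIP] -> page B: pbe[0] -> ...
 */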
static unsigned int unsafe_pages;

/**
 *	@safe_needed - on resume, for storing the PBE list and the image,
 *	we can only use memory pages that do not conflict with the pages
 *	used before suspend.
 *
 *	The unsafe pages are marked with the PG_nosave_free flag
 *	and we count them using unsafe_pages
 */

static inline void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && PageNosaveFree(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			SetPageNosave(virt_to_page(res));
			unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		SetPageNosave(virt_to_page(res));
		SetPageNosaveFree(virt_to_page(res));
	}
	return res;
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)alloc_image_page(gfp_mask, 1);
}

/**
 *	alloc_pagedir - Allocate the page directory.
 *
 *	First, determine exactly how many pages we need and
 *	allocate them.
 *
 *	We arrange the pages in a chain: each page is an array of PBES_PER_PAGE
 *	struct pbe elements (pbes) and the last element in the page points
 *	to the next page.
 *
 *	On each page we set up a list of struct pbe elements.
 */

static struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask,
				 int safe_needed)
{
	unsigned int num;
	struct pbe *pblist, *pbe;

	if (!nr_pages)
		return NULL;

	pblist = alloc_image_page(gfp_mask, safe_needed);
	/* FIXME: rewrite this ugly loop */
	for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages;
			pbe = pbe->next, num += PBES_PER_PAGE) {
		pbe += PB_PAGE_SKIP;
		pbe->next = alloc_image_page(gfp_mask, safe_needed);
	}
	if (!pbe) { /* get_zeroed_page() failed */
		free_pagedir(pblist, 1);
		pblist = NULL;
	} else
		create_pbe_list(pblist, nr_pages);
	return pblist;
}

/**
 *	Free pages we allocated for suspend. Suspend pages are allocated
 *	before the atomic copy, so we need to free them after resume.
 */

void swsusp_free(void)
{
	struct zone *zone;
	unsigned long zone_pfn;

	for_each_zone(zone) {
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			if (pfn_valid(zone_pfn + zone->zone_start_pfn)) {
				struct page *page;
				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
				if (PageNosave(page) && PageNosaveFree(page)) {
					ClearPageNosave(page);
					ClearPageNosaveFree(page);
					free_page((long) page_address(page));
				}
			}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	pagedir_nosave = NULL;
	buffer = NULL;
}
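/*
 * Editorial note (not in the original source): a snapshot needs roughly
 * nr_pages page frames for the copied data, nr_pages/PBES_PER_PAGE frames
 * for the pagedir that tracks them, and PAGES_FOR_IO frames reserved for
 * writing the image out.  enough_free_mem() below checks that sum against
 * the free lowmem pages; swsusp_save() prints the same breakdown.
 */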
/**
 *	enough_free_mem - Make sure we have enough free memory to snapshot.
 *
 *	Returns TRUE or FALSE after checking the number of available
 *	free pages.
 */

static int enough_free_mem(unsigned int nr_pages)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_zone (zone)
		if (!is_highmem(zone))
			n += zone->free_pages;
	pr_debug("swsusp: available memory: %u pages\n", n);
	return n > (nr_pages + PAGES_FOR_IO +
		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
}

static int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed)
{
	struct pbe *p;

	for_each_pbe (p, pblist) {
		p->address = (unsigned long)alloc_image_page(gfp_mask, safe_needed);
		if (!p->address)
			return -ENOMEM;
	}
	return 0;
}

static struct pbe *swsusp_alloc(unsigned int nr_pages)
{
	struct pbe *pblist;

	if (!(pblist = alloc_pagedir(nr_pages, GFP_ATOMIC | __GFP_COLD, 0))) {
		printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
		return NULL;
	}

	if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, 0)) {
		printk(KERN_ERR "suspend: Allocating image pages failed.\n");
		swsusp_free();
		return NULL;
	}

	return pblist;
}

asmlinkage int swsusp_save(void)
{
	unsigned int nr_pages;

	pr_debug("swsusp: critical section: \n");

	drain_local_pages();
	nr_pages = count_data_pages();
	printk("swsusp: Need to copy %u pages\n", nr_pages);

	pr_debug("swsusp: pages needed: %u + %lu + %u, free: %u\n",
		 nr_pages,
		 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE,
		 PAGES_FOR_IO, nr_free_pages());

	if (!enough_free_mem(nr_pages)) {
		printk(KERN_ERR "swsusp: Not enough free memory\n");
		return -ENOMEM;
	}

	pagedir_nosave = swsusp_alloc(nr_pages);
	if (!pagedir_nosave)
		return -ENOMEM;

	/* During allocation of the suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages();
	copy_data_pages(pagedir_nosave);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This specially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */

	nr_copy_pages = nr_pages;
	nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printk("swsusp: critical section: done (%d pages copied)\n", nr_pages);
	return 0;
}

static void init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->version_code = LINUX_VERSION_CODE;
	info->num_physpages = num_physpages;
	memcpy(&info->uts, &system_utsname, sizeof(system_utsname));
	info->cpus = num_online_cpus();
	info->image_pages = nr_copy_pages;
	info->pages = nr_copy_pages + nr_meta_pages + 1;
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
}

/**
 *	pack_orig_addresses - the .orig_address fields of the PBEs from the
 *	list starting at @pbe are stored in the array @buf[] (1 page)
 */

static inline struct pbe *pack_orig_addresses(unsigned long *buf, struct pbe *pbe)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		buf[j] = pbe->orig_address;
		pbe = pbe->next;
	}
	if (!pbe)
		for (; j < PAGE_SIZE / sizeof(long); j++)
			buf[j] = 0;
	return pbe;
}
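/*
 * Illustrative sketch only (not part of the original file): a caller is
 * expected to drive snapshot_read_next() below roughly as follows, where
 * write_page() stands for a hypothetical helper that stores one chunk of
 * image data:
 *
 *	int error;
 *
 *	while ((error = snapshot_read_next(&handle, PAGE_SIZE)) > 0) {
 *		error = write_page(data_of(handle));
 *		if (error)
 *			break;
 *	}
 *	(error == 0 here means the whole image has been handed out)
 */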
/**
 *	snapshot_read_next - used for reading the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function every next time.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to read from the snapshot.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to read up to the returned number of bytes from the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the read would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the end of data stream condition,
 *	and a negative number is returned on error.  In such cases the
 *	structure pointed to by @handle is not updated and should not be used
 *	any more.
 */

int snapshot_read_next(struct snapshot_handle *handle, size_t count)
{
	if (handle->page > nr_meta_pages + nr_copy_pages)
		return 0;
	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, 0);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset) {
		init_header((struct swsusp_info *)buffer);
		handle->buffer = buffer;
		handle->pbe = pagedir_nosave;
	}
	if (handle->prev < handle->page) {
		if (handle->page <= nr_meta_pages) {
			handle->pbe = pack_orig_addresses(buffer, handle->pbe);
			if (!handle->pbe)
				handle->pbe = pagedir_nosave;
		} else {
			handle->buffer = (void *)handle->pbe->address;
			handle->pbe = handle->pbe->next;
		}
		handle->prev = handle->page;
	}
	handle->buf_offset = handle->page_offset;
	if (handle->page_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->page_offset;
		handle->page_offset = 0;
		handle->page++;
	} else {
		handle->page_offset += count;
	}
	handle->offset += count;
	return count;
}

/**
 *	mark_unsafe_pages - mark the pages that cannot be used for storing
 *	the image during resume, because they conflict with the pages that
 *	had been used before suspend
 */

static int mark_unsafe_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long zone_pfn;
	struct pbe *p;

	if (!pblist) /* a sanity check */
		return -EINVAL;

	/* Clear page flags */
	for_each_zone (zone) {
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			if (pfn_valid(zone_pfn + zone->zone_start_pfn))
				ClearPageNosaveFree(pfn_to_page(zone_pfn +
					zone->zone_start_pfn));
	}

	/* Mark orig addresses */
	for_each_pbe (p, pblist) {
		if (virt_addr_valid(p->orig_address))
			SetPageNosaveFree(virt_to_page(p->orig_address));
		else
			return -EFAULT;
	}

	unsafe_pages = 0;

	return 0;
}

static void copy_page_backup_list(struct pbe *dst, struct pbe *src)
{
	/* We assume both lists contain the same number of elements */
	while (src) {
		dst->orig_address = src->orig_address;
		dst = dst->next;
		src = src->next;
	}
}
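/**
 *	check_header - Verify that the image header matches the running kernel.
 *
 *	The kernel version, the amount of physical memory and the utsname
 *	fields recorded at suspend time must match the resuming kernel;
 *	otherwise the mismatch is reported and -EPERM is returned.
 */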
static int check_header(struct swsusp_info *info)
{
	char *reason = NULL;

	if (info->version_code != LINUX_VERSION_CODE)
		reason = "kernel version";
	if (info->num_physpages != num_physpages)
		reason = "memory size";
	if (strcmp(info->uts.sysname,system_utsname.sysname))
		reason = "system type";
	if (strcmp(info->uts.release,system_utsname.release))
		reason = "kernel release";
	if (strcmp(info->uts.version,system_utsname.version))
		reason = "version";
	if (strcmp(info->uts.machine,system_utsname.machine))
		reason = "machine";
	if (reason) {
		printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/**
 *	load_header - check the image header and copy data from it
 */

static int load_header(struct snapshot_handle *handle,
                       struct swsusp_info *info)
{
	int error;
	struct pbe *pblist;

	error = check_header(info);
	if (!error) {
		pblist = alloc_pagedir(info->image_pages, GFP_ATOMIC, 0);
		if (!pblist)
			return -ENOMEM;
		pagedir_nosave = pblist;
		handle->pbe = pblist;
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}

/**
 *	unpack_orig_addresses - copy the elements of @buf[] (1 page) to
 *	the PBEs in the list starting at @pbe
 */

static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
                                                struct pbe *pbe)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		pbe->orig_address = buf[j];
		pbe = pbe->next;
	}
	return pbe;
}

/**
 *	prepare_image - use metadata contained in the PBE list
 *	pointed to by pagedir_nosave to mark the pages that will
 *	be overwritten in the process of restoring the system
 *	memory state from the image ("unsafe" pages) and allocate
 *	memory for the image
 *
 *	The idea is to allocate the PBE list first and then
 *	allocate as many pages as are needed for the image data,
 *	but not to assign these pages to the PBEs initially.
 *	Instead, we just mark them as allocated and create a list
 *	of "safe" pages which will be used later.
 */

struct safe_page {
	struct safe_page *next;
	char padding[PAGE_SIZE - sizeof(void *)];
};

static struct safe_page *safe_pages;

static int prepare_image(struct snapshot_handle *handle)
{
	int error = 0;
	unsigned int nr_pages = nr_copy_pages;
	struct pbe *p, *pblist = NULL;

	p = pagedir_nosave;
	error = mark_unsafe_pages(p);
	if (!error) {
		pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, 1);
		if (pblist)
			copy_page_backup_list(pblist, p);
		free_pagedir(p, 0);
		if (!pblist)
			error = -ENOMEM;
	}
	safe_pages = NULL;
	if (!error && nr_pages > unsafe_pages) {
		nr_pages -= unsafe_pages;
		while (nr_pages--) {
			struct safe_page *ptr;

			ptr = (struct safe_page *)get_zeroed_page(GFP_ATOMIC);
			if (!ptr) {
				error = -ENOMEM;
				break;
			}
			if (!PageNosaveFree(virt_to_page(ptr))) {
				/* The page is "safe", add it to the list */
				ptr->next = safe_pages;
				safe_pages = ptr;
			}
			/* Mark the page as allocated */
			SetPageNosave(virt_to_page(ptr));
			SetPageNosaveFree(virt_to_page(ptr));
		}
	}
	if (!error) {
		pagedir_nosave = pblist;
	} else {
		handle->pbe = NULL;
		swsusp_free();
	}
	return error;
}
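/**
 *	get_buffer - Get the address to store the next page of the image.
 *
 *	If the page frame at the "original" address of the current PBE has
 *	been allocated by us (both PG_nosave and PG_nosave_free are set), the
 *	data can be read straight into it.  Otherwise a frame is taken from
 *	the safe_pages list and the PBE is kept on the list of pages that
 *	still have to be moved into place after the image has been loaded.
 */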
static void *get_buffer(struct snapshot_handle *handle)
{
	struct pbe *pbe = handle->pbe, *last = handle->last_pbe;
	struct page *page = virt_to_page(pbe->orig_address);

	if (PageNosave(page) && PageNosaveFree(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the read page
		 */
		pbe->address = 0;
		if (last && last->next)
			last->next = NULL;
		return (void *)pbe->orig_address;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the read page
	 */
	pbe->address = (unsigned long)safe_pages;
	safe_pages = safe_pages->next;
	if (last)
		last->next = pbe;
	handle->last_pbe = pbe;
	return (void *)pbe->address;
}

/**
 *	snapshot_write_next - used for writing the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function every next time.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to write to the image.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to write up to the returned number of bytes to the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the write would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the "end of file" condition,
 *	and a negative number is returned on error.  In such cases the
 *	structure pointed to by @handle is not updated and should not be used
 *	any more.
 */

int snapshot_write_next(struct snapshot_handle *handle, size_t count)
{
	int error = 0;

	if (handle->prev && handle->page > nr_meta_pages + nr_copy_pages)
		return 0;
	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, 0);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset)
		handle->buffer = buffer;
	if (handle->prev < handle->page) {
		if (!handle->prev) {
			error = load_header(handle, (struct swsusp_info *)buffer);
			if (error)
				return error;
		} else if (handle->prev <= nr_meta_pages) {
			handle->pbe = unpack_orig_addresses(buffer, handle->pbe);
			if (!handle->pbe) {
				error = prepare_image(handle);
				if (error)
					return error;
				handle->pbe = pagedir_nosave;
				handle->last_pbe = NULL;
				handle->buffer = get_buffer(handle);
			}
		} else {
			handle->pbe = handle->pbe->next;
			handle->buffer = get_buffer(handle);
		}
		handle->prev = handle->page;
	}
	handle->buf_offset = handle->page_offset;
	if (handle->page_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->page_offset;
		handle->page_offset = 0;
		handle->page++;
	} else {
		handle->page_offset += count;
	}
	handle->offset += count;
	return count;
}

int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!handle->pbe || handle->pbe->next || !nr_copy_pages ||
		handle->page <= nr_meta_pages + nr_copy_pages);
}
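/*
 * Illustrative sketch only (not part of the original file): on resume, the
 * image reader is expected to feed data to snapshot_write_next() and then
 * verify that a complete image was loaded, roughly like this (read_page()
 * stands for a hypothetical helper that fetches one chunk of image data):
 *
 *	int error;
 *
 *	while ((error = snapshot_write_next(&handle, PAGE_SIZE)) > 0) {
 *		error = read_page(data_of(handle));
 *		if (error)
 *			break;
 *	}
 *	if (!error && !snapshot_image_loaded(&handle))
 *		error = -ENODATA;
 */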