1 /* 2 * linux/kernel/power/swap.c 3 * 4 * This file provides functions for reading the suspend image from 5 * and writing it to a swap partition. 6 * 7 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz> 8 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> 9 * 10 * This file is released under the GPLv2. 11 * 12 */ 13 14 #include <linux/module.h> 15 #include <linux/file.h> 16 #include <linux/delay.h> 17 #include <linux/bitops.h> 18 #include <linux/genhd.h> 19 #include <linux/device.h> 20 #include <linux/buffer_head.h> 21 #include <linux/bio.h> 22 #include <linux/blkdev.h> 23 #include <linux/swap.h> 24 #include <linux/swapops.h> 25 #include <linux/pm.h> 26 #include <linux/slab.h> 27 #include <linux/lzo.h> 28 #include <linux/vmalloc.h> 29 30 #include "power.h" 31 32 #define HIBERNATE_SIG "LINHIB0001" 33 34 /* 35 * The swap map is a data structure used for keeping track of each page 36 * written to a swap partition. It consists of many swap_map_page 37 * structures that contain each an array of MAP_PAGE_ENTRIES swap entries. 38 * These structures are stored on the swap and linked together with the 39 * help of the .next_swap member. 40 * 41 * The swap map is created during suspend. The swap map pages are 42 * allocated and populated one at a time, so we only need one memory 43 * page to set up the entire structure. 44 * 45 * During resume we also only need to use one swap_map_page structure 46 * at a time. 47 */ 48 49 #define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1) 50 51 struct swap_map_page { 52 sector_t entries[MAP_PAGE_ENTRIES]; 53 sector_t next_swap; 54 }; 55 56 /** 57 * The swap_map_handle structure is used for handling swap in 58 * a file-alike way 59 */ 60 61 struct swap_map_handle { 62 struct swap_map_page *cur; 63 sector_t cur_swap; 64 sector_t first_sector; 65 unsigned int k; 66 }; 67 68 struct swsusp_header { 69 char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int)]; 70 sector_t image; 71 unsigned int flags; /* Flags to pass to the "boot" kernel */ 72 char orig_sig[10]; 73 char sig[10]; 74 } __attribute__((packed)); 75 76 static struct swsusp_header *swsusp_header; 77 78 /** 79 * The following functions are used for tracing the allocated 80 * swap pages, so that they can be freed in case of an error. 81 */ 82 83 struct swsusp_extent { 84 struct rb_node node; 85 unsigned long start; 86 unsigned long end; 87 }; 88 89 static struct rb_root swsusp_extents = RB_ROOT; 90 91 static int swsusp_extents_insert(unsigned long swap_offset) 92 { 93 struct rb_node **new = &(swsusp_extents.rb_node); 94 struct rb_node *parent = NULL; 95 struct swsusp_extent *ext; 96 97 /* Figure out where to put the new node */ 98 while (*new) { 99 ext = container_of(*new, struct swsusp_extent, node); 100 parent = *new; 101 if (swap_offset < ext->start) { 102 /* Try to merge */ 103 if (swap_offset == ext->start - 1) { 104 ext->start--; 105 return 0; 106 } 107 new = &((*new)->rb_left); 108 } else if (swap_offset > ext->end) { 109 /* Try to merge */ 110 if (swap_offset == ext->end + 1) { 111 ext->end++; 112 return 0; 113 } 114 new = &((*new)->rb_right); 115 } else { 116 /* It already is in the tree */ 117 return -EINVAL; 118 } 119 } 120 /* Add the new node and rebalance the tree. */ 121 ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL); 122 if (!ext) 123 return -ENOMEM; 124 125 ext->start = swap_offset; 126 ext->end = swap_offset; 127 rb_link_node(&ext->node, parent, new); 128 rb_insert_color(&ext->node, &swsusp_extents); 129 return 0; 130 } 131 132 /** 133 * alloc_swapdev_block - allocate a swap page and register that it has 134 * been allocated, so that it can be freed in case of an error. 135 */ 136 137 sector_t alloc_swapdev_block(int swap) 138 { 139 unsigned long offset; 140 141 offset = swp_offset(get_swap_page_of_type(swap)); 142 if (offset) { 143 if (swsusp_extents_insert(offset)) 144 swap_free(swp_entry(swap, offset)); 145 else 146 return swapdev_block(swap, offset); 147 } 148 return 0; 149 } 150 151 /** 152 * free_all_swap_pages - free swap pages allocated for saving image data. 153 * It also frees the extents used to register which swap entries had been 154 * allocated. 155 */ 156 157 void free_all_swap_pages(int swap) 158 { 159 struct rb_node *node; 160 161 while ((node = swsusp_extents.rb_node)) { 162 struct swsusp_extent *ext; 163 unsigned long offset; 164 165 ext = container_of(node, struct swsusp_extent, node); 166 rb_erase(node, &swsusp_extents); 167 for (offset = ext->start; offset <= ext->end; offset++) 168 swap_free(swp_entry(swap, offset)); 169 170 kfree(ext); 171 } 172 } 173 174 int swsusp_swap_in_use(void) 175 { 176 return (swsusp_extents.rb_node != NULL); 177 } 178 179 /* 180 * General things 181 */ 182 183 static unsigned short root_swap = 0xffff; 184 struct block_device *hib_resume_bdev; 185 186 /* 187 * Saving part 188 */ 189 190 static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags) 191 { 192 int error; 193 194 hib_bio_read_page(swsusp_resume_block, swsusp_header, NULL); 195 if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) || 196 !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) { 197 memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10); 198 memcpy(swsusp_header->sig, HIBERNATE_SIG, 10); 199 swsusp_header->image = handle->first_sector; 200 swsusp_header->flags = flags; 201 error = hib_bio_write_page(swsusp_resume_block, 202 swsusp_header, NULL); 203 } else { 204 printk(KERN_ERR "PM: Swap header not found!\n"); 205 error = -ENODEV; 206 } 207 return error; 208 } 209 210 /** 211 * swsusp_swap_check - check if the resume device is a swap device 212 * and get its index (if so) 213 * 214 * This is called before saving image 215 */ 216 static int swsusp_swap_check(void) 217 { 218 int res; 219 220 res = swap_type_of(swsusp_resume_device, swsusp_resume_block, 221 &hib_resume_bdev); 222 if (res < 0) 223 return res; 224 225 root_swap = res; 226 res = blkdev_get(hib_resume_bdev, FMODE_WRITE); 227 if (res) 228 return res; 229 230 res = set_blocksize(hib_resume_bdev, PAGE_SIZE); 231 if (res < 0) 232 blkdev_put(hib_resume_bdev, FMODE_WRITE); 233 234 return res; 235 } 236 237 /** 238 * write_page - Write one page to given swap location. 239 * @buf: Address we're writing. 240 * @offset: Offset of the swap page we're writing to. 241 * @bio_chain: Link the next write BIO here 242 */ 243 244 static int write_page(void *buf, sector_t offset, struct bio **bio_chain) 245 { 246 void *src; 247 248 if (!offset) 249 return -ENOSPC; 250 251 if (bio_chain) { 252 src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); 253 if (src) { 254 memcpy(src, buf, PAGE_SIZE); 255 } else { 256 WARN_ON_ONCE(1); 257 bio_chain = NULL; /* Go synchronous */ 258 src = buf; 259 } 260 } else { 261 src = buf; 262 } 263 return hib_bio_write_page(offset, src, bio_chain); 264 } 265 266 static void release_swap_writer(struct swap_map_handle *handle) 267 { 268 if (handle->cur) 269 free_page((unsigned long)handle->cur); 270 handle->cur = NULL; 271 } 272 273 static int get_swap_writer(struct swap_map_handle *handle) 274 { 275 int ret; 276 277 ret = swsusp_swap_check(); 278 if (ret) { 279 if (ret != -ENOSPC) 280 printk(KERN_ERR "PM: Cannot find swap device, try " 281 "swapon -a.\n"); 282 return ret; 283 } 284 handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL); 285 if (!handle->cur) { 286 ret = -ENOMEM; 287 goto err_close; 288 } 289 handle->cur_swap = alloc_swapdev_block(root_swap); 290 if (!handle->cur_swap) { 291 ret = -ENOSPC; 292 goto err_rel; 293 } 294 handle->k = 0; 295 handle->first_sector = handle->cur_swap; 296 return 0; 297 err_rel: 298 release_swap_writer(handle); 299 err_close: 300 swsusp_close(FMODE_WRITE); 301 return ret; 302 } 303 304 static int swap_write_page(struct swap_map_handle *handle, void *buf, 305 struct bio **bio_chain) 306 { 307 int error = 0; 308 sector_t offset; 309 310 if (!handle->cur) 311 return -EINVAL; 312 offset = alloc_swapdev_block(root_swap); 313 error = write_page(buf, offset, bio_chain); 314 if (error) 315 return error; 316 handle->cur->entries[handle->k++] = offset; 317 if (handle->k >= MAP_PAGE_ENTRIES) { 318 error = hib_wait_on_bio_chain(bio_chain); 319 if (error) 320 goto out; 321 offset = alloc_swapdev_block(root_swap); 322 if (!offset) 323 return -ENOSPC; 324 handle->cur->next_swap = offset; 325 error = write_page(handle->cur, handle->cur_swap, NULL); 326 if (error) 327 goto out; 328 memset(handle->cur, 0, PAGE_SIZE); 329 handle->cur_swap = offset; 330 handle->k = 0; 331 } 332 out: 333 return error; 334 } 335 336 static int flush_swap_writer(struct swap_map_handle *handle) 337 { 338 if (handle->cur && handle->cur_swap) 339 return write_page(handle->cur, handle->cur_swap, NULL); 340 else 341 return -EINVAL; 342 } 343 344 static int swap_writer_finish(struct swap_map_handle *handle, 345 unsigned int flags, int error) 346 { 347 if (!error) { 348 flush_swap_writer(handle); 349 printk(KERN_INFO "PM: S"); 350 error = mark_swapfiles(handle, flags); 351 printk("|\n"); 352 } 353 354 if (error) 355 free_all_swap_pages(root_swap); 356 release_swap_writer(handle); 357 swsusp_close(FMODE_WRITE); 358 359 return error; 360 } 361 362 /* We need to remember how much compressed data we need to read. */ 363 #define LZO_HEADER sizeof(size_t) 364 365 /* Number of pages/bytes we'll compress at one time. */ 366 #define LZO_UNC_PAGES 32 367 #define LZO_UNC_SIZE (LZO_UNC_PAGES * PAGE_SIZE) 368 369 /* Number of pages/bytes we need for compressed data (worst case). */ 370 #define LZO_CMP_PAGES DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \ 371 LZO_HEADER, PAGE_SIZE) 372 #define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE) 373 374 /** 375 * save_image - save the suspend image data 376 */ 377 378 static int save_image(struct swap_map_handle *handle, 379 struct snapshot_handle *snapshot, 380 unsigned int nr_to_write) 381 { 382 unsigned int m; 383 int ret; 384 int nr_pages; 385 int err2; 386 struct bio *bio; 387 struct timeval start; 388 struct timeval stop; 389 390 printk(KERN_INFO "PM: Saving image data pages (%u pages) ... ", 391 nr_to_write); 392 m = nr_to_write / 100; 393 if (!m) 394 m = 1; 395 nr_pages = 0; 396 bio = NULL; 397 do_gettimeofday(&start); 398 while (1) { 399 ret = snapshot_read_next(snapshot); 400 if (ret <= 0) 401 break; 402 ret = swap_write_page(handle, data_of(*snapshot), &bio); 403 if (ret) 404 break; 405 if (!(nr_pages % m)) 406 printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m); 407 nr_pages++; 408 } 409 err2 = hib_wait_on_bio_chain(&bio); 410 do_gettimeofday(&stop); 411 if (!ret) 412 ret = err2; 413 if (!ret) 414 printk(KERN_CONT "\b\b\b\bdone\n"); 415 else 416 printk(KERN_CONT "\n"); 417 swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); 418 return ret; 419 } 420 421 422 /** 423 * save_image_lzo - Save the suspend image data compressed with LZO. 424 * @handle: Swap mam handle to use for saving the image. 425 * @snapshot: Image to read data from. 426 * @nr_to_write: Number of pages to save. 427 */ 428 static int save_image_lzo(struct swap_map_handle *handle, 429 struct snapshot_handle *snapshot, 430 unsigned int nr_to_write) 431 { 432 unsigned int m; 433 int ret = 0; 434 int nr_pages; 435 int err2; 436 struct bio *bio; 437 struct timeval start; 438 struct timeval stop; 439 size_t off, unc_len, cmp_len; 440 unsigned char *unc, *cmp, *wrk, *page; 441 442 page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); 443 if (!page) { 444 printk(KERN_ERR "PM: Failed to allocate LZO page\n"); 445 return -ENOMEM; 446 } 447 448 wrk = vmalloc(LZO1X_1_MEM_COMPRESS); 449 if (!wrk) { 450 printk(KERN_ERR "PM: Failed to allocate LZO workspace\n"); 451 free_page((unsigned long)page); 452 return -ENOMEM; 453 } 454 455 unc = vmalloc(LZO_UNC_SIZE); 456 if (!unc) { 457 printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n"); 458 vfree(wrk); 459 free_page((unsigned long)page); 460 return -ENOMEM; 461 } 462 463 cmp = vmalloc(LZO_CMP_SIZE); 464 if (!cmp) { 465 printk(KERN_ERR "PM: Failed to allocate LZO compressed\n"); 466 vfree(unc); 467 vfree(wrk); 468 free_page((unsigned long)page); 469 return -ENOMEM; 470 } 471 472 printk(KERN_INFO 473 "PM: Compressing and saving image data (%u pages) ... ", 474 nr_to_write); 475 m = nr_to_write / 100; 476 if (!m) 477 m = 1; 478 nr_pages = 0; 479 bio = NULL; 480 do_gettimeofday(&start); 481 for (;;) { 482 for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) { 483 ret = snapshot_read_next(snapshot); 484 if (ret < 0) 485 goto out_finish; 486 487 if (!ret) 488 break; 489 490 memcpy(unc + off, data_of(*snapshot), PAGE_SIZE); 491 492 if (!(nr_pages % m)) 493 printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m); 494 nr_pages++; 495 } 496 497 if (!off) 498 break; 499 500 unc_len = off; 501 ret = lzo1x_1_compress(unc, unc_len, 502 cmp + LZO_HEADER, &cmp_len, wrk); 503 if (ret < 0) { 504 printk(KERN_ERR "PM: LZO compression failed\n"); 505 break; 506 } 507 508 if (unlikely(!cmp_len || 509 cmp_len > lzo1x_worst_compress(unc_len))) { 510 printk(KERN_ERR "PM: Invalid LZO compressed length\n"); 511 ret = -1; 512 break; 513 } 514 515 *(size_t *)cmp = cmp_len; 516 517 /* 518 * Given we are writing one page at a time to disk, we copy 519 * that much from the buffer, although the last bit will likely 520 * be smaller than full page. This is OK - we saved the length 521 * of the compressed data, so any garbage at the end will be 522 * discarded when we read it. 523 */ 524 for (off = 0; off < LZO_HEADER + cmp_len; off += PAGE_SIZE) { 525 memcpy(page, cmp + off, PAGE_SIZE); 526 527 ret = swap_write_page(handle, page, &bio); 528 if (ret) 529 goto out_finish; 530 } 531 } 532 533 out_finish: 534 err2 = hib_wait_on_bio_chain(&bio); 535 do_gettimeofday(&stop); 536 if (!ret) 537 ret = err2; 538 if (!ret) 539 printk(KERN_CONT "\b\b\b\bdone\n"); 540 else 541 printk(KERN_CONT "\n"); 542 swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); 543 544 vfree(cmp); 545 vfree(unc); 546 vfree(wrk); 547 free_page((unsigned long)page); 548 549 return ret; 550 } 551 552 /** 553 * enough_swap - Make sure we have enough swap to save the image. 554 * 555 * Returns TRUE or FALSE after checking the total amount of swap 556 * space avaiable from the resume partition. 557 */ 558 559 static int enough_swap(unsigned int nr_pages, unsigned int flags) 560 { 561 unsigned int free_swap = count_swap_pages(root_swap, 1); 562 unsigned int required; 563 564 pr_debug("PM: Free swap pages: %u\n", free_swap); 565 566 required = PAGES_FOR_IO + ((flags & SF_NOCOMPRESS_MODE) ? 567 nr_pages : (nr_pages * LZO_CMP_PAGES) / LZO_UNC_PAGES + 1); 568 return free_swap > required; 569 } 570 571 /** 572 * swsusp_write - Write entire image and metadata. 573 * @flags: flags to pass to the "boot" kernel in the image header 574 * 575 * It is important _NOT_ to umount filesystems at this point. We want 576 * them synced (in case something goes wrong) but we DO not want to mark 577 * filesystem clean: it is not. (And it does not matter, if we resume 578 * correctly, we'll mark system clean, anyway.) 579 */ 580 581 int swsusp_write(unsigned int flags) 582 { 583 struct swap_map_handle handle; 584 struct snapshot_handle snapshot; 585 struct swsusp_info *header; 586 unsigned long pages; 587 int error; 588 589 pages = snapshot_get_image_size(); 590 error = get_swap_writer(&handle); 591 if (error) { 592 printk(KERN_ERR "PM: Cannot get swap writer\n"); 593 return error; 594 } 595 if (!enough_swap(pages, flags)) { 596 printk(KERN_ERR "PM: Not enough free swap\n"); 597 error = -ENOSPC; 598 goto out_finish; 599 } 600 memset(&snapshot, 0, sizeof(struct snapshot_handle)); 601 error = snapshot_read_next(&snapshot); 602 if (error < PAGE_SIZE) { 603 if (error >= 0) 604 error = -EFAULT; 605 606 goto out_finish; 607 } 608 header = (struct swsusp_info *)data_of(snapshot); 609 error = swap_write_page(&handle, header, NULL); 610 if (!error) { 611 error = (flags & SF_NOCOMPRESS_MODE) ? 612 save_image(&handle, &snapshot, pages - 1) : 613 save_image_lzo(&handle, &snapshot, pages - 1); 614 } 615 out_finish: 616 error = swap_writer_finish(&handle, flags, error); 617 return error; 618 } 619 620 /** 621 * The following functions allow us to read data using a swap map 622 * in a file-alike way 623 */ 624 625 static void release_swap_reader(struct swap_map_handle *handle) 626 { 627 if (handle->cur) 628 free_page((unsigned long)handle->cur); 629 handle->cur = NULL; 630 } 631 632 static int get_swap_reader(struct swap_map_handle *handle, 633 unsigned int *flags_p) 634 { 635 int error; 636 637 *flags_p = swsusp_header->flags; 638 639 if (!swsusp_header->image) /* how can this happen? */ 640 return -EINVAL; 641 642 handle->cur = (struct swap_map_page *)get_zeroed_page(__GFP_WAIT | __GFP_HIGH); 643 if (!handle->cur) 644 return -ENOMEM; 645 646 error = hib_bio_read_page(swsusp_header->image, handle->cur, NULL); 647 if (error) { 648 release_swap_reader(handle); 649 return error; 650 } 651 handle->k = 0; 652 return 0; 653 } 654 655 static int swap_read_page(struct swap_map_handle *handle, void *buf, 656 struct bio **bio_chain) 657 { 658 sector_t offset; 659 int error; 660 661 if (!handle->cur) 662 return -EINVAL; 663 offset = handle->cur->entries[handle->k]; 664 if (!offset) 665 return -EFAULT; 666 error = hib_bio_read_page(offset, buf, bio_chain); 667 if (error) 668 return error; 669 if (++handle->k >= MAP_PAGE_ENTRIES) { 670 error = hib_wait_on_bio_chain(bio_chain); 671 handle->k = 0; 672 offset = handle->cur->next_swap; 673 if (!offset) 674 release_swap_reader(handle); 675 else if (!error) 676 error = hib_bio_read_page(offset, handle->cur, NULL); 677 } 678 return error; 679 } 680 681 static int swap_reader_finish(struct swap_map_handle *handle) 682 { 683 release_swap_reader(handle); 684 685 return 0; 686 } 687 688 /** 689 * load_image - load the image using the swap map handle 690 * @handle and the snapshot handle @snapshot 691 * (assume there are @nr_pages pages to load) 692 */ 693 694 static int load_image(struct swap_map_handle *handle, 695 struct snapshot_handle *snapshot, 696 unsigned int nr_to_read) 697 { 698 unsigned int m; 699 int error = 0; 700 struct timeval start; 701 struct timeval stop; 702 struct bio *bio; 703 int err2; 704 unsigned nr_pages; 705 706 printk(KERN_INFO "PM: Loading image data pages (%u pages) ... ", 707 nr_to_read); 708 m = nr_to_read / 100; 709 if (!m) 710 m = 1; 711 nr_pages = 0; 712 bio = NULL; 713 do_gettimeofday(&start); 714 for ( ; ; ) { 715 error = snapshot_write_next(snapshot); 716 if (error <= 0) 717 break; 718 error = swap_read_page(handle, data_of(*snapshot), &bio); 719 if (error) 720 break; 721 if (snapshot->sync_read) 722 error = hib_wait_on_bio_chain(&bio); 723 if (error) 724 break; 725 if (!(nr_pages % m)) 726 printk("\b\b\b\b%3d%%", nr_pages / m); 727 nr_pages++; 728 } 729 err2 = hib_wait_on_bio_chain(&bio); 730 do_gettimeofday(&stop); 731 if (!error) 732 error = err2; 733 if (!error) { 734 printk("\b\b\b\bdone\n"); 735 snapshot_write_finalize(snapshot); 736 if (!snapshot_image_loaded(snapshot)) 737 error = -ENODATA; 738 } else 739 printk("\n"); 740 swsusp_show_speed(&start, &stop, nr_to_read, "Read"); 741 return error; 742 } 743 744 /** 745 * load_image_lzo - Load compressed image data and decompress them with LZO. 746 * @handle: Swap map handle to use for loading data. 747 * @snapshot: Image to copy uncompressed data into. 748 * @nr_to_read: Number of pages to load. 749 */ 750 static int load_image_lzo(struct swap_map_handle *handle, 751 struct snapshot_handle *snapshot, 752 unsigned int nr_to_read) 753 { 754 unsigned int m; 755 int error = 0; 756 struct timeval start; 757 struct timeval stop; 758 unsigned nr_pages; 759 size_t off, unc_len, cmp_len; 760 unsigned char *unc, *cmp, *page; 761 762 page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); 763 if (!page) { 764 printk(KERN_ERR "PM: Failed to allocate LZO page\n"); 765 return -ENOMEM; 766 } 767 768 unc = vmalloc(LZO_UNC_SIZE); 769 if (!unc) { 770 printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n"); 771 free_page((unsigned long)page); 772 return -ENOMEM; 773 } 774 775 cmp = vmalloc(LZO_CMP_SIZE); 776 if (!cmp) { 777 printk(KERN_ERR "PM: Failed to allocate LZO compressed\n"); 778 vfree(unc); 779 free_page((unsigned long)page); 780 return -ENOMEM; 781 } 782 783 printk(KERN_INFO 784 "PM: Loading and decompressing image data (%u pages) ... ", 785 nr_to_read); 786 m = nr_to_read / 100; 787 if (!m) 788 m = 1; 789 nr_pages = 0; 790 do_gettimeofday(&start); 791 792 error = snapshot_write_next(snapshot); 793 if (error <= 0) 794 goto out_finish; 795 796 for (;;) { 797 error = swap_read_page(handle, page, NULL); /* sync */ 798 if (error) 799 break; 800 801 cmp_len = *(size_t *)page; 802 if (unlikely(!cmp_len || 803 cmp_len > lzo1x_worst_compress(LZO_UNC_SIZE))) { 804 printk(KERN_ERR "PM: Invalid LZO compressed length\n"); 805 error = -1; 806 break; 807 } 808 809 memcpy(cmp, page, PAGE_SIZE); 810 for (off = PAGE_SIZE; off < LZO_HEADER + cmp_len; off += PAGE_SIZE) { 811 error = swap_read_page(handle, page, NULL); /* sync */ 812 if (error) 813 goto out_finish; 814 815 memcpy(cmp + off, page, PAGE_SIZE); 816 } 817 818 unc_len = LZO_UNC_SIZE; 819 error = lzo1x_decompress_safe(cmp + LZO_HEADER, cmp_len, 820 unc, &unc_len); 821 if (error < 0) { 822 printk(KERN_ERR "PM: LZO decompression failed\n"); 823 break; 824 } 825 826 if (unlikely(!unc_len || 827 unc_len > LZO_UNC_SIZE || 828 unc_len & (PAGE_SIZE - 1))) { 829 printk(KERN_ERR "PM: Invalid LZO uncompressed length\n"); 830 error = -1; 831 break; 832 } 833 834 for (off = 0; off < unc_len; off += PAGE_SIZE) { 835 memcpy(data_of(*snapshot), unc + off, PAGE_SIZE); 836 837 if (!(nr_pages % m)) 838 printk("\b\b\b\b%3d%%", nr_pages / m); 839 nr_pages++; 840 841 error = snapshot_write_next(snapshot); 842 if (error <= 0) 843 goto out_finish; 844 } 845 } 846 847 out_finish: 848 do_gettimeofday(&stop); 849 if (!error) { 850 printk("\b\b\b\bdone\n"); 851 snapshot_write_finalize(snapshot); 852 if (!snapshot_image_loaded(snapshot)) 853 error = -ENODATA; 854 } else 855 printk("\n"); 856 swsusp_show_speed(&start, &stop, nr_to_read, "Read"); 857 858 vfree(cmp); 859 vfree(unc); 860 free_page((unsigned long)page); 861 862 return error; 863 } 864 865 /** 866 * swsusp_read - read the hibernation image. 867 * @flags_p: flags passed by the "frozen" kernel in the image header should 868 * be written into this memeory location 869 */ 870 871 int swsusp_read(unsigned int *flags_p) 872 { 873 int error; 874 struct swap_map_handle handle; 875 struct snapshot_handle snapshot; 876 struct swsusp_info *header; 877 878 memset(&snapshot, 0, sizeof(struct snapshot_handle)); 879 error = snapshot_write_next(&snapshot); 880 if (error < PAGE_SIZE) 881 return error < 0 ? error : -EFAULT; 882 header = (struct swsusp_info *)data_of(snapshot); 883 error = get_swap_reader(&handle, flags_p); 884 if (error) 885 goto end; 886 if (!error) 887 error = swap_read_page(&handle, header, NULL); 888 if (!error) { 889 error = (*flags_p & SF_NOCOMPRESS_MODE) ? 890 load_image(&handle, &snapshot, header->pages - 1) : 891 load_image_lzo(&handle, &snapshot, header->pages - 1); 892 } 893 swap_reader_finish(&handle); 894 end: 895 if (!error) 896 pr_debug("PM: Image successfully loaded\n"); 897 else 898 pr_debug("PM: Error %d resuming\n", error); 899 return error; 900 } 901 902 /** 903 * swsusp_check - Check for swsusp signature in the resume device 904 */ 905 906 int swsusp_check(void) 907 { 908 int error; 909 910 hib_resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ); 911 if (!IS_ERR(hib_resume_bdev)) { 912 set_blocksize(hib_resume_bdev, PAGE_SIZE); 913 memset(swsusp_header, 0, PAGE_SIZE); 914 error = hib_bio_read_page(swsusp_resume_block, 915 swsusp_header, NULL); 916 if (error) 917 goto put; 918 919 if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) { 920 memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10); 921 /* Reset swap signature now */ 922 error = hib_bio_write_page(swsusp_resume_block, 923 swsusp_header, NULL); 924 } else { 925 error = -EINVAL; 926 } 927 928 put: 929 if (error) 930 blkdev_put(hib_resume_bdev, FMODE_READ); 931 else 932 pr_debug("PM: Image signature found, resuming\n"); 933 } else { 934 error = PTR_ERR(hib_resume_bdev); 935 } 936 937 if (error) 938 pr_debug("PM: Image not found (code %d)\n", error); 939 940 return error; 941 } 942 943 /** 944 * swsusp_close - close swap device. 945 */ 946 947 void swsusp_close(fmode_t mode) 948 { 949 if (IS_ERR(hib_resume_bdev)) { 950 pr_debug("PM: Image device not initialised\n"); 951 return; 952 } 953 954 blkdev_put(hib_resume_bdev, mode); 955 } 956 957 static int swsusp_header_init(void) 958 { 959 swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL); 960 if (!swsusp_header) 961 panic("Could not allocate memory for swsusp_header\n"); 962 return 0; 963 } 964 965 core_initcall(swsusp_header_init); 966