/*
 * linux/kernel/power/swap.c
 *
 * This file provides functions for reading the suspend image from
 * and writing it to a swap partition.
 *
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
 *
 * This file is released under the GPLv2.
 *
 */

#include <linux/module.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/genhd.h>
#include <linux/device.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/crc32.h>

#include "power.h"

#define HIBERNATE_SIG	"S1SUSPEND"

/*
 * The swap map is a data structure used for keeping track of each page
 * written to a swap partition.  It consists of many swap_map_page
 * structures, each of which contains an array of MAP_PAGE_ENTRIES swap
 * entries (e.g. 511 with 4 KB pages and an 8-byte sector_t, since one
 * slot is reserved for .next_swap).  These structures are stored in swap
 * and linked together via the .next_swap member.
 *
 * The swap map is created during suspend.  The swap map pages are
 * allocated and populated one at a time, so we only need one memory
 * page to set up the entire structure.
 *
 * During resume we read all swap_map_page structures into a list.
 */

#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)

/*
 * Number of free pages that are not in high memory.
 */
static inline unsigned long low_free_pages(void)
{
	return nr_free_pages() - nr_free_highpages();
}

/*
 * Number of pages required to be kept free while writing the image. Always
 * half of all available low pages before the writing starts.
 */
static inline unsigned long reqd_free_pages(void)
{
	return low_free_pages() / 2;
}

struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES];
	sector_t next_swap;
};

struct swap_map_page_list {
	struct swap_map_page *map;
	struct swap_map_page_list *next;
};

/**
 *	The swap_map_handle structure is used for handling swap in
 *	a file-like way.
 */

struct swap_map_handle {
	struct swap_map_page *cur;
	struct swap_map_page_list *maps;
	sector_t cur_swap;
	sector_t first_sector;
	unsigned int k;
	unsigned long reqd_free_pages;
	u32 crc32;
};

struct swsusp_header {
	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
	              sizeof(u32)];
	u32	crc32;
	sector_t image;
	unsigned int flags;	/* Flags to pass to the "boot" kernel */
	char	orig_sig[10];
	char	sig[10];
} __attribute__((packed));

static struct swsusp_header *swsusp_header;

/**
 *	The following functions are used for tracing the allocated
 *	swap pages, so that they can be freed in case of an error.
 */
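/*
 * Example: registering offsets 100, 101 and 103 (in that order) with
 * swsusp_extents_insert() below produces the extents [100..101] and
 * [103..103]; registering 102 afterwards only extends one of its
 * neighbours, it does not coalesce the two extents.  That is harmless,
 * because the tree is only ever walked to free everything.
 */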
struct swsusp_extent {
	struct rb_node node;
	unsigned long start;
	unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;

static int swsusp_extents_insert(unsigned long swap_offset)
{
	struct rb_node **new = &(swsusp_extents.rb_node);
	struct rb_node *parent = NULL;
	struct swsusp_extent *ext;

	/* Figure out where to put the new node */
	while (*new) {
		ext = container_of(*new, struct swsusp_extent, node);
		parent = *new;
		if (swap_offset < ext->start) {
			/* Try to merge */
			if (swap_offset == ext->start - 1) {
				ext->start--;
				return 0;
			}
			new = &((*new)->rb_left);
		} else if (swap_offset > ext->end) {
			/* Try to merge */
			if (swap_offset == ext->end + 1) {
				ext->end++;
				return 0;
			}
			new = &((*new)->rb_right);
		} else {
			/* It already is in the tree */
			return -EINVAL;
		}
	}
	/* Add the new node and rebalance the tree. */
	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
	if (!ext)
		return -ENOMEM;

	ext->start = swap_offset;
	ext->end = swap_offset;
	rb_link_node(&ext->node, parent, new);
	rb_insert_color(&ext->node, &swsusp_extents);
	return 0;
}

/**
 *	alloc_swapdev_block - allocate a swap page and register that it has
 *	been allocated, so that it can be freed in case of an error.
 */

sector_t alloc_swapdev_block(int swap)
{
	unsigned long offset;

	offset = swp_offset(get_swap_page_of_type(swap));
	if (offset) {
		if (swsusp_extents_insert(offset))
			swap_free(swp_entry(swap, offset));
		else
			return swapdev_block(swap, offset);
	}
	return 0;
}
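/*
 * A minimal sketch (illustrative only, never compiled) of how the helpers
 * above fit together; swap_write_page() further down is the real user and
 * additionally maintains the swap map.  The helper name is made up for the
 * example.
 */
#if 0
static int example_write_one_page(int swap, void *buf)
{
	/* grab one swap page and remember it in swsusp_extents */
	sector_t offset = alloc_swapdev_block(swap);

	if (!offset)
		return -ENOSPC;
	/*
	 * Write synchronously; if the overall save fails later on,
	 * free_all_swap_pages(swap) releases everything that has been
	 * registered so far.
	 */
	return hib_bio_write_page(offset, buf, NULL);
}
#endif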
/**
 *	free_all_swap_pages - free swap pages allocated for saving image data.
 *	It also frees the extents used to register which swap entries had been
 *	allocated.
 */

void free_all_swap_pages(int swap)
{
	struct rb_node *node;

	while ((node = swsusp_extents.rb_node)) {
		struct swsusp_extent *ext;
		unsigned long offset;

		ext = container_of(node, struct swsusp_extent, node);
		rb_erase(node, &swsusp_extents);
		for (offset = ext->start; offset <= ext->end; offset++)
			swap_free(swp_entry(swap, offset));

		kfree(ext);
	}
}

int swsusp_swap_in_use(void)
{
	return (swsusp_extents.rb_node != NULL);
}

/*
 * General things
 */

static unsigned short root_swap = 0xffff;
struct block_device *hib_resume_bdev;

/*
 * Saving part
 */

static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
	int error;

	hib_bio_read_page(swsusp_resume_block, swsusp_header, NULL);
	if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) ||
	    !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) {
		memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10);
		memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
		swsusp_header->image = handle->first_sector;
		swsusp_header->flags = flags;
		if (flags & SF_CRC32_MODE)
			swsusp_header->crc32 = handle->crc32;
		error = hib_bio_write_page(swsusp_resume_block,
					swsusp_header, NULL);
	} else {
		printk(KERN_ERR "PM: Swap header not found!\n");
		error = -ENODEV;
	}
	return error;
}

/**
 *	swsusp_swap_check - check if the resume device is a swap device
 *	and get its index (if so)
 *
 *	This is called before saving the image.
 */
static int swsusp_swap_check(void)
{
	int res;

	res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
			&hib_resume_bdev);
	if (res < 0)
		return res;

	root_swap = res;
	res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
	if (res)
		return res;

	res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
	if (res < 0)
		blkdev_put(hib_resume_bdev, FMODE_WRITE);

	return res;
}

/**
 *	write_page - Write one page to given swap location.
 *	@buf:		Address we're writing.
 *	@offset:	Offset of the swap page we're writing to.
 *	@bio_chain:	Link the next write BIO here
 */

static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
{
	void *src;
	int ret;

	if (!offset)
		return -ENOSPC;

	if (bio_chain) {
		src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |
		                              __GFP_NORETRY);
		if (src) {
			copy_page(src, buf);
		} else {
			ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */
			if (ret)
				return ret;
			src = (void *)__get_free_page(__GFP_WAIT |
			                              __GFP_NOWARN |
			                              __GFP_NORETRY);
			if (src) {
				copy_page(src, buf);
			} else {
				WARN_ON_ONCE(1);
				bio_chain = NULL;	/* Go synchronous */
				src = buf;
			}
		}
	} else {
		src = buf;
	}
	return hib_bio_write_page(offset, src, bio_chain);
}

static void release_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur)
		free_page((unsigned long)handle->cur);
	handle->cur = NULL;
}

static int get_swap_writer(struct swap_map_handle *handle)
{
	int ret;

	ret = swsusp_swap_check();
	if (ret) {
		if (ret != -ENOSPC)
			printk(KERN_ERR "PM: Cannot find swap device, try "
					"swapon -a.\n");
		return ret;
	}
	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
	if (!handle->cur) {
		ret = -ENOMEM;
		goto err_close;
	}
	handle->cur_swap = alloc_swapdev_block(root_swap);
	if (!handle->cur_swap) {
		ret = -ENOSPC;
		goto err_rel;
	}
	handle->k = 0;
	handle->reqd_free_pages = reqd_free_pages();
	handle->first_sector = handle->cur_swap;
	return 0;
err_rel:
	release_swap_writer(handle);
err_close:
	swsusp_close(FMODE_WRITE);
	return ret;
}

static int swap_write_page(struct swap_map_handle *handle, void *buf,
				struct bio **bio_chain)
{
	int error = 0;
	sector_t offset;

	if (!handle->cur)
		return -EINVAL;
	offset = alloc_swapdev_block(root_swap);
	error = write_page(buf, offset, bio_chain);
	if (error)
		return error;
	handle->cur->entries[handle->k++] = offset;
	if (handle->k >= MAP_PAGE_ENTRIES) {
		offset = alloc_swapdev_block(root_swap);
		if (!offset)
			return -ENOSPC;
		handle->cur->next_swap = offset;
		error = write_page(handle->cur, handle->cur_swap, bio_chain);
		if (error)
			goto out;
		clear_page(handle->cur);
		handle->cur_swap = offset;
		handle->k = 0;

		if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
			error = hib_wait_on_bio_chain(bio_chain);
			if (error)
				goto out;
			/*
			 * Recalculate the number of required free pages, to
			 * make sure we never take more than half.
			 */
			handle->reqd_free_pages = reqd_free_pages();
		}
	}
 out:
	return error;
}

static int flush_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur && handle->cur_swap)
		return write_page(handle->cur, handle->cur_swap, NULL);
	else
		return -EINVAL;
}

static int swap_writer_finish(struct swap_map_handle *handle,
		unsigned int flags, int error)
{
	if (!error) {
		flush_swap_writer(handle);
		printk(KERN_INFO "PM: S");
		error = mark_swapfiles(handle, flags);
		printk("|\n");
	}

	if (error)
		free_all_swap_pages(root_swap);
	release_swap_writer(handle);
	swsusp_close(FMODE_WRITE);

	return error;
}

/* We need to remember how much compressed data we need to read. */
#define LZO_HEADER	sizeof(size_t)

/* Number of pages/bytes we'll compress at one time. */
#define LZO_UNC_PAGES	32
#define LZO_UNC_SIZE	(LZO_UNC_PAGES * PAGE_SIZE)

/* Number of pages/bytes we need for compressed data (worst case). */
#define LZO_CMP_PAGES	DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
				     LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE	(LZO_CMP_PAGES * PAGE_SIZE)

/* Maximum number of threads for compression/decompression. */
#define LZO_THREADS	3

/* Minimum/maximum number of pages for read buffering. */
#define LZO_MIN_RD_PAGES	1024
#define LZO_MAX_RD_PAGES	8192


/**
 *	save_image - save the suspend image data
 */

static int save_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_write)
{
	unsigned int m;
	int ret;
	int nr_pages;
	int err2;
	struct bio *bio;
	struct timeval start;
	struct timeval stop;

	printk(KERN_INFO "PM: Saving image data pages (%u pages) ... ",
		nr_to_write);
	m = nr_to_write / 100;
	if (!m)
		m = 1;
	nr_pages = 0;
	bio = NULL;
	do_gettimeofday(&start);
	while (1) {
		ret = snapshot_read_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_write_page(handle, data_of(*snapshot), &bio);
		if (ret)
			break;
		if (!(nr_pages % m))
			printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m);
		nr_pages++;
	}
	err2 = hib_wait_on_bio_chain(&bio);
	do_gettimeofday(&stop);
	if (!ret)
		ret = err2;
	if (!ret)
		printk(KERN_CONT "\b\b\b\bdone\n");
	else
		printk(KERN_CONT "\n");
	swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
	return ret;
}

/**
 * Structure used for CRC32.
 */
struct crc_data {
	struct task_struct *thr;		/* thread */
	atomic_t ready;				/* ready to start flag */
	atomic_t stop;				/* ready to stop flag */
	unsigned run_threads;			/* nr current threads */
	wait_queue_head_t go;			/* start crc update */
	wait_queue_head_t done;			/* crc update done */
	u32 *crc32;				/* points to handle's crc32 */
	size_t *unc_len[LZO_THREADS];		/* uncompressed lengths */
	unsigned char *unc[LZO_THREADS];	/* uncompressed data */
};

/**
 * CRC32 update function that runs in its own thread.
 */
static int crc32_threadfn(void *data)
{
	struct crc_data *d = data;
	unsigned i;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		for (i = 0; i < d->run_threads; i++)
			*d->crc32 = crc32_le(*d->crc32,
			                     d->unc[i], *d->unc_len[i]);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}
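/*
 * A rough worked example for the buffer sizes used below, assuming 4 KB
 * pages and an 8-byte size_t: LZO_UNC_SIZE is 32 pages (131072 bytes),
 * lzo1x_worst_compress() of that plus LZO_HEADER comes to 139339 bytes,
 * so LZO_CMP_PAGES is 35 and LZO_CMP_SIZE is 143360 bytes.  Each
 * compression thread therefore carries roughly 270 KB of buffers plus
 * the LZO workspace.
 */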
/**
 * Structure used for LZO data compression.
 */
struct cmp_data {
	struct task_struct *thr;		/* thread */
	atomic_t ready;				/* ready to start flag */
	atomic_t stop;				/* ready to stop flag */
	int ret;				/* return code */
	wait_queue_head_t go;			/* start compression */
	wait_queue_head_t done;			/* compression done */
	size_t unc_len;				/* uncompressed length */
	size_t cmp_len;				/* compressed length */
	unsigned char unc[LZO_UNC_SIZE];	/* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];	/* compressed buffer */
	unsigned char wrk[LZO1X_1_MEM_COMPRESS];	/* compression workspace */
};

/**
 * Compression function that runs in its own thread.
 */
static int lzo_compress_threadfn(void *data)
{
	struct cmp_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->ret = lzo1x_1_compress(d->unc, d->unc_len,
		                          d->cmp + LZO_HEADER, &d->cmp_len,
		                          d->wrk);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}

/**
 * save_image_lzo - Save the suspend image data compressed with LZO.
 * @handle: Swap map handle to use for saving the image.
 * @snapshot: Image to read data from.
 * @nr_to_write: Number of pages to save.
 */
static int save_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_write)
{
	unsigned int m;
	int ret = 0;
	int nr_pages;
	int err2;
	struct bio *bio;
	struct timeval start;
	struct timeval stop;
	size_t off;
	unsigned thr, run_threads, nr_threads;
	unsigned char *page = NULL;
	struct cmp_data *data = NULL;
	struct crc_data *crc = NULL;

	/*
	 * We'll limit the number of threads for compression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
	if (!page) {
		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vmalloc(sizeof(*data) * nr_threads);
	if (!data) {
		printk(KERN_ERR "PM: Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	for (thr = 0; thr < nr_threads; thr++)
		memset(&data[thr], 0, offsetof(struct cmp_data, go));

	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		printk(KERN_ERR "PM: Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	memset(crc, 0, offsetof(struct crc_data, go));

	/*
	 * Start the compression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_compress_threadfn,
		                            &data[thr],
		                            "image_compress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			printk(KERN_ERR
			       "PM: Cannot start compression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Adjust the number of required free pages after all allocations have
	 * been done. We don't want to run out of pages when writing.
	 */
	handle->reqd_free_pages = reqd_free_pages();

	printk(KERN_INFO
		"PM: Using %u thread(s) for compression.\n"
		"PM: Compressing and saving image data (%u pages) ... ",
		nr_threads, nr_to_write);
	m = nr_to_write / 100;
	if (!m)
		m = 1;
	nr_pages = 0;
	bio = NULL;
	do_gettimeofday(&start);
	for (;;) {
		for (thr = 0; thr < nr_threads; thr++) {
			for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
				ret = snapshot_read_next(snapshot);
				if (ret < 0)
					goto out_finish;

				if (!ret)
					break;

				memcpy(data[thr].unc + off,
				       data_of(*snapshot), PAGE_SIZE);

				if (!(nr_pages % m))
					printk(KERN_CONT "\b\b\b\b%3d%%",
					       nr_pages / m);
				nr_pages++;
			}
			if (!off)
				break;

			data[thr].unc_len = off;

			atomic_set(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		if (!thr)
			break;

		crc->run_threads = thr;
		atomic_set(&crc->ready, 1);
		wake_up(&crc->go);

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
			           atomic_read(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				printk(KERN_ERR "PM: LZO compression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].cmp_len ||
			             data[thr].cmp_len >
			             lzo1x_worst_compress(data[thr].unc_len))) {
				printk(KERN_ERR
				       "PM: Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			*(size_t *)data[thr].cmp = data[thr].cmp_len;

			/*
			 * Given we are writing one page at a time to disk, we
			 * copy that much from the buffer, although the last
			 * bit will likely be smaller than full page. This is
			 * OK - we saved the length of the compressed data, so
			 * any garbage at the end will be discarded when we
			 * read it.
			 */
			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(page, data[thr].cmp + off, PAGE_SIZE);

				ret = swap_write_page(handle, page, &bio);
				if (ret)
					goto out_finish;
			}
		}

		wait_event(crc->done, atomic_read(&crc->stop));
		atomic_set(&crc->stop, 0);
	}

out_finish:
	err2 = hib_wait_on_bio_chain(&bio);
	do_gettimeofday(&stop);
	if (!ret)
		ret = err2;
	if (!ret) {
		printk(KERN_CONT "\b\b\b\bdone\n");
	} else {
		printk(KERN_CONT "\n");
	}
	swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
out_clean:
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	if (page) free_page((unsigned long)page);

	return ret;
}
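/*
 * The handshake between save_image_lzo() and its worker threads works the
 * same way in both directions: the producer fills a per-thread buffer,
 * sets ->ready and wakes ->go; the worker clears ->ready, does its work,
 * sets ->stop and wakes ->done; the consumer then clears ->stop before
 * reusing the slot.  The CRC32 thread follows the identical pattern, only
 * batched over run_threads buffers at a time.
 */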
/**
 *	enough_swap - Make sure we have enough swap to save the image.
 *
 *	Returns TRUE or FALSE after checking the total amount of swap
 *	space available from the resume partition.
 */

static int enough_swap(unsigned int nr_pages, unsigned int flags)
{
	unsigned int free_swap = count_swap_pages(root_swap, 1);
	unsigned int required;

	pr_debug("PM: Free swap pages: %u\n", free_swap);

	required = PAGES_FOR_IO + nr_pages;
	return free_swap > required;
}

/**
 *	swsusp_write - Write entire image and metadata.
 *	@flags: flags to pass to the "boot" kernel in the image header
 *
 *	It is important _NOT_ to unmount filesystems at this point. We want
 *	them synced (in case something goes wrong) but we do NOT want to mark
 *	the filesystem clean: it is not. (And it does not matter; if we resume
 *	correctly, we'll mark the system clean anyway.)
 */

int swsusp_write(unsigned int flags)
{
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;
	unsigned long pages;
	int error;

	pages = snapshot_get_image_size();
	error = get_swap_writer(&handle);
	if (error) {
		printk(KERN_ERR "PM: Cannot get swap writer\n");
		return error;
	}
	if (flags & SF_NOCOMPRESS_MODE) {
		if (!enough_swap(pages, flags)) {
			printk(KERN_ERR "PM: Not enough free swap\n");
			error = -ENOSPC;
			goto out_finish;
		}
	}
	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_read_next(&snapshot);
	if (error < PAGE_SIZE) {
		if (error >= 0)
			error = -EFAULT;

		goto out_finish;
	}
	header = (struct swsusp_info *)data_of(snapshot);
	error = swap_write_page(&handle, header, NULL);
	if (!error) {
		error = (flags & SF_NOCOMPRESS_MODE) ?
			save_image(&handle, &snapshot, pages - 1) :
			save_image_lzo(&handle, &snapshot, pages - 1);
	}
out_finish:
	error = swap_writer_finish(&handle, flags, error);
	return error;
}

/**
 *	The following functions allow us to read data using a swap map
 *	in a file-like way.
 */

static void release_swap_reader(struct swap_map_handle *handle)
{
	struct swap_map_page_list *tmp;

	while (handle->maps) {
		if (handle->maps->map)
			free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
	}
	handle->cur = NULL;
}

static int get_swap_reader(struct swap_map_handle *handle,
		unsigned int *flags_p)
{
	int error;
	struct swap_map_page_list *tmp, *last;
	sector_t offset;

	*flags_p = swsusp_header->flags;

	if (!swsusp_header->image) /* how can this happen? */
		return -EINVAL;

	handle->cur = NULL;
	last = handle->maps = NULL;
	offset = swsusp_header->image;
	while (offset) {
		tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL);
		if (!tmp) {
			release_swap_reader(handle);
			return -ENOMEM;
		}
		memset(tmp, 0, sizeof(*tmp));
		if (!handle->maps)
			handle->maps = tmp;
		if (last)
			last->next = tmp;
		last = tmp;

		tmp->map = (struct swap_map_page *)
		           __get_free_page(__GFP_WAIT | __GFP_HIGH);
		if (!tmp->map) {
			release_swap_reader(handle);
			return -ENOMEM;
		}

		error = hib_bio_read_page(offset, tmp->map, NULL);
		if (error) {
			release_swap_reader(handle);
			return error;
		}
		offset = tmp->map->next_swap;
	}
	handle->k = 0;
	handle->cur = handle->maps->map;
	return 0;
}

static int swap_read_page(struct swap_map_handle *handle, void *buf,
				struct bio **bio_chain)
{
	sector_t offset;
	int error;
	struct swap_map_page_list *tmp;

	if (!handle->cur)
		return -EINVAL;
	offset = handle->cur->entries[handle->k];
	if (!offset)
		return -EFAULT;
	error = hib_bio_read_page(offset, buf, bio_chain);
	if (error)
		return error;
	if (++handle->k >= MAP_PAGE_ENTRIES) {
		handle->k = 0;
		free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
		if (!handle->maps)
			release_swap_reader(handle);
		else
			handle->cur = handle->maps->map;
	}
	return error;
}

static int swap_reader_finish(struct swap_map_handle *handle)
{
	release_swap_reader(handle);

	return 0;
}

/**
 *	load_image - load the image using the swap map handle
 *	@handle and the snapshot handle @snapshot
 *	(assume there are @nr_to_read pages to load)
 */

static int load_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	struct timeval start;
	struct timeval stop;
	struct bio *bio;
	int err2;
	unsigned nr_pages;

	printk(KERN_INFO "PM: Loading image data pages (%u pages) ... ",
		nr_to_read);
	m = nr_to_read / 100;
	if (!m)
		m = 1;
	nr_pages = 0;
	bio = NULL;
	do_gettimeofday(&start);
	for ( ; ; ) {
		ret = snapshot_write_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_read_page(handle, data_of(*snapshot), &bio);
		if (ret)
			break;
		if (snapshot->sync_read)
			ret = hib_wait_on_bio_chain(&bio);
		if (ret)
			break;
		if (!(nr_pages % m))
			printk("\b\b\b\b%3d%%", nr_pages / m);
		nr_pages++;
	}
	err2 = hib_wait_on_bio_chain(&bio);
	do_gettimeofday(&stop);
	if (!ret)
		ret = err2;
	if (!ret) {
		printk("\b\b\b\bdone\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
	} else
		printk("\n");
	swsusp_show_speed(&start, &stop, nr_to_read, "Read");
	return ret;
}

/**
 * Structure used for LZO data decompression.
 */
struct dec_data {
	struct task_struct *thr;		/* thread */
	atomic_t ready;				/* ready to start flag */
	atomic_t stop;				/* ready to stop flag */
	int ret;				/* return code */
	wait_queue_head_t go;			/* start decompression */
	wait_queue_head_t done;			/* decompression done */
	size_t unc_len;				/* uncompressed length */
	size_t cmp_len;				/* compressed length */
	unsigned char unc[LZO_UNC_SIZE];	/* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];	/* compressed buffer */
};

/**
 * Decompression function that runs in its own thread.
 */
static int lzo_decompress_threadfn(void *data)
{
	struct dec_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->unc_len = LZO_UNC_SIZE;
		d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
		                               d->unc, &d->unc_len);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}

/**
 * load_image_lzo - Load compressed image data and decompress them with LZO.
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Image to copy uncompressed data into.
 * @nr_to_read: Number of pages to load.
 */
static int load_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	int eof = 0;
	struct bio *bio;
	struct timeval start;
	struct timeval stop;
	unsigned nr_pages;
	size_t off;
	unsigned i, thr, run_threads, nr_threads;
	unsigned ring = 0, pg = 0, ring_size = 0,
	         have = 0, want, need, asked = 0;
	unsigned long read_pages = 0;
	unsigned char **page = NULL;
	struct dec_data *data = NULL;
	struct crc_data *crc = NULL;

	/*
	 * We'll limit the number of threads for decompression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
	if (!page) {
		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vmalloc(sizeof(*data) * nr_threads);
	if (!data) {
		printk(KERN_ERR "PM: Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	for (thr = 0; thr < nr_threads; thr++)
		memset(&data[thr], 0, offsetof(struct dec_data, go));

	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		printk(KERN_ERR "PM: Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	memset(crc, 0, offsetof(struct crc_data, go));

	/*
	 * Start the decompression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_decompress_threadfn,
		                            &data[thr],
		                            "image_decompress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			printk(KERN_ERR
			       "PM: Cannot start decompression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Set the number of pages for read buffering.
	 * This is complete guesswork, because we'll only know the real
	 * picture once prepare_image() is called, which is much later on
	 * during the image load phase. We'll assume the worst case and
	 * say that none of the image pages are from high memory.
	 */
	if (low_free_pages() > snapshot_get_image_size())
		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);

	for (i = 0; i < read_pages; i++) {
		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
		                                  __GFP_WAIT | __GFP_HIGH :
		                                  __GFP_WAIT | __GFP_NOWARN |
		                                  __GFP_NORETRY);

		if (!page[i]) {
			if (i < LZO_CMP_PAGES) {
				ring_size = i;
				printk(KERN_ERR
				       "PM: Failed to allocate LZO pages\n");
				ret = -ENOMEM;
				goto out_clean;
			} else {
				break;
			}
		}
	}
	want = ring_size = i;

	printk(KERN_INFO
		"PM: Using %u thread(s) for decompression.\n"
		"PM: Loading and decompressing image data (%u pages) ... ",
		nr_threads, nr_to_read);
	m = nr_to_read / 100;
	if (!m)
		m = 1;
	nr_pages = 0;
	bio = NULL;
	do_gettimeofday(&start);

	ret = snapshot_write_next(snapshot);
	if (ret <= 0)
		goto out_finish;

	for(;;) {
		for (i = 0; !eof && i < want; i++) {
			ret = swap_read_page(handle, page[ring], &bio);
			if (ret) {
				/*
				 * On real read error, finish. On end of data,
				 * set EOF flag and just exit the read loop.
				 */
				if (handle->cur &&
				    handle->cur->entries[handle->k]) {
					goto out_finish;
				} else {
					eof = 1;
					break;
				}
			}
			if (++ring >= ring_size)
				ring = 0;
		}
		asked += i;
		want -= i;

		/*
		 * We are out of data, wait for some more.
		 */
		if (!have) {
			if (!asked)
				break;

			ret = hib_wait_on_bio_chain(&bio);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		if (crc->run_threads) {
			wait_event(crc->done, atomic_read(&crc->stop));
			atomic_set(&crc->stop, 0);
			crc->run_threads = 0;
		}

		for (thr = 0; have && thr < nr_threads; thr++) {
			data[thr].cmp_len = *(size_t *)page[pg];
			if (unlikely(!data[thr].cmp_len ||
			             data[thr].cmp_len >
			             lzo1x_worst_compress(LZO_UNC_SIZE))) {
				printk(KERN_ERR
				       "PM: Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
			                    PAGE_SIZE);
			if (need > have) {
				if (eof > 1) {
					ret = -1;
					goto out_finish;
				}
				break;
			}

			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(data[thr].cmp + off,
				       page[pg], PAGE_SIZE);
				have--;
				want++;
				if (++pg >= ring_size)
					pg = 0;
			}

			atomic_set(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		/*
		 * Wait for more data while we are decompressing.
		 */
		if (have < LZO_CMP_PAGES && asked) {
			ret = hib_wait_on_bio_chain(&bio);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
			           atomic_read(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				printk(KERN_ERR
				       "PM: LZO decompression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].unc_len ||
			             data[thr].unc_len > LZO_UNC_SIZE ||
			             data[thr].unc_len & (PAGE_SIZE - 1))) {
				printk(KERN_ERR
				       "PM: Invalid LZO uncompressed length\n");
				ret = -1;
				goto out_finish;
			}

			for (off = 0;
			     off < data[thr].unc_len; off += PAGE_SIZE) {
				memcpy(data_of(*snapshot),
				       data[thr].unc + off, PAGE_SIZE);

				if (!(nr_pages % m))
					printk("\b\b\b\b%3d%%", nr_pages / m);
				nr_pages++;

				ret = snapshot_write_next(snapshot);
				if (ret <= 0) {
					crc->run_threads = thr + 1;
					atomic_set(&crc->ready, 1);
					wake_up(&crc->go);
					goto out_finish;
				}
			}
		}

		crc->run_threads = thr;
		atomic_set(&crc->ready, 1);
		wake_up(&crc->go);
	}

out_finish:
	if (crc->run_threads) {
		wait_event(crc->done, atomic_read(&crc->stop));
		atomic_set(&crc->stop, 0);
	}
	do_gettimeofday(&stop);
	if (!ret) {
		printk("\b\b\b\bdone\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
		if (!ret) {
			if (swsusp_header->flags & SF_CRC32_MODE) {
				if(handle->crc32 != swsusp_header->crc32) {
					printk(KERN_ERR
					       "PM: Invalid image CRC32!\n");
					ret = -ENODATA;
				}
			}
		}
	} else
		printk("\n");
	swsusp_show_speed(&start, &stop, nr_to_read, "Read");
out_clean:
	for (i = 0; i < ring_size; i++)
		free_page((unsigned long)page[i]);
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	if (page) vfree(page);

	return ret;
}
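/*
 * In load_image_lzo() above, page[] is used as a ring buffer of compressed
 * pages: "ring" is where the next page read from swap lands, "pg" is where
 * the decompressors consume from, "have" counts pages known to be in the
 * buffer, "asked" counts reads still in flight on the bio chain, and "want"
 * counts free slots.  Reads are submitted asynchronously and only waited
 * for when the decompressors would otherwise run dry, which is what lets
 * reading from swap overlap with decompression.
 */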
/**
 *	swsusp_read - read the hibernation image.
 *	@flags_p: flags passed by the "frozen" kernel in the image header;
 *		  they are written into this memory location
 */

int swsusp_read(unsigned int *flags_p)
{
	int error;
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;

	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_write_next(&snapshot);
	if (error < PAGE_SIZE)
		return error < 0 ? error : -EFAULT;
	header = (struct swsusp_info *)data_of(snapshot);
	error = get_swap_reader(&handle, flags_p);
	if (error)
		goto end;
	if (!error)
		error = swap_read_page(&handle, header, NULL);
	if (!error) {
		error = (*flags_p & SF_NOCOMPRESS_MODE) ?
			load_image(&handle, &snapshot, header->pages - 1) :
			load_image_lzo(&handle, &snapshot, header->pages - 1);
	}
	swap_reader_finish(&handle);
end:
	if (!error)
		pr_debug("PM: Image successfully loaded\n");
	else
		pr_debug("PM: Error %d resuming\n", error);
	return error;
}

/**
 *	swsusp_check - Check for swsusp signature in the resume device
 */

int swsusp_check(void)
{
	int error;

	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
					FMODE_READ, NULL);
	if (!IS_ERR(hib_resume_bdev)) {
		set_blocksize(hib_resume_bdev, PAGE_SIZE);
		clear_page(swsusp_header);
		error = hib_bio_read_page(swsusp_resume_block,
					swsusp_header, NULL);
		if (error)
			goto put;

		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
			/* Reset swap signature now */
			error = hib_bio_write_page(swsusp_resume_block,
						swsusp_header, NULL);
		} else {
			error = -EINVAL;
		}

put:
		if (error)
			blkdev_put(hib_resume_bdev, FMODE_READ);
		else
			pr_debug("PM: Image signature found, resuming\n");
	} else {
		error = PTR_ERR(hib_resume_bdev);
	}

	if (error)
		pr_debug("PM: Image not found (code %d)\n", error);

	return error;
}

/**
 *	swsusp_close - close swap device.
 */

void swsusp_close(fmode_t mode)
{
	if (IS_ERR(hib_resume_bdev)) {
		pr_debug("PM: Image device not initialised\n");
		return;
	}

	blkdev_put(hib_resume_bdev, mode);
}

static int swsusp_header_init(void)
{
	swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL);
	if (!swsusp_header)
		panic("Could not allocate memory for swsusp_header\n");
	return 0;
}

core_initcall(swsusp_header_init);