swap.c (020abf03cd659388f94cb328e1e1df0656e0d7ff) | swap.c (081a9d043c983f161b78fdc4671324d1342b86bc) |
---|---|
1/* 2 * linux/kernel/power/swap.c 3 * 4 * This file provides functions for reading the suspend image from 5 * and writing it to a swap partition. 6 * 7 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz> 8 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> --- 13 unchanged lines hidden (view full) --- 22#include <linux/bio.h> 23#include <linux/blkdev.h> 24#include <linux/swap.h> 25#include <linux/swapops.h> 26#include <linux/pm.h> 27#include <linux/slab.h> 28#include <linux/lzo.h> 29#include <linux/vmalloc.h> | 1/* 2 * linux/kernel/power/swap.c 3 * 4 * This file provides functions for reading the suspend image from 5 * and writing it to a swap partition. 6 * 7 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz> 8 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> --- 13 unchanged lines hidden (view full) --- 22#include <linux/bio.h> 23#include <linux/blkdev.h> 24#include <linux/swap.h> 25#include <linux/swapops.h> 26#include <linux/pm.h> 27#include <linux/slab.h> 28#include <linux/lzo.h> 29#include <linux/vmalloc.h> |
30#include <linux/cpumask.h> 31#include <linux/atomic.h> 32#include <linux/kthread.h> 33#include <linux/crc32.h> |
|
30 31#include "power.h" 32 33#define HIBERNATE_SIG "S1SUSPEND" 34 35/* 36 * The swap map is a data structure used for keeping track of each page 37 * written to a swap partition. It consists of many swap_map_page 38 * structures that contain each an array of MAP_PAGE_ENTRIES swap entries. 39 * These structures are stored on the swap and linked together with the 40 * help of the .next_swap member. 41 * 42 * The swap map is created during suspend. The swap map pages are 43 * allocated and populated one at a time, so we only need one memory 44 * page to set up the entire structure. 45 * | 34 35#include "power.h" 36 37#define HIBERNATE_SIG "S1SUSPEND" 38 39/* 40 * The swap map is a data structure used for keeping track of each page 41 * written to a swap partition. It consists of many swap_map_page 42 * structures that contain each an array of MAP_PAGE_ENTRIES swap entries. 43 * These structures are stored on the swap and linked together with the 44 * help of the .next_swap member. 45 * 46 * The swap map is created during suspend. The swap map pages are 47 * allocated and populated one at a time, so we only need one memory 48 * page to set up the entire structure. 49 * |
46 * During resume we also only need to use one swap_map_page structure 47 * at a time. | 50 * During resume we pick up all swap_map_page structures into a list. |
48 */ 49 50#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1) 51 52struct swap_map_page { 53 sector_t entries[MAP_PAGE_ENTRIES]; 54 sector_t next_swap; 55}; 56 | 51 */ 52 53#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1) 54 55struct swap_map_page { 56 sector_t entries[MAP_PAGE_ENTRIES]; 57 sector_t next_swap; 58}; 59 |
60struct swap_map_page_list { 61 struct swap_map_page *map; 62 struct swap_map_page_list *next; 63}; 64 |
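With 4 KiB pages and an 8-byte sector_t (both assumptions; sector_t is u64 on typical 64-bit configurations), MAP_PAGE_ENTRIES above works out to 511, so each swap_map_page indexes 511 image pages and chains to the next map page through .next_swap. A minimal userspace sketch of that arithmetic, not kernel code, with names mirroring the defines above:

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL                        /* assumed page size */
typedef unsigned long long sector_t;            /* assumed 8-byte sector_t */
#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)

int main(void)
{
        /* 4096 / 8 - 1 = 511 image pages tracked per swap_map_page */
        printf("entries per map page: %lu\n",
               (unsigned long)MAP_PAGE_ENTRIES);
        /* the map page plus the 511 pages it tracks span 2 MiB of swap */
        printf("swap covered per map page: %lu KiB\n",
               (unsigned long)((MAP_PAGE_ENTRIES + 1) * PAGE_SIZE / 1024));
        return 0;
}
```

In other words, one map page is spent for roughly every 2 MiB of image data written to swap.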
|
57/** 58 * The swap_map_handle structure is used for handling swap in 59 * a file-alike way 60 */ 61 62struct swap_map_handle { 63 struct swap_map_page *cur; | 65/** 66 * The swap_map_handle structure is used for handling swap in 67 * a file-alike way 68 */ 69 70struct swap_map_handle { 71 struct swap_map_page *cur; |
72 struct swap_map_page_list *maps; |
|
64 sector_t cur_swap; 65 sector_t first_sector; 66 unsigned int k; | 73 sector_t cur_swap; 74 sector_t first_sector; 75 unsigned int k; |
76 unsigned long nr_free_pages, written; 77 u32 crc32; |
|
67}; 68 69struct swsusp_header { | 78}; 79 80struct swsusp_header { |
70 char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int)]; | 81 char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) - 82 sizeof(u32)]; 83 u32 crc32; |
71 sector_t image; 72 unsigned int flags; /* Flags to pass to the "boot" kernel */ 73 char orig_sig[10]; 74 char sig[10]; 75} __attribute__((packed)); 76 77static struct swsusp_header *swsusp_header; 78 --- 115 unchanged lines hidden (view full) --- 194 195 hib_bio_read_page(swsusp_resume_block, swsusp_header, NULL); 196 if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) || 197 !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) { 198 memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10); 199 memcpy(swsusp_header->sig, HIBERNATE_SIG, 10); 200 swsusp_header->image = handle->first_sector; 201 swsusp_header->flags = flags; | 84 sector_t image; 85 unsigned int flags; /* Flags to pass to the "boot" kernel */ 86 char orig_sig[10]; 87 char sig[10]; 88} __attribute__((packed)); 89 90static struct swsusp_header *swsusp_header; 91 --- 115 unchanged lines hidden (view full) --- 207 208 hib_bio_read_page(swsusp_resume_block, swsusp_header, NULL); 209 if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) || 210 !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) { 211 memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10); 212 memcpy(swsusp_header->sig, HIBERNATE_SIG, 10); 213 swsusp_header->image = handle->first_sector; 214 swsusp_header->flags = flags; |
215 if (flags & SF_CRC32_MODE) 216 swsusp_header->crc32 = handle->crc32; |
|
202 error = hib_bio_write_page(swsusp_resume_block, 203 swsusp_header, NULL); 204 } else { 205 printk(KERN_ERR "PM: Swap header not found!\n"); 206 error = -ENODEV; 207 } 208 return error; 209} --- 30 unchanged lines hidden (view full) --- 240 * @buf: Address we're writing. 241 * @offset: Offset of the swap page we're writing to. 242 * @bio_chain: Link the next write BIO here 243 */ 244 245static int write_page(void *buf, sector_t offset, struct bio **bio_chain) 246{ 247 void *src; | 217 error = hib_bio_write_page(swsusp_resume_block, 218 swsusp_header, NULL); 219 } else { 220 printk(KERN_ERR "PM: Swap header not found!\n"); 221 error = -ENODEV; 222 } 223 return error; 224} --- 30 unchanged lines hidden (view full) --- 255 * @buf: Address we're writing. 256 * @offset: Offset of the swap page we're writing to. 257 * @bio_chain: Link the next write BIO here 258 */ 259 260static int write_page(void *buf, sector_t offset, struct bio **bio_chain) 261{ 262 void *src; |
263 int ret; |
|
248 249 if (!offset) 250 return -ENOSPC; 251 252 if (bio_chain) { 253 src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); 254 if (src) { 255 copy_page(src, buf); 256 } else { | 264 265 if (!offset) 266 return -ENOSPC; 267 268 if (bio_chain) { 269 src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); 270 if (src) { 271 copy_page(src, buf); 272 } else { |
257 WARN_ON_ONCE(1); 258 bio_chain = NULL; /* Go synchronous */ 259 src = buf; | 273 ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */ 274 if (ret) 275 return ret; 276 src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); 277 if (src) { 278 copy_page(src, buf); 279 } else { 280 WARN_ON_ONCE(1); 281 bio_chain = NULL; /* Go synchronous */ 282 src = buf; 283 } |
260 } 261 } else { 262 src = buf; 263 } 264 return hib_bio_write_page(offset, src, bio_chain); 265} 266 267static void release_swap_writer(struct swap_map_handle *handle) --- 20 unchanged lines hidden (view full) --- 288 goto err_close; 289 } 290 handle->cur_swap = alloc_swapdev_block(root_swap); 291 if (!handle->cur_swap) { 292 ret = -ENOSPC; 293 goto err_rel; 294 } 295 handle->k = 0; | 284 } 285 } else { 286 src = buf; 287 } 288 return hib_bio_write_page(offset, src, bio_chain); 289} 290 291static void release_swap_writer(struct swap_map_handle *handle) --- 20 unchanged lines hidden (view full) --- 312 goto err_close; 313 } 314 handle->cur_swap = alloc_swapdev_block(root_swap); 315 if (!handle->cur_swap) { 316 ret = -ENOSPC; 317 goto err_rel; 318 } 319 handle->k = 0; |
320 handle->nr_free_pages = nr_free_pages() >> 1; 321 handle->written = 0; |
|
296 handle->first_sector = handle->cur_swap; 297 return 0; 298err_rel: 299 release_swap_writer(handle); 300err_close: 301 swsusp_close(FMODE_WRITE); 302 return ret; 303} --- 7 unchanged lines hidden (view full) --- 311 if (!handle->cur) 312 return -EINVAL; 313 offset = alloc_swapdev_block(root_swap); 314 error = write_page(buf, offset, bio_chain); 315 if (error) 316 return error; 317 handle->cur->entries[handle->k++] = offset; 318 if (handle->k >= MAP_PAGE_ENTRIES) { | 322 handle->first_sector = handle->cur_swap; 323 return 0; 324err_rel: 325 release_swap_writer(handle); 326err_close: 327 swsusp_close(FMODE_WRITE); 328 return ret; 329} --- 7 unchanged lines hidden (view full) --- 337 if (!handle->cur) 338 return -EINVAL; 339 offset = alloc_swapdev_block(root_swap); 340 error = write_page(buf, offset, bio_chain); 341 if (error) 342 return error; 343 handle->cur->entries[handle->k++] = offset; 344 if (handle->k >= MAP_PAGE_ENTRIES) { |
319 error = hib_wait_on_bio_chain(bio_chain); 320 if (error) 321 goto out; | |
322 offset = alloc_swapdev_block(root_swap); 323 if (!offset) 324 return -ENOSPC; 325 handle->cur->next_swap = offset; | 345 offset = alloc_swapdev_block(root_swap); 346 if (!offset) 347 return -ENOSPC; 348 handle->cur->next_swap = offset; |
326 error = write_page(handle->cur, handle->cur_swap, NULL); | 349 error = write_page(handle->cur, handle->cur_swap, bio_chain); |
327 if (error) 328 goto out; 329 clear_page(handle->cur); 330 handle->cur_swap = offset; 331 handle->k = 0; 332 } | 350 if (error) 351 goto out; 352 clear_page(handle->cur); 353 handle->cur_swap = offset; 354 handle->k = 0; 355 } |
356 if (bio_chain && ++handle->written > handle->nr_free_pages) { 357 error = hib_wait_on_bio_chain(bio_chain); 358 if (error) 359 goto out; 360 handle->written = 0; 361 } |
|
333 out: 334 return error; 335} 336 337static int flush_swap_writer(struct swap_map_handle *handle) 338{ 339 if (handle->cur && handle->cur_swap) 340 return write_page(handle->cur, handle->cur_swap, NULL); --- 26 unchanged lines hidden (view full) --- 367#define LZO_UNC_PAGES 32 368#define LZO_UNC_SIZE (LZO_UNC_PAGES * PAGE_SIZE) 369 370/* Number of pages/bytes we need for compressed data (worst case). */ 371#define LZO_CMP_PAGES DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \ 372 LZO_HEADER, PAGE_SIZE) 373#define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE) 374 | 362 out: 363 return error; 364} 365 366static int flush_swap_writer(struct swap_map_handle *handle) 367{ 368 if (handle->cur && handle->cur_swap) 369 return write_page(handle->cur, handle->cur_swap, NULL); --- 26 unchanged lines hidden (view full) --- 396#define LZO_UNC_PAGES 32 397#define LZO_UNC_SIZE (LZO_UNC_PAGES * PAGE_SIZE) 398 399/* Number of pages/bytes we need for compressed data (worst case). */ 400#define LZO_CMP_PAGES DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \ 401 LZO_HEADER, PAGE_SIZE) 402#define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE) 403 |
404/* Maximum number of threads for compression/decompression. */ 405#define LZO_THREADS 3 406 407/* Maximum number of pages for read buffering. */ 408#define LZO_READ_PAGES (MAP_PAGE_ENTRIES * 8) 409 410 |
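The new LZO_THREADS and LZO_READ_PAGES limits bound the extra memory the threaded path may pin: each worker compresses LZO_UNC_PAGES pages (128 KiB with 4 KiB pages) per batch, and the resume-side read-ahead ring is capped at MAP_PAGE_ENTRIES * 8 pages. A rough sizing sketch, assuming 4 KiB pages, an 8-byte size_t for the LZO_HEADER length prefix, and the usual lzo1x_worst_compress(x) bound of x + x/16 + 64 + 3 (LZO_HEADER and lzo1x_worst_compress() themselves come from the hidden defines above and <linux/lzo.h>; the concrete values here are assumptions):

```c
#include <stdio.h>

#define PAGE_SIZE       4096UL                          /* assumed */
#define LZO_HEADER      sizeof(size_t)                  /* length prefix, 8 bytes assumed */
#define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

#define LZO_UNC_PAGES   32
#define LZO_UNC_SIZE    (LZO_UNC_PAGES * PAGE_SIZE)     /* 128 KiB per chunk */
#define LZO_CMP_PAGES   DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
                                     LZO_HEADER, PAGE_SIZE)
#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(unsigned long long) - 1)
#define LZO_READ_PAGES  (MAP_PAGE_ENTRIES * 8)

int main(void)
{
        /* worst-case compressed buffer per thread: 35 pages = 140 KiB */
        printf("cmp buffer per thread: %lu pages (%lu KiB)\n",
               (unsigned long)LZO_CMP_PAGES,
               (unsigned long)(LZO_CMP_PAGES * PAGE_SIZE / 1024));
        /* upper bound on the resume read-ahead ring: 4088 pages, ~16 MiB */
        printf("read ring upper bound: %lu pages (%lu KiB)\n",
               (unsigned long)LZO_READ_PAGES,
               (unsigned long)(LZO_READ_PAGES * PAGE_SIZE / 1024));
        return 0;
}
```

So with three compression threads the worst-case per-thread buffers stay in the hundreds of kilobytes, while the read ring tops out near 16 MiB and is shrunk further at run time when free memory is short.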
|
375/** 376 * save_image - save the suspend image data 377 */ 378 379static int save_image(struct swap_map_handle *handle, 380 struct snapshot_handle *snapshot, 381 unsigned int nr_to_write) 382{ --- 31 unchanged lines hidden (view full) --- 414 if (!ret) 415 printk(KERN_CONT "\b\b\b\bdone\n"); 416 else 417 printk(KERN_CONT "\n"); 418 swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); 419 return ret; 420} 421 | 411/** 412 * save_image - save the suspend image data 413 */ 414 415static int save_image(struct swap_map_handle *handle, 416 struct snapshot_handle *snapshot, 417 unsigned int nr_to_write) 418{ --- 31 unchanged lines hidden (view full) --- 450 if (!ret) 451 printk(KERN_CONT "\b\b\b\bdone\n"); 452 else 453 printk(KERN_CONT "\n"); 454 swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); 455 return ret; 456} 457 |
458/** 459 * Structure used for CRC32. 460 */ 461struct crc_data { 462 struct task_struct *thr; /* thread */ 463 atomic_t ready; /* ready to start flag */ 464 atomic_t stop; /* ready to stop flag */ 465 unsigned run_threads; /* nr current threads */ 466 wait_queue_head_t go; /* start crc update */ 467 wait_queue_head_t done; /* crc update done */ 468 u32 *crc32; /* points to handle's crc32 */ 469 size_t *unc_len[LZO_THREADS]; /* uncompressed lengths */ 470 unsigned char *unc[LZO_THREADS]; /* uncompressed data */ 471}; |
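crc_data here, and the cmp_data/dec_data structures further down, all use the same two-phase handshake: the producer fills the buffers, sets ready and wakes go; the worker processes one batch, sets stop and wakes done; the producer later waits on done and clears stop before handing over the next batch. A distilled sketch of that protocol (struct worker and the function names are illustrative, not the actual kernel symbols):

```c
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/wait.h>

struct worker {                         /* illustrative; mirrors crc_data/cmp_data */
        struct task_struct *thr;
        atomic_t ready, stop;
        wait_queue_head_t go, done;
};

/* worker side: one batch per ready/go kick, runs in its own kthread */
static int worker_threadfn(void *arg)
{
        struct worker *w = arg;

        while (1) {
                wait_event(w->go, atomic_read(&w->ready) ||
                                  kthread_should_stop());
                if (kthread_should_stop())
                        break;
                atomic_set(&w->ready, 0);

                /* ... crc32_le() or lzo1x_*() over the handed-over buffers ... */

                atomic_set(&w->stop, 1);
                wake_up(&w->done);
        }
        return 0;
}

/* producer side, as save_image_lzo()/load_image_lzo() do further down */
static void kick_worker(struct worker *w)
{
        atomic_set(&w->ready, 1);       /* buffers are filled, go */
        wake_up(&w->go);
}

static void wait_for_worker(struct worker *w)
{
        wait_event(w->done, atomic_read(&w->stop));
        atomic_set(&w->stop, 0);        /* re-arm for the next batch */
}
```

Splitting the kick from the wait is what lets the new save/load paths overlap checksumming and (de)compression with the disk I/O.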
|
422 423/** | 472 473/** |
474 * CRC32 update function that runs in its own thread. 475 */ 476static int crc32_threadfn(void *data) 477{ 478 struct crc_data *d = data; 479 unsigned i; 480 481 while (1) { 482 wait_event(d->go, atomic_read(&d->ready) || 483 kthread_should_stop()); 484 if (kthread_should_stop()) { 485 d->thr = NULL; 486 atomic_set(&d->stop, 1); 487 wake_up(&d->done); 488 break; 489 } 490 atomic_set(&d->ready, 0); 491 492 for (i = 0; i < d->run_threads; i++) 493 *d->crc32 = crc32_le(*d->crc32, 494 d->unc[i], *d->unc_len[i]); 495 atomic_set(&d->stop, 1); 496 wake_up(&d->done); 497 } 498 return 0; 499} 500/** 501 * Structure used for LZO data compression. 502 */ 503struct cmp_data { 504 struct task_struct *thr; /* thread */ 505 atomic_t ready; /* ready to start flag */ 506 atomic_t stop; /* ready to stop flag */ 507 int ret; /* return code */ 508 wait_queue_head_t go; /* start compression */ 509 wait_queue_head_t done; /* compression done */ 510 size_t unc_len; /* uncompressed length */ 511 size_t cmp_len; /* compressed length */ 512 unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */ 513 unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */ 514 unsigned char wrk[LZO1X_1_MEM_COMPRESS]; /* compression workspace */ 515}; 516 517/** 518 * Compression function that runs in its own thread. 519 */ 520static int lzo_compress_threadfn(void *data) 521{ 522 struct cmp_data *d = data; 523 524 while (1) { 525 wait_event(d->go, atomic_read(&d->ready) || 526 kthread_should_stop()); 527 if (kthread_should_stop()) { 528 d->thr = NULL; 529 d->ret = -1; 530 atomic_set(&d->stop, 1); 531 wake_up(&d->done); 532 break; 533 } 534 atomic_set(&d->ready, 0); 535 536 d->ret = lzo1x_1_compress(d->unc, d->unc_len, 537 d->cmp + LZO_HEADER, &d->cmp_len, 538 d->wrk); 539 atomic_set(&d->stop, 1); 540 wake_up(&d->done); 541 } 542 return 0; 543} 544 545/** |
|
424 * save_image_lzo - Save the suspend image data compressed with LZO. 425 * @handle: Swap map handle to use for saving the image. 426 * @snapshot: Image to read data from. 427 * @nr_to_write: Number of pages to save. 428 */ 429static int save_image_lzo(struct swap_map_handle *handle, 430 struct snapshot_handle *snapshot, 431 unsigned int nr_to_write) 432{ 433 unsigned int m; 434 int ret = 0; 435 int nr_pages; 436 int err2; 437 struct bio *bio; 438 struct timeval start; 439 struct timeval stop; | 546 * save_image_lzo - Save the suspend image data compressed with LZO. 547 * @handle: Swap map handle to use for saving the image. 548 * @snapshot: Image to read data from. 549 * @nr_to_write: Number of pages to save. 550 */ 551static int save_image_lzo(struct swap_map_handle *handle, 552 struct snapshot_handle *snapshot, 553 unsigned int nr_to_write) 554{ 555 unsigned int m; 556 int ret = 0; 557 int nr_pages; 558 int err2; 559 struct bio *bio; 560 struct timeval start; 561 struct timeval stop; |
440 size_t off, unc_len, cmp_len; 441 unsigned char *unc, *cmp, *wrk, *page; | 562 size_t off; 563 unsigned thr, run_threads, nr_threads; 564 unsigned char *page = NULL; 565 struct cmp_data *data = NULL; 566 struct crc_data *crc = NULL; |
442 | 567 |
568 /* 569 * We'll limit the number of threads for compression to limit memory 570 * footprint. 571 */ 572 nr_threads = num_online_cpus() - 1; 573 nr_threads = clamp_val(nr_threads, 1, LZO_THREADS); 574 |
|
443 page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); 444 if (!page) { 445 printk(KERN_ERR "PM: Failed to allocate LZO page\n"); | 575 page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); 576 if (!page) { 577 printk(KERN_ERR "PM: Failed to allocate LZO page\n"); |
446 return -ENOMEM; | 578 ret = -ENOMEM; 579 goto out_clean; |
447 } 448 | 580 } 581 |
449 wrk = vmalloc(LZO1X_1_MEM_COMPRESS); 450 if (!wrk) { 451 printk(KERN_ERR "PM: Failed to allocate LZO workspace\n"); 452 free_page((unsigned long)page); 453 return -ENOMEM; | 582 data = vmalloc(sizeof(*data) * nr_threads); 583 if (!data) { 584 printk(KERN_ERR "PM: Failed to allocate LZO data\n"); 585 ret = -ENOMEM; 586 goto out_clean; |
454 } | 587 } |
588 for (thr = 0; thr < nr_threads; thr++) 589 memset(&data[thr], 0, offsetof(struct cmp_data, go)); |
|
455 | 590 |
456 unc = vmalloc(LZO_UNC_SIZE); 457 if (!unc) { 458 printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n"); 459 vfree(wrk); 460 free_page((unsigned long)page); 461 return -ENOMEM; | 591 crc = kmalloc(sizeof(*crc), GFP_KERNEL); 592 if (!crc) { 593 printk(KERN_ERR "PM: Failed to allocate crc\n"); 594 ret = -ENOMEM; 595 goto out_clean; |
462 } | 596 } |
597 memset(crc, 0, offsetof(struct crc_data, go)); |
|
463 | 598 |
464 cmp = vmalloc(LZO_CMP_SIZE); 465 if (!cmp) { 466 printk(KERN_ERR "PM: Failed to allocate LZO compressed\n"); 467 vfree(unc); 468 vfree(wrk); 469 free_page((unsigned long)page); 470 return -ENOMEM; | 599 /* 600 * Start the compression threads. 601 */ 602 for (thr = 0; thr < nr_threads; thr++) { 603 init_waitqueue_head(&data[thr].go); 604 init_waitqueue_head(&data[thr].done); 605 606 data[thr].thr = kthread_run(lzo_compress_threadfn, 607 &data[thr], 608 "image_compress/%u", thr); 609 if (IS_ERR(data[thr].thr)) { 610 data[thr].thr = NULL; 611 printk(KERN_ERR 612 "PM: Cannot start compression threads\n"); 613 ret = -ENOMEM; 614 goto out_clean; 615 } |
471 } 472 | 616 } 617 |
618 /* 619 * Adjust number of free pages after all allocations have been done. 620 * We don't want to run out of pages when writing. 621 */ 622 handle->nr_free_pages = nr_free_pages() >> 1; 623 624 /* 625 * Start the CRC32 thread. 626 */ 627 init_waitqueue_head(&crc->go); 628 init_waitqueue_head(&crc->done); 629 630 handle->crc32 = 0; 631 crc->crc32 = &handle->crc32; 632 for (thr = 0; thr < nr_threads; thr++) { 633 crc->unc[thr] = data[thr].unc; 634 crc->unc_len[thr] = &data[thr].unc_len; 635 } 636 637 crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32"); 638 if (IS_ERR(crc->thr)) { 639 crc->thr = NULL; 640 printk(KERN_ERR "PM: Cannot start CRC32 thread\n"); 641 ret = -ENOMEM; 642 goto out_clean; 643 } 644 |
|
473 printk(KERN_INFO | 645 printk(KERN_INFO |
646 "PM: Using %u thread(s) for compression.\n" |
|
474 "PM: Compressing and saving image data (%u pages) ... ", | 647 "PM: Compressing and saving image data (%u pages) ... ", |
475 nr_to_write); | 648 nr_threads, nr_to_write); |
476 m = nr_to_write / 100; 477 if (!m) 478 m = 1; 479 nr_pages = 0; 480 bio = NULL; 481 do_gettimeofday(&start); 482 for (;;) { | 649 m = nr_to_write / 100; 650 if (!m) 651 m = 1; 652 nr_pages = 0; 653 bio = NULL; 654 do_gettimeofday(&start); 655 for (;;) { |
483 for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) { 484 ret = snapshot_read_next(snapshot); 485 if (ret < 0) 486 goto out_finish; | 656 for (thr = 0; thr < nr_threads; thr++) { 657 for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) { 658 ret = snapshot_read_next(snapshot); 659 if (ret < 0) 660 goto out_finish; |
487 | 661 |
488 if (!ret) | 662 if (!ret) 663 break; 664 665 memcpy(data[thr].unc + off, 666 data_of(*snapshot), PAGE_SIZE); 667 668 if (!(nr_pages % m)) 669 printk(KERN_CONT "\b\b\b\b%3d%%", 670 nr_pages / m); 671 nr_pages++; 672 } 673 if (!off) |
489 break; 490 | 674 break; 675 |
491 memcpy(unc + off, data_of(*snapshot), PAGE_SIZE); | 676 data[thr].unc_len = off; |
492 | 677 |
493 if (!(nr_pages % m)) 494 printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m); 495 nr_pages++; | 678 atomic_set(&data[thr].ready, 1); 679 wake_up(&data[thr].go); |
496 } 497 | 680 } 681 |
498 if (!off) | 682 if (!thr) |
499 break; 500 | 683 break; 684 |
501 unc_len = off; 502 ret = lzo1x_1_compress(unc, unc_len, 503 cmp + LZO_HEADER, &cmp_len, wrk); 504 if (ret < 0) { 505 printk(KERN_ERR "PM: LZO compression failed\n"); 506 break; 507 } | 685 crc->run_threads = thr; 686 atomic_set(&crc->ready, 1); 687 wake_up(&crc->go); |
508 | 688 |
509 if (unlikely(!cmp_len || 510 cmp_len > lzo1x_worst_compress(unc_len))) { 511 printk(KERN_ERR "PM: Invalid LZO compressed length\n"); 512 ret = -1; 513 break; 514 } | 689 for (run_threads = thr, thr = 0; thr < run_threads; thr++) { 690 wait_event(data[thr].done, 691 atomic_read(&data[thr].stop)); 692 atomic_set(&data[thr].stop, 0); |
515 | 693 |
516 *(size_t *)cmp = cmp_len; | 694 ret = data[thr].ret; |
517 | 695 |
518 /* 519 * Given we are writing one page at a time to disk, we copy 520 * that much from the buffer, although the last bit will likely 521 * be smaller than full page. This is OK - we saved the length 522 * of the compressed data, so any garbage at the end will be 523 * discarded when we read it. 524 */ 525 for (off = 0; off < LZO_HEADER + cmp_len; off += PAGE_SIZE) { 526 memcpy(page, cmp + off, PAGE_SIZE); | 696 if (ret < 0) { 697 printk(KERN_ERR "PM: LZO compression failed\n"); 698 goto out_finish; 699 } |
527 | 700 |
528 ret = swap_write_page(handle, page, &bio); 529 if (ret) | 701 if (unlikely(!data[thr].cmp_len || 702 data[thr].cmp_len > 703 lzo1x_worst_compress(data[thr].unc_len))) { 704 printk(KERN_ERR 705 "PM: Invalid LZO compressed length\n"); 706 ret = -1; |
530 goto out_finish; | 707 goto out_finish; |
708 } 709 710 *(size_t *)data[thr].cmp = data[thr].cmp_len; 711 712 /* 713 * Given we are writing one page at a time to disk, we 714 * copy that much from the buffer, although the last 715 * bit will likely be smaller than full page. This is 716 * OK - we saved the length of the compressed data, so 717 * any garbage at the end will be discarded when we 718 * read it. 719 */ 720 for (off = 0; 721 off < LZO_HEADER + data[thr].cmp_len; 722 off += PAGE_SIZE) { 723 memcpy(page, data[thr].cmp + off, PAGE_SIZE); 724 725 ret = swap_write_page(handle, page, &bio); 726 if (ret) 727 goto out_finish; 728 } |
|
531 } | 729 } |
730 731 wait_event(crc->done, atomic_read(&crc->stop)); 732 atomic_set(&crc->stop, 0); |
|
532 } 533 534out_finish: 535 err2 = hib_wait_on_bio_chain(&bio); 536 do_gettimeofday(&stop); 537 if (!ret) 538 ret = err2; | 733 } 734 735out_finish: 736 err2 = hib_wait_on_bio_chain(&bio); 737 do_gettimeofday(&stop); 738 if (!ret) 739 ret = err2; |
539 if (!ret) | 740 if (!ret) { |
540 printk(KERN_CONT "\b\b\b\bdone\n"); | 741 printk(KERN_CONT "\b\b\b\bdone\n"); |
541 else | 742 } else { |
542 printk(KERN_CONT "\n"); | 743 printk(KERN_CONT "\n"); |
744 } |
|
543 swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); | 745 swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); |
746out_clean: 747 if (crc) { 748 if (crc->thr) 749 kthread_stop(crc->thr); 750 kfree(crc); 751 } 752 if (data) { 753 for (thr = 0; thr < nr_threads; thr++) 754 if (data[thr].thr) 755 kthread_stop(data[thr].thr); 756 vfree(data); 757 } 758 if (page) free_page((unsigned long)page); |
|
544 | 759 |
545 vfree(cmp); 546 vfree(unc); 547 vfree(wrk); 548 free_page((unsigned long)page); 549 | |
550 return ret; 551} 552 553/** 554 * enough_swap - Make sure we have enough swap to save the image. 555 * 556 * Returns TRUE or FALSE after checking the total amount of swap 557 * space available from the resume partition. --- 62 unchanged lines hidden (view full) --- 620 621/** 622 * The following functions allow us to read data using a swap map 623 * in a file-alike way 624 */ 625 626static void release_swap_reader(struct swap_map_handle *handle) 627{ | 760 return ret; 761} 762 763/** 764 * enough_swap - Make sure we have enough swap to save the image. 765 * 766 * Returns TRUE or FALSE after checking the total amount of swap 767 * space available from the resume partition. --- 62 unchanged lines hidden (view full) --- 830 831/** 832 * The following functions allow us to read data using a swap map 833 * in a file-alike way 834 */ 835 836static void release_swap_reader(struct swap_map_handle *handle) 837{ |
628 if (handle->cur) 629 free_page((unsigned long)handle->cur); | 838 struct swap_map_page_list *tmp; 839 840 while (handle->maps) { 841 if (handle->maps->map) 842 free_page((unsigned long)handle->maps->map); 843 tmp = handle->maps; 844 handle->maps = handle->maps->next; 845 kfree(tmp); 846 } |
630 handle->cur = NULL; 631} 632 633static int get_swap_reader(struct swap_map_handle *handle, 634 unsigned int *flags_p) 635{ 636 int error; | 847 handle->cur = NULL; 848} 849 850static int get_swap_reader(struct swap_map_handle *handle, 851 unsigned int *flags_p) 852{ 853 int error; |
854 struct swap_map_page_list *tmp, *last; 855 sector_t offset; |
|
637 638 *flags_p = swsusp_header->flags; 639 640 if (!swsusp_header->image) /* how can this happen? */ 641 return -EINVAL; 642 | 856 857 *flags_p = swsusp_header->flags; 858 859 if (!swsusp_header->image) /* how can this happen? */ 860 return -EINVAL; 861 |
643 handle->cur = (struct swap_map_page *)get_zeroed_page(__GFP_WAIT | __GFP_HIGH); 644 if (!handle->cur) 645 return -ENOMEM; | 862 handle->cur = NULL; 863 last = handle->maps = NULL; 864 offset = swsusp_header->image; 865 while (offset) { 866 tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL); 867 if (!tmp) { 868 release_swap_reader(handle); 869 return -ENOMEM; 870 } 871 memset(tmp, 0, sizeof(*tmp)); 872 if (!handle->maps) 873 handle->maps = tmp; 874 if (last) 875 last->next = tmp; 876 last = tmp; |
646 | 877 |
647 error = hib_bio_read_page(swsusp_header->image, handle->cur, NULL); 648 if (error) { 649 release_swap_reader(handle); 650 return error; | 878 tmp->map = (struct swap_map_page *) 879 __get_free_page(__GFP_WAIT | __GFP_HIGH); 880 if (!tmp->map) { 881 release_swap_reader(handle); 882 return -ENOMEM; 883 } 884 885 error = hib_bio_read_page(offset, tmp->map, NULL); 886 if (error) { 887 release_swap_reader(handle); 888 return error; 889 } 890 offset = tmp->map->next_swap; |
651 } 652 handle->k = 0; | 891 } 892 handle->k = 0; |
893 handle->cur = handle->maps->map; |
|
653 return 0; 654} 655 656static int swap_read_page(struct swap_map_handle *handle, void *buf, 657 struct bio **bio_chain) 658{ 659 sector_t offset; 660 int error; | 894 return 0; 895} 896 897static int swap_read_page(struct swap_map_handle *handle, void *buf, 898 struct bio **bio_chain) 899{ 900 sector_t offset; 901 int error; |
902 struct swap_map_page_list *tmp; |
|
661 662 if (!handle->cur) 663 return -EINVAL; 664 offset = handle->cur->entries[handle->k]; 665 if (!offset) 666 return -EFAULT; 667 error = hib_bio_read_page(offset, buf, bio_chain); 668 if (error) 669 return error; 670 if (++handle->k >= MAP_PAGE_ENTRIES) { | 903 904 if (!handle->cur) 905 return -EINVAL; 906 offset = handle->cur->entries[handle->k]; 907 if (!offset) 908 return -EFAULT; 909 error = hib_bio_read_page(offset, buf, bio_chain); 910 if (error) 911 return error; 912 if (++handle->k >= MAP_PAGE_ENTRIES) { |
671 error = hib_wait_on_bio_chain(bio_chain); | |
672 handle->k = 0; | 913 handle->k = 0; |
673 offset = handle->cur->next_swap; 674 if (!offset) | 914 free_page((unsigned long)handle->maps->map); 915 tmp = handle->maps; 916 handle->maps = handle->maps->next; 917 kfree(tmp); 918 if (!handle->maps) |
675 release_swap_reader(handle); | 919 release_swap_reader(handle); |
676 else if (!error) 677 error = hib_bio_read_page(offset, handle->cur, NULL); | 920 else 921 handle->cur = handle->maps->map; |
678 } 679 return error; 680} 681 682static int swap_reader_finish(struct swap_map_handle *handle) 683{ 684 release_swap_reader(handle); 685 --- 6 unchanged lines hidden (view full) --- 692 * (assume there are @nr_pages pages to load) 693 */ 694 695static int load_image(struct swap_map_handle *handle, 696 struct snapshot_handle *snapshot, 697 unsigned int nr_to_read) 698{ 699 unsigned int m; | 922 } 923 return error; 924} 925 926static int swap_reader_finish(struct swap_map_handle *handle) 927{ 928 release_swap_reader(handle); 929 --- 6 unchanged lines hidden (view full) --- 936 * (assume there are @nr_pages pages to load) 937 */ 938 939static int load_image(struct swap_map_handle *handle, 940 struct snapshot_handle *snapshot, 941 unsigned int nr_to_read) 942{ 943 unsigned int m; |
700 int error = 0; | 944 int ret = 0; |
701 struct timeval start; 702 struct timeval stop; 703 struct bio *bio; 704 int err2; 705 unsigned nr_pages; 706 707 printk(KERN_INFO "PM: Loading image data pages (%u pages) ... ", 708 nr_to_read); 709 m = nr_to_read / 100; 710 if (!m) 711 m = 1; 712 nr_pages = 0; 713 bio = NULL; 714 do_gettimeofday(&start); 715 for ( ; ; ) { | 945 struct timeval start; 946 struct timeval stop; 947 struct bio *bio; 948 int err2; 949 unsigned nr_pages; 950 951 printk(KERN_INFO "PM: Loading image data pages (%u pages) ... ", 952 nr_to_read); 953 m = nr_to_read / 100; 954 if (!m) 955 m = 1; 956 nr_pages = 0; 957 bio = NULL; 958 do_gettimeofday(&start); 959 for ( ; ; ) { |
716 error = snapshot_write_next(snapshot); 717 if (error <= 0) | 960 ret = snapshot_write_next(snapshot); 961 if (ret <= 0) |
718 break; | 962 break; |
719 error = swap_read_page(handle, data_of(*snapshot), &bio); 720 if (error) | 963 ret = swap_read_page(handle, data_of(*snapshot), &bio); 964 if (ret) |
721 break; 722 if (snapshot->sync_read) | 965 break; 966 if (snapshot->sync_read) |
723 error = hib_wait_on_bio_chain(&bio); 724 if (error) | 967 ret = hib_wait_on_bio_chain(&bio); 968 if (ret) |
725 break; 726 if (!(nr_pages % m)) 727 printk("\b\b\b\b%3d%%", nr_pages / m); 728 nr_pages++; 729 } 730 err2 = hib_wait_on_bio_chain(&bio); 731 do_gettimeofday(&stop); | 969 break; 970 if (!(nr_pages % m)) 971 printk("\b\b\b\b%3d%%", nr_pages / m); 972 nr_pages++; 973 } 974 err2 = hib_wait_on_bio_chain(&bio); 975 do_gettimeofday(&stop); |
732 if (!error) 733 error = err2; 734 if (!error) { | 976 if (!ret) 977 ret = err2; 978 if (!ret) { |
735 printk("\b\b\b\bdone\n"); 736 snapshot_write_finalize(snapshot); 737 if (!snapshot_image_loaded(snapshot)) | 979 printk("\b\b\b\bdone\n"); 980 snapshot_write_finalize(snapshot); 981 if (!snapshot_image_loaded(snapshot)) |
738 error = -ENODATA; | 982 ret = -ENODATA; |
739 } else 740 printk("\n"); 741 swsusp_show_speed(&start, &stop, nr_to_read, "Read"); | 983 } else 984 printk("\n"); 985 swsusp_show_speed(&start, &stop, nr_to_read, "Read"); |
742 return error; | 986 return ret; |
743} 744 745/** | 987} 988 989/** |
990 * Structure used for LZO data decompression. 991 */ 992struct dec_data { 993 struct task_struct *thr; /* thread */ 994 atomic_t ready; /* ready to start flag */ 995 atomic_t stop; /* ready to stop flag */ 996 int ret; /* return code */ 997 wait_queue_head_t go; /* start decompression */ 998 wait_queue_head_t done; /* decompression done */ 999 size_t unc_len; /* uncompressed length */ 1000 size_t cmp_len; /* compressed length */ 1001 unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */ 1002 unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */ 1003}; 1004 1005/** 1006 * Decompression function that runs in its own thread. 1007 */ 1008static int lzo_decompress_threadfn(void *data) 1009{ 1010 struct dec_data *d = data; 1011 1012 while (1) { 1013 wait_event(d->go, atomic_read(&d->ready) || 1014 kthread_should_stop()); 1015 if (kthread_should_stop()) { 1016 d->thr = NULL; 1017 d->ret = -1; 1018 atomic_set(&d->stop, 1); 1019 wake_up(&d->done); 1020 break; 1021 } 1022 atomic_set(&d->ready, 0); 1023 1024 d->unc_len = LZO_UNC_SIZE; 1025 d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len, 1026 d->unc, &d->unc_len); 1027 atomic_set(&d->stop, 1); 1028 wake_up(&d->done); 1029 } 1030 return 0; 1031} 1032 1033/** |
|
746 * load_image_lzo - Load compressed image data and decompress them with LZO. 747 * @handle: Swap map handle to use for loading data. 748 * @snapshot: Image to copy uncompressed data into. 749 * @nr_to_read: Number of pages to load. 750 */ 751static int load_image_lzo(struct swap_map_handle *handle, 752 struct snapshot_handle *snapshot, 753 unsigned int nr_to_read) 754{ 755 unsigned int m; | 1034 * load_image_lzo - Load compressed image data and decompress them with LZO. 1035 * @handle: Swap map handle to use for loading data. 1036 * @snapshot: Image to copy uncompressed data into. 1037 * @nr_to_read: Number of pages to load. 1038 */ 1039static int load_image_lzo(struct swap_map_handle *handle, 1040 struct snapshot_handle *snapshot, 1041 unsigned int nr_to_read) 1042{ 1043 unsigned int m; |
756 int error = 0; | 1044 int ret = 0; 1045 int eof = 0; |
757 struct bio *bio; 758 struct timeval start; 759 struct timeval stop; 760 unsigned nr_pages; | 1046 struct bio *bio; 1047 struct timeval start; 1048 struct timeval stop; 1049 unsigned nr_pages; |
761 size_t i, off, unc_len, cmp_len; 762 unsigned char *unc, *cmp, *page[LZO_CMP_PAGES]; | 1050 size_t off; 1051 unsigned i, thr, run_threads, nr_threads; 1052 unsigned ring = 0, pg = 0, ring_size = 0, 1053 have = 0, want, need, asked = 0; 1054 unsigned long read_pages; 1055 unsigned char **page = NULL; 1056 struct dec_data *data = NULL; 1057 struct crc_data *crc = NULL; |
763 | 1058 |
764 for (i = 0; i < LZO_CMP_PAGES; i++) { 765 page[i] = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); 766 if (!page[i]) { 767 printk(KERN_ERR "PM: Failed to allocate LZO page\n"); | 1059 /* 1060 * We'll limit the number of threads for decompression to limit memory 1061 * footprint. 1062 */ 1063 nr_threads = num_online_cpus() - 1; 1064 nr_threads = clamp_val(nr_threads, 1, LZO_THREADS); |
768 | 1065 |
769 while (i) 770 free_page((unsigned long)page[--i]); | 1066 page = vmalloc(sizeof(*page) * LZO_READ_PAGES); 1067 if (!page) { 1068 printk(KERN_ERR "PM: Failed to allocate LZO page\n"); 1069 ret = -ENOMEM; 1070 goto out_clean; 1071 } |
771 | 1072 |
772 return -ENOMEM; 773 } | 1073 data = vmalloc(sizeof(*data) * nr_threads); 1074 if (!data) { 1075 printk(KERN_ERR "PM: Failed to allocate LZO data\n"); 1076 ret = -ENOMEM; 1077 goto out_clean; |
774 } | 1078 } |
1079 for (thr = 0; thr < nr_threads; thr++) 1080 memset(&data[thr], 0, offsetof(struct dec_data, go)); |
|
775 | 1081 |
776 unc = vmalloc(LZO_UNC_SIZE); 777 if (!unc) { 778 printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n"); | 1082 crc = kmalloc(sizeof(*crc), GFP_KERNEL); 1083 if (!crc) { 1084 printk(KERN_ERR "PM: Failed to allocate crc\n"); 1085 ret = -ENOMEM; 1086 goto out_clean; 1087 } 1088 memset(crc, 0, offsetof(struct crc_data, go)); |
779 | 1089 |
780 for (i = 0; i < LZO_CMP_PAGES; i++) 781 free_page((unsigned long)page[i]); | 1090 /* 1091 * Start the decompression threads. 1092 */ 1093 for (thr = 0; thr < nr_threads; thr++) { 1094 init_waitqueue_head(&data[thr].go); 1095 init_waitqueue_head(&data[thr].done); |
782 | 1096 |
783 return -ENOMEM; | 1097 data[thr].thr = kthread_run(lzo_decompress_threadfn, 1098 &data[thr], 1099 "image_decompress/%u", thr); 1100 if (IS_ERR(data[thr].thr)) { 1101 data[thr].thr = NULL; 1102 printk(KERN_ERR 1103 "PM: Cannot start decompression threads\n"); 1104 ret = -ENOMEM; 1105 goto out_clean; 1106 } |
784 } 785 | 1107 } 1108 |
786 cmp = vmalloc(LZO_CMP_SIZE); 787 if (!cmp) { 788 printk(KERN_ERR "PM: Failed to allocate LZO compressed\n"); | 1109 /* 1110 * Start the CRC32 thread. 1111 */ 1112 init_waitqueue_head(&crc->go); 1113 init_waitqueue_head(&crc->done); |
789 | 1114 |
790 vfree(unc); 791 for (i = 0; i < LZO_CMP_PAGES; i++) 792 free_page((unsigned long)page[i]); | 1115 handle->crc32 = 0; 1116 crc->crc32 = &handle->crc32; 1117 for (thr = 0; thr < nr_threads; thr++) { 1118 crc->unc[thr] = data[thr].unc; 1119 crc->unc_len[thr] = &data[thr].unc_len; 1120 } |
793 | 1121 |
794 return -ENOMEM; | 1122 crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32"); 1123 if (IS_ERR(crc->thr)) { 1124 crc->thr = NULL; 1125 printk(KERN_ERR "PM: Cannot start CRC32 thread\n"); 1126 ret = -ENOMEM; 1127 goto out_clean; |
795 } 796 | 1128 } 1129 |
1130 /* 1131 * Adjust number of pages for read buffering, in case we are short. 1132 */ 1133 read_pages = (nr_free_pages() - snapshot_get_image_size()) >> 1; 1134 read_pages = clamp_val(read_pages, LZO_CMP_PAGES, LZO_READ_PAGES); 1135 1136 for (i = 0; i < read_pages; i++) { 1137 page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ? 1138 __GFP_WAIT | __GFP_HIGH : 1139 __GFP_WAIT); 1140 if (!page[i]) { 1141 if (i < LZO_CMP_PAGES) { 1142 ring_size = i; 1143 printk(KERN_ERR 1144 "PM: Failed to allocate LZO pages\n"); 1145 ret = -ENOMEM; 1146 goto out_clean; 1147 } else { 1148 break; 1149 } 1150 } 1151 } 1152 want = ring_size = i; 1153 |
|
797 printk(KERN_INFO | 1154 printk(KERN_INFO |
1155 "PM: Using %u thread(s) for decompression.\n" |
|
798 "PM: Loading and decompressing image data (%u pages) ... ", | 1156 "PM: Loading and decompressing image data (%u pages) ... ", |
799 nr_to_read); | 1157 nr_threads, nr_to_read); |
800 m = nr_to_read / 100; 801 if (!m) 802 m = 1; 803 nr_pages = 0; 804 bio = NULL; 805 do_gettimeofday(&start); 806 | 1158 m = nr_to_read / 100; 1159 if (!m) 1160 m = 1; 1161 nr_pages = 0; 1162 bio = NULL; 1163 do_gettimeofday(&start); 1164 |
807 error = snapshot_write_next(snapshot); 808 if (error <= 0) | 1165 ret = snapshot_write_next(snapshot); 1166 if (ret <= 0) |
809 goto out_finish; 810 | 1167 goto out_finish; 1168 |
811 for (;;) { 812 error = swap_read_page(handle, page[0], NULL); /* sync */ 813 if (error) 814 break; 815 816 cmp_len = *(size_t *)page[0]; 817 if (unlikely(!cmp_len || 818 cmp_len > lzo1x_worst_compress(LZO_UNC_SIZE))) { 819 printk(KERN_ERR "PM: Invalid LZO compressed length\n"); 820 error = -1; 821 break; | 1169 for(;;) { 1170 for (i = 0; !eof && i < want; i++) { 1171 ret = swap_read_page(handle, page[ring], &bio); 1172 if (ret) { 1173 /* 1174 * On real read error, finish. On end of data, 1175 * set EOF flag and just exit the read loop. 1176 */ 1177 if (handle->cur && 1178 handle->cur->entries[handle->k]) { 1179 goto out_finish; 1180 } else { 1181 eof = 1; 1182 break; 1183 } 1184 } 1185 if (++ring >= ring_size) 1186 ring = 0; |
822 } | 1187 } |
1188 asked += i; 1189 want -= i; |
|
823 | 1190 |
824 for (off = PAGE_SIZE, i = 1; 825 off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) { 826 error = swap_read_page(handle, page[i], &bio); 827 if (error) | 1191 /* 1192 * We are out of data, wait for some more. 1193 */ 1194 if (!have) { 1195 if (!asked) 1196 break; 1197 1198 ret = hib_wait_on_bio_chain(&bio); 1199 if (ret) |
828 goto out_finish; | 1200 goto out_finish; |
1201 have += asked; 1202 asked = 0; 1203 if (eof) 1204 eof = 2; |
|
829 } 830 | 1205 } 1206 |
831 error = hib_wait_on_bio_chain(&bio); /* need all data now */ 832 if (error) 833 goto out_finish; 834 835 for (off = 0, i = 0; 836 off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) { 837 memcpy(cmp + off, page[i], PAGE_SIZE); | 1207 if (crc->run_threads) { 1208 wait_event(crc->done, atomic_read(&crc->stop)); 1209 atomic_set(&crc->stop, 0); 1210 crc->run_threads = 0; |
838 } 839 | 1211 } 1212 |
840 unc_len = LZO_UNC_SIZE; 841 error = lzo1x_decompress_safe(cmp + LZO_HEADER, cmp_len, 842 unc, &unc_len); 843 if (error < 0) { 844 printk(KERN_ERR "PM: LZO decompression failed\n"); 845 break; | 1213 for (thr = 0; have && thr < nr_threads; thr++) { 1214 data[thr].cmp_len = *(size_t *)page[pg]; 1215 if (unlikely(!data[thr].cmp_len || 1216 data[thr].cmp_len > 1217 lzo1x_worst_compress(LZO_UNC_SIZE))) { 1218 printk(KERN_ERR 1219 "PM: Invalid LZO compressed length\n"); 1220 ret = -1; 1221 goto out_finish; 1222 } 1223 1224 need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER, 1225 PAGE_SIZE); 1226 if (need > have) { 1227 if (eof > 1) { 1228 ret = -1; 1229 goto out_finish; 1230 } 1231 break; 1232 } 1233 1234 for (off = 0; 1235 off < LZO_HEADER + data[thr].cmp_len; 1236 off += PAGE_SIZE) { 1237 memcpy(data[thr].cmp + off, 1238 page[pg], PAGE_SIZE); 1239 have--; 1240 want++; 1241 if (++pg >= ring_size) 1242 pg = 0; 1243 } 1244 1245 atomic_set(&data[thr].ready, 1); 1246 wake_up(&data[thr].go); |
846 } 847 | 1247 } 1248 |
848 if (unlikely(!unc_len || 849 unc_len > LZO_UNC_SIZE || 850 unc_len & (PAGE_SIZE - 1))) { 851 printk(KERN_ERR "PM: Invalid LZO uncompressed length\n"); 852 error = -1; 853 break; | 1249 /* 1250 * Wait for more data while we are decompressing. 1251 */ 1252 if (have < LZO_CMP_PAGES && asked) { 1253 ret = hib_wait_on_bio_chain(&bio); 1254 if (ret) 1255 goto out_finish; 1256 have += asked; 1257 asked = 0; 1258 if (eof) 1259 eof = 2; |
854 } 855 | 1260 } 1261 |
856 for (off = 0; off < unc_len; off += PAGE_SIZE) { 857 memcpy(data_of(*snapshot), unc + off, PAGE_SIZE); | 1262 for (run_threads = thr, thr = 0; thr < run_threads; thr++) { 1263 wait_event(data[thr].done, 1264 atomic_read(&data[thr].stop)); 1265 atomic_set(&data[thr].stop, 0); |
858 | 1266 |
859 if (!(nr_pages % m)) 860 printk("\b\b\b\b%3d%%", nr_pages / m); 861 nr_pages++; | 1267 ret = data[thr].ret; |
862 | 1268 |
863 error = snapshot_write_next(snapshot); 864 if (error <= 0) | 1269 if (ret < 0) { 1270 printk(KERN_ERR 1271 "PM: LZO decompression failed\n"); |
865 goto out_finish; | 1272 goto out_finish; |
1273 } 1274 1275 if (unlikely(!data[thr].unc_len || 1276 data[thr].unc_len > LZO_UNC_SIZE || 1277 data[thr].unc_len & (PAGE_SIZE - 1))) { 1278 printk(KERN_ERR 1279 "PM: Invalid LZO uncompressed length\n"); 1280 ret = -1; 1281 goto out_finish; 1282 } 1283 1284 for (off = 0; 1285 off < data[thr].unc_len; off += PAGE_SIZE) { 1286 memcpy(data_of(*snapshot), 1287 data[thr].unc + off, PAGE_SIZE); 1288 1289 if (!(nr_pages % m)) 1290 printk("\b\b\b\b%3d%%", nr_pages / m); 1291 nr_pages++; 1292 1293 ret = snapshot_write_next(snapshot); 1294 if (ret <= 0) { 1295 crc->run_threads = thr + 1; 1296 atomic_set(&crc->ready, 1); 1297 wake_up(&crc->go); 1298 goto out_finish; 1299 } 1300 } |
|
866 } | 1301 } |
1302 1303 crc->run_threads = thr; 1304 atomic_set(&crc->ready, 1); 1305 wake_up(&crc->go); |
|
867 } 868 869out_finish: | 1306 } 1307 1308out_finish: |
1309 if (crc->run_threads) { 1310 wait_event(crc->done, atomic_read(&crc->stop)); 1311 atomic_set(&crc->stop, 0); 1312 } |
|
870 do_gettimeofday(&stop); | 1313 do_gettimeofday(&stop); |
871 if (!error) { | 1314 if (!ret) { |
872 printk("\b\b\b\bdone\n"); 873 snapshot_write_finalize(snapshot); 874 if (!snapshot_image_loaded(snapshot)) | 1315 printk("\b\b\b\bdone\n"); 1316 snapshot_write_finalize(snapshot); 1317 if (!snapshot_image_loaded(snapshot)) |
875 error = -ENODATA; | 1318 ret = -ENODATA; 1319 if (!ret) { 1320 if (swsusp_header->flags & SF_CRC32_MODE) { 1321 if(handle->crc32 != swsusp_header->crc32) { 1322 printk(KERN_ERR 1323 "PM: Invalid image CRC32!\n"); 1324 ret = -ENODATA; 1325 } 1326 } 1327 } |
876 } else 877 printk("\n"); 878 swsusp_show_speed(&start, &stop, nr_to_read, "Read"); | 1328 } else 1329 printk("\n"); 1330 swsusp_show_speed(&start, &stop, nr_to_read, "Read"); |
879 880 vfree(cmp); 881 vfree(unc); 882 for (i = 0; i < LZO_CMP_PAGES; i++) | 1331out_clean: 1332 for (i = 0; i < ring_size; i++) |
883 free_page((unsigned long)page[i]); | 1333 free_page((unsigned long)page[i]); |
1334 if (crc) { 1335 if (crc->thr) 1336 kthread_stop(crc->thr); 1337 kfree(crc); 1338 } 1339 if (data) { 1340 for (thr = 0; thr < nr_threads; thr++) 1341 if (data[thr].thr) 1342 kthread_stop(data[thr].thr); 1343 vfree(data); 1344 } 1345 if (page) vfree(page); |
|
884 | 1346 |
885 return error; | 1347 return ret; |
886} 887 888/** 889 * swsusp_read - read the hibernation image. 890 * @flags_p: flags passed by the "frozen" kernel in the image header should 891 * be written into this memory location 892 */ 893 --- 96 unchanged lines hidden --- | 1348} 1349 1350/** 1351 * swsusp_read - read the hibernation image. 1352 * @flags_p: flags passed by the "frozen" kernel in the image header should 1353 * be written into this memory location 1354 */ 1355 --- 96 unchanged lines hidden --- |