Lines Matching +full:data +full:- +full:ready
1 // SPDX-License-Identifier: GPL-2.0-only
8 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
10 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
49 * The swap map is a data structure used for keeping track of each page
62 #define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)
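For reference, the structure that MAP_PAGE_ENTRIES sizes looks like this in mainline kernel/power/swap.c (quoted as a sketch; it is not among the matched lines):

struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES];	/* swap sectors holding image pages */
	sector_t next_swap;			/* sector of the next map page */
};

One page holds MAP_PAGE_ENTRIES entries plus the link to the next map page, which is why the macro subtracts 1.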
69 return nr_free_pages() - nr_free_highpages(); in low_free_pages()
93 * a file-like way
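The handle that provides this file-like access is, roughly, the following in mainline (field set inferred from the accesses shown further down; treat as a sketch):

struct swap_map_handle {
	struct swap_map_page *cur;		/* current map page */
	struct swap_map_page_list *maps;	/* chain of map pages (read path) */
	sector_t cur_swap;			/* sector of the current map page */
	sector_t first_sector;			/* where the image starts */
	unsigned int k;				/* index into cur->entries[] */
	unsigned long reqd_free_pages;		/* low-memory watermark */
	u32 crc32;				/* checksum of the image */
};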
107 char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
108 sizeof(u32) - sizeof(u32)];
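The reserved[] padding above pushes the meaningful fields to the very end of the on-disk signature page; the 20 accounts for the two 10-byte signature arrays. The surrounding type, roughly as in mainline:

static struct swsusp_header {
	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
		      sizeof(u32) - sizeof(u32)];
	u32	hw_sig;		/* hardware signature */
	u32	crc32;		/* image checksum */
	sector_t image;		/* first sector of the image */
	unsigned int flags;	/* flags to pass to the "boot" kernel */
	char	orig_sig[10];	/* saved swap signature */
	char	sig[10];	/* HIBERNATE_SIG while an image exists */
} __packed;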
142 if (swap_offset < ext->start) { in swsusp_extents_insert()
144 if (swap_offset == ext->start - 1) { in swsusp_extents_insert()
145 ext->start--; in swsusp_extents_insert()
148 new = &((*new)->rb_left); in swsusp_extents_insert()
149 } else if (swap_offset > ext->end) { in swsusp_extents_insert()
151 if (swap_offset == ext->end + 1) { in swsusp_extents_insert()
152 ext->end++; in swsusp_extents_insert()
155 new = &((*new)->rb_right); in swsusp_extents_insert()
158 return -EINVAL; in swsusp_extents_insert()
164 return -ENOMEM; in swsusp_extents_insert()
166 ext->start = swap_offset; in swsusp_extents_insert()
167 ext->end = swap_offset; in swsusp_extents_insert()
168 rb_link_node(&ext->node, parent, new); in swsusp_extents_insert()
169 rb_insert_color(&ext->node, &swsusp_extents); in swsusp_extents_insert()
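The rb-tree node being grown and inserted above is a simple extent of swap offsets; in mainline it is:

struct swsusp_extent {
	struct rb_node node;
	unsigned long start;	/* first swap offset in the extent */
	unsigned long end;	/* last swap offset in the extent */
};

An offset adjacent to an existing extent extends it in place (ext->start-- or ext->end++) rather than allocating a new node.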
174 * alloc_swapdev_block - allocate a swap page and register that it has been allocated, so that it can be freed in case of an error.
193 * free_all_swap_pages - free swap pages allocated for saving image data.
207 swap_free_nr(swp_entry(swap, ext->start), in free_all_swap_pages()
208 ext->end - ext->start + 1); in free_all_swap_pages()
235 atomic_set(&hb->count, 0); in hib_init_batch()
236 init_waitqueue_head(&hb->wait); in hib_init_batch()
237 hb->error = BLK_STS_OK; in hib_init_batch()
238 blk_start_plug(&hb->plug); in hib_init_batch()
243 blk_finish_plug(&hb->plug); in hib_finish_batch()
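hib_init_batch() and hib_finish_batch() operate on a small completion tracker for in-flight bios; per mainline:

struct hib_bio_batch {
	atomic_t		count;	/* in-flight bios */
	wait_queue_head_t	wait;	/* woken when count drops to 0 */
	blk_status_t		error;	/* first error seen, if any */
	struct blk_plug		plug;	/* batches bio submission */
};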
248 struct hib_bio_batch *hb = bio->bi_private; in hib_end_io()
251 if (bio->bi_status) { in hib_end_io()
252 pr_alert("Read-error on swap-device (%u:%u:%Lu)\n", in hib_end_io()
254 (unsigned long long)bio->bi_iter.bi_sector); in hib_end_io()
263 if (bio->bi_status && !hb->error) in hib_end_io()
264 hb->error = bio->bi_status; in hib_end_io()
265 if (atomic_dec_and_test(&hb->count)) in hib_end_io()
266 wake_up(&hb->wait); in hib_end_io()
284 bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9); in hib_submit_io_async()
286 bio->bi_end_io = hib_end_io; in hib_submit_io_async()
287 bio->bi_private = hb; in hib_submit_io_async()
288 atomic_inc(&hb->count); in hib_submit_io_async()
299 wait_event(hb->wait, atomic_read(&hb->count) == 0); in hib_wait_io()
300 return blk_status_to_errno(hb->error); in hib_wait_io()
311 if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) || in mark_swapfiles()
312 !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) { in mark_swapfiles()
313 memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10); in mark_swapfiles()
314 memcpy(swsusp_header->sig, HIBERNATE_SIG, 10); in mark_swapfiles()
315 swsusp_header->image = handle->first_sector; in mark_swapfiles()
317 swsusp_header->hw_sig = swsusp_hardware_signature; in mark_swapfiles()
320 swsusp_header->flags = flags; in mark_swapfiles()
322 swsusp_header->crc32 = handle->crc32; in mark_swapfiles()
327 error = -ENODEV; in mark_swapfiles()
340 * swsusp_swap_check - check if the resume device is a swap device
366 * write_page - Write one page to given swap location.
379 return -ENOSPC; in write_page()
402 if (handle->cur) in release_swap_writer()
403 free_page((unsigned long)handle->cur); in release_swap_writer()
404 handle->cur = NULL; in release_swap_writer()
413 if (ret != -ENOSPC) in get_swap_writer()
414 pr_err("Cannot find swap device, try swapon -a\n"); in get_swap_writer()
417 handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL); in get_swap_writer()
418 if (!handle->cur) { in get_swap_writer()
419 ret = -ENOMEM; in get_swap_writer()
422 handle->cur_swap = alloc_swapdev_block(root_swap); in get_swap_writer()
423 if (!handle->cur_swap) { in get_swap_writer()
424 ret = -ENOSPC; in get_swap_writer()
427 handle->k = 0; in get_swap_writer()
428 handle->reqd_free_pages = reqd_free_pages(); in get_swap_writer()
429 handle->first_sector = handle->cur_swap; in get_swap_writer()
444 if (!handle->cur) in swap_write_page()
445 return -EINVAL; in swap_write_page()
450 handle->cur->entries[handle->k++] = offset; in swap_write_page()
451 if (handle->k >= MAP_PAGE_ENTRIES) { in swap_write_page()
454 return -ENOSPC; in swap_write_page()
455 handle->cur->next_swap = offset; in swap_write_page()
456 error = write_page(handle->cur, handle->cur_swap, hb); in swap_write_page()
459 clear_page(handle->cur); in swap_write_page()
460 handle->cur_swap = offset; in swap_write_page()
461 handle->k = 0; in swap_write_page()
463 if (hb && low_free_pages() <= handle->reqd_free_pages) { in swap_write_page()
471 handle->reqd_free_pages = reqd_free_pages(); in swap_write_page()
480 if (handle->cur && handle->cur_swap) in flush_swap_writer()
481 return write_page(handle->cur, handle->cur_swap, NULL); in flush_swap_writer()
483 return -EINVAL; in flush_swap_writer()
505 * Bytes we need for compressed data in the worst case. We assume, as a limitation, that this is the worst case among all supported compression algorithms.
510 /* We need to remember how much compressed data we need to read. */
517 /* Number of pages we need for compressed data (worst case). */
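The comments above correspond to the following definitions in mainline (quoted for context; the worst-case formula is the LZO expansion bound, assumed to dominate all supported algorithms):

#define bytes_worst_compress(x)	((x) + ((x) / 16) + 64 + 3 + 2)
#define CMP_HEADER	sizeof(size_t)		/* stored compressed length */
#define UNC_PAGES	32			/* pages per compression chunk */
#define UNC_SIZE	(UNC_PAGES * PAGE_SIZE)
#define CMP_PAGES	DIV_ROUND_UP(bytes_worst_compress(UNC_SIZE) + \
				     CMP_HEADER, PAGE_SIZE)
#define CMP_SIZE	(CMP_PAGES * PAGE_SIZE)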
530 * save_image - save the suspend image data
547 pr_info("Saving image data pages (%u pages)...\n", in save_image()
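The elided body of save_image() is, in outline, a pump from the snapshot into the swap map. A sketch of the mainline loop, with error handling and bookkeeping trimmed (m is nr_to_write / 10, used for the progress prints):

	ret = snapshot_read_next(snapshot);
	while (ret > 0) {
		ret = swap_write_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			pr_info("Image saving progress: %3d%%\n",
				nr_pages / m * 10);
		nr_pages++;
		ret = snapshot_read_next(snapshot);
	}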
582 atomic_t ready; /* ready to start flag */ member
583 atomic_t stop; /* ready to stop flag */
589 unsigned char *unc[CMP_THREADS]; /* uncompressed data */
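The members above belong to the CRC32 worker's control block; assembled from mainline, it is roughly:

struct crc_data {
	struct task_struct *thr;		/* thread */
	atomic_t ready;				/* ready to start flag */
	atomic_t stop;				/* ready to stop flag */
	unsigned run_threads;			/* nr current threads */
	wait_queue_head_t go;			/* start crc update */
	wait_queue_head_t done;			/* crc update done */
	u32 *crc32;				/* points to handle's crc32 */
	size_t *unc_len[CMP_THREADS];		/* uncompressed lengths */
	unsigned char *unc[CMP_THREADS];	/* uncompressed data */
};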
595 static int crc32_threadfn(void *data) in crc32_threadfn() argument
597 struct crc_data *d = data; in crc32_threadfn()
601 wait_event(d->go, atomic_read_acquire(&d->ready) || in crc32_threadfn()
604 d->thr = NULL; in crc32_threadfn()
605 atomic_set_release(&d->stop, 1); in crc32_threadfn()
606 wake_up(&d->done); in crc32_threadfn()
609 atomic_set(&d->ready, 0); in crc32_threadfn()
611 for (i = 0; i < d->run_threads; i++) in crc32_threadfn()
612 *d->crc32 = crc32_le(*d->crc32, in crc32_threadfn()
613 d->unc[i], *d->unc_len[i]); in crc32_threadfn()
614 atomic_set_release(&d->stop, 1); in crc32_threadfn()
615 wake_up(&d->done); in crc32_threadfn()
620 * Structure used for data compression.
626 atomic_t ready; /* ready to start flag */ member
627 atomic_t stop; /* ready to stop flag */
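Filled out from mainline, the per-thread compression control block is roughly (the crypto_acomp fields match the acomp calls visible in compress_threadfn() below):

struct cmp_data {
	struct task_struct *thr;	/* thread */
	struct crypto_acomp *cc;	/* crypto compressor */
	struct acomp_req *cr;		/* crypto request */
	atomic_t ready;			/* ready to start flag */
	atomic_t stop;			/* ready to stop flag */
	int ret;			/* return code */
	wait_queue_head_t go;		/* start compression */
	wait_queue_head_t done;		/* compression done */
	size_t unc_len;			/* uncompressed length */
	unsigned char unc[UNC_SIZE];	/* uncompressed buffer */
	size_t cmp_len;			/* compressed length */
	unsigned char cmp[CMP_SIZE];	/* compressed buffer */
};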
643 static int compress_threadfn(void *data) in compress_threadfn() argument
645 struct cmp_data *d = data; in compress_threadfn()
648 wait_event(d->go, atomic_read_acquire(&d->ready) || in compress_threadfn()
651 d->thr = NULL; in compress_threadfn()
652 d->ret = -1; in compress_threadfn()
653 atomic_set_release(&d->stop, 1); in compress_threadfn()
654 wake_up(&d->done); in compress_threadfn()
657 atomic_set(&d->ready, 0); in compress_threadfn()
659 acomp_request_set_callback(d->cr, CRYPTO_TFM_REQ_MAY_SLEEP, in compress_threadfn()
661 acomp_request_set_src_nondma(d->cr, d->unc, d->unc_len); in compress_threadfn()
662 acomp_request_set_dst_nondma(d->cr, d->cmp + CMP_HEADER, in compress_threadfn()
663 CMP_SIZE - CMP_HEADER); in compress_threadfn()
664 d->ret = crypto_acomp_compress(d->cr); in compress_threadfn()
665 d->cmp_len = d->cr->dlen; in compress_threadfn()
667 atomic64_add(d->cmp_len, &compressed_size); in compress_threadfn()
668 atomic_set_release(&d->stop, 1); in compress_threadfn()
669 wake_up(&d->done); in compress_threadfn()
675 * save_compressed_image - Save the suspend image data after compression.
677 * @snapshot: Image to read data from.
694 struct cmp_data *data = NULL; in save_compressed_image() local
705 nr_threads = num_online_cpus() - 1; in save_compressed_image()
711 ret = -ENOMEM; in save_compressed_image()
715 data = vcalloc(nr_threads, sizeof(*data)); in save_compressed_image()
716 if (!data) { in save_compressed_image()
717 pr_err("Failed to allocate %s data\n", hib_comp_algo); in save_compressed_image()
718 ret = -ENOMEM; in save_compressed_image()
725 ret = -ENOMEM; in save_compressed_image()
733 init_waitqueue_head(&data[thr].go); in save_compressed_image()
734 init_waitqueue_head(&data[thr].done); in save_compressed_image()
736 data[thr].cc = crypto_alloc_acomp(hib_comp_algo, 0, CRYPTO_ALG_ASYNC); in save_compressed_image()
737 if (IS_ERR_OR_NULL(data[thr].cc)) { in save_compressed_image()
738 pr_err("Could not allocate comp stream %ld\n", PTR_ERR(data[thr].cc)); in save_compressed_image()
739 ret = -EFAULT; in save_compressed_image()
743 data[thr].cr = acomp_request_alloc(data[thr].cc); in save_compressed_image()
744 if (!data[thr].cr) { in save_compressed_image()
746 ret = -ENOMEM; in save_compressed_image()
750 data[thr].thr = kthread_run(compress_threadfn, in save_compressed_image()
751 &data[thr], in save_compressed_image()
753 if (IS_ERR(data[thr].thr)) { in save_compressed_image()
754 data[thr].thr = NULL; in save_compressed_image()
756 ret = -ENOMEM; in save_compressed_image()
764 init_waitqueue_head(&crc->go); in save_compressed_image()
765 init_waitqueue_head(&crc->done); in save_compressed_image()
767 handle->crc32 = 0; in save_compressed_image()
768 crc->crc32 = &handle->crc32; in save_compressed_image()
770 crc->unc[thr] = data[thr].unc; in save_compressed_image()
771 crc->unc_len[thr] = &data[thr].unc_len; in save_compressed_image()
774 crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32"); in save_compressed_image()
775 if (IS_ERR(crc->thr)) { in save_compressed_image()
776 crc->thr = NULL; in save_compressed_image()
778 ret = -ENOMEM; in save_compressed_image()
786 handle->reqd_free_pages = reqd_free_pages(); in save_compressed_image()
789 pr_info("Compressing and saving image data (%u pages)...\n", in save_compressed_image()
806 memcpy(data[thr].unc + off, in save_compressed_image()
817 data[thr].unc_len = off; in save_compressed_image()
819 atomic_set_release(&data[thr].ready, 1); in save_compressed_image()
820 wake_up(&data[thr].go); in save_compressed_image()
826 crc->run_threads = thr; in save_compressed_image()
827 atomic_set_release(&crc->ready, 1); in save_compressed_image()
828 wake_up(&crc->go); in save_compressed_image()
831 wait_event(data[thr].done, in save_compressed_image()
832 atomic_read_acquire(&data[thr].stop)); in save_compressed_image()
833 atomic_set(&data[thr].stop, 0); in save_compressed_image()
835 ret = data[thr].ret; in save_compressed_image()
842 if (unlikely(!data[thr].cmp_len || in save_compressed_image()
843 data[thr].cmp_len > in save_compressed_image()
844 bytes_worst_compress(data[thr].unc_len))) { in save_compressed_image()
846 ret = -1; in save_compressed_image()
850 *(size_t *)data[thr].cmp = data[thr].cmp_len; in save_compressed_image()
856 * OK - we saved the length of the compressed data, so in save_compressed_image()
861 off < CMP_HEADER + data[thr].cmp_len; in save_compressed_image()
863 memcpy(page, data[thr].cmp + off, PAGE_SIZE); in save_compressed_image()
871 wait_event(crc->done, atomic_read_acquire(&crc->stop)); in save_compressed_image()
872 atomic_set(&crc->stop, 0); in save_compressed_image()
892 if (crc->thr) in save_compressed_image()
893 kthread_stop(crc->thr); in save_compressed_image()
896 if (data) { in save_compressed_image()
898 if (data[thr].thr) in save_compressed_image()
899 kthread_stop(data[thr].thr); in save_compressed_image()
900 acomp_request_free(data[thr].cr); in save_compressed_image()
901 crypto_free_acomp(data[thr].cc); in save_compressed_image()
903 vfree(data); in save_compressed_image()
912 * enough_swap - Make sure we have enough swap to save the image.
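Its elided body is a one-shot headroom check. Roughly, per mainline (count_swap_pages() counts free swap pages on the resume device; PAGES_FOR_IO reserves pages for the I/O path itself):

static int enough_swap(unsigned int nr_pages)
{
	unsigned int free_swap = count_swap_pages(root_swap, 1);
	unsigned int required;

	pr_debug("Free swap pages: %u\n", free_swap);

	required = PAGES_FOR_IO + nr_pages;
	return free_swap > required;
}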
930 * swsusp_write - Write entire image and metadata.
956 error = -ENOSPC; in swsusp_write()
964 error = -EFAULT; in swsusp_write()
972 save_image(&handle, &snapshot, pages - 1) : in swsusp_write()
973 save_compressed_image(&handle, &snapshot, pages - 1); in swsusp_write()
981 * The following functions allow us to read data using a swap map
982 * in a file-like way.
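On the read side the map pages are chained through a small list node; per mainline:

struct swap_map_page_list {
	struct swap_map_page *map;
	struct swap_map_page_list *next;
};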
989 while (handle->maps) { in release_swap_reader()
990 if (handle->maps->map) in release_swap_reader()
991 free_page((unsigned long)handle->maps->map); in release_swap_reader()
992 tmp = handle->maps; in release_swap_reader()
993 handle->maps = handle->maps->next; in release_swap_reader()
996 handle->cur = NULL; in release_swap_reader()
1006 *flags_p = swsusp_header->flags; in get_swap_reader()
1008 if (!swsusp_header->image) /* how can this happen? */ in get_swap_reader()
1009 return -EINVAL; in get_swap_reader()
1011 handle->cur = NULL; in get_swap_reader()
1012 last = handle->maps = NULL; in get_swap_reader()
1013 offset = swsusp_header->image; in get_swap_reader()
1015 tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL); in get_swap_reader()
1018 return -ENOMEM; in get_swap_reader()
1020 if (!handle->maps) in get_swap_reader()
1021 handle->maps = tmp; in get_swap_reader()
1023 last->next = tmp; in get_swap_reader()
1026 tmp->map = (struct swap_map_page *) in get_swap_reader()
1028 if (!tmp->map) { in get_swap_reader()
1030 return -ENOMEM; in get_swap_reader()
1033 error = hib_submit_io_sync(REQ_OP_READ, offset, tmp->map); in get_swap_reader()
1038 offset = tmp->map->next_swap; in get_swap_reader()
1040 handle->k = 0; in get_swap_reader()
1041 handle->cur = handle->maps->map; in get_swap_reader()
1052 if (!handle->cur) in swap_read_page()
1053 return -EINVAL; in swap_read_page()
1054 offset = handle->cur->entries[handle->k]; in swap_read_page()
1056 return -EFAULT; in swap_read_page()
1063 if (++handle->k >= MAP_PAGE_ENTRIES) { in swap_read_page()
1064 handle->k = 0; in swap_read_page()
1065 free_page((unsigned long)handle->maps->map); in swap_read_page()
1066 tmp = handle->maps; in swap_read_page()
1067 handle->maps = handle->maps->next; in swap_read_page()
1069 if (!handle->maps) in swap_read_page()
1072 handle->cur = handle->maps->map; in swap_read_page()
1085 * load_image - load the image using the swap map handle
1105 pr_info("Loading image data pages (%u pages)...\n", nr_to_read); in load_image()
1118 if (snapshot->sync_read) in load_image()
1136 ret = -ENODATA; in load_image()
1143 * Structure used for data decompression.
1149 atomic_t ready; /* ready to start flag */ member
1150 atomic_t stop; /* ready to stop flag */
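Mirroring cmp_data, the per-thread decompression control block is roughly (assembled from mainline; the acomp fields match decompress_threadfn() below):

struct dec_data {
	struct task_struct *thr;	/* thread */
	struct crypto_acomp *cc;	/* crypto decompressor */
	struct acomp_req *cr;		/* crypto request */
	atomic_t ready;			/* ready to start flag */
	atomic_t stop;			/* ready to stop flag */
	int ret;			/* return code */
	wait_queue_head_t go;		/* start decompression */
	wait_queue_head_t done;		/* decompression done */
	size_t unc_len;			/* uncompressed length */
	unsigned char unc[UNC_SIZE];	/* uncompressed buffer */
	size_t cmp_len;			/* compressed length */
	unsigned char cmp[CMP_SIZE];	/* compressed buffer */
};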
1163 static int decompress_threadfn(void *data) in decompress_threadfn() argument
1165 struct dec_data *d = data; in decompress_threadfn()
1168 wait_event(d->go, atomic_read_acquire(&d->ready) || in decompress_threadfn()
1171 d->thr = NULL; in decompress_threadfn()
1172 d->ret = -1; in decompress_threadfn()
1173 atomic_set_release(&d->stop, 1); in decompress_threadfn()
1174 wake_up(&d->done); in decompress_threadfn()
1177 atomic_set(&d->ready, 0); in decompress_threadfn()
1179 acomp_request_set_callback(d->cr, CRYPTO_TFM_REQ_MAY_SLEEP, in decompress_threadfn()
1181 acomp_request_set_src_nondma(d->cr, d->cmp + CMP_HEADER, in decompress_threadfn()
1182 d->cmp_len); in decompress_threadfn()
1183 acomp_request_set_dst_nondma(d->cr, d->unc, UNC_SIZE); in decompress_threadfn()
1184 d->ret = crypto_acomp_decompress(d->cr); in decompress_threadfn()
1185 d->unc_len = d->cr->dlen; in decompress_threadfn()
1188 flush_icache_range((unsigned long)d->unc, in decompress_threadfn()
1189 (unsigned long)d->unc + d->unc_len); in decompress_threadfn()
1191 atomic_set_release(&d->stop, 1); in decompress_threadfn()
1192 wake_up(&d->done); in decompress_threadfn()
1198 * load_compressed_image - Load compressed image data and decompress it.
1199 * @handle: Swap map handle to use for loading data.
1200 * @snapshot: Image to copy uncompressed data into.
1220 struct dec_data *data = NULL; in load_compressed_image() local
1229 nr_threads = num_online_cpus() - 1; in load_compressed_image()
1235 ret = -ENOMEM; in load_compressed_image()
1239 data = vcalloc(nr_threads, sizeof(*data)); in load_compressed_image()
1240 if (!data) { in load_compressed_image()
1241 pr_err("Failed to allocate %s data\n", hib_comp_algo); in load_compressed_image()
1242 ret = -ENOMEM; in load_compressed_image()
1249 ret = -ENOMEM; in load_compressed_image()
1259 init_waitqueue_head(&data[thr].go); in load_compressed_image()
1260 init_waitqueue_head(&data[thr].done); in load_compressed_image()
1262 data[thr].cc = crypto_alloc_acomp(hib_comp_algo, 0, CRYPTO_ALG_ASYNC); in load_compressed_image()
1263 if (IS_ERR_OR_NULL(data[thr].cc)) { in load_compressed_image()
1264 pr_err("Could not allocate comp stream %ld\n", PTR_ERR(data[thr].cc)); in load_compressed_image()
1265 ret = -EFAULT; in load_compressed_image()
1269 data[thr].cr = acomp_request_alloc(data[thr].cc); in load_compressed_image()
1270 if (!data[thr].cr) { in load_compressed_image()
1272 ret = -ENOMEM; in load_compressed_image()
1276 data[thr].thr = kthread_run(decompress_threadfn, in load_compressed_image()
1277 &data[thr], in load_compressed_image()
1279 if (IS_ERR(data[thr].thr)) { in load_compressed_image()
1280 data[thr].thr = NULL; in load_compressed_image()
1282 ret = -ENOMEM; in load_compressed_image()
1290 init_waitqueue_head(&crc->go); in load_compressed_image()
1291 init_waitqueue_head(&crc->done); in load_compressed_image()
1293 handle->crc32 = 0; in load_compressed_image()
1294 crc->crc32 = &handle->crc32; in load_compressed_image()
1296 crc->unc[thr] = data[thr].unc; in load_compressed_image()
1297 crc->unc_len[thr] = &data[thr].unc_len; in load_compressed_image()
1300 crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32"); in load_compressed_image()
1301 if (IS_ERR(crc->thr)) { in load_compressed_image()
1302 crc->thr = NULL; in load_compressed_image()
1304 ret = -ENOMEM; in load_compressed_image()
1316 read_pages = (low_free_pages() - snapshot_get_image_size()) / 2; in load_compressed_image()
1329 ret = -ENOMEM; in load_compressed_image()
1339 pr_info("Loading and decompressing image data (%u pages)...\n", in load_compressed_image()
1356 * On real read error, finish. On end of data, in load_compressed_image()
1359 if (handle->cur && in load_compressed_image()
1360 handle->cur->entries[handle->k]) { in load_compressed_image()
1371 want -= i; in load_compressed_image()
1374 * We are out of data, wait for some more. in load_compressed_image()
1389 if (crc->run_threads) { in load_compressed_image()
1390 wait_event(crc->done, atomic_read_acquire(&crc->stop)); in load_compressed_image()
1391 atomic_set(&crc->stop, 0); in load_compressed_image()
1392 crc->run_threads = 0; in load_compressed_image()
1396 data[thr].cmp_len = *(size_t *)page[pg]; in load_compressed_image()
1397 if (unlikely(!data[thr].cmp_len || in load_compressed_image()
1398 data[thr].cmp_len > in load_compressed_image()
1401 ret = -1; in load_compressed_image()
1405 need = DIV_ROUND_UP(data[thr].cmp_len + CMP_HEADER, in load_compressed_image()
1409 ret = -1; in load_compressed_image()
1416 off < CMP_HEADER + data[thr].cmp_len; in load_compressed_image()
1418 memcpy(data[thr].cmp + off, in load_compressed_image()
1420 have--; in load_compressed_image()
1426 atomic_set_release(&data[thr].ready, 1); in load_compressed_image()
1427 wake_up(&data[thr].go); in load_compressed_image()
1431 * Wait for more data while we are decompressing. in load_compressed_image()
1444 wait_event(data[thr].done, in load_compressed_image()
1445 atomic_read_acquire(&data[thr].stop)); in load_compressed_image()
1446 atomic_set(&data[thr].stop, 0); in load_compressed_image()
1448 ret = data[thr].ret; in load_compressed_image()
1455 if (unlikely(!data[thr].unc_len || in load_compressed_image()
1456 data[thr].unc_len > UNC_SIZE || in load_compressed_image()
1457 data[thr].unc_len & (PAGE_SIZE - 1))) { in load_compressed_image()
1459 ret = -1; in load_compressed_image()
1464 off < data[thr].unc_len; off += PAGE_SIZE) { in load_compressed_image()
1466 data[thr].unc + off, PAGE_SIZE); in load_compressed_image()
1475 crc->run_threads = thr + 1; in load_compressed_image()
1476 atomic_set_release(&crc->ready, 1); in load_compressed_image()
1477 wake_up(&crc->go); in load_compressed_image()
1483 crc->run_threads = thr; in load_compressed_image()
1484 atomic_set_release(&crc->ready, 1); in load_compressed_image()
1485 wake_up(&crc->go); in load_compressed_image()
1489 if (crc->run_threads) { in load_compressed_image()
1490 wait_event(crc->done, atomic_read_acquire(&crc->stop)); in load_compressed_image()
1491 atomic_set(&crc->stop, 0); in load_compressed_image()
1498 ret = -ENODATA; in load_compressed_image()
1500 if (swsusp_header->flags & SF_CRC32_MODE) { in load_compressed_image()
1501 if (handle->crc32 != swsusp_header->crc32) { in load_compressed_image()
1503 ret = -ENODATA; in load_compressed_image()
1514 if (crc->thr) in load_compressed_image()
1515 kthread_stop(crc->thr); in load_compressed_image()
1518 if (data) { in load_compressed_image()
1520 if (data[thr].thr) in load_compressed_image()
1521 kthread_stop(data[thr].thr); in load_compressed_image()
1522 acomp_request_free(data[thr].cr); in load_compressed_image()
1523 crypto_free_acomp(data[thr].cc); in load_compressed_image()
1525 vfree(data); in load_compressed_image()
1533 * swsusp_read - read the hibernation image.
1548 return error < 0 ? error : -EFAULT; in swsusp_read()
1557 load_image(&handle, &snapshot, header->pages - 1) : in swsusp_read()
1558 load_compressed_image(&handle, &snapshot, header->pages - 1); in swsusp_read()
1572 * swsusp_check - Open the resume device and check for the swsusp signature.
1590 if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) { in swsusp_check()
1591 memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10); in swsusp_check()
1592 swsusp_header_flags = swsusp_header->flags; in swsusp_check()
1598 error = -EINVAL; in swsusp_check()
1600 if (!error && swsusp_header->flags & SF_HW_SIG && in swsusp_check()
1601 swsusp_header->hw_sig != swsusp_hardware_signature) { in swsusp_check()
1603 swsusp_header->hw_sig, swsusp_hardware_signature); in swsusp_check()
1604 error = -EINVAL; in swsusp_check()
1623 * swsusp_close - close resume device.
1637 * swsusp_unmark - Unmark swsusp signature in the resume device
1646 if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) { in swsusp_unmark()
1647 memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10); in swsusp_unmark()
1653 error = -ENODEV; in swsusp_unmark()