Lines Matching +full:non +full:- +full:tunable

1 // SPDX-License-Identifier: GPL-2.0-only
7 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
121 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
122 * cause image creation to fail (tunable via /sys/power/reserved_size).
132 * Preferred image size in bytes (tunable via /sys/power/image_size).
154 #define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *))
179 * get_image_page - Allocate a page for a hibernation image.
215 safe_pages_list = safe_pages_list->next;
243 lp->next = safe_pages_list;
248 * free_image_page - Free a page allocated for hibernation image.
274 struct linked_page *lp = list->next;
304 ca->chain = NULL;
305 ca->used_space = LINKED_PAGE_DATA_SIZE;
306 ca->gfp_mask = gfp_mask;
307 ca->safe_needed = safe_needed;
314 if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
317 lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
318 get_image_page(ca->gfp_mask, PG_ANY);
322 lp->next = ca->chain;
323 ca->chain = lp;
324 ca->used_space = 0;
326 ret = ca->chain->data + ca->used_space;
327 ca->used_space += size;
348 * top for now, but let's avoid making unnecessary assumptions ;-).
376 #define BM_BLOCK_MASK ((1UL << BM_BLOCK_SHIFT) - 1)
425 #define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 2)
427 #define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 3)
429 #define BM_RTREE_LEVEL_MASK ((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
432 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
452 node->data = get_image_page(gfp_mask, safe_needed);
453 if (!node->data)
456 list_add_tail(&node->list, list);
 462 * add_rtree_block - Add a new leaf node to the radix tree.
465 * linked list in order. This is guaranteed by the zone->blocks
475 block_nr = zone->blocks;
485 for (i = zone->levels; i < levels_needed; i++) {
487 &zone->nodes);
489 return -ENOMEM;
491 node->data[0] = (unsigned long)zone->rtree;
492 zone->rtree = node;
493 zone->levels += 1;
497 block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
499 return -ENOMEM;
502 node = zone->rtree;
503 dst = &zone->rtree;
504 block_nr = zone->blocks;
505 for (i = zone->levels; i > 0; i--) {
510 &zone->nodes);
512 return -ENOMEM;
516 index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
518 dst = (struct rtree_node **)&((*dst)->data[index]);
522 zone->blocks += 1;
532 * create_zone_bm_rtree - Create a radix tree for one zone.
548 pages = end - start;
553 INIT_LIST_HEAD(&zone->nodes);
554 INIT_LIST_HEAD(&zone->leaves);
555 zone->start_pfn = start;
556 zone->end_pfn = end;
570 * free_zone_bm_rtree - Free the memory of the radix tree.
581 list_for_each_entry(node, &zone->nodes, list)
582 free_image_page(node->data, clear_nosave_free);
584 list_for_each_entry(node, &zone->leaves, list)
585 free_image_page(node->data, clear_nosave_free);
590 bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
592 bm->cur.node = list_entry(bm->cur.zone->leaves.next,
594 bm->cur.node_pfn = 0;
595 bm->cur.cur_pfn = BM_END_OF_MAP;
596 bm->cur.node_bit = 0;
608 * free_mem_extents - Free a list of memory extents.
616 list_del(&ext->hook);
622 * create_mem_extents - Create a list of memory extents.
638 zone_start = zone->zone_start_pfn;
642 if (zone_start <= ext->end)
645 if (&ext->hook == list || zone_end < ext->start) {
652 return -ENOMEM;
654 new_ext->start = zone_start;
655 new_ext->end = zone_end;
656 list_add_tail(&new_ext->hook, &ext->hook);
661 if (zone_start < ext->start)
662 ext->start = zone_start;
663 if (zone_end > ext->end)
664 ext->end = zone_end;
669 if (zone_end < cur->start)
671 if (zone_end < cur->end)
672 ext->end = cur->end;
673 list_del(&cur->hook);
682 * memory_bm_create - Allocate memory for a memory bitmap.
693 INIT_LIST_HEAD(&bm->zones);
703 ext->start, ext->end);
705 error = -ENOMEM;
708 list_add_tail(&zone->list, &bm->zones);
711 bm->p_list = ca.chain;
718 bm->p_list = ca.chain;
724 * memory_bm_free - Free memory occupied by the memory bitmap.
731 list_for_each_entry(zone, &bm->zones, list)
734 free_list_of_pages(bm->p_list, clear_nosave_free);
736 INIT_LIST_HEAD(&bm->zones);
740 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
755 zone = bm->cur.zone;
757 if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
763 list_for_each_entry(curr, &bm->zones, list) {
764 if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
771 return -EFAULT;
784 node = bm->cur.node;
785 if (zone == bm->cur.zone &&
786 ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
789 node = zone->rtree;
790 block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
792 for (i = zone->levels; i > 0; i--) {
795 index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
797 BUG_ON(node->data[index] == 0);
798 node = (struct rtree_node *)node->data[index];
803 bm->cur.zone = zone;
804 bm->cur.node = node;
805 bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
806 bm->cur.cur_pfn = pfn;
809 *addr = node->data;
810 *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
854 bit = max(bm->cur.node_bit - 1, 0);
855 clear_bit(bit, bm->cur.node->data);
860 return bm->cur.cur_pfn;
883 * rtree_next_node - Jump to the next leaf node.
894 if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
895 bm->cur.node = list_entry(bm->cur.node->list.next,
897 bm->cur.node_pfn += BM_BITS_PER_BLOCK;
898 bm->cur.node_bit = 0;
904 if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
905 bm->cur.zone = list_entry(bm->cur.zone->list.next,
907 bm->cur.node = list_entry(bm->cur.zone->leaves.next,
909 bm->cur.node_pfn = 0;
910 bm->cur.node_bit = 0;
919 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
935 pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
936 bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
937 bit = find_next_bit(bm->cur.node->data, bits,
938 bm->cur.node_bit);
940 pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
941 bm->cur.node_bit = bit + 1;
942 bm->cur.cur_pfn = pfn;
947 bm->cur.cur_pfn = BM_END_OF_MAP;
967 list_for_each_entry(node, &zone->nodes, list)
968 recycle_safe_page(node->data);
970 list_for_each_entry(node, &zone->leaves, list)
971 recycle_safe_page(node->data);
979 list_for_each_entry(zone, &bm->zones, list)
982 p_list = bm->p_list;
986 p_list = lp->next;
992 * register_nosave_region - Register a region of unsaveable memory.
1008 if (region->end_pfn == start_pfn) {
1009 region->end_pfn = end_pfn;
1016 region->start_pfn = start_pfn;
1017 region->end_pfn = end_pfn;
1018 list_add_tail(&region->list, &nosave_regions);
1020 pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
1022 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
1076 * mark_nosave_pages - Mark pages that should not be saved.
1092 pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
1093 (unsigned long long) region->start_pfn << PAGE_SHIFT,
1094 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
1095 - 1);
1097 for_each_valid_pfn(pfn, region->start_pfn, region->end_pfn) {
1110 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
1129 return -ENOMEM;
1157 return -ENOMEM;
1161 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
1217 * snapshot_additional_pages - Estimate the number of extra pages needed.
1228 rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
1254 spin_lock_irqsave(&zone->lock, flags);
1257 for_each_valid_pfn(pfn, zone->zone_start_pfn, max_zone_pfn) {
1260 if (!--page_count) {
1274 &zone->free_area[order].free_list[t], buddy_list) {
1279 if (!--page_count) {
1287 spin_unlock_irqrestore(&zone->lock, flags);
1292 * count_free_highmem_pages - Compute the total number of free highmem pages.
1294 * The returned number is system-wide.
1309 * saveable_highmem_page - Check if a highmem page is saveable.
1342 * count_highmem_pages - Compute the total number of saveable highmem pages.
1357 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1366 * saveable_page - Check if the given page is saveable.
1368 * Determine whether a non-highmem page should be included in a hibernation
1405 * count_data_pages - Compute the total number of saveable non-highmem pages.
1419 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1436 for (n = PAGE_SIZE / sizeof(long); n; n--) {
1444 * safe_copy_page - Copy a page in a safe way.
1532 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1585 * swsusp_free - Free pages allocated for hibernation image.
1641 * preallocate_image_pages - Allocate a number of pages for hibernation image.
1662 nr_pages--;
1677 alloc = avail_normal - alloc_normal;
1691 * __fraction - Compute (an approximation of) x * (multiplier / base).
1721 * free_unnecessary_pages - Release preallocated pages not needed for the image.
1729 to_free_normal = alloc_normal - save;
1733 save -= alloc_normal;
1737 to_free_highmem = alloc_highmem - save;
1740 save -= alloc_highmem;
1742 to_free_normal -= save;
1757 to_free_highmem--;
1758 alloc_highmem--;
1762 to_free_normal--;
1763 alloc_normal--;
1775 * minimum_image_size - Estimate the minimum acceptable size of an image.
1784 * [number of saveable pages] - [number of pages that can be freed in theory]
1799 return saveable <= size ? 0 : saveable - size;
1803 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
1810 * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
1814 * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2
1815 * - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
1878 count -= totalreserve_pages;
1881 max_size = (count - (size + PAGES_FOR_IO)) / 2
1882 - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
1894 pages += preallocate_image_memory(saveable - pages, avail_normal);
1906 avail_normal -= pages;
1918 shrink_all_memory(saveable - size);
1925 * highmem and non-highmem zones separately.
1928 alloc = count - max_size;
1930 alloc -= pages_highmem;
1935 /* We have exhausted non-highmem pages, try highmem. */
1936 alloc -= pages;
1941 alloc - pages_highmem);
1947 * memory, so try to preallocate (all memory - size) pages.
1949 alloc = (count - pages) - size;
1956 alloc = max_size - size;
1959 alloc -= size;
1961 pages_highmem += preallocate_image_highmem(alloc - size);
1970 pages -= free_unnecessary_pages();
1981 return -ENOMEM;
1986 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
1988 * Compute the number of non-highmem pages that will be necessary for creating
1998 nr_highmem -= free_highmem;
2007 * enough_free_mem - Check if there is enough free memory for the image.
2027 * get_highmem_buffer - Allocate a buffer for highmem pages.
2035 return buffer ? 0 : -ENOMEM;
2039 * alloc_highmem_pages - Allocate some highmem pages for the image.
2052 nr_highmem -= to_alloc;
2053 while (to_alloc-- > 0) {
2069 * swsusp_alloc - Allocate memory for hibernation image.
2073 * non-highmem pages for the copies of the remaining highmem ones.
2086 nr_highmem -= alloc_highmem;
2091 nr_pages -= alloc_normal;
2092 while (nr_pages-- > 0) {
2106 return -ENOMEM;
2122 return -ENOMEM;
2127 return -ENOMEM;
2144 nr_zero_pages = nr_pages - nr_copy_pages;
2155 memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
2156 info->version_code = LINUX_VERSION_CODE;
2162 if (info->version_code != LINUX_VERSION_CODE)
2164 if (strcmp(info->uts.sysname, init_utsname()->sysname))
2166 if (strcmp(info->uts.release, init_utsname()->release))
2168 if (strcmp(info->uts.version, init_utsname()->version))
2170 if (strcmp(info->uts.machine, init_utsname()->machine))
2184 info->num_physpages = get_num_physpages();
2185 info->image_pages = nr_copy_pages;
2186 info->pages = snapshot_get_image_size();
2187 info->size = info->pages;
2188 info->size <<= PAGE_SHIFT;
2192 #define ENCODED_PFN_ZERO_FLAG ((unsigned long)1 << (BITS_PER_LONG - 1))
2196 * pack_pfns - Prepare PFNs for saving.
2221 * snapshot_read_next - Get the address to read the next image page from.
2238 if (handle->cur > nr_meta_pages + nr_copy_pages)
2245 return -ENOMEM;
2247 if (!handle->cur) {
2253 handle->buffer = buffer;
2256 } else if (handle->cur <= nr_meta_pages) {
2274 handle->buffer = buffer;
2276 handle->buffer = page_address(page);
2279 handle->cur++;
2297 * mark_unsafe_pages - Mark pages that were used before hibernation.
2325 if (!reason && info->num_physpages != get_num_physpages())
2329 return -EPERM;
2335 * load_header - Check the image header and copy the data from it.
2344 nr_copy_pages = info->image_pages;
2345 nr_meta_pages = info->pages - info->image_pages - 1;
2351 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
2383 return -EFAULT;
2411 * count_highmem_image_pages - Compute the number of highmem pages in the image.
2437 * prepare_highmem_image - Allocate memory for loading highmem data from image.
2455 return -ENOMEM;
2458 return -ENOMEM;
2467 while (to_alloc-- > 0) {
2488 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
2526 return ERR_PTR(-ENOMEM);
2528 pbe->orig_page = page;
2535 safe_highmem_pages--;
2537 pbe->copy_page = tmp;
2540 kaddr = __get_safe_page(ca->gfp_mask);
2542 return ERR_PTR(-ENOMEM);
2543 pbe->copy_page = virt_to_page(kaddr);
2545 pbe->next = highmem_pblist;
2551 * copy_last_highmem_page - Copy the most recent highmem image page.
2591 return ERR_PTR(-EINVAL);
2602 * prepare_image - Make room for loading hibernation image.
2673 nr_pages = (nr_zero_pages + nr_copy_pages) - nr_highmem - allocated_unsafe_pages;
2678 error = -ENOMEM;
2681 lp->next = safe_pages_list;
2683 nr_pages--;
2686 nr_pages = (nr_zero_pages + nr_copy_pages) - nr_highmem - allocated_unsafe_pages;
2690 error = -ENOMEM;
2695 lp->next = safe_pages_list;
2701 nr_pages--;
2711 * get_buffer - Get the address to store the next image data page.
2723 return ERR_PTR(-EFAULT);
2743 return ERR_PTR(-ENOMEM);
2745 pbe->orig_address = page_address(page);
2746 pbe->address = __get_safe_page(ca->gfp_mask);
2747 if (!pbe->address)
2748 return ERR_PTR(-ENOMEM);
2749 pbe->next = restore_pblist;
2751 return pbe->address;
2755 * snapshot_write_next - Get the address to store the next image page.
2777 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages)
2780 if (!handle->cur) {
2786 return -ENOMEM;
2788 handle->buffer = buffer;
2789 } else if (handle->cur == 1) {
2807 } else if (handle->cur <= nr_meta_pages + 1) {
2812 if (handle->cur == nr_meta_pages + 1) {
2821 handle->buffer = get_buffer(&orig_bm, &ca);
2822 if (IS_ERR(handle->buffer))
2823 return PTR_ERR(handle->buffer);
2827 error = hibernate_restore_protect_page(handle->buffer);
2830 handle->buffer = get_buffer(&orig_bm, &ca);
2831 if (IS_ERR(handle->buffer))
2832 return PTR_ERR(handle->buffer);
2834 handle->sync_read = (handle->buffer == buffer);
2835 handle->cur++;
2838 if (handle->cur > nr_meta_pages + 1 &&
2840 memset(handle->buffer, 0, PAGE_SIZE);
2848 * snapshot_write_finalize - Complete the loading of a hibernation image.
2860 error = hibernate_restore_protect_page(handle->buffer);
2862 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages) {
2872 handle->cur <= nr_meta_pages + nr_copy_pages + nr_zero_pages);
2892 * restore_highmem - Put highmem image pages into their original locations.
2911 return -ENOMEM;
2914 swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2915 pbe = pbe->next;
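
Note: the preallocation bound quoted at listing lines 1814-1815 and computed at lines 1881-1882 can be worked through numerically. The following is a minimal user-space sketch, not kernel code; the input values (page frame count, PAGES_FOR_IO stand-in, metadata page count, reserved_size) are made-up examples rather than figures read from a running system, and DIV_ROUND_UP is redefined locally to mirror the kernel macro.

/*
 * Standalone illustration of the max_size bound used by
 * hibernate_preallocate_memory():
 *
 *   max_size = ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2
 *              - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * All inputs below are example values only.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long page_size = 4096;                  /* PAGE_SIZE */
	unsigned long pages_for_io = 1024;               /* stand-in for PAGES_FOR_IO */
	unsigned long count = 2UL * 1024 * 1024;         /* page frames total (8 GiB / 4 KiB) */
	unsigned long metadata_pages = 4096;             /* "size" in the kernel code */
	unsigned long reserved_size = 64UL * 1024 * 1024;/* /sys/power/reserved_size, bytes */

	/* Same arithmetic as the kernel expression at lines 1881-1882. */
	unsigned long max_size = (count - (metadata_pages + pages_for_io)) / 2
				 - 2 * DIV_ROUND_UP(reserved_size, page_size);

	printf("max image size: %lu pages (~%lu MiB)\n",
	       max_size, (max_size * page_size) >> 20);
	return 0;
}

With these example inputs the bound comes out to 1013248 pages (roughly 3.9 GiB), i.e. a bit under half of the example 8 GiB of RAM, which is the intent of the "/ 2" in the formula.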