1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/kernel/power/snapshot.c 4 * 5 * This file provides system snapshot/restore functionality for swsusp. 6 * 7 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz> 8 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> 9 */ 10 11 #define pr_fmt(fmt) "PM: " fmt 12 13 #include <linux/version.h> 14 #include <linux/module.h> 15 #include <linux/mm.h> 16 #include <linux/suspend.h> 17 #include <linux/delay.h> 18 #include <linux/bitops.h> 19 #include <linux/spinlock.h> 20 #include <linux/kernel.h> 21 #include <linux/pm.h> 22 #include <linux/device.h> 23 #include <linux/init.h> 24 #include <linux/memblock.h> 25 #include <linux/nmi.h> 26 #include <linux/syscalls.h> 27 #include <linux/console.h> 28 #include <linux/highmem.h> 29 #include <linux/list.h> 30 #include <linux/slab.h> 31 #include <linux/compiler.h> 32 #include <linux/ktime.h> 33 #include <linux/set_memory.h> 34 35 #include <linux/uaccess.h> 36 #include <asm/mmu_context.h> 37 #include <asm/pgtable.h> 38 #include <asm/tlbflush.h> 39 #include <asm/io.h> 40 41 #include "power.h" 42 43 #if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY) 44 static bool hibernate_restore_protection; 45 static bool hibernate_restore_protection_active; 46 47 void enable_restore_image_protection(void) 48 { 49 hibernate_restore_protection = true; 50 } 51 52 static inline void hibernate_restore_protection_begin(void) 53 { 54 hibernate_restore_protection_active = hibernate_restore_protection; 55 } 56 57 static inline void hibernate_restore_protection_end(void) 58 { 59 hibernate_restore_protection_active = false; 60 } 61 62 static inline void hibernate_restore_protect_page(void *page_address) 63 { 64 if (hibernate_restore_protection_active) 65 set_memory_ro((unsigned long)page_address, 1); 66 } 67 68 static inline void hibernate_restore_unprotect_page(void *page_address) 69 { 70 if (hibernate_restore_protection_active) 71 set_memory_rw((unsigned long)page_address, 1); 72 } 73 #else 74 static inline void hibernate_restore_protection_begin(void) {} 75 static inline void hibernate_restore_protection_end(void) {} 76 static inline void hibernate_restore_protect_page(void *page_address) {} 77 static inline void hibernate_restore_unprotect_page(void *page_address) {} 78 #endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */ 79 80 static int swsusp_page_is_free(struct page *); 81 static void swsusp_set_page_forbidden(struct page *); 82 static void swsusp_unset_page_forbidden(struct page *); 83 84 /* 85 * Number of bytes to reserve for memory allocations made by device drivers 86 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't 87 * cause image creation to fail (tunable via /sys/power/reserved_size). 88 */ 89 unsigned long reserved_size; 90 91 void __init hibernate_reserved_size_init(void) 92 { 93 reserved_size = SPARE_PAGES * PAGE_SIZE; 94 } 95 96 /* 97 * Preferred image size in bytes (tunable via /sys/power/image_size). 98 * When it is set to N, swsusp will do its best to ensure the image 99 * size will not exceed N bytes, but if that is impossible, it will 100 * try to create the smallest image possible. 101 */ 102 unsigned long image_size; 103 104 void __init hibernate_image_size_init(void) 105 { 106 image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE; 107 } 108 109 /* 110 * List of PBEs needed for restoring the pages that were allocated before 111 * the suspend and included in the suspend image, but have also been 112 * allocated by the "resume" kernel, so their contents cannot be written 113 * directly to their "original" page frames. 114 */ 115 struct pbe *restore_pblist; 116 117 /* struct linked_page is used to build chains of pages */ 118 119 #define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *)) 120 121 struct linked_page { 122 struct linked_page *next; 123 char data[LINKED_PAGE_DATA_SIZE]; 124 } __packed; 125 126 /* 127 * List of "safe" pages (ie. pages that were not used by the image kernel 128 * before hibernation) that may be used as temporary storage for image kernel 129 * memory contents. 130 */ 131 static struct linked_page *safe_pages_list; 132 133 /* Pointer to an auxiliary buffer (1 page) */ 134 static void *buffer; 135 136 #define PG_ANY 0 137 #define PG_SAFE 1 138 #define PG_UNSAFE_CLEAR 1 139 #define PG_UNSAFE_KEEP 0 140 141 static unsigned int allocated_unsafe_pages; 142 143 /** 144 * get_image_page - Allocate a page for a hibernation image. 145 * @gfp_mask: GFP mask for the allocation. 146 * @safe_needed: Get pages that were not used before hibernation (restore only) 147 * 148 * During image restoration, for storing the PBE list and the image data, we can 149 * only use memory pages that do not conflict with the pages used before 150 * hibernation. The "unsafe" pages have PageNosaveFree set and we count them 151 * using allocated_unsafe_pages. 152 * 153 * Each allocated image page is marked as PageNosave and PageNosaveFree so that 154 * swsusp_free() can release it. 155 */ 156 static void *get_image_page(gfp_t gfp_mask, int safe_needed) 157 { 158 void *res; 159 160 res = (void *)get_zeroed_page(gfp_mask); 161 if (safe_needed) 162 while (res && swsusp_page_is_free(virt_to_page(res))) { 163 /* The page is unsafe, mark it for swsusp_free() */ 164 swsusp_set_page_forbidden(virt_to_page(res)); 165 allocated_unsafe_pages++; 166 res = (void *)get_zeroed_page(gfp_mask); 167 } 168 if (res) { 169 swsusp_set_page_forbidden(virt_to_page(res)); 170 swsusp_set_page_free(virt_to_page(res)); 171 } 172 return res; 173 } 174 175 static void *__get_safe_page(gfp_t gfp_mask) 176 { 177 if (safe_pages_list) { 178 void *ret = safe_pages_list; 179 180 safe_pages_list = safe_pages_list->next; 181 memset(ret, 0, PAGE_SIZE); 182 return ret; 183 } 184 return get_image_page(gfp_mask, PG_SAFE); 185 } 186 187 unsigned long get_safe_page(gfp_t gfp_mask) 188 { 189 return (unsigned long)__get_safe_page(gfp_mask); 190 } 191 192 static struct page *alloc_image_page(gfp_t gfp_mask) 193 { 194 struct page *page; 195 196 page = alloc_page(gfp_mask); 197 if (page) { 198 swsusp_set_page_forbidden(page); 199 swsusp_set_page_free(page); 200 } 201 return page; 202 } 203 204 static void recycle_safe_page(void *page_address) 205 { 206 struct linked_page *lp = page_address; 207 208 lp->next = safe_pages_list; 209 safe_pages_list = lp; 210 } 211 212 /** 213 * free_image_page - Free a page allocated for hibernation image. 214 * @addr: Address of the page to free. 215 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page. 216 * 217 * The page to free should have been allocated by get_image_page() (page flags 218 * set by it are affected). 219 */ 220 static inline void free_image_page(void *addr, int clear_nosave_free) 221 { 222 struct page *page; 223 224 BUG_ON(!virt_addr_valid(addr)); 225 226 page = virt_to_page(addr); 227 228 swsusp_unset_page_forbidden(page); 229 if (clear_nosave_free) 230 swsusp_unset_page_free(page); 231 232 __free_page(page); 233 } 234 235 static inline void free_list_of_pages(struct linked_page *list, 236 int clear_page_nosave) 237 { 238 while (list) { 239 struct linked_page *lp = list->next; 240 241 free_image_page(list, clear_page_nosave); 242 list = lp; 243 } 244 } 245 246 /* 247 * struct chain_allocator is used for allocating small objects out of 248 * a linked list of pages called 'the chain'. 249 * 250 * The chain grows each time when there is no room for a new object in 251 * the current page. The allocated objects cannot be freed individually. 252 * It is only possible to free them all at once, by freeing the entire 253 * chain. 254 * 255 * NOTE: The chain allocator may be inefficient if the allocated objects 256 * are not much smaller than PAGE_SIZE. 257 */ 258 struct chain_allocator { 259 struct linked_page *chain; /* the chain */ 260 unsigned int used_space; /* total size of objects allocated out 261 of the current page */ 262 gfp_t gfp_mask; /* mask for allocating pages */ 263 int safe_needed; /* if set, only "safe" pages are allocated */ 264 }; 265 266 static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask, 267 int safe_needed) 268 { 269 ca->chain = NULL; 270 ca->used_space = LINKED_PAGE_DATA_SIZE; 271 ca->gfp_mask = gfp_mask; 272 ca->safe_needed = safe_needed; 273 } 274 275 static void *chain_alloc(struct chain_allocator *ca, unsigned int size) 276 { 277 void *ret; 278 279 if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) { 280 struct linked_page *lp; 281 282 lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) : 283 get_image_page(ca->gfp_mask, PG_ANY); 284 if (!lp) 285 return NULL; 286 287 lp->next = ca->chain; 288 ca->chain = lp; 289 ca->used_space = 0; 290 } 291 ret = ca->chain->data + ca->used_space; 292 ca->used_space += size; 293 return ret; 294 } 295 296 /** 297 * Data types related to memory bitmaps. 298 * 299 * Memory bitmap is a structure consiting of many linked lists of 300 * objects. The main list's elements are of type struct zone_bitmap 301 * and each of them corresonds to one zone. For each zone bitmap 302 * object there is a list of objects of type struct bm_block that 303 * represent each blocks of bitmap in which information is stored. 304 * 305 * struct memory_bitmap contains a pointer to the main list of zone 306 * bitmap objects, a struct bm_position used for browsing the bitmap, 307 * and a pointer to the list of pages used for allocating all of the 308 * zone bitmap objects and bitmap block objects. 309 * 310 * NOTE: It has to be possible to lay out the bitmap in memory 311 * using only allocations of order 0. Additionally, the bitmap is 312 * designed to work with arbitrary number of zones (this is over the 313 * top for now, but let's avoid making unnecessary assumptions ;-). 314 * 315 * struct zone_bitmap contains a pointer to a list of bitmap block 316 * objects and a pointer to the bitmap block object that has been 317 * most recently used for setting bits. Additionally, it contains the 318 * PFNs that correspond to the start and end of the represented zone. 319 * 320 * struct bm_block contains a pointer to the memory page in which 321 * information is stored (in the form of a block of bitmap) 322 * It also contains the pfns that correspond to the start and end of 323 * the represented memory area. 324 * 325 * The memory bitmap is organized as a radix tree to guarantee fast random 326 * access to the bits. There is one radix tree for each zone (as returned 327 * from create_mem_extents). 328 * 329 * One radix tree is represented by one struct mem_zone_bm_rtree. There are 330 * two linked lists for the nodes of the tree, one for the inner nodes and 331 * one for the leave nodes. The linked leave nodes are used for fast linear 332 * access of the memory bitmap. 333 * 334 * The struct rtree_node represents one node of the radix tree. 335 */ 336 337 #define BM_END_OF_MAP (~0UL) 338 339 #define BM_BITS_PER_BLOCK (PAGE_SIZE * BITS_PER_BYTE) 340 #define BM_BLOCK_SHIFT (PAGE_SHIFT + 3) 341 #define BM_BLOCK_MASK ((1UL << BM_BLOCK_SHIFT) - 1) 342 343 /* 344 * struct rtree_node is a wrapper struct to link the nodes 345 * of the rtree together for easy linear iteration over 346 * bits and easy freeing 347 */ 348 struct rtree_node { 349 struct list_head list; 350 unsigned long *data; 351 }; 352 353 /* 354 * struct mem_zone_bm_rtree represents a bitmap used for one 355 * populated memory zone. 356 */ 357 struct mem_zone_bm_rtree { 358 struct list_head list; /* Link Zones together */ 359 struct list_head nodes; /* Radix Tree inner nodes */ 360 struct list_head leaves; /* Radix Tree leaves */ 361 unsigned long start_pfn; /* Zone start page frame */ 362 unsigned long end_pfn; /* Zone end page frame + 1 */ 363 struct rtree_node *rtree; /* Radix Tree Root */ 364 int levels; /* Number of Radix Tree Levels */ 365 unsigned int blocks; /* Number of Bitmap Blocks */ 366 }; 367 368 /* strcut bm_position is used for browsing memory bitmaps */ 369 370 struct bm_position { 371 struct mem_zone_bm_rtree *zone; 372 struct rtree_node *node; 373 unsigned long node_pfn; 374 int node_bit; 375 }; 376 377 struct memory_bitmap { 378 struct list_head zones; 379 struct linked_page *p_list; /* list of pages used to store zone 380 bitmap objects and bitmap block 381 objects */ 382 struct bm_position cur; /* most recently used bit position */ 383 }; 384 385 /* Functions that operate on memory bitmaps */ 386 387 #define BM_ENTRIES_PER_LEVEL (PAGE_SIZE / sizeof(unsigned long)) 388 #if BITS_PER_LONG == 32 389 #define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 2) 390 #else 391 #define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 3) 392 #endif 393 #define BM_RTREE_LEVEL_MASK ((1UL << BM_RTREE_LEVEL_SHIFT) - 1) 394 395 /** 396 * alloc_rtree_node - Allocate a new node and add it to the radix tree. 397 * 398 * This function is used to allocate inner nodes as well as the 399 * leave nodes of the radix tree. It also adds the node to the 400 * corresponding linked list passed in by the *list parameter. 401 */ 402 static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed, 403 struct chain_allocator *ca, 404 struct list_head *list) 405 { 406 struct rtree_node *node; 407 408 node = chain_alloc(ca, sizeof(struct rtree_node)); 409 if (!node) 410 return NULL; 411 412 node->data = get_image_page(gfp_mask, safe_needed); 413 if (!node->data) 414 return NULL; 415 416 list_add_tail(&node->list, list); 417 418 return node; 419 } 420 421 /** 422 * add_rtree_block - Add a new leave node to the radix tree. 423 * 424 * The leave nodes need to be allocated in order to keep the leaves 425 * linked list in order. This is guaranteed by the zone->blocks 426 * counter. 427 */ 428 static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask, 429 int safe_needed, struct chain_allocator *ca) 430 { 431 struct rtree_node *node, *block, **dst; 432 unsigned int levels_needed, block_nr; 433 int i; 434 435 block_nr = zone->blocks; 436 levels_needed = 0; 437 438 /* How many levels do we need for this block nr? */ 439 while (block_nr) { 440 levels_needed += 1; 441 block_nr >>= BM_RTREE_LEVEL_SHIFT; 442 } 443 444 /* Make sure the rtree has enough levels */ 445 for (i = zone->levels; i < levels_needed; i++) { 446 node = alloc_rtree_node(gfp_mask, safe_needed, ca, 447 &zone->nodes); 448 if (!node) 449 return -ENOMEM; 450 451 node->data[0] = (unsigned long)zone->rtree; 452 zone->rtree = node; 453 zone->levels += 1; 454 } 455 456 /* Allocate new block */ 457 block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves); 458 if (!block) 459 return -ENOMEM; 460 461 /* Now walk the rtree to insert the block */ 462 node = zone->rtree; 463 dst = &zone->rtree; 464 block_nr = zone->blocks; 465 for (i = zone->levels; i > 0; i--) { 466 int index; 467 468 if (!node) { 469 node = alloc_rtree_node(gfp_mask, safe_needed, ca, 470 &zone->nodes); 471 if (!node) 472 return -ENOMEM; 473 *dst = node; 474 } 475 476 index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT); 477 index &= BM_RTREE_LEVEL_MASK; 478 dst = (struct rtree_node **)&((*dst)->data[index]); 479 node = *dst; 480 } 481 482 zone->blocks += 1; 483 *dst = block; 484 485 return 0; 486 } 487 488 static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone, 489 int clear_nosave_free); 490 491 /** 492 * create_zone_bm_rtree - Create a radix tree for one zone. 493 * 494 * Allocated the mem_zone_bm_rtree structure and initializes it. 495 * This function also allocated and builds the radix tree for the 496 * zone. 497 */ 498 static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask, 499 int safe_needed, 500 struct chain_allocator *ca, 501 unsigned long start, 502 unsigned long end) 503 { 504 struct mem_zone_bm_rtree *zone; 505 unsigned int i, nr_blocks; 506 unsigned long pages; 507 508 pages = end - start; 509 zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree)); 510 if (!zone) 511 return NULL; 512 513 INIT_LIST_HEAD(&zone->nodes); 514 INIT_LIST_HEAD(&zone->leaves); 515 zone->start_pfn = start; 516 zone->end_pfn = end; 517 nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK); 518 519 for (i = 0; i < nr_blocks; i++) { 520 if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) { 521 free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR); 522 return NULL; 523 } 524 } 525 526 return zone; 527 } 528 529 /** 530 * free_zone_bm_rtree - Free the memory of the radix tree. 531 * 532 * Free all node pages of the radix tree. The mem_zone_bm_rtree 533 * structure itself is not freed here nor are the rtree_node 534 * structs. 535 */ 536 static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone, 537 int clear_nosave_free) 538 { 539 struct rtree_node *node; 540 541 list_for_each_entry(node, &zone->nodes, list) 542 free_image_page(node->data, clear_nosave_free); 543 544 list_for_each_entry(node, &zone->leaves, list) 545 free_image_page(node->data, clear_nosave_free); 546 } 547 548 static void memory_bm_position_reset(struct memory_bitmap *bm) 549 { 550 bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree, 551 list); 552 bm->cur.node = list_entry(bm->cur.zone->leaves.next, 553 struct rtree_node, list); 554 bm->cur.node_pfn = 0; 555 bm->cur.node_bit = 0; 556 } 557 558 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free); 559 560 struct mem_extent { 561 struct list_head hook; 562 unsigned long start; 563 unsigned long end; 564 }; 565 566 /** 567 * free_mem_extents - Free a list of memory extents. 568 * @list: List of extents to free. 569 */ 570 static void free_mem_extents(struct list_head *list) 571 { 572 struct mem_extent *ext, *aux; 573 574 list_for_each_entry_safe(ext, aux, list, hook) { 575 list_del(&ext->hook); 576 kfree(ext); 577 } 578 } 579 580 /** 581 * create_mem_extents - Create a list of memory extents. 582 * @list: List to put the extents into. 583 * @gfp_mask: Mask to use for memory allocations. 584 * 585 * The extents represent contiguous ranges of PFNs. 586 */ 587 static int create_mem_extents(struct list_head *list, gfp_t gfp_mask) 588 { 589 struct zone *zone; 590 591 INIT_LIST_HEAD(list); 592 593 for_each_populated_zone(zone) { 594 unsigned long zone_start, zone_end; 595 struct mem_extent *ext, *cur, *aux; 596 597 zone_start = zone->zone_start_pfn; 598 zone_end = zone_end_pfn(zone); 599 600 list_for_each_entry(ext, list, hook) 601 if (zone_start <= ext->end) 602 break; 603 604 if (&ext->hook == list || zone_end < ext->start) { 605 /* New extent is necessary */ 606 struct mem_extent *new_ext; 607 608 new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask); 609 if (!new_ext) { 610 free_mem_extents(list); 611 return -ENOMEM; 612 } 613 new_ext->start = zone_start; 614 new_ext->end = zone_end; 615 list_add_tail(&new_ext->hook, &ext->hook); 616 continue; 617 } 618 619 /* Merge this zone's range of PFNs with the existing one */ 620 if (zone_start < ext->start) 621 ext->start = zone_start; 622 if (zone_end > ext->end) 623 ext->end = zone_end; 624 625 /* More merging may be possible */ 626 cur = ext; 627 list_for_each_entry_safe_continue(cur, aux, list, hook) { 628 if (zone_end < cur->start) 629 break; 630 if (zone_end < cur->end) 631 ext->end = cur->end; 632 list_del(&cur->hook); 633 kfree(cur); 634 } 635 } 636 637 return 0; 638 } 639 640 /** 641 * memory_bm_create - Allocate memory for a memory bitmap. 642 */ 643 static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, 644 int safe_needed) 645 { 646 struct chain_allocator ca; 647 struct list_head mem_extents; 648 struct mem_extent *ext; 649 int error; 650 651 chain_init(&ca, gfp_mask, safe_needed); 652 INIT_LIST_HEAD(&bm->zones); 653 654 error = create_mem_extents(&mem_extents, gfp_mask); 655 if (error) 656 return error; 657 658 list_for_each_entry(ext, &mem_extents, hook) { 659 struct mem_zone_bm_rtree *zone; 660 661 zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca, 662 ext->start, ext->end); 663 if (!zone) { 664 error = -ENOMEM; 665 goto Error; 666 } 667 list_add_tail(&zone->list, &bm->zones); 668 } 669 670 bm->p_list = ca.chain; 671 memory_bm_position_reset(bm); 672 Exit: 673 free_mem_extents(&mem_extents); 674 return error; 675 676 Error: 677 bm->p_list = ca.chain; 678 memory_bm_free(bm, PG_UNSAFE_CLEAR); 679 goto Exit; 680 } 681 682 /** 683 * memory_bm_free - Free memory occupied by the memory bitmap. 684 * @bm: Memory bitmap. 685 */ 686 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free) 687 { 688 struct mem_zone_bm_rtree *zone; 689 690 list_for_each_entry(zone, &bm->zones, list) 691 free_zone_bm_rtree(zone, clear_nosave_free); 692 693 free_list_of_pages(bm->p_list, clear_nosave_free); 694 695 INIT_LIST_HEAD(&bm->zones); 696 } 697 698 /** 699 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap. 700 * 701 * Find the bit in memory bitmap @bm that corresponds to the given PFN. 702 * The cur.zone, cur.block and cur.node_pfn members of @bm are updated. 703 * 704 * Walk the radix tree to find the page containing the bit that represents @pfn 705 * and return the position of the bit in @addr and @bit_nr. 706 */ 707 static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn, 708 void **addr, unsigned int *bit_nr) 709 { 710 struct mem_zone_bm_rtree *curr, *zone; 711 struct rtree_node *node; 712 int i, block_nr; 713 714 zone = bm->cur.zone; 715 716 if (pfn >= zone->start_pfn && pfn < zone->end_pfn) 717 goto zone_found; 718 719 zone = NULL; 720 721 /* Find the right zone */ 722 list_for_each_entry(curr, &bm->zones, list) { 723 if (pfn >= curr->start_pfn && pfn < curr->end_pfn) { 724 zone = curr; 725 break; 726 } 727 } 728 729 if (!zone) 730 return -EFAULT; 731 732 zone_found: 733 /* 734 * We have found the zone. Now walk the radix tree to find the leaf node 735 * for our PFN. 736 */ 737 node = bm->cur.node; 738 if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn) 739 goto node_found; 740 741 node = zone->rtree; 742 block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT; 743 744 for (i = zone->levels; i > 0; i--) { 745 int index; 746 747 index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT); 748 index &= BM_RTREE_LEVEL_MASK; 749 BUG_ON(node->data[index] == 0); 750 node = (struct rtree_node *)node->data[index]; 751 } 752 753 node_found: 754 /* Update last position */ 755 bm->cur.zone = zone; 756 bm->cur.node = node; 757 bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK; 758 759 /* Set return values */ 760 *addr = node->data; 761 *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK; 762 763 return 0; 764 } 765 766 static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn) 767 { 768 void *addr; 769 unsigned int bit; 770 int error; 771 772 error = memory_bm_find_bit(bm, pfn, &addr, &bit); 773 BUG_ON(error); 774 set_bit(bit, addr); 775 } 776 777 static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn) 778 { 779 void *addr; 780 unsigned int bit; 781 int error; 782 783 error = memory_bm_find_bit(bm, pfn, &addr, &bit); 784 if (!error) 785 set_bit(bit, addr); 786 787 return error; 788 } 789 790 static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn) 791 { 792 void *addr; 793 unsigned int bit; 794 int error; 795 796 error = memory_bm_find_bit(bm, pfn, &addr, &bit); 797 BUG_ON(error); 798 clear_bit(bit, addr); 799 } 800 801 static void memory_bm_clear_current(struct memory_bitmap *bm) 802 { 803 int bit; 804 805 bit = max(bm->cur.node_bit - 1, 0); 806 clear_bit(bit, bm->cur.node->data); 807 } 808 809 static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn) 810 { 811 void *addr; 812 unsigned int bit; 813 int error; 814 815 error = memory_bm_find_bit(bm, pfn, &addr, &bit); 816 BUG_ON(error); 817 return test_bit(bit, addr); 818 } 819 820 static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn) 821 { 822 void *addr; 823 unsigned int bit; 824 825 return !memory_bm_find_bit(bm, pfn, &addr, &bit); 826 } 827 828 /* 829 * rtree_next_node - Jump to the next leaf node. 830 * 831 * Set the position to the beginning of the next node in the 832 * memory bitmap. This is either the next node in the current 833 * zone's radix tree or the first node in the radix tree of the 834 * next zone. 835 * 836 * Return true if there is a next node, false otherwise. 837 */ 838 static bool rtree_next_node(struct memory_bitmap *bm) 839 { 840 if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) { 841 bm->cur.node = list_entry(bm->cur.node->list.next, 842 struct rtree_node, list); 843 bm->cur.node_pfn += BM_BITS_PER_BLOCK; 844 bm->cur.node_bit = 0; 845 touch_softlockup_watchdog(); 846 return true; 847 } 848 849 /* No more nodes, goto next zone */ 850 if (!list_is_last(&bm->cur.zone->list, &bm->zones)) { 851 bm->cur.zone = list_entry(bm->cur.zone->list.next, 852 struct mem_zone_bm_rtree, list); 853 bm->cur.node = list_entry(bm->cur.zone->leaves.next, 854 struct rtree_node, list); 855 bm->cur.node_pfn = 0; 856 bm->cur.node_bit = 0; 857 return true; 858 } 859 860 /* No more zones */ 861 return false; 862 } 863 864 /** 865 * memory_bm_rtree_next_pfn - Find the next set bit in a memory bitmap. 866 * @bm: Memory bitmap. 867 * 868 * Starting from the last returned position this function searches for the next 869 * set bit in @bm and returns the PFN represented by it. If no more bits are 870 * set, BM_END_OF_MAP is returned. 871 * 872 * It is required to run memory_bm_position_reset() before the first call to 873 * this function for the given memory bitmap. 874 */ 875 static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm) 876 { 877 unsigned long bits, pfn, pages; 878 int bit; 879 880 do { 881 pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn; 882 bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK); 883 bit = find_next_bit(bm->cur.node->data, bits, 884 bm->cur.node_bit); 885 if (bit < bits) { 886 pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit; 887 bm->cur.node_bit = bit + 1; 888 return pfn; 889 } 890 } while (rtree_next_node(bm)); 891 892 return BM_END_OF_MAP; 893 } 894 895 /* 896 * This structure represents a range of page frames the contents of which 897 * should not be saved during hibernation. 898 */ 899 struct nosave_region { 900 struct list_head list; 901 unsigned long start_pfn; 902 unsigned long end_pfn; 903 }; 904 905 static LIST_HEAD(nosave_regions); 906 907 static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone) 908 { 909 struct rtree_node *node; 910 911 list_for_each_entry(node, &zone->nodes, list) 912 recycle_safe_page(node->data); 913 914 list_for_each_entry(node, &zone->leaves, list) 915 recycle_safe_page(node->data); 916 } 917 918 static void memory_bm_recycle(struct memory_bitmap *bm) 919 { 920 struct mem_zone_bm_rtree *zone; 921 struct linked_page *p_list; 922 923 list_for_each_entry(zone, &bm->zones, list) 924 recycle_zone_bm_rtree(zone); 925 926 p_list = bm->p_list; 927 while (p_list) { 928 struct linked_page *lp = p_list; 929 930 p_list = lp->next; 931 recycle_safe_page(lp); 932 } 933 } 934 935 /** 936 * register_nosave_region - Register a region of unsaveable memory. 937 * 938 * Register a range of page frames the contents of which should not be saved 939 * during hibernation (to be used in the early initialization code). 940 */ 941 void __init __register_nosave_region(unsigned long start_pfn, 942 unsigned long end_pfn, int use_kmalloc) 943 { 944 struct nosave_region *region; 945 946 if (start_pfn >= end_pfn) 947 return; 948 949 if (!list_empty(&nosave_regions)) { 950 /* Try to extend the previous region (they should be sorted) */ 951 region = list_entry(nosave_regions.prev, 952 struct nosave_region, list); 953 if (region->end_pfn == start_pfn) { 954 region->end_pfn = end_pfn; 955 goto Report; 956 } 957 } 958 if (use_kmalloc) { 959 /* During init, this shouldn't fail */ 960 region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL); 961 BUG_ON(!region); 962 } else { 963 /* This allocation cannot fail */ 964 region = memblock_alloc(sizeof(struct nosave_region), 965 SMP_CACHE_BYTES); 966 if (!region) 967 panic("%s: Failed to allocate %zu bytes\n", __func__, 968 sizeof(struct nosave_region)); 969 } 970 region->start_pfn = start_pfn; 971 region->end_pfn = end_pfn; 972 list_add_tail(®ion->list, &nosave_regions); 973 Report: 974 pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n", 975 (unsigned long long) start_pfn << PAGE_SHIFT, 976 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1); 977 } 978 979 /* 980 * Set bits in this map correspond to the page frames the contents of which 981 * should not be saved during the suspend. 982 */ 983 static struct memory_bitmap *forbidden_pages_map; 984 985 /* Set bits in this map correspond to free page frames. */ 986 static struct memory_bitmap *free_pages_map; 987 988 /* 989 * Each page frame allocated for creating the image is marked by setting the 990 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously 991 */ 992 993 void swsusp_set_page_free(struct page *page) 994 { 995 if (free_pages_map) 996 memory_bm_set_bit(free_pages_map, page_to_pfn(page)); 997 } 998 999 static int swsusp_page_is_free(struct page *page) 1000 { 1001 return free_pages_map ? 1002 memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0; 1003 } 1004 1005 void swsusp_unset_page_free(struct page *page) 1006 { 1007 if (free_pages_map) 1008 memory_bm_clear_bit(free_pages_map, page_to_pfn(page)); 1009 } 1010 1011 static void swsusp_set_page_forbidden(struct page *page) 1012 { 1013 if (forbidden_pages_map) 1014 memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page)); 1015 } 1016 1017 int swsusp_page_is_forbidden(struct page *page) 1018 { 1019 return forbidden_pages_map ? 1020 memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0; 1021 } 1022 1023 static void swsusp_unset_page_forbidden(struct page *page) 1024 { 1025 if (forbidden_pages_map) 1026 memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page)); 1027 } 1028 1029 /** 1030 * mark_nosave_pages - Mark pages that should not be saved. 1031 * @bm: Memory bitmap. 1032 * 1033 * Set the bits in @bm that correspond to the page frames the contents of which 1034 * should not be saved. 1035 */ 1036 static void mark_nosave_pages(struct memory_bitmap *bm) 1037 { 1038 struct nosave_region *region; 1039 1040 if (list_empty(&nosave_regions)) 1041 return; 1042 1043 list_for_each_entry(region, &nosave_regions, list) { 1044 unsigned long pfn; 1045 1046 pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n", 1047 (unsigned long long) region->start_pfn << PAGE_SHIFT, 1048 ((unsigned long long) region->end_pfn << PAGE_SHIFT) 1049 - 1); 1050 1051 for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++) 1052 if (pfn_valid(pfn)) { 1053 /* 1054 * It is safe to ignore the result of 1055 * mem_bm_set_bit_check() here, since we won't 1056 * touch the PFNs for which the error is 1057 * returned anyway. 1058 */ 1059 mem_bm_set_bit_check(bm, pfn); 1060 } 1061 } 1062 } 1063 1064 /** 1065 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information. 1066 * 1067 * Create bitmaps needed for marking page frames that should not be saved and 1068 * free page frames. The forbidden_pages_map and free_pages_map pointers are 1069 * only modified if everything goes well, because we don't want the bits to be 1070 * touched before both bitmaps are set up. 1071 */ 1072 int create_basic_memory_bitmaps(void) 1073 { 1074 struct memory_bitmap *bm1, *bm2; 1075 int error = 0; 1076 1077 if (forbidden_pages_map && free_pages_map) 1078 return 0; 1079 else 1080 BUG_ON(forbidden_pages_map || free_pages_map); 1081 1082 bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL); 1083 if (!bm1) 1084 return -ENOMEM; 1085 1086 error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY); 1087 if (error) 1088 goto Free_first_object; 1089 1090 bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL); 1091 if (!bm2) 1092 goto Free_first_bitmap; 1093 1094 error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY); 1095 if (error) 1096 goto Free_second_object; 1097 1098 forbidden_pages_map = bm1; 1099 free_pages_map = bm2; 1100 mark_nosave_pages(forbidden_pages_map); 1101 1102 pr_debug("Basic memory bitmaps created\n"); 1103 1104 return 0; 1105 1106 Free_second_object: 1107 kfree(bm2); 1108 Free_first_bitmap: 1109 memory_bm_free(bm1, PG_UNSAFE_CLEAR); 1110 Free_first_object: 1111 kfree(bm1); 1112 return -ENOMEM; 1113 } 1114 1115 /** 1116 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information. 1117 * 1118 * Free memory bitmaps allocated by create_basic_memory_bitmaps(). The 1119 * auxiliary pointers are necessary so that the bitmaps themselves are not 1120 * referred to while they are being freed. 1121 */ 1122 void free_basic_memory_bitmaps(void) 1123 { 1124 struct memory_bitmap *bm1, *bm2; 1125 1126 if (WARN_ON(!(forbidden_pages_map && free_pages_map))) 1127 return; 1128 1129 bm1 = forbidden_pages_map; 1130 bm2 = free_pages_map; 1131 forbidden_pages_map = NULL; 1132 free_pages_map = NULL; 1133 memory_bm_free(bm1, PG_UNSAFE_CLEAR); 1134 kfree(bm1); 1135 memory_bm_free(bm2, PG_UNSAFE_CLEAR); 1136 kfree(bm2); 1137 1138 pr_debug("Basic memory bitmaps freed\n"); 1139 } 1140 1141 void clear_free_pages(void) 1142 { 1143 #ifdef CONFIG_PAGE_POISONING_ZERO 1144 struct memory_bitmap *bm = free_pages_map; 1145 unsigned long pfn; 1146 1147 if (WARN_ON(!(free_pages_map))) 1148 return; 1149 1150 memory_bm_position_reset(bm); 1151 pfn = memory_bm_next_pfn(bm); 1152 while (pfn != BM_END_OF_MAP) { 1153 if (pfn_valid(pfn)) 1154 clear_highpage(pfn_to_page(pfn)); 1155 1156 pfn = memory_bm_next_pfn(bm); 1157 } 1158 memory_bm_position_reset(bm); 1159 pr_info("free pages cleared after restore\n"); 1160 #endif /* PAGE_POISONING_ZERO */ 1161 } 1162 1163 /** 1164 * snapshot_additional_pages - Estimate the number of extra pages needed. 1165 * @zone: Memory zone to carry out the computation for. 1166 * 1167 * Estimate the number of additional pages needed for setting up a hibernation 1168 * image data structures for @zone (usually, the returned value is greater than 1169 * the exact number). 1170 */ 1171 unsigned int snapshot_additional_pages(struct zone *zone) 1172 { 1173 unsigned int rtree, nodes; 1174 1175 rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK); 1176 rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node), 1177 LINKED_PAGE_DATA_SIZE); 1178 while (nodes > 1) { 1179 nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL); 1180 rtree += nodes; 1181 } 1182 1183 return 2 * rtree; 1184 } 1185 1186 #ifdef CONFIG_HIGHMEM 1187 /** 1188 * count_free_highmem_pages - Compute the total number of free highmem pages. 1189 * 1190 * The returned number is system-wide. 1191 */ 1192 static unsigned int count_free_highmem_pages(void) 1193 { 1194 struct zone *zone; 1195 unsigned int cnt = 0; 1196 1197 for_each_populated_zone(zone) 1198 if (is_highmem(zone)) 1199 cnt += zone_page_state(zone, NR_FREE_PAGES); 1200 1201 return cnt; 1202 } 1203 1204 /** 1205 * saveable_highmem_page - Check if a highmem page is saveable. 1206 * 1207 * Determine whether a highmem page should be included in a hibernation image. 1208 * 1209 * We should save the page if it isn't Nosave or NosaveFree, or Reserved, 1210 * and it isn't part of a free chunk of pages. 1211 */ 1212 static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn) 1213 { 1214 struct page *page; 1215 1216 if (!pfn_valid(pfn)) 1217 return NULL; 1218 1219 page = pfn_to_online_page(pfn); 1220 if (!page || page_zone(page) != zone) 1221 return NULL; 1222 1223 BUG_ON(!PageHighMem(page)); 1224 1225 if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page)) 1226 return NULL; 1227 1228 if (PageReserved(page) || PageOffline(page)) 1229 return NULL; 1230 1231 if (page_is_guard(page)) 1232 return NULL; 1233 1234 return page; 1235 } 1236 1237 /** 1238 * count_highmem_pages - Compute the total number of saveable highmem pages. 1239 */ 1240 static unsigned int count_highmem_pages(void) 1241 { 1242 struct zone *zone; 1243 unsigned int n = 0; 1244 1245 for_each_populated_zone(zone) { 1246 unsigned long pfn, max_zone_pfn; 1247 1248 if (!is_highmem(zone)) 1249 continue; 1250 1251 mark_free_pages(zone); 1252 max_zone_pfn = zone_end_pfn(zone); 1253 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 1254 if (saveable_highmem_page(zone, pfn)) 1255 n++; 1256 } 1257 return n; 1258 } 1259 #else 1260 static inline void *saveable_highmem_page(struct zone *z, unsigned long p) 1261 { 1262 return NULL; 1263 } 1264 #endif /* CONFIG_HIGHMEM */ 1265 1266 /** 1267 * saveable_page - Check if the given page is saveable. 1268 * 1269 * Determine whether a non-highmem page should be included in a hibernation 1270 * image. 1271 * 1272 * We should save the page if it isn't Nosave, and is not in the range 1273 * of pages statically defined as 'unsaveable', and it isn't part of 1274 * a free chunk of pages. 1275 */ 1276 static struct page *saveable_page(struct zone *zone, unsigned long pfn) 1277 { 1278 struct page *page; 1279 1280 if (!pfn_valid(pfn)) 1281 return NULL; 1282 1283 page = pfn_to_online_page(pfn); 1284 if (!page || page_zone(page) != zone) 1285 return NULL; 1286 1287 BUG_ON(PageHighMem(page)); 1288 1289 if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page)) 1290 return NULL; 1291 1292 if (PageOffline(page)) 1293 return NULL; 1294 1295 if (PageReserved(page) 1296 && (!kernel_page_present(page) || pfn_is_nosave(pfn))) 1297 return NULL; 1298 1299 if (page_is_guard(page)) 1300 return NULL; 1301 1302 return page; 1303 } 1304 1305 /** 1306 * count_data_pages - Compute the total number of saveable non-highmem pages. 1307 */ 1308 static unsigned int count_data_pages(void) 1309 { 1310 struct zone *zone; 1311 unsigned long pfn, max_zone_pfn; 1312 unsigned int n = 0; 1313 1314 for_each_populated_zone(zone) { 1315 if (is_highmem(zone)) 1316 continue; 1317 1318 mark_free_pages(zone); 1319 max_zone_pfn = zone_end_pfn(zone); 1320 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 1321 if (saveable_page(zone, pfn)) 1322 n++; 1323 } 1324 return n; 1325 } 1326 1327 /* 1328 * This is needed, because copy_page and memcpy are not usable for copying 1329 * task structs. 1330 */ 1331 static inline void do_copy_page(long *dst, long *src) 1332 { 1333 int n; 1334 1335 for (n = PAGE_SIZE / sizeof(long); n; n--) 1336 *dst++ = *src++; 1337 } 1338 1339 /** 1340 * safe_copy_page - Copy a page in a safe way. 1341 * 1342 * Check if the page we are going to copy is marked as present in the kernel 1343 * page tables. This always is the case if CONFIG_DEBUG_PAGEALLOC or 1344 * CONFIG_ARCH_HAS_SET_DIRECT_MAP is not set. In that case kernel_page_present() 1345 * always returns 'true'. 1346 */ 1347 static void safe_copy_page(void *dst, struct page *s_page) 1348 { 1349 if (kernel_page_present(s_page)) { 1350 do_copy_page(dst, page_address(s_page)); 1351 } else { 1352 kernel_map_pages(s_page, 1, 1); 1353 do_copy_page(dst, page_address(s_page)); 1354 kernel_map_pages(s_page, 1, 0); 1355 } 1356 } 1357 1358 #ifdef CONFIG_HIGHMEM 1359 static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn) 1360 { 1361 return is_highmem(zone) ? 1362 saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn); 1363 } 1364 1365 static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) 1366 { 1367 struct page *s_page, *d_page; 1368 void *src, *dst; 1369 1370 s_page = pfn_to_page(src_pfn); 1371 d_page = pfn_to_page(dst_pfn); 1372 if (PageHighMem(s_page)) { 1373 src = kmap_atomic(s_page); 1374 dst = kmap_atomic(d_page); 1375 do_copy_page(dst, src); 1376 kunmap_atomic(dst); 1377 kunmap_atomic(src); 1378 } else { 1379 if (PageHighMem(d_page)) { 1380 /* 1381 * The page pointed to by src may contain some kernel 1382 * data modified by kmap_atomic() 1383 */ 1384 safe_copy_page(buffer, s_page); 1385 dst = kmap_atomic(d_page); 1386 copy_page(dst, buffer); 1387 kunmap_atomic(dst); 1388 } else { 1389 safe_copy_page(page_address(d_page), s_page); 1390 } 1391 } 1392 } 1393 #else 1394 #define page_is_saveable(zone, pfn) saveable_page(zone, pfn) 1395 1396 static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) 1397 { 1398 safe_copy_page(page_address(pfn_to_page(dst_pfn)), 1399 pfn_to_page(src_pfn)); 1400 } 1401 #endif /* CONFIG_HIGHMEM */ 1402 1403 static void copy_data_pages(struct memory_bitmap *copy_bm, 1404 struct memory_bitmap *orig_bm) 1405 { 1406 struct zone *zone; 1407 unsigned long pfn; 1408 1409 for_each_populated_zone(zone) { 1410 unsigned long max_zone_pfn; 1411 1412 mark_free_pages(zone); 1413 max_zone_pfn = zone_end_pfn(zone); 1414 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 1415 if (page_is_saveable(zone, pfn)) 1416 memory_bm_set_bit(orig_bm, pfn); 1417 } 1418 memory_bm_position_reset(orig_bm); 1419 memory_bm_position_reset(copy_bm); 1420 for(;;) { 1421 pfn = memory_bm_next_pfn(orig_bm); 1422 if (unlikely(pfn == BM_END_OF_MAP)) 1423 break; 1424 copy_data_page(memory_bm_next_pfn(copy_bm), pfn); 1425 } 1426 } 1427 1428 /* Total number of image pages */ 1429 static unsigned int nr_copy_pages; 1430 /* Number of pages needed for saving the original pfns of the image pages */ 1431 static unsigned int nr_meta_pages; 1432 /* 1433 * Numbers of normal and highmem page frames allocated for hibernation image 1434 * before suspending devices. 1435 */ 1436 static unsigned int alloc_normal, alloc_highmem; 1437 /* 1438 * Memory bitmap used for marking saveable pages (during hibernation) or 1439 * hibernation image pages (during restore) 1440 */ 1441 static struct memory_bitmap orig_bm; 1442 /* 1443 * Memory bitmap used during hibernation for marking allocated page frames that 1444 * will contain copies of saveable pages. During restore it is initially used 1445 * for marking hibernation image pages, but then the set bits from it are 1446 * duplicated in @orig_bm and it is released. On highmem systems it is next 1447 * used for marking "safe" highmem pages, but it has to be reinitialized for 1448 * this purpose. 1449 */ 1450 static struct memory_bitmap copy_bm; 1451 1452 /** 1453 * swsusp_free - Free pages allocated for hibernation image. 1454 * 1455 * Image pages are alocated before snapshot creation, so they need to be 1456 * released after resume. 1457 */ 1458 void swsusp_free(void) 1459 { 1460 unsigned long fb_pfn, fr_pfn; 1461 1462 if (!forbidden_pages_map || !free_pages_map) 1463 goto out; 1464 1465 memory_bm_position_reset(forbidden_pages_map); 1466 memory_bm_position_reset(free_pages_map); 1467 1468 loop: 1469 fr_pfn = memory_bm_next_pfn(free_pages_map); 1470 fb_pfn = memory_bm_next_pfn(forbidden_pages_map); 1471 1472 /* 1473 * Find the next bit set in both bitmaps. This is guaranteed to 1474 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP. 1475 */ 1476 do { 1477 if (fb_pfn < fr_pfn) 1478 fb_pfn = memory_bm_next_pfn(forbidden_pages_map); 1479 if (fr_pfn < fb_pfn) 1480 fr_pfn = memory_bm_next_pfn(free_pages_map); 1481 } while (fb_pfn != fr_pfn); 1482 1483 if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) { 1484 struct page *page = pfn_to_page(fr_pfn); 1485 1486 memory_bm_clear_current(forbidden_pages_map); 1487 memory_bm_clear_current(free_pages_map); 1488 hibernate_restore_unprotect_page(page_address(page)); 1489 __free_page(page); 1490 goto loop; 1491 } 1492 1493 out: 1494 nr_copy_pages = 0; 1495 nr_meta_pages = 0; 1496 restore_pblist = NULL; 1497 buffer = NULL; 1498 alloc_normal = 0; 1499 alloc_highmem = 0; 1500 hibernate_restore_protection_end(); 1501 } 1502 1503 /* Helper functions used for the shrinking of memory. */ 1504 1505 #define GFP_IMAGE (GFP_KERNEL | __GFP_NOWARN) 1506 1507 /** 1508 * preallocate_image_pages - Allocate a number of pages for hibernation image. 1509 * @nr_pages: Number of page frames to allocate. 1510 * @mask: GFP flags to use for the allocation. 1511 * 1512 * Return value: Number of page frames actually allocated 1513 */ 1514 static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask) 1515 { 1516 unsigned long nr_alloc = 0; 1517 1518 while (nr_pages > 0) { 1519 struct page *page; 1520 1521 page = alloc_image_page(mask); 1522 if (!page) 1523 break; 1524 memory_bm_set_bit(©_bm, page_to_pfn(page)); 1525 if (PageHighMem(page)) 1526 alloc_highmem++; 1527 else 1528 alloc_normal++; 1529 nr_pages--; 1530 nr_alloc++; 1531 } 1532 1533 return nr_alloc; 1534 } 1535 1536 static unsigned long preallocate_image_memory(unsigned long nr_pages, 1537 unsigned long avail_normal) 1538 { 1539 unsigned long alloc; 1540 1541 if (avail_normal <= alloc_normal) 1542 return 0; 1543 1544 alloc = avail_normal - alloc_normal; 1545 if (nr_pages < alloc) 1546 alloc = nr_pages; 1547 1548 return preallocate_image_pages(alloc, GFP_IMAGE); 1549 } 1550 1551 #ifdef CONFIG_HIGHMEM 1552 static unsigned long preallocate_image_highmem(unsigned long nr_pages) 1553 { 1554 return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM); 1555 } 1556 1557 /** 1558 * __fraction - Compute (an approximation of) x * (multiplier / base). 1559 */ 1560 static unsigned long __fraction(u64 x, u64 multiplier, u64 base) 1561 { 1562 x *= multiplier; 1563 do_div(x, base); 1564 return (unsigned long)x; 1565 } 1566 1567 static unsigned long preallocate_highmem_fraction(unsigned long nr_pages, 1568 unsigned long highmem, 1569 unsigned long total) 1570 { 1571 unsigned long alloc = __fraction(nr_pages, highmem, total); 1572 1573 return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM); 1574 } 1575 #else /* CONFIG_HIGHMEM */ 1576 static inline unsigned long preallocate_image_highmem(unsigned long nr_pages) 1577 { 1578 return 0; 1579 } 1580 1581 static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages, 1582 unsigned long highmem, 1583 unsigned long total) 1584 { 1585 return 0; 1586 } 1587 #endif /* CONFIG_HIGHMEM */ 1588 1589 /** 1590 * free_unnecessary_pages - Release preallocated pages not needed for the image. 1591 */ 1592 static unsigned long free_unnecessary_pages(void) 1593 { 1594 unsigned long save, to_free_normal, to_free_highmem, free; 1595 1596 save = count_data_pages(); 1597 if (alloc_normal >= save) { 1598 to_free_normal = alloc_normal - save; 1599 save = 0; 1600 } else { 1601 to_free_normal = 0; 1602 save -= alloc_normal; 1603 } 1604 save += count_highmem_pages(); 1605 if (alloc_highmem >= save) { 1606 to_free_highmem = alloc_highmem - save; 1607 } else { 1608 to_free_highmem = 0; 1609 save -= alloc_highmem; 1610 if (to_free_normal > save) 1611 to_free_normal -= save; 1612 else 1613 to_free_normal = 0; 1614 } 1615 free = to_free_normal + to_free_highmem; 1616 1617 memory_bm_position_reset(©_bm); 1618 1619 while (to_free_normal > 0 || to_free_highmem > 0) { 1620 unsigned long pfn = memory_bm_next_pfn(©_bm); 1621 struct page *page = pfn_to_page(pfn); 1622 1623 if (PageHighMem(page)) { 1624 if (!to_free_highmem) 1625 continue; 1626 to_free_highmem--; 1627 alloc_highmem--; 1628 } else { 1629 if (!to_free_normal) 1630 continue; 1631 to_free_normal--; 1632 alloc_normal--; 1633 } 1634 memory_bm_clear_bit(©_bm, pfn); 1635 swsusp_unset_page_forbidden(page); 1636 swsusp_unset_page_free(page); 1637 __free_page(page); 1638 } 1639 1640 return free; 1641 } 1642 1643 /** 1644 * minimum_image_size - Estimate the minimum acceptable size of an image. 1645 * @saveable: Number of saveable pages in the system. 1646 * 1647 * We want to avoid attempting to free too much memory too hard, so estimate the 1648 * minimum acceptable size of a hibernation image to use as the lower limit for 1649 * preallocating memory. 1650 * 1651 * We assume that the minimum image size should be proportional to 1652 * 1653 * [number of saveable pages] - [number of pages that can be freed in theory] 1654 * 1655 * where the second term is the sum of (1) reclaimable slab pages, (2) active 1656 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages. 1657 */ 1658 static unsigned long minimum_image_size(unsigned long saveable) 1659 { 1660 unsigned long size; 1661 1662 size = global_node_page_state(NR_SLAB_RECLAIMABLE) 1663 + global_node_page_state(NR_ACTIVE_ANON) 1664 + global_node_page_state(NR_INACTIVE_ANON) 1665 + global_node_page_state(NR_ACTIVE_FILE) 1666 + global_node_page_state(NR_INACTIVE_FILE); 1667 1668 return saveable <= size ? 0 : saveable - size; 1669 } 1670 1671 /** 1672 * hibernate_preallocate_memory - Preallocate memory for hibernation image. 1673 * 1674 * To create a hibernation image it is necessary to make a copy of every page 1675 * frame in use. We also need a number of page frames to be free during 1676 * hibernation for allocations made while saving the image and for device 1677 * drivers, in case they need to allocate memory from their hibernation 1678 * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough 1679 * estimate) and reserverd_size divided by PAGE_SIZE (which is tunable through 1680 * /sys/power/reserved_size, respectively). To make this happen, we compute the 1681 * total number of available page frames and allocate at least 1682 * 1683 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2 1684 * + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE) 1685 * 1686 * of them, which corresponds to the maximum size of a hibernation image. 1687 * 1688 * If image_size is set below the number following from the above formula, 1689 * the preallocation of memory is continued until the total number of saveable 1690 * pages in the system is below the requested image size or the minimum 1691 * acceptable image size returned by minimum_image_size(), whichever is greater. 1692 */ 1693 int hibernate_preallocate_memory(void) 1694 { 1695 struct zone *zone; 1696 unsigned long saveable, size, max_size, count, highmem, pages = 0; 1697 unsigned long alloc, save_highmem, pages_highmem, avail_normal; 1698 ktime_t start, stop; 1699 int error; 1700 1701 pr_info("Preallocating image memory... "); 1702 start = ktime_get(); 1703 1704 error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY); 1705 if (error) 1706 goto err_out; 1707 1708 error = memory_bm_create(©_bm, GFP_IMAGE, PG_ANY); 1709 if (error) 1710 goto err_out; 1711 1712 alloc_normal = 0; 1713 alloc_highmem = 0; 1714 1715 /* Count the number of saveable data pages. */ 1716 save_highmem = count_highmem_pages(); 1717 saveable = count_data_pages(); 1718 1719 /* 1720 * Compute the total number of page frames we can use (count) and the 1721 * number of pages needed for image metadata (size). 1722 */ 1723 count = saveable; 1724 saveable += save_highmem; 1725 highmem = save_highmem; 1726 size = 0; 1727 for_each_populated_zone(zone) { 1728 size += snapshot_additional_pages(zone); 1729 if (is_highmem(zone)) 1730 highmem += zone_page_state(zone, NR_FREE_PAGES); 1731 else 1732 count += zone_page_state(zone, NR_FREE_PAGES); 1733 } 1734 avail_normal = count; 1735 count += highmem; 1736 count -= totalreserve_pages; 1737 1738 /* Add number of pages required for page keys (s390 only). */ 1739 size += page_key_additional_pages(saveable); 1740 1741 /* Compute the maximum number of saveable pages to leave in memory. */ 1742 max_size = (count - (size + PAGES_FOR_IO)) / 2 1743 - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE); 1744 /* Compute the desired number of image pages specified by image_size. */ 1745 size = DIV_ROUND_UP(image_size, PAGE_SIZE); 1746 if (size > max_size) 1747 size = max_size; 1748 /* 1749 * If the desired number of image pages is at least as large as the 1750 * current number of saveable pages in memory, allocate page frames for 1751 * the image and we're done. 1752 */ 1753 if (size >= saveable) { 1754 pages = preallocate_image_highmem(save_highmem); 1755 pages += preallocate_image_memory(saveable - pages, avail_normal); 1756 goto out; 1757 } 1758 1759 /* Estimate the minimum size of the image. */ 1760 pages = minimum_image_size(saveable); 1761 /* 1762 * To avoid excessive pressure on the normal zone, leave room in it to 1763 * accommodate an image of the minimum size (unless it's already too 1764 * small, in which case don't preallocate pages from it at all). 1765 */ 1766 if (avail_normal > pages) 1767 avail_normal -= pages; 1768 else 1769 avail_normal = 0; 1770 if (size < pages) 1771 size = min_t(unsigned long, pages, max_size); 1772 1773 /* 1774 * Let the memory management subsystem know that we're going to need a 1775 * large number of page frames to allocate and make it free some memory. 1776 * NOTE: If this is not done, performance will be hurt badly in some 1777 * test cases. 1778 */ 1779 shrink_all_memory(saveable - size); 1780 1781 /* 1782 * The number of saveable pages in memory was too high, so apply some 1783 * pressure to decrease it. First, make room for the largest possible 1784 * image and fail if that doesn't work. Next, try to decrease the size 1785 * of the image as much as indicated by 'size' using allocations from 1786 * highmem and non-highmem zones separately. 1787 */ 1788 pages_highmem = preallocate_image_highmem(highmem / 2); 1789 alloc = count - max_size; 1790 if (alloc > pages_highmem) 1791 alloc -= pages_highmem; 1792 else 1793 alloc = 0; 1794 pages = preallocate_image_memory(alloc, avail_normal); 1795 if (pages < alloc) { 1796 /* We have exhausted non-highmem pages, try highmem. */ 1797 alloc -= pages; 1798 pages += pages_highmem; 1799 pages_highmem = preallocate_image_highmem(alloc); 1800 if (pages_highmem < alloc) 1801 goto err_out; 1802 pages += pages_highmem; 1803 /* 1804 * size is the desired number of saveable pages to leave in 1805 * memory, so try to preallocate (all memory - size) pages. 1806 */ 1807 alloc = (count - pages) - size; 1808 pages += preallocate_image_highmem(alloc); 1809 } else { 1810 /* 1811 * There are approximately max_size saveable pages at this point 1812 * and we want to reduce this number down to size. 1813 */ 1814 alloc = max_size - size; 1815 size = preallocate_highmem_fraction(alloc, highmem, count); 1816 pages_highmem += size; 1817 alloc -= size; 1818 size = preallocate_image_memory(alloc, avail_normal); 1819 pages_highmem += preallocate_image_highmem(alloc - size); 1820 pages += pages_highmem + size; 1821 } 1822 1823 /* 1824 * We only need as many page frames for the image as there are saveable 1825 * pages in memory, but we have allocated more. Release the excessive 1826 * ones now. 1827 */ 1828 pages -= free_unnecessary_pages(); 1829 1830 out: 1831 stop = ktime_get(); 1832 pr_cont("done (allocated %lu pages)\n", pages); 1833 swsusp_show_speed(start, stop, pages, "Allocated"); 1834 1835 return 0; 1836 1837 err_out: 1838 pr_cont("\n"); 1839 swsusp_free(); 1840 return -ENOMEM; 1841 } 1842 1843 #ifdef CONFIG_HIGHMEM 1844 /** 1845 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem. 1846 * 1847 * Compute the number of non-highmem pages that will be necessary for creating 1848 * copies of highmem pages. 1849 */ 1850 static unsigned int count_pages_for_highmem(unsigned int nr_highmem) 1851 { 1852 unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem; 1853 1854 if (free_highmem >= nr_highmem) 1855 nr_highmem = 0; 1856 else 1857 nr_highmem -= free_highmem; 1858 1859 return nr_highmem; 1860 } 1861 #else 1862 static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; } 1863 #endif /* CONFIG_HIGHMEM */ 1864 1865 /** 1866 * enough_free_mem - Check if there is enough free memory for the image. 1867 */ 1868 static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem) 1869 { 1870 struct zone *zone; 1871 unsigned int free = alloc_normal; 1872 1873 for_each_populated_zone(zone) 1874 if (!is_highmem(zone)) 1875 free += zone_page_state(zone, NR_FREE_PAGES); 1876 1877 nr_pages += count_pages_for_highmem(nr_highmem); 1878 pr_debug("Normal pages needed: %u + %u, available pages: %u\n", 1879 nr_pages, PAGES_FOR_IO, free); 1880 1881 return free > nr_pages + PAGES_FOR_IO; 1882 } 1883 1884 #ifdef CONFIG_HIGHMEM 1885 /** 1886 * get_highmem_buffer - Allocate a buffer for highmem pages. 1887 * 1888 * If there are some highmem pages in the hibernation image, we may need a 1889 * buffer to copy them and/or load their data. 1890 */ 1891 static inline int get_highmem_buffer(int safe_needed) 1892 { 1893 buffer = get_image_page(GFP_ATOMIC, safe_needed); 1894 return buffer ? 0 : -ENOMEM; 1895 } 1896 1897 /** 1898 * alloc_highmem_image_pages - Allocate some highmem pages for the image. 1899 * 1900 * Try to allocate as many pages as needed, but if the number of free highmem 1901 * pages is less than that, allocate them all. 1902 */ 1903 static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm, 1904 unsigned int nr_highmem) 1905 { 1906 unsigned int to_alloc = count_free_highmem_pages(); 1907 1908 if (to_alloc > nr_highmem) 1909 to_alloc = nr_highmem; 1910 1911 nr_highmem -= to_alloc; 1912 while (to_alloc-- > 0) { 1913 struct page *page; 1914 1915 page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM); 1916 memory_bm_set_bit(bm, page_to_pfn(page)); 1917 } 1918 return nr_highmem; 1919 } 1920 #else 1921 static inline int get_highmem_buffer(int safe_needed) { return 0; } 1922 1923 static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm, 1924 unsigned int n) { return 0; } 1925 #endif /* CONFIG_HIGHMEM */ 1926 1927 /** 1928 * swsusp_alloc - Allocate memory for hibernation image. 1929 * 1930 * We first try to allocate as many highmem pages as there are 1931 * saveable highmem pages in the system. If that fails, we allocate 1932 * non-highmem pages for the copies of the remaining highmem ones. 1933 * 1934 * In this approach it is likely that the copies of highmem pages will 1935 * also be located in the high memory, because of the way in which 1936 * copy_data_pages() works. 1937 */ 1938 static int swsusp_alloc(struct memory_bitmap *copy_bm, 1939 unsigned int nr_pages, unsigned int nr_highmem) 1940 { 1941 if (nr_highmem > 0) { 1942 if (get_highmem_buffer(PG_ANY)) 1943 goto err_out; 1944 if (nr_highmem > alloc_highmem) { 1945 nr_highmem -= alloc_highmem; 1946 nr_pages += alloc_highmem_pages(copy_bm, nr_highmem); 1947 } 1948 } 1949 if (nr_pages > alloc_normal) { 1950 nr_pages -= alloc_normal; 1951 while (nr_pages-- > 0) { 1952 struct page *page; 1953 1954 page = alloc_image_page(GFP_ATOMIC); 1955 if (!page) 1956 goto err_out; 1957 memory_bm_set_bit(copy_bm, page_to_pfn(page)); 1958 } 1959 } 1960 1961 return 0; 1962 1963 err_out: 1964 swsusp_free(); 1965 return -ENOMEM; 1966 } 1967 1968 asmlinkage __visible int swsusp_save(void) 1969 { 1970 unsigned int nr_pages, nr_highmem; 1971 1972 pr_info("Creating hibernation image:\n"); 1973 1974 drain_local_pages(NULL); 1975 nr_pages = count_data_pages(); 1976 nr_highmem = count_highmem_pages(); 1977 pr_info("Need to copy %u pages\n", nr_pages + nr_highmem); 1978 1979 if (!enough_free_mem(nr_pages, nr_highmem)) { 1980 pr_err("Not enough free memory\n"); 1981 return -ENOMEM; 1982 } 1983 1984 if (swsusp_alloc(©_bm, nr_pages, nr_highmem)) { 1985 pr_err("Memory allocation failed\n"); 1986 return -ENOMEM; 1987 } 1988 1989 /* 1990 * During allocating of suspend pagedir, new cold pages may appear. 1991 * Kill them. 1992 */ 1993 drain_local_pages(NULL); 1994 copy_data_pages(©_bm, &orig_bm); 1995 1996 /* 1997 * End of critical section. From now on, we can write to memory, 1998 * but we should not touch disk. This specially means we must _not_ 1999 * touch swap space! Except we must write out our image of course. 2000 */ 2001 2002 nr_pages += nr_highmem; 2003 nr_copy_pages = nr_pages; 2004 nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE); 2005 2006 pr_info("Hibernation image created (%d pages copied)\n", nr_pages); 2007 2008 return 0; 2009 } 2010 2011 #ifndef CONFIG_ARCH_HIBERNATION_HEADER 2012 static int init_header_complete(struct swsusp_info *info) 2013 { 2014 memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname)); 2015 info->version_code = LINUX_VERSION_CODE; 2016 return 0; 2017 } 2018 2019 static char *check_image_kernel(struct swsusp_info *info) 2020 { 2021 if (info->version_code != LINUX_VERSION_CODE) 2022 return "kernel version"; 2023 if (strcmp(info->uts.sysname,init_utsname()->sysname)) 2024 return "system type"; 2025 if (strcmp(info->uts.release,init_utsname()->release)) 2026 return "kernel release"; 2027 if (strcmp(info->uts.version,init_utsname()->version)) 2028 return "version"; 2029 if (strcmp(info->uts.machine,init_utsname()->machine)) 2030 return "machine"; 2031 return NULL; 2032 } 2033 #endif /* CONFIG_ARCH_HIBERNATION_HEADER */ 2034 2035 unsigned long snapshot_get_image_size(void) 2036 { 2037 return nr_copy_pages + nr_meta_pages + 1; 2038 } 2039 2040 static int init_header(struct swsusp_info *info) 2041 { 2042 memset(info, 0, sizeof(struct swsusp_info)); 2043 info->num_physpages = get_num_physpages(); 2044 info->image_pages = nr_copy_pages; 2045 info->pages = snapshot_get_image_size(); 2046 info->size = info->pages; 2047 info->size <<= PAGE_SHIFT; 2048 return init_header_complete(info); 2049 } 2050 2051 /** 2052 * pack_pfns - Prepare PFNs for saving. 2053 * @bm: Memory bitmap. 2054 * @buf: Memory buffer to store the PFNs in. 2055 * 2056 * PFNs corresponding to set bits in @bm are stored in the area of memory 2057 * pointed to by @buf (1 page at a time). 2058 */ 2059 static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm) 2060 { 2061 int j; 2062 2063 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) { 2064 buf[j] = memory_bm_next_pfn(bm); 2065 if (unlikely(buf[j] == BM_END_OF_MAP)) 2066 break; 2067 /* Save page key for data page (s390 only). */ 2068 page_key_read(buf + j); 2069 } 2070 } 2071 2072 /** 2073 * snapshot_read_next - Get the address to read the next image page from. 2074 * @handle: Snapshot handle to be used for the reading. 2075 * 2076 * On the first call, @handle should point to a zeroed snapshot_handle 2077 * structure. The structure gets populated then and a pointer to it should be 2078 * passed to this function every next time. 2079 * 2080 * On success, the function returns a positive number. Then, the caller 2081 * is allowed to read up to the returned number of bytes from the memory 2082 * location computed by the data_of() macro. 2083 * 2084 * The function returns 0 to indicate the end of the data stream condition, 2085 * and negative numbers are returned on errors. If that happens, the structure 2086 * pointed to by @handle is not updated and should not be used any more. 2087 */ 2088 int snapshot_read_next(struct snapshot_handle *handle) 2089 { 2090 if (handle->cur > nr_meta_pages + nr_copy_pages) 2091 return 0; 2092 2093 if (!buffer) { 2094 /* This makes the buffer be freed by swsusp_free() */ 2095 buffer = get_image_page(GFP_ATOMIC, PG_ANY); 2096 if (!buffer) 2097 return -ENOMEM; 2098 } 2099 if (!handle->cur) { 2100 int error; 2101 2102 error = init_header((struct swsusp_info *)buffer); 2103 if (error) 2104 return error; 2105 handle->buffer = buffer; 2106 memory_bm_position_reset(&orig_bm); 2107 memory_bm_position_reset(©_bm); 2108 } else if (handle->cur <= nr_meta_pages) { 2109 clear_page(buffer); 2110 pack_pfns(buffer, &orig_bm); 2111 } else { 2112 struct page *page; 2113 2114 page = pfn_to_page(memory_bm_next_pfn(©_bm)); 2115 if (PageHighMem(page)) { 2116 /* 2117 * Highmem pages are copied to the buffer, 2118 * because we can't return with a kmapped 2119 * highmem page (we may not be called again). 2120 */ 2121 void *kaddr; 2122 2123 kaddr = kmap_atomic(page); 2124 copy_page(buffer, kaddr); 2125 kunmap_atomic(kaddr); 2126 handle->buffer = buffer; 2127 } else { 2128 handle->buffer = page_address(page); 2129 } 2130 } 2131 handle->cur++; 2132 return PAGE_SIZE; 2133 } 2134 2135 static void duplicate_memory_bitmap(struct memory_bitmap *dst, 2136 struct memory_bitmap *src) 2137 { 2138 unsigned long pfn; 2139 2140 memory_bm_position_reset(src); 2141 pfn = memory_bm_next_pfn(src); 2142 while (pfn != BM_END_OF_MAP) { 2143 memory_bm_set_bit(dst, pfn); 2144 pfn = memory_bm_next_pfn(src); 2145 } 2146 } 2147 2148 /** 2149 * mark_unsafe_pages - Mark pages that were used before hibernation. 2150 * 2151 * Mark the pages that cannot be used for storing the image during restoration, 2152 * because they conflict with the pages that had been used before hibernation. 2153 */ 2154 static void mark_unsafe_pages(struct memory_bitmap *bm) 2155 { 2156 unsigned long pfn; 2157 2158 /* Clear the "free"/"unsafe" bit for all PFNs */ 2159 memory_bm_position_reset(free_pages_map); 2160 pfn = memory_bm_next_pfn(free_pages_map); 2161 while (pfn != BM_END_OF_MAP) { 2162 memory_bm_clear_current(free_pages_map); 2163 pfn = memory_bm_next_pfn(free_pages_map); 2164 } 2165 2166 /* Mark pages that correspond to the "original" PFNs as "unsafe" */ 2167 duplicate_memory_bitmap(free_pages_map, bm); 2168 2169 allocated_unsafe_pages = 0; 2170 } 2171 2172 static int check_header(struct swsusp_info *info) 2173 { 2174 char *reason; 2175 2176 reason = check_image_kernel(info); 2177 if (!reason && info->num_physpages != get_num_physpages()) 2178 reason = "memory size"; 2179 if (reason) { 2180 pr_err("Image mismatch: %s\n", reason); 2181 return -EPERM; 2182 } 2183 return 0; 2184 } 2185 2186 /** 2187 * load header - Check the image header and copy the data from it. 2188 */ 2189 static int load_header(struct swsusp_info *info) 2190 { 2191 int error; 2192 2193 restore_pblist = NULL; 2194 error = check_header(info); 2195 if (!error) { 2196 nr_copy_pages = info->image_pages; 2197 nr_meta_pages = info->pages - info->image_pages - 1; 2198 } 2199 return error; 2200 } 2201 2202 /** 2203 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap. 2204 * @bm: Memory bitmap. 2205 * @buf: Area of memory containing the PFNs. 2206 * 2207 * For each element of the array pointed to by @buf (1 page at a time), set the 2208 * corresponding bit in @bm. 2209 */ 2210 static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm) 2211 { 2212 int j; 2213 2214 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) { 2215 if (unlikely(buf[j] == BM_END_OF_MAP)) 2216 break; 2217 2218 /* Extract and buffer page key for data page (s390 only). */ 2219 page_key_memorize(buf + j); 2220 2221 if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j])) 2222 memory_bm_set_bit(bm, buf[j]); 2223 else 2224 return -EFAULT; 2225 } 2226 2227 return 0; 2228 } 2229 2230 #ifdef CONFIG_HIGHMEM 2231 /* 2232 * struct highmem_pbe is used for creating the list of highmem pages that 2233 * should be restored atomically during the resume from disk, because the page 2234 * frames they have occupied before the suspend are in use. 2235 */ 2236 struct highmem_pbe { 2237 struct page *copy_page; /* data is here now */ 2238 struct page *orig_page; /* data was here before the suspend */ 2239 struct highmem_pbe *next; 2240 }; 2241 2242 /* 2243 * List of highmem PBEs needed for restoring the highmem pages that were 2244 * allocated before the suspend and included in the suspend image, but have 2245 * also been allocated by the "resume" kernel, so their contents cannot be 2246 * written directly to their "original" page frames. 2247 */ 2248 static struct highmem_pbe *highmem_pblist; 2249 2250 /** 2251 * count_highmem_image_pages - Compute the number of highmem pages in the image. 2252 * @bm: Memory bitmap. 2253 * 2254 * The bits in @bm that correspond to image pages are assumed to be set. 2255 */ 2256 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) 2257 { 2258 unsigned long pfn; 2259 unsigned int cnt = 0; 2260 2261 memory_bm_position_reset(bm); 2262 pfn = memory_bm_next_pfn(bm); 2263 while (pfn != BM_END_OF_MAP) { 2264 if (PageHighMem(pfn_to_page(pfn))) 2265 cnt++; 2266 2267 pfn = memory_bm_next_pfn(bm); 2268 } 2269 return cnt; 2270 } 2271 2272 static unsigned int safe_highmem_pages; 2273 2274 static struct memory_bitmap *safe_highmem_bm; 2275 2276 /** 2277 * prepare_highmem_image - Allocate memory for loading highmem data from image. 2278 * @bm: Pointer to an uninitialized memory bitmap structure. 2279 * @nr_highmem_p: Pointer to the number of highmem image pages. 2280 * 2281 * Try to allocate as many highmem pages as there are highmem image pages 2282 * (@nr_highmem_p points to the variable containing the number of highmem image 2283 * pages). The pages that are "safe" (ie. will not be overwritten when the 2284 * hibernation image is restored entirely) have the corresponding bits set in 2285 * @bm (it must be unitialized). 2286 * 2287 * NOTE: This function should not be called if there are no highmem image pages. 2288 */ 2289 static int prepare_highmem_image(struct memory_bitmap *bm, 2290 unsigned int *nr_highmem_p) 2291 { 2292 unsigned int to_alloc; 2293 2294 if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE)) 2295 return -ENOMEM; 2296 2297 if (get_highmem_buffer(PG_SAFE)) 2298 return -ENOMEM; 2299 2300 to_alloc = count_free_highmem_pages(); 2301 if (to_alloc > *nr_highmem_p) 2302 to_alloc = *nr_highmem_p; 2303 else 2304 *nr_highmem_p = to_alloc; 2305 2306 safe_highmem_pages = 0; 2307 while (to_alloc-- > 0) { 2308 struct page *page; 2309 2310 page = alloc_page(__GFP_HIGHMEM); 2311 if (!swsusp_page_is_free(page)) { 2312 /* The page is "safe", set its bit the bitmap */ 2313 memory_bm_set_bit(bm, page_to_pfn(page)); 2314 safe_highmem_pages++; 2315 } 2316 /* Mark the page as allocated */ 2317 swsusp_set_page_forbidden(page); 2318 swsusp_set_page_free(page); 2319 } 2320 memory_bm_position_reset(bm); 2321 safe_highmem_bm = bm; 2322 return 0; 2323 } 2324 2325 static struct page *last_highmem_page; 2326 2327 /** 2328 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page. 2329 * 2330 * For a given highmem image page get a buffer that suspend_write_next() should 2331 * return to its caller to write to. 2332 * 2333 * If the page is to be saved to its "original" page frame or a copy of 2334 * the page is to be made in the highmem, @buffer is returned. Otherwise, 2335 * the copy of the page is to be made in normal memory, so the address of 2336 * the copy is returned. 2337 * 2338 * If @buffer is returned, the caller of suspend_write_next() will write 2339 * the page's contents to @buffer, so they will have to be copied to the 2340 * right location on the next call to suspend_write_next() and it is done 2341 * with the help of copy_last_highmem_page(). For this purpose, if 2342 * @buffer is returned, @last_highmem_page is set to the page to which 2343 * the data will have to be copied from @buffer. 2344 */ 2345 static void *get_highmem_page_buffer(struct page *page, 2346 struct chain_allocator *ca) 2347 { 2348 struct highmem_pbe *pbe; 2349 void *kaddr; 2350 2351 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) { 2352 /* 2353 * We have allocated the "original" page frame and we can 2354 * use it directly to store the loaded page. 2355 */ 2356 last_highmem_page = page; 2357 return buffer; 2358 } 2359 /* 2360 * The "original" page frame has not been allocated and we have to 2361 * use a "safe" page frame to store the loaded page. 2362 */ 2363 pbe = chain_alloc(ca, sizeof(struct highmem_pbe)); 2364 if (!pbe) { 2365 swsusp_free(); 2366 return ERR_PTR(-ENOMEM); 2367 } 2368 pbe->orig_page = page; 2369 if (safe_highmem_pages > 0) { 2370 struct page *tmp; 2371 2372 /* Copy of the page will be stored in high memory */ 2373 kaddr = buffer; 2374 tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm)); 2375 safe_highmem_pages--; 2376 last_highmem_page = tmp; 2377 pbe->copy_page = tmp; 2378 } else { 2379 /* Copy of the page will be stored in normal memory */ 2380 kaddr = safe_pages_list; 2381 safe_pages_list = safe_pages_list->next; 2382 pbe->copy_page = virt_to_page(kaddr); 2383 } 2384 pbe->next = highmem_pblist; 2385 highmem_pblist = pbe; 2386 return kaddr; 2387 } 2388 2389 /** 2390 * copy_last_highmem_page - Copy most the most recent highmem image page. 2391 * 2392 * Copy the contents of a highmem image from @buffer, where the caller of 2393 * snapshot_write_next() has stored them, to the right location represented by 2394 * @last_highmem_page . 2395 */ 2396 static void copy_last_highmem_page(void) 2397 { 2398 if (last_highmem_page) { 2399 void *dst; 2400 2401 dst = kmap_atomic(last_highmem_page); 2402 copy_page(dst, buffer); 2403 kunmap_atomic(dst); 2404 last_highmem_page = NULL; 2405 } 2406 } 2407 2408 static inline int last_highmem_page_copied(void) 2409 { 2410 return !last_highmem_page; 2411 } 2412 2413 static inline void free_highmem_data(void) 2414 { 2415 if (safe_highmem_bm) 2416 memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR); 2417 2418 if (buffer) 2419 free_image_page(buffer, PG_UNSAFE_CLEAR); 2420 } 2421 #else 2422 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; } 2423 2424 static inline int prepare_highmem_image(struct memory_bitmap *bm, 2425 unsigned int *nr_highmem_p) { return 0; } 2426 2427 static inline void *get_highmem_page_buffer(struct page *page, 2428 struct chain_allocator *ca) 2429 { 2430 return ERR_PTR(-EINVAL); 2431 } 2432 2433 static inline void copy_last_highmem_page(void) {} 2434 static inline int last_highmem_page_copied(void) { return 1; } 2435 static inline void free_highmem_data(void) {} 2436 #endif /* CONFIG_HIGHMEM */ 2437 2438 #define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe)) 2439 2440 /** 2441 * prepare_image - Make room for loading hibernation image. 2442 * @new_bm: Unitialized memory bitmap structure. 2443 * @bm: Memory bitmap with unsafe pages marked. 2444 * 2445 * Use @bm to mark the pages that will be overwritten in the process of 2446 * restoring the system memory state from the suspend image ("unsafe" pages) 2447 * and allocate memory for the image. 2448 * 2449 * The idea is to allocate a new memory bitmap first and then allocate 2450 * as many pages as needed for image data, but without specifying what those 2451 * pages will be used for just yet. Instead, we mark them all as allocated and 2452 * create a lists of "safe" pages to be used later. On systems with high 2453 * memory a list of "safe" highmem pages is created too. 2454 */ 2455 static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm) 2456 { 2457 unsigned int nr_pages, nr_highmem; 2458 struct linked_page *lp; 2459 int error; 2460 2461 /* If there is no highmem, the buffer will not be necessary */ 2462 free_image_page(buffer, PG_UNSAFE_CLEAR); 2463 buffer = NULL; 2464 2465 nr_highmem = count_highmem_image_pages(bm); 2466 mark_unsafe_pages(bm); 2467 2468 error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE); 2469 if (error) 2470 goto Free; 2471 2472 duplicate_memory_bitmap(new_bm, bm); 2473 memory_bm_free(bm, PG_UNSAFE_KEEP); 2474 if (nr_highmem > 0) { 2475 error = prepare_highmem_image(bm, &nr_highmem); 2476 if (error) 2477 goto Free; 2478 } 2479 /* 2480 * Reserve some safe pages for potential later use. 2481 * 2482 * NOTE: This way we make sure there will be enough safe pages for the 2483 * chain_alloc() in get_buffer(). It is a bit wasteful, but 2484 * nr_copy_pages cannot be greater than 50% of the memory anyway. 2485 * 2486 * nr_copy_pages cannot be less than allocated_unsafe_pages too. 2487 */ 2488 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages; 2489 nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE); 2490 while (nr_pages > 0) { 2491 lp = get_image_page(GFP_ATOMIC, PG_SAFE); 2492 if (!lp) { 2493 error = -ENOMEM; 2494 goto Free; 2495 } 2496 lp->next = safe_pages_list; 2497 safe_pages_list = lp; 2498 nr_pages--; 2499 } 2500 /* Preallocate memory for the image */ 2501 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages; 2502 while (nr_pages > 0) { 2503 lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC); 2504 if (!lp) { 2505 error = -ENOMEM; 2506 goto Free; 2507 } 2508 if (!swsusp_page_is_free(virt_to_page(lp))) { 2509 /* The page is "safe", add it to the list */ 2510 lp->next = safe_pages_list; 2511 safe_pages_list = lp; 2512 } 2513 /* Mark the page as allocated */ 2514 swsusp_set_page_forbidden(virt_to_page(lp)); 2515 swsusp_set_page_free(virt_to_page(lp)); 2516 nr_pages--; 2517 } 2518 return 0; 2519 2520 Free: 2521 swsusp_free(); 2522 return error; 2523 } 2524 2525 /** 2526 * get_buffer - Get the address to store the next image data page. 2527 * 2528 * Get the address that snapshot_write_next() should return to its caller to 2529 * write to. 2530 */ 2531 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca) 2532 { 2533 struct pbe *pbe; 2534 struct page *page; 2535 unsigned long pfn = memory_bm_next_pfn(bm); 2536 2537 if (pfn == BM_END_OF_MAP) 2538 return ERR_PTR(-EFAULT); 2539 2540 page = pfn_to_page(pfn); 2541 if (PageHighMem(page)) 2542 return get_highmem_page_buffer(page, ca); 2543 2544 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) 2545 /* 2546 * We have allocated the "original" page frame and we can 2547 * use it directly to store the loaded page. 2548 */ 2549 return page_address(page); 2550 2551 /* 2552 * The "original" page frame has not been allocated and we have to 2553 * use a "safe" page frame to store the loaded page. 2554 */ 2555 pbe = chain_alloc(ca, sizeof(struct pbe)); 2556 if (!pbe) { 2557 swsusp_free(); 2558 return ERR_PTR(-ENOMEM); 2559 } 2560 pbe->orig_address = page_address(page); 2561 pbe->address = safe_pages_list; 2562 safe_pages_list = safe_pages_list->next; 2563 pbe->next = restore_pblist; 2564 restore_pblist = pbe; 2565 return pbe->address; 2566 } 2567 2568 /** 2569 * snapshot_write_next - Get the address to store the next image page. 2570 * @handle: Snapshot handle structure to guide the writing. 2571 * 2572 * On the first call, @handle should point to a zeroed snapshot_handle 2573 * structure. The structure gets populated then and a pointer to it should be 2574 * passed to this function every next time. 2575 * 2576 * On success, the function returns a positive number. Then, the caller 2577 * is allowed to write up to the returned number of bytes to the memory 2578 * location computed by the data_of() macro. 2579 * 2580 * The function returns 0 to indicate the "end of file" condition. Negative 2581 * numbers are returned on errors, in which cases the structure pointed to by 2582 * @handle is not updated and should not be used any more. 2583 */ 2584 int snapshot_write_next(struct snapshot_handle *handle) 2585 { 2586 static struct chain_allocator ca; 2587 int error = 0; 2588 2589 /* Check if we have already loaded the entire image */ 2590 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) 2591 return 0; 2592 2593 handle->sync_read = 1; 2594 2595 if (!handle->cur) { 2596 if (!buffer) 2597 /* This makes the buffer be freed by swsusp_free() */ 2598 buffer = get_image_page(GFP_ATOMIC, PG_ANY); 2599 2600 if (!buffer) 2601 return -ENOMEM; 2602 2603 handle->buffer = buffer; 2604 } else if (handle->cur == 1) { 2605 error = load_header(buffer); 2606 if (error) 2607 return error; 2608 2609 safe_pages_list = NULL; 2610 2611 error = memory_bm_create(©_bm, GFP_ATOMIC, PG_ANY); 2612 if (error) 2613 return error; 2614 2615 /* Allocate buffer for page keys. */ 2616 error = page_key_alloc(nr_copy_pages); 2617 if (error) 2618 return error; 2619 2620 hibernate_restore_protection_begin(); 2621 } else if (handle->cur <= nr_meta_pages + 1) { 2622 error = unpack_orig_pfns(buffer, ©_bm); 2623 if (error) 2624 return error; 2625 2626 if (handle->cur == nr_meta_pages + 1) { 2627 error = prepare_image(&orig_bm, ©_bm); 2628 if (error) 2629 return error; 2630 2631 chain_init(&ca, GFP_ATOMIC, PG_SAFE); 2632 memory_bm_position_reset(&orig_bm); 2633 restore_pblist = NULL; 2634 handle->buffer = get_buffer(&orig_bm, &ca); 2635 handle->sync_read = 0; 2636 if (IS_ERR(handle->buffer)) 2637 return PTR_ERR(handle->buffer); 2638 } 2639 } else { 2640 copy_last_highmem_page(); 2641 /* Restore page key for data page (s390 only). */ 2642 page_key_write(handle->buffer); 2643 hibernate_restore_protect_page(handle->buffer); 2644 handle->buffer = get_buffer(&orig_bm, &ca); 2645 if (IS_ERR(handle->buffer)) 2646 return PTR_ERR(handle->buffer); 2647 if (handle->buffer != buffer) 2648 handle->sync_read = 0; 2649 } 2650 handle->cur++; 2651 return PAGE_SIZE; 2652 } 2653 2654 /** 2655 * snapshot_write_finalize - Complete the loading of a hibernation image. 2656 * 2657 * Must be called after the last call to snapshot_write_next() in case the last 2658 * page in the image happens to be a highmem page and its contents should be 2659 * stored in highmem. Additionally, it recycles bitmap memory that's not 2660 * necessary any more. 2661 */ 2662 void snapshot_write_finalize(struct snapshot_handle *handle) 2663 { 2664 copy_last_highmem_page(); 2665 /* Restore page key for data page (s390 only). */ 2666 page_key_write(handle->buffer); 2667 page_key_free(); 2668 hibernate_restore_protect_page(handle->buffer); 2669 /* Do that only if we have loaded the image entirely */ 2670 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) { 2671 memory_bm_recycle(&orig_bm); 2672 free_highmem_data(); 2673 } 2674 } 2675 2676 int snapshot_image_loaded(struct snapshot_handle *handle) 2677 { 2678 return !(!nr_copy_pages || !last_highmem_page_copied() || 2679 handle->cur <= nr_meta_pages + nr_copy_pages); 2680 } 2681 2682 #ifdef CONFIG_HIGHMEM 2683 /* Assumes that @buf is ready and points to a "safe" page */ 2684 static inline void swap_two_pages_data(struct page *p1, struct page *p2, 2685 void *buf) 2686 { 2687 void *kaddr1, *kaddr2; 2688 2689 kaddr1 = kmap_atomic(p1); 2690 kaddr2 = kmap_atomic(p2); 2691 copy_page(buf, kaddr1); 2692 copy_page(kaddr1, kaddr2); 2693 copy_page(kaddr2, buf); 2694 kunmap_atomic(kaddr2); 2695 kunmap_atomic(kaddr1); 2696 } 2697 2698 /** 2699 * restore_highmem - Put highmem image pages into their original locations. 2700 * 2701 * For each highmem page that was in use before hibernation and is included in 2702 * the image, and also has been allocated by the "restore" kernel, swap its 2703 * current contents with the previous (ie. "before hibernation") ones. 2704 * 2705 * If the restore eventually fails, we can call this function once again and 2706 * restore the highmem state as seen by the restore kernel. 2707 */ 2708 int restore_highmem(void) 2709 { 2710 struct highmem_pbe *pbe = highmem_pblist; 2711 void *buf; 2712 2713 if (!pbe) 2714 return 0; 2715 2716 buf = get_image_page(GFP_ATOMIC, PG_SAFE); 2717 if (!buf) 2718 return -ENOMEM; 2719 2720 while (pbe) { 2721 swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf); 2722 pbe = pbe->next; 2723 } 2724 free_image_page(buf, PG_UNSAFE_CLEAR); 2725 return 0; 2726 } 2727 #endif /* CONFIG_HIGHMEM */ 2728