/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/balloon_compaction.h>

#include <asm/tlbflush.h>

#include "internal.h"

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local()
 */
int migrate_prep(void)
{
        /*
         * Clear the LRU lists so pages can be isolated.
         * Note that pages may be moved off the LRU after we have
         * drained them. Those pages will fail to migrate like other
         * pages that may be busy.
         */
        lru_add_drain_all();

        return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
        lru_add_drain();

        return 0;
}
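
/*
 * Informal usage note: migrate_prep() is meant to be called once before a
 * caller starts isolating pages for migration; do_pages_move() further down
 * in this file does exactly that before building its list for
 * migrate_pages().
 */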

/*
 * Add isolated pages on the list back to the LRU under page lock
 * to avoid leaking evictable pages back onto unevictable list.
 */
void putback_lru_pages(struct list_head *l)
{
        struct page *page;
        struct page *page2;

        list_for_each_entry_safe(page, page2, l, lru) {
                list_del(&page->lru);
                dec_zone_page_state(page, NR_ISOLATED_ANON +
                                page_is_file_cache(page));
                putback_lru_page(page);
        }
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function should be used instead of putback_lru_pages() whenever the
 * isolated pageset has been built by isolate_migratepages_range().
 */
void putback_movable_pages(struct list_head *l)
{
        struct page *page;
        struct page *page2;

        list_for_each_entry_safe(page, page2, l, lru) {
                list_del(&page->lru);
                dec_zone_page_state(page, NR_ISOLATED_ANON +
                                page_is_file_cache(page));
                if (unlikely(balloon_page_movable(page)))
                        balloon_page_putback(page);
                else
                        putback_lru_page(page);
        }
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
                                unsigned long addr, void *old)
{
        struct mm_struct *mm = vma->vm_mm;
        swp_entry_t entry;
        pmd_t *pmd;
        pte_t *ptep, pte;
        spinlock_t *ptl;

        if (unlikely(PageHuge(new))) {
                ptep = huge_pte_offset(mm, addr);
                if (!ptep)
                        goto out;
                ptl = &mm->page_table_lock;
        } else {
                pmd = mm_find_pmd(mm, addr);
                if (!pmd)
                        goto out;
                if (pmd_trans_huge(*pmd))
                        goto out;

                ptep = pte_offset_map(pmd, addr);

                /*
                 * Peek to check is_swap_pte() before taking ptlock?  No, we
                 * can race mremap's move_ptes(), which skips anon_vma lock.
                 */

                ptl = pte_lockptr(mm, pmd);
        }

        spin_lock(ptl);
        pte = *ptep;
        if (!is_swap_pte(pte))
                goto unlock;

        entry = pte_to_swp_entry(pte);

        if (!is_migration_entry(entry) ||
            migration_entry_to_page(entry) != old)
                goto unlock;

        get_page(new);
        pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
        if (is_write_migration_entry(entry))
                pte = pte_mkwrite(pte);
#ifdef CONFIG_HUGETLB_PAGE
        if (PageHuge(new))
                pte = pte_mkhuge(pte);
#endif
        flush_cache_page(vma, addr, pte_pfn(pte));
        set_pte_at(mm, addr, ptep, pte);

        if (PageHuge(new)) {
                if (PageAnon(new))
                        hugepage_add_anon_rmap(new, vma, addr);
                else
                        page_dup_rmap(new);
        } else if (PageAnon(new))
                page_add_anon_rmap(new, vma, addr);
        else
                page_add_file_rmap(new);

        /* No need to invalidate - it was non-present before */
        update_mmu_cache(vma, addr, ptep);
unlock:
        pte_unmap_unlock(ptep, ptl);
out:
        return SWAP_AGAIN;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
        rmap_walk(new, remove_migration_pte, old);
}
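
/*
 * How the pieces above fit together (informal overview): try_to_unmap()
 * with TTU_MIGRATION replaces each mapping pte with a migration entry that
 * encodes the old page.  Any thread that faults on such an entry ends up in
 * migration_entry_wait() below and sleeps until the page is unlocked.  Once
 * the copy has succeeded (or failed), remove_migration_ptes() walks the
 * rmap and rewrites every migration entry to point at the new (or, on
 * failure, the old) page via remove_migration_pte().
 */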

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                unsigned long address)
{
        pte_t *ptep, pte;
        spinlock_t *ptl;
        swp_entry_t entry;
        struct page *page;

        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        pte = *ptep;
        if (!is_swap_pte(pte))
                goto out;

        entry = pte_to_swp_entry(pte);
        if (!is_migration_entry(entry))
                goto out;

        page = migration_entry_to_page(entry);

        /*
         * Once the radix-tree replacement of page migration has started,
         * page_count *must* be zero.  And we don't want to call
         * wait_on_page_locked() against a page without a get_page().
         * So we use get_page_unless_zero() here.  Even if it fails, the
         * page fault will occur again.
         */
        if (!get_page_unless_zero(page))
                goto out;
        pte_unmap_unlock(ptep, ptl);
        wait_on_page_locked(page);
        put_page(page);
        return;
out:
        pte_unmap_unlock(ptep, ptl);
}

#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
                                                enum migrate_mode mode)
{
        struct buffer_head *bh = head;

        /* Simple case, sync compaction */
        if (mode != MIGRATE_ASYNC) {
                do {
                        get_bh(bh);
                        lock_buffer(bh);
                        bh = bh->b_this_page;

                } while (bh != head);

                return true;
        }

        /* async case, we cannot block on lock_buffer so use trylock_buffer */
        do {
                get_bh(bh);
                if (!trylock_buffer(bh)) {
                        /*
                         * We failed to lock the buffer and cannot stall in
                         * async migration. Release the taken locks
                         */
                        struct buffer_head *failed_bh = bh;
                        put_bh(failed_bh);
                        bh = head;
                        while (bh != failed_bh) {
                                unlock_buffer(bh);
                                put_bh(bh);
                                bh = bh->b_this_page;
                        }
                        return false;
                }

                bh = bh->b_this_page;
        } while (bh != head);
        return true;
}
#else
static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
                                                enum migrate_mode mode)
{
        return true;
}
#endif /* CONFIG_BLOCK */

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 *   1 for anonymous pages without a mapping
 *   2 for pages with a mapping
 *   3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page,
                struct buffer_head *head, enum migrate_mode mode)
{
        int expected_count;
        void **pslot;

        if (!mapping) {
                /* Anonymous page without mapping */
                if (page_count(page) != 1)
                        return -EAGAIN;
                return MIGRATEPAGE_SUCCESS;
        }

        spin_lock_irq(&mapping->tree_lock);

        pslot = radix_tree_lookup_slot(&mapping->page_tree,
                                        page_index(page));

        expected_count = 2 + page_has_private(page);
        if (page_count(page) != expected_count ||
                radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }

        if (!page_freeze_refs(page, expected_count)) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }

        /*
         * In the async migration case of moving a page with buffers, lock the
         * buffers using trylock before the mapping is moved.  If the mapping
         * was moved and we later failed to lock the buffers, we could not
         * move the mapping back due to an elevated page count and would have
         * to block waiting on other references to be dropped.
         */
        if (mode == MIGRATE_ASYNC && head &&
                        !buffer_migrate_lock_buffers(head, mode)) {
                page_unfreeze_refs(page, expected_count);
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }

        /*
         * Now we know that no one else is looking at the page.
         */
        get_page(newpage);      /* add cache reference */
        if (PageSwapCache(page)) {
                SetPageSwapCache(newpage);
                set_page_private(newpage, page_private(page));
        }

        radix_tree_replace_slot(pslot, newpage);

        /*
         * Drop cache reference from old page by unfreezing
         * to one less reference.
         * We know this isn't the last reference.
         */
        page_unfreeze_refs(page, expected_count - 1);

        /*
         * If moved to a different zone then also account
         * the page for that zone. Other VM counters will be
         * taken care of when we establish references to the
         * new page and drop references to the old page.
         *
         * Note that anonymous pages are accounted for
         * via NR_FILE_PAGES and NR_ANON_PAGES if they
         * are mapped to swap space.
         */
        __dec_zone_page_state(page, NR_FILE_PAGES);
        __inc_zone_page_state(newpage, NR_FILE_PAGES);
        if (!PageSwapCache(page) && PageSwapBacked(page)) {
                __dec_zone_page_state(page, NR_SHMEM);
                __inc_zone_page_state(newpage, NR_SHMEM);
        }
        spin_unlock_irq(&mapping->tree_lock);

        return MIGRATEPAGE_SUCCESS;
}
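
/*
 * Worked example of the reference arithmetic above (informal): an isolated
 * page-cache page is referenced once by the isolating caller and once by
 * the radix tree, so expected_count is 2; a private reference (buffer
 * heads) makes it 3.  page_freeze_refs() only succeeds when nothing else
 * holds a reference, which is what makes replacing the radix-tree slot
 * safe; the old page is then unfrozen to expected_count - 1 because the
 * cache reference has moved over to the new page.
 */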

/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
                                   struct page *newpage, struct page *page)
{
        int expected_count;
        void **pslot;

        if (!mapping) {
                if (page_count(page) != 1)
                        return -EAGAIN;
                return MIGRATEPAGE_SUCCESS;
        }

        spin_lock_irq(&mapping->tree_lock);

        pslot = radix_tree_lookup_slot(&mapping->page_tree,
                                        page_index(page));

        expected_count = 2 + page_has_private(page);
        if (page_count(page) != expected_count ||
                radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }

        if (!page_freeze_refs(page, expected_count)) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }

        get_page(newpage);

        radix_tree_replace_slot(pslot, newpage);

        page_unfreeze_refs(page, expected_count - 1);

        spin_unlock_irq(&mapping->tree_lock);
        return MIGRATEPAGE_SUCCESS;
}

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
        if (PageHuge(page))
                copy_huge_page(newpage, page);
        else
                copy_highpage(newpage, page);

        if (PageError(page))
                SetPageError(newpage);
        if (PageReferenced(page))
                SetPageReferenced(newpage);
        if (PageUptodate(page))
                SetPageUptodate(newpage);
        if (TestClearPageActive(page)) {
                VM_BUG_ON(PageUnevictable(page));
                SetPageActive(newpage);
        } else if (TestClearPageUnevictable(page))
                SetPageUnevictable(newpage);
        if (PageChecked(page))
                SetPageChecked(newpage);
        if (PageMappedToDisk(page))
                SetPageMappedToDisk(newpage);

        if (PageDirty(page)) {
                clear_page_dirty_for_io(page);
                /*
                 * Want to mark the page and the radix tree as dirty, and
                 * redo the accounting that clear_page_dirty_for_io undid,
                 * but we can't use set_page_dirty because that function
                 * is actually a signal that all of the page has become dirty.
                 * Whereas only part of our page may be dirty.
                 */
                if (PageSwapBacked(page))
                        SetPageDirty(newpage);
                else
                        __set_page_dirty_nobuffers(newpage);
        }

        mlock_migrate_page(newpage, page);
        ksm_migrate_page(newpage, page);

        ClearPageSwapCache(page);
        ClearPagePrivate(page);
        set_page_private(page, 0);

        /*
         * If any waiters have accumulated on the new page then
         * wake them up.
         */
        if (PageWriteback(newpage))
                end_page_writeback(newpage);
}

/************************************************************
 * Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
                        struct page *newpage, struct page *page)
{
        return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page,
                enum migrate_mode mode)
{
        int rc;

        BUG_ON(PageWriteback(page));    /* Writeback must be complete */

        rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);

        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;

        migrate_page_copy(newpage, page);
        return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page, enum migrate_mode mode)
{
        struct buffer_head *bh, *head;
        int rc;

        if (!page_has_buffers(page))
                return migrate_page(mapping, newpage, page, mode);

        head = page_buffers(page);

        rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);

        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;

        /*
         * In the async case, migrate_page_move_mapping locked the buffers
         * with an IRQ-safe spinlock held. In the sync case, the buffers
         * need to be locked now
         */
        if (mode != MIGRATE_ASYNC)
                BUG_ON(!buffer_migrate_lock_buffers(head, mode));

        ClearPagePrivate(page);
        set_page_private(newpage, page_private(page));
        set_page_private(page, 0);
        put_page(page);
        get_page(newpage);

        bh = head;
        do {
                set_bh_page(bh, newpage, bh_offset(bh));
                bh = bh->b_this_page;

        } while (bh != head);

        SetPagePrivate(newpage);

        migrate_page_copy(newpage, page);

        bh = head;
        do {
                unlock_buffer(bh);
                put_bh(bh);
                bh = bh->b_this_page;

        } while (bh != head);

        return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif
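
/*
 * Informal note: which of the helpers above actually runs is decided in
 * move_to_new_page() below.  A page without a mapping goes through
 * migrate_page() directly, a mapping that provides a migratepage callback
 * (swap space does, and filesystems may point it at buffer_migrate_page())
 * uses that callback, and everything else is handled by
 * fallback_migrate_page(), which may have to write a dirty page out first.
 */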

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = 1,
                .range_start = 0,
                .range_end = LLONG_MAX,
                .for_reclaim = 1
        };
        int rc;

        if (!mapping->a_ops->writepage)
                /* No write method for the address space */
                return -EINVAL;

        if (!clear_page_dirty_for_io(page))
                /* Someone else already triggered a write */
                return -EAGAIN;

        /*
         * A dirty page may imply that the underlying filesystem has
         * the page on some queue. So the page must be clean for
         * migration. Writeout may mean we lose the lock and the
         * page state is no longer what we checked for earlier.
         * At this point we know that the migration attempt cannot
         * be successful.
         */
        remove_migration_ptes(page, page);

        rc = mapping->a_ops->writepage(page, &wbc);

        if (rc != AOP_WRITEPAGE_ACTIVATE)
                /* unlocked. Relock */
                lock_page(page);

        return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
        struct page *newpage, struct page *page, enum migrate_mode mode)
{
        if (PageDirty(page)) {
                /* Only writeback pages in full synchronous migration */
                if (mode != MIGRATE_SYNC)
                        return -EBUSY;
                return writeout(mapping, page);
        }

        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
        if (page_has_private(page) &&
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;

        return migrate_page(mapping, newpage, page, mode);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
                                int remap_swapcache, enum migrate_mode mode)
{
        struct address_space *mapping;
        int rc;

        /*
         * Block others from accessing the page when we get around to
         * establishing additional references. We are the only one
         * holding a reference to the new page at this point.
         */
        if (!trylock_page(newpage))
                BUG();

        /* Prepare mapping for the new page.*/
        newpage->index = page->index;
        newpage->mapping = page->mapping;
        if (PageSwapBacked(page))
                SetPageSwapBacked(newpage);

        mapping = page_mapping(page);
        if (!mapping)
                rc = migrate_page(mapping, newpage, page, mode);
        else if (mapping->a_ops->migratepage)
                /*
                 * Most pages have a mapping and most filesystems provide a
                 * migratepage callback. Anonymous pages are part of swap
                 * space which also has its own migratepage callback. This
                 * is the most common path for page migration.
                 */
                rc = mapping->a_ops->migratepage(mapping,
                                                newpage, page, mode);
        else
                rc = fallback_migrate_page(mapping, newpage, page, mode);

        if (rc != MIGRATEPAGE_SUCCESS) {
                newpage->mapping = NULL;
        } else {
                if (remap_swapcache)
                        remove_migration_ptes(page, newpage);
                page->mapping = NULL;
        }

        unlock_page(newpage);

        return rc;
}
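
/*
 * Outline of __unmap_and_move() below (informal summary): lock the old page
 * (giving up early for async or non-forced attempts), charge the new page
 * against the old page's memcg, wait for or skip writeback depending on the
 * migration mode, pin the anon_vma of anonymous pages, convert the ptes to
 * migration entries with try_to_unmap(), call move_to_new_page(), then
 * either restore the migration ptes on failure or drop them on success, and
 * finally uncharge and unlock.
 */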

static int __unmap_and_move(struct page *page, struct page *newpage,
                        int force, bool offlining, enum migrate_mode mode)
{
        int rc = -EAGAIN;
        int remap_swapcache = 1;
        struct mem_cgroup *mem;
        struct anon_vma *anon_vma = NULL;

        if (!trylock_page(page)) {
                if (!force || mode == MIGRATE_ASYNC)
                        goto out;

                /*
                 * It's not safe for direct compaction to call lock_page.
                 * For example, during page readahead pages are added locked
                 * to the LRU. Later, when the IO completes the pages are
                 * marked uptodate and unlocked. However, the queueing
                 * could be merging multiple pages for one bio (e.g.
                 * mpage_readpages). If an allocation happens for the
                 * second or third page, the process can end up locking
                 * the same page twice and deadlocking. Rather than
                 * trying to be clever about what pages can be locked,
                 * avoid the use of lock_page for direct compaction
                 * altogether.
                 */
                if (current->flags & PF_MEMALLOC)
                        goto out;

                lock_page(page);
        }

        /*
         * Only memory hotplug's offline_pages() caller has locked out KSM,
         * and can safely migrate a KSM page.  The other cases have skipped
         * PageKsm along with PageReserved - but it is only now when we have
         * the page lock that we can be certain it will not go KSM beneath us
         * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
         * its page count raised, but only here do we take the page lock which
         * serializes that).
         */
        if (PageKsm(page) && !offlining) {
                rc = -EBUSY;
                goto unlock;
        }

        /* charge against new page */
        mem_cgroup_prepare_migration(page, newpage, &mem);

        if (PageWriteback(page)) {
                /*
                 * Only in the case of a full synchronous migration is it
                 * necessary to wait for PageWriteback. In the async case,
                 * the retry loop is too short and in the sync-light case,
                 * the overhead of stalling is too much
                 */
                if (mode != MIGRATE_SYNC) {
                        rc = -EBUSY;
                        goto uncharge;
                }
                if (!force)
                        goto uncharge;
                wait_on_page_writeback(page);
        }
        /*
         * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
         * we cannot notice that the anon_vma is freed while we migrate a page.
         * This get_anon_vma() delays freeing the anon_vma pointer until the
         * end of migration. File cache pages are no problem because of
         * page_lock(). File caches may use writepage() or lock_page() in
         * migration, so only anonymous pages need care here.
         */
        if (PageAnon(page)) {
                /*
                 * Only page_lock_anon_vma() understands the subtleties of
                 * getting a hold on an anon_vma from outside one of its mms.
                 */
                anon_vma = page_get_anon_vma(page);
                if (anon_vma) {
                        /*
                         * Anon page
                         */
                } else if (PageSwapCache(page)) {
                        /*
                         * We cannot be sure that the anon_vma of an unmapped
                         * swapcache page is safe to use because we don't
                         * know in advance if the VMA that this page belonged
                         * to still exists. If the VMA and others sharing the
                         * data have been freed, then the anon_vma could
                         * already be invalid.
                         *
                         * To avoid this possibility, swapcache pages get
                         * migrated but are not remapped when migration
                         * completes
                         */
                        remap_swapcache = 0;
                } else {
                        goto uncharge;
                }
        }

        if (unlikely(balloon_page_movable(page))) {
                /*
                 * A ballooned page does not need any special attention from
                 * physical to virtual reverse mapping procedures.
                 * Skip any attempt to unmap PTEs or to remap swap cache,
                 * in order to avoid burning cycles at rmap level, and perform
                 * the page migration right away (protected by page lock).
                 */
                rc = balloon_page_migrate(newpage, page, mode);
                goto uncharge;
        }

        /*
         * Corner case handling:
         * 1. When a new swap-cache page is read in, it is added to the LRU
         * and treated as swapcache but it has no rmap yet.
         * Calling try_to_unmap() against a page->mapping==NULL page will
         * trigger a BUG.  So handle it here.
         * 2. An orphaned page (see truncate_complete_page) might have
         * fs-private metadata. The page can be picked up due to memory
         * offlining.  Everywhere else except page reclaim, the page is
         * invisible to the vm, so the page cannot be migrated.  So try to
         * free the metadata, so the page can be freed.
         */
        if (!page->mapping) {
                VM_BUG_ON(PageAnon(page));
                if (page_has_private(page)) {
                        try_to_free_buffers(page);
                        goto uncharge;
                }
                goto skip_unmap;
        }

        /* Establish migration ptes or remove ptes */
        try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

skip_unmap:
        if (!page_mapped(page))
                rc = move_to_new_page(newpage, page, remap_swapcache, mode);

        if (rc && remap_swapcache)
                remove_migration_ptes(page, page);

        /* Drop an anon_vma reference if we took one */
        if (anon_vma)
                put_anon_vma(anon_vma);

uncharge:
        mem_cgroup_end_migration(mem, page, newpage,
                                 (rc == MIGRATEPAGE_SUCCESS ||
                                  rc == MIGRATEPAGE_BALLOON_SUCCESS));
unlock:
        unlock_page(page);
out:
        return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                        struct page *page, int force, bool offlining,
                        enum migrate_mode mode)
{
        int rc = 0;
        int *result = NULL;
        struct page *newpage = get_new_page(page, private, &result);

        if (!newpage)
                return -ENOMEM;

        if (page_count(page) == 1) {
                /* page was freed from under us. So we are done. */
                goto out;
        }

        if (unlikely(PageTransHuge(page)))
                if (unlikely(split_huge_page(page)))
                        goto out;

        rc = __unmap_and_move(page, newpage, force, offlining, mode);

        if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) {
                /*
                 * A ballooned page has been migrated already.
                 * Now it is time to wrap up counters, hand the page back
                 * to the buddy allocator and return.
                 */
                dec_zone_page_state(page, NR_ISOLATED_ANON +
                                    page_is_file_cache(page));
                balloon_page_free(page);
                return MIGRATEPAGE_SUCCESS;
        }
out:
        if (rc != -EAGAIN) {
                /*
                 * A page that has been migrated has all references
                 * removed and will be freed. A page that has not been
                 * migrated will have kept its references and be
                 * restored.
                 */
                list_del(&page->lru);
                dec_zone_page_state(page, NR_ISOLATED_ANON +
                                page_is_file_cache(page));
                putback_lru_page(page);
        }
        /*
         * Move the new page to the LRU. If migration was not successful
         * then this will free the page.
         */
        putback_lru_page(newpage);
        if (result) {
                if (rc)
                        *result = rc;
                else
                        *result = page_to_nid(newpage);
        }
        return rc;
}

/*
 * Counterpart of unmap_and_move() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and direct I/O
 * code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
                                unsigned long private, struct page *hpage,
                                int force, bool offlining,
                                enum migrate_mode mode)
{
        int rc = 0;
        int *result = NULL;
        struct page *new_hpage = get_new_page(hpage, private, &result);
        struct anon_vma *anon_vma = NULL;

        if (!new_hpage)
                return -ENOMEM;

        rc = -EAGAIN;

        if (!trylock_page(hpage)) {
                if (!force || mode != MIGRATE_SYNC)
                        goto out;
                lock_page(hpage);
        }

        if (PageAnon(hpage))
                anon_vma = page_get_anon_vma(hpage);

        try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

        if (!page_mapped(hpage))
                rc = move_to_new_page(new_hpage, hpage, 1, mode);

        if (rc)
                remove_migration_ptes(hpage, hpage);

        if (anon_vma)
                put_anon_vma(anon_vma);

        if (!rc)
                hugetlb_cgroup_migrate(hpage, new_hpage);

        unlock_page(hpage);
out:
        put_page(new_hpage);
        if (result) {
                if (rc)
                        *result = rc;
                else
                        *result = page_to_nid(new_hpage);
        }
        return rc;
}
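
/*
 * Informal caller-side sketch of the interface implemented below (the
 * callback name alloc_target is a placeholder chosen for illustration):
 *
 *      LIST_HEAD(pagelist);
 *
 *      migrate_prep();
 *      ... isolate_lru_page() each candidate and add it to pagelist ...
 *      err = migrate_pages(&pagelist, alloc_target, private, false,
 *                          MIGRATE_SYNC);
 *      if (err)
 *              putback_lru_pages(&pagelist);
 *
 * do_move_page_to_node_array() below is a real in-tree caller that follows
 * this pattern.
 */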

/*
 * migrate_pages
 *
 * The function takes one list of pages to migrate and a callback that,
 * given the page to be migrated and the private data, chooses the target
 * of the move and allocates the new page.
 *
 * The function returns after 10 attempts or if no pages are movable any
 * more because the list has become empty or no retryable pages exist any
 * more.  The caller should call putback_lru_pages() to return pages to the
 * LRU or free list only if ret != 0.
 *
 * Return: Number of pages not migrated or error code.
 */
int migrate_pages(struct list_head *from,
                new_page_t get_new_page, unsigned long private, bool offlining,
                enum migrate_mode mode)
{
        int retry = 1;
        int nr_failed = 0;
        int pass = 0;
        struct page *page;
        struct page *page2;
        int swapwrite = current->flags & PF_SWAPWRITE;
        int rc;

        if (!swapwrite)
                current->flags |= PF_SWAPWRITE;

        for(pass = 0; pass < 10 && retry; pass++) {
                retry = 0;

                list_for_each_entry_safe(page, page2, from, lru) {
                        cond_resched();

                        rc = unmap_and_move(get_new_page, private,
                                                page, pass > 2, offlining,
                                                mode);

                        switch(rc) {
                        case -ENOMEM:
                                goto out;
                        case -EAGAIN:
                                retry++;
                                break;
                        case MIGRATEPAGE_SUCCESS:
                                break;
                        default:
                                /* Permanent failure */
                                nr_failed++;
                                break;
                        }
                }
        }
        rc = nr_failed + retry;
out:
        if (!swapwrite)
                current->flags &= ~PF_SWAPWRITE;

        return rc;
}

int migrate_huge_page(struct page *hpage, new_page_t get_new_page,
                      unsigned long private, bool offlining,
                      enum migrate_mode mode)
{
        int pass, rc;

        for (pass = 0; pass < 10; pass++) {
                rc = unmap_and_move_huge_page(get_new_page,
                                              private, hpage, pass > 2, offlining,
                                              mode);
                switch (rc) {
                case -ENOMEM:
                        goto out;
                case -EAGAIN:
                        /* try again */
                        cond_resched();
                        break;
                case MIGRATEPAGE_SUCCESS:
                        goto out;
                default:
                        rc = -EIO;
                        goto out;
                }
        }
out:
        return rc;
}

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
        unsigned long addr;
        struct page *page;
        int node;
        int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
                int **result)
{
        struct page_to_node *pm = (struct page_to_node *)private;

        while (pm->node != MAX_NUMNODES && pm->page != p)
                pm++;

        if (pm->node == MAX_NUMNODES)
                return NULL;

        *result = &pm->status;

        return alloc_pages_exact_node(pm->node,
                                GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 * The pm array ends with node = MAX_NUMNODES.
 */
static int do_move_page_to_node_array(struct mm_struct *mm,
                                      struct page_to_node *pm,
                                      int migrate_all)
{
        int err;
        struct page_to_node *pp;
        LIST_HEAD(pagelist);

        down_read(&mm->mmap_sem);

        /*
         * Build a list of pages to migrate
         */
        for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
                struct vm_area_struct *vma;
                struct page *page;

                err = -EFAULT;
                vma = find_vma(mm, pp->addr);
                if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
                        goto set_status;

                page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT);

                err = PTR_ERR(page);
                if (IS_ERR(page))
                        goto set_status;

                err = -ENOENT;
                if (!page)
                        goto set_status;

                /* Use PageReserved to check for zero page */
                if (PageReserved(page) || PageKsm(page))
                        goto put_and_set;

                pp->page = page;
                err = page_to_nid(page);

                if (err == pp->node)
                        /*
                         * Node already in the right place
                         */
                        goto put_and_set;

                err = -EACCES;
                if (page_mapcount(page) > 1 &&
                                !migrate_all)
                        goto put_and_set;

                err = isolate_lru_page(page);
                if (!err) {
                        list_add_tail(&page->lru, &pagelist);
                        inc_zone_page_state(page, NR_ISOLATED_ANON +
                                            page_is_file_cache(page));
                }
put_and_set:
                /*
                 * Either remove the duplicate refcount from
                 * isolate_lru_page() or drop the page ref if it was
                 * not isolated.
                 */
                put_page(page);
set_status:
                pp->status = err;
        }

        err = 0;
        if (!list_empty(&pagelist)) {
                err = migrate_pages(&pagelist, new_page_node,
                                (unsigned long)pm, 0, MIGRATE_SYNC);
                if (err)
                        putback_lru_pages(&pagelist);
        }

        up_read(&mm->mmap_sem);
        return err;
}
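
/*
 * Worked example of the pm array handling (informal; the figures assume a
 * 4KB PAGE_SIZE and a 64-bit build, where sizeof(struct page_to_node) is
 * 24 bytes): do_pages_move() below fits roughly 4096 / 24 = 170 entries
 * into one page and reserves the last slot for the end marker, so each
 * chunk carries up to 169 address/node pairs followed by one entry whose
 * node is MAX_NUMNODES, which is how do_move_page_to_node_array() above
 * knows where the chunk ends.
 */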

/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
                         unsigned long nr_pages,
                         const void __user * __user *pages,
                         const int __user *nodes,
                         int __user *status, int flags)
{
        struct page_to_node *pm;
        unsigned long chunk_nr_pages;
        unsigned long chunk_start;
        int err;

        err = -ENOMEM;
        pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
        if (!pm)
                goto out;

        migrate_prep();

        /*
         * Store a chunk of page_to_node array in a page,
         * but keep the last one as a marker
         */
        chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;

        for (chunk_start = 0;
             chunk_start < nr_pages;
             chunk_start += chunk_nr_pages) {
                int j;

                if (chunk_start + chunk_nr_pages > nr_pages)
                        chunk_nr_pages = nr_pages - chunk_start;

                /* fill the chunk pm with addrs and nodes from user-space */
                for (j = 0; j < chunk_nr_pages; j++) {
                        const void __user *p;
                        int node;

                        err = -EFAULT;
                        if (get_user(p, pages + j + chunk_start))
                                goto out_pm;
                        pm[j].addr = (unsigned long) p;

                        if (get_user(node, nodes + j + chunk_start))
                                goto out_pm;

                        err = -ENODEV;
                        if (node < 0 || node >= MAX_NUMNODES)
                                goto out_pm;

                        if (!node_state(node, N_HIGH_MEMORY))
                                goto out_pm;

                        err = -EACCES;
                        if (!node_isset(node, task_nodes))
                                goto out_pm;

                        pm[j].node = node;
                }

                /* End marker for this chunk */
                pm[chunk_nr_pages].node = MAX_NUMNODES;

                /* Migrate this chunk */
                err = do_move_page_to_node_array(mm, pm,
                                                 flags & MPOL_MF_MOVE_ALL);
                if (err < 0)
                        goto out_pm;

                /* Return status information */
                for (j = 0; j < chunk_nr_pages; j++)
                        if (put_user(pm[j].status, status + j + chunk_start)) {
                                err = -EFAULT;
                                goto out_pm;
                        }
        }
        err = 0;

out_pm:
        free_page((unsigned long)pm);
out:
        return err;
}

/*
 * Determine the nodes of an array of pages and store them in an array
 * of status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
                                const void __user **pages, int *status)
{
        unsigned long i;

        down_read(&mm->mmap_sem);

        for (i = 0; i < nr_pages; i++) {
                unsigned long addr = (unsigned long)(*pages);
                struct vm_area_struct *vma;
                struct page *page;
                int err = -EFAULT;

                vma = find_vma(mm, addr);
                if (!vma || addr < vma->vm_start)
                        goto set_status;

                page = follow_page(vma, addr, 0);

                err = PTR_ERR(page);
                if (IS_ERR(page))
                        goto set_status;

                err = -ENOENT;
                /* Use PageReserved to check for zero page */
                if (!page || PageReserved(page) || PageKsm(page))
                        goto set_status;

                err = page_to_nid(page);
set_status:
                *status = err;

                pages++;
                status++;
        }

        up_read(&mm->mmap_sem);
}

/*
 * Determine the nodes of a user array of pages and store them in
 * a user array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
                         const void __user * __user *pages,
                         int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
        const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
        int chunk_status[DO_PAGES_STAT_CHUNK_NR];

        while (nr_pages) {
                unsigned long chunk_nr;

                chunk_nr = nr_pages;
                if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
                        chunk_nr = DO_PAGES_STAT_CHUNK_NR;

                if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
                        break;

                do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

                if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
                        break;

                pages += chunk_nr;
                status += chunk_nr;
                nr_pages -= chunk_nr;
        }
        return nr_pages ? -EFAULT : 0;
}
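
/*
 * Userspace view of the interface implemented below (informal sketch; the
 * move_pages(2) wrapper is typically declared in <numaif.h> by libnuma):
 *
 *      void *pages[1] = { some_addr };         // address inside the target task
 *      int nodes[1] = { 1 };                   // desired destination node
 *      int status[1];
 *
 *      if (move_pages(pid, 1, pages, nodes, status, MPOL_MF_MOVE) == 0)
 *              printf("page is now on node %d\n", status[0]);
 *
 * Passing nodes == NULL turns the call into a pure query that only fills
 * status[], which corresponds to the do_pages_stat() path above; a negative
 * status entry is a per-page errno.
 */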

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
                const void __user * __user *, pages,
                const int __user *, nodes,
                int __user *, status, int, flags)
{
        const struct cred *cred = current_cred(), *tcred;
        struct task_struct *task;
        struct mm_struct *mm;
        int err;
        nodemask_t task_nodes;

        /* Check flags */
        if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
                return -EINVAL;

        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
                return -EPERM;

        /* Find the mm_struct */
        rcu_read_lock();
        task = pid ? find_task_by_vpid(pid) : current;
        if (!task) {
                rcu_read_unlock();
                return -ESRCH;
        }
        get_task_struct(task);

        /*
         * Check if this process has the right to modify the specified
         * process. The right exists if the process has administrative
         * capabilities, superuser privileges or the same
         * userid as the target process.
         */
        tcred = __task_cred(task);
        if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
            !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
            !capable(CAP_SYS_NICE)) {
                rcu_read_unlock();
                err = -EPERM;
                goto out;
        }
        rcu_read_unlock();

        err = security_task_movememory(task);
        if (err)
                goto out;

        task_nodes = cpuset_mems_allowed(task);
        mm = get_task_mm(task);
        put_task_struct(task);

        if (!mm)
                return -EINVAL;

        if (nodes)
                err = do_pages_move(mm, task_nodes, nr_pages, pages,
                                    nodes, status, flags);
        else
                err = do_pages_stat(mm, nr_pages, pages, status);

        mmput(mm);
        return err;

out:
        put_task_struct(task);
        return err;
}

/*
 * Call migration functions in the vma_ops that may prepare
 * memory in a vm for migration. Migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
        const nodemask_t *from, unsigned long flags)
{
        struct vm_area_struct *vma;
        int err = 0;

        for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops->migrate) {
                        err = vma->vm_ops->migrate(vma, to, from, flags);
                        if (err)
                                break;
                }
        }
        return err;
}
#endif