// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pagewalk.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
#include <linux/oom.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local()
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}
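
/*
 * Informal sketch of the usual pattern around these helpers (illustrative
 * only; real callers live in this file and e.g. in mm/mempolicy.c):
 *
 *	migrate_prep();				drain LRU pagevecs on all CPUs
 *	...
 *	if (!isolate_lru_page(page))
 *		list_add_tail(&page->lru, &pagelist);
 *	...					then hand &pagelist to migrate_pages()
 */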

int isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct address_space *mapping;

	/*
	 * Avoid burning cycles with pages that are yet under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount preventing __free_pages() from doing its job
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leakage.
	 */
	if (unlikely(!get_page_unless_zero(page)))
		goto out;

	/*
	 * Check PageMovable before holding a PG_lock because page's owner
	 * assumes anybody doesn't touch PG_lock of newly allocated page
	 * so unconditionally grabbing the lock ruins page's owner side.
	 */
	if (unlikely(!__PageMovable(page)))
		goto out_putpage;
	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as race against a page being released.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!trylock_page(page)))
		goto out_putpage;

	if (!PageMovable(page) || PageIsolated(page))
		goto out_no_isolated;

	mapping = page_mapping(page);
	VM_BUG_ON_PAGE(!mapping, page);

	if (!mapping->a_ops->isolate_page(page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use PG_isolated bit of page->flags */
	WARN_ON_ONCE(PageIsolated(page));
	__SetPageIsolated(page);
	unlock_page(page);

	return 0;

out_no_isolated:
	unlock_page(page);
out_putpage:
	put_page(page);
out:
	return -EBUSY;
}

/* It should be called on page which is PG_movable */
void putback_movable_page(struct page *page)
{
	struct address_space *mapping;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	mapping = page_mapping(page);
	mapping->a_ops->putback_page(page);
	__ClearPageIsolated(page);
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_huge_page().
 */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		/*
		 * We isolated non-lru movable page so here we can use
		 * __PageMovable because LRU page's mapping cannot have
		 * PAGE_MAPPING_MOVABLE.
		 */
		if (unlikely(__PageMovable(page))) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);
			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				__ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		} else {
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_cache(page), -hpage_nr_pages(page));
			putback_lru_page(page);
		}
	}
}
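
/*
 * A driver that wants its pages handled by the non-LRU movable path above
 * registers an address_space whose a_ops provide isolate_page(),
 * migratepage() and putback_page(), and marks each page movable. A rough,
 * illustrative sketch only (the driver names are made up; see
 * __SetPageMovable() and Documentation/vm/page_migration.rst):
 *
 *	static const struct address_space_operations foo_aops = {
 *		.isolate_page	= foo_isolate_page,
 *		.migratepage	= foo_migratepage,
 *		.putback_page	= foo_putback_page,
 *	};
 *
 *	__SetPageMovable(page, foo_mapping);	(with the page locked)
 */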

/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct page_vma_mapped_walk pvmw = {
		.page = old,
		.vma = vma,
		.address = addr,
		.flags = PVMW_SYNC | PVMW_MIGRATION,
	};
	struct page *new;
	pte_t pte;
	swp_entry_t entry;

	VM_BUG_ON_PAGE(PageTail(page), page);
	while (page_vma_mapped_walk(&pvmw)) {
		if (PageKsm(page))
			new = page;
		else
			new = page - pvmw.page->index +
				linear_page_index(vma, pvmw.address);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		get_page(new);
		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
		if (pte_swp_soft_dirty(*pvmw.pte))
			pte = pte_mksoft_dirty(pte);

		/*
		 * Recheck VMA as permissions can change since migration started
		 */
		entry = pte_to_swp_entry(*pvmw.pte);
		if (is_write_migration_entry(entry))
			pte = maybe_mkwrite(pte, vma);

		if (unlikely(is_zone_device_page(new))) {
			if (is_device_private_page(new)) {
				entry = make_device_private_entry(new, pte_write(pte));
				pte = swp_entry_to_pte(entry);
			}
		}

#ifdef CONFIG_HUGETLB_PAGE
		if (PageHuge(new)) {
			pte = pte_mkhuge(pte);
			pte = arch_make_huge_pte(pte, vma, new, 0);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
			if (PageAnon(new))
				hugepage_add_anon_rmap(new, vma, pvmw.address);
			else
				page_dup_rmap(new, true);
		} else
#endif
		{
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);

			if (PageAnon(new))
				page_add_anon_rmap(new, vma, pvmw.address, false);
			else
				page_add_file_rmap(new, false);
		}
		if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
			mlock_vma_page(new);

		if (PageTransHuge(page) && PageMlocked(page))
			clear_page_mlock(page);

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct page *old, struct page *new, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = old,
	};

	if (locked)
		rmap_walk_locked(new, &rwc);
	else
		rmap_walk(new, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;
	struct page *page;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once page cache replacement of page migration started, page_count
	 * is zero; but we must not call put_and_wait_on_page_locked() without
	 * a ref. Use get_page_unless_zero(), and just fault again if it fails.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	put_and_wait_on_page_locked(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);
	__migration_entry_wait(mm, ptep, ptl);
}

void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
	__migration_entry_wait(mm, pte, ptl);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;
	struct page *page;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
	if (!get_page_unless_zero(page))
		goto unlock;
	spin_unlock(ptl);
	put_and_wait_on_page_locked(page);
	return;
unlock:
	spin_unlock(ptl);
}
#endif

static int expected_page_refs(struct address_space *mapping, struct page *page)
{
	int expected_count = 1;

	/*
	 * Device private pages have an extra refcount as they are
	 * ZONE_DEVICE pages.
	 */
	expected_count += is_device_private_page(page);
	if (mapping)
		expected_count += hpage_nr_pages(page) + page_has_private(page);

	return expected_count;
}
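
/*
 * A quick worked example of the arithmetic above (informal, not an
 * exhaustive list of cases):
 *
 *	anonymous page, no mapping:		1 (caller/isolation ref)
 *	order-0 page cache page:		1 + 1 cache ref = 2
 *	page cache page with PagePrivate:	1 + 1 + 1 (buffer heads) = 3
 *	PMD-sized THP in the page cache:	1 + HPAGE_PMD_NR cache refs
 *
 * which matches the remaining-references rule documented in the comment
 * above migrate_page_move_mapping() just below.
 */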

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page, int extra_count)
{
	XA_STATE(xas, &mapping->i_pages, page_index(page));
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = expected_page_refs(mapping, page) + extra_count;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newpage->index = page->index;
		newpage->mapping = page->mapping;
		if (PageSwapBacked(page))
			__SetPageSwapBacked(newpage);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = page_zone(page);
	newzone = page_zone(newpage);

	xas_lock_irq(&xas);
	if (page_count(page) != expected_count || xas_load(&xas) != page) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page:
	 * no turning back from here.
	 */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
	if (PageSwapBacked(page)) {
		__SetPageSwapBacked(newpage);
		if (PageSwapCache(page)) {
			SetPageSwapCache(newpage);
			set_page_private(newpage, page_private(page));
		}
	} else {
		VM_BUG_ON_PAGE(PageSwapCache(page), page);
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = PageDirty(page);
	if (dirty) {
		ClearPageDirty(page);
		SetPageDirty(newpage);
	}

	xas_store(&xas, newpage);
	if (PageTransHuge(page)) {
		int i;

		for (i = 1; i < HPAGE_PMD_NR; i++) {
			xas_next(&xas);
			xas_store(&xas, newpage);
		}
	}

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));

	xas_unlock(&xas);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		__dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
		__inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
		if (PageSwapBacked(page) && !PageSwapCache(page)) {
			__dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
			__inc_node_state(newzone->zone_pgdat, NR_SHMEM);
		}
		if (dirty && mapping_cap_account_dirty(mapping)) {
			__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
			__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
			__inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
			__inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page_move_mapping);

/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	XA_STATE(xas, &mapping->i_pages, page_index(page));
	int expected_count;

	xas_lock_irq(&xas);
	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count || xas_load(&xas) != page) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	newpage->index = page->index;
	newpage->mapping = page->mapping;

	get_page(newpage);

	xas_store(&xas, newpage);

	page_ref_unfreeze(page, expected_count - 1);

	xas_unlock_irq(&xas);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Gigantic pages are so large that we do not guarantee that page++ pointer
 * arithmetic will work across the entire page. We need something more
 * specialized.
 */
static void __copy_gigantic_page(struct page *dst, struct page *src,
				int nr_pages)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < nr_pages; ) {
		cond_resched();
		copy_highpage(dst, src);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

static void copy_huge_page(struct page *dst, struct page *src)
{
	int i;
	int nr_pages;

	if (PageHuge(src)) {
		/* hugetlbfs page */
		struct hstate *h = page_hstate(src);
		nr_pages = pages_per_huge_page(h);

		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
			__copy_gigantic_page(dst, src, nr_pages);
			return;
		}
	} else {
		/* thp page */
		BUG_ON(!PageTransHuge(src));
		nr_pages = hpage_nr_pages(src);
	}

	for (i = 0; i < nr_pages; i++) {
		cond_resched();
		copy_highpage(dst + i, src + i);
	}
}

/*
 * Copy the page to its new location
 */
void migrate_page_states(struct page *newpage, struct page *page)
{
	int cpupid;

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON_PAGE(PageUnevictable(page), page);
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageWorkingset(page))
		SetPageWorkingset(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	/* Move dirty on pages not done by migrate_page_move_mapping() */
	if (PageDirty(page))
		SetPageDirty(newpage);

	if (page_is_young(page))
		set_page_young(newpage);
	if (page_is_idle(page))
		set_page_idle(newpage);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(page, -1);
	page_cpupid_xchg_last(newpage, cpupid);

	ksm_migrate_page(newpage, page);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (PageSwapCache(page))
		ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);

	copy_page_owner(page, newpage);

	mem_cgroup_migrate(page, newpage);
}
EXPORT_SYMBOL(migrate_page_states);

void migrate_page_copy(struct page *newpage, struct page *page)
{
	if (PageHuge(page) || PageTransHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	migrate_page_states(newpage, page);
}
EXPORT_SYMBOL(migrate_page_copy);

/************************************************************
 *                    Migration functions
 ***********************************************************/

/*
 * Common logic to directly migrate a single LRU page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);
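
/*
 * Filesystems whose pages never carry fs-private state can simply point
 * their address_space_operations at the helper above. A hedged,
 * illustrative snippet (the aops name is made up; several in-tree
 * filesystems wire the callback exactly like this):
 *
 *	static const struct address_space_operations foo_aops = {
 *		...
 *		.migratepage	= migrate_page,
 *	};
 */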

#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks
			 */
			struct buffer_head *failed_bh = bh;
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}

static int __buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode,
		bool check_refs)
{
	struct buffer_head *bh, *head;
	int rc;
	int expected_count;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page, mode);

	/* Check whether page does not have extra refs before we do more work */
	expected_count = expected_page_refs(mapping, page);
	if (page_count(page) != expected_count)
		return -EAGAIN;

	head = page_buffers(page);
	if (!buffer_migrate_lock_buffers(head, mode))
		return -EAGAIN;

	if (check_refs) {
		bool busy;
		bool invalidated = false;

recheck_buffers:
		busy = false;
		spin_lock(&mapping->private_lock);
		bh = head;
		do {
			if (atomic_read(&bh->b_count)) {
				busy = true;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
		if (busy) {
			if (invalidated) {
				rc = -EAGAIN;
				goto unlock_buffers;
			}
			spin_unlock(&mapping->private_lock);
			invalidate_bh_lrus();
			invalidated = true;
			goto recheck_buffers;
		}
	}

	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		goto unlock_buffers;

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
	if (check_refs)
		spin_unlock(&mapping->private_lock);
	bh = head;
	do {
		unlock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return rc;
}

/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist. For example, attached buffer heads are accessed only under page lock.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	return __buffer_migrate_page(mapping, newpage, page, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_page);

/*
 * Same as above except that this variant is more careful and checks that there
 * are also no buffer head references. This function is the right one for
 * mappings where buffer heads are directly looked up and referenced (such as
 * block device mappings).
 */
int buffer_migrate_page_norefs(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	return __buffer_migrate_page(mapping, newpage, page, mode, true);
}
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page, false);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page, enum migrate_mode mode)
{
	if (PageDirty(page)) {
		/* Only writeback pages in full synchronous migration */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return -EBUSY;
		}
		return writeout(mapping, page);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

	return migrate_page(mapping, newpage, page, mode);
}
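
/*
 * A brief reminder of how the migrate_mode values used above differ (see
 * include/linux/migrate_mode.h for the authoritative definitions):
 *
 *	MIGRATE_ASYNC		never block; bail out on any contention
 *	MIGRATE_SYNC_LIGHT	allow some blocking, but no writeout
 *	MIGRATE_SYNC		may block and write dirty pages out
 *	MIGRATE_SYNC_NO_COPY	like MIGRATE_SYNC, but the caller (e.g. a
 *				device driver) copies the data itself, so
 *				only page states are transferred here
 */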

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc = -EAGAIN;
	bool is_lru = !__PageMovable(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	mapping = page_mapping(page);

	if (likely(is_lru)) {
		if (!mapping)
			rc = migrate_page(mapping, newpage, page, mode);
		else if (mapping->a_ops->migratepage)
			/*
			 * Most pages have a mapping and most filesystems
			 * provide a migratepage callback. Anonymous pages
			 * are part of swap space which also has its own
			 * migratepage callback. This is the most common path
			 * for page migration.
			 */
			rc = mapping->a_ops->migratepage(mapping, newpage,
							page, mode);
		else
			rc = fallback_migrate_page(mapping, newpage,
							page, mode);
	} else {
		/*
		 * In case of non-lru page, it could be released after
		 * isolation step. In that case, we shouldn't try migration.
		 */
		VM_BUG_ON_PAGE(!PageIsolated(page), page);
		if (!PageMovable(page)) {
			rc = MIGRATEPAGE_SUCCESS;
			__ClearPageIsolated(page);
			goto out;
		}

		rc = mapping->a_ops->migratepage(mapping, newpage,
						page, mode);
		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
			!PageIsolated(page));
	}

	/*
	 * When successful, old pagecache page->mapping must be cleared before
	 * page is freed; but stats require that PageAnon be left as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (__PageMovable(page)) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);

			/*
			 * We clear PG_movable under page_lock so any compactor
			 * cannot try to migrate this page.
			 */
			__ClearPageIsolated(page);
		}

		/*
		 * Anonymous and movable page->mapping will be cleared by
		 * free_pages_prepare so don't reset it here; keeping it
		 * lets checks such as PageAnon() keep working.
		 */
		if (!PageMappingFlags(page))
			page->mapping = NULL;

		if (likely(!is_zone_device_page(newpage)))
			flush_dcache_page(newpage);

	}
out:
	return rc;
}

static int __unmap_and_move(struct page *page, struct page *newpage,
				int force, enum migrate_mode mode)
{
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__PageMovable(page);

	if (!trylock_page(page)) {
		if (!force || mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readpages). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		lock_page(page);
	}

	if (PageWriteback(page)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much
		 */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			rc = -EBUSY;
			goto out_unlock;
		}
		if (!force)
			goto out_unlock;
		wait_on_page_writeback(page);
	}

	/*
	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
	 * we cannot notice that anon_vma is freed while we migrate a page.
	 * This get_anon_vma() delays freeing anon_vma pointer until the end
	 * of migration. File cache pages are no problem because of page_lock():
	 * file caches may use write_page() or lock_page() in migration, so we
	 * only need to care about anonymous pages here.
	 *
	 * Only page_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside one of its mms.
	 * But if we cannot get anon_vma, then we won't need it anyway,
	 * because that implies that the anon page is no longer mapped
	 * (and cannot be remapped so long as we hold the page lock).
	 */
	if (PageAnon(page) && !PageKsm(page))
		anon_vma = page_get_anon_vma(page);

	/*
	 * Block others from accessing the new page when we get around to
	 * establishing additional references. We are usually the only one
	 * holding a reference to newpage at this point. We used to have a BUG
	 * here if trylock_page(newpage) fails, but would like to allow for
	 * cases where there might be a race with the previous use of newpage.
	 * This is much like races on refcount of oldpage: just don't BUG().
	 */
	if (unlikely(!trylock_page(newpage)))
		goto out_unlock;

	if (unlikely(!is_lru)) {
		rc = move_to_new_page(newpage, page, mode);
		goto out_unlock_both;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON_PAGE(PageAnon(page), page);
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto out_unlock_both;
		}
	} else if (page_mapped(page)) {
		/* Establish migration ptes */
		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
				page);
		try_to_unmap(page,
			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
		page_was_mapped = 1;
	}

	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, mode);

	if (page_was_mapped)
		remove_migration_ptes(page,
			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);

out_unlock_both:
	unlock_page(newpage);
out_unlock:
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	unlock_page(page);
out:
	/*
	 * If migration is successful, decrease refcount of the newpage
	 * which will not free the page because new page owner increased
	 * refcounter. As well, if it is LRU page, add the page to LRU
	 * list in here. Use the old state of the isolated source page to
	 * determine if we migrated a LRU page. newpage was already unlocked
	 * and possibly modified by its owner - don't rely on the page
	 * state.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (unlikely(!is_lru))
			put_page(newpage);
		else
			putback_lru_page(newpage);
	}

	return rc;
}

/*
 * gcc 4.7 and 4.8 on arm get an ICE when inlining unmap_and_move().  Work
 * around it.
 */
#if defined(CONFIG_ARM) && \
	defined(GCC_VERSION) && GCC_VERSION < 40900 && GCC_VERSION >= 40700
#define ICE_noinline noinline
#else
#define ICE_noinline
#endif

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static ICE_noinline int unmap_and_move(new_page_t get_new_page,
				   free_page_t put_new_page,
				   unsigned long private, struct page *page,
				   int force, enum migrate_mode mode,
				   enum migrate_reason reason)
{
	int rc = MIGRATEPAGE_SUCCESS;
	struct page *newpage = NULL;

	if (!thp_migration_supported() && PageTransHuge(page))
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		ClearPageActive(page);
		ClearPageUnevictable(page);
		if (unlikely(__PageMovable(page))) {
			lock_page(page);
			if (!PageMovable(page))
				__ClearPageIsolated(page);
			unlock_page(page);
		}
		goto out;
	}

	newpage = get_new_page(page, private);
	if (!newpage)
		return -ENOMEM;

	rc = __unmap_and_move(page, newpage, force, mode);
	if (rc == MIGRATEPAGE_SUCCESS)
		set_page_owner_migrate_reason(newpage, reason);

out:
	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be restored.
		 */
		list_del(&page->lru);

		/*
		 * Compaction can migrate also non-LRU pages which are
		 * not accounted to NR_ISOLATED_*. They can be recognized
		 * as __PageMovable
		 */
		if (likely(!__PageMovable(page)))
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_cache(page), -hpage_nr_pages(page));
	}

	/*
	 * If migration is successful, releases reference grabbed during
	 * isolation. Otherwise, restore the page to right list unless
	 * we want to retry.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		put_page(page);
		if (reason == MR_MEMORY_FAILURE) {
			/*
			 * Set PG_HWPoison on just freed page
			 * intentionally. Although it's rather weird,
			 * it's how HWPoison flag works at the moment.
			 */
			if (set_hwpoison_free_buddy_page(page))
				num_poisoned_pages_inc();
		}
	} else {
		if (rc != -EAGAIN) {
			if (likely(!__PageMovable(page))) {
				putback_lru_page(page);
				goto put_new;
			}

			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				__ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		}
put_new:
		if (put_new_page)
			put_new_page(newpage, private);
		else
			put_page(newpage);
	}

	return rc;
}

/*
 * Counterpart of unmap_and_move() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then pte is replaced with migration swap entry and direct I/O code
 * will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				free_page_t put_new_page, unsigned long private,
				struct page *hpage, int force,
				enum migrate_mode mode, int reason)
{
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct page *new_hpage;
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;

	/*
	 * Migratability of hugepages depends on architectures and their size.
	 * This check is necessary because some callers of hugepage migration
	 * like soft offline and memory hotremove don't walk through page
	 * tables or check whether the hugepage is pmd-based or not before
	 * kicking migration.
	 */
	if (!hugepage_migration_supported(page_hstate(hpage))) {
		putback_active_hugepage(hpage);
		return -ENOSYS;
	}

	new_hpage = get_new_page(hpage, private);
	if (!new_hpage)
		return -ENOMEM;

	if (!trylock_page(hpage)) {
		if (!force)
			goto out;
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			goto out;
		}
		lock_page(hpage);
	}

	/*
	 * Check for pages which are in the process of being freed. Without
	 * page_mapping() set, hugetlbfs specific move page routine will not
	 * be called and we could leak usage counts for subpools.
	 */
	if (page_private(hpage) && !page_mapping(hpage)) {
		rc = -EBUSY;
		goto out_unlock;
	}

	if (PageAnon(hpage))
		anon_vma = page_get_anon_vma(hpage);

	if (unlikely(!trylock_page(new_hpage)))
		goto put_anon;

	if (page_mapped(hpage)) {
		/*
		 * try_to_unmap could potentially call huge_pmd_unshare.
		 * Because of this, take semaphore in write mode here and
		 * set TTU_RMAP_LOCKED to let lower levels know we have
		 * taken the lock.
		 */
		mapping = hugetlb_page_mapping_lock_write(hpage);
		if (unlikely(!mapping))
			goto unlock_put_anon;

		try_to_unmap(hpage,
			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
			TTU_RMAP_LOCKED);
		page_was_mapped = 1;
		/*
		 * Leave mapping locked until after subsequent call to
		 * remove_migration_ptes()
		 */
	}

	if (!page_mapped(hpage))
		rc = move_to_new_page(new_hpage, hpage, mode);

	if (page_was_mapped) {
		remove_migration_ptes(hpage,
			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, true);
		i_mmap_unlock_write(mapping);
	}

unlock_put_anon:
	unlock_page(new_hpage);

put_anon:
	if (anon_vma)
		put_anon_vma(anon_vma);

	if (rc == MIGRATEPAGE_SUCCESS) {
		move_hugetlb_state(hpage, new_hpage, reason);
		put_new_page = NULL;
	}

out_unlock:
	unlock_page(hpage);
out:
	if (rc != -EAGAIN)
		putback_active_hugepage(hpage);

	/*
	 * If migration was not successful and there's a freeing callback, use
	 * it.  Otherwise, put_page() will drop the reference grabbed during
	 * isolation.
	 */
	if (put_new_page)
		put_new_page(new_hpage, private);
	else
		putback_active_hugepage(new_hpage);

	return rc;
}

/*
 * migrate_pages - migrate the pages specified in a list, to the free pages
 *		   supplied as the target for the page migration
 *
 * @from:		The list of pages to be migrated.
 * @get_new_page:	The function used to allocate free pages to be used
 *			as the target of the page migration.
 * @put_new_page:	The function used to free target pages if migration
 *			fails, or NULL if no special handling is necessary.
 * @private:		Private data to be passed on to get_new_page()
 * @mode:		The migration mode that specifies the constraints for
 *			page migration, if any.
 * @reason:		The reason for page migration.
 *
 * The function returns after 10 attempts or when no pages are left to migrate,
 * because the list has become empty or no retryable pages remain.
 * The caller should call putback_movable_pages() to return pages to the LRU
 * or free list only if ret != 0.
 *
 * Returns the number of pages that were not migrated, or an error code.
 */
int migrate_pages(struct list_head *from, new_page_t get_new_page,
		free_page_t put_new_page, unsigned long private,
		enum migrate_mode mode, int reason)
{
	int retry = 1;
	int nr_failed = 0;
	int nr_succeeded = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for(pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
retry:
			cond_resched();

			if (PageHuge(page))
				rc = unmap_and_move_huge_page(get_new_page,
						put_new_page, private, page,
						pass > 2, mode, reason);
			else
				rc = unmap_and_move(get_new_page, put_new_page,
						private, page, pass > 2, mode,
						reason);

			switch(rc) {
			case -ENOMEM:
				/*
				 * THP migration might be unsupported or the
				 * allocation could've failed so we should
				 * retry on the same page with the THP split
				 * to base pages.
				 *
				 * Head page is retried immediately and tail
				 * pages are added to the tail of the list so
				 * we encounter them after the rest of the list
				 * is processed.
				 */
				if (PageTransHuge(page) && !PageHuge(page)) {
					lock_page(page);
					rc = split_huge_page_to_list(page, from);
					unlock_page(page);
					if (!rc) {
						list_safe_reset_next(page, page2, lru);
						goto retry;
					}
				}
				nr_failed++;
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case MIGRATEPAGE_SUCCESS:
				nr_succeeded++;
				break;
			default:
				/*
				 * Permanent failure (-EBUSY, -ENOSYS, etc.):
				 * unlike -EAGAIN case, the failed page is
				 * removed from migration page list and not
				 * retried in the next outer loop.
				 */
				nr_failed++;
				break;
			}
		}
	}
	nr_failed += retry;
	rc = nr_failed;
out:
	if (nr_succeeded)
		count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
	if (nr_failed)
		count_vm_events(PGMIGRATE_FAIL, nr_failed);
	trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);

	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	return rc;
}
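
/*
 * A minimal consumer of migrate_pages(), modelled on do_move_pages_to_node()
 * below (a sketch only; error handling beyond putting back the unmigrated
 * pages is the caller's business):
 *
 *	err = migrate_pages(&pagelist, alloc_new_node_page, NULL, nid,
 *			    MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */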

#ifdef CONFIG_NUMA

static int store_status(int __user *status, int start, int value, int nr)
{
	while (nr-- > 0) {
		if (put_user(value, status + start))
			return -EFAULT;
		start++;
	}

	return 0;
}

static int do_move_pages_to_node(struct mm_struct *mm,
		struct list_head *pagelist, int node)
{
	int err;

	if (list_empty(pagelist))
		return 0;

	err = migrate_pages(pagelist, alloc_new_node_page, NULL, node,
			MIGRATE_SYNC, MR_SYSCALL);
	if (err)
		putback_movable_pages(pagelist);
	return err;
}

/*
 * Resolves the given address to a struct page, isolates it from the LRU and
 * puts it to the given pagelist.
 * Returns:
 *     errno - if the page cannot be found/isolated
 *     0 - when it doesn't have to be migrated because it is already on the
 *         target node
 *     1 - when it has been queued
 */
static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
		int node, struct list_head *pagelist, bool migrate_all)
{
	struct vm_area_struct *vma;
	struct page *page;
	unsigned int follflags;
	int err;

	down_read(&mm->mmap_sem);
	err = -EFAULT;
	vma = find_vma(mm, addr);
	if (!vma || addr < vma->vm_start || !vma_migratable(vma))
		goto out;

	/* FOLL_DUMP to ignore special (like zero) pages */
	follflags = FOLL_GET | FOLL_DUMP;
	page = follow_page(vma, addr, follflags);

	err = PTR_ERR(page);
	if (IS_ERR(page))
		goto out;

	err = -ENOENT;
	if (!page)
		goto out;

	err = 0;
	if (page_to_nid(page) == node)
		goto out_putpage;

	err = -EACCES;
	if (page_mapcount(page) > 1 && !migrate_all)
		goto out_putpage;

	if (PageHuge(page)) {
		if (PageHead(page)) {
			isolate_huge_page(page, pagelist);
			err = 1;
		}
	} else {
		struct page *head;

		head = compound_head(page);
		err = isolate_lru_page(head);
		if (err)
			goto out_putpage;

		err = 1;
		list_add_tail(&head->lru, pagelist);
		mod_node_page_state(page_pgdat(head),
			NR_ISOLATED_ANON + page_is_file_cache(head),
			hpage_nr_pages(head));
	}
out_putpage:
	/*
	 * Either remove the duplicate refcount from
	 * isolate_lru_page() or drop the page ref if it was
	 * not isolated.
	 */
	put_page(page);
out:
	up_read(&mm->mmap_sem);
	return err;
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	int current_node = NUMA_NO_NODE;
	LIST_HEAD(pagelist);
	int start, i;
	int err = 0, err1;

	migrate_prep();

	for (i = start = 0; i < nr_pages; i++) {
		const void __user *p;
		unsigned long addr;
		int node;

		err = -EFAULT;
		if (get_user(p, pages + i))
			goto out_flush;
		if (get_user(node, nodes + i))
			goto out_flush;
		addr = (unsigned long)untagged_addr(p);

		err = -ENODEV;
		if (node < 0 || node >= MAX_NUMNODES)
			goto out_flush;
		if (!node_state(node, N_MEMORY))
			goto out_flush;

		err = -EACCES;
		if (!node_isset(node, task_nodes))
			goto out_flush;

		if (current_node == NUMA_NO_NODE) {
			current_node = node;
			start = i;
		} else if (node != current_node) {
			err = do_move_pages_to_node(mm, &pagelist, current_node);
			if (err) {
				/*
				 * Positive err means the number of failed
				 * pages to migrate.  Since we are going to
				 * abort and return the number of non-migrated
				 * pages, we need to include the rest of the
				 * nr_pages that have not been attempted as
				 * well.
				 */
				if (err > 0)
					err += nr_pages - i - 1;
				goto out;
			}
			err = store_status(status, start, current_node, i - start);
			if (err)
				goto out;
			start = i;
			current_node = node;
		}

		/*
		 * Errors in the page lookup or isolation are not fatal and we simply
		 * report them via status
		 */
		err = add_page_for_migration(mm, addr, current_node,
				&pagelist, flags & MPOL_MF_MOVE_ALL);

		if (!err) {
			/* The page is already on the target node */
			err = store_status(status, i, current_node, 1);
			if (err)
				goto out_flush;
			continue;
		} else if (err > 0) {
			/* The page is successfully queued for migration */
			continue;
		}

		err = store_status(status, i, err, 1);
		if (err)
			goto out_flush;

		err = do_move_pages_to_node(mm, &pagelist, current_node);
		if (err) {
			if (err > 0)
				err += nr_pages - i - 1;
			goto out;
		}
		if (i > start) {
			err = store_status(status, start, current_node, i - start);
			if (err)
				goto out;
		}
		current_node = NUMA_NO_NODE;
	}
out_flush:
	if (list_empty(&pagelist))
		return err;

	/* Make sure we do not overwrite the existing error */
	err1 = do_move_pages_to_node(mm, &pagelist, current_node);
	/*
	 * Don't have to report non-attempted pages here since:
	 * - If the above loop is done gracefully all pages have been
	 *   attempted.
	 * - If the above loop is aborted it means a fatal error
	 *   happened, so return err.
	 */
	if (!err1)
		err1 = store_status(status, start, current_node, i - start);
	if (err >= 0)
		err = err1;
out:
	return err;
}
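
/*
 * For reference, the userspace view of the machinery above, as exposed by
 * the move_pages(2) syscall (illustrative only; see the manual page for the
 * authoritative description). Passing a nodes array migrates the pages,
 * passing NULL merely queries their current placement via do_pages_stat()
 * below:
 *
 *	void *pages[1] = { addr };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *
 *	move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *	move_pages(0, 1, pages, NULL, status, 0);	(query only)
 */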

/*
 * Determine the nodes of an array of pages and store it in an array of status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	down_read(&mm->mmap_sem);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			goto set_status;

		/* FOLL_DUMP to ignore special (like zero) pages */
		page = follow_page(vma, addr, FOLL_DUMP);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = page ? page_to_nid(page) : -ENOENT;
set_status:
		*status = err;

		pages++;
		status++;
	}

	up_read(&mm->mmap_sem);
}

/*
 * Determine the nodes of a user array of pages and store it in
 * a user array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr;

		chunk_nr = nr_pages;
		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
			chunk_nr = DO_PAGES_STAT_CHUNK_NR;

		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
			break;

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
			     const void __user * __user *pages,
			     const int __user *nodes,
			     int __user *status, int flags)
{
	struct task_struct *task;
	struct mm_struct *mm;
	int err;
	nodemask_t task_nodes;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		return -ESRCH;
	}
	get_task_struct(task);

	/*
	 * Check if this process has the right to modify the specified
	 * process. Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out;
	}
	rcu_read_unlock();

	err = security_task_movememory(task);
	if (err)
		goto out;

	task_nodes = cpuset_mems_allowed(task);
	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm)
		return -EINVAL;

	if (nodes)
		err = do_pages_move(mm, task_nodes, nr_pages, pages,
				    nodes, status, flags);
	else
		err = do_pages_stat(mm, nr_pages, pages, status);

	mmput(mm);
	return err;

out:
	put_task_struct(task);
	return err;
}

SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
		       compat_uptr_t __user *, pages32,
		       const int __user *, nodes,
		       int __user *, status,
		       int, flags)
{
	const void __user * __user *pages;
	int i;

	pages = compat_alloc_user_space(nr_pages * sizeof(void *));
	for (i = 0; i < nr_pages; i++) {
		compat_uptr_t p;

		if (get_user(p, pages32 + i) ||
			put_user(compat_ptr(p), pages + i))
			return -EFAULT;
	}
	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
}
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_NUMA_BALANCING
/*
 * Returns true if this is a safe migration target node for misplaced NUMA
 * pages. Currently it only checks the watermarks, which is crude.
 */
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
				   unsigned long nr_migrate_pages)
{
	int z;

	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		struct zone *zone = pgdat->node_zones + z;

		if (!populated_zone(zone))
			continue;

		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
		if (!zone_watermark_ok(zone, 0,
				       high_wmark_pages(zone) +
				       nr_migrate_pages,
				       ZONE_MOVABLE, 0))
			continue;
		return true;
	}
	return false;
}

static struct page *alloc_misplaced_dst_page(struct page *page,
					   unsigned long data)
{
	int nid = (int) data;
	struct page *newpage;

	newpage = __alloc_pages_node(nid,
					 (GFP_HIGHUSER_MOVABLE |
					  __GFP_THISNODE | __GFP_NOMEMALLOC |
					  __GFP_NORETRY | __GFP_NOWARN) &
					 ~__GFP_RECLAIM, 0);

	return newpage;
}
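
/*
 * Note on the allocation mask above (an explanatory aside, not new policy):
 * __GFP_THISNODE pins the allocation to the intended destination node, while
 * clearing __GFP_RECLAIM together with __GFP_NORETRY and __GFP_NOMEMALLOC
 * keeps the attempt strictly opportunistic, so a NUMA hinting fault never
 * stalls in reclaim just to move a page closer.
 */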

static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
	int page_lru;

	VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);

	/* Avoid migrating to a node that is nearly full */
	if (!migrate_balanced_pgdat(pgdat, compound_nr(page)))
		return 0;

	if (isolate_lru_page(page))
		return 0;

	/*
	 * migrate_misplaced_transhuge_page() skips page migration's usual
	 * check on page_count(), so we must do it here, now that the page
	 * has been isolated: a GUP pin, or any other pin, prevents migration.
	 * The expected page count is 3: 1 for page's mapcount and 1 for the
	 * caller's pin and 1 for the reference taken by isolate_lru_page().
	 */
	if (PageTransHuge(page) && page_count(page) != 3) {
		putback_lru_page(page);
		return 0;
	}

	page_lru = page_is_file_cache(page);
	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
				hpage_nr_pages(page));

	/*
	 * Isolating the page has taken another reference, so the
	 * caller's reference can be safely dropped without the page
	 * disappearing underneath us during migration.
	 */
	put_page(page);
	return 1;
}

bool pmd_trans_migrating(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	return PageLocked(page);
}

/*
 * Attempt to migrate a misplaced page to the specified destination
 * node. Caller is expected to have an elevated reference count on
 * the page that will be dropped by this function before returning.
 */
int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
			   int node)
{
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated;
	int nr_remaining;
	LIST_HEAD(migratepages);

	/*
	 * Don't migrate file pages that are mapped in multiple processes
	 * with execute permissions as they are probably shared libraries.
	 */
	if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
	    (vma->vm_flags & VM_EXEC))
		goto out;

	/*
	 * Also do not migrate dirty pages as not all filesystems can move
	 * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
	 */
	if (page_is_file_cache(page) && PageDirty(page))
		goto out;

	isolated = numamigrate_isolate_page(pgdat, page);
	if (!isolated)
		goto out;

	list_add(&page->lru, &migratepages);
	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
				     NULL, node, MIGRATE_ASYNC,
				     MR_NUMA_MISPLACED);
	if (nr_remaining) {
		if (!list_empty(&migratepages)) {
			list_del(&page->lru);
			dec_node_page_state(page, NR_ISOLATED_ANON +
					page_is_file_cache(page));
			putback_lru_page(page);
		}
		isolated = 0;
	} else
		count_vm_numa_event(NUMA_PAGE_MIGRATE);
	BUG_ON(!list_empty(&migratepages));
	return isolated;

out:
	put_page(page);
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
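
/*
 * For context: migrate_misplaced_page() above is normally reached from the
 * NUMA hinting fault path (do_numa_page() in mm/memory.c), and its THP
 * counterpart below from do_huge_pmd_numa_page(). This is a descriptive
 * note, not a complete list of callers.
 */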
2038 */ 2039 int migrate_misplaced_transhuge_page(struct mm_struct *mm, 2040 struct vm_area_struct *vma, 2041 pmd_t *pmd, pmd_t entry, 2042 unsigned long address, 2043 struct page *page, int node) 2044 { 2045 spinlock_t *ptl; 2046 pg_data_t *pgdat = NODE_DATA(node); 2047 int isolated = 0; 2048 struct page *new_page = NULL; 2049 int page_lru = page_is_file_cache(page); 2050 unsigned long start = address & HPAGE_PMD_MASK; 2051 2052 new_page = alloc_pages_node(node, 2053 (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE), 2054 HPAGE_PMD_ORDER); 2055 if (!new_page) 2056 goto out_fail; 2057 prep_transhuge_page(new_page); 2058 2059 isolated = numamigrate_isolate_page(pgdat, page); 2060 if (!isolated) { 2061 put_page(new_page); 2062 goto out_fail; 2063 } 2064 2065 /* Prepare a page as a migration target */ 2066 __SetPageLocked(new_page); 2067 if (PageSwapBacked(page)) 2068 __SetPageSwapBacked(new_page); 2069 2070 /* anon mapping, we can simply copy page->mapping to the new page: */ 2071 new_page->mapping = page->mapping; 2072 new_page->index = page->index; 2073 /* flush the cache before copying using the kernel virtual address */ 2074 flush_cache_range(vma, start, start + HPAGE_PMD_SIZE); 2075 migrate_page_copy(new_page, page); 2076 WARN_ON(PageLRU(new_page)); 2077 2078 /* Recheck the target PMD */ 2079 ptl = pmd_lock(mm, pmd); 2080 if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) { 2081 spin_unlock(ptl); 2082 2083 /* Reverse changes made by migrate_page_copy() */ 2084 if (TestClearPageActive(new_page)) 2085 SetPageActive(page); 2086 if (TestClearPageUnevictable(new_page)) 2087 SetPageUnevictable(page); 2088 2089 unlock_page(new_page); 2090 put_page(new_page); /* Free it */ 2091 2092 /* Retake the callers reference and putback on LRU */ 2093 get_page(page); 2094 putback_lru_page(page); 2095 mod_node_page_state(page_pgdat(page), 2096 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR); 2097 2098 goto out_unlock; 2099 } 2100 2101 entry = mk_huge_pmd(new_page, vma->vm_page_prot); 2102 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 2103 2104 /* 2105 * Overwrite the old entry under pagetable lock and establish 2106 * the new PTE. Any parallel GUP will either observe the old 2107 * page blocking on the page lock, block on the page table 2108 * lock or observe the new page. The SetPageUptodate on the 2109 * new page and page_add_new_anon_rmap guarantee the copy is 2110 * visible before the pagetable update. 2111 */ 2112 page_add_anon_rmap(new_page, vma, start, true); 2113 /* 2114 * At this point the pmd is numa/protnone (i.e. non present) and the TLB 2115 * has already been flushed globally. So no TLB can be currently 2116 * caching this non present pmd mapping. There's no need to clear the 2117 * pmd before doing set_pmd_at(), nor to flush the TLB after 2118 * set_pmd_at(). Clearing the pmd here would introduce a race 2119 * condition against MADV_DONTNEED, because MADV_DONTNEED only holds the 2120 * mmap_sem for reading. If the pmd is set to NULL at any given time, 2121 * MADV_DONTNEED won't wait on the pmd lock and it'll skip clearing this 2122 * pmd. 2123 */ 2124 set_pmd_at(mm, start, pmd, entry); 2125 update_mmu_cache_pmd(vma, address, &entry); 2126 2127 page_ref_unfreeze(page, 2); 2128 mlock_migrate_page(new_page, page); 2129 page_remove_rmap(page, true); 2130 set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED); 2131 2132 spin_unlock(ptl); 2133 2134 /* Take an "isolate" reference and put new page on the LRU. 
*/ 2135 get_page(new_page); 2136 putback_lru_page(new_page); 2137 2138 unlock_page(new_page); 2139 unlock_page(page); 2140 put_page(page); /* Drop the rmap reference */ 2141 put_page(page); /* Drop the LRU isolation reference */ 2142 2143 count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR); 2144 count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR); 2145 2146 mod_node_page_state(page_pgdat(page), 2147 NR_ISOLATED_ANON + page_lru, 2148 -HPAGE_PMD_NR); 2149 return isolated; 2150 2151 out_fail: 2152 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); 2153 ptl = pmd_lock(mm, pmd); 2154 if (pmd_same(*pmd, entry)) { 2155 entry = pmd_modify(entry, vma->vm_page_prot); 2156 set_pmd_at(mm, start, pmd, entry); 2157 update_mmu_cache_pmd(vma, address, &entry); 2158 } 2159 spin_unlock(ptl); 2160 2161 out_unlock: 2162 unlock_page(page); 2163 put_page(page); 2164 return 0; 2165 } 2166 #endif /* CONFIG_NUMA_BALANCING */ 2167 2168 #endif /* CONFIG_NUMA */ 2169 2170 #ifdef CONFIG_DEVICE_PRIVATE 2171 static int migrate_vma_collect_hole(unsigned long start, 2172 unsigned long end, 2173 __always_unused int depth, 2174 struct mm_walk *walk) 2175 { 2176 struct migrate_vma *migrate = walk->private; 2177 unsigned long addr; 2178 2179 for (addr = start; addr < end; addr += PAGE_SIZE) { 2180 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE; 2181 migrate->dst[migrate->npages] = 0; 2182 migrate->npages++; 2183 migrate->cpages++; 2184 } 2185 2186 return 0; 2187 } 2188 2189 static int migrate_vma_collect_skip(unsigned long start, 2190 unsigned long end, 2191 struct mm_walk *walk) 2192 { 2193 struct migrate_vma *migrate = walk->private; 2194 unsigned long addr; 2195 2196 for (addr = start; addr < end; addr += PAGE_SIZE) { 2197 migrate->dst[migrate->npages] = 0; 2198 migrate->src[migrate->npages++] = 0; 2199 } 2200 2201 return 0; 2202 } 2203 2204 static int migrate_vma_collect_pmd(pmd_t *pmdp, 2205 unsigned long start, 2206 unsigned long end, 2207 struct mm_walk *walk) 2208 { 2209 struct migrate_vma *migrate = walk->private; 2210 struct vm_area_struct *vma = walk->vma; 2211 struct mm_struct *mm = vma->vm_mm; 2212 unsigned long addr = start, unmapped = 0; 2213 spinlock_t *ptl; 2214 pte_t *ptep; 2215 2216 again: 2217 if (pmd_none(*pmdp)) 2218 return migrate_vma_collect_hole(start, end, -1, walk); 2219 2220 if (pmd_trans_huge(*pmdp)) { 2221 struct page *page; 2222 2223 ptl = pmd_lock(mm, pmdp); 2224 if (unlikely(!pmd_trans_huge(*pmdp))) { 2225 spin_unlock(ptl); 2226 goto again; 2227 } 2228 2229 page = pmd_page(*pmdp); 2230 if (is_huge_zero_page(page)) { 2231 spin_unlock(ptl); 2232 split_huge_pmd(vma, pmdp, addr); 2233 if (pmd_trans_unstable(pmdp)) 2234 return migrate_vma_collect_skip(start, end, 2235 walk); 2236 } else { 2237 int ret; 2238 2239 get_page(page); 2240 spin_unlock(ptl); 2241 if (unlikely(!trylock_page(page))) 2242 return migrate_vma_collect_skip(start, end, 2243 walk); 2244 ret = split_huge_page(page); 2245 unlock_page(page); 2246 put_page(page); 2247 if (ret) 2248 return migrate_vma_collect_skip(start, end, 2249 walk); 2250 if (pmd_none(*pmdp)) 2251 return migrate_vma_collect_hole(start, end, -1, 2252 walk); 2253 } 2254 } 2255 2256 if (unlikely(pmd_bad(*pmdp))) 2257 return migrate_vma_collect_skip(start, end, walk); 2258 2259 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); 2260 arch_enter_lazy_mmu_mode(); 2261 2262 for (; addr < end; addr += PAGE_SIZE, ptep++) { 2263 unsigned long mpfn = 0, pfn; 2264 struct page *page; 2265 swp_entry_t entry; 2266 pte_t pte; 2267 2268 pte = *ptep; 2269 2270 if (pte_none(pte)) { 
2271 mpfn = MIGRATE_PFN_MIGRATE; 2272 migrate->cpages++; 2273 goto next; 2274 } 2275 2276 if (!pte_present(pte)) { 2277 /* 2278 * Only care about unaddressable device page special 2279 * page table entry. Other special swap entries are not 2280 * migratable, and we ignore regular swapped page. 2281 */ 2282 entry = pte_to_swp_entry(pte); 2283 if (!is_device_private_entry(entry)) 2284 goto next; 2285 2286 page = device_private_entry_to_page(entry); 2287 if (page->pgmap->owner != migrate->src_owner) 2288 goto next; 2289 2290 mpfn = migrate_pfn(page_to_pfn(page)) | 2291 MIGRATE_PFN_MIGRATE; 2292 if (is_write_device_private_entry(entry)) 2293 mpfn |= MIGRATE_PFN_WRITE; 2294 } else { 2295 if (migrate->src_owner) 2296 goto next; 2297 pfn = pte_pfn(pte); 2298 if (is_zero_pfn(pfn)) { 2299 mpfn = MIGRATE_PFN_MIGRATE; 2300 migrate->cpages++; 2301 goto next; 2302 } 2303 page = vm_normal_page(migrate->vma, addr, pte); 2304 mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE; 2305 mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0; 2306 } 2307 2308 /* FIXME support THP */ 2309 if (!page || !page->mapping || PageTransCompound(page)) { 2310 mpfn = 0; 2311 goto next; 2312 } 2313 2314 /* 2315 * By getting a reference on the page we pin it and that blocks 2316 * any kind of migration. Side effect is that it "freezes" the 2317 * pte. 2318 * 2319 * We drop this reference after isolating the page from the lru 2320 * for non device page (device page are not on the lru and thus 2321 * can't be dropped from it). 2322 */ 2323 get_page(page); 2324 migrate->cpages++; 2325 2326 /* 2327 * Optimize for the common case where page is only mapped once 2328 * in one process. If we can lock the page, then we can safely 2329 * set up a special migration page table entry now. 2330 */ 2331 if (trylock_page(page)) { 2332 pte_t swp_pte; 2333 2334 mpfn |= MIGRATE_PFN_LOCKED; 2335 ptep_get_and_clear(mm, addr, ptep); 2336 2337 /* Setup special migration page table entry */ 2338 entry = make_migration_entry(page, mpfn & 2339 MIGRATE_PFN_WRITE); 2340 swp_pte = swp_entry_to_pte(entry); 2341 if (pte_soft_dirty(pte)) 2342 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2343 set_pte_at(mm, addr, ptep, swp_pte); 2344 2345 /* 2346 * This is like regular unmap: we remove the rmap and 2347 * drop page refcount. Page won't be freed, as we took 2348 * a reference just above. 2349 */ 2350 page_remove_rmap(page, false); 2351 put_page(page); 2352 2353 if (pte_present(pte)) 2354 unmapped++; 2355 } 2356 2357 next: 2358 migrate->dst[migrate->npages] = 0; 2359 migrate->src[migrate->npages++] = mpfn; 2360 } 2361 arch_leave_lazy_mmu_mode(); 2362 pte_unmap_unlock(ptep - 1, ptl); 2363 2364 /* Only flush the TLB if we actually modified any entries */ 2365 if (unmapped) 2366 flush_tlb_range(walk->vma, start, end); 2367 2368 return 0; 2369 } 2370 2371 static const struct mm_walk_ops migrate_vma_walk_ops = { 2372 .pmd_entry = migrate_vma_collect_pmd, 2373 .pte_hole = migrate_vma_collect_hole, 2374 }; 2375 2376 /* 2377 * migrate_vma_collect() - collect pages over a range of virtual addresses 2378 * @migrate: migrate struct containing all migration information 2379 * 2380 * This will walk the CPU page table. For each virtual address backed by a 2381 * valid page, it updates the src array and takes a reference on the page, in 2382 * order to pin the page until we lock it and unmap it. 
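 *
 * (Editorial illustration, added here for clarity rather than taken from the
 * original comment.) Each src entry packs the page frame number together
 * with MIGRATE_PFN_* flags; for a writable, present pte the collection step
 * ends up doing roughly
 *
 *	migrate->src[i] = migrate_pfn(page_to_pfn(page)) |
 *			  MIGRATE_PFN_MIGRATE | MIGRATE_PFN_WRITE;
 *
 * and later stages recover the page again with
 *
 *	struct page *page = migrate_pfn_to_page(migrate->src[i]);
 *
 * as can be seen in migrate_vma_collect_pmd() above.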
 */
static void migrate_vma_collect(struct migrate_vma *migrate)
{
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL,
			migrate->vma->vm_mm, migrate->start, migrate->end);
	mmu_notifier_invalidate_range_start(&range);

	walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
			&migrate_vma_walk_ops, migrate);

	mmu_notifier_invalidate_range_end(&range);
	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
}

/*
 * migrate_vma_check_page() - check if page is pinned or not
 * @page: struct page to check
 *
 * Pinned pages cannot be migrated. This is the same test as in
 * migrate_page_move_mapping(), except that here we allow migration of a
 * ZONE_DEVICE page.
 */
static bool migrate_vma_check_page(struct page *page)
{
	/*
	 * One extra ref because caller holds an extra reference, either from
	 * isolate_lru_page() for a regular page, or migrate_vma_collect() for
	 * a device page.
	 */
	int extra = 1;

	/*
	 * FIXME support THP (transparent huge page), it is a bit more complex
	 * to check them than regular pages, because they can be mapped with a
	 * pmd or with a pte (split pte mapping).
	 */
	if (PageCompound(page))
		return false;

	/* Pages from ZONE_DEVICE have one extra reference */
	if (is_zone_device_page(page)) {
		/*
		 * Private pages can never be pinned as they have no valid pte
		 * and GUP will fail for them. Yet if there is a pending
		 * migration, a thread might try to wait on the pte migration
		 * entry and will bump the page reference count. Sadly there is
		 * no way to differentiate a regular pin from a migration wait.
		 * Hence, to avoid two racing threads trying to migrate back to
		 * the CPU and entering an infinite loop (one stopping the
		 * migration because the other is waiting on the pte migration
		 * entry), we always return true here.
		 *
		 * FIXME the proper solution is to rework migration_entry_wait()
		 * so it does not need to take a reference on the page.
		 */
		return is_device_private_page(page);
	}

	/* For file-backed pages */
	if (page_mapping(page))
		extra += 1 + page_has_private(page);

	if ((page_count(page) - extra) > page_mapcount(page))
		return false;

	return true;
}

/*
 * migrate_vma_prepare() - lock pages and isolate them from the lru
 * @migrate: migrate struct containing all migration information
 *
 * This locks pages that have been collected by migrate_vma_collect(). Once each
 * page is locked it is isolated from the lru (for non-device pages). Finally,
 * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
 * migrated by concurrent kernel threads.
2460 */ 2461 static void migrate_vma_prepare(struct migrate_vma *migrate) 2462 { 2463 const unsigned long npages = migrate->npages; 2464 const unsigned long start = migrate->start; 2465 unsigned long addr, i, restore = 0; 2466 bool allow_drain = true; 2467 2468 lru_add_drain(); 2469 2470 for (i = 0; (i < npages) && migrate->cpages; i++) { 2471 struct page *page = migrate_pfn_to_page(migrate->src[i]); 2472 bool remap = true; 2473 2474 if (!page) 2475 continue; 2476 2477 if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) { 2478 /* 2479 * Because we are migrating several pages there can be 2480 * a deadlock between 2 concurrent migration where each 2481 * are waiting on each other page lock. 2482 * 2483 * Make migrate_vma() a best effort thing and backoff 2484 * for any page we can not lock right away. 2485 */ 2486 if (!trylock_page(page)) { 2487 migrate->src[i] = 0; 2488 migrate->cpages--; 2489 put_page(page); 2490 continue; 2491 } 2492 remap = false; 2493 migrate->src[i] |= MIGRATE_PFN_LOCKED; 2494 } 2495 2496 /* ZONE_DEVICE pages are not on LRU */ 2497 if (!is_zone_device_page(page)) { 2498 if (!PageLRU(page) && allow_drain) { 2499 /* Drain CPU's pagevec */ 2500 lru_add_drain_all(); 2501 allow_drain = false; 2502 } 2503 2504 if (isolate_lru_page(page)) { 2505 if (remap) { 2506 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2507 migrate->cpages--; 2508 restore++; 2509 } else { 2510 migrate->src[i] = 0; 2511 unlock_page(page); 2512 migrate->cpages--; 2513 put_page(page); 2514 } 2515 continue; 2516 } 2517 2518 /* Drop the reference we took in collect */ 2519 put_page(page); 2520 } 2521 2522 if (!migrate_vma_check_page(page)) { 2523 if (remap) { 2524 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2525 migrate->cpages--; 2526 restore++; 2527 2528 if (!is_zone_device_page(page)) { 2529 get_page(page); 2530 putback_lru_page(page); 2531 } 2532 } else { 2533 migrate->src[i] = 0; 2534 unlock_page(page); 2535 migrate->cpages--; 2536 2537 if (!is_zone_device_page(page)) 2538 putback_lru_page(page); 2539 else 2540 put_page(page); 2541 } 2542 } 2543 } 2544 2545 for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) { 2546 struct page *page = migrate_pfn_to_page(migrate->src[i]); 2547 2548 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) 2549 continue; 2550 2551 remove_migration_pte(page, migrate->vma, addr, page); 2552 2553 migrate->src[i] = 0; 2554 unlock_page(page); 2555 put_page(page); 2556 restore--; 2557 } 2558 } 2559 2560 /* 2561 * migrate_vma_unmap() - replace page mapping with special migration pte entry 2562 * @migrate: migrate struct containing all migration information 2563 * 2564 * Replace page mapping (CPU page table pte) with a special migration pte entry 2565 * and check again if it has been pinned. Pinned pages are restored because we 2566 * cannot migrate them. 2567 * 2568 * This is the last step before we call the device driver callback to allocate 2569 * destination memory and copy contents of original page over to new page. 
 */
static void migrate_vma_unmap(struct migrate_vma *migrate)
{
	int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
	const unsigned long npages = migrate->npages;
	const unsigned long start = migrate->start;
	unsigned long addr, i, restore = 0;

	for (i = 0; i < npages; i++) {
		struct page *page = migrate_pfn_to_page(migrate->src[i]);

		if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		if (page_mapped(page)) {
			try_to_unmap(page, flags);
			if (page_mapped(page))
				goto restore;
		}

		if (migrate_vma_check_page(page))
			continue;

restore:
		migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
		migrate->cpages--;
		restore++;
	}

	for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
		struct page *page = migrate_pfn_to_page(migrate->src[i]);

		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		remove_migration_ptes(page, page, false);

		migrate->src[i] = 0;
		unlock_page(page);
		restore--;

		if (is_zone_device_page(page))
			put_page(page);
		else
			putback_lru_page(page);
	}
}

/**
 * migrate_vma_setup() - prepare to migrate a range of memory
 * @args: contains the vma, start, end and pfn arrays for the migration
 *
 * Returns: negative errno on failure, 0 when 0 or more pages were migrated
 * without an error.
 *
 * Prepare to migrate a virtual address range by collecting all the pages
 * backing each virtual address in the range, saving them inside the src array.
 * Then lock those pages and unmap them. Once the pages are locked and
 * unmapped, check whether each page is pinned or not. Pages that aren't pinned
 * have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
 * corresponding src array entry. Any pages that are pinned are then restored,
 * by remapping and unlocking them.
 *
 * The caller should then allocate destination memory and copy source memory to
 * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
 * flag set). Once these are allocated and copied, the caller must update each
 * corresponding entry in the dst array with the pfn value of the destination
 * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set
 * (destination pages must have their struct pages locked, via lock_page()).
 *
 * Note that the caller does not have to migrate all the pages that are marked
 * with the MIGRATE_PFN_MIGRATE flag in the src array unless this is a
 * migration from device memory to system memory. If the caller cannot migrate
 * a device page back to system memory, then it must return VM_FAULT_SIGBUS,
 * which has severe consequences for the userspace process, so it must be
 * avoided if at all possible.
 *
 * For empty entries inside the CPU page table (pte_none() or pmd_none() is
 * true) we do set the MIGRATE_PFN_MIGRATE flag inside the corresponding source
 * array entry, thus allowing the caller to allocate device memory for those
 * unbacked virtual addresses. For this the caller simply has to allocate
 * device memory and properly set the destination entry like for regular
 * migration. Note that this can still fail, so inside the device driver one
 * must check whether the migration was successful for those entries after
 * calling migrate_vma_pages(), just like for regular migration.
 *
 * After that, the caller must call migrate_vma_pages() to go over each entry
 * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
 * set. If the corresponding entry in the dst array has the MIGRATE_PFN_VALID
 * flag set, then migrate_vma_pages() migrates the struct page information from
 * the source struct page to the destination struct page. If it fails to
 * migrate the struct page information, then it clears the MIGRATE_PFN_MIGRATE
 * flag in the src array.
 *
 * At this point all successfully migrated pages have an entry in the src
 * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
 * array entry with MIGRATE_PFN_VALID flag set.
 *
 * Once migrate_vma_pages() returns the caller may inspect which pages were
 * successfully migrated, and which were not. Successfully migrated pages will
 * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
 *
 * It is safe to update the device page table after migrate_vma_pages() because
 * both destination and source page are still locked, and the mmap_sem is held
 * in read mode (hence no one can unmap the range being migrated).
 *
 * Once the caller is done cleaning up things and updating its page table (if it
 * chose to do so, this is not an obligation) it finally calls
 * migrate_vma_finalize() to update the CPU page table to point to new pages
 * for successfully migrated pages or otherwise restore the CPU page table to
 * point to the original source pages.
 */
int migrate_vma_setup(struct migrate_vma *args)
{
	long nr_pages = (args->end - args->start) >> PAGE_SHIFT;

	args->start &= PAGE_MASK;
	args->end &= PAGE_MASK;
	if (!args->vma || is_vm_hugetlb_page(args->vma) ||
	    (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
		return -EINVAL;
	if (nr_pages <= 0)
		return -EINVAL;
	if (args->start < args->vma->vm_start ||
	    args->start >= args->vma->vm_end)
		return -EINVAL;
	if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
		return -EINVAL;
	if (!args->src || !args->dst)
		return -EINVAL;

	memset(args->src, 0, sizeof(*args->src) * nr_pages);
	args->cpages = 0;
	args->npages = 0;

	migrate_vma_collect(args);

	if (args->cpages)
		migrate_vma_prepare(args);
	if (args->cpages)
		migrate_vma_unmap(args);

	/*
	 * At this point pages are locked and unmapped, and thus they have
	 * stable content and can safely be copied to destination memory that
	 * is allocated by the drivers.
	 */
	return 0;
}
EXPORT_SYMBOL(migrate_vma_setup);
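
/*
 * What follows is an illustrative sketch added by the editor, not code that is
 * built with this file: it shows, under the assumptions spelled out here, how
 * a device driver might drive the migrate_vma_setup() / migrate_vma_pages() /
 * migrate_vma_finalize() sequence documented above for a single page. It
 * assumes the caller already holds mmap_sem in read mode, and the helpers
 * my_dev_alloc_page() and my_dev_copy_page() are hypothetical placeholders for
 * driver-specific allocation and copy routines.
 */
#if 0	/* hedged example only -- never compiled */
static int my_dev_migrate_one_page(struct vm_area_struct *vma,
				   unsigned long addr)
{
	unsigned long src_pfn = 0, dst_pfn = 0;
	struct migrate_vma args = {
		.vma	= vma,
		.start	= addr,
		.end	= addr + PAGE_SIZE,
		.src	= &src_pfn,
		.dst	= &dst_pfn,
	};
	struct page *dpage;
	int ret;

	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;

	/* Nothing was collected, or the page is pinned: give up quietly. */
	if (!(src_pfn & MIGRATE_PFN_MIGRATE))
		goto out;

	/* Allocate and lock the destination page, then copy the data. */
	dpage = my_dev_alloc_page();		/* hypothetical helper */
	if (!dpage)
		goto out;
	lock_page(dpage);
	my_dev_copy_page(migrate_pfn_to_page(src_pfn), dpage); /* hypothetical */

	dst_pfn = migrate_pfn(page_to_pfn(dpage)) |
		  MIGRATE_PFN_VALID | MIGRATE_PFN_LOCKED;

	migrate_vma_pages(&args);
out:
	/* Restores any page that was not migrated, and unlocks everything. */
	migrate_vma_finalize(&args);
	return 0;
}
#endif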

/*
 * This code closely matches the code in:
 *   __handle_mm_fault()
 *     handle_pte_fault()
 *       do_anonymous_page()
 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
 * private page.
2729 */ 2730 static void migrate_vma_insert_page(struct migrate_vma *migrate, 2731 unsigned long addr, 2732 struct page *page, 2733 unsigned long *src, 2734 unsigned long *dst) 2735 { 2736 struct vm_area_struct *vma = migrate->vma; 2737 struct mm_struct *mm = vma->vm_mm; 2738 struct mem_cgroup *memcg; 2739 bool flush = false; 2740 spinlock_t *ptl; 2741 pte_t entry; 2742 pgd_t *pgdp; 2743 p4d_t *p4dp; 2744 pud_t *pudp; 2745 pmd_t *pmdp; 2746 pte_t *ptep; 2747 2748 /* Only allow populating anonymous memory */ 2749 if (!vma_is_anonymous(vma)) 2750 goto abort; 2751 2752 pgdp = pgd_offset(mm, addr); 2753 p4dp = p4d_alloc(mm, pgdp, addr); 2754 if (!p4dp) 2755 goto abort; 2756 pudp = pud_alloc(mm, p4dp, addr); 2757 if (!pudp) 2758 goto abort; 2759 pmdp = pmd_alloc(mm, pudp, addr); 2760 if (!pmdp) 2761 goto abort; 2762 2763 if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp)) 2764 goto abort; 2765 2766 /* 2767 * Use pte_alloc() instead of pte_alloc_map(). We can't run 2768 * pte_offset_map() on pmds where a huge pmd might be created 2769 * from a different thread. 2770 * 2771 * pte_alloc_map() is safe to use under down_write(mmap_sem) or when 2772 * parallel threads are excluded by other means. 2773 * 2774 * Here we only have down_read(mmap_sem). 2775 */ 2776 if (pte_alloc(mm, pmdp)) 2777 goto abort; 2778 2779 /* See the comment in pte_alloc_one_map() */ 2780 if (unlikely(pmd_trans_unstable(pmdp))) 2781 goto abort; 2782 2783 if (unlikely(anon_vma_prepare(vma))) 2784 goto abort; 2785 if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false)) 2786 goto abort; 2787 2788 /* 2789 * The memory barrier inside __SetPageUptodate makes sure that 2790 * preceding stores to the page contents become visible before 2791 * the set_pte_at() write. 2792 */ 2793 __SetPageUptodate(page); 2794 2795 if (is_zone_device_page(page)) { 2796 if (is_device_private_page(page)) { 2797 swp_entry_t swp_entry; 2798 2799 swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE); 2800 entry = swp_entry_to_pte(swp_entry); 2801 } 2802 } else { 2803 entry = mk_pte(page, vma->vm_page_prot); 2804 if (vma->vm_flags & VM_WRITE) 2805 entry = pte_mkwrite(pte_mkdirty(entry)); 2806 } 2807 2808 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); 2809 2810 if (check_stable_address_space(mm)) 2811 goto unlock_abort; 2812 2813 if (pte_present(*ptep)) { 2814 unsigned long pfn = pte_pfn(*ptep); 2815 2816 if (!is_zero_pfn(pfn)) 2817 goto unlock_abort; 2818 flush = true; 2819 } else if (!pte_none(*ptep)) 2820 goto unlock_abort; 2821 2822 /* 2823 * Check for userfaultfd but do not deliver the fault. Instead, 2824 * just back off. 
2825 */ 2826 if (userfaultfd_missing(vma)) 2827 goto unlock_abort; 2828 2829 inc_mm_counter(mm, MM_ANONPAGES); 2830 page_add_new_anon_rmap(page, vma, addr, false); 2831 mem_cgroup_commit_charge(page, memcg, false, false); 2832 if (!is_zone_device_page(page)) 2833 lru_cache_add_active_or_unevictable(page, vma); 2834 get_page(page); 2835 2836 if (flush) { 2837 flush_cache_page(vma, addr, pte_pfn(*ptep)); 2838 ptep_clear_flush_notify(vma, addr, ptep); 2839 set_pte_at_notify(mm, addr, ptep, entry); 2840 update_mmu_cache(vma, addr, ptep); 2841 } else { 2842 /* No need to invalidate - it was non-present before */ 2843 set_pte_at(mm, addr, ptep, entry); 2844 update_mmu_cache(vma, addr, ptep); 2845 } 2846 2847 pte_unmap_unlock(ptep, ptl); 2848 *src = MIGRATE_PFN_MIGRATE; 2849 return; 2850 2851 unlock_abort: 2852 pte_unmap_unlock(ptep, ptl); 2853 mem_cgroup_cancel_charge(page, memcg, false); 2854 abort: 2855 *src &= ~MIGRATE_PFN_MIGRATE; 2856 } 2857 2858 /** 2859 * migrate_vma_pages() - migrate meta-data from src page to dst page 2860 * @migrate: migrate struct containing all migration information 2861 * 2862 * This migrates struct page meta-data from source struct page to destination 2863 * struct page. This effectively finishes the migration from source page to the 2864 * destination page. 2865 */ 2866 void migrate_vma_pages(struct migrate_vma *migrate) 2867 { 2868 const unsigned long npages = migrate->npages; 2869 const unsigned long start = migrate->start; 2870 struct mmu_notifier_range range; 2871 unsigned long addr, i; 2872 bool notified = false; 2873 2874 for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) { 2875 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); 2876 struct page *page = migrate_pfn_to_page(migrate->src[i]); 2877 struct address_space *mapping; 2878 int r; 2879 2880 if (!newpage) { 2881 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2882 continue; 2883 } 2884 2885 if (!page) { 2886 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE)) 2887 continue; 2888 if (!notified) { 2889 notified = true; 2890 2891 mmu_notifier_range_init(&range, 2892 MMU_NOTIFY_CLEAR, 0, 2893 NULL, 2894 migrate->vma->vm_mm, 2895 addr, migrate->end); 2896 mmu_notifier_invalidate_range_start(&range); 2897 } 2898 migrate_vma_insert_page(migrate, addr, newpage, 2899 &migrate->src[i], 2900 &migrate->dst[i]); 2901 continue; 2902 } 2903 2904 mapping = page_mapping(page); 2905 2906 if (is_zone_device_page(newpage)) { 2907 if (is_device_private_page(newpage)) { 2908 /* 2909 * For now only support private anonymous when 2910 * migrating to un-addressable device memory. 2911 */ 2912 if (mapping) { 2913 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2914 continue; 2915 } 2916 } else { 2917 /* 2918 * Other types of ZONE_DEVICE page are not 2919 * supported. 2920 */ 2921 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2922 continue; 2923 } 2924 } 2925 2926 r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY); 2927 if (r != MIGRATEPAGE_SUCCESS) 2928 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2929 } 2930 2931 /* 2932 * No need to double call mmu_notifier->invalidate_range() callback as 2933 * the above ptep_clear_flush_notify() inside migrate_vma_insert_page() 2934 * did already call it. 
2935 */ 2936 if (notified) 2937 mmu_notifier_invalidate_range_only_end(&range); 2938 } 2939 EXPORT_SYMBOL(migrate_vma_pages); 2940 2941 /** 2942 * migrate_vma_finalize() - restore CPU page table entry 2943 * @migrate: migrate struct containing all migration information 2944 * 2945 * This replaces the special migration pte entry with either a mapping to the 2946 * new page if migration was successful for that page, or to the original page 2947 * otherwise. 2948 * 2949 * This also unlocks the pages and puts them back on the lru, or drops the extra 2950 * refcount, for device pages. 2951 */ 2952 void migrate_vma_finalize(struct migrate_vma *migrate) 2953 { 2954 const unsigned long npages = migrate->npages; 2955 unsigned long i; 2956 2957 for (i = 0; i < npages; i++) { 2958 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); 2959 struct page *page = migrate_pfn_to_page(migrate->src[i]); 2960 2961 if (!page) { 2962 if (newpage) { 2963 unlock_page(newpage); 2964 put_page(newpage); 2965 } 2966 continue; 2967 } 2968 2969 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) { 2970 if (newpage) { 2971 unlock_page(newpage); 2972 put_page(newpage); 2973 } 2974 newpage = page; 2975 } 2976 2977 remove_migration_ptes(page, newpage, false); 2978 unlock_page(page); 2979 migrate->cpages--; 2980 2981 if (is_zone_device_page(page)) 2982 put_page(page); 2983 else 2984 putback_lru_page(page); 2985 2986 if (newpage != page) { 2987 unlock_page(newpage); 2988 if (is_zone_device_page(newpage)) 2989 put_page(newpage); 2990 else 2991 putback_lru_page(newpage); 2992 } 2993 } 2994 } 2995 EXPORT_SYMBOL(migrate_vma_finalize); 2996 #endif /* CONFIG_DEVICE_PRIVATE */ 2997
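
/*
 * Editor's note: an added, hedged illustration rather than part of this file.
 * The move_pages() system call implemented by kernel_move_pages() earlier in
 * this file is normally reached from userspace through libnuma's <numaif.h>
 * wrapper (link with -lnuma). The sketch below assumes 4 KiB pages; it first
 * touches a freshly allocated page so it is populated, then asks the kernel to
 * move it to node 1. Passing pid 0 means "the calling process", and on success
 * the status array reports the node each page ended up on.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		void *page;
 *		int node = 1, status = -1;
 *
 *		if (posix_memalign(&page, 4096, 4096))
 *			return 1;
 *		((volatile char *)page)[0] = 1;
 *
 *		if (move_pages(0, 1, &page, &node, &status, MPOL_MF_MOVE))
 *			perror("move_pages");
 *		else
 *			printf("page is now on node %d\n", status);
 *		return 0;
 *	}
 */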