// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pagewalk.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
#include <linux/oom.h>
#include <linux/memory.h>
#include <linux/random.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"

int isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct address_space *mapping;

	/*
	 * Avoid burning cycles with pages that are still under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount, preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leak.
	 */
	if (unlikely(!get_page_unless_zero(page)))
		goto out;

	/*
	 * Check PageMovable before holding a PG_lock because page's owner
	 * assumes that nobody touches the PG_lock of a newly allocated page,
	 * so unconditionally grabbing the lock ruins the page owner's side.
	 */
	if (unlikely(!__PageMovable(page)))
		goto out_putpage;
	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as against the release of a page.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!trylock_page(page)))
		goto out_putpage;

	if (!PageMovable(page) || PageIsolated(page))
		goto out_no_isolated;

	mapping = page_mapping(page);
	VM_BUG_ON_PAGE(!mapping, page);

	if (!mapping->a_ops->isolate_page(page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use PG_isolated bit of page->flags */
	WARN_ON_ONCE(PageIsolated(page));
	__SetPageIsolated(page);
	unlock_page(page);

	return 0;

out_no_isolated:
	unlock_page(page);
out_putpage:
	put_page(page);
out:
	return -EBUSY;
}

static void putback_movable_page(struct page *page)
{
	struct address_space *mapping;

	mapping = page_mapping(page);
	mapping->a_ops->putback_page(page);
	__ClearPageIsolated(page);
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See
 * isolate_migratepages_range() and isolate_huge_page().
 */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		/*
		 * We isolated a non-LRU movable page, so here we can use
		 * __PageMovable because an LRU page's mapping cannot have
		 * PAGE_MAPPING_MOVABLE.
		 */
		if (unlikely(__PageMovable(page))) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);
			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				__ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		} else {
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_lru(page), -thp_nr_pages(page));
			putback_lru_page(page);
		}
	}
}
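
/*
 * Illustrative sketch (not part of the original file): a compaction-style
 * caller is expected to pair isolate_movable_page() with
 * putback_movable_pages() when some pages could not be migrated. The list
 * handling below is a simplified, hypothetical outline:
 *
 *	LIST_HEAD(pagelist);
 *
 *	if (!isolate_movable_page(page, ISOLATE_UNEVICTABLE))
 *		list_add(&page->lru, &pagelist);
 *	...
 *	if (migrate_pages(&pagelist, ...))	// some pages were not migrated
 *		putback_movable_pages(&pagelist);
 */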

/*
 * Restore a potential migration pte to a working pte entry.
 */
static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct page_vma_mapped_walk pvmw = {
		.page = old,
		.vma = vma,
		.address = addr,
		.flags = PVMW_SYNC | PVMW_MIGRATION,
	};
	struct page *new;
	pte_t pte;
	swp_entry_t entry;

	VM_BUG_ON_PAGE(PageTail(page), page);
	while (page_vma_mapped_walk(&pvmw)) {
		if (PageKsm(page))
			new = page;
		else
			new = page - pvmw.page->index +
				linear_page_index(vma, pvmw.address);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		get_page(new);
		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
		if (pte_swp_soft_dirty(*pvmw.pte))
			pte = pte_mksoft_dirty(pte);

		/*
		 * Recheck VMA as permissions can change since migration started.
		 */
		entry = pte_to_swp_entry(*pvmw.pte);
		if (is_writable_migration_entry(entry))
			pte = maybe_mkwrite(pte, vma);
		else if (pte_swp_uffd_wp(*pvmw.pte))
			pte = pte_mkuffd_wp(pte);

		if (unlikely(is_device_private_page(new))) {
			if (pte_write(pte))
				entry = make_writable_device_private_entry(
							page_to_pfn(new));
			else
				entry = make_readable_device_private_entry(
							page_to_pfn(new));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(*pvmw.pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(*pvmw.pte))
				pte = pte_swp_mkuffd_wp(pte);
		}

#ifdef CONFIG_HUGETLB_PAGE
		if (PageHuge(new)) {
			unsigned int shift = huge_page_shift(hstate_vma(vma));

			pte = pte_mkhuge(pte);
			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
			if (PageAnon(new))
				hugepage_add_anon_rmap(new, vma, pvmw.address);
			else
				page_dup_rmap(new, true);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		} else
#endif
		{
			if (PageAnon(new))
				page_add_anon_rmap(new, vma, pvmw.address, false);
			else
				page_add_file_rmap(new, false);
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		}
		if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
			mlock_vma_page(new);

		if (PageTransHuge(page) && PageMlocked(page))
			clear_page_mlock(page);

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct page *old, struct page *new, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = old,
	};

	if (locked)
		rmap_walk_locked(new, &rwc);
	else
		rmap_walk(new, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	migration_entry_wait_on_locked(entry, ptep, ptl);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);
	__migration_entry_wait(mm, ptep, ptl);
}

void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
	__migration_entry_wait(mm, pte, ptl);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl);
	return;
unlock:
	spin_unlock(ptl);
}
#endif
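
/*
 * Illustrative sketch (not from this file): the page fault path is the
 * typical consumer of migration_entry_wait(). In do_swap_page()-like code,
 * a migration entry found in the PTE makes the fault sleep until migration
 * completes, and the fault is then retried:
 *
 *	entry = pte_to_swp_entry(vmf->orig_pte);
 *	if (is_migration_entry(entry)) {
 *		migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
 *		return 0;	// retried after migration finishes
 *	}
 */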

static int expected_page_refs(struct address_space *mapping, struct page *page)
{
	int expected_count = 1;

	/*
	 * Device private pages have an extra refcount as they are
	 * ZONE_DEVICE pages.
	 */
	expected_count += is_device_private_page(page);
	if (mapping)
		expected_count += compound_nr(page) + page_has_private(page);

	return expected_count;
}

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = expected_page_refs(mapping, &folio->page) + extra_count;
	long nr = folio_nr_pages(folio);

	if (!mapping) {
		/* Anonymous page without mapping */
		if (folio_ref_count(folio) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newfolio->index = folio->index;
		newfolio->mapping = folio->mapping;
		if (folio_test_swapbacked(folio))
			__folio_set_swapbacked(newfolio);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = folio_zone(folio);
	newzone = folio_zone(newfolio);

	xas_lock_irq(&xas);
	if (!folio_ref_freeze(folio, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the folio:
	 * no turning back from here.
	 */
	newfolio->index = folio->index;
	newfolio->mapping = folio->mapping;
	folio_ref_add(newfolio, nr); /* add cache reference */
	if (folio_test_swapbacked(folio)) {
		__folio_set_swapbacked(newfolio);
		if (folio_test_swapcache(folio)) {
			folio_set_swapcache(newfolio);
			newfolio->private = folio_get_private(folio);
		}
	} else {
		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = folio_test_dirty(folio);
	if (dirty) {
		folio_clear_dirty(folio);
		folio_set_dirty(newfolio);
	}

	xas_store(&xas, newfolio);

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	folio_ref_unfreeze(folio, expected_count - nr);

	xas_unlock(&xas);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		struct lruvec *old_lruvec, *new_lruvec;
		struct mem_cgroup *memcg;

		memcg = folio_memcg(folio);
		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);

		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
		}
#ifdef CONFIG_SWAP
		if (folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
		}
#endif
		if (dirty && mapping_can_writeback(mapping)) {
			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(folio_migrate_mapping);

/*
 * The expected number of remaining references is the same as that
 * of folio_migrate_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	XA_STATE(xas, &mapping->i_pages, page_index(page));
	int expected_count;

	xas_lock_irq(&xas);
	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count || xas_load(&xas) != page) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	newpage->index = page->index;
	newpage->mapping = page->mapping;

	get_page(newpage);

	xas_store(&xas, newpage);

	page_ref_unfreeze(page, expected_count - 1);

	xas_unlock_irq(&xas);

	return MIGRATEPAGE_SUCCESS;
}
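
/*
 * Illustrative sketch (hypothetical, not part of this file): a minimal
 * address_space_operations ->migratepage implementation for a filesystem
 * without private page data would follow the same pattern as migrate_page()
 * further below - replace the page in the mapping, then transfer state:
 *
 *	static int myfs_migratepage(struct address_space *mapping,
 *			struct page *newpage, struct page *page,
 *			enum migrate_mode mode)
 *	{
 *		struct folio *newfolio = page_folio(newpage);
 *		struct folio *folio = page_folio(page);
 *		int rc;
 *
 *		rc = folio_migrate_mapping(mapping, newfolio, folio, 0);
 *		if (rc != MIGRATEPAGE_SUCCESS)
 *			return rc;
 *		folio_migrate_copy(newfolio, folio);
 *		return MIGRATEPAGE_SUCCESS;
 *	}
 */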

/*
 * Copy the flags and some other ancillary information
 */
void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
{
	int cpupid;

	if (folio_test_error(folio))
		folio_set_error(newfolio);
	if (folio_test_referenced(folio))
		folio_set_referenced(newfolio);
	if (folio_test_uptodate(folio))
		folio_mark_uptodate(newfolio);
	if (folio_test_clear_active(folio)) {
		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
		folio_set_active(newfolio);
	} else if (folio_test_clear_unevictable(folio))
		folio_set_unevictable(newfolio);
	if (folio_test_workingset(folio))
		folio_set_workingset(newfolio);
	if (folio_test_checked(folio))
		folio_set_checked(newfolio);
	if (folio_test_mappedtodisk(folio))
		folio_set_mappedtodisk(newfolio);

	/* Move dirty on pages not done by folio_migrate_mapping() */
	if (folio_test_dirty(folio))
		folio_set_dirty(newfolio);

	if (folio_test_young(folio))
		folio_set_young(newfolio);
	if (folio_test_idle(folio))
		folio_set_idle(newfolio);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(&folio->page, -1);
	page_cpupid_xchg_last(&newfolio->page, cpupid);

	folio_migrate_ksm(newfolio, folio);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (folio_test_swapcache(folio))
		folio_clear_swapcache(folio);
	folio_clear_private(folio);

	/* page->private contains hugetlb specific flags */
	if (!folio_test_hugetlb(folio))
		folio->private = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (folio_test_writeback(newfolio))
		folio_end_writeback(newfolio);

	/*
	 * PG_readahead shares the same bit with PG_reclaim. The above
	 * end_page_writeback() may clear PG_readahead mistakenly, so set the
	 * bit after that.
	 */
	if (folio_test_readahead(folio))
		folio_set_readahead(newfolio);

	folio_copy_owner(newfolio, folio);

	if (!folio_test_hugetlb(folio))
		mem_cgroup_migrate(folio, newfolio);
}
EXPORT_SYMBOL(folio_migrate_flags);

void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
{
	folio_copy(newfolio, folio);
	folio_migrate_flags(newfolio, folio);
}
EXPORT_SYMBOL(folio_migrate_copy);

/************************************************************
 *                    Migration functions
 ***********************************************************/

/*
 * Common logic to directly migrate a single LRU page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	struct folio *newfolio = page_folio(newpage);
	struct folio *folio = page_folio(page);
	int rc;

	BUG_ON(folio_test_writeback(folio));	/* Writeback must be complete */

	rc = folio_migrate_mapping(mapping, newfolio, folio, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(newfolio, folio);
	else
		folio_migrate_flags(newfolio, folio);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);

#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks.
			 */
			struct buffer_head *failed_bh = bh;
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}

static int __buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode,
		bool check_refs)
{
	struct buffer_head *bh, *head;
	int rc;
	int expected_count;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page, mode);

	/* Check whether the page has extra refs before we do more work */
	expected_count = expected_page_refs(mapping, page);
	if (page_count(page) != expected_count)
		return -EAGAIN;

	head = page_buffers(page);
	if (!buffer_migrate_lock_buffers(head, mode))
		return -EAGAIN;

	if (check_refs) {
		bool busy;
		bool invalidated = false;

recheck_buffers:
		busy = false;
		spin_lock(&mapping->private_lock);
		bh = head;
		do {
			if (atomic_read(&bh->b_count)) {
				busy = true;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
		if (busy) {
			if (invalidated) {
				rc = -EAGAIN;
				goto unlock_buffers;
			}
			spin_unlock(&mapping->private_lock);
			invalidate_bh_lrus();
			invalidated = true;
			goto recheck_buffers;
		}
	}

	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		goto unlock_buffers;

	attach_page_private(newpage, detach_page_private(page));

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
	if (check_refs)
		spin_unlock(&mapping->private_lock);
	bh = head;
	do {
		unlock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return rc;
}

/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist. For example, attached buffer heads are accessed only under page lock.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	return __buffer_migrate_page(mapping, newpage, page, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_page);
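
/*
 * Usage note (sketch, not from this file): block-device-backed filesystems
 * typically wire this helper up in their address_space_operations, e.g.
 *
 *	const struct address_space_operations myfs_aops = {
 *		...
 *		.migratepage	= buffer_migrate_page,
 *	};
 *
 * while the block device mapping itself, where buffer heads are looked up
 * and referenced directly, uses buffer_migrate_page_norefs() below.
 * "myfs_aops" is a hypothetical name.
 */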

/*
 * Same as above except that this variant is more careful and checks that
 * there are also no buffer head references. This function is the right one
 * for mappings where buffer heads are directly looked up and referenced
 * (such as block device mappings).
 */
int buffer_migrate_page_norefs(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	return __buffer_migrate_page(mapping, newpage, page, mode, true);
}
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page, false);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page, enum migrate_mode mode)
{
	if (PageDirty(page)) {
		/* Only writeback pages in full synchronous migration */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return -EBUSY;
		}
		return writeout(mapping, page);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

	return migrate_page(mapping, newpage, page, mode);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc = -EAGAIN;
	bool is_lru = !__PageMovable(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	mapping = page_mapping(page);

	if (likely(is_lru)) {
		if (!mapping)
			rc = migrate_page(mapping, newpage, page, mode);
		else if (mapping->a_ops->migratepage)
			/*
			 * Most pages have a mapping and most filesystems
			 * provide a migratepage callback. Anonymous pages
			 * are part of swap space which also has its own
			 * migratepage callback. This is the most common path
			 * for page migration.
			 */
			rc = mapping->a_ops->migratepage(mapping, newpage,
							page, mode);
		else
			rc = fallback_migrate_page(mapping, newpage,
							page, mode);
	} else {
		/*
		 * A non-LRU page could be released after the isolation
		 * step. In that case, we shouldn't try migration.
		 */
		VM_BUG_ON_PAGE(!PageIsolated(page), page);
		if (!PageMovable(page)) {
			rc = MIGRATEPAGE_SUCCESS;
			__ClearPageIsolated(page);
			goto out;
		}

		rc = mapping->a_ops->migratepage(mapping, newpage,
						page, mode);
		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
			!PageIsolated(page));
	}

	/*
	 * When successful, old pagecache page->mapping must be cleared before
	 * page is freed; but stats require that PageAnon be left as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (__PageMovable(page)) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);

			/*
			 * We clear PG_movable under page_lock so any compactor
			 * cannot try to migrate this page.
			 */
			__ClearPageIsolated(page);
		}

		/*
		 * Anonymous and movable page->mapping will be cleared by
		 * free_pages_prepare so don't reset it here for keeping
		 * the type to work PageAnon, for example.
		 */
		if (!PageMappingFlags(page))
			page->mapping = NULL;

		if (likely(!is_zone_device_page(newpage)))
			flush_dcache_page(newpage);

	}
out:
	return rc;
}
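
/*
 * Reference note (summarized from include/linux/migrate_mode.h, not part of
 * the original text here): the migrate_mode argument used throughout this
 * file controls how much blocking is allowed:
 *
 *	MIGRATE_ASYNC		never block
 *	MIGRATE_SYNC_LIGHT	allow blocking on most operations, but not
 *				writeout
 *	MIGRATE_SYNC		allow blocking, including writeout
 *	MIGRATE_SYNC_NO_COPY	allow blocking, but do not copy the page
 *				contents with the CPU (the caller, e.g.
 *				device memory migration, copies the data)
 */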

static int __unmap_and_move(struct page *page, struct page *newpage,
				int force, enum migrate_mode mode)
{
	int rc = -EAGAIN;
	bool page_was_mapped = false;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__PageMovable(page);

	if (!trylock_page(page)) {
		if (!force || mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readahead). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		lock_page(page);
	}

	if (PageWriteback(page)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much.
		 */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			rc = -EBUSY;
			goto out_unlock;
		}
		if (!force)
			goto out_unlock;
		wait_on_page_writeback(page);
	}

	/*
	 * By try_to_migrate(), page->mapcount goes down to 0 here. In this
	 * case, we cannot notice that anon_vma is freed while we migrate a
	 * page. This get_anon_vma() delays freeing the anon_vma pointer
	 * until the end of migration. File cache pages are no problem
	 * because of page_lock(): file caches may use write_page() or
	 * lock_page() during migration, so we only need to care about anon
	 * pages here.
	 *
	 * Only page_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside one of its mms.
	 * But if we cannot get anon_vma, then we won't need it anyway,
	 * because that implies that the anon page is no longer mapped
	 * (and cannot be remapped so long as we hold the page lock).
	 */
	if (PageAnon(page) && !PageKsm(page))
		anon_vma = page_get_anon_vma(page);

	/*
	 * Block others from accessing the new page when we get around to
	 * establishing additional references. We are usually the only one
	 * holding a reference to newpage at this point. We used to have a BUG
	 * here if trylock_page(newpage) fails, but would like to allow for
	 * cases where there might be a race with the previous use of newpage.
	 * This is much like races on refcount of oldpage: just don't BUG().
	 */
	if (unlikely(!trylock_page(newpage)))
		goto out_unlock;

	if (unlikely(!is_lru)) {
		rc = move_to_new_page(newpage, page, mode);
		goto out_unlock_both;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_cleanup_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON_PAGE(PageAnon(page), page);
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto out_unlock_both;
		}
	} else if (page_mapped(page)) {
		/* Establish migration ptes */
		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
				page);
		try_to_migrate(page, 0);
		page_was_mapped = true;
	}

	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, mode);

	if (page_was_mapped)
		remove_migration_ptes(page,
			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);

out_unlock_both:
	unlock_page(newpage);
out_unlock:
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	unlock_page(page);
out:
	/*
	 * If migration is successful, decrease the refcount of the newpage,
	 * which will not free the page because the new page owner increased
	 * the refcount. As well, if it is an LRU page, add the page to the
	 * LRU list here. Use the old state of the isolated source page to
	 * determine if we migrated an LRU page. newpage was already unlocked
	 * and possibly modified by its owner - don't rely on the page
	 * state.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (unlikely(!is_lru))
			put_page(newpage);
		else
			putback_lru_page(newpage);
	}

	return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page,
				   free_page_t put_new_page,
				   unsigned long private, struct page *page,
				   int force, enum migrate_mode mode,
				   enum migrate_reason reason,
				   struct list_head *ret)
{
	int rc = MIGRATEPAGE_SUCCESS;
	struct page *newpage = NULL;

	if (!thp_migration_supported() && PageTransHuge(page))
		return -ENOSYS;

	if (page_count(page) == 1) {
		/* Page was freed from under us. So we are done. */
		ClearPageActive(page);
		ClearPageUnevictable(page);
		if (unlikely(__PageMovable(page))) {
			lock_page(page);
			if (!PageMovable(page))
				__ClearPageIsolated(page);
			unlock_page(page);
		}
		goto out;
	}

	newpage = get_new_page(page, private);
	if (!newpage)
		return -ENOMEM;

	rc = __unmap_and_move(page, newpage, force, mode);
	if (rc == MIGRATEPAGE_SUCCESS)
		set_page_owner_migrate_reason(newpage, reason);

out:
	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be restored.
		 */
		list_del(&page->lru);
	}

	/*
	 * If migration is successful, release the reference grabbed during
	 * isolation. Otherwise, restore the page to the right list unless
	 * we want to retry.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		/*
		 * Compaction can also migrate non-LRU pages, which are
		 * not accounted to NR_ISOLATED_*. They can be recognized
		 * as __PageMovable.
		 */
		if (likely(!__PageMovable(page)))
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_lru(page), -thp_nr_pages(page));

		if (reason != MR_MEMORY_FAILURE)
			/*
			 * We release the page in page_handle_poison.
			 */
			put_page(page);
	} else {
		if (rc != -EAGAIN)
			list_add_tail(&page->lru, ret);

		if (put_new_page)
			put_new_page(newpage, private);
		else
			put_page(newpage);
	}

	return rc;
}

/*
 * Counterpart of unmap_and_move() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepages.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under
 * migration, because then the pte is replaced with a migration swap entry
 * and direct I/O code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				free_page_t put_new_page, unsigned long private,
				struct page *hpage, int force,
				enum migrate_mode mode, int reason,
				struct list_head *ret)
{
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct page *new_hpage;
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;

	/*
	 * Migratability of hugepages depends on architectures and their size.
	 * This check is necessary because some callers of hugepage migration
	 * like soft offline and memory hotremove don't walk through page
	 * tables or check whether the hugepage is pmd-based or not before
	 * kicking migration.
	 */
	if (!hugepage_migration_supported(page_hstate(hpage))) {
		list_move_tail(&hpage->lru, ret);
		return -ENOSYS;
	}

	if (page_count(hpage) == 1) {
		/* page was freed from under us. So we are done. */
		putback_active_hugepage(hpage);
		return MIGRATEPAGE_SUCCESS;
	}

	new_hpage = get_new_page(hpage, private);
	if (!new_hpage)
		return -ENOMEM;

	if (!trylock_page(hpage)) {
		if (!force)
			goto out;
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			goto out;
		}
		lock_page(hpage);
	}

	/*
	 * Check for pages which are in the process of being freed. Without
	 * page_mapping() set, the hugetlbfs specific move page routine will
	 * not be called and we could leak usage counts for subpools.
	 */
	if (hugetlb_page_subpool(hpage) && !page_mapping(hpage)) {
		rc = -EBUSY;
		goto out_unlock;
	}

	if (PageAnon(hpage))
		anon_vma = page_get_anon_vma(hpage);

	if (unlikely(!trylock_page(new_hpage)))
		goto put_anon;

	if (page_mapped(hpage)) {
		bool mapping_locked = false;
		enum ttu_flags ttu = 0;

		if (!PageAnon(hpage)) {
			/*
			 * In shared mappings, try_to_unmap could potentially
			 * call huge_pmd_unshare. Because of this, take
			 * semaphore in write mode here and set TTU_RMAP_LOCKED
			 * to let lower levels know we have taken the lock.
			 */
			mapping = hugetlb_page_mapping_lock_write(hpage);
			if (unlikely(!mapping))
				goto unlock_put_anon;

			mapping_locked = true;
			ttu |= TTU_RMAP_LOCKED;
		}

		try_to_migrate(hpage, ttu);
		page_was_mapped = 1;

		if (mapping_locked)
			i_mmap_unlock_write(mapping);
	}

	if (!page_mapped(hpage))
		rc = move_to_new_page(new_hpage, hpage, mode);

	if (page_was_mapped)
		remove_migration_ptes(hpage,
			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);

unlock_put_anon:
	unlock_page(new_hpage);

put_anon:
	if (anon_vma)
		put_anon_vma(anon_vma);

	if (rc == MIGRATEPAGE_SUCCESS) {
		move_hugetlb_state(hpage, new_hpage, reason);
		put_new_page = NULL;
	}

out_unlock:
	unlock_page(hpage);
out:
	if (rc == MIGRATEPAGE_SUCCESS)
		putback_active_hugepage(hpage);
	else if (rc != -EAGAIN)
		list_move_tail(&hpage->lru, ret);

	/*
	 * If migration was not successful and there's a freeing callback, use
	 * it. Otherwise, put_page() will drop the reference grabbed during
	 * isolation.
	 */
	if (put_new_page)
		put_new_page(new_hpage, private);
	else
		putback_active_hugepage(new_hpage);

	return rc;
}

static inline int try_split_thp(struct page *page, struct page **page2,
				struct list_head *from)
{
	int rc = 0;

	lock_page(page);
	rc = split_huge_page_to_list(page, from);
	unlock_page(page);
	if (!rc)
		list_safe_reset_next(page, *page2, lru);

	return rc;
}

/*
 * migrate_pages - migrate the pages specified in a list, to the free pages
 *		   supplied as the target for the page migration
 *
 * @from:		The list of pages to be migrated.
 * @get_new_page:	The function used to allocate free pages to be used
 *			as the target of the page migration.
 * @put_new_page:	The function used to free target pages if migration
 *			fails, or NULL if no special handling is necessary.
 * @private:		Private data to be passed on to get_new_page()
 * @mode:		The migration mode that specifies the constraints for
 *			page migration, if any.
 * @reason:		The reason for page migration.
 * @ret_succeeded:	Set to the number of normal pages migrated successfully if
 *			the caller passes a non-NULL pointer.
 *
 * The function returns after 10 attempts or if no pages are movable any more
 * because the list has become empty or no retryable pages remain. It is the
 * caller's responsibility to call putback_movable_pages() to return pages
 * to the LRU or free list only if ret != 0.
 *
 * Returns the number of {normal pages, THPs, hugetlb pages} that were not
 * migrated, or an error code. Split THPs are counted as non-migrated THPs,
 * no matter how many of their subpages are migrated successfully.
 */
int migrate_pages(struct list_head *from, new_page_t get_new_page,
		free_page_t put_new_page, unsigned long private,
		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
{
	int retry = 1;
	int thp_retry = 1;
	int nr_failed = 0;
	int nr_failed_pages = 0;
	int nr_succeeded = 0;
	int nr_thp_succeeded = 0;
	int nr_thp_failed = 0;
	int nr_thp_split = 0;
	int pass = 0;
	bool is_thp = false;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc, nr_subpages;
	LIST_HEAD(ret_pages);
	LIST_HEAD(thp_split_pages);
	bool nosplit = (reason == MR_NUMA_MISPLACED);
	bool no_subpage_counting = false;

	trace_mm_migrate_pages_start(mode, reason);

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

thp_subpage_migration:
	for (pass = 0; pass < 10 && (retry || thp_retry); pass++) {
		retry = 0;
		thp_retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
retry:
			/*
			 * THP statistics are based on the source huge page.
			 * Capture required information that might get lost
			 * during migration.
			 */
			is_thp = PageTransHuge(page) && !PageHuge(page);
			nr_subpages = compound_nr(page);
			cond_resched();

			if (PageHuge(page))
				rc = unmap_and_move_huge_page(get_new_page,
						put_new_page, private, page,
						pass > 2, mode, reason,
						&ret_pages);
			else
				rc = unmap_and_move(get_new_page, put_new_page,
						private, page, pass > 2, mode,
						reason, &ret_pages);
			/*
			 * The rules are:
			 *	Success: non hugetlb page will be freed, hugetlb
			 *		 page will be put back
			 *	-EAGAIN: stay on the from list
			 *	-ENOMEM: stay on the from list
			 *	Other errno: put on ret_pages list then splice to
			 *		     from list
			 */
			switch(rc) {
			/*
			 * THP migration might be unsupported or the
			 * allocation could've failed, so we should
			 * retry on the same page with the THP split
			 * to base pages.
			 *
			 * The head page is retried immediately and tail
			 * pages are added to the tail of the list so
			 * we encounter them after the rest of the list
			 * is processed.
			 */
			case -ENOSYS:
				/* THP migration is unsupported */
				if (is_thp) {
					nr_thp_failed++;
					if (!try_split_thp(page, &page2, &thp_split_pages)) {
						nr_thp_split++;
						goto retry;
					}

					nr_failed_pages += nr_subpages;
					break;
				}

				/* Hugetlb migration is unsupported */
				if (!no_subpage_counting)
					nr_failed++;
				nr_failed_pages += nr_subpages;
				break;
			case -ENOMEM:
				/*
				 * When memory is low, don't bother to try to migrate
				 * other pages, just exit.
				 * THP NUMA faulting doesn't split THP to retry.
				 */
				if (is_thp && !nosplit) {
					nr_thp_failed++;
					if (!try_split_thp(page, &page2, &thp_split_pages)) {
						nr_thp_split++;
						goto retry;
					}

					nr_failed_pages += nr_subpages;
					goto out;
				}

				if (!no_subpage_counting)
					nr_failed++;
				nr_failed_pages += nr_subpages;
				goto out;
			case -EAGAIN:
				if (is_thp) {
					thp_retry++;
					break;
				}
				retry++;
				break;
			case MIGRATEPAGE_SUCCESS:
				nr_succeeded += nr_subpages;
				if (is_thp) {
					nr_thp_succeeded++;
					break;
				}
				break;
			default:
				/*
				 * Permanent failure (-EBUSY, etc.):
				 * unlike the -EAGAIN case, the failed page is
				 * removed from the migration page list and not
				 * retried in the next outer loop.
				 */
				if (is_thp) {
					nr_thp_failed++;
					nr_failed_pages += nr_subpages;
					break;
				}

				if (!no_subpage_counting)
					nr_failed++;
				nr_failed_pages += nr_subpages;
				break;
			}
		}
	}
	nr_failed += retry;
	nr_thp_failed += thp_retry;
	/*
	 * Try to migrate the subpages of THPs that failed to migrate. Don't
	 * count them in nr_failed this round, since all subpages of a THP
	 * were already counted as one failure in the first round.
	 */
	if (!list_empty(&thp_split_pages)) {
		/*
		 * Move non-migrated pages (after 10 retries) to ret_pages
		 * to avoid migrating them again.
		 */
		list_splice_init(from, &ret_pages);
		list_splice_init(&thp_split_pages, from);
		no_subpage_counting = true;
		retry = 1;
		goto thp_subpage_migration;
	}

	rc = nr_failed + nr_thp_failed;
out:
	/*
	 * Put pages that failed permanently back on the migration list; they
	 * will be put back on the right list by the caller.
	 */
	list_splice(&ret_pages, from);

	count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
	count_vm_events(PGMIGRATE_FAIL, nr_failed_pages);
	count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
	count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed);
	count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split);
	trace_mm_migrate_pages(nr_succeeded, nr_failed_pages, nr_thp_succeeded,
			       nr_thp_failed, nr_thp_split, mode, reason);

	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	if (ret_succeeded)
		*ret_succeeded = nr_succeeded;

	return rc;
}

struct page *alloc_migration_target(struct page *page, unsigned long private)
{
	struct migration_target_control *mtc;
	gfp_t gfp_mask;
	unsigned int order = 0;
	struct page *new_page = NULL;
	int nid;
	int zidx;

	mtc = (struct migration_target_control *)private;
	gfp_mask = mtc->gfp_mask;
	nid = mtc->nid;
	if (nid == NUMA_NO_NODE)
		nid = page_to_nid(page);

	if (PageHuge(page)) {
		struct hstate *h = page_hstate(compound_head(page));

		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
	}

	if (PageTransHuge(page)) {
		/*
		 * clear __GFP_RECLAIM to make the migration callback
		 * consistent with regular THP allocations.
		 */
		gfp_mask &= ~__GFP_RECLAIM;
		gfp_mask |= GFP_TRANSHUGE;
		order = HPAGE_PMD_ORDER;
	}
	zidx = zone_idx(page_zone(page));
	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
		gfp_mask |= __GFP_HIGHMEM;

	new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);

	if (new_page && PageTransHuge(new_page))
		prep_transhuge_page(new_page);

	return new_page;
}

#ifdef CONFIG_NUMA

static int store_status(int __user *status, int start, int value, int nr)
{
	while (nr-- > 0) {
		if (put_user(value, status + start))
			return -EFAULT;
		start++;
	}

	return 0;
}

static int do_move_pages_to_node(struct mm_struct *mm,
		struct list_head *pagelist, int node)
{
	int err;
	struct migration_target_control mtc = {
		.nid = node,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};

	err = migrate_pages(pagelist, alloc_migration_target, NULL,
		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
	if (err)
		putback_movable_pages(pagelist);
	return err;
}
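
/*
 * Illustrative sketch (modeled on callers such as memory hotplug; the
 * details here are hypothetical): isolate pages onto a private list, then
 * hand them to migrate_pages() with alloc_migration_target() as allocator:
 *
 *	LIST_HEAD(pagelist);
 *	struct migration_target_control mtc = {
 *		.nid	  = NUMA_NO_NODE,	// pick a node per page
 *		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
 *	};
 *	int ret;
 *
 *	// ... isolate_lru_page()/isolate_huge_page() onto pagelist ...
 *	ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			(unsigned long)&mtc, MIGRATE_SYNC,
 *			MR_MEMORY_HOTPLUG, NULL);
 *	if (ret)
 *		putback_movable_pages(&pagelist);
 */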

/*
 * Resolves the given address to a struct page, isolates it from the LRU and
 * adds it to the given pagelist.
 * Returns:
 *     errno - if the page cannot be found/isolated
 *     0 - when it doesn't have to be migrated because it is already on the
 *         target node
 *     1 - when it has been queued
 */
static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
		int node, struct list_head *pagelist, bool migrate_all)
{
	struct vm_area_struct *vma;
	struct page *page;
	unsigned int follflags;
	int err;

	mmap_read_lock(mm);
	err = -EFAULT;
	vma = find_vma(mm, addr);
	if (!vma || addr < vma->vm_start || !vma_migratable(vma))
		goto out;

	/* FOLL_DUMP to ignore special (like zero) pages */
	follflags = FOLL_GET | FOLL_DUMP;
	page = follow_page(vma, addr, follflags);

	err = PTR_ERR(page);
	if (IS_ERR(page))
		goto out;

	err = -ENOENT;
	if (!page)
		goto out;

	err = 0;
	if (page_to_nid(page) == node)
		goto out_putpage;

	err = -EACCES;
	if (page_mapcount(page) > 1 && !migrate_all)
		goto out_putpage;

	if (PageHuge(page)) {
		if (PageHead(page)) {
			isolate_huge_page(page, pagelist);
			err = 1;
		}
	} else {
		struct page *head;

		head = compound_head(page);
		err = isolate_lru_page(head);
		if (err)
			goto out_putpage;

		err = 1;
		list_add_tail(&head->lru, pagelist);
		mod_node_page_state(page_pgdat(head),
			NR_ISOLATED_ANON + page_is_file_lru(head),
			thp_nr_pages(head));
	}
out_putpage:
	/*
	 * Either remove the duplicate refcount from
	 * isolate_lru_page() or drop the page ref if it was
	 * not isolated.
	 */
	put_page(page);
out:
	mmap_read_unlock(mm);
	return err;
}

static int move_pages_and_store_status(struct mm_struct *mm, int node,
		struct list_head *pagelist, int __user *status,
		int start, int i, unsigned long nr_pages)
{
	int err;

	if (list_empty(pagelist))
		return 0;

	err = do_move_pages_to_node(mm, pagelist, node);
	if (err) {
		/*
		 * A positive err means the number of pages that failed to
		 * migrate. Since we are going to abort and return the
		 * number of non-migrated pages, we need to include the
		 * rest of the nr_pages that have not been attempted as
		 * well.
		 */
		if (err > 0)
			err += nr_pages - i - 1;
		return err;
	}
	return store_status(status, start, node, i - start);
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * the corresponding array of status values.
 */
static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	int current_node = NUMA_NO_NODE;
	LIST_HEAD(pagelist);
	int start, i;
	int err = 0, err1;

	lru_cache_disable();

	for (i = start = 0; i < nr_pages; i++) {
		const void __user *p;
		unsigned long addr;
		int node;

		err = -EFAULT;
		if (get_user(p, pages + i))
			goto out_flush;
		if (get_user(node, nodes + i))
			goto out_flush;
		addr = (unsigned long)untagged_addr(p);

		err = -ENODEV;
		if (node < 0 || node >= MAX_NUMNODES)
			goto out_flush;
		if (!node_state(node, N_MEMORY))
			goto out_flush;

		err = -EACCES;
		if (!node_isset(node, task_nodes))
			goto out_flush;

		if (current_node == NUMA_NO_NODE) {
			current_node = node;
			start = i;
		} else if (node != current_node) {
			err = move_pages_and_store_status(mm, current_node,
					&pagelist, status, start, i, nr_pages);
			if (err)
				goto out;
			start = i;
			current_node = node;
		}

		/*
		 * Errors in the page lookup or isolation are not fatal and we
		 * simply report them via status.
		 */
		err = add_page_for_migration(mm, addr, current_node,
				&pagelist, flags & MPOL_MF_MOVE_ALL);

		if (err > 0) {
			/* The page is successfully queued for migration */
			continue;
		}

		/*
		 * If the page is already on the target node (!err), store the
		 * node, otherwise, store the err.
		 */
		err = store_status(status, i, err ? : current_node, 1);
		if (err)
			goto out_flush;

		err = move_pages_and_store_status(mm, current_node, &pagelist,
				status, start, i, nr_pages);
		if (err)
			goto out;
		current_node = NUMA_NO_NODE;
	}
out_flush:
	/* Make sure we do not overwrite the existing error */
	err1 = move_pages_and_store_status(mm, current_node, &pagelist,
				status, start, i, nr_pages);
	if (err >= 0)
		err = err1;
out:
	lru_cache_enable();
	return err;
}
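
/*
 * Userspace view (illustrative, not from this file): the code above backs
 * the move_pages(2) system call. A task migrating one of its own pages to
 * node 1 might issue, via the raw syscall:
 *
 *	void *pages[1] = { addr };	// page-aligned user address
 *	int nodes[1] = { 1 };
 *	int status[1];
 *
 *	long rc = syscall(SYS_move_pages, 0, 1, pages, nodes,
 *			  status, MPOL_MF_MOVE);
 *
 * pid 0 means the calling process (see find_mm_struct() below). On return,
 * status[0] holds the node the page now resides on, or a negative errno
 * for that page.
 */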

/*
 * Determine the nodes of an array of pages and store them in an array of
 * status values.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	mmap_read_lock(mm);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = vma_lookup(mm, addr);
		if (!vma)
			goto set_status;

		/* FOLL_DUMP to ignore special (like zero) pages */
		page = follow_page(vma, addr, FOLL_DUMP);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = page ? page_to_nid(page) : -ENOENT;
set_status:
		*status = err;

		pages++;
		status++;
	}

	mmap_read_unlock(mm);
}

static int get_compat_pages_array(const void __user *chunk_pages[],
				  const void __user * __user *pages,
				  unsigned long chunk_nr)
{
	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
	compat_uptr_t p;
	int i;

	for (i = 0; i < chunk_nr; i++) {
		if (get_user(p, pages32 + i))
			return -EFAULT;
		chunk_pages[i] = compat_ptr(p);
	}

	return 0;
}

/*
 * Determine the nodes of a user array of pages and store them in
 * a user array of status values.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr;

		chunk_nr = nr_pages;
		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
			chunk_nr = DO_PAGES_STAT_CHUNK_NR;

		if (in_compat_syscall()) {
			if (get_compat_pages_array(chunk_pages, pages,
						   chunk_nr))
				break;
		} else {
			if (copy_from_user(chunk_pages, pages,
				      chunk_nr * sizeof(*chunk_pages)))
				break;
		}

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
{
	struct task_struct *task;
	struct mm_struct *mm;

	/*
	 * There is no need to check if the current process has the right to
	 * modify the specified process when they are the same.
	 */
	if (!pid) {
		mmget(current->mm);
		*mem_nodes = cpuset_mems_allowed(current);
		return current->mm;
	}

	/* Find the mm_struct */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (!task) {
		rcu_read_unlock();
		return ERR_PTR(-ESRCH);
	}
	get_task_struct(task);

	/*
	 * Check if this process has the right to modify the specified
	 * process. Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		rcu_read_unlock();
		mm = ERR_PTR(-EPERM);
		goto out;
	}
	rcu_read_unlock();

	mm = ERR_PTR(security_task_movememory(task));
	if (IS_ERR(mm))
		goto out;
	*mem_nodes = cpuset_mems_allowed(task);
	mm = get_task_mm(task);
out:
	put_task_struct(task);
	if (!mm)
		mm = ERR_PTR(-EINVAL);
	return mm;
}
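
/*
 * Usage note (illustrative): passing nodes == NULL to move_pages(2) turns
 * it into a pure query - do_pages_stat() above fills status[] with the
 * node each page currently resides on, without migrating anything:
 *
 *	long rc = syscall(SYS_move_pages, pid, nr, pages, NULL, status, 0);
 */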

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
			     const void __user * __user *pages,
			     const int __user *nodes,
			     int __user *status, int flags)
{
	struct mm_struct *mm;
	int err;
	nodemask_t task_nodes;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	mm = find_mm_struct(pid, &task_nodes);
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	if (nodes)
		err = do_pages_move(mm, task_nodes, nr_pages, pages,
				    nodes, status, flags);
	else
		err = do_pages_stat(mm, nr_pages, pages, status);

	mmput(mm);
	return err;
}

SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * Returns true if this is a safe migration target node for misplaced NUMA
 * pages. Currently it only checks the watermarks, which is a crude check.
 */
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
				   unsigned long nr_migrate_pages)
{
	int z;

	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		struct zone *zone = pgdat->node_zones + z;

		if (!populated_zone(zone))
			continue;

		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
		if (!zone_watermark_ok(zone, 0,
				       high_wmark_pages(zone) +
				       nr_migrate_pages,
				       ZONE_MOVABLE, 0))
			continue;
		return true;
	}
	return false;
}

static struct page *alloc_misplaced_dst_page(struct page *page,
					     unsigned long data)
{
	int nid = (int) data;
	struct page *newpage;

	newpage = __alloc_pages_node(nid,
					 (GFP_HIGHUSER_MOVABLE |
					  __GFP_THISNODE | __GFP_NOMEMALLOC |
					  __GFP_NORETRY | __GFP_NOWARN) &
					 ~__GFP_RECLAIM, 0);

	return newpage;
}

static struct page *alloc_misplaced_dst_page_thp(struct page *page,
						 unsigned long data)
{
	int nid = (int) data;
	struct page *newpage;

	newpage = alloc_pages_node(nid, (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
				   HPAGE_PMD_ORDER);
	if (!newpage)
		goto out;

	prep_transhuge_page(newpage);

out:
	return newpage;
}

static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
	int page_lru;
	int nr_pages = thp_nr_pages(page);

	VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);

	/* Do not migrate THP mapped by multiple processes */
	if (PageTransHuge(page) && total_mapcount(page) > 1)
		return 0;

	/* Avoid migrating to a node that is nearly full */
	if (!migrate_balanced_pgdat(pgdat, nr_pages))
		return 0;

	if (isolate_lru_page(page))
		return 0;

	page_lru = page_is_file_lru(page);
	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
			    nr_pages);

	/*
	 * Isolating the page has taken another reference, so the
	 * caller's reference can be safely dropped without the page
	 * disappearing underneath us during migration.
	 */
	put_page(page);
	return 1;
}
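
/*
 * Caller context (summary, not part of the original text): the NUMA
 * balancing hinting-fault handlers, do_numa_page() in mm/memory.c and
 * do_huge_pmd_numa_page() in mm/huge_memory.c, call
 * migrate_misplaced_page() below with a target node chosen via
 * mpol_misplaced(), while holding a reference on the page.
 */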
/*
 * Attempt to migrate a misplaced page to the specified destination
 * node. Caller is expected to have an elevated reference count on
 * the page that will be dropped by this function before returning.
 */
int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
			   int node)
{
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated;
	int nr_remaining;
	LIST_HEAD(migratepages);
	new_page_t *new;
	bool compound;
	int nr_pages = thp_nr_pages(page);

	/*
	 * PTE mapped THP or HugeTLB page can't reach here, so the page is
	 * either a base page or a THP. And it must be a head page if it
	 * is a THP.
	 */
	compound = PageTransHuge(page);

	if (compound)
		new = alloc_misplaced_dst_page_thp;
	else
		new = alloc_misplaced_dst_page;

	/*
	 * Don't migrate file pages that are mapped in multiple processes
	 * with execute permissions as they are probably shared libraries.
	 */
	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
	    (vma->vm_flags & VM_EXEC))
		goto out;

	/*
	 * Also do not migrate dirty pages as not all filesystems can move
	 * dirty pages in MIGRATE_ASYNC mode, which would be a waste of
	 * cycles.
	 */
	if (page_is_file_lru(page) && PageDirty(page))
		goto out;

	isolated = numamigrate_isolate_page(pgdat, page);
	if (!isolated)
		goto out;

	list_add(&page->lru, &migratepages);
	nr_remaining = migrate_pages(&migratepages, *new, NULL, node,
				     MIGRATE_ASYNC, MR_NUMA_MISPLACED, NULL);
	if (nr_remaining) {
		if (!list_empty(&migratepages)) {
			list_del(&page->lru);
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_lru(page), -nr_pages);
			putback_lru_page(page);
		}
		isolated = 0;
	} else
		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_pages);
	BUG_ON(!list_empty(&migratepages));
	return isolated;

out:
	put_page(page);
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_NUMA */

#ifdef CONFIG_DEVICE_PRIVATE
static int migrate_vma_collect_skip(unsigned long start,
				    unsigned long end,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = 0;
	}

	return 0;
}
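/*
 * Editor's note (illustrative): the src/dst arrays used by all of the
 * migrate_vma_* helpers hold pfns encoded by migrate_pfn() together
 * with MIGRATE_PFN_* flag bits; an entry of 0 means "do not migrate".
 * Sketch of the round trip ("writable" is a hypothetical flag):
 *
 *	unsigned long ent;
 *	struct page *p;
 *
 *	ent = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;
 *	if (writable)
 *		ent |= MIGRATE_PFN_WRITE;
 *	p = migrate_pfn_to_page(ent);	// back to the struct page
 */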
static int migrate_vma_collect_hole(unsigned long start,
				    unsigned long end,
				    __always_unused int depth,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	/* Only allow populating anonymous memory. */
	if (!vma_is_anonymous(walk->vma))
		return migrate_vma_collect_skip(start, end, walk);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
		migrate->dst[migrate->npages] = 0;
		migrate->npages++;
		migrate->cpages++;
	}

	return 0;
}

static int migrate_vma_collect_pmd(pmd_t *pmdp,
				   unsigned long start,
				   unsigned long end,
				   struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start, unmapped = 0;
	spinlock_t *ptl;
	pte_t *ptep;

again:
	if (pmd_none(*pmdp))
		return migrate_vma_collect_hole(start, end, -1, walk);

	if (pmd_trans_huge(*pmdp)) {
		struct page *page;

		ptl = pmd_lock(mm, pmdp);
		if (unlikely(!pmd_trans_huge(*pmdp))) {
			spin_unlock(ptl);
			goto again;
		}

		page = pmd_page(*pmdp);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			split_huge_pmd(vma, pmdp, addr);
			if (pmd_trans_unstable(pmdp))
				return migrate_vma_collect_skip(start, end,
								walk);
		} else {
			int ret;

			get_page(page);
			spin_unlock(ptl);
			if (unlikely(!trylock_page(page)))
				return migrate_vma_collect_skip(start, end,
								walk);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (ret)
				return migrate_vma_collect_skip(start, end,
								walk);
			if (pmd_none(*pmdp))
				return migrate_vma_collect_hole(start, end, -1,
								walk);
		}
	}

	if (unlikely(pmd_bad(*pmdp)))
		return migrate_vma_collect_skip(start, end, walk);

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	arch_enter_lazy_mmu_mode();

	for (; addr < end; addr += PAGE_SIZE, ptep++) {
		unsigned long mpfn = 0, pfn;
		struct page *page;
		swp_entry_t entry;
		pte_t pte;

		pte = *ptep;

		if (pte_none(pte)) {
			if (vma_is_anonymous(vma)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				migrate->cpages++;
			}
			goto next;
		}

		if (!pte_present(pte)) {
			/*
			 * Only care about unaddressable device page special
			 * page table entries. Other special swap entries are
			 * not migratable, and we ignore regular swapped pages.
			 */
			entry = pte_to_swp_entry(pte);
			if (!is_device_private_entry(entry))
				goto next;

			page = pfn_swap_entry_to_page(entry);
			if (!(migrate->flags &
			      MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
			    page->pgmap->owner != migrate->pgmap_owner)
				goto next;

			mpfn = migrate_pfn(page_to_pfn(page)) |
					MIGRATE_PFN_MIGRATE;
			if (is_writable_device_private_entry(entry))
				mpfn |= MIGRATE_PFN_WRITE;
		} else {
			if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
				goto next;
			pfn = pte_pfn(pte);
			if (is_zero_pfn(pfn)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				migrate->cpages++;
				goto next;
			}
			page = vm_normal_page(migrate->vma, addr, pte);
			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
		}

		/* FIXME support THP */
		if (!page || !page->mapping || PageTransCompound(page)) {
			mpfn = 0;
			goto next;
		}

		/*
		 * By getting a reference on the page we pin it and that blocks
		 * any kind of migration. A side effect is that it "freezes"
		 * the pte.
		 *
		 * We drop this reference after isolating the page from the lru
		 * for non device pages (device pages are not on the lru and
		 * thus can't be dropped from it).
		 */
		get_page(page);

		/*
		 * Optimize for the common case where page is only mapped once
		 * in one process. If we can lock the page, then we can safely
		 * set up a special migration page table entry now.
		 */
		if (trylock_page(page)) {
			pte_t swp_pte;

			migrate->cpages++;
			ptep_get_and_clear(mm, addr, ptep);

			/* Setup special migration page table entry */
			if (mpfn & MIGRATE_PFN_WRITE)
				entry = make_writable_migration_entry(
							page_to_pfn(page));
			else
				entry = make_readable_migration_entry(
							page_to_pfn(page));
			swp_pte = swp_entry_to_pte(entry);
			if (pte_present(pte)) {
				if (pte_soft_dirty(pte))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_uffd_wp(pte))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			} else {
				if (pte_swp_soft_dirty(pte))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_swp_uffd_wp(pte))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			}
			set_pte_at(mm, addr, ptep, swp_pte);

			/*
			 * This is like a regular unmap: we remove the rmap and
			 * drop the page refcount. The page won't be freed, as
			 * we took a reference just above.
			 */
			page_remove_rmap(page, false);
			put_page(page);

			if (pte_present(pte))
				unmapped++;
		} else {
			put_page(page);
			mpfn = 0;
		}

next:
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = mpfn;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(ptep - 1, ptl);

	/* Only flush the TLB if we actually modified any entries */
	if (unmapped)
		flush_tlb_range(walk->vma, start, end);

	return 0;
}

static const struct mm_walk_ops migrate_vma_walk_ops = {
	.pmd_entry		= migrate_vma_collect_pmd,
	.pte_hole		= migrate_vma_collect_hole,
};

/*
 * migrate_vma_collect() - collect pages over a range of virtual addresses
 * @migrate: migrate struct containing all migration information
 *
 * This will walk the CPU page table. For each virtual address backed by a
 * valid page, it updates the src array and takes a reference on the page, in
 * order to pin the page until we lock it and unmap it.
 */
static void migrate_vma_collect(struct migrate_vma *migrate)
{
	struct mmu_notifier_range range;

	/*
	 * Note that the pgmap_owner is passed to the mmu notifier callback so
	 * that the registered device driver can skip invalidating device
	 * private page mappings that won't be migrated.
	 */
	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
		migrate->vma, migrate->vma->vm_mm, migrate->start, migrate->end,
		migrate->pgmap_owner);
	mmu_notifier_invalidate_range_start(&range);

	walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
			&migrate_vma_walk_ops, migrate);

	mmu_notifier_invalidate_range_end(&range);
	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
}

/*
 * migrate_vma_check_page() - check if page is pinned or not
 * @page: struct page to check
 *
 * Pinned pages cannot be migrated. This is the same test as in
 * folio_migrate_mapping(), except that here we allow migration of a
 * ZONE_DEVICE page.
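 *
 * Editor's worked example (illustrative; the refcount accounting is
 * hedged): an anonymous page mapped by a single pte and isolated from
 * the LRU by the caller typically has page_count() == 2 (one held by
 * the mapping, one by the isolation) and page_mapcount() == 1, so with
 * extra == 1 the test below reads:
 *
 *	(page_count(page) - extra) > page_mapcount(page)
 *	(2 - 1) > 1	// false -> not pinned, may migrate
 *
 * Any further reference, e.g. from get_user_pages(), makes the left
 * side 2 > 1 and the page is treated as pinned.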
 */
static bool migrate_vma_check_page(struct page *page)
{
	/*
	 * One extra ref because the caller holds an extra reference, either
	 * from isolate_lru_page() for a regular page, or migrate_vma_collect()
	 * for a device page.
	 */
	int extra = 1;

	/*
	 * FIXME support THP (transparent huge page), it is a bit more complex
	 * to check them than regular pages, because they can be mapped with a
	 * pmd or with a pte (split pte mapping).
	 */
	if (PageCompound(page))
		return false;

	/* Pages from ZONE_DEVICE have one extra reference */
	if (is_zone_device_page(page))
		extra++;

	/* For file-backed pages */
	if (page_mapping(page))
		extra += 1 + page_has_private(page);

	if ((page_count(page) - extra) > page_mapcount(page))
		return false;

	return true;
}

/*
 * migrate_vma_unmap() - replace page mapping with special migration pte entry
 * @migrate: migrate struct containing all migration information
 *
 * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
 * special migration pte entry and check if the pages have been pinned. Pinned
 * pages are restored because we cannot migrate them.
 *
 * This is the last step before we call the device driver callback to allocate
 * destination memory and copy contents of original page over to new page.
 */
static void migrate_vma_unmap(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	unsigned long i, restore = 0;
	bool allow_drain = true;

	lru_add_drain();

	for (i = 0; i < npages; i++) {
		struct page *page = migrate_pfn_to_page(migrate->src[i]);

		if (!page)
			continue;

		/* ZONE_DEVICE pages are not on LRU */
		if (!is_zone_device_page(page)) {
			if (!PageLRU(page) && allow_drain) {
				/* Drain CPU's pagevec */
				lru_add_drain_all();
				allow_drain = false;
			}

			if (isolate_lru_page(page)) {
				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
				migrate->cpages--;
				restore++;
				continue;
			}

			/* Drop the reference we took in collect */
			put_page(page);
		}

		if (page_mapped(page))
			try_to_migrate(page, 0);

		if (page_mapped(page) || !migrate_vma_check_page(page)) {
			if (!is_zone_device_page(page)) {
				get_page(page);
				putback_lru_page(page);
			}

			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
			migrate->cpages--;
			restore++;
			continue;
		}
	}

	for (i = 0; i < npages && restore; i++) {
		struct page *page = migrate_pfn_to_page(migrate->src[i]);

		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		remove_migration_ptes(page, page, false);

		migrate->src[i] = 0;
		unlock_page(page);
		put_page(page);
		restore--;
	}
}

/**
 * migrate_vma_setup() - prepare to migrate a range of memory
 * @args: contains the vma, start, and pfns arrays for the migration
 *
 * Returns: negative errno on failures, 0 when 0 or more pages were migrated
 * without an error.
 *
 * Prepare to migrate a virtual address range by collecting all the pages
 * backing each virtual address in the range, saving them inside the src
 * array. Then lock those pages and unmap them. Once the pages are locked
 * and unmapped, check whether each page is pinned or not.
 * Pages that aren't
 * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
 * corresponding src array entry. It then restores any pages that are pinned,
 * by remapping and unlocking those pages.
 *
 * The caller should then allocate destination memory and copy source memory to
 * it for all those entries (i.e. with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
 * flag set). Once these are allocated and copied, the caller must update each
 * corresponding entry in the dst array with the pfn value of the destination
 * page and with MIGRATE_PFN_VALID. Destination pages must be locked via
 * lock_page().
 *
 * Note that the caller does not have to migrate all the pages that are marked
 * with the MIGRATE_PFN_MIGRATE flag in the src array unless this is a migration
 * from device memory to system memory. If the caller cannot migrate a device
 * page back to system memory, then it must return VM_FAULT_SIGBUS, which has
 * severe consequences for the userspace process, so it must be avoided if at
 * all possible.
 *
 * For empty entries inside the CPU page table (pte_none() or pmd_none() is
 * true) we do set the MIGRATE_PFN_MIGRATE flag in the corresponding source
 * array entry, thus allowing the caller to allocate device memory for those
 * unbacked virtual addresses. For this the caller simply has to allocate
 * device memory and properly set the destination entry like for regular
 * migration. Note that this can still fail, and thus inside the device driver
 * you must check if the migration was successful for those entries after
 * calling migrate_vma_pages(), just like for regular migration.
 *
 * After that, the callers must call migrate_vma_pages() to go over each entry
 * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
 * set. If the corresponding entry in the dst array has the MIGRATE_PFN_VALID
 * flag set, then migrate_vma_pages() migrates struct page information from the
 * source struct page to the destination struct page. If it fails to migrate
 * the struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in
 * the src array.
 *
 * At this point all successfully migrated pages have an entry in the src
 * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
 * array entry with MIGRATE_PFN_VALID flag set.
 *
 * Once migrate_vma_pages() returns the caller may inspect which pages were
 * successfully migrated, and which were not. Successfully migrated pages will
 * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
 *
 * It is safe to update the device page table after migrate_vma_pages() because
 * both destination and source page are still locked, and the mmap_lock is held
 * in read mode (hence no one can unmap the range being migrated).
 *
 * Once the caller is done cleaning up things and updating its page table (if it
 * chose to do so, this is not an obligation) it finally calls
 * migrate_vma_finalize() to update the CPU page table to point to new pages
 * for successfully migrated pages or otherwise restore the CPU page table to
 * point to the original source pages.
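 *
 * Editor's sketch of the whole flow (illustrative only; dmem_alloc(),
 * dmem_copy() and "drv" are hypothetical driver-side names, not kernel
 * API):
 *
 *	unsigned long src[NPAGES], dst[NPAGES];
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.start		= start,
 *		.end		= start + NPAGES * PAGE_SIZE,
 *		.src		= src,
 *		.dst		= dst,
 *		.pgmap_owner	= drv,
 *		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 *	};
 *	struct page *dpage;
 *	unsigned long i;
 *
 *	if (migrate_vma_setup(&args))
 *		return -EINVAL;
 *	for (i = 0; i < args.npages; i++) {
 *		if (!(src[i] & MIGRATE_PFN_MIGRATE))
 *			continue;
 *		dpage = dmem_alloc(drv);	// driver allocates dst page
 *		lock_page(dpage);		// dst pages must be locked
 *		// src page may be NULL for pte_none() holes; real drivers
 *		// handle that case separately
 *		dmem_copy(dpage, migrate_pfn_to_page(src[i]));
 *		dst[i] = migrate_pfn(page_to_pfn(dpage));  // sets VALID
 *	}
 *	migrate_vma_pages(&args);
 *	// ...safe to update the device page table here...
 *	migrate_vma_finalize(&args);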
 */
int migrate_vma_setup(struct migrate_vma *args)
{
	long nr_pages = (args->end - args->start) >> PAGE_SHIFT;

	args->start &= PAGE_MASK;
	args->end &= PAGE_MASK;
	if (!args->vma || is_vm_hugetlb_page(args->vma) ||
	    (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
		return -EINVAL;
	if (nr_pages <= 0)
		return -EINVAL;
	if (args->start < args->vma->vm_start ||
	    args->start >= args->vma->vm_end)
		return -EINVAL;
	if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
		return -EINVAL;
	if (!args->src || !args->dst)
		return -EINVAL;

	memset(args->src, 0, sizeof(*args->src) * nr_pages);
	args->cpages = 0;
	args->npages = 0;

	migrate_vma_collect(args);

	if (args->cpages)
		migrate_vma_unmap(args);

	/*
	 * At this point pages are locked and unmapped, and thus they have
	 * stable content and can safely be copied to destination memory that
	 * is allocated by the drivers.
	 */
	return 0;
}
EXPORT_SYMBOL(migrate_vma_setup);

/*
 * This code closely matches the code in:
 *   __handle_mm_fault()
 *     handle_pte_fault()
 *       do_anonymous_page()
 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
 * private page.
 */
static void migrate_vma_insert_page(struct migrate_vma *migrate,
				    unsigned long addr,
				    struct page *page,
				    unsigned long *src)
{
	struct vm_area_struct *vma = migrate->vma;
	struct mm_struct *mm = vma->vm_mm;
	bool flush = false;
	spinlock_t *ptl;
	pte_t entry;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/* Only allow populating anonymous memory */
	if (!vma_is_anonymous(vma))
		goto abort;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (!p4dp)
		goto abort;
	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		goto abort;
	pmdp = pmd_alloc(mm, pudp, addr);
	if (!pmdp)
		goto abort;

	if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
		goto abort;

	/*
	 * Use pte_alloc() instead of pte_alloc_map(). We can't run
	 * pte_offset_map() on pmds where a huge pmd might be created
	 * from a different thread.
	 *
	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
	 * parallel threads are excluded by other means.
	 *
	 * Here we only have mmap_read_lock(mm).
	 */
	if (pte_alloc(mm, pmdp))
		goto abort;

	/* See the comment in pte_alloc_one_map() */
	if (unlikely(pmd_trans_unstable(pmdp)))
		goto abort;

	if (unlikely(anon_vma_prepare(vma)))
		goto abort;
	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
		goto abort;

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	if (is_zone_device_page(page)) {
		if (is_device_private_page(page)) {
			swp_entry_t swp_entry;

			if (vma->vm_flags & VM_WRITE)
				swp_entry = make_writable_device_private_entry(
							page_to_pfn(page));
			else
				swp_entry = make_readable_device_private_entry(
							page_to_pfn(page));
			entry = swp_entry_to_pte(swp_entry);
		} else {
			/*
			 * For now we only support migrating to un-addressable
			 * device memory.
			 */
			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
			goto abort;
		}
	} else {
		entry = mk_pte(page, vma->vm_page_prot);
		if (vma->vm_flags & VM_WRITE)
			entry = pte_mkwrite(pte_mkdirty(entry));
	}

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);

	if (check_stable_address_space(mm))
		goto unlock_abort;

	if (pte_present(*ptep)) {
		unsigned long pfn = pte_pfn(*ptep);

		if (!is_zero_pfn(pfn))
			goto unlock_abort;
		flush = true;
	} else if (!pte_none(*ptep))
		goto unlock_abort;

	/*
	 * Check for userfaultfd but do not deliver the fault. Instead,
	 * just back off.
	 */
	if (userfaultfd_missing(vma))
		goto unlock_abort;

	inc_mm_counter(mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, addr, false);
	if (!is_zone_device_page(page))
		lru_cache_add_inactive_or_unevictable(page, vma);
	get_page(page);

	if (flush) {
		flush_cache_page(vma, addr, pte_pfn(*ptep));
		ptep_clear_flush_notify(vma, addr, ptep);
		set_pte_at_notify(mm, addr, ptep, entry);
		update_mmu_cache(vma, addr, ptep);
	} else {
		/* No need to invalidate - it was non-present before */
		set_pte_at(mm, addr, ptep, entry);
		update_mmu_cache(vma, addr, ptep);
	}

	pte_unmap_unlock(ptep, ptl);
	*src = MIGRATE_PFN_MIGRATE;
	return;

unlock_abort:
	pte_unmap_unlock(ptep, ptl);
abort:
	*src &= ~MIGRATE_PFN_MIGRATE;
}

/**
 * migrate_vma_pages() - migrate meta-data from src page to dst page
 * @migrate: migrate struct containing all migration information
 *
 * This migrates struct page meta-data from the source struct page to the
 * destination struct page. This effectively finishes the migration from the
 * source page to the destination page.
 */
void migrate_vma_pages(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	const unsigned long start = migrate->start;
	struct mmu_notifier_range range;
	unsigned long addr, i;
	bool notified = false;

	for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
		struct page *page = migrate_pfn_to_page(migrate->src[i]);
		struct address_space *mapping;
		int r;

		if (!newpage) {
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
			continue;
		}

		if (!page) {
			if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
				continue;
			if (!notified) {
				notified = true;

				mmu_notifier_range_init_owner(&range,
					MMU_NOTIFY_MIGRATE, 0, migrate->vma,
					migrate->vma->vm_mm, addr, migrate->end,
					migrate->pgmap_owner);
				mmu_notifier_invalidate_range_start(&range);
			}
			migrate_vma_insert_page(migrate, addr, newpage,
						&migrate->src[i]);
			continue;
		}

		mapping = page_mapping(page);

		if (is_zone_device_page(newpage)) {
			if (is_device_private_page(newpage)) {
				/*
				 * For now we only support private anonymous
				 * pages when migrating to un-addressable
				 * device memory.
				 */
				if (mapping) {
					migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
					continue;
				}
			} else {
				/*
				 * Other types of ZONE_DEVICE page are not
				 * supported.
				 */
				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
				continue;
			}
		}

		r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
		if (r != MIGRATEPAGE_SUCCESS)
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
	}

	/*
	 * No need to double call mmu_notifier->invalidate_range() callback as
	 * the above ptep_clear_flush_notify() inside migrate_vma_insert_page()
	 * did already call it.
	 */
	if (notified)
		mmu_notifier_invalidate_range_only_end(&range);
}
EXPORT_SYMBOL(migrate_vma_pages);

/**
 * migrate_vma_finalize() - restore CPU page table entry
 * @migrate: migrate struct containing all migration information
 *
 * This replaces the special migration pte entry with either a mapping to the
 * new page if migration was successful for that page, or to the original page
 * otherwise.
 *
 * This also unlocks the pages and puts them back on the lru, or drops the
 * extra refcount, for device pages.
 */
void migrate_vma_finalize(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	unsigned long i;

	for (i = 0; i < npages; i++) {
		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
		struct page *page = migrate_pfn_to_page(migrate->src[i]);

		if (!page) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			continue;
		}

		if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			newpage = page;
		}

		remove_migration_ptes(page, newpage, false);
		unlock_page(page);

		if (is_zone_device_page(page))
			put_page(page);
		else
			putback_lru_page(page);

		if (newpage != page) {
			unlock_page(newpage);
			if (is_zone_device_page(newpage))
				put_page(newpage);
			else
				putback_lru_page(newpage);
		}
	}
}
EXPORT_SYMBOL(migrate_vma_finalize);
#endif /* CONFIG_DEVICE_PRIVATE */

/*
 * node_demotion[] example:
 *
 * Consider a system with two sockets. Each socket has
 * three classes of memory attached: fast, medium and slow.
 * Each memory class is placed in its own NUMA node. The
 * CPUs are placed in the node with the "fast" memory. The
 * 6 NUMA nodes (0-5) might be split among the sockets like
 * this:
 *
 *	Socket A: 0, 1, 2
 *	Socket B: 3, 4, 5
 *
 * When Node 0 fills up, its memory should be migrated to
 * Node 1. When Node 1 fills up, it should be migrated to
 * Node 2. The migration path starts on the nodes with the
 * processors (since allocations default to this node) and
 * fast memory, progresses through medium and ends with the
 * slow memory:
 *
 *	0 -> 1 -> 2 -> stop
 *	3 -> 4 -> 5 -> stop
 *
 * This is represented in the node_demotion[] like this:
 *
 *	{ nr=1, nodes[0]=1 }, // Node 0 migrates to 1
 *	{ nr=1, nodes[0]=2 }, // Node 1 migrates to 2
 *	{ nr=0, nodes[0]=-1 }, // Node 2 does not migrate
 *	{ nr=1, nodes[0]=4 }, // Node 3 migrates to 4
 *	{ nr=1, nodes[0]=5 }, // Node 4 migrates to 5
 *	{ nr=0, nodes[0]=-1 }, // Node 5 does not migrate
 *
 * Moreover some systems may have multiple slow memory nodes.
 * Suppose a system has one socket with 3 memory nodes: node 0
 * is fast memory, and nodes 1 and 2 are both slow memory, at
 * the same distance from the fast memory node. The migration
 * path should then be:
 *
 *	0 -> 1/2 -> stop
 *
 * This is represented in the node_demotion[] like this:
 *
 *	{ nr=2, {nodes[0]=1, nodes[1]=2} }, // Node 0 migrates to node 1 and node 2
 *	{ nr=0, nodes[0]=-1, }, // Node 1 does not migrate
 *	{ nr=0, nodes[0]=-1, }, // Node 2 does not migrate
 */

/*
 * Writes to this array occur without locking. Cycles are
 * not allowed: Node X demotes to Y which demotes to X...
 *
 * If multiple reads are performed, a single rcu_read_lock()
 * must be held over all reads to ensure that no cycles are
 * observed.
 */
#define DEFAULT_DEMOTION_TARGET_NODES 15

#if MAX_NUMNODES < DEFAULT_DEMOTION_TARGET_NODES
#define DEMOTION_TARGET_NODES	(MAX_NUMNODES - 1)
#else
#define DEMOTION_TARGET_NODES	DEFAULT_DEMOTION_TARGET_NODES
#endif

struct demotion_nodes {
	unsigned short nr;
	short nodes[DEMOTION_TARGET_NODES];
};

static struct demotion_nodes *node_demotion __read_mostly;

/**
 * next_demotion_node() - Get the next node in the demotion path
 * @node: The starting node to look up the next node from
 *
 * Return: node id for the next memory node in the demotion path hierarchy
 * from @node; NUMA_NO_NODE if @node is terminal. This does not keep
 * @node online or guarantee that it *continues* to be the next demotion
 * target.
 */
int next_demotion_node(int node)
{
	struct demotion_nodes *nd;
	unsigned short target_nr, index;
	int target;

	if (!node_demotion)
		return NUMA_NO_NODE;

	nd = &node_demotion[node];

	/*
	 * node_demotion[] is updated without excluding this
	 * function from running. RCU doesn't provide any
	 * compiler barriers, so the READ_ONCE() is required
	 * to avoid compiler reordering or read merging.
	 *
	 * Make sure to use RCU over entire code blocks if
	 * node_demotion[] reads need to be consistent.
	 */
	rcu_read_lock();
	target_nr = READ_ONCE(nd->nr);

	switch (target_nr) {
	case 0:
		target = NUMA_NO_NODE;
		goto out;
	case 1:
		index = 0;
		break;
	default:
		/*
		 * If there are multiple target nodes, just select one
		 * target node randomly.
		 *
		 * We could also use round-robin to select a target node,
		 * but that would require another variable in
		 * node_demotion[] to record the last selected target
		 * node, which may cause cache ping-pong as that variable
		 * keeps changing. Per-cpu data would avoid the caching
		 * issue, but seems more complicated. So selecting a
		 * target node randomly seems better for now.
		 */
		index = get_random_int() % target_nr;
		break;
	}

	target = READ_ONCE(nd->nodes[index]);

out:
	rcu_read_unlock();
	return target;
}
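/*
 * Example (editor's sketch): reclaim uses this to demote pages instead
 * of discarding them. A simplified, hypothetical caller in the spirit
 * of demote_page_list() in mm/vmscan.c:
 *
 *	int target_nid = next_demotion_node(page_to_nid(page));
 *
 *	if (target_nid == NUMA_NO_NODE)
 *		return false;	// terminal node: fall back to swap/discard
 *	// otherwise feed the pages to migrate_pages() with target_nid
 */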
#if defined(CONFIG_HOTPLUG_CPU)
/* Disable reclaim-based migration. */
static void __disable_all_migrate_targets(void)
{
	int node, i;

	if (!node_demotion)
		return;

	for_each_online_node(node) {
		node_demotion[node].nr = 0;
		for (i = 0; i < DEMOTION_TARGET_NODES; i++)
			node_demotion[node].nodes[i] = NUMA_NO_NODE;
	}
}

static void disable_all_migrate_targets(void)
{
	__disable_all_migrate_targets();

	/*
	 * Ensure that the "disable" is visible across the system.
	 * Readers will see either a combination of before+disable
	 * state or disable+after. They will never see before and
	 * after state together.
	 *
	 * The before+after state together might have cycles and
	 * could cause readers to do things like loop until this
	 * function finishes. This ensures they can only see a
	 * single "bad" read and would, for instance, only loop
	 * once.
	 */
	synchronize_rcu();
}

/*
 * Find an automatic demotion target for 'node'.
 * Failing here is OK. It might just indicate
 * being at the end of a chain.
 */
static int establish_migrate_target(int node, nodemask_t *used,
				    int best_distance)
{
	int migration_target, index, val;
	struct demotion_nodes *nd;

	if (!node_demotion)
		return NUMA_NO_NODE;

	nd = &node_demotion[node];

	migration_target = find_next_best_node(node, used);
	if (migration_target == NUMA_NO_NODE)
		return NUMA_NO_NODE;

	/*
	 * If this node has already been given a migration target, that
	 * target was the best-distance choice. Still check whether this
	 * node can be demoted to further target nodes that share the
	 * same best distance.
	 */
	if (best_distance != -1) {
		val = node_distance(node, migration_target);
		if (val > best_distance)
			return NUMA_NO_NODE;
	}

	index = nd->nr;
	if (WARN_ONCE(index >= DEMOTION_TARGET_NODES,
		      "Exceeds maximum demotion target nodes\n"))
		return NUMA_NO_NODE;

	nd->nodes[index] = migration_target;
	nd->nr++;

	return migration_target;
}

/*
 * When memory fills up on a node, memory contents can be
 * automatically migrated to another node instead of
 * discarded at reclaim.
 *
 * Establish a "migration path" which will start at nodes
 * with CPUs and will follow the priorities used to build the
 * page allocator zonelists.
 *
 * The difference here is that cycles must be avoided. If
 * node0 migrates to node1, then neither node1, nor anything
 * node1 migrates to can migrate to node0. Also, one node can
 * be migrated to multiple nodes if the target nodes all have
 * the same best distance from the source node.
 *
 * This function can run simultaneously with readers of
 * node_demotion[]. However, it can not run simultaneously
 * with itself. Exclusion is provided by memory hotplug events
 * being single-threaded.
 */
static void __set_migration_target_nodes(void)
{
	nodemask_t next_pass	= NODE_MASK_NONE;
	nodemask_t this_pass	= NODE_MASK_NONE;
	nodemask_t used_targets = NODE_MASK_NONE;
	int node, best_distance;

	/*
	 * Avoid any oddities like cycles that could occur
	 * from changes in the topology. This will leave
	 * a momentary gap when migration is disabled.
	 */
	disable_all_migrate_targets();
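	/*
	 * Editor's worked example (using the two-socket topology from
	 * the node_demotion[] comment above): pass 1 starts from the
	 * CPU nodes {0, 3} and establishes targets {1, 4}; pass 2
	 * starts from {1, 4} and establishes {2, 5}; pass 3 starts
	 * from {2, 5}, finds no unused targets left, and the loop
	 * below terminates.
	 */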
	/*
	 * Allocations go close to CPUs, first. Assume that
	 * the migration path starts at the nodes with CPUs.
	 */
	next_pass = node_states[N_CPU];
again:
	this_pass = next_pass;
	next_pass = NODE_MASK_NONE;
	/*
	 * To avoid cycles in the migration "graph", ensure
	 * that migration sources are not future targets by
	 * setting them in 'used_targets'. Do this only
	 * once per pass so that multiple source nodes can
	 * share a target node.
	 *
	 * 'used_targets' will become unavailable in future
	 * passes. This limits some opportunities for
	 * multiple source nodes to share a destination.
	 */
	nodes_or(used_targets, used_targets, this_pass);

	for_each_node_mask(node, this_pass) {
		best_distance = -1;

		/*
		 * Try to set up the migration path for the node. There
		 * can be multiple target migration nodes, so keep
		 * looping until all targets sharing the best distance
		 * have been found.
		 */
		do {
			int target_node =
				establish_migrate_target(node, &used_targets,
							 best_distance);

			if (target_node == NUMA_NO_NODE)
				break;

			if (best_distance == -1)
				best_distance = node_distance(node, target_node);

			/*
			 * Visit targets from this pass in the next pass.
			 * Eventually, every node will have been part of
			 * a pass, and will become set in 'used_targets'.
			 */
			node_set(target_node, next_pass);
		} while (1);
	}
	/*
	 * 'next_pass' contains nodes which became migration
	 * targets in this pass. Make additional passes until
	 * no more migration targets are available.
	 */
	if (!nodes_empty(next_pass))
		goto again;
}

/*
 * For callers that do not hold get_online_mems() already.
 */
static void set_migration_target_nodes(void)
{
	get_online_mems();
	__set_migration_target_nodes();
	put_online_mems();
}

/*
 * This leaves migrate-on-reclaim transiently disabled between
 * the MEM_GOING_OFFLINE and MEM_OFFLINE events. This runs
 * whether reclaim-based migration is enabled or not, which
 * ensures that the user can turn reclaim-based migration on or
 * off at any time without needing to recalculate migration targets.
 *
 * These callbacks already hold get_online_mems(). That is why
 * __set_migration_target_nodes() can be used as opposed to
 * set_migration_target_nodes().
 */
static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
						 unsigned long action, void *_arg)
{
	struct memory_notify *arg = _arg;

	/*
	 * Only update the node migration order when a node is
	 * changing status, like online->offline. This avoids
	 * the overhead of synchronize_rcu() in most cases.
	 */
	if (arg->status_change_nid < 0)
		return notifier_from_errno(0);

	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * Make sure there are not transient states where
		 * an offline node is a migration target. This
		 * will leave migration disabled until the offline
		 * completes and the MEM_OFFLINE case below runs.
		 */
		disable_all_migrate_targets();
		break;
	case MEM_OFFLINE:
	case MEM_ONLINE:
		/*
		 * Recalculate the target nodes once the node
		 * reaches its final state (online or offline).
		 */
		__set_migration_target_nodes();
		break;
	case MEM_CANCEL_OFFLINE:
		/*
		 * MEM_GOING_OFFLINE disabled all the migration
		 * targets. Reenable them.
3246 */ 3247 __set_migration_target_nodes(); 3248 break; 3249 case MEM_GOING_ONLINE: 3250 case MEM_CANCEL_ONLINE: 3251 break; 3252 } 3253 3254 return notifier_from_errno(0); 3255 } 3256 3257 /* 3258 * React to hotplug events that might affect the migration targets 3259 * like events that online or offline NUMA nodes. 3260 * 3261 * The ordering is also currently dependent on which nodes have 3262 * CPUs. That means we need CPU on/offline notification too. 3263 */ 3264 static int migration_online_cpu(unsigned int cpu) 3265 { 3266 set_migration_target_nodes(); 3267 return 0; 3268 } 3269 3270 static int migration_offline_cpu(unsigned int cpu) 3271 { 3272 set_migration_target_nodes(); 3273 return 0; 3274 } 3275 3276 static int __init migrate_on_reclaim_init(void) 3277 { 3278 int ret; 3279 3280 node_demotion = kmalloc_array(nr_node_ids, 3281 sizeof(struct demotion_nodes), 3282 GFP_KERNEL); 3283 WARN_ON(!node_demotion); 3284 3285 ret = cpuhp_setup_state_nocalls(CPUHP_MM_DEMOTION_DEAD, "mm/demotion:offline", 3286 NULL, migration_offline_cpu); 3287 /* 3288 * In the unlikely case that this fails, the automatic 3289 * migration targets may become suboptimal for nodes 3290 * where N_CPU changes. With such a small impact in a 3291 * rare case, do not bother trying to do anything special. 3292 */ 3293 WARN_ON(ret < 0); 3294 ret = cpuhp_setup_state(CPUHP_AP_MM_DEMOTION_ONLINE, "mm/demotion:online", 3295 migration_online_cpu, NULL); 3296 WARN_ON(ret < 0); 3297 3298 hotplug_memory_notifier(migrate_on_reclaim_callback, 100); 3299 return 0; 3300 } 3301 late_initcall(migrate_on_reclaim_init); 3302 #endif /* CONFIG_HOTPLUG_CPU */ 3303 3304 bool numa_demotion_enabled = false; 3305 3306 #ifdef CONFIG_SYSFS 3307 static ssize_t numa_demotion_enabled_show(struct kobject *kobj, 3308 struct kobj_attribute *attr, char *buf) 3309 { 3310 return sysfs_emit(buf, "%s\n", 3311 numa_demotion_enabled ? "true" : "false"); 3312 } 3313 3314 static ssize_t numa_demotion_enabled_store(struct kobject *kobj, 3315 struct kobj_attribute *attr, 3316 const char *buf, size_t count) 3317 { 3318 if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1)) 3319 numa_demotion_enabled = true; 3320 else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1)) 3321 numa_demotion_enabled = false; 3322 else 3323 return -EINVAL; 3324 3325 return count; 3326 } 3327 3328 static struct kobj_attribute numa_demotion_enabled_attr = 3329 __ATTR(demotion_enabled, 0644, numa_demotion_enabled_show, 3330 numa_demotion_enabled_store); 3331 3332 static struct attribute *numa_attrs[] = { 3333 &numa_demotion_enabled_attr.attr, 3334 NULL, 3335 }; 3336 3337 static const struct attribute_group numa_attr_group = { 3338 .attrs = numa_attrs, 3339 }; 3340 3341 static int __init numa_init_sysfs(void) 3342 { 3343 int err; 3344 struct kobject *numa_kobj; 3345 3346 numa_kobj = kobject_create_and_add("numa", mm_kobj); 3347 if (!numa_kobj) { 3348 pr_err("failed to create numa kobject\n"); 3349 return -ENOMEM; 3350 } 3351 err = sysfs_create_group(numa_kobj, &numa_attr_group); 3352 if (err) { 3353 pr_err("failed to register numa group\n"); 3354 goto delete_obj; 3355 } 3356 return 0; 3357 3358 delete_obj: 3359 kobject_put(numa_kobj); 3360 return err; 3361 } 3362 subsys_initcall(numa_init_sysfs); 3363 #endif 3364