// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
#include <linux/oom.h>
#include <linux/memory.h>
#include <linux/random.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>

#include <asm/tlbflush.h>

#include <trace/events/migrate.h>

#include "internal.h"

int isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	const struct movable_operations *mops;

	/*
	 * Avoid burning cycles with pages that are still under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount, preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of releasing
	 * this page, thus avoiding a nasty leakage.
	 */
	if (unlikely(!get_page_unless_zero(page)))
		goto out;

	/*
	 * Check PageMovable before holding a PG_lock because the page's owner
	 * assumes that nobody touches the PG_lock of a newly allocated page,
	 * so unconditionally grabbing the lock ruins the page owner's side.
	 */
	if (unlikely(!__PageMovable(page)))
		goto out_putpage;
	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as race against a page being released.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!trylock_page(page)))
		goto out_putpage;

	if (!PageMovable(page) || PageIsolated(page))
		goto out_no_isolated;

	mops = page_movable_ops(page);
	VM_BUG_ON_PAGE(!mops, page);

	if (!mops->isolate_page(page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use the PG_isolated bit of page->flags */
	WARN_ON_ONCE(PageIsolated(page));
	SetPageIsolated(page);
	unlock_page(page);

	return 0;

out_no_isolated:
	unlock_page(page);
out_putpage:
	put_page(page);
out:
	return -EBUSY;
}

static void putback_movable_page(struct page *page)
{
	const struct movable_operations *mops = page_movable_ops(page);

	mops->putback_page(page);
	ClearPageIsolated(page);
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See
 * isolate_migratepages_range() and isolate_hugetlb().
 */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		/*
		 * We isolated a non-LRU movable page, so we can use
		 * __PageMovable here because an LRU page's mapping cannot
		 * have PAGE_MAPPING_MOVABLE set.
		 */
		if (unlikely(__PageMovable(page))) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);
			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		} else {
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_lru(page), -thp_nr_pages(page));
			putback_lru_page(page);
		}
	}
}
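
/*
 * Example (illustrative sketch, not part of this file): a driver whose
 * pages should be handled by isolate_movable_page() above registers
 * movable_operations and marks each page movable while holding the page
 * lock; foo_mops and the foo_*() callbacks are hypothetical:
 *
 *	static const struct movable_operations foo_mops = {
 *		.isolate_page	= foo_isolate_page,
 *		.migrate_page	= foo_migrate_page,
 *		.putback_page	= foo_putback_page,
 *	};
 *
 *	lock_page(page);
 *	__SetPageMovable(page, &foo_mops);
 *	unlock_page(page);
 *
 * See balloon_compaction and z3fold for in-tree users.
 */
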
/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *old)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);

	while (page_vma_mapped_walk(&pvmw)) {
		rmap_t rmap_flags = RMAP_NONE;
		pte_t pte;
		swp_entry_t entry;
		struct page *new;
		unsigned long idx = 0;

		/* pgoff is invalid for ksm pages, but they are never large */
		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
		new = folio_page(folio, idx);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
					!folio_test_pmd_mappable(folio), folio);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		folio_get(folio);
		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
		if (pte_swp_soft_dirty(*pvmw.pte))
			pte = pte_mksoft_dirty(pte);

		/*
		 * Recheck VMA as permissions can change since migration started
		 */
		entry = pte_to_swp_entry(*pvmw.pte);
		if (!is_migration_entry_young(entry))
			pte = pte_mkold(pte);
		if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
			pte = pte_mkdirty(pte);
		if (is_writable_migration_entry(entry))
			pte = maybe_mkwrite(pte, vma);
		else if (pte_swp_uffd_wp(*pvmw.pte))
			pte = pte_mkuffd_wp(pte);

		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
			rmap_flags |= RMAP_EXCLUSIVE;

		if (unlikely(is_device_private_page(new))) {
			if (pte_write(pte))
				entry = make_writable_device_private_entry(
							page_to_pfn(new));
			else
				entry = make_readable_device_private_entry(
							page_to_pfn(new));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(*pvmw.pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(*pvmw.pte))
				pte = pte_swp_mkuffd_wp(pte);
		}

#ifdef CONFIG_HUGETLB_PAGE
		if (folio_test_hugetlb(folio)) {
			unsigned int shift = huge_page_shift(hstate_vma(vma));

			pte = pte_mkhuge(pte);
			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
			if (folio_test_anon(folio))
				hugepage_add_anon_rmap(new, vma, pvmw.address,
						       rmap_flags);
			else
				page_dup_file_rmap(new, true);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		} else
#endif
		{
			if (folio_test_anon(folio))
				page_add_anon_rmap(new, vma, pvmw.address,
						   rmap_flags);
			else
				page_add_file_rmap(new, vma, false);
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		}
		if (vma->vm_flags & VM_LOCKED)
			mlock_page_drain_local();

		trace_remove_migration_pte(pvmw.address, pte_val(pte),
					   compound_order(new));

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = src,
	};

	if (locked)
		rmap_walk_locked(dst, &rwc);
	else
		rmap_walk(dst, &rwc);
}
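
/*
 * Note: this is the counterpart of try_to_migrate(), which replaces every
 * pte mapping @src with a migration entry; once the data has been copied,
 * remove_migration_ptes(src, dst, false) rewrites those entries to map
 * @dst instead.
 */
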
/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	migration_entry_wait_on_locked(entry, ptep, ptl);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);

	__migration_entry_wait(mm, ptep, ptl);
}

#ifdef CONFIG_HUGETLB_PAGE
void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl)
{
	pte_t pte;

	spin_lock(ptl);
	pte = huge_ptep_get(ptep);

	if (unlikely(!is_hugetlb_entry_migration(pte)))
		spin_unlock(ptl);
	else
		migration_entry_wait_on_locked(pte_to_swp_entry(pte), NULL, ptl);
}

void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, pte);

	__migration_entry_wait_huge(pte, ptl);
}
#endif

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl);
	return;
unlock:
	spin_unlock(ptl);
}
#endif
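
/*
 * Example (illustrative sketch): a page fault that finds a migration
 * entry reaches the waiting helpers above via do_swap_page(), roughly:
 *
 *	entry = pte_to_swp_entry(vmf->orig_pte);
 *	if (is_migration_entry(entry))
 *		migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
 *
 * which blocks until the migration path unlocks the folio, after which
 * the fault is retried.
 */
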
static int folio_expected_refs(struct address_space *mapping,
		struct folio *folio)
{
	int refs = 1;

	if (!mapping)
		return refs;

	refs += folio_nr_pages(folio);
	if (folio_test_private(folio))
		refs++;

	return refs;
}

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
	long nr = folio_nr_pages(folio);

	if (!mapping) {
		/* Anonymous page without mapping */
		if (folio_ref_count(folio) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newfolio->index = folio->index;
		newfolio->mapping = folio->mapping;
		if (folio_test_swapbacked(folio))
			__folio_set_swapbacked(newfolio);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = folio_zone(folio);
	newzone = folio_zone(newfolio);

	xas_lock_irq(&xas);
	if (!folio_ref_freeze(folio, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the folio:
	 * no turning back from here.
	 */
	newfolio->index = folio->index;
	newfolio->mapping = folio->mapping;
	folio_ref_add(newfolio, nr); /* add cache reference */
	if (folio_test_swapbacked(folio)) {
		__folio_set_swapbacked(newfolio);
		if (folio_test_swapcache(folio)) {
			folio_set_swapcache(newfolio);
			newfolio->private = folio_get_private(folio);
		}
	} else {
		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = folio_test_dirty(folio);
	if (dirty) {
		folio_clear_dirty(folio);
		folio_set_dirty(newfolio);
	}

	xas_store(&xas, newfolio);

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	folio_ref_unfreeze(folio, expected_count - nr);

	xas_unlock(&xas);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		struct lruvec *old_lruvec, *new_lruvec;
		struct mem_cgroup *memcg;

		memcg = folio_memcg(folio);
		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);

		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
		}
#ifdef CONFIG_SWAP
		if (folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
		}
#endif
		if (dirty && mapping_can_writeback(mapping)) {
			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(folio_migrate_mapping);
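
/*
 * Worked refcount example (editorial, derived from the code above): for a
 * single-page pagecache folio with no other users, folio_expected_refs()
 * returns 2 (one reference held by the caller, one by the page cache), or
 * 3 if PG_private data is attached. folio_ref_freeze(folio, 2) in
 * folio_migrate_mapping() therefore succeeds only while nobody else holds
 * a transient reference.
 */
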
/*
 * The expected number of remaining references is the same as that
 * of folio_migrate_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct folio *dst, struct folio *src)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(src));
	int expected_count;

	xas_lock_irq(&xas);
	expected_count = 2 + folio_has_private(src);
	if (!folio_ref_freeze(src, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	dst->index = src->index;
	dst->mapping = src->mapping;

	folio_get(dst);

	xas_store(&xas, dst);

	folio_ref_unfreeze(src, expected_count - 1);

	xas_unlock_irq(&xas);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Copy the flags and some other ancillary information
 */
void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
{
	int cpupid;

	if (folio_test_error(folio))
		folio_set_error(newfolio);
	if (folio_test_referenced(folio))
		folio_set_referenced(newfolio);
	if (folio_test_uptodate(folio))
		folio_mark_uptodate(newfolio);
	if (folio_test_clear_active(folio)) {
		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
		folio_set_active(newfolio);
	} else if (folio_test_clear_unevictable(folio))
		folio_set_unevictable(newfolio);
	if (folio_test_workingset(folio))
		folio_set_workingset(newfolio);
	if (folio_test_checked(folio))
		folio_set_checked(newfolio);
	/*
	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
	 * migration entries. We can still have PG_anon_exclusive set on the
	 * effectively unmapped and unreferenced first sub-page of an
	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
	 */
	if (folio_test_mappedtodisk(folio))
		folio_set_mappedtodisk(newfolio);

	/* Move dirty on pages not done by folio_migrate_mapping() */
	if (folio_test_dirty(folio))
		folio_set_dirty(newfolio);

	if (folio_test_young(folio))
		folio_set_young(newfolio);
	if (folio_test_idle(folio))
		folio_set_idle(newfolio);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(&folio->page, -1);
	/*
	 * In memory tiering mode, when migrating between slow and fast
	 * memory nodes, reset cpupid, because it is used to record the
	 * page access time on slow memory nodes.
	 */
	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
		bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
		bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));

		if (f_toptier != t_toptier)
			cpupid = -1;
	}
	page_cpupid_xchg_last(&newfolio->page, cpupid);

	folio_migrate_ksm(newfolio, folio);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (folio_test_swapcache(folio))
		folio_clear_swapcache(folio);
	folio_clear_private(folio);

	/* page->private contains hugetlb specific flags */
	if (!folio_test_hugetlb(folio))
		folio->private = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (folio_test_writeback(newfolio))
		folio_end_writeback(newfolio);

	/*
	 * PG_readahead shares the same bit with PG_reclaim. The above
	 * folio_end_writeback() may clear PG_readahead mistakenly, so set
	 * the bit after that.
	 */
	if (folio_test_readahead(folio))
		folio_set_readahead(newfolio);

	folio_copy_owner(newfolio, folio);

	if (!folio_test_hugetlb(folio))
		mem_cgroup_migrate(folio, newfolio);
}
EXPORT_SYMBOL(folio_migrate_flags);

void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
{
	folio_copy(newfolio, folio);
	folio_migrate_flags(newfolio, folio);
}
EXPORT_SYMBOL(folio_migrate_copy);
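
/*
 * Note: the helpers below skip folio_migrate_copy() when mode is
 * MIGRATE_SYNC_NO_COPY. That mode exists for device memory migration
 * (see mm/migrate_device.c), where the caller copies the data itself,
 * possibly via DMA, and only the folio metadata is transferred here.
 */
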
/************************************************************
 *                    Migration functions
 ***********************************************************/

/**
 * migrate_folio() - Simple folio migration.
 * @mapping: The address_space containing the folio.
 * @dst: The folio to migrate the data to.
 * @src: The folio containing the current data.
 * @mode: How to migrate the page.
 *
 * Common logic to directly migrate a single LRU folio suitable for
 * folios that do not use PagePrivate/PagePrivate2.
 *
 * Folios are locked upon entry and exit.
 */
int migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode)
{
	int rc;

	BUG_ON(folio_test_writeback(src));	/* Writeback must be complete */

	rc = folio_migrate_mapping(mapping, dst, src, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_folio);
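
/*
 * Example (illustrative sketch): a filesystem whose folios carry no
 * private data can wire migrate_folio() directly into its
 * address_space_operations; foo_aops is hypothetical:
 *
 *	static const struct address_space_operations foo_aops = {
 *		...
 *		.migrate_folio	= migrate_folio,
 *	};
 */
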
#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
		enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks.
			 */
			struct buffer_head *failed_bh = bh;
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);

	return true;
}

static int __buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode,
		bool check_refs)
{
	struct buffer_head *bh, *head;
	int rc;
	int expected_count;

	head = folio_buffers(src);
	if (!head)
		return migrate_folio(mapping, dst, src, mode);

	/* Check whether the folio has extra refs before we do more work */
	expected_count = folio_expected_refs(mapping, src);
	if (folio_ref_count(src) != expected_count)
		return -EAGAIN;

	if (!buffer_migrate_lock_buffers(head, mode))
		return -EAGAIN;

	if (check_refs) {
		bool busy;
		bool invalidated = false;

recheck_buffers:
		busy = false;
		spin_lock(&mapping->private_lock);
		bh = head;
		do {
			if (atomic_read(&bh->b_count)) {
				busy = true;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
		if (busy) {
			if (invalidated) {
				rc = -EAGAIN;
				goto unlock_buffers;
			}
			spin_unlock(&mapping->private_lock);
			invalidate_bh_lrus();
			invalidated = true;
			goto recheck_buffers;
		}
	}

	rc = folio_migrate_mapping(mapping, dst, src, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		goto unlock_buffers;

	folio_attach_private(dst, folio_detach_private(src));

	bh = head;
	do {
		set_bh_page(bh, &dst->page, bh_offset(bh));
		bh = bh->b_this_page;
	} while (bh != head);

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);

	rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
	if (check_refs)
		spin_unlock(&mapping->private_lock);
	bh = head;
	do {
		unlock_buffer(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	return rc;
}

/**
 * buffer_migrate_folio() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * This function can only be used if the underlying filesystem guarantees
 * that no other references to @src exist. For example, attached buffer
 * heads are accessed only under the folio lock. If your filesystem cannot
 * provide this guarantee, buffer_migrate_folio_norefs() may be more
 * appropriate.
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_folio);

/**
 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * Like buffer_migrate_folio() except that this variant is more careful
 * and checks that there are also no buffer head references. This function
 * is the right one for mappings where buffer heads are directly looked
 * up and referenced (such as block device mappings).
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio_norefs(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, true);
}
#endif

int filemap_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	int ret;

	ret = folio_migrate_mapping(mapping, dst, src, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_get_private(src))
		folio_attach_private(dst, folio_detach_private(src));

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(filemap_migrate_folio);

/*
 * Writeback a folio to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct folio *folio)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!folio_clear_dirty_for_io(folio))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty folio may imply that the underlying filesystem has
	 * the folio on some queue. So the folio must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * folio state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(folio, folio, false);

	rc = mapping->a_ops->writepage(&folio->page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		folio_lock(folio);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	if (folio_test_dirty(src)) {
		/* Only writeback folios in full synchronous migration */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return -EBUSY;
		}
		return writeout(mapping, src);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (folio_test_private(src) &&
	    !filemap_release_folio(src, GFP_KERNEL))
		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

	return migrate_folio(mapping, dst, src, mode);
}
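
/*
 * Summary note: the fallback path above thus handles three cases: dirty
 * folios are written out (synchronous modes only), folios with
 * filesystem-private data have it dropped, and everything else is
 * migrated as a plain LRU folio via migrate_folio().
 */
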
/*
 * Move a page to a newly allocated page.
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_folio(struct folio *dst, struct folio *src,
				enum migrate_mode mode)
{
	int rc = -EAGAIN;
	bool is_lru = !__PageMovable(&src->page);

	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);

	if (likely(is_lru)) {
		struct address_space *mapping = folio_mapping(src);

		if (!mapping)
			rc = migrate_folio(mapping, dst, src, mode);
		else if (mapping->a_ops->migrate_folio)
			/*
			 * Most folios have a mapping and most filesystems
			 * provide a migrate_folio callback. Anonymous folios
			 * are part of swap space which also has its own
			 * migrate_folio callback. This is the most common
			 * path for page migration.
			 */
			rc = mapping->a_ops->migrate_folio(mapping, dst, src,
								mode);
		else
			rc = fallback_migrate_folio(mapping, dst, src, mode);
	} else {
		const struct movable_operations *mops;

		/*
		 * A non-LRU page could be released after the isolation step.
		 * In that case, we shouldn't try migration.
		 */
		VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
		if (!folio_test_movable(src)) {
			rc = MIGRATEPAGE_SUCCESS;
			folio_clear_isolated(src);
			goto out;
		}

		mops = page_movable_ops(&src->page);
		rc = mops->migrate_page(&dst->page, &src->page, mode);
		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
				!folio_test_isolated(src));
	}

	/*
	 * When successful, old pagecache src->mapping must be cleared before
	 * src is freed; but stats require that PageAnon be left as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (__PageMovable(&src->page)) {
			VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);

			/*
			 * We clear PG_movable under page_lock so any compactor
			 * cannot try to migrate this page.
			 */
			folio_clear_isolated(src);
		}

		/*
		 * Anonymous and movable src->mapping will be cleared by
		 * free_pages_prepare(), so don't reset it here: this keeps
		 * type checks such as PageAnon working.
		 */
		if (!folio_mapping_flags(src))
			src->mapping = NULL;

		if (likely(!folio_is_zone_device(dst)))
			flush_dcache_folio(dst);
	}
out:
	return rc;
}

static int __unmap_and_move(struct folio *src, struct folio *dst,
				int force, enum migrate_mode mode)
{
	int rc = -EAGAIN;
	bool page_was_mapped = false;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__PageMovable(&src->page);

	if (!folio_trylock(src)) {
		if (!force || mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readahead). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		folio_lock(src);
	}

	if (folio_test_writeback(src)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much.
		 */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			rc = -EBUSY;
			goto out_unlock;
		}
		if (!force)
			goto out_unlock;
		folio_wait_writeback(src);
	}

	/*
	 * After try_to_migrate(), src's mapcount has dropped to 0, so we
	 * cannot notice if the anon_vma is freed while we migrate the page.
	 * This get_anon_vma() delays freeing the anon_vma pointer until the
	 * end of migration. File cache pages are no problem because of
	 * page_lock(): file caches may use writepage() or lock_page() during
	 * migration, so only anon pages need care here.
	 *
	 * Only folio_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside one of its mms.
	 * But if we cannot get anon_vma, then we won't need it anyway,
	 * because that implies that the anon page is no longer mapped
	 * (and cannot be remapped so long as we hold the page lock).
	 */
	if (folio_test_anon(src) && !folio_test_ksm(src))
		anon_vma = folio_get_anon_vma(src);

	/*
	 * Block others from accessing the new page when we get around to
	 * establishing additional references. We are usually the only one
	 * holding a reference to dst at this point. We used to have a BUG
	 * here if folio_trylock(dst) fails, but would like to allow for
	 * cases where there might be a race with the previous use of dst.
	 * This is much like races on the refcount of oldpage: just don't BUG().
	 */
	if (unlikely(!folio_trylock(dst)))
		goto out_unlock;

	if (unlikely(!is_lru)) {
		rc = move_to_new_folio(dst, src, mode);
		goto out_unlock_both;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read in, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a src->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_cleanup_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page cannot be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!src->mapping) {
		if (folio_test_private(src)) {
			try_to_free_buffers(src);
			goto out_unlock_both;
		}
	} else if (folio_mapped(src)) {
		/* Establish migration ptes */
		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
			       !folio_test_ksm(src) && !anon_vma, src);
		try_to_migrate(src, 0);
		page_was_mapped = true;
	}

	if (!folio_mapped(src))
		rc = move_to_new_folio(dst, src, mode);

	/*
	 * When successful, push dst to the LRU immediately: so that if it
	 * turns out to be an mlocked page, remove_migration_ptes() will
	 * automatically build up the correct dst->mlock_count for it.
	 *
	 * We would like to do something similar for the old page, when
	 * unsuccessful, and other cases when a page has been temporarily
	 * isolated from the unevictable LRU: but this case is the easiest.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		folio_add_lru(dst);
		if (page_was_mapped)
			lru_add_drain();
	}

	if (page_was_mapped)
		remove_migration_ptes(src,
			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);

out_unlock_both:
	folio_unlock(dst);
out_unlock:
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	folio_unlock(src);
out:
	/*
	 * If migration is successful, decrease the refcount of dst,
	 * which will not free the page because the new page owner has
	 * increased the refcount.
	 */
	if (rc == MIGRATEPAGE_SUCCESS)
		folio_put(dst);

	return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page,
				   free_page_t put_new_page,
				   unsigned long private, struct page *page,
				   int force, enum migrate_mode mode,
				   enum migrate_reason reason,
				   struct list_head *ret)
{
	struct folio *dst, *src = page_folio(page);
	int rc = MIGRATEPAGE_SUCCESS;
	struct page *newpage = NULL;

	if (!thp_migration_supported() && PageTransHuge(page))
		return -ENOSYS;

	if (page_count(page) == 1) {
		/* Page was freed from under us. So we are done. */
		ClearPageActive(page);
		ClearPageUnevictable(page);
		/* free_pages_prepare() will clear PG_isolated. */
		goto out;
	}

	newpage = get_new_page(page, private);
	if (!newpage)
		return -ENOMEM;
	dst = page_folio(newpage);

	newpage->private = 0;
	rc = __unmap_and_move(src, dst, force, mode);
	if (rc == MIGRATEPAGE_SUCCESS)
		set_page_owner_migrate_reason(newpage, reason);

out:
	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be restored.
		 */
		list_del(&page->lru);
	}

	/*
	 * If migration is successful, release the reference grabbed during
	 * isolation. Otherwise, restore the page to the right list unless
	 * we want to retry.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		/*
		 * Compaction can also migrate non-LRU pages which are
		 * not accounted to NR_ISOLATED_*. They can be recognized
		 * as __PageMovable.
		 */
		if (likely(!__PageMovable(page)))
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_lru(page), -thp_nr_pages(page));

		if (reason != MR_MEMORY_FAILURE)
			/*
			 * We release the page in page_handle_poison().
			 */
			put_page(page);
	} else {
		if (rc != -EAGAIN)
			list_add_tail(&page->lru, ret);

		if (put_new_page)
			put_new_page(newpage, private);
		else
			put_page(newpage);
	}

	return rc;
}
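
/*
 * Example (illustrative sketch): the simplest get_new_page() callback a
 * caller might supply, allocating the destination on a fixed node;
 * new_on_node() is hypothetical:
 *
 *	static struct page *new_on_node(struct page *page, unsigned long node)
 *	{
 *		return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
 *	}
 *
 * In-tree callers normally use alloc_migration_target() below instead.
 */
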
/*
 * Counterpart of unmap_and_move_page() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepages.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and the direct
 * I/O code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				free_page_t put_new_page, unsigned long private,
				struct page *hpage, int force,
				enum migrate_mode mode, int reason,
				struct list_head *ret)
{
	struct folio *dst, *src = page_folio(hpage);
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct page *new_hpage;
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;

	/*
	 * Migratability of hugepages depends on architectures and their size.
	 * This check is necessary because some callers of hugepage migration
	 * like soft offline and memory hotremove don't walk through page
	 * tables or check whether the hugepage is pmd-based or not before
	 * kicking migration.
	 */
	if (!hugepage_migration_supported(page_hstate(hpage)))
		return -ENOSYS;

	if (folio_ref_count(src) == 1) {
		/* page was freed from under us. So we are done. */
		putback_active_hugepage(hpage);
		return MIGRATEPAGE_SUCCESS;
	}

	new_hpage = get_new_page(hpage, private);
	if (!new_hpage)
		return -ENOMEM;
	dst = page_folio(new_hpage);

	if (!folio_trylock(src)) {
		if (!force)
			goto out;
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			goto out;
		}
		folio_lock(src);
	}

	/*
	 * Check for pages which are in the process of being freed. Without
	 * folio_mapping() set, the hugetlbfs specific move page routine will
	 * not be called and we could leak usage counts for subpools.
	 */
	if (hugetlb_page_subpool(hpage) && !folio_mapping(src)) {
		rc = -EBUSY;
		goto out_unlock;
	}

	if (folio_test_anon(src))
		anon_vma = folio_get_anon_vma(src);

	if (unlikely(!folio_trylock(dst)))
		goto put_anon;

	if (folio_mapped(src)) {
		enum ttu_flags ttu = 0;

		if (!folio_test_anon(src)) {
			/*
			 * In shared mappings, try_to_unmap could potentially
			 * call huge_pmd_unshare. Because of this, take the
			 * semaphore in write mode here and set TTU_RMAP_LOCKED
			 * to let lower levels know we have taken the lock.
			 */
			mapping = hugetlb_page_mapping_lock_write(hpage);
			if (unlikely(!mapping))
				goto unlock_put_anon;

			ttu = TTU_RMAP_LOCKED;
		}

		try_to_migrate(src, ttu);
		page_was_mapped = 1;

		if (ttu & TTU_RMAP_LOCKED)
			i_mmap_unlock_write(mapping);
	}

	if (!folio_mapped(src))
		rc = move_to_new_folio(dst, src, mode);

	if (page_was_mapped)
		remove_migration_ptes(src,
			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);

unlock_put_anon:
	folio_unlock(dst);

put_anon:
	if (anon_vma)
		put_anon_vma(anon_vma);

	if (rc == MIGRATEPAGE_SUCCESS) {
		move_hugetlb_state(hpage, new_hpage, reason);
		put_new_page = NULL;
	}

out_unlock:
	folio_unlock(src);
out:
	if (rc == MIGRATEPAGE_SUCCESS)
		putback_active_hugepage(hpage);
	else if (rc != -EAGAIN)
		list_move_tail(&src->lru, ret);

	/*
	 * If migration was not successful and there's a freeing callback, use
	 * it. Otherwise, put_page() will drop the reference grabbed during
	 * isolation.
	 */
	if (put_new_page)
		put_new_page(new_hpage, private);
	else
		putback_active_hugepage(new_hpage);

	return rc;
}

static inline int try_split_thp(struct page *page, struct list_head *split_pages)
{
	int rc;

	lock_page(page);
	rc = split_huge_page_to_list(page, split_pages);
	unlock_page(page);
	if (!rc)
		list_move_tail(&page->lru, split_pages);

	return rc;
}

/*
 * migrate_pages - migrate the pages specified in a list, to the free pages
 *		   supplied as the target for the page migration
 *
 * @from:		The list of pages to be migrated.
 * @get_new_page:	The function used to allocate free pages to be used
 *			as the target of the page migration.
 * @put_new_page:	The function used to free target pages if migration
 *			fails, or NULL if no special handling is necessary.
 * @private:		Private data to be passed on to get_new_page()
 * @mode:		The migration mode that specifies the constraints for
 *			page migration, if any.
 * @reason:		The reason for page migration.
 * @ret_succeeded:	Set to the number of normal pages migrated successfully
 *			if the caller passes a non-NULL pointer.
 *
 * The function returns after 10 attempts or if no pages are movable anymore
 * because the list has become empty or no retryable pages exist anymore.
 * It is the caller's responsibility to call putback_movable_pages() to return
 * pages to the LRU or free list only if ret != 0.
 *
 * Returns the number of {normal pages, THPs, hugetlb pages} that were not
 * migrated, or an error code. Each THP split is counted as one non-migrated
 * THP, no matter how many of its subpages are migrated successfully.
 */
int migrate_pages(struct list_head *from, new_page_t get_new_page,
		free_page_t put_new_page, unsigned long private,
		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
{
	int retry = 1;
	int thp_retry = 1;
	int nr_failed = 0;
	int nr_failed_pages = 0;
	int nr_retry_pages = 0;
	int nr_succeeded = 0;
	int nr_thp_succeeded = 0;
	int nr_thp_failed = 0;
	int nr_thp_split = 0;
	int pass = 0;
	bool is_thp = false;
	struct page *page;
	struct page *page2;
	int rc, nr_subpages;
	LIST_HEAD(ret_pages);
	LIST_HEAD(thp_split_pages);
	bool nosplit = (reason == MR_NUMA_MISPLACED);
	bool no_subpage_counting = false;

	trace_mm_migrate_pages_start(mode, reason);

thp_subpage_migration:
	for (pass = 0; pass < 10 && (retry || thp_retry); pass++) {
		retry = 0;
		thp_retry = 0;
		nr_retry_pages = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			/*
			 * THP statistics are based on the source huge page.
			 * Capture required information that might get lost
			 * during migration.
			 */
			is_thp = PageTransHuge(page) && !PageHuge(page);
			nr_subpages = compound_nr(page);
			cond_resched();

			if (PageHuge(page))
				rc = unmap_and_move_huge_page(get_new_page,
						put_new_page, private, page,
						pass > 2, mode, reason,
						&ret_pages);
			else
				rc = unmap_and_move(get_new_page, put_new_page,
						private, page, pass > 2, mode,
						reason, &ret_pages);
			/*
			 * The rules are:
			 *	Success: a non-hugetlb page will be freed, a
			 *		 hugetlb page will be put back
			 *	-EAGAIN: stay on the from list
			 *	-ENOMEM: stay on the from list
			 *	-ENOSYS: stay on the from list
			 *	Other errno: put on ret_pages list then splice
			 *		     to from list
			 */
			switch(rc) {
			/*
			 * THP migration might be unsupported or the
			 * allocation could've failed so we should
			 * retry on the same page with the THP split
			 * to base pages.
			 *
			 * Sub-pages are put in thp_split_pages, and
			 * we will migrate them after the rest of the
			 * list is processed.
			 */
			case -ENOSYS:
				/* THP migration is unsupported */
				if (is_thp) {
					nr_thp_failed++;
					if (!try_split_thp(page, &thp_split_pages)) {
						nr_thp_split++;
						break;
					}
				/* Hugetlb migration is unsupported */
				} else if (!no_subpage_counting) {
					nr_failed++;
				}

				nr_failed_pages += nr_subpages;
				list_move_tail(&page->lru, &ret_pages);
				break;
			case -ENOMEM:
				/*
				 * When memory is low, don't bother to try to
				 * migrate other pages, just exit.
				 */
				if (is_thp) {
					nr_thp_failed++;
					/* THP NUMA faulting doesn't split THP to retry. */
					if (!nosplit && !try_split_thp(page, &thp_split_pages)) {
						nr_thp_split++;
						break;
					}
				} else if (!no_subpage_counting) {
					nr_failed++;
				}

				nr_failed_pages += nr_subpages + nr_retry_pages;
				/*
				 * There might be some subpages of fail-to-migrate
				 * THPs left in the thp_split_pages list. Move them
				 * back to the migration list so that they can be
				 * put back on the right list by the caller;
				 * otherwise the page refcount will be leaked.
				 */
				list_splice_init(&thp_split_pages, from);
				/* nr_failed isn't updated: it is unused after the goto */
				nr_thp_failed += thp_retry;
				goto out;
			case -EAGAIN:
				if (is_thp)
					thp_retry++;
				else if (!no_subpage_counting)
					retry++;
				nr_retry_pages += nr_subpages;
				break;
			case MIGRATEPAGE_SUCCESS:
				nr_succeeded += nr_subpages;
				if (is_thp)
					nr_thp_succeeded++;
				break;
			default:
				/*
				 * Permanent failure (-EBUSY, etc.):
				 * unlike the -EAGAIN case, the failed page is
				 * removed from the migration page list and not
				 * retried in the next outer loop.
				 */
				if (is_thp)
					nr_thp_failed++;
				else if (!no_subpage_counting)
					nr_failed++;

				nr_failed_pages += nr_subpages;
				break;
			}
		}
	}
	nr_failed += retry;
	nr_thp_failed += thp_retry;
	nr_failed_pages += nr_retry_pages;
	/*
	 * Try to migrate subpages of fail-to-migrate THPs, with no nr_failed
	 * counting in this round, since all subpages of a THP are counted
	 * as one failure in the first round.
	 */
	if (!list_empty(&thp_split_pages)) {
		/*
		 * Move non-migrated pages (after 10 retries) to ret_pages
		 * to avoid migrating them again.
		 */
		list_splice_init(from, &ret_pages);
		list_splice_init(&thp_split_pages, from);
		no_subpage_counting = true;
		retry = 1;
		goto thp_subpage_migration;
	}

	rc = nr_failed + nr_thp_failed;
out:
	/*
	 * Put the pages that failed permanently back on the migration list;
	 * they will be put back on the right list by the caller.
	 */
	list_splice(&ret_pages, from);

	count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
	count_vm_events(PGMIGRATE_FAIL, nr_failed_pages);
	count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
	count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed);
	count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split);
	trace_mm_migrate_pages(nr_succeeded, nr_failed_pages, nr_thp_succeeded,
			       nr_thp_failed, nr_thp_split, mode, reason);

	if (ret_succeeded)
		*ret_succeeded = nr_succeeded;

	return rc;
}
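
/*
 * Example (illustrative sketch): migrating an already isolated list of
 * pages to node 1 and putting back whatever could not be moved, mirroring
 * do_move_pages_to_node() below:
 *
 *	struct migration_target_control mtc = {
 *		.nid = 1,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE,
 *	};
 *
 *	if (migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			  (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL))
 *		putback_movable_pages(&pagelist);
 */
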
struct page *alloc_migration_target(struct page *page, unsigned long private)
{
	struct folio *folio = page_folio(page);
	struct migration_target_control *mtc;
	gfp_t gfp_mask;
	unsigned int order = 0;
	struct folio *new_folio = NULL;
	int nid;
	int zidx;

	mtc = (struct migration_target_control *)private;
	gfp_mask = mtc->gfp_mask;
	nid = mtc->nid;
	if (nid == NUMA_NO_NODE)
		nid = folio_nid(folio);

	if (folio_test_hugetlb(folio)) {
		struct hstate *h = page_hstate(&folio->page);

		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
	}

	if (folio_test_large(folio)) {
		/*
		 * clear __GFP_RECLAIM to make the migration callback
		 * consistent with regular THP allocations.
		 */
		gfp_mask &= ~__GFP_RECLAIM;
		gfp_mask |= GFP_TRANSHUGE;
		order = folio_order(folio);
	}
	zidx = zone_idx(folio_zone(folio));
	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
		gfp_mask |= __GFP_HIGHMEM;

	new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);

	return &new_folio->page;
}

#ifdef CONFIG_NUMA

static int store_status(int __user *status, int start, int value, int nr)
{
	while (nr-- > 0) {
		if (put_user(value, status + start))
			return -EFAULT;
		start++;
	}

	return 0;
}

static int do_move_pages_to_node(struct mm_struct *mm,
		struct list_head *pagelist, int node)
{
	int err;
	struct migration_target_control mtc = {
		.nid = node,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};

	err = migrate_pages(pagelist, alloc_migration_target, NULL,
		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
	if (err)
		putback_movable_pages(pagelist);
	return err;
}

/*
 * Resolves the given address to a struct page, isolates it from the LRU and
 * adds it to the given pagelist.
 * Returns:
 *     errno - if the page cannot be found/isolated
 *     0 - when it doesn't have to be migrated because it is already on the
 *         target node
 *     1 - when it has been queued
 */
static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
		int node, struct list_head *pagelist, bool migrate_all)
{
	struct vm_area_struct *vma;
	struct page *page;
	int err;

	mmap_read_lock(mm);
	err = -EFAULT;
	vma = vma_lookup(mm, addr);
	if (!vma || !vma_migratable(vma))
		goto out;

	/* FOLL_DUMP to ignore special (like zero) pages */
	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);

	err = PTR_ERR(page);
	if (IS_ERR(page))
		goto out;

	err = -ENOENT;
	if (!page)
		goto out;

	if (is_zone_device_page(page))
		goto out_putpage;

	err = 0;
	if (page_to_nid(page) == node)
		goto out_putpage;

	err = -EACCES;
	if (page_mapcount(page) > 1 && !migrate_all)
		goto out_putpage;

	if (PageHuge(page)) {
		if (PageHead(page)) {
			err = isolate_hugetlb(page, pagelist);
			if (!err)
				err = 1;
		}
	} else {
		struct page *head;

		head = compound_head(page);
		err = isolate_lru_page(head);
		if (err)
			goto out_putpage;

		err = 1;
		list_add_tail(&head->lru, pagelist);
		mod_node_page_state(page_pgdat(head),
			NR_ISOLATED_ANON + page_is_file_lru(head),
			thp_nr_pages(head));
	}
out_putpage:
	/*
	 * Either drop the duplicate refcount taken by isolate_lru_page()
	 * or drop the page ref if it was not isolated.
	 */
	put_page(page);
out:
	mmap_read_unlock(mm);
	return err;
}

static int move_pages_and_store_status(struct mm_struct *mm, int node,
		struct list_head *pagelist, int __user *status,
		int start, int i, unsigned long nr_pages)
{
	int err;

	if (list_empty(pagelist))
		return 0;

	err = do_move_pages_to_node(mm, pagelist, node);
	if (err) {
		/*
		 * A positive err means the number of pages that failed
		 * to migrate. Since we are going to abort and return the
		 * number of non-migrated pages, we need to include the
		 * rest of the nr_pages that have not been attempted as
		 * well.
		 */
		if (err > 0)
			err += nr_pages - i;
		return err;
	}
	return store_status(status, start, node, i - start);
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * the corresponding array of status values.
 */
static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	int current_node = NUMA_NO_NODE;
	LIST_HEAD(pagelist);
	int start, i;
	int err = 0, err1;

	lru_cache_disable();

	for (i = start = 0; i < nr_pages; i++) {
		const void __user *p;
		unsigned long addr;
		int node;

		err = -EFAULT;
		if (get_user(p, pages + i))
			goto out_flush;
		if (get_user(node, nodes + i))
			goto out_flush;
		addr = (unsigned long)untagged_addr(p);

		err = -ENODEV;
		if (node < 0 || node >= MAX_NUMNODES)
			goto out_flush;
		if (!node_state(node, N_MEMORY))
			goto out_flush;

		err = -EACCES;
		if (!node_isset(node, task_nodes))
			goto out_flush;

		if (current_node == NUMA_NO_NODE) {
			current_node = node;
			start = i;
		} else if (node != current_node) {
			err = move_pages_and_store_status(mm, current_node,
					&pagelist, status, start, i, nr_pages);
			if (err)
				goto out;
			start = i;
			current_node = node;
		}

		/*
		 * Errors in the page lookup or isolation are not fatal and we
		 * simply report them via status.
		 */
		err = add_page_for_migration(mm, addr, current_node,
				&pagelist, flags & MPOL_MF_MOVE_ALL);

		if (err > 0) {
			/* The page is successfully queued for migration */
			continue;
		}

		/*
		 * The move_pages() man page does not have an -EEXIST choice,
		 * so use -EFAULT instead.
		 */
		if (err == -EEXIST)
			err = -EFAULT;

		/*
		 * If the page is already on the target node (!err), store the
		 * node, otherwise, store the err.
		 */
		err = store_status(status, i, err ? : current_node, 1);
		if (err)
			goto out_flush;

		err = move_pages_and_store_status(mm, current_node, &pagelist,
				status, start, i, nr_pages);
		if (err) {
			/* We have accounted for page i */
			if (err > 0)
				err--;
			goto out;
		}
		current_node = NUMA_NO_NODE;
	}
out_flush:
	/* Make sure we do not overwrite the existing error */
	err1 = move_pages_and_store_status(mm, current_node, &pagelist,
				status, start, i, nr_pages);
	if (err >= 0)
		err = err1;
out:
	lru_cache_enable();
	return err;
}

/*
 * Determine the nodes of an array of pages and store them in an array
 * of status values.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	mmap_read_lock(mm);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		unsigned int foll_flags = FOLL_DUMP;
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = vma_lookup(mm, addr);
		if (!vma)
			goto set_status;

		/* Not all huge page follow APIs support 'FOLL_GET' */
		if (!is_vm_hugetlb_page(vma))
			foll_flags |= FOLL_GET;

		/* FOLL_DUMP to ignore special (like zero) pages */
		page = follow_page(vma, addr, foll_flags);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		if (!is_zone_device_page(page))
			err = page_to_nid(page);

		if (foll_flags & FOLL_GET)
			put_page(page);
set_status:
		*status = err;

		pages++;
		status++;
	}

	mmap_read_unlock(mm);
}

static int get_compat_pages_array(const void __user *chunk_pages[],
				  const void __user * __user *pages,
				  unsigned long chunk_nr)
{
	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
	compat_uptr_t p;
	int i;

	for (i = 0; i < chunk_nr; i++) {
		if (get_user(p, pages32 + i))
			return -EFAULT;
		chunk_pages[i] = compat_ptr(p);
	}

	return 0;
}

/*
 * Determine the nodes of a user array of pages and store them in a
 * user array of status values.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16UL
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);

		if (in_compat_syscall()) {
			if (get_compat_pages_array(chunk_pages, pages,
						   chunk_nr))
				break;
		} else {
			if (copy_from_user(chunk_pages, pages,
				      chunk_nr * sizeof(*chunk_pages)))
				break;
		}

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
{
	struct task_struct *task;
	struct mm_struct *mm;

	/*
	 * There is no need to check if the current process has the right to
	 * modify the specified process when they are the same.
	 */
	if (!pid) {
		mmget(current->mm);
		*mem_nodes = cpuset_mems_allowed(current);
		return current->mm;
	}

	/* Find the mm_struct */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (!task) {
		rcu_read_unlock();
		return ERR_PTR(-ESRCH);
	}
	get_task_struct(task);

	/*
	 * Check if this process has the right to modify the specified
	 * process. Use the regular "ptrace_may_access()" checks.
static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
{
	struct task_struct *task;
	struct mm_struct *mm;

	/*
	 * There is no need to check if the current process has the right to
	 * modify the specified process when they are the same.
	 */
	if (!pid) {
		mmget(current->mm);
		*mem_nodes = cpuset_mems_allowed(current);
		return current->mm;
	}

	/* Find the mm_struct */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (!task) {
		rcu_read_unlock();
		return ERR_PTR(-ESRCH);
	}
	get_task_struct(task);

	/*
	 * Check if this process has the right to modify the specified
	 * process. Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		rcu_read_unlock();
		mm = ERR_PTR(-EPERM);
		goto out;
	}
	rcu_read_unlock();

	mm = ERR_PTR(security_task_movememory(task));
	if (IS_ERR(mm))
		goto out;
	*mem_nodes = cpuset_mems_allowed(task);
	mm = get_task_mm(task);
out:
	put_task_struct(task);
	if (!mm)
		mm = ERR_PTR(-EINVAL);
	return mm;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
			     const void __user * __user *pages,
			     const int __user *nodes,
			     int __user *status, int flags)
{
	struct mm_struct *mm;
	int err;
	nodemask_t task_nodes;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	mm = find_mm_struct(pid, &task_nodes);
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	if (nodes)
		err = do_pages_move(mm, task_nodes, nr_pages, pages,
				    nodes, status, flags);
	else
		err = do_pages_stat(mm, nr_pages, pages, status);

	mmput(mm);
	return err;
}

SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
}
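
/*
 * Illustrative userspace sketch (not part of the kernel build): acting
 * on another process.  find_mm_struct() requires PTRACE_MODE_READ_REALCREDS
 * access to the target, and kernel_move_pages() additionally demands
 * CAP_SYS_NICE for MPOL_MF_MOVE_ALL, so an unprivileged caller is
 * limited to MPOL_MF_MOVE.  target_pid below is a hypothetical process
 * the caller is allowed to ptrace.
 *
 *	long ret = move_pages(target_pid, nr, pages, nodes, status,
 *			      MPOL_MF_MOVE_ALL);	// needs CAP_SYS_NICE
 *	if (ret < 0 && errno == EPERM)
 *		fprintf(stderr, "no ptrace access or missing CAP_SYS_NICE\n");
 */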
#ifdef CONFIG_NUMA_BALANCING
/*
 * Returns true if this is a safe migration target node for misplaced NUMA
 * pages. Currently it only checks the watermarks, which is crude.
 */
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
				   unsigned long nr_migrate_pages)
{
	int z;

	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		struct zone *zone = pgdat->node_zones + z;

		if (!managed_zone(zone))
			continue;

		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
		if (!zone_watermark_ok(zone, 0,
				       high_wmark_pages(zone) +
				       nr_migrate_pages,
				       ZONE_MOVABLE, 0))
			continue;
		return true;
	}
	return false;
}

static struct page *alloc_misplaced_dst_page(struct page *page,
					     unsigned long data)
{
	int nid = (int) data;
	int order = compound_order(page);
	gfp_t gfp = __GFP_THISNODE;
	struct folio *new;

	if (order > 0)
		gfp |= GFP_TRANSHUGE_LIGHT;
	else {
		gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
			__GFP_NOWARN;
		gfp &= ~__GFP_RECLAIM;
	}
	new = __folio_alloc_node(gfp, order, nid);

	return &new->page;
}

static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
	int nr_pages = thp_nr_pages(page);
	int order = compound_order(page);

	VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);

	/* Do not migrate THP mapped by multiple processes */
	if (PageTransHuge(page) && total_mapcount(page) > 1)
		return 0;

	/* Avoid migrating to a node that is nearly full */
	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
		int z;

		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
			return 0;
		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
			if (managed_zone(pgdat->node_zones + z))
				break;
		}
		wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
		return 0;
	}

	if (isolate_lru_page(page))
		return 0;

	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
			    nr_pages);

	/*
	 * Isolating the page has taken another reference, so the
	 * caller's reference can be safely dropped without the page
	 * disappearing underneath us during migration.
	 */
	put_page(page);
	return 1;
}
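
/*
 * A minimal sketch of the reference-counting contract above (the caller
 * shown is hypothetical): the caller passes in its own page reference;
 * on success numamigrate_isolate_page() relies on the extra reference
 * taken by LRU isolation and drops the caller's, so the page is then
 * kept alive by the isolation reference alone.  On failure the caller
 * still owns its reference and must drop it itself.
 *
 *	get_page(page);				// caller's reference
 *	if (numamigrate_isolate_page(pgdat, page)) {
 *		// isolated; caller's reference already dropped
 *	} else {
 *		put_page(page);			// caller drops its own
 *	}
 */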
/*
 * Attempt to migrate a misplaced page to the specified destination
 * node. Caller is expected to have an elevated reference count on
 * the page that will be dropped by this function before returning.
 */
int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
			   int node)
{
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated;
	int nr_remaining;
	unsigned int nr_succeeded;
	LIST_HEAD(migratepages);
	int nr_pages = thp_nr_pages(page);

	/*
	 * Don't migrate file pages that are mapped in multiple processes
	 * with execute permissions, as they are probably shared libraries.
	 */
	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
	    (vma->vm_flags & VM_EXEC))
		goto out;

	/*
	 * Also do not migrate dirty pages, as not all filesystems can move
	 * dirty pages in MIGRATE_ASYNC mode, which would be a waste of cycles.
	 */
	if (page_is_file_lru(page) && PageDirty(page))
		goto out;

	isolated = numamigrate_isolate_page(pgdat, page);
	if (!isolated)
		goto out;

	list_add(&page->lru, &migratepages);
	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
				     NULL, node, MIGRATE_ASYNC,
				     MR_NUMA_MISPLACED, &nr_succeeded);
	if (nr_remaining) {
		if (!list_empty(&migratepages)) {
			list_del(&page->lru);
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_lru(page), -nr_pages);
			putback_lru_page(page);
		}
		isolated = 0;
	}
	if (nr_succeeded) {
		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
		if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
					    nr_succeeded);
	}
	BUG_ON(!list_empty(&migratepages));
	return isolated;

out:
	put_page(page);
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_NUMA */
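
/*
 * Observability note with an illustrative sketch (not part of the
 * kernel build): the counters updated by migrate_misplaced_page() are
 * exported through /proc/vmstat -- "numa_pages_migrated" for
 * NUMA_PAGE_MIGRATE and "pgpromote_success" for PGPROMOTE_SUCCESS --
 * so NUMA-balancing migration activity can be watched from userspace:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		char line[128];
 *		FILE *f = fopen("/proc/vmstat", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			if (!strncmp(line, "numa_pages_migrated", 19) ||
 *			    !strncmp(line, "pgpromote_success", 17))
 *				fputs(line, stdout);
 *		fclose(f);
 *		return 0;
 *	}
 */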