// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
#include <linux/oom.h>
#include <linux/memory.h>
#include <linux/random.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>

#include <asm/tlbflush.h>

#include <trace/events/migrate.h>

#include "internal.h"

bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct folio *folio = folio_get_nontail_page(page);
	const struct movable_operations *mops;

	/*
	 * Avoid burning cycles with pages that are yet under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leakage.
	 */
	if (!folio)
		goto out;

	if (unlikely(folio_test_slab(folio)))
		goto out_putfolio;
	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
	smp_rmb();
	/*
	 * Check movable flag before taking the page lock because
	 * we use non-atomic bitops on newly allocated page flags so
	 * unconditionally grabbing the lock ruins page's owner side.
	 */
	if (unlikely(!__folio_test_movable(folio)))
		goto out_putfolio;
	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
	smp_rmb();
	if (unlikely(folio_test_slab(folio)))
		goto out_putfolio;

	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as race against releasing a page.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
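	 *
	 * For example, two compaction scanners may find the same movable
	 * page at the same time; the folio_trylock() below, together with
	 * the isolated check, ensures that only one of them proceeds to
	 * mops->isolate_page().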
	 */
	if (unlikely(!folio_trylock(folio)))
		goto out_putfolio;

	if (!folio_test_movable(folio) || folio_test_isolated(folio))
		goto out_no_isolated;

	mops = folio_movable_ops(folio);
	VM_BUG_ON_FOLIO(!mops, folio);

	if (!mops->isolate_page(&folio->page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use the isolated flag */
	WARN_ON_ONCE(folio_test_isolated(folio));
	folio_set_isolated(folio);
	folio_unlock(folio);

	return true;

out_no_isolated:
	folio_unlock(folio);
out_putfolio:
	folio_put(folio);
out:
	return false;
}

static void putback_movable_folio(struct folio *folio)
{
	const struct movable_operations *mops = folio_movable_ops(folio);

	mops->putback_page(&folio->page);
	folio_clear_isolated(folio);
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_hugetlb().
 */
void putback_movable_pages(struct list_head *l)
{
	struct folio *folio;
	struct folio *folio2;

	list_for_each_entry_safe(folio, folio2, l, lru) {
		if (unlikely(folio_test_hugetlb(folio))) {
			folio_putback_active_hugetlb(folio);
			continue;
		}
		list_del(&folio->lru);
		/*
		 * We isolated the non-LRU movable folio, so here we can use
		 * __folio_test_movable because an LRU folio's mapping cannot
		 * have PAGE_MAPPING_MOVABLE.
		 */
		if (unlikely(__folio_test_movable(folio))) {
			VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
			folio_lock(folio);
			if (folio_test_movable(folio))
				putback_movable_folio(folio);
			else
				folio_clear_isolated(folio);
			folio_unlock(folio);
			folio_put(folio);
		} else {
			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
					folio_is_file_lru(folio), -folio_nr_pages(folio));
			folio_putback_lru(folio);
		}
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *old)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);

	while (page_vma_mapped_walk(&pvmw)) {
		rmap_t rmap_flags = RMAP_NONE;
		pte_t old_pte;
		pte_t pte;
		swp_entry_t entry;
		struct page *new;
		unsigned long idx = 0;

		/* pgoff is invalid for ksm pages, but they are never large */
		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
		new = folio_page(folio, idx);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
					!folio_test_pmd_mappable(folio), folio);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		folio_get(folio);
		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
		old_pte = ptep_get(pvmw.pte);

		entry = pte_to_swp_entry(old_pte);
		if (!is_migration_entry_young(entry))
			pte = pte_mkold(pte);
		if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
			pte = pte_mkdirty(pte);
		if (pte_swp_soft_dirty(old_pte))
			pte = pte_mksoft_dirty(pte);
		else
			pte = pte_clear_soft_dirty(pte);

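		/*
		 * Only make the pte writable if the migration entry was
		 * created from a writable pte; otherwise carry over any
		 * uffd-wp marker so write protection survives migration.
		 */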
		if (is_writable_migration_entry(entry))
			pte = pte_mkwrite(pte, vma);
		else if (pte_swp_uffd_wp(old_pte))
			pte = pte_mkuffd_wp(pte);

		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
			rmap_flags |= RMAP_EXCLUSIVE;

		if (unlikely(is_device_private_page(new))) {
			if (pte_write(pte))
				entry = make_writable_device_private_entry(
							page_to_pfn(new));
			else
				entry = make_readable_device_private_entry(
							page_to_pfn(new));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(old_pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(old_pte))
				pte = pte_swp_mkuffd_wp(pte);
		}

#ifdef CONFIG_HUGETLB_PAGE
		if (folio_test_hugetlb(folio)) {
			struct hstate *h = hstate_vma(vma);
			unsigned int shift = huge_page_shift(h);
			unsigned long psize = huge_page_size(h);

			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
			if (folio_test_anon(folio))
				hugetlb_add_anon_rmap(folio, vma, pvmw.address,
						      rmap_flags);
			else
				hugetlb_add_file_rmap(folio);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
					psize);
		} else
#endif
		{
			if (folio_test_anon(folio))
				folio_add_anon_rmap_pte(folio, new, vma,
							pvmw.address, rmap_flags);
			else
				folio_add_file_rmap_pte(folio, new, vma);
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		}
		if (vma->vm_flags & VM_LOCKED)
			mlock_drain_local();

		trace_remove_migration_pte(pvmw.address, pte_val(pte),
					   compound_order(new));

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = src,
	};

	if (locked)
		rmap_walk_locked(dst, &rwc);
	else
		rmap_walk(dst, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
			  unsigned long address)
{
	spinlock_t *ptl;
	pte_t *ptep;
	pte_t pte;
	swp_entry_t entry;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!ptep)
		return;

	pte = ptep_get(ptep);
	pte_unmap(ptep);

	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	migration_entry_wait_on_locked(entry, ptl);
	return;
out:
	spin_unlock(ptl);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The vma read lock must be held upon entry. Holding that lock prevents either
 * the pte or the ptl from being freed.
 *
 * This function will release the vma lock before returning.
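 *
 * Callers (e.g. hugetlb_fault()) are expected to already hold the vma
 * read lock and must not touch it again after this returns.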
 */
void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
	pte_t pte;

	hugetlb_vma_assert_locked(vma);
	spin_lock(ptl);
	pte = huge_ptep_get(ptep);

	if (unlikely(!is_hugetlb_entry_migration(pte))) {
		spin_unlock(ptl);
		hugetlb_vma_unlock_read(vma);
	} else {
		/*
		 * If migration entry existed, safe to release vma lock
		 * here because the pgtable page won't be freed without the
		 * pgtable lock released. See comment right above pgtable
		 * lock release in migration_entry_wait_on_locked().
		 */
		hugetlb_vma_unlock_read(vma);
		migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
	}
}
#endif

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
	return;
unlock:
	spin_unlock(ptl);
}
#endif

static int folio_expected_refs(struct address_space *mapping,
		struct folio *folio)
{
	int refs = 1;

	if (!mapping)
		return refs;

	refs += folio_nr_pages(folio);
	if (folio_test_private(folio))
		refs++;

	return refs;
}

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
	long nr = folio_nr_pages(folio);
	long entries, i;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (folio_ref_count(folio) != expected_count)
			return -EAGAIN;

		/* Take off deferred split queue while frozen and memcg set */
		if (folio_test_large(folio) &&
		    folio_test_large_rmappable(folio)) {
			if (!folio_ref_freeze(folio, expected_count))
				return -EAGAIN;
			folio_undo_large_rmappable(folio);
			folio_ref_unfreeze(folio, expected_count);
		}

		/* No turning back from here */
		newfolio->index = folio->index;
		newfolio->mapping = folio->mapping;
		if (folio_test_swapbacked(folio))
			__folio_set_swapbacked(newfolio);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = folio_zone(folio);
	newzone = folio_zone(newfolio);

	xas_lock_irq(&xas);
	if (!folio_ref_freeze(folio, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	/* Take off deferred split queue while frozen and memcg set */
	if (folio_test_large(folio) && folio_test_large_rmappable(folio))
		folio_undo_large_rmappable(folio);

	/*
	 * Now we know that no one else is looking at the folio:
	 * no turning back from here.
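	 *
	 * The refcount is frozen at zero above, so no new references
	 * can be taken until folio_ref_unfreeze() below.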
	 */
	newfolio->index = folio->index;
	newfolio->mapping = folio->mapping;
	folio_ref_add(newfolio, nr); /* add cache reference */
	if (folio_test_swapbacked(folio)) {
		__folio_set_swapbacked(newfolio);
		if (folio_test_swapcache(folio)) {
			folio_set_swapcache(newfolio);
			newfolio->private = folio_get_private(folio);
		}
		entries = nr;
	} else {
		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
		entries = 1;
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = folio_test_dirty(folio);
	if (dirty) {
		folio_clear_dirty(folio);
		folio_set_dirty(newfolio);
	}

	/* Swap cache still stores N entries instead of a high-order entry */
	for (i = 0; i < entries; i++) {
		xas_store(&xas, newfolio);
		xas_next(&xas);
	}

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	folio_ref_unfreeze(folio, expected_count - nr);

	xas_unlock(&xas);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		struct lruvec *old_lruvec, *new_lruvec;
		struct mem_cgroup *memcg;

		memcg = folio_memcg(folio);
		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);

		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);

			if (folio_test_pmd_mappable(folio)) {
				__mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
				__mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
			}
		}
#ifdef CONFIG_SWAP
		if (folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
		}
#endif
		if (dirty && mapping_can_writeback(mapping)) {
			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(folio_migrate_mapping);

/*
 * The expected number of remaining references is the same as that
 * of folio_migrate_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct folio *dst, struct folio *src)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(src));
	int expected_count;

	xas_lock_irq(&xas);
	expected_count = folio_expected_refs(mapping, src);
	if (!folio_ref_freeze(src, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	dst->index = src->index;
	dst->mapping = src->mapping;

	folio_ref_add(dst, folio_nr_pages(dst));

	xas_store(&xas, dst);

	folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));

	xas_unlock_irq(&xas);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Copy the flags and some other ancillary information
 */
void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
{
	int cpupid;

	if (folio_test_error(folio))
		folio_set_error(newfolio);
	if (folio_test_referenced(folio))
		folio_set_referenced(newfolio);
	if (folio_test_uptodate(folio))
		folio_mark_uptodate(newfolio);
	if (folio_test_clear_active(folio)) {
		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
		folio_set_active(newfolio);
	} else if (folio_test_clear_unevictable(folio))
		folio_set_unevictable(newfolio);
	if (folio_test_workingset(folio))
		folio_set_workingset(newfolio);
	if (folio_test_checked(folio))
		folio_set_checked(newfolio);
	/*
	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
	 * migration entries. We can still have PG_anon_exclusive set on the
	 * effectively unmapped and unreferenced first sub-page of an
	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
	 */
	if (folio_test_mappedtodisk(folio))
		folio_set_mappedtodisk(newfolio);

	/* Move dirty on pages not done by folio_migrate_mapping() */
	if (folio_test_dirty(folio))
		folio_set_dirty(newfolio);

	if (folio_test_young(folio))
		folio_set_young(newfolio);
	if (folio_test_idle(folio))
		folio_set_idle(newfolio);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = folio_xchg_last_cpupid(folio, -1);
	/*
	 * For memory tiering mode, when a page is migrated between slow
	 * and fast memory nodes, reset the cpupid, because it is used to
	 * record page access time in slow memory nodes.
	 */
	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
		bool f_toptier = node_is_toptier(folio_nid(folio));
		bool t_toptier = node_is_toptier(folio_nid(newfolio));

		if (f_toptier != t_toptier)
			cpupid = -1;
	}
	folio_xchg_last_cpupid(newfolio, cpupid);

	folio_migrate_ksm(newfolio, folio);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * ksm_get_folio() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (folio_test_swapcache(folio))
		folio_clear_swapcache(folio);
	folio_clear_private(folio);

	/* page->private contains hugetlb specific flags */
	if (!folio_test_hugetlb(folio))
		folio->private = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (folio_test_writeback(newfolio))
		folio_end_writeback(newfolio);

	/*
	 * PG_readahead shares the same bit with PG_reclaim. The above
	 * end_page_writeback() may clear PG_readahead mistakenly, so set the
	 * bit after that.
	 */
	if (folio_test_readahead(folio))
		folio_set_readahead(newfolio);

	folio_copy_owner(newfolio, folio);

	mem_cgroup_migrate(folio, newfolio);
}
EXPORT_SYMBOL(folio_migrate_flags);

void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
{
	folio_copy(newfolio, folio);
	folio_migrate_flags(newfolio, folio);
}
EXPORT_SYMBOL(folio_migrate_copy);

/************************************************************
 *                    Migration functions
 ***********************************************************/

int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode, int extra_count)
{
	int rc;

	BUG_ON(folio_test_writeback(src));	/* Writeback must be complete */

	rc = folio_migrate_mapping(mapping, dst, src, extra_count);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}

/**
 * migrate_folio() - Simple folio migration.
 * @mapping: The address_space containing the folio.
 * @dst: The folio to migrate the data to.
 * @src: The folio containing the current data.
 * @mode: How to migrate the page.
 *
 * Common logic to directly migrate a single LRU folio suitable for
 * folios that do not use PagePrivate/PagePrivate2.
 *
 * Folios are locked upon entry and exit.
 */
int migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode)
{
	return migrate_folio_extra(mapping, dst, src, mode, 0);
}
EXPORT_SYMBOL(migrate_folio);

#ifdef CONFIG_BUFFER_HEAD
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
		enum migrate_mode mode)
{
	struct buffer_head *bh = head;
	struct buffer_head *failed_bh;

	do {
		if (!trylock_buffer(bh)) {
			if (mode == MIGRATE_ASYNC)
				goto unlock;
			if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
				goto unlock;
			lock_buffer(bh);
		}

		bh = bh->b_this_page;
	} while (bh != head);

	return true;

unlock:
	/* We failed to lock the buffer and cannot stall.
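	 * Unlock, in order, the buffers we did manage to lock, stopping
	 * at the one that failed.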
	 */
	failed_bh = bh;
	bh = head;
	while (bh != failed_bh) {
		unlock_buffer(bh);
		bh = bh->b_this_page;
	}

	return false;
}

static int __buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode,
		bool check_refs)
{
	struct buffer_head *bh, *head;
	int rc;
	int expected_count;

	head = folio_buffers(src);
	if (!head)
		return migrate_folio(mapping, dst, src, mode);

	/* Check whether page does not have extra refs before we do more work */
	expected_count = folio_expected_refs(mapping, src);
	if (folio_ref_count(src) != expected_count)
		return -EAGAIN;

	if (!buffer_migrate_lock_buffers(head, mode))
		return -EAGAIN;

	if (check_refs) {
		bool busy;
		bool invalidated = false;

recheck_buffers:
		busy = false;
		spin_lock(&mapping->i_private_lock);
		bh = head;
		do {
			if (atomic_read(&bh->b_count)) {
				busy = true;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
		if (busy) {
			if (invalidated) {
				rc = -EAGAIN;
				goto unlock_buffers;
			}
			spin_unlock(&mapping->i_private_lock);
			invalidate_bh_lrus();
			invalidated = true;
			goto recheck_buffers;
		}
	}

	rc = folio_migrate_mapping(mapping, dst, src, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		goto unlock_buffers;

	folio_attach_private(dst, folio_detach_private(src));

	bh = head;
	do {
		folio_set_bh(bh, dst, bh_offset(bh));
		bh = bh->b_this_page;
	} while (bh != head);

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);

	rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
	if (check_refs)
		spin_unlock(&mapping->i_private_lock);
	bh = head;
	do {
		unlock_buffer(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	return rc;
}

/**
 * buffer_migrate_folio() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * This function can only be used if the underlying filesystem guarantees
 * that no other references to @src exist. For example, attached buffer
 * heads are accessed only under the folio lock. If your filesystem cannot
 * provide this guarantee, buffer_migrate_folio_norefs() may be more
 * appropriate.
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_folio);

/**
 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * Like buffer_migrate_folio() except that this variant is more careful
 * and checks that there are also no buffer head references. This function
 * is the right one for mappings where buffer heads are directly looked
 * up and referenced (such as block device mappings).
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio_norefs(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, true);
}
EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
#endif /* CONFIG_BUFFER_HEAD */

int filemap_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	int ret;

	ret = folio_migrate_mapping(mapping, dst, src, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_get_private(src))
		folio_attach_private(dst, folio_detach_private(src));

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(filemap_migrate_folio);

/*
 * Writeback a folio to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct folio *folio)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!folio_clear_dirty_for_io(folio))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty folio may imply that the underlying filesystem has
	 * the folio on some queue. So the folio must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * folio state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(folio, folio, false);

	rc = mapping->a_ops->writepage(&folio->page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		folio_lock(folio);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	if (folio_test_dirty(src)) {
		/* Only writeback folios in full synchronous migration */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return -EBUSY;
		}
		return writeout(mapping, src);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (!filemap_release_folio(src, GFP_KERNEL))
		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

	return migrate_folio(mapping, dst, src, mode);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *   MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_folio(struct folio *dst, struct folio *src,
				enum migrate_mode mode)
{
	int rc = -EAGAIN;
	bool is_lru = !__folio_test_movable(src);

	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);

	if (likely(is_lru)) {
		struct address_space *mapping = folio_mapping(src);

		if (!mapping)
			rc = migrate_folio(mapping, dst, src, mode);
		else if (mapping_unmovable(mapping))
			rc = -EOPNOTSUPP;
		else if (mapping->a_ops->migrate_folio)
			/*
			 * Most folios have a mapping and most filesystems
			 * provide a migrate_folio callback. Anonymous folios
			 * are part of swap space which also has its own
			 * migrate_folio callback. This is the most common path
			 * for page migration.
			 */
			rc = mapping->a_ops->migrate_folio(mapping, dst, src,
								mode);
		else
			rc = fallback_migrate_folio(mapping, dst, src, mode);
	} else {
		const struct movable_operations *mops;

		/*
		 * In case of a non-LRU page, it could have been released
		 * after the isolation step. In that case, we shouldn't
		 * try migration.
		 */
		VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
		if (!folio_test_movable(src)) {
			rc = MIGRATEPAGE_SUCCESS;
			folio_clear_isolated(src);
			goto out;
		}

		mops = folio_movable_ops(src);
		rc = mops->migrate_page(&dst->page, &src->page, mode);
		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
				!folio_test_isolated(src));
	}

	/*
	 * When successful, old pagecache src->mapping must be cleared before
	 * src is freed; but stats require that PageAnon be left as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (__folio_test_movable(src)) {
			VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);

			/*
			 * We clear PG_movable under page_lock so any compactor
			 * cannot try to migrate this page.
			 */
			folio_clear_isolated(src);
		}

		/*
		 * Anonymous and movable src->mapping will be cleared by
		 * free_pages_prepare so don't reset it here for keeping
		 * the type to work PageAnon, for example.
		 */
		if (!folio_mapping_flags(src))
			src->mapping = NULL;

		if (likely(!folio_is_zone_device(dst)))
			flush_dcache_folio(dst);
	}
out:
	return rc;
}

/*
 * To record some information during migration, we use the otherwise
 * unused private field of the newly allocated destination folio.
 * This is safe because nobody is using it except us.
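 *
 * An anon_vma pointer is at least 4-byte aligned, so its two low bits
 * are free to carry the PAGE_WAS_* flags below; __migrate_folio_extract()
 * masks them off again via PAGE_OLD_STATES.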
 */
enum {
	PAGE_WAS_MAPPED = BIT(0),
	PAGE_WAS_MLOCKED = BIT(1),
	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
};

static void __migrate_folio_record(struct folio *dst,
				   int old_page_state,
				   struct anon_vma *anon_vma)
{
	dst->private = (void *)anon_vma + old_page_state;
}

static void __migrate_folio_extract(struct folio *dst,
				    int *old_page_state,
				    struct anon_vma **anon_vmap)
{
	unsigned long private = (unsigned long)dst->private;

	*anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
	*old_page_state = private & PAGE_OLD_STATES;
	dst->private = NULL;
}

/* Restore the source folio to the original state upon failure */
static void migrate_folio_undo_src(struct folio *src,
				   int page_was_mapped,
				   struct anon_vma *anon_vma,
				   bool locked,
				   struct list_head *ret)
{
	if (page_was_mapped)
		remove_migration_ptes(src, src, false);
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	if (locked)
		folio_unlock(src);
	if (ret)
		list_move_tail(&src->lru, ret);
}

/* Restore the destination folio to the original state upon failure */
static void migrate_folio_undo_dst(struct folio *dst, bool locked,
				   free_folio_t put_new_folio, unsigned long private)
{
	if (locked)
		folio_unlock(dst);
	if (put_new_folio)
		put_new_folio(dst, private);
	else
		folio_put(dst);
}

/* Cleanup src folio upon migration success */
static void migrate_folio_done(struct folio *src,
			       enum migrate_reason reason)
{
	/*
	 * Compaction can migrate also non-LRU pages which are
	 * not accounted to NR_ISOLATED_*. They can be recognized
	 * as __folio_test_movable
	 */
	if (likely(!__folio_test_movable(src)))
		mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
				    folio_is_file_lru(src), -folio_nr_pages(src));

	if (reason != MR_MEMORY_FAILURE)
		/* We release the page in page_handle_poison. */
		folio_put(src);
}

/* Obtain the lock on page, remove all ptes. */
static int migrate_folio_unmap(new_folio_t get_new_folio,
		free_folio_t put_new_folio, unsigned long private,
		struct folio *src, struct folio **dstp, enum migrate_mode mode,
		enum migrate_reason reason, struct list_head *ret)
{
	struct folio *dst;
	int rc = -EAGAIN;
	int old_page_state = 0;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__folio_test_movable(src);
	bool locked = false;
	bool dst_locked = false;

	if (folio_ref_count(src) == 1) {
		/* Folio was freed from under us. So we are done. */
		folio_clear_active(src);
		folio_clear_unevictable(src);
		/* free_pages_prepare() will clear PG_isolated. */
		list_del(&src->lru);
		migrate_folio_done(src, reason);
		return MIGRATEPAGE_SUCCESS;
	}

	dst = get_new_folio(src, private);
	if (!dst)
		return -ENOMEM;
	*dstp = dst;

	dst->private = NULL;

	if (!folio_trylock(src)) {
		if (mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readahead). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		/*
		 * In "light" mode, we can wait for transient locks (eg
		 * inserting a page into the page table), but it's not
		 * worth waiting for I/O.
		 */
		if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
			goto out;

		folio_lock(src);
	}
	locked = true;
	if (folio_test_mlocked(src))
		old_page_state |= PAGE_WAS_MLOCKED;

	if (folio_test_writeback(src)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much.
		 */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			rc = -EBUSY;
			goto out;
		}
		folio_wait_writeback(src);
	}

	/*
	 * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
	 * we cannot notice that anon_vma is freed while we migrate a page.
	 * This get_anon_vma() delays freeing the anon_vma pointer until the end
	 * of migration. File cache pages are no problem because of page_lock():
	 * file caches may use write_page() or lock_page() in migration, so we
	 * only need to care about anon pages here.
	 *
	 * Only folio_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside one of its mms.
	 * But if we cannot get anon_vma, then we won't need it anyway,
	 * because that implies that the anon page is no longer mapped
	 * (and cannot be remapped so long as we hold the page lock).
	 */
	if (folio_test_anon(src) && !folio_test_ksm(src))
		anon_vma = folio_get_anon_vma(src);

	/*
	 * Block others from accessing the new page when we get around to
	 * establishing additional references. We are usually the only one
	 * holding a reference to dst at this point. We used to have a BUG
	 * here if folio_trylock(dst) fails, but would like to allow for
	 * cases where there might be a race with the previous use of dst.
	 * This is much like races on the refcount of oldpage: just don't BUG().
	 */
	if (unlikely(!folio_trylock(dst)))
		goto out;
	dst_locked = true;

	if (unlikely(!is_lru)) {
		__migrate_folio_record(dst, old_page_state, anon_vma);
		return MIGRATEPAGE_UNMAP;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a src->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_cleanup_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
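	 *
	 * Case 1 is handled by simply skipping try_to_migrate() below;
	 * for case 2, try_to_free_buffers() drops the fs-private metadata
	 * and the migration attempt is retried.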
	 */
	if (!src->mapping) {
		if (folio_test_private(src)) {
			try_to_free_buffers(src);
			goto out;
		}
	} else if (folio_mapped(src)) {
		/* Establish migration ptes */
		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
			       !folio_test_ksm(src) && !anon_vma, src);
		try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
		old_page_state |= PAGE_WAS_MAPPED;
	}

	if (!folio_mapped(src)) {
		__migrate_folio_record(dst, old_page_state, anon_vma);
		return MIGRATEPAGE_UNMAP;
	}

out:
	/*
	 * A folio that has not been unmapped will be restored to the
	 * right list unless we want to retry.
	 */
	if (rc == -EAGAIN)
		ret = NULL;

	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
			       anon_vma, locked, ret);
	migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);

	return rc;
}

/* Migrate the folio to the newly allocated folio in dst. */
static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
			      struct folio *src, struct folio *dst,
			      enum migrate_mode mode, enum migrate_reason reason,
			      struct list_head *ret)
{
	int rc;
	int old_page_state = 0;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__folio_test_movable(src);
	struct list_head *prev;

	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
	prev = dst->lru.prev;
	list_del(&dst->lru);

	rc = move_to_new_folio(dst, src, mode);
	if (rc)
		goto out;

	if (unlikely(!is_lru))
		goto out_unlock_both;

	/*
	 * When successful, push dst to LRU immediately: so that if it
	 * turns out to be an mlocked page, remove_migration_ptes() will
	 * automatically build up the correct dst->mlock_count for it.
	 *
	 * We would like to do something similar for the old page, when
	 * unsuccessful, and other cases when a page has been temporarily
	 * isolated from the unevictable LRU: but this case is the easiest.
	 */
	folio_add_lru(dst);
	if (old_page_state & PAGE_WAS_MLOCKED)
		lru_add_drain();

	if (old_page_state & PAGE_WAS_MAPPED)
		remove_migration_ptes(src, dst, false);

out_unlock_both:
	folio_unlock(dst);
	set_page_owner_migrate_reason(&dst->page, reason);
	/*
	 * If migration is successful, decrease refcount of dst,
	 * which will not free the page because new page owner increased
	 * refcounter.
	 */
	folio_put(dst);

	/*
	 * A folio that has been migrated has all references removed
	 * and will be freed.
	 */
	list_del(&src->lru);
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	folio_unlock(src);
	migrate_folio_done(src, reason);

	return rc;
out:
	/*
	 * A folio that has not been migrated will be restored to the
	 * right list unless we want to retry.
	 */
	if (rc == -EAGAIN) {
		list_add(&dst->lru, prev);
		__migrate_folio_record(dst, old_page_state, anon_vma);
		return rc;
	}

	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
			       anon_vma, true, ret);
	migrate_folio_undo_dst(dst, true, put_new_folio, private);

	return rc;
}

/*
 * Counterpart of unmap_and_move_page() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then pte is replaced with migration swap entry and direct I/O code
 * will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_folio_t get_new_folio,
		free_folio_t put_new_folio, unsigned long private,
		struct folio *src, int force, enum migrate_mode mode,
		int reason, struct list_head *ret)
{
	struct folio *dst;
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;

	if (folio_ref_count(src) == 1) {
		/* page was freed from under us. So we are done. */
		folio_putback_active_hugetlb(src);
		return MIGRATEPAGE_SUCCESS;
	}

	dst = get_new_folio(src, private);
	if (!dst)
		return -ENOMEM;

	if (!folio_trylock(src)) {
		if (!force)
			goto out;
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			goto out;
		}
		folio_lock(src);
	}

	/*
	 * Check for pages which are in the process of being freed. Without
	 * folio_mapping() set, hugetlbfs specific move page routine will not
	 * be called and we could leak usage counts for subpools.
	 */
	if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
		rc = -EBUSY;
		goto out_unlock;
	}

	if (folio_test_anon(src))
		anon_vma = folio_get_anon_vma(src);

	if (unlikely(!folio_trylock(dst)))
		goto put_anon;

	if (folio_mapped(src)) {
		enum ttu_flags ttu = 0;

		if (!folio_test_anon(src)) {
			/*
			 * In shared mappings, try_to_unmap could potentially
			 * call huge_pmd_unshare. Because of this, take
			 * semaphore in write mode here and set TTU_RMAP_LOCKED
			 * to let lower levels know we have taken the lock.
			 */
			mapping = hugetlb_folio_mapping_lock_write(src);
			if (unlikely(!mapping))
				goto unlock_put_anon;

			ttu = TTU_RMAP_LOCKED;
		}

		try_to_migrate(src, ttu);
		page_was_mapped = 1;

		if (ttu & TTU_RMAP_LOCKED)
			i_mmap_unlock_write(mapping);
	}

	if (!folio_mapped(src))
		rc = move_to_new_folio(dst, src, mode);

	if (page_was_mapped)
		remove_migration_ptes(src,
			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);

unlock_put_anon:
	folio_unlock(dst);

put_anon:
	if (anon_vma)
		put_anon_vma(anon_vma);

	if (rc == MIGRATEPAGE_SUCCESS) {
		move_hugetlb_state(src, dst, reason);
		put_new_folio = NULL;
	}

out_unlock:
	folio_unlock(src);
out:
	if (rc == MIGRATEPAGE_SUCCESS)
		folio_putback_active_hugetlb(src);
	else if (rc != -EAGAIN)
		list_move_tail(&src->lru, ret);

	/*
	 * If migration was not successful and there's a freeing callback, use
	 * it.
	 * Otherwise, put_page() will drop the reference grabbed during
	 * isolation.
	 */
	if (put_new_folio)
		put_new_folio(dst, private);
	else
		folio_putback_active_hugetlb(dst);

	return rc;
}

static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
{
	int rc;

	folio_lock(folio);
	rc = split_folio_to_list(folio, split_folios);
	folio_unlock(folio);
	if (!rc)
		list_move_tail(&folio->lru, split_folios);

	return rc;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define NR_MAX_BATCHED_MIGRATION	HPAGE_PMD_NR
#else
#define NR_MAX_BATCHED_MIGRATION	512
#endif
#define NR_MAX_MIGRATE_PAGES_RETRY	10
#define NR_MAX_MIGRATE_ASYNC_RETRY	3
#define NR_MAX_MIGRATE_SYNC_RETRY					\
	(NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)

struct migrate_pages_stats {
	int nr_succeeded;	/* Normal and large folios migrated successfully, in
				   units of base pages */
	int nr_failed_pages;	/* Normal and large folios failed to be migrated, in
				   units of base pages. Untried folios aren't counted */
	int nr_thp_succeeded;	/* THP migrated successfully */
	int nr_thp_failed;	/* THP failed to be migrated */
	int nr_thp_split;	/* THP split before migrating */
	int nr_split;		/* Large folio (include THP) split before migrating */
};

/*
 * Returns the number of hugetlb folios that were not migrated, or an error
 * code. The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or
 * when no hugetlb folios are movable any more, either because the list has
 * become empty or because no retryable hugetlb folios remain. It is the
 * caller's responsibility to call putback_movable_pages() only if ret != 0.
 */
static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
			    free_folio_t put_new_folio, unsigned long private,
			    enum migrate_mode mode, int reason,
			    struct migrate_pages_stats *stats,
			    struct list_head *ret_folios)
{
	int retry = 1;
	int nr_failed = 0;
	int nr_retry_pages = 0;
	int pass = 0;
	struct folio *folio, *folio2;
	int rc, nr_pages;

	for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
		retry = 0;
		nr_retry_pages = 0;

		list_for_each_entry_safe(folio, folio2, from, lru) {
			if (!folio_test_hugetlb(folio))
				continue;

			nr_pages = folio_nr_pages(folio);

			cond_resched();

			/*
			 * Migratability of hugepages depends on architectures and
			 * their size. This check is necessary because some callers
			 * of hugepage migration like soft offline and memory
			 * hotremove don't walk through page tables or check whether
			 * the hugepage is pmd-based or not before kicking migration.
			 */
			if (!hugepage_migration_supported(folio_hstate(folio))) {
				nr_failed++;
				stats->nr_failed_pages += nr_pages;
				list_move_tail(&folio->lru, ret_folios);
				continue;
			}

			rc = unmap_and_move_huge_page(get_new_folio,
						      put_new_folio, private,
						      folio, pass > 2, mode,
						      reason, ret_folios);
			/*
			 * The rules are:
			 *	Success: hugetlb folio will be put back
			 *	-EAGAIN: stay on the from list
			 *	-ENOMEM: stay on the from list
			 *	Other errno: put on ret_folios list
			 */
			switch(rc) {
			case -ENOMEM:
				/*
				 * When memory is low, don't bother to try to migrate
				 * other folios, just exit.
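				 * The caller is then expected to put the
				 * remaining folios back with
				 * putback_movable_pages().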
				 */
				stats->nr_failed_pages += nr_pages + nr_retry_pages;
				return -ENOMEM;
			case -EAGAIN:
				retry++;
				nr_retry_pages += nr_pages;
				break;
			case MIGRATEPAGE_SUCCESS:
				stats->nr_succeeded += nr_pages;
				break;
			default:
				/*
				 * Permanent failure (-EBUSY, etc.):
				 * unlike -EAGAIN case, the failed folio is
				 * removed from migration folio list and not
				 * retried in the next outer loop.
				 */
				nr_failed++;
				stats->nr_failed_pages += nr_pages;
				break;
			}
		}
	}
	/*
	 * nr_failed is the number of hugetlb folios that failed to be
	 * migrated. After NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and
	 * count retried hugetlb folios as failed.
	 */
	nr_failed += retry;
	stats->nr_failed_pages += nr_retry_pages;

	return nr_failed;
}

/*
 * migrate_pages_batch() first unmaps folios in the from list as many as
 * possible, then moves the unmapped folios.
 *
 * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on a
 * lock or bit when we have locked more than one folio, which may cause
 * deadlock (e.g., for the loop device). So, if mode != MIGRATE_ASYNC, the
 * length of the from list must be <= 1.
 */
static int migrate_pages_batch(struct list_head *from,
		new_folio_t get_new_folio, free_folio_t put_new_folio,
		unsigned long private, enum migrate_mode mode, int reason,
		struct list_head *ret_folios, struct list_head *split_folios,
		struct migrate_pages_stats *stats, int nr_pass)
{
	int retry = 1;
	int thp_retry = 1;
	int nr_failed = 0;
	int nr_retry_pages = 0;
	int pass = 0;
	bool is_thp = false;
	bool is_large = false;
	struct folio *folio, *folio2, *dst = NULL, *dst2;
	int rc, rc_saved = 0, nr_pages;
	LIST_HEAD(unmap_folios);
	LIST_HEAD(dst_folios);
	bool nosplit = (reason == MR_NUMA_MISPLACED);

	VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
			!list_empty(from) && !list_is_singular(from));

	for (pass = 0; pass < nr_pass && retry; pass++) {
		retry = 0;
		thp_retry = 0;
		nr_retry_pages = 0;

		list_for_each_entry_safe(folio, folio2, from, lru) {
			is_large = folio_test_large(folio);
			is_thp = is_large && folio_test_pmd_mappable(folio);
			nr_pages = folio_nr_pages(folio);

			cond_resched();

			/*
			 * The rare folio on the deferred split list should
			 * be split now. It should not count as a failure:
			 * but increment nr_failed because, without doing so,
			 * migrate_pages() may report success with (split but
			 * unmigrated) pages still on its fromlist; whereas it
			 * always reports success when its fromlist is empty.
			 * stats->nr_thp_failed should be increased too,
			 * otherwise stats inconsistency will happen when
			 * migrate_pages_batch is called via migrate_pages()
			 * with MIGRATE_SYNC and MIGRATE_ASYNC.
			 *
			 * Only check it without removing it from the list.
			 * Since the folio can be on deferred_split_scan()
			 * local list and removing it can cause the local list
			 * corruption. Folio split process below can handle it
			 * with the help of folio_ref_freeze().
			 *
			 * nr_pages > 2 is needed to avoid checking order-1
			 * page cache folios. They exist, in contrast to
			 * non-existent order-1 anonymous folios, and do not
			 * use _deferred_list.
			 */
			if (nr_pages > 2 &&
			    !list_empty(&folio->_deferred_list)) {
				if (try_split_folio(folio, split_folios) == 0) {
					nr_failed++;
					stats->nr_thp_failed += is_thp;
					stats->nr_thp_split += is_thp;
					stats->nr_split++;
					continue;
				}
			}

			/*
			 * Large folio migration might be unsupported or
			 * the allocation might fail, so we should retry
			 * on the same folio with the large folio split
			 * to normal folios.
			 *
			 * Split folios are put in split_folios, and
			 * we will migrate them after the rest of the
			 * list is processed.
			 */
			if (!thp_migration_supported() && is_thp) {
				nr_failed++;
				stats->nr_thp_failed++;
				if (!try_split_folio(folio, split_folios)) {
					stats->nr_thp_split++;
					stats->nr_split++;
					continue;
				}
				stats->nr_failed_pages += nr_pages;
				list_move_tail(&folio->lru, ret_folios);
				continue;
			}

			rc = migrate_folio_unmap(get_new_folio, put_new_folio,
					private, folio, &dst, mode, reason,
					ret_folios);
			/*
			 * The rules are:
			 *	Success: folio will be freed
			 *	Unmap: folio will be put on unmap_folios list,
			 *	       dst folio put on dst_folios list
			 *	-EAGAIN: stay on the from list
			 *	-ENOMEM: stay on the from list
			 *	Other errno: put on ret_folios list
			 */
			switch(rc) {
			case -ENOMEM:
				/*
				 * When memory is low, don't bother to try to migrate
				 * other folios, move unmapped folios, then exit.
				 */
				nr_failed++;
				stats->nr_thp_failed += is_thp;
				/* Large folio NUMA faulting doesn't split to retry. */
				if (is_large && !nosplit) {
					int ret = try_split_folio(folio, split_folios);

					if (!ret) {
						stats->nr_thp_split += is_thp;
						stats->nr_split++;
						break;
					} else if (reason == MR_LONGTERM_PIN &&
						   ret == -EAGAIN) {
						/*
						 * Try again to split large folio to
						 * mitigate the failure of longterm pinning.
						 */
						retry++;
						thp_retry += is_thp;
						nr_retry_pages += nr_pages;
						/* Undo duplicated failure counting. */
						nr_failed--;
						stats->nr_thp_failed -= is_thp;
						break;
					}
				}

				stats->nr_failed_pages += nr_pages + nr_retry_pages;
				/* nr_failed isn't updated for not used */
				stats->nr_thp_failed += thp_retry;
				rc_saved = rc;
				if (list_empty(&unmap_folios))
					goto out;
				else
					goto move;
			case -EAGAIN:
				retry++;
				thp_retry += is_thp;
				nr_retry_pages += nr_pages;
				break;
			case MIGRATEPAGE_SUCCESS:
				stats->nr_succeeded += nr_pages;
				stats->nr_thp_succeeded += is_thp;
				break;
			case MIGRATEPAGE_UNMAP:
				list_move_tail(&folio->lru, &unmap_folios);
				list_add_tail(&dst->lru, &dst_folios);
				break;
			default:
				/*
				 * Permanent failure (-EBUSY, etc.):
				 * unlike -EAGAIN case, the failed folio is
				 * removed from migration folio list and not
				 * retried in the next outer loop.
				 */
				nr_failed++;
				stats->nr_thp_failed += is_thp;
				stats->nr_failed_pages += nr_pages;
				break;
			}
		}
	}
	nr_failed += retry;
	stats->nr_thp_failed += thp_retry;
	stats->nr_failed_pages += nr_retry_pages;
move:
	/* Flush TLBs for all unmapped folios */
	try_to_unmap_flush();

	retry = 1;
	for (pass = 0; pass < nr_pass && retry; pass++) {
		retry = 0;
		thp_retry = 0;
		nr_retry_pages = 0;

		dst = list_first_entry(&dst_folios, struct folio, lru);
		dst2 = list_next_entry(dst, lru);
		list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
			is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
			nr_pages = folio_nr_pages(folio);

			cond_resched();

			rc = migrate_folio_move(put_new_folio, private,
						folio, dst, mode,
						reason, ret_folios);
			/*
			 * The rules are:
			 *	Success: folio will be freed
			 *	-EAGAIN: stay on the unmap_folios list
			 *	Other errno: put on ret_folios list
			 */
			switch(rc) {
			case -EAGAIN:
				retry++;
				thp_retry += is_thp;
				nr_retry_pages += nr_pages;
				break;
			case MIGRATEPAGE_SUCCESS:
				stats->nr_succeeded += nr_pages;
				stats->nr_thp_succeeded += is_thp;
				break;
			default:
				nr_failed++;
				stats->nr_thp_failed += is_thp;
				stats->nr_failed_pages += nr_pages;
				break;
			}
			dst = dst2;
			dst2 = list_next_entry(dst, lru);
		}
	}
	nr_failed += retry;
	stats->nr_thp_failed += thp_retry;
	stats->nr_failed_pages += nr_retry_pages;

	rc = rc_saved ? : nr_failed;
out:
	/* Cleanup remaining folios */
	dst = list_first_entry(&dst_folios, struct folio, lru);
	dst2 = list_next_entry(dst, lru);
	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
		int old_page_state = 0;
		struct anon_vma *anon_vma = NULL;

		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
				       anon_vma, true, ret_folios);
		list_del(&dst->lru);
		migrate_folio_undo_dst(dst, true, put_new_folio, private);
		dst = dst2;
		dst2 = list_next_entry(dst, lru);
	}

	return rc;
}

static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
		free_folio_t put_new_folio, unsigned long private,
		enum migrate_mode mode, int reason,
		struct list_head *ret_folios, struct list_head *split_folios,
		struct migrate_pages_stats *stats)
{
	int rc, nr_failed = 0;
	LIST_HEAD(folios);
	struct migrate_pages_stats astats;

	memset(&astats, 0, sizeof(astats));
	/* Try to migrate in batch with MIGRATE_ASYNC mode firstly */
	rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
				 reason, &folios, split_folios, &astats,
				 NR_MAX_MIGRATE_ASYNC_RETRY);
	stats->nr_succeeded += astats.nr_succeeded;
	stats->nr_thp_succeeded += astats.nr_thp_succeeded;
	stats->nr_thp_split += astats.nr_thp_split;
	stats->nr_split += astats.nr_split;
	if (rc < 0) {
		stats->nr_failed_pages += astats.nr_failed_pages;
		stats->nr_thp_failed += astats.nr_thp_failed;
		list_splice_tail(&folios, ret_folios);
		return rc;
	}
	stats->nr_thp_failed += astats.nr_thp_split;
	/*
	 * Do not count rc, as pages will be retried below.
	 * Count nr_split only, since it includes nr_thp_split.
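	 *
	 * (A split counts as a failure even if every piece migrates later:
	 * per migrate_pages()'s contract, a split large folio is reported
	 * as not migrated.)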
	 */
	nr_failed += astats.nr_split;
	/*
	 * Fall back to migrating all failed folios one by one synchronously.
	 * All failed folios except split THPs will be retried, so their
	 * failure isn't counted.
	 */
	list_splice_tail_init(&folios, from);
	while (!list_empty(from)) {
		list_move(from->next, &folios);
		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
					 private, mode, reason, ret_folios,
					 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
		list_splice_tail_init(&folios, ret_folios);
		if (rc < 0)
			return rc;
		nr_failed += rc;
	}

	return nr_failed;
}

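/*
 * Illustrative usage sketch for migrate_pages() (assumes a caller-built
 * @pagelist of isolated folios and a valid @target_nid; this mirrors
 * do_move_pages_to_node() below):
 *
 *	struct migration_target_control mtc = {
 *		.nid = target_nid,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
 *		.reason = MR_SYSCALL,
 *	};
 *	int err = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *				(unsigned long)&mtc, MIGRATE_SYNC,
 *				MR_SYSCALL, NULL);
 *
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */
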
/*
 * migrate_pages - migrate the folios specified in a list, to the free folios
 *		   supplied as the target for the page migration
 *
 * @from:		The list of folios to be migrated.
 * @get_new_folio:	The function used to allocate free folios to be used
 *			as the target of the folio migration.
 * @put_new_folio:	The function used to free target folios if migration
 *			fails, or NULL if no special handling is necessary.
 * @private:		Private data to be passed on to get_new_folio()
 * @mode:		The migration mode that specifies the constraints for
 *			folio migration, if any.
 * @reason:		The reason for folio migration.
 * @ret_succeeded:	Set to the number of folios migrated successfully if
 *			the caller passes a non-NULL pointer.
 *
 * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no
 * folios are movable any more, either because the list has become empty or
 * because no retryable folios remain. It is the caller's responsibility to
 * call putback_movable_pages() only if ret != 0.
 *
 * Returns the number of {normal folio, large folio, hugetlb} pages that were
 * not migrated, or an error code. Each large folio that had to be split is
 * counted as one non-migrated large folio, no matter how many of its split
 * folios are migrated successfully.
 */
int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
		free_folio_t put_new_folio, unsigned long private,
		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
{
	int rc, rc_gather;
	int nr_pages;
	struct folio *folio, *folio2;
	LIST_HEAD(folios);
	LIST_HEAD(ret_folios);
	LIST_HEAD(split_folios);
	struct migrate_pages_stats stats;

	trace_mm_migrate_pages_start(mode, reason);

	memset(&stats, 0, sizeof(stats));

	rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
				     mode, reason, &stats, &ret_folios);
	if (rc_gather < 0)
		goto out;

again:
	nr_pages = 0;
	list_for_each_entry_safe(folio, folio2, from, lru) {
		/* Retried hugetlb folios will be kept in the list */
		if (folio_test_hugetlb(folio)) {
			list_move_tail(&folio->lru, &ret_folios);
			continue;
		}

		nr_pages += folio_nr_pages(folio);
		if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
			break;
	}
	if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
		list_cut_before(&folios, from, &folio2->lru);
	else
		list_splice_init(from, &folios);
	if (mode == MIGRATE_ASYNC)
		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
				private, mode, reason, &ret_folios,
				&split_folios, &stats,
				NR_MAX_MIGRATE_PAGES_RETRY);
	else
		rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
				private, mode, reason, &ret_folios,
				&split_folios, &stats);
	list_splice_tail_init(&folios, &ret_folios);
	if (rc < 0) {
		rc_gather = rc;
		list_splice_tail(&split_folios, &ret_folios);
		goto out;
	}
	if (!list_empty(&split_folios)) {
		/*
		 * Failure isn't counted since all split folios of a large folio
		 * are counted as 1 failure already. And we only try to migrate
		 * with minimal effort, forcing MIGRATE_ASYNC mode and retrying
		 * once.
		 */
		migrate_pages_batch(&split_folios, get_new_folio,
				    put_new_folio, private, MIGRATE_ASYNC, reason,
				    &ret_folios, NULL, &stats, 1);
		list_splice_tail_init(&split_folios, &ret_folios);
	}
	rc_gather += rc;
	if (!list_empty(from))
		goto again;
out:
	/*
	 * Put the permanently failed folios back on the migration list; they
	 * will be put back on the right list by the caller.
	 */
	list_splice(&ret_folios, from);

	/*
	 * Return 0 in case all split folios of failed-to-migrate large folios
	 * are migrated successfully.
	 */
	if (list_empty(from))
		rc_gather = 0;

	count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
	count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
	count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
	count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
	count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
	trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
			       stats.nr_thp_succeeded, stats.nr_thp_failed,
			       stats.nr_thp_split, stats.nr_split, mode,
			       reason);

	if (ret_succeeded)
		*ret_succeeded = stats.nr_succeeded;

	return rc_gather;
}

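/*
 * Allocation callback suitable for migrate_pages(): @private must point
 * to a struct migration_target_control describing the preferred node,
 * nodemask and GFP flags for the destination folio.
 */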
struct folio *alloc_migration_target(struct folio *src, unsigned long private)
{
	struct migration_target_control *mtc;
	gfp_t gfp_mask;
	unsigned int order = 0;
	int nid;
	int zidx;

	mtc = (struct migration_target_control *)private;
	gfp_mask = mtc->gfp_mask;
	nid = mtc->nid;
	if (nid == NUMA_NO_NODE)
		nid = folio_nid(src);

	if (folio_test_hugetlb(src)) {
		struct hstate *h = folio_hstate(src);

		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
		return alloc_hugetlb_folio_nodemask(h, nid,
						mtc->nmask, gfp_mask,
						htlb_allow_alloc_fallback(mtc->reason));
	}

	if (folio_test_large(src)) {
		/*
		 * clear __GFP_RECLAIM to make the migration callback
		 * consistent with regular THP allocations.
		 */
		gfp_mask &= ~__GFP_RECLAIM;
		gfp_mask |= GFP_TRANSHUGE;
		order = folio_order(src);
	}
	zidx = zone_idx(folio_zone(src));
	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
		gfp_mask |= __GFP_HIGHMEM;

	return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
}

#ifdef CONFIG_NUMA

static int store_status(int __user *status, int start, int value, int nr)
{
	while (nr-- > 0) {
		if (put_user(value, status + start))
			return -EFAULT;
		start++;
	}

	return 0;
}

static int do_move_pages_to_node(struct list_head *pagelist, int node)
{
	int err;
	struct migration_target_control mtc = {
		.nid = node,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
		.reason = MR_SYSCALL,
	};

	err = migrate_pages(pagelist, alloc_migration_target, NULL,
		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
	if (err)
		putback_movable_pages(pagelist);
	return err;
}

/*
 * Resolves the given address to a struct page, isolates it from the LRU and
 * puts it on the given pagelist.
 * Returns:
 *	errno	- if the page cannot be found/isolated
 *	0	- when it doesn't have to be migrated because it is already on
 *		  the target node
 *	1	- when it has been queued
 */
static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
		int node, struct list_head *pagelist, bool migrate_all)
{
	struct vm_area_struct *vma;
	unsigned long addr;
	struct page *page;
	struct folio *folio;
	int err;

	mmap_read_lock(mm);
	addr = (unsigned long)untagged_addr_remote(mm, p);

	err = -EFAULT;
	vma = vma_lookup(mm, addr);
	if (!vma || !vma_migratable(vma))
		goto out;

	/* FOLL_DUMP to ignore special (like zero) pages */
	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);

	err = PTR_ERR(page);
	if (IS_ERR(page))
		goto out;

	err = -ENOENT;
	if (!page)
		goto out;

	folio = page_folio(page);
	if (folio_is_zone_device(folio))
		goto out_putfolio;

	err = 0;
	if (folio_nid(folio) == node)
		goto out_putfolio;

	err = -EACCES;
	if (folio_likely_mapped_shared(folio) && !migrate_all)
		goto out_putfolio;

	err = -EBUSY;
	if (folio_test_hugetlb(folio)) {
		if (isolate_hugetlb(folio, pagelist))
			err = 1;
	} else {
		if (!folio_isolate_lru(folio))
			goto out_putfolio;

		err = 1;
		list_add_tail(&folio->lru, pagelist);
		node_stat_mod_folio(folio,
			NR_ISOLATED_ANON + folio_is_file_lru(folio),
			folio_nr_pages(folio));
	}
out_putfolio:
	/*
	 * Either remove the duplicate refcount from folio_isolate_lru()
	 * or drop the folio ref if it was not isolated.
	 */
	folio_put(folio);
out:
	mmap_read_unlock(mm);
	return err;
}

static int move_pages_and_store_status(int node,
		struct list_head *pagelist, int __user *status,
		int start, int i, unsigned long nr_pages)
{
	int err;

	if (list_empty(pagelist))
		return 0;

	err = do_move_pages_to_node(pagelist, node);
	if (err) {
		/*
		 * A positive err means the number of pages that failed to
		 * migrate. Since we are going to abort and return the number
		 * of non-migrated pages, we need to include the rest of the
		 * nr_pages that have not been attempted as well.
		 */
		if (err > 0)
			err += nr_pages - i;
		return err;
	}
	return store_status(status, start, node, i - start);
}

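/*
 * Page movement below is batched: consecutive pages bound for the same
 * node are queued on a private pagelist and migrated in one call to
 * move_pages_and_store_status() whenever the requested node changes or
 * the input array is exhausted.
 */
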
/*
 * Migrate an array of page addresses onto an array of nodes and fill in
 * the corresponding status array.
 */
static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	compat_uptr_t __user *compat_pages = (void __user *)pages;
	int current_node = NUMA_NO_NODE;
	LIST_HEAD(pagelist);
	int start, i;
	int err = 0, err1;

	lru_cache_disable();

	for (i = start = 0; i < nr_pages; i++) {
		const void __user *p;
		int node;

		err = -EFAULT;
		if (in_compat_syscall()) {
			compat_uptr_t cp;

			if (get_user(cp, compat_pages + i))
				goto out_flush;

			p = compat_ptr(cp);
		} else {
			if (get_user(p, pages + i))
				goto out_flush;
		}
		if (get_user(node, nodes + i))
			goto out_flush;

		err = -ENODEV;
		if (node < 0 || node >= MAX_NUMNODES)
			goto out_flush;
		if (!node_state(node, N_MEMORY))
			goto out_flush;

		err = -EACCES;
		if (!node_isset(node, task_nodes))
			goto out_flush;

		if (current_node == NUMA_NO_NODE) {
			current_node = node;
			start = i;
		} else if (node != current_node) {
			err = move_pages_and_store_status(current_node,
					&pagelist, status, start, i, nr_pages);
			if (err)
				goto out;
			start = i;
			current_node = node;
		}

		/*
		 * Errors in the page lookup or isolation are not fatal; we
		 * simply report them via status.
		 */
		err = add_page_for_migration(mm, p, current_node, &pagelist,
					     flags & MPOL_MF_MOVE_ALL);

		if (err > 0) {
			/* The page is successfully queued for migration */
			continue;
		}

		/*
		 * The move_pages() man page does not have an -EEXIST choice, so
		 * use -EFAULT instead.
		 */
		if (err == -EEXIST)
			err = -EFAULT;

		/*
		 * If the page is already on the target node (!err), store the
		 * node; otherwise, store the err.
		 */
		err = store_status(status, i, err ? : current_node, 1);
		if (err)
			goto out_flush;

		err = move_pages_and_store_status(current_node, &pagelist,
				status, start, i, nr_pages);
		if (err) {
			/* We have accounted for page i */
			if (err > 0)
				err--;
			goto out;
		}
		current_node = NUMA_NO_NODE;
	}
out_flush:
	/* Make sure we do not overwrite the existing error */
	err1 = move_pages_and_store_status(current_node, &pagelist,
			status, start, i, nr_pages);
	if (err >= 0)
		err = err1;
out:
	lru_cache_enable();
	return err;
}

/*
 * Determine the nodes of an array of pages and store them in a status array.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	mmap_read_lock(mm);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = vma_lookup(mm, addr);
		if (!vma)
			goto set_status;

		/* FOLL_DUMP to ignore special (like zero) pages */
		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		if (!is_zone_device_page(page))
			err = page_to_nid(page);

		put_page(page);
set_status:
		*status = err;

		pages++;
		status++;
	}

	mmap_read_unlock(mm);
}

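/*
 * Fetch a chunk of user page pointers into chunk_pages[], widening the
 * 32-bit pointers supplied by a compat (32-bit) caller.
 */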
static int get_compat_pages_array(const void __user *chunk_pages[],
				  const void __user * __user *pages,
				  unsigned long chunk_nr)
{
	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
	compat_uptr_t p;
	int i;

	for (i = 0; i < chunk_nr; i++) {
		if (get_user(p, pages32 + i))
			return -EFAULT;
		chunk_pages[i] = compat_ptr(p);
	}

	return 0;
}

/*
 * Determine the nodes of a user array of pages and store them in a user
 * array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16UL
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);

		if (in_compat_syscall()) {
			if (get_compat_pages_array(chunk_pages, pages,
						   chunk_nr))
				break;
		} else {
			if (copy_from_user(chunk_pages, pages,
					   chunk_nr * sizeof(*chunk_pages)))
				break;
		}

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
{
	struct task_struct *task;
	struct mm_struct *mm;

	/*
	 * There is no need to check if the current process has the right to
	 * modify the specified process when they are the same.
	 */
	if (!pid) {
		mmget(current->mm);
		*mem_nodes = cpuset_mems_allowed(current);
		return current->mm;
	}

	/* Find the mm_struct */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (!task) {
		rcu_read_unlock();
		return ERR_PTR(-ESRCH);
	}
	get_task_struct(task);

	/*
	 * Check if this process has the right to modify the specified
	 * process. Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		rcu_read_unlock();
		mm = ERR_PTR(-EPERM);
		goto out;
	}
	rcu_read_unlock();

	mm = ERR_PTR(security_task_movememory(task));
	if (IS_ERR(mm))
		goto out;
	*mem_nodes = cpuset_mems_allowed(task);
	mm = get_task_mm(task);
out:
	put_task_struct(task);
	if (!mm)
		mm = ERR_PTR(-EINVAL);
	return mm;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
			     const void __user * __user *pages,
			     const int __user *nodes,
			     int __user *status, int flags)
{
	struct mm_struct *mm;
	int err;
	nodemask_t task_nodes;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	mm = find_mm_struct(pid, &task_nodes);
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	if (nodes)
		err = do_pages_move(mm, task_nodes, nr_pages, pages,
				    nodes, status, flags);
	else
		err = do_pages_stat(mm, nr_pages, pages, status);

	mmput(mm);
	return err;
}

SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
}

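/*
 * Illustrative userspace sketch for move_pages(2) (assumes the libnuma
 * <numaif.h> declaration and a valid @pid and @addr; not part of the
 * kernel source): move one page to node 1.
 *
 *	void *pages[1] = { addr };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *	long rc = move_pages(pid, 1, pages, nodes, status, MPOL_MF_MOVE);
 *
 * A negative rc is a hard error; a positive rc is the number of pages
 * that could not be migrated. On return, status[0] holds the node the
 * page now resides on, or a negative errno for that page.
 */
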
#ifdef CONFIG_NUMA_BALANCING
/*
 * Returns true if this is a safe migration target node for misplaced NUMA
 * pages. Currently it only checks the watermarks, which is crude.
 */
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
				   unsigned long nr_migrate_pages)
{
	int z;

	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		struct zone *zone = pgdat->node_zones + z;

		if (!managed_zone(zone))
			continue;

		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
		if (!zone_watermark_ok(zone, 0,
				       high_wmark_pages(zone) +
				       nr_migrate_pages,
				       ZONE_MOVABLE, 0))
			continue;
		return true;
	}
	return false;
}

static struct folio *alloc_misplaced_dst_folio(struct folio *src,
					       unsigned long data)
{
	int nid = (int) data;
	int order = folio_order(src);
	gfp_t gfp = __GFP_THISNODE;

	if (order > 0)
		gfp |= GFP_TRANSHUGE_LIGHT;
	else {
		gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
			__GFP_NOWARN;
		gfp &= ~__GFP_RECLAIM;
	}
	return __folio_alloc_node(gfp, order, nid);
}

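/*
 * Isolate a misplaced folio in preparation for NUMA-balancing migration.
 * Returns 1 on success, having dropped the caller's folio reference
 * (isolation holds its own); returns 0 with the caller's reference still
 * held otherwise.
 */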
static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
{
	int nr_pages = folio_nr_pages(folio);

	/* Avoid migrating to a node that is nearly full */
	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
		int z;

		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
			return 0;
		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
			if (managed_zone(pgdat->node_zones + z))
				break;
		}

		/*
		 * If there are no managed zones, it should not proceed
		 * further.
		 */
		if (z < 0)
			return 0;

		wakeup_kswapd(pgdat->node_zones + z, 0,
			      folio_order(folio), ZONE_MOVABLE);
		return 0;
	}

	if (!folio_isolate_lru(folio))
		return 0;

	node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
			    nr_pages);

	/*
	 * Isolating the folio has taken another reference, so the
	 * caller's reference can be safely dropped without the folio
	 * disappearing underneath us during migration.
	 */
	folio_put(folio);
	return 1;
}

/*
 * Attempt to migrate a misplaced folio to the specified destination
 * node. Caller is expected to have an elevated reference count on
 * the folio that will be dropped by this function before returning.
 */
int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
			    int node)
{
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated;
	int nr_remaining;
	unsigned int nr_succeeded;
	LIST_HEAD(migratepages);
	int nr_pages = folio_nr_pages(folio);

	/*
	 * Don't migrate file folios that are mapped in multiple processes
	 * with execute permissions as they are probably shared libraries.
	 *
	 * See folio_likely_mapped_shared() on possible imprecision when we
	 * cannot easily detect if a folio is shared.
	 */
	if (folio_likely_mapped_shared(folio) && folio_is_file_lru(folio) &&
	    (vma->vm_flags & VM_EXEC))
		goto out;

	/*
	 * Also do not migrate dirty folios as not all filesystems can move
	 * dirty folios in MIGRATE_ASYNC mode, which is a waste of cycles.
	 */
	if (folio_is_file_lru(folio) && folio_test_dirty(folio))
		goto out;

	isolated = numamigrate_isolate_folio(pgdat, folio);
	if (!isolated)
		goto out;

	list_add(&folio->lru, &migratepages);
	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
				     NULL, node, MIGRATE_ASYNC,
				     MR_NUMA_MISPLACED, &nr_succeeded);
	if (nr_remaining) {
		if (!list_empty(&migratepages)) {
			list_del(&folio->lru);
			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
					folio_is_file_lru(folio), -nr_pages);
			folio_putback_lru(folio);
		}
		isolated = 0;
	}
	if (nr_succeeded) {
		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
		if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
					    nr_succeeded);
	}
	BUG_ON(!list_empty(&migratepages));
	return isolated;

out:
	folio_put(folio);
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_NUMA */