// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
#include <linux/oom.h>
#include <linux/memory.h>
#include <linux/random.h>
#include <linux/sched/sysctl.h>

#include <asm/tlbflush.h>

#include <trace/events/migrate.h>

#include "internal.h"

int isolate_movable_page(struct page *page, isolate_mode_t mode)
{
        const struct movable_operations *mops;

        /*
         * Avoid burning cycles with pages that are still being freed by
         * __free_pages(), or that just got freed under us.
         *
         * If we 'win' a race with a movable page being freed under us and
         * raise its refcount, preventing __free_pages() from doing its job,
         * the put_page() at the end of this block will release the page
         * again, avoiding a leak.
         */
        if (unlikely(!get_page_unless_zero(page)))
                goto out;

        /*
         * Check PageMovable before taking the page lock: the page's owner
         * assumes that nobody touches the PG_locked bit of a newly allocated
         * page, so locking it unconditionally would interfere with the owner.
         */
        if (unlikely(!__PageMovable(page)))
                goto out_putpage;
        /*
         * As movable pages are not isolated from LRU lists, concurrent
         * compaction threads can race against page migration functions
         * as well as against a page being released.
         *
         * In order to avoid having an already isolated movable page
         * being (wrongly) re-isolated while it is under migration,
         * or to avoid attempting to isolate pages being released,
         * let's be sure we hold the page lock
         * before proceeding with the movable page isolation steps.
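         *
         * For background (an illustrative sketch, not code from this file):
         * a driver that owns such non-LRU movable pages marks them with
         * __SetPageMovable() and supplies a struct movable_operations, e.g.
         *
         *	static const struct movable_operations foo_mops = {
         *		.isolate_page	= foo_isolate_page,
         *		.migrate_page	= foo_migrate_page,
         *		.putback_page	= foo_putback_page,
         *	};
         *
         * ("foo" is a made-up name). page_movable_ops() below resolves that
         * structure from the page, and its isolate_page() callback is what
         * this function ends up invoking once the page lock is held.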
93 */ 94 if (unlikely(!trylock_page(page))) 95 goto out_putpage; 96 97 if (!PageMovable(page) || PageIsolated(page)) 98 goto out_no_isolated; 99 100 mops = page_movable_ops(page); 101 VM_BUG_ON_PAGE(!mops, page); 102 103 if (!mops->isolate_page(page, mode)) 104 goto out_no_isolated; 105 106 /* Driver shouldn't use PG_isolated bit of page->flags */ 107 WARN_ON_ONCE(PageIsolated(page)); 108 SetPageIsolated(page); 109 unlock_page(page); 110 111 return 0; 112 113 out_no_isolated: 114 unlock_page(page); 115 out_putpage: 116 put_page(page); 117 out: 118 return -EBUSY; 119 } 120 121 static void putback_movable_page(struct page *page) 122 { 123 const struct movable_operations *mops = page_movable_ops(page); 124 125 mops->putback_page(page); 126 ClearPageIsolated(page); 127 } 128 129 /* 130 * Put previously isolated pages back onto the appropriate lists 131 * from where they were once taken off for compaction/migration. 132 * 133 * This function shall be used whenever the isolated pageset has been 134 * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range() 135 * and isolate_huge_page(). 136 */ 137 void putback_movable_pages(struct list_head *l) 138 { 139 struct page *page; 140 struct page *page2; 141 142 list_for_each_entry_safe(page, page2, l, lru) { 143 if (unlikely(PageHuge(page))) { 144 putback_active_hugepage(page); 145 continue; 146 } 147 list_del(&page->lru); 148 /* 149 * We isolated non-lru movable page so here we can use 150 * __PageMovable because LRU page's mapping cannot have 151 * PAGE_MAPPING_MOVABLE. 152 */ 153 if (unlikely(__PageMovable(page))) { 154 VM_BUG_ON_PAGE(!PageIsolated(page), page); 155 lock_page(page); 156 if (PageMovable(page)) 157 putback_movable_page(page); 158 else 159 ClearPageIsolated(page); 160 unlock_page(page); 161 put_page(page); 162 } else { 163 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + 164 page_is_file_lru(page), -thp_nr_pages(page)); 165 putback_lru_page(page); 166 } 167 } 168 } 169 170 /* 171 * Restore a potential migration pte to a working pte entry 172 */ 173 static bool remove_migration_pte(struct folio *folio, 174 struct vm_area_struct *vma, unsigned long addr, void *old) 175 { 176 DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION); 177 178 while (page_vma_mapped_walk(&pvmw)) { 179 rmap_t rmap_flags = RMAP_NONE; 180 pte_t pte; 181 swp_entry_t entry; 182 struct page *new; 183 unsigned long idx = 0; 184 185 /* pgoff is invalid for ksm pages, but they are never large */ 186 if (folio_test_large(folio) && !folio_test_hugetlb(folio)) 187 idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff; 188 new = folio_page(folio, idx); 189 190 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 191 /* PMD-mapped THP migration entry */ 192 if (!pvmw.pte) { 193 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || 194 !folio_test_pmd_mappable(folio), folio); 195 remove_migration_pmd(&pvmw, new); 196 continue; 197 } 198 #endif 199 200 folio_get(folio); 201 pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot))); 202 if (pte_swp_soft_dirty(*pvmw.pte)) 203 pte = pte_mksoft_dirty(pte); 204 205 /* 206 * Recheck VMA as permissions can change since migration started 207 */ 208 entry = pte_to_swp_entry(*pvmw.pte); 209 if (is_writable_migration_entry(entry)) 210 pte = maybe_mkwrite(pte, vma); 211 else if (pte_swp_uffd_wp(*pvmw.pte)) 212 pte = pte_mkuffd_wp(pte); 213 214 if (folio_test_anon(folio) && !is_readable_migration_entry(entry)) 215 rmap_flags |= RMAP_EXCLUSIVE; 216 217 if (unlikely(is_device_private_page(new))) { 218 if 
(pte_write(pte)) 219 entry = make_writable_device_private_entry( 220 page_to_pfn(new)); 221 else 222 entry = make_readable_device_private_entry( 223 page_to_pfn(new)); 224 pte = swp_entry_to_pte(entry); 225 if (pte_swp_soft_dirty(*pvmw.pte)) 226 pte = pte_swp_mksoft_dirty(pte); 227 if (pte_swp_uffd_wp(*pvmw.pte)) 228 pte = pte_swp_mkuffd_wp(pte); 229 } 230 231 #ifdef CONFIG_HUGETLB_PAGE 232 if (folio_test_hugetlb(folio)) { 233 unsigned int shift = huge_page_shift(hstate_vma(vma)); 234 235 pte = pte_mkhuge(pte); 236 pte = arch_make_huge_pte(pte, shift, vma->vm_flags); 237 if (folio_test_anon(folio)) 238 hugepage_add_anon_rmap(new, vma, pvmw.address, 239 rmap_flags); 240 else 241 page_dup_file_rmap(new, true); 242 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); 243 } else 244 #endif 245 { 246 if (folio_test_anon(folio)) 247 page_add_anon_rmap(new, vma, pvmw.address, 248 rmap_flags); 249 else 250 page_add_file_rmap(new, vma, false); 251 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); 252 } 253 if (vma->vm_flags & VM_LOCKED) 254 mlock_page_drain_local(); 255 256 trace_remove_migration_pte(pvmw.address, pte_val(pte), 257 compound_order(new)); 258 259 /* No need to invalidate - it was non-present before */ 260 update_mmu_cache(vma, pvmw.address, pvmw.pte); 261 } 262 263 return true; 264 } 265 266 /* 267 * Get rid of all migration entries and replace them by 268 * references to the indicated page. 269 */ 270 void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked) 271 { 272 struct rmap_walk_control rwc = { 273 .rmap_one = remove_migration_pte, 274 .arg = src, 275 }; 276 277 if (locked) 278 rmap_walk_locked(dst, &rwc); 279 else 280 rmap_walk(dst, &rwc); 281 } 282 283 /* 284 * Something used the pte of a page under migration. We need to 285 * get to the page and wait until migration is finished. 286 * When we return from this function the fault will be retried. 287 */ 288 void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, 289 spinlock_t *ptl) 290 { 291 pte_t pte; 292 swp_entry_t entry; 293 294 spin_lock(ptl); 295 pte = *ptep; 296 if (!is_swap_pte(pte)) 297 goto out; 298 299 entry = pte_to_swp_entry(pte); 300 if (!is_migration_entry(entry)) 301 goto out; 302 303 migration_entry_wait_on_locked(entry, ptep, ptl); 304 return; 305 out: 306 pte_unmap_unlock(ptep, ptl); 307 } 308 309 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, 310 unsigned long address) 311 { 312 spinlock_t *ptl = pte_lockptr(mm, pmd); 313 pte_t *ptep = pte_offset_map(pmd, address); 314 __migration_entry_wait(mm, ptep, ptl); 315 } 316 317 void migration_entry_wait_huge(struct vm_area_struct *vma, 318 struct mm_struct *mm, pte_t *pte) 319 { 320 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte); 321 __migration_entry_wait(mm, pte, ptl); 322 } 323 324 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 325 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd) 326 { 327 spinlock_t *ptl; 328 329 ptl = pmd_lock(mm, pmd); 330 if (!is_pmd_migration_entry(*pmd)) 331 goto unlock; 332 migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl); 333 return; 334 unlock: 335 spin_unlock(ptl); 336 } 337 #endif 338 339 static int folio_expected_refs(struct address_space *mapping, 340 struct folio *folio) 341 { 342 int refs = 1; 343 if (!mapping) 344 return refs; 345 346 refs += folio_nr_pages(folio); 347 if (folio_test_private(folio)) 348 refs++; 349 350 return refs; 351 } 352 353 /* 354 * Replace the page in the mapping. 
355 * 356 * The number of remaining references must be: 357 * 1 for anonymous pages without a mapping 358 * 2 for pages with a mapping 359 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set. 360 */ 361 int folio_migrate_mapping(struct address_space *mapping, 362 struct folio *newfolio, struct folio *folio, int extra_count) 363 { 364 XA_STATE(xas, &mapping->i_pages, folio_index(folio)); 365 struct zone *oldzone, *newzone; 366 int dirty; 367 int expected_count = folio_expected_refs(mapping, folio) + extra_count; 368 long nr = folio_nr_pages(folio); 369 370 if (!mapping) { 371 /* Anonymous page without mapping */ 372 if (folio_ref_count(folio) != expected_count) 373 return -EAGAIN; 374 375 /* No turning back from here */ 376 newfolio->index = folio->index; 377 newfolio->mapping = folio->mapping; 378 if (folio_test_swapbacked(folio)) 379 __folio_set_swapbacked(newfolio); 380 381 return MIGRATEPAGE_SUCCESS; 382 } 383 384 oldzone = folio_zone(folio); 385 newzone = folio_zone(newfolio); 386 387 xas_lock_irq(&xas); 388 if (!folio_ref_freeze(folio, expected_count)) { 389 xas_unlock_irq(&xas); 390 return -EAGAIN; 391 } 392 393 /* 394 * Now we know that no one else is looking at the folio: 395 * no turning back from here. 396 */ 397 newfolio->index = folio->index; 398 newfolio->mapping = folio->mapping; 399 folio_ref_add(newfolio, nr); /* add cache reference */ 400 if (folio_test_swapbacked(folio)) { 401 __folio_set_swapbacked(newfolio); 402 if (folio_test_swapcache(folio)) { 403 folio_set_swapcache(newfolio); 404 newfolio->private = folio_get_private(folio); 405 } 406 } else { 407 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio); 408 } 409 410 /* Move dirty while page refs frozen and newpage not yet exposed */ 411 dirty = folio_test_dirty(folio); 412 if (dirty) { 413 folio_clear_dirty(folio); 414 folio_set_dirty(newfolio); 415 } 416 417 xas_store(&xas, newfolio); 418 419 /* 420 * Drop cache reference from old page by unfreezing 421 * to one less reference. 422 * We know this isn't the last reference. 423 */ 424 folio_ref_unfreeze(folio, expected_count - nr); 425 426 xas_unlock(&xas); 427 /* Leave irq disabled to prevent preemption while updating stats */ 428 429 /* 430 * If moved to a different zone then also account 431 * the page for that zone. Other VM counters will be 432 * taken care of when we establish references to the 433 * new page and drop references to the old page. 434 * 435 * Note that anonymous pages are accounted for 436 * via NR_FILE_PAGES and NR_ANON_MAPPED if they 437 * are mapped to swap space. 
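         *
         * As a worked example (illustrative numbers only): migrating a dirty
         * 4-page file-backed folio from a zone on node 0 to a zone on node 1
         * shifts NR_FILE_PAGES by -4/+4 on the two lruvecs and, because the
         * folio is dirty and its mapping supports writeback, NR_FILE_DIRTY
         * and NR_ZONE_WRITE_PENDING by -4/+4 as well; NR_SHMEM is untouched
         * since the folio is not swap-backed.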
438 */ 439 if (newzone != oldzone) { 440 struct lruvec *old_lruvec, *new_lruvec; 441 struct mem_cgroup *memcg; 442 443 memcg = folio_memcg(folio); 444 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat); 445 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat); 446 447 __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr); 448 __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr); 449 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) { 450 __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr); 451 __mod_lruvec_state(new_lruvec, NR_SHMEM, nr); 452 } 453 #ifdef CONFIG_SWAP 454 if (folio_test_swapcache(folio)) { 455 __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr); 456 __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr); 457 } 458 #endif 459 if (dirty && mapping_can_writeback(mapping)) { 460 __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr); 461 __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr); 462 __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr); 463 __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr); 464 } 465 } 466 local_irq_enable(); 467 468 return MIGRATEPAGE_SUCCESS; 469 } 470 EXPORT_SYMBOL(folio_migrate_mapping); 471 472 /* 473 * The expected number of remaining references is the same as that 474 * of folio_migrate_mapping(). 475 */ 476 int migrate_huge_page_move_mapping(struct address_space *mapping, 477 struct folio *dst, struct folio *src) 478 { 479 XA_STATE(xas, &mapping->i_pages, folio_index(src)); 480 int expected_count; 481 482 xas_lock_irq(&xas); 483 expected_count = 2 + folio_has_private(src); 484 if (!folio_ref_freeze(src, expected_count)) { 485 xas_unlock_irq(&xas); 486 return -EAGAIN; 487 } 488 489 dst->index = src->index; 490 dst->mapping = src->mapping; 491 492 folio_get(dst); 493 494 xas_store(&xas, dst); 495 496 folio_ref_unfreeze(src, expected_count - 1); 497 498 xas_unlock_irq(&xas); 499 500 return MIGRATEPAGE_SUCCESS; 501 } 502 503 /* 504 * Copy the flags and some other ancillary information 505 */ 506 void folio_migrate_flags(struct folio *newfolio, struct folio *folio) 507 { 508 int cpupid; 509 510 if (folio_test_error(folio)) 511 folio_set_error(newfolio); 512 if (folio_test_referenced(folio)) 513 folio_set_referenced(newfolio); 514 if (folio_test_uptodate(folio)) 515 folio_mark_uptodate(newfolio); 516 if (folio_test_clear_active(folio)) { 517 VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio); 518 folio_set_active(newfolio); 519 } else if (folio_test_clear_unevictable(folio)) 520 folio_set_unevictable(newfolio); 521 if (folio_test_workingset(folio)) 522 folio_set_workingset(newfolio); 523 if (folio_test_checked(folio)) 524 folio_set_checked(newfolio); 525 /* 526 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via 527 * migration entries. We can still have PG_anon_exclusive set on an 528 * effectively unmapped and unreferenced first sub-pages of an 529 * anonymous THP: we can simply copy it here via PG_mappedtodisk. 530 */ 531 if (folio_test_mappedtodisk(folio)) 532 folio_set_mappedtodisk(newfolio); 533 534 /* Move dirty on pages not done by folio_migrate_mapping() */ 535 if (folio_test_dirty(folio)) 536 folio_set_dirty(newfolio); 537 538 if (folio_test_young(folio)) 539 folio_set_young(newfolio); 540 if (folio_test_idle(folio)) 541 folio_set_idle(newfolio); 542 543 /* 544 * Copy NUMA information to the new page, to prevent over-eager 545 * future migrations of this same page. 
546 */ 547 cpupid = page_cpupid_xchg_last(&folio->page, -1); 548 page_cpupid_xchg_last(&newfolio->page, cpupid); 549 550 folio_migrate_ksm(newfolio, folio); 551 /* 552 * Please do not reorder this without considering how mm/ksm.c's 553 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache(). 554 */ 555 if (folio_test_swapcache(folio)) 556 folio_clear_swapcache(folio); 557 folio_clear_private(folio); 558 559 /* page->private contains hugetlb specific flags */ 560 if (!folio_test_hugetlb(folio)) 561 folio->private = NULL; 562 563 /* 564 * If any waiters have accumulated on the new page then 565 * wake them up. 566 */ 567 if (folio_test_writeback(newfolio)) 568 folio_end_writeback(newfolio); 569 570 /* 571 * PG_readahead shares the same bit with PG_reclaim. The above 572 * end_page_writeback() may clear PG_readahead mistakenly, so set the 573 * bit after that. 574 */ 575 if (folio_test_readahead(folio)) 576 folio_set_readahead(newfolio); 577 578 folio_copy_owner(newfolio, folio); 579 580 if (!folio_test_hugetlb(folio)) 581 mem_cgroup_migrate(folio, newfolio); 582 } 583 EXPORT_SYMBOL(folio_migrate_flags); 584 585 void folio_migrate_copy(struct folio *newfolio, struct folio *folio) 586 { 587 folio_copy(newfolio, folio); 588 folio_migrate_flags(newfolio, folio); 589 } 590 EXPORT_SYMBOL(folio_migrate_copy); 591 592 /************************************************************ 593 * Migration functions 594 ***********************************************************/ 595 596 /** 597 * migrate_folio() - Simple folio migration. 598 * @mapping: The address_space containing the folio. 599 * @dst: The folio to migrate the data to. 600 * @src: The folio containing the current data. 601 * @mode: How to migrate the page. 602 * 603 * Common logic to directly migrate a single LRU folio suitable for 604 * folios that do not use PagePrivate/PagePrivate2. 605 * 606 * Folios are locked upon entry and exit. 607 */ 608 int migrate_folio(struct address_space *mapping, struct folio *dst, 609 struct folio *src, enum migrate_mode mode) 610 { 611 int rc; 612 613 BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */ 614 615 rc = folio_migrate_mapping(mapping, dst, src, 0); 616 617 if (rc != MIGRATEPAGE_SUCCESS) 618 return rc; 619 620 if (mode != MIGRATE_SYNC_NO_COPY) 621 folio_migrate_copy(dst, src); 622 else 623 folio_migrate_flags(dst, src); 624 return MIGRATEPAGE_SUCCESS; 625 } 626 EXPORT_SYMBOL(migrate_folio); 627 628 #ifdef CONFIG_BLOCK 629 /* Returns true if all buffers are successfully locked */ 630 static bool buffer_migrate_lock_buffers(struct buffer_head *head, 631 enum migrate_mode mode) 632 { 633 struct buffer_head *bh = head; 634 635 /* Simple case, sync compaction */ 636 if (mode != MIGRATE_ASYNC) { 637 do { 638 lock_buffer(bh); 639 bh = bh->b_this_page; 640 641 } while (bh != head); 642 643 return true; 644 } 645 646 /* async case, we cannot block on lock_buffer so use trylock_buffer */ 647 do { 648 if (!trylock_buffer(bh)) { 649 /* 650 * We failed to lock the buffer and cannot stall in 651 * async migration. 
Release the taken locks 652 */ 653 struct buffer_head *failed_bh = bh; 654 bh = head; 655 while (bh != failed_bh) { 656 unlock_buffer(bh); 657 bh = bh->b_this_page; 658 } 659 return false; 660 } 661 662 bh = bh->b_this_page; 663 } while (bh != head); 664 return true; 665 } 666 667 static int __buffer_migrate_folio(struct address_space *mapping, 668 struct folio *dst, struct folio *src, enum migrate_mode mode, 669 bool check_refs) 670 { 671 struct buffer_head *bh, *head; 672 int rc; 673 int expected_count; 674 675 head = folio_buffers(src); 676 if (!head) 677 return migrate_folio(mapping, dst, src, mode); 678 679 /* Check whether page does not have extra refs before we do more work */ 680 expected_count = folio_expected_refs(mapping, src); 681 if (folio_ref_count(src) != expected_count) 682 return -EAGAIN; 683 684 if (!buffer_migrate_lock_buffers(head, mode)) 685 return -EAGAIN; 686 687 if (check_refs) { 688 bool busy; 689 bool invalidated = false; 690 691 recheck_buffers: 692 busy = false; 693 spin_lock(&mapping->private_lock); 694 bh = head; 695 do { 696 if (atomic_read(&bh->b_count)) { 697 busy = true; 698 break; 699 } 700 bh = bh->b_this_page; 701 } while (bh != head); 702 if (busy) { 703 if (invalidated) { 704 rc = -EAGAIN; 705 goto unlock_buffers; 706 } 707 spin_unlock(&mapping->private_lock); 708 invalidate_bh_lrus(); 709 invalidated = true; 710 goto recheck_buffers; 711 } 712 } 713 714 rc = folio_migrate_mapping(mapping, dst, src, 0); 715 if (rc != MIGRATEPAGE_SUCCESS) 716 goto unlock_buffers; 717 718 folio_attach_private(dst, folio_detach_private(src)); 719 720 bh = head; 721 do { 722 set_bh_page(bh, &dst->page, bh_offset(bh)); 723 bh = bh->b_this_page; 724 } while (bh != head); 725 726 if (mode != MIGRATE_SYNC_NO_COPY) 727 folio_migrate_copy(dst, src); 728 else 729 folio_migrate_flags(dst, src); 730 731 rc = MIGRATEPAGE_SUCCESS; 732 unlock_buffers: 733 if (check_refs) 734 spin_unlock(&mapping->private_lock); 735 bh = head; 736 do { 737 unlock_buffer(bh); 738 bh = bh->b_this_page; 739 } while (bh != head); 740 741 return rc; 742 } 743 744 /** 745 * buffer_migrate_folio() - Migration function for folios with buffers. 746 * @mapping: The address space containing @src. 747 * @dst: The folio to migrate to. 748 * @src: The folio to migrate from. 749 * @mode: How to migrate the folio. 750 * 751 * This function can only be used if the underlying filesystem guarantees 752 * that no other references to @src exist. For example attached buffer 753 * heads are accessed only under the folio lock. If your filesystem cannot 754 * provide this guarantee, buffer_migrate_folio_norefs() may be more 755 * appropriate. 756 * 757 * Return: 0 on success or a negative errno on failure. 758 */ 759 int buffer_migrate_folio(struct address_space *mapping, 760 struct folio *dst, struct folio *src, enum migrate_mode mode) 761 { 762 return __buffer_migrate_folio(mapping, dst, src, mode, false); 763 } 764 EXPORT_SYMBOL(buffer_migrate_folio); 765 766 /** 767 * buffer_migrate_folio_norefs() - Migration function for folios with buffers. 768 * @mapping: The address space containing @src. 769 * @dst: The folio to migrate to. 770 * @src: The folio to migrate from. 771 * @mode: How to migrate the folio. 772 * 773 * Like buffer_migrate_folio() except that this variant is more careful 774 * and checks that there are also no buffer head references. This function 775 * is the right one for mappings where buffer heads are directly looked 776 * up and referenced (such as block device mappings). 
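 *
 * As an illustration (a sketch, not taken from any particular filesystem): a
 * filesystem whose buffer heads are only ever used under the folio lock can
 * hook migration up as
 *
 *	const struct address_space_operations foo_aops = {
 *		...
 *		.migrate_folio	= buffer_migrate_folio,
 *	};
 *
 * whereas a mapping whose buffer heads are looked up and referenced directly,
 * such as a block device mapping, would use buffer_migrate_folio_norefs()
 * instead ("foo_aops" is a made-up name). Mappings without buffer heads can
 * typically use migrate_folio() or filemap_migrate_folio() (defined below).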
777 * 778 * Return: 0 on success or a negative errno on failure. 779 */ 780 int buffer_migrate_folio_norefs(struct address_space *mapping, 781 struct folio *dst, struct folio *src, enum migrate_mode mode) 782 { 783 return __buffer_migrate_folio(mapping, dst, src, mode, true); 784 } 785 #endif 786 787 int filemap_migrate_folio(struct address_space *mapping, 788 struct folio *dst, struct folio *src, enum migrate_mode mode) 789 { 790 int ret; 791 792 ret = folio_migrate_mapping(mapping, dst, src, 0); 793 if (ret != MIGRATEPAGE_SUCCESS) 794 return ret; 795 796 if (folio_get_private(src)) 797 folio_attach_private(dst, folio_detach_private(src)); 798 799 if (mode != MIGRATE_SYNC_NO_COPY) 800 folio_migrate_copy(dst, src); 801 else 802 folio_migrate_flags(dst, src); 803 return MIGRATEPAGE_SUCCESS; 804 } 805 EXPORT_SYMBOL_GPL(filemap_migrate_folio); 806 807 /* 808 * Writeback a folio to clean the dirty state 809 */ 810 static int writeout(struct address_space *mapping, struct folio *folio) 811 { 812 struct writeback_control wbc = { 813 .sync_mode = WB_SYNC_NONE, 814 .nr_to_write = 1, 815 .range_start = 0, 816 .range_end = LLONG_MAX, 817 .for_reclaim = 1 818 }; 819 int rc; 820 821 if (!mapping->a_ops->writepage) 822 /* No write method for the address space */ 823 return -EINVAL; 824 825 if (!folio_clear_dirty_for_io(folio)) 826 /* Someone else already triggered a write */ 827 return -EAGAIN; 828 829 /* 830 * A dirty folio may imply that the underlying filesystem has 831 * the folio on some queue. So the folio must be clean for 832 * migration. Writeout may mean we lose the lock and the 833 * folio state is no longer what we checked for earlier. 834 * At this point we know that the migration attempt cannot 835 * be successful. 836 */ 837 remove_migration_ptes(folio, folio, false); 838 839 rc = mapping->a_ops->writepage(&folio->page, &wbc); 840 841 if (rc != AOP_WRITEPAGE_ACTIVATE) 842 /* unlocked. Relock */ 843 folio_lock(folio); 844 845 return (rc < 0) ? -EIO : -EAGAIN; 846 } 847 848 /* 849 * Default handling if a filesystem does not provide a migration function. 850 */ 851 static int fallback_migrate_folio(struct address_space *mapping, 852 struct folio *dst, struct folio *src, enum migrate_mode mode) 853 { 854 if (folio_test_dirty(src)) { 855 /* Only writeback folios in full synchronous migration */ 856 switch (mode) { 857 case MIGRATE_SYNC: 858 case MIGRATE_SYNC_NO_COPY: 859 break; 860 default: 861 return -EBUSY; 862 } 863 return writeout(mapping, src); 864 } 865 866 /* 867 * Buffers may be managed in a filesystem specific way. 868 * We must have no buffers or drop them. 869 */ 870 if (folio_test_private(src) && 871 !filemap_release_folio(src, GFP_KERNEL)) 872 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY; 873 874 return migrate_folio(mapping, dst, src, mode); 875 } 876 877 /* 878 * Move a page to a newly allocated page 879 * The page is locked and all ptes have been successfully removed. 880 * 881 * The new page will have replaced the old page if this function 882 * is successful. 
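 *
 * Roughly, the dispatch below (an illustrative summary, not extra policy) is:
 *   LRU folio with a mapping     -> mapping->a_ops->migrate_folio(), or
 *                                   fallback_migrate_folio() if the aops
 *                                   provide no migrate_folio callback
 *   LRU folio without a mapping  -> migrate_folio()
 *   non-LRU (driver-owned) page  -> movable_operations->migrate_page()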
883 * 884 * Return value: 885 * < 0 - error code 886 * MIGRATEPAGE_SUCCESS - success 887 */ 888 static int move_to_new_folio(struct folio *dst, struct folio *src, 889 enum migrate_mode mode) 890 { 891 int rc = -EAGAIN; 892 bool is_lru = !__PageMovable(&src->page); 893 894 VM_BUG_ON_FOLIO(!folio_test_locked(src), src); 895 VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst); 896 897 if (likely(is_lru)) { 898 struct address_space *mapping = folio_mapping(src); 899 900 if (!mapping) 901 rc = migrate_folio(mapping, dst, src, mode); 902 else if (mapping->a_ops->migrate_folio) 903 /* 904 * Most folios have a mapping and most filesystems 905 * provide a migrate_folio callback. Anonymous folios 906 * are part of swap space which also has its own 907 * migrate_folio callback. This is the most common path 908 * for page migration. 909 */ 910 rc = mapping->a_ops->migrate_folio(mapping, dst, src, 911 mode); 912 else 913 rc = fallback_migrate_folio(mapping, dst, src, mode); 914 } else { 915 const struct movable_operations *mops; 916 917 /* 918 * In case of non-lru page, it could be released after 919 * isolation step. In that case, we shouldn't try migration. 920 */ 921 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src); 922 if (!folio_test_movable(src)) { 923 rc = MIGRATEPAGE_SUCCESS; 924 folio_clear_isolated(src); 925 goto out; 926 } 927 928 mops = page_movable_ops(&src->page); 929 rc = mops->migrate_page(&dst->page, &src->page, mode); 930 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS && 931 !folio_test_isolated(src)); 932 } 933 934 /* 935 * When successful, old pagecache src->mapping must be cleared before 936 * src is freed; but stats require that PageAnon be left as PageAnon. 937 */ 938 if (rc == MIGRATEPAGE_SUCCESS) { 939 if (__PageMovable(&src->page)) { 940 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src); 941 942 /* 943 * We clear PG_movable under page_lock so any compactor 944 * cannot try to migrate this page. 945 */ 946 folio_clear_isolated(src); 947 } 948 949 /* 950 * Anonymous and movable src->mapping will be cleared by 951 * free_pages_prepare so don't reset it here for keeping 952 * the type to work PageAnon, for example. 953 */ 954 if (!folio_mapping_flags(src)) 955 src->mapping = NULL; 956 957 if (likely(!folio_is_zone_device(dst))) 958 flush_dcache_folio(dst); 959 } 960 out: 961 return rc; 962 } 963 964 static int __unmap_and_move(struct page *page, struct page *newpage, 965 int force, enum migrate_mode mode) 966 { 967 struct folio *folio = page_folio(page); 968 struct folio *dst = page_folio(newpage); 969 int rc = -EAGAIN; 970 bool page_was_mapped = false; 971 struct anon_vma *anon_vma = NULL; 972 bool is_lru = !__PageMovable(page); 973 974 if (!trylock_page(page)) { 975 if (!force || mode == MIGRATE_ASYNC) 976 goto out; 977 978 /* 979 * It's not safe for direct compaction to call lock_page. 980 * For example, during page readahead pages are added locked 981 * to the LRU. Later, when the IO completes the pages are 982 * marked uptodate and unlocked. However, the queueing 983 * could be merging multiple pages for one bio (e.g. 984 * mpage_readahead). If an allocation happens for the 985 * second or third page, the process can end up locking 986 * the same page twice and deadlocking. Rather than 987 * trying to be clever about what pages can be locked, 988 * avoid the use of lock_page for direct compaction 989 * altogether. 
                 */
                if (current->flags & PF_MEMALLOC)
                        goto out;

                lock_page(page);
        }

        if (PageWriteback(page)) {
                /*
                 * Only in the case of a full synchronous migration is it
                 * necessary to wait for PageWriteback. In the async case,
                 * the retry loop is too short and in the sync-light case,
                 * the overhead of stalling is too much.
                 */
                switch (mode) {
                case MIGRATE_SYNC:
                case MIGRATE_SYNC_NO_COPY:
                        break;
                default:
                        rc = -EBUSY;
                        goto out_unlock;
                }
                if (!force)
                        goto out_unlock;
                wait_on_page_writeback(page);
        }

        /*
         * By the time try_to_migrate() has run, page->mapcount has dropped
         * to 0, so we could no longer notice if the anon_vma were freed
         * while we migrate the page. This get_anon_vma() delays freeing the
         * anon_vma until the end of migration. File cache pages are not a
         * problem because they are protected by the page lock during
         * migration, so only anonymous pages need this care.
         *
         * Only page_get_anon_vma() understands the subtleties of
         * getting a hold on an anon_vma from outside one of its mms.
         * But if we cannot get anon_vma, then we won't need it anyway,
         * because that implies that the anon page is no longer mapped
         * (and cannot be remapped so long as we hold the page lock).
         */
        if (PageAnon(page) && !PageKsm(page))
                anon_vma = page_get_anon_vma(page);

        /*
         * Block others from accessing the new page when we get around to
         * establishing additional references. We are usually the only one
         * holding a reference to newpage at this point. We used to have a BUG
         * here if trylock_page(newpage) fails, but would like to allow for
         * cases where there might be a race with the previous use of newpage.
         * This is much like races on the refcount of oldpage: just don't BUG().
         */
        if (unlikely(!trylock_page(newpage)))
                goto out_unlock;

        if (unlikely(!is_lru)) {
                rc = move_to_new_folio(dst, folio, mode);
                goto out_unlock_both;
        }

        /*
         * Corner case handling:
         * 1. When a new swap-cache page is read in, it is added to the LRU
         * and treated as swapcache but it has no rmap yet.
         * Calling try_to_unmap() against a page->mapping==NULL page will
         * trigger a BUG. So handle it here.
         * 2. An orphaned page (see truncate_cleanup_page) might have
         * fs-private metadata. Such a page can be picked up during memory
         * offlining. Everywhere other than page reclaim, the page is
         * invisible to the VM, so it cannot be migrated. Instead, try to
         * free the metadata so the page itself can be freed.
         */
        if (!page->mapping) {
                VM_BUG_ON_PAGE(PageAnon(page), page);
                if (page_has_private(page)) {
                        try_to_free_buffers(folio);
                        goto out_unlock_both;
                }
        } else if (page_mapped(page)) {
                /* Establish migration ptes */
                VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
                               page);
                try_to_migrate(folio, 0);
                page_was_mapped = true;
        }

        if (!page_mapped(page))
                rc = move_to_new_folio(dst, folio, mode);

        /*
         * When successful, push newpage to LRU immediately: so that if it
         * turns out to be an mlocked page, remove_migration_ptes() will
         * automatically build up the correct newpage->mlock_count for it.
1083 * 1084 * We would like to do something similar for the old page, when 1085 * unsuccessful, and other cases when a page has been temporarily 1086 * isolated from the unevictable LRU: but this case is the easiest. 1087 */ 1088 if (rc == MIGRATEPAGE_SUCCESS) { 1089 lru_cache_add(newpage); 1090 if (page_was_mapped) 1091 lru_add_drain(); 1092 } 1093 1094 if (page_was_mapped) 1095 remove_migration_ptes(folio, 1096 rc == MIGRATEPAGE_SUCCESS ? dst : folio, false); 1097 1098 out_unlock_both: 1099 unlock_page(newpage); 1100 out_unlock: 1101 /* Drop an anon_vma reference if we took one */ 1102 if (anon_vma) 1103 put_anon_vma(anon_vma); 1104 unlock_page(page); 1105 out: 1106 /* 1107 * If migration is successful, decrease refcount of the newpage, 1108 * which will not free the page because new page owner increased 1109 * refcounter. 1110 */ 1111 if (rc == MIGRATEPAGE_SUCCESS) 1112 put_page(newpage); 1113 1114 return rc; 1115 } 1116 1117 /* 1118 * Obtain the lock on page, remove all ptes and migrate the page 1119 * to the newly allocated page in newpage. 1120 */ 1121 static int unmap_and_move(new_page_t get_new_page, 1122 free_page_t put_new_page, 1123 unsigned long private, struct page *page, 1124 int force, enum migrate_mode mode, 1125 enum migrate_reason reason, 1126 struct list_head *ret) 1127 { 1128 int rc = MIGRATEPAGE_SUCCESS; 1129 struct page *newpage = NULL; 1130 1131 if (!thp_migration_supported() && PageTransHuge(page)) 1132 return -ENOSYS; 1133 1134 if (page_count(page) == 1) { 1135 /* page was freed from under us. So we are done. */ 1136 ClearPageActive(page); 1137 ClearPageUnevictable(page); 1138 if (unlikely(__PageMovable(page))) { 1139 lock_page(page); 1140 if (!PageMovable(page)) 1141 ClearPageIsolated(page); 1142 unlock_page(page); 1143 } 1144 goto out; 1145 } 1146 1147 newpage = get_new_page(page, private); 1148 if (!newpage) 1149 return -ENOMEM; 1150 1151 newpage->private = 0; 1152 rc = __unmap_and_move(page, newpage, force, mode); 1153 if (rc == MIGRATEPAGE_SUCCESS) 1154 set_page_owner_migrate_reason(newpage, reason); 1155 1156 out: 1157 if (rc != -EAGAIN) { 1158 /* 1159 * A page that has been migrated has all references 1160 * removed and will be freed. A page that has not been 1161 * migrated will have kept its references and be restored. 1162 */ 1163 list_del(&page->lru); 1164 } 1165 1166 /* 1167 * If migration is successful, releases reference grabbed during 1168 * isolation. Otherwise, restore the page to right list unless 1169 * we want to retry. 1170 */ 1171 if (rc == MIGRATEPAGE_SUCCESS) { 1172 /* 1173 * Compaction can migrate also non-LRU pages which are 1174 * not accounted to NR_ISOLATED_*. They can be recognized 1175 * as __PageMovable 1176 */ 1177 if (likely(!__PageMovable(page))) 1178 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + 1179 page_is_file_lru(page), -thp_nr_pages(page)); 1180 1181 if (reason != MR_MEMORY_FAILURE) 1182 /* 1183 * We release the page in page_handle_poison. 1184 */ 1185 put_page(page); 1186 } else { 1187 if (rc != -EAGAIN) 1188 list_add_tail(&page->lru, ret); 1189 1190 if (put_new_page) 1191 put_new_page(newpage, private); 1192 else 1193 put_page(newpage); 1194 } 1195 1196 return rc; 1197 } 1198 1199 /* 1200 * Counterpart of unmap_and_move_page() for hugepage migration. 1201 * 1202 * This function doesn't wait the completion of hugepage I/O 1203 * because there is no race between I/O and migration for hugepage. 
1204 * Note that currently hugepage I/O occurs only in direct I/O 1205 * where no lock is held and PG_writeback is irrelevant, 1206 * and writeback status of all subpages are counted in the reference 1207 * count of the head page (i.e. if all subpages of a 2MB hugepage are 1208 * under direct I/O, the reference of the head page is 512 and a bit more.) 1209 * This means that when we try to migrate hugepage whose subpages are 1210 * doing direct I/O, some references remain after try_to_unmap() and 1211 * hugepage migration fails without data corruption. 1212 * 1213 * There is also no race when direct I/O is issued on the page under migration, 1214 * because then pte is replaced with migration swap entry and direct I/O code 1215 * will wait in the page fault for migration to complete. 1216 */ 1217 static int unmap_and_move_huge_page(new_page_t get_new_page, 1218 free_page_t put_new_page, unsigned long private, 1219 struct page *hpage, int force, 1220 enum migrate_mode mode, int reason, 1221 struct list_head *ret) 1222 { 1223 struct folio *dst, *src = page_folio(hpage); 1224 int rc = -EAGAIN; 1225 int page_was_mapped = 0; 1226 struct page *new_hpage; 1227 struct anon_vma *anon_vma = NULL; 1228 struct address_space *mapping = NULL; 1229 1230 /* 1231 * Migratability of hugepages depends on architectures and their size. 1232 * This check is necessary because some callers of hugepage migration 1233 * like soft offline and memory hotremove don't walk through page 1234 * tables or check whether the hugepage is pmd-based or not before 1235 * kicking migration. 1236 */ 1237 if (!hugepage_migration_supported(page_hstate(hpage))) { 1238 list_move_tail(&hpage->lru, ret); 1239 return -ENOSYS; 1240 } 1241 1242 if (page_count(hpage) == 1) { 1243 /* page was freed from under us. So we are done. */ 1244 putback_active_hugepage(hpage); 1245 return MIGRATEPAGE_SUCCESS; 1246 } 1247 1248 new_hpage = get_new_page(hpage, private); 1249 if (!new_hpage) 1250 return -ENOMEM; 1251 dst = page_folio(new_hpage); 1252 1253 if (!trylock_page(hpage)) { 1254 if (!force) 1255 goto out; 1256 switch (mode) { 1257 case MIGRATE_SYNC: 1258 case MIGRATE_SYNC_NO_COPY: 1259 break; 1260 default: 1261 goto out; 1262 } 1263 lock_page(hpage); 1264 } 1265 1266 /* 1267 * Check for pages which are in the process of being freed. Without 1268 * page_mapping() set, hugetlbfs specific move page routine will not 1269 * be called and we could leak usage counts for subpools. 1270 */ 1271 if (hugetlb_page_subpool(hpage) && !page_mapping(hpage)) { 1272 rc = -EBUSY; 1273 goto out_unlock; 1274 } 1275 1276 if (PageAnon(hpage)) 1277 anon_vma = page_get_anon_vma(hpage); 1278 1279 if (unlikely(!trylock_page(new_hpage))) 1280 goto put_anon; 1281 1282 if (page_mapped(hpage)) { 1283 enum ttu_flags ttu = 0; 1284 1285 if (!PageAnon(hpage)) { 1286 /* 1287 * In shared mappings, try_to_unmap could potentially 1288 * call huge_pmd_unshare. Because of this, take 1289 * semaphore in write mode here and set TTU_RMAP_LOCKED 1290 * to let lower levels know we have taken the lock. 1291 */ 1292 mapping = hugetlb_page_mapping_lock_write(hpage); 1293 if (unlikely(!mapping)) 1294 goto unlock_put_anon; 1295 1296 ttu = TTU_RMAP_LOCKED; 1297 } 1298 1299 try_to_migrate(src, ttu); 1300 page_was_mapped = 1; 1301 1302 if (ttu & TTU_RMAP_LOCKED) 1303 i_mmap_unlock_write(mapping); 1304 } 1305 1306 if (!page_mapped(hpage)) 1307 rc = move_to_new_folio(dst, src, mode); 1308 1309 if (page_was_mapped) 1310 remove_migration_ptes(src, 1311 rc == MIGRATEPAGE_SUCCESS ? 
                        dst : src, false);

unlock_put_anon:
        unlock_page(new_hpage);

put_anon:
        if (anon_vma)
                put_anon_vma(anon_vma);

        if (rc == MIGRATEPAGE_SUCCESS) {
                move_hugetlb_state(hpage, new_hpage, reason);
                put_new_page = NULL;
        }

out_unlock:
        unlock_page(hpage);
out:
        if (rc == MIGRATEPAGE_SUCCESS)
                putback_active_hugepage(hpage);
        else if (rc != -EAGAIN)
                list_move_tail(&hpage->lru, ret);

        /*
         * If migration was not successful and there's a freeing callback, use
         * it. Otherwise, put_page() will drop the reference grabbed during
         * isolation.
         */
        if (put_new_page)
                put_new_page(new_hpage, private);
        else
                putback_active_hugepage(new_hpage);

        return rc;
}

static inline int try_split_thp(struct page *page, struct page **page2,
                                struct list_head *from)
{
        int rc = 0;

        lock_page(page);
        rc = split_huge_page_to_list(page, from);
        unlock_page(page);
        if (!rc)
                list_safe_reset_next(page, *page2, lru);

        return rc;
}

/*
 * migrate_pages - migrate the pages specified in a list to the free pages
 *		   supplied as the target for the page migration
 *
 * @from:		The list of pages to be migrated.
 * @get_new_page:	The function used to allocate free pages to be used
 *			as the target of the page migration.
 * @put_new_page:	The function used to free target pages if migration
 *			fails, or NULL if no special handling is necessary.
 * @private:		Private data to be passed on to get_new_page()
 * @mode:		The migration mode that specifies the constraints for
 *			page migration, if any.
 * @reason:		The reason for page migration.
 * @ret_succeeded:	Set to the number of normal pages migrated successfully
 *			if the caller passes a non-NULL pointer.
 *
 * The function returns after 10 attempts, or earlier when the list has become
 * empty or no retryable pages remain. It is the caller's responsibility to
 * call putback_movable_pages() to return pages to the LRU or free list, but
 * only if the return value is not 0.
 *
 * Returns the number of {normal, THP, hugetlb} pages that were not migrated,
 * or an error code. A THP that had to be split is counted as one non-migrated
 * THP, no matter how many of its subpages were migrated successfully.
 */
int migrate_pages(struct list_head *from, new_page_t get_new_page,
                free_page_t put_new_page, unsigned long private,
                enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
{
        int retry = 1;
        int thp_retry = 1;
        int nr_failed = 0;
        int nr_failed_pages = 0;
        int nr_succeeded = 0;
        int nr_thp_succeeded = 0;
        int nr_thp_failed = 0;
        int nr_thp_split = 0;
        int pass = 0;
        bool is_thp = false;
        struct page *page;
        struct page *page2;
        int rc, nr_subpages;
        LIST_HEAD(ret_pages);
        LIST_HEAD(thp_split_pages);
        bool nosplit = (reason == MR_NUMA_MISPLACED);
        bool no_subpage_counting = false;

        trace_mm_migrate_pages_start(mode, reason);

thp_subpage_migration:
        for (pass = 0; pass < 10 && (retry || thp_retry); pass++) {
                retry = 0;
                thp_retry = 0;

                list_for_each_entry_safe(page, page2, from, lru) {
retry:
                        /*
                         * THP statistics are based on the source huge page.
1418 * Capture required information that might get lost 1419 * during migration. 1420 */ 1421 is_thp = PageTransHuge(page) && !PageHuge(page); 1422 nr_subpages = compound_nr(page); 1423 cond_resched(); 1424 1425 if (PageHuge(page)) 1426 rc = unmap_and_move_huge_page(get_new_page, 1427 put_new_page, private, page, 1428 pass > 2, mode, reason, 1429 &ret_pages); 1430 else 1431 rc = unmap_and_move(get_new_page, put_new_page, 1432 private, page, pass > 2, mode, 1433 reason, &ret_pages); 1434 /* 1435 * The rules are: 1436 * Success: non hugetlb page will be freed, hugetlb 1437 * page will be put back 1438 * -EAGAIN: stay on the from list 1439 * -ENOMEM: stay on the from list 1440 * Other errno: put on ret_pages list then splice to 1441 * from list 1442 */ 1443 switch(rc) { 1444 /* 1445 * THP migration might be unsupported or the 1446 * allocation could've failed so we should 1447 * retry on the same page with the THP split 1448 * to base pages. 1449 * 1450 * Head page is retried immediately and tail 1451 * pages are added to the tail of the list so 1452 * we encounter them after the rest of the list 1453 * is processed. 1454 */ 1455 case -ENOSYS: 1456 /* THP migration is unsupported */ 1457 if (is_thp) { 1458 nr_thp_failed++; 1459 if (!try_split_thp(page, &page2, &thp_split_pages)) { 1460 nr_thp_split++; 1461 goto retry; 1462 } 1463 /* Hugetlb migration is unsupported */ 1464 } else if (!no_subpage_counting) { 1465 nr_failed++; 1466 } 1467 1468 nr_failed_pages += nr_subpages; 1469 break; 1470 case -ENOMEM: 1471 /* 1472 * When memory is low, don't bother to try to migrate 1473 * other pages, just exit. 1474 * THP NUMA faulting doesn't split THP to retry. 1475 */ 1476 if (is_thp && !nosplit) { 1477 nr_thp_failed++; 1478 if (!try_split_thp(page, &page2, &thp_split_pages)) { 1479 nr_thp_split++; 1480 goto retry; 1481 } 1482 } else if (!no_subpage_counting) { 1483 nr_failed++; 1484 } 1485 1486 nr_failed_pages += nr_subpages; 1487 /* 1488 * There might be some subpages of fail-to-migrate THPs 1489 * left in thp_split_pages list. Move them back to migration 1490 * list so that they could be put back to the right list by 1491 * the caller otherwise the page refcnt will be leaked. 1492 */ 1493 list_splice_init(&thp_split_pages, from); 1494 nr_thp_failed += thp_retry; 1495 goto out; 1496 case -EAGAIN: 1497 if (is_thp) 1498 thp_retry++; 1499 else 1500 retry++; 1501 break; 1502 case MIGRATEPAGE_SUCCESS: 1503 nr_succeeded += nr_subpages; 1504 if (is_thp) 1505 nr_thp_succeeded++; 1506 break; 1507 default: 1508 /* 1509 * Permanent failure (-EBUSY, etc.): 1510 * unlike -EAGAIN case, the failed page is 1511 * removed from migration page list and not 1512 * retried in the next outer loop. 1513 */ 1514 if (is_thp) 1515 nr_thp_failed++; 1516 else if (!no_subpage_counting) 1517 nr_failed++; 1518 1519 nr_failed_pages += nr_subpages; 1520 break; 1521 } 1522 } 1523 } 1524 nr_failed += retry; 1525 nr_thp_failed += thp_retry; 1526 /* 1527 * Try to migrate subpages of fail-to-migrate THPs, no nr_failed 1528 * counting in this round, since all subpages of a THP is counted 1529 * as 1 failure in the first round. 1530 */ 1531 if (!list_empty(&thp_split_pages)) { 1532 /* 1533 * Move non-migrated pages (after 10 retries) to ret_pages 1534 * to avoid migrating them again. 
1535 */ 1536 list_splice_init(from, &ret_pages); 1537 list_splice_init(&thp_split_pages, from); 1538 no_subpage_counting = true; 1539 retry = 1; 1540 goto thp_subpage_migration; 1541 } 1542 1543 rc = nr_failed + nr_thp_failed; 1544 out: 1545 /* 1546 * Put the permanent failure page back to migration list, they 1547 * will be put back to the right list by the caller. 1548 */ 1549 list_splice(&ret_pages, from); 1550 1551 count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded); 1552 count_vm_events(PGMIGRATE_FAIL, nr_failed_pages); 1553 count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded); 1554 count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed); 1555 count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split); 1556 trace_mm_migrate_pages(nr_succeeded, nr_failed_pages, nr_thp_succeeded, 1557 nr_thp_failed, nr_thp_split, mode, reason); 1558 1559 if (ret_succeeded) 1560 *ret_succeeded = nr_succeeded; 1561 1562 return rc; 1563 } 1564 1565 struct page *alloc_migration_target(struct page *page, unsigned long private) 1566 { 1567 struct folio *folio = page_folio(page); 1568 struct migration_target_control *mtc; 1569 gfp_t gfp_mask; 1570 unsigned int order = 0; 1571 struct folio *new_folio = NULL; 1572 int nid; 1573 int zidx; 1574 1575 mtc = (struct migration_target_control *)private; 1576 gfp_mask = mtc->gfp_mask; 1577 nid = mtc->nid; 1578 if (nid == NUMA_NO_NODE) 1579 nid = folio_nid(folio); 1580 1581 if (folio_test_hugetlb(folio)) { 1582 struct hstate *h = page_hstate(&folio->page); 1583 1584 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask); 1585 return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask); 1586 } 1587 1588 if (folio_test_large(folio)) { 1589 /* 1590 * clear __GFP_RECLAIM to make the migration callback 1591 * consistent with regular THP allocations. 1592 */ 1593 gfp_mask &= ~__GFP_RECLAIM; 1594 gfp_mask |= GFP_TRANSHUGE; 1595 order = folio_order(folio); 1596 } 1597 zidx = zone_idx(folio_zone(folio)); 1598 if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE) 1599 gfp_mask |= __GFP_HIGHMEM; 1600 1601 new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask); 1602 1603 return &new_folio->page; 1604 } 1605 1606 #ifdef CONFIG_NUMA 1607 1608 static int store_status(int __user *status, int start, int value, int nr) 1609 { 1610 while (nr-- > 0) { 1611 if (put_user(value, status + start)) 1612 return -EFAULT; 1613 start++; 1614 } 1615 1616 return 0; 1617 } 1618 1619 static int do_move_pages_to_node(struct mm_struct *mm, 1620 struct list_head *pagelist, int node) 1621 { 1622 int err; 1623 struct migration_target_control mtc = { 1624 .nid = node, 1625 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 1626 }; 1627 1628 err = migrate_pages(pagelist, alloc_migration_target, NULL, 1629 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL); 1630 if (err) 1631 putback_movable_pages(pagelist); 1632 return err; 1633 } 1634 1635 /* 1636 * Resolves the given address to a struct page, isolates it from the LRU and 1637 * puts it to the given pagelist. 
1638 * Returns: 1639 * errno - if the page cannot be found/isolated 1640 * 0 - when it doesn't have to be migrated because it is already on the 1641 * target node 1642 * 1 - when it has been queued 1643 */ 1644 static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, 1645 int node, struct list_head *pagelist, bool migrate_all) 1646 { 1647 struct vm_area_struct *vma; 1648 struct page *page; 1649 int err; 1650 1651 mmap_read_lock(mm); 1652 err = -EFAULT; 1653 vma = vma_lookup(mm, addr); 1654 if (!vma || !vma_migratable(vma)) 1655 goto out; 1656 1657 /* FOLL_DUMP to ignore special (like zero) pages */ 1658 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP); 1659 1660 err = PTR_ERR(page); 1661 if (IS_ERR(page)) 1662 goto out; 1663 1664 err = -ENOENT; 1665 if (!page) 1666 goto out; 1667 1668 err = 0; 1669 if (page_to_nid(page) == node) 1670 goto out_putpage; 1671 1672 err = -EACCES; 1673 if (page_mapcount(page) > 1 && !migrate_all) 1674 goto out_putpage; 1675 1676 if (PageHuge(page)) { 1677 if (PageHead(page)) { 1678 isolate_huge_page(page, pagelist); 1679 err = 1; 1680 } 1681 } else { 1682 struct page *head; 1683 1684 head = compound_head(page); 1685 err = isolate_lru_page(head); 1686 if (err) 1687 goto out_putpage; 1688 1689 err = 1; 1690 list_add_tail(&head->lru, pagelist); 1691 mod_node_page_state(page_pgdat(head), 1692 NR_ISOLATED_ANON + page_is_file_lru(head), 1693 thp_nr_pages(head)); 1694 } 1695 out_putpage: 1696 /* 1697 * Either remove the duplicate refcount from 1698 * isolate_lru_page() or drop the page ref if it was 1699 * not isolated. 1700 */ 1701 put_page(page); 1702 out: 1703 mmap_read_unlock(mm); 1704 return err; 1705 } 1706 1707 static int move_pages_and_store_status(struct mm_struct *mm, int node, 1708 struct list_head *pagelist, int __user *status, 1709 int start, int i, unsigned long nr_pages) 1710 { 1711 int err; 1712 1713 if (list_empty(pagelist)) 1714 return 0; 1715 1716 err = do_move_pages_to_node(mm, pagelist, node); 1717 if (err) { 1718 /* 1719 * Positive err means the number of failed 1720 * pages to migrate. Since we are going to 1721 * abort and return the number of non-migrated 1722 * pages, so need to include the rest of the 1723 * nr_pages that have not been attempted as 1724 * well. 1725 */ 1726 if (err > 0) 1727 err += nr_pages - i - 1; 1728 return err; 1729 } 1730 return store_status(status, start, node, i - start); 1731 } 1732 1733 /* 1734 * Migrate an array of page address onto an array of nodes and fill 1735 * the corresponding array of status. 
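 *
 * This backs the move_pages(2) system call. A minimal userspace sketch
 * (illustrative only, error handling omitted):
 *
 *	void *pages[1] = { addr };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *
 *	syscall(SYS_move_pages, 0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *
 * asks for the page backing 'addr' in the calling process (pid 0) to be
 * moved to node 1, and stores the resulting node id, or a negative errno,
 * in status[0].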
1736 */ 1737 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, 1738 unsigned long nr_pages, 1739 const void __user * __user *pages, 1740 const int __user *nodes, 1741 int __user *status, int flags) 1742 { 1743 int current_node = NUMA_NO_NODE; 1744 LIST_HEAD(pagelist); 1745 int start, i; 1746 int err = 0, err1; 1747 1748 lru_cache_disable(); 1749 1750 for (i = start = 0; i < nr_pages; i++) { 1751 const void __user *p; 1752 unsigned long addr; 1753 int node; 1754 1755 err = -EFAULT; 1756 if (get_user(p, pages + i)) 1757 goto out_flush; 1758 if (get_user(node, nodes + i)) 1759 goto out_flush; 1760 addr = (unsigned long)untagged_addr(p); 1761 1762 err = -ENODEV; 1763 if (node < 0 || node >= MAX_NUMNODES) 1764 goto out_flush; 1765 if (!node_state(node, N_MEMORY)) 1766 goto out_flush; 1767 1768 err = -EACCES; 1769 if (!node_isset(node, task_nodes)) 1770 goto out_flush; 1771 1772 if (current_node == NUMA_NO_NODE) { 1773 current_node = node; 1774 start = i; 1775 } else if (node != current_node) { 1776 err = move_pages_and_store_status(mm, current_node, 1777 &pagelist, status, start, i, nr_pages); 1778 if (err) 1779 goto out; 1780 start = i; 1781 current_node = node; 1782 } 1783 1784 /* 1785 * Errors in the page lookup or isolation are not fatal and we simply 1786 * report them via status 1787 */ 1788 err = add_page_for_migration(mm, addr, current_node, 1789 &pagelist, flags & MPOL_MF_MOVE_ALL); 1790 1791 if (err > 0) { 1792 /* The page is successfully queued for migration */ 1793 continue; 1794 } 1795 1796 /* 1797 * The move_pages() man page does not have an -EEXIST choice, so 1798 * use -EFAULT instead. 1799 */ 1800 if (err == -EEXIST) 1801 err = -EFAULT; 1802 1803 /* 1804 * If the page is already on the target node (!err), store the 1805 * node, otherwise, store the err. 1806 */ 1807 err = store_status(status, i, err ? : current_node, 1); 1808 if (err) 1809 goto out_flush; 1810 1811 err = move_pages_and_store_status(mm, current_node, &pagelist, 1812 status, start, i, nr_pages); 1813 if (err) 1814 goto out; 1815 current_node = NUMA_NO_NODE; 1816 } 1817 out_flush: 1818 /* Make sure we do not overwrite the existing error */ 1819 err1 = move_pages_and_store_status(mm, current_node, &pagelist, 1820 status, start, i, nr_pages); 1821 if (err >= 0) 1822 err = err1; 1823 out: 1824 lru_cache_enable(); 1825 return err; 1826 } 1827 1828 /* 1829 * Determine the nodes of an array of pages and store it in an array of status. 
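 *
 * For example (illustrative values): after the call, status[i] holds a node
 * id such as 0 or 1 for an address whose page is present, -ENOENT when no
 * page is mapped there, or -EFAULT when the address is not covered by a VMA
 * (see the loop below).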
1830 */ 1831 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages, 1832 const void __user **pages, int *status) 1833 { 1834 unsigned long i; 1835 1836 mmap_read_lock(mm); 1837 1838 for (i = 0; i < nr_pages; i++) { 1839 unsigned long addr = (unsigned long)(*pages); 1840 struct vm_area_struct *vma; 1841 struct page *page; 1842 int err = -EFAULT; 1843 1844 vma = vma_lookup(mm, addr); 1845 if (!vma) 1846 goto set_status; 1847 1848 /* FOLL_DUMP to ignore special (like zero) pages */ 1849 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP); 1850 1851 err = PTR_ERR(page); 1852 if (IS_ERR(page)) 1853 goto set_status; 1854 1855 if (page) { 1856 err = page_to_nid(page); 1857 put_page(page); 1858 } else { 1859 err = -ENOENT; 1860 } 1861 set_status: 1862 *status = err; 1863 1864 pages++; 1865 status++; 1866 } 1867 1868 mmap_read_unlock(mm); 1869 } 1870 1871 static int get_compat_pages_array(const void __user *chunk_pages[], 1872 const void __user * __user *pages, 1873 unsigned long chunk_nr) 1874 { 1875 compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages; 1876 compat_uptr_t p; 1877 int i; 1878 1879 for (i = 0; i < chunk_nr; i++) { 1880 if (get_user(p, pages32 + i)) 1881 return -EFAULT; 1882 chunk_pages[i] = compat_ptr(p); 1883 } 1884 1885 return 0; 1886 } 1887 1888 /* 1889 * Determine the nodes of a user array of pages and store it in 1890 * a user array of status. 1891 */ 1892 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages, 1893 const void __user * __user *pages, 1894 int __user *status) 1895 { 1896 #define DO_PAGES_STAT_CHUNK_NR 16UL 1897 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR]; 1898 int chunk_status[DO_PAGES_STAT_CHUNK_NR]; 1899 1900 while (nr_pages) { 1901 unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR); 1902 1903 if (in_compat_syscall()) { 1904 if (get_compat_pages_array(chunk_pages, pages, 1905 chunk_nr)) 1906 break; 1907 } else { 1908 if (copy_from_user(chunk_pages, pages, 1909 chunk_nr * sizeof(*chunk_pages))) 1910 break; 1911 } 1912 1913 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status); 1914 1915 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status))) 1916 break; 1917 1918 pages += chunk_nr; 1919 status += chunk_nr; 1920 nr_pages -= chunk_nr; 1921 } 1922 return nr_pages ? -EFAULT : 0; 1923 } 1924 1925 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes) 1926 { 1927 struct task_struct *task; 1928 struct mm_struct *mm; 1929 1930 /* 1931 * There is no need to check if current process has the right to modify 1932 * the specified process when they are same. 1933 */ 1934 if (!pid) { 1935 mmget(current->mm); 1936 *mem_nodes = cpuset_mems_allowed(current); 1937 return current->mm; 1938 } 1939 1940 /* Find the mm_struct */ 1941 rcu_read_lock(); 1942 task = find_task_by_vpid(pid); 1943 if (!task) { 1944 rcu_read_unlock(); 1945 return ERR_PTR(-ESRCH); 1946 } 1947 get_task_struct(task); 1948 1949 /* 1950 * Check if this process has the right to modify the specified 1951 * process. Use the regular "ptrace_may_access()" checks. 
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		rcu_read_unlock();
		mm = ERR_PTR(-EPERM);
		goto out;
	}
	rcu_read_unlock();

	mm = ERR_PTR(security_task_movememory(task));
	if (IS_ERR(mm))
		goto out;
	*mem_nodes = cpuset_mems_allowed(task);
	mm = get_task_mm(task);
out:
	put_task_struct(task);
	if (!mm)
		mm = ERR_PTR(-EINVAL);
	return mm;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
			     const void __user * __user *pages,
			     const int __user *nodes,
			     int __user *status, int flags)
{
	struct mm_struct *mm;
	int err;
	nodemask_t task_nodes;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	mm = find_mm_struct(pid, &task_nodes);
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	if (nodes)
		err = do_pages_move(mm, task_nodes, nr_pages, pages,
				    nodes, status, flags);
	else
		err = do_pages_stat(mm, nr_pages, pages, status);

	mmput(mm);
	return err;
}

SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
}
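
/*
 * Illustration only (userspace, not part of this file): when move_pages(2)
 * is called with a NULL 'nodes' array, kernel_move_pages() above takes the
 * do_pages_stat() path and the call becomes a pure query: each status[i]
 * is filled with the node the page currently resides on, or a negative
 * errno such as -ENOENT for an address with no page present.  The helper
 * name below is hypothetical; build with -lnuma.
 *
 *	#include <numaif.h>
 *
 *	static int page_node_of(void *addr)
 *	{
 *		void *pages[1] = { addr };
 *		int status[1];
 *
 *		if (move_pages(0, 1, pages, NULL, status, 0) < 0)
 *			return -1;	// syscall-level failure
 *		return status[0];	// >= 0: node id, < 0: per-page -errno
 *	}
 */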

#ifdef CONFIG_NUMA_BALANCING
/*
 * Returns true if this is a safe migration target node for misplaced NUMA
 * pages. Currently it only checks the watermarks, which is crude.
 */
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
				   unsigned long nr_migrate_pages)
{
	int z;

	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		struct zone *zone = pgdat->node_zones + z;

		if (!managed_zone(zone))
			continue;

		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
		if (!zone_watermark_ok(zone, 0,
				       high_wmark_pages(zone) +
				       nr_migrate_pages,
				       ZONE_MOVABLE, 0))
			continue;
		return true;
	}
	return false;
}

static struct page *alloc_misplaced_dst_page(struct page *page,
					     unsigned long data)
{
	int nid = (int) data;
	int order = compound_order(page);
	gfp_t gfp = __GFP_THISNODE;
	struct folio *new;

	if (order > 0)
		gfp |= GFP_TRANSHUGE_LIGHT;
	else {
		gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
			__GFP_NOWARN;
		gfp &= ~__GFP_RECLAIM;
	}
	new = __folio_alloc_node(gfp, order, nid);

	return &new->page;
}

static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
	int nr_pages = thp_nr_pages(page);
	int order = compound_order(page);

	VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);

	/* Do not migrate THP mapped by multiple processes */
	if (PageTransHuge(page) && total_mapcount(page) > 1)
		return 0;

	/* Avoid migrating to a node that is nearly full */
	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
		int z;

		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
			return 0;
		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
			if (managed_zone(pgdat->node_zones + z))
				break;
		}
		wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
		return 0;
	}

	if (isolate_lru_page(page))
		return 0;

	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
			    nr_pages);

	/*
	 * Isolating the page has taken another reference, so the
	 * caller's reference can be safely dropped without the page
	 * disappearing underneath us during migration.
	 */
	put_page(page);
	return 1;
}

/*
 * Attempt to migrate a misplaced page to the specified destination
 * node. Caller is expected to have an elevated reference count on
 * the page that will be dropped by this function before returning.
 */
int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
			   int node)
{
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated;
	int nr_remaining;
	unsigned int nr_succeeded;
	LIST_HEAD(migratepages);
	int nr_pages = thp_nr_pages(page);

	/*
	 * Don't migrate file pages that are mapped in multiple processes
	 * with execute permissions as they are probably shared libraries.
	 */
	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
	    (vma->vm_flags & VM_EXEC))
		goto out;

	/*
	 * Also do not migrate dirty pages as not all filesystems can move
	 * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
	 */
	if (page_is_file_lru(page) && PageDirty(page))
		goto out;

	isolated = numamigrate_isolate_page(pgdat, page);
	if (!isolated)
		goto out;

	list_add(&page->lru, &migratepages);
	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
				     NULL, node, MIGRATE_ASYNC,
				     MR_NUMA_MISPLACED, &nr_succeeded);
	if (nr_remaining) {
		if (!list_empty(&migratepages)) {
			list_del(&page->lru);
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_lru(page), -nr_pages);
			putback_lru_page(page);
		}
		isolated = 0;
	}
	if (nr_succeeded) {
		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
		if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
					    nr_succeeded);
	}
	BUG_ON(!list_empty(&migratepages));
	return isolated;

out:
	put_page(page);
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

/*
 * node_demotion[] example:
 *
 * Consider a system with two sockets. Each socket has
 * three classes of memory attached: fast, medium and slow.
 * Each memory class is placed in its own NUMA node. The
 * CPUs are placed in the node with the "fast" memory. The
 * 6 NUMA nodes (0-5) might be split among the sockets like
 * this:
 *
 *	Socket A: 0, 1, 2
 *	Socket B: 3, 4, 5
 *
 * When Node 0 fills up, its memory should be migrated to
 * Node 1. When Node 1 fills up, it should be migrated to
 * Node 2. The migration path starts on the nodes with the
 * processors (since allocations default to this node) and
 * fast memory, progresses through medium and ends with the
 * slow memory:
 *
 *	0 -> 1 -> 2 -> stop
 *	3 -> 4 -> 5 -> stop
 *
 * This is represented in the node_demotion[] like this:
 *
 *	{ nr=1, nodes[0]=1 }, // Node 0 migrates to 1
 *	{ nr=1, nodes[0]=2 }, // Node 1 migrates to 2
 *	{ nr=0, nodes[0]=-1 }, // Node 2 does not migrate
 *	{ nr=1, nodes[0]=4 }, // Node 3 migrates to 4
 *	{ nr=1, nodes[0]=5 }, // Node 4 migrates to 5
 *	{ nr=0, nodes[0]=-1 }, // Node 5 does not migrate
 *
 * Moreover some systems may have multiple slow memory nodes.
 * Suppose a system has one socket with 3 memory nodes, node 0
 * is fast memory type, and node 1/2 both are slow memory
 * type, and the distance between fast memory node and slow
 * memory node is the same. So the migration path should be:
 *
 *	0 -> 1/2 -> stop
 *
 * This is represented in the node_demotion[] like this:
 *	{ nr=2, {nodes[0]=1, nodes[1]=2} }, // Node 0 migrates to node 1 and node 2
 *	{ nr=0, nodes[0]=-1, }, // Node 1 does not migrate
 *	{ nr=0, nodes[0]=-1, }, // Node 2 does not migrate
 */
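
/*
 * Illustration only: a hypothetical debugging helper that walks a demotion
 * chain using the next_demotion_node() API defined below.  For the
 * two-socket example above, print_demotion_chain(0) would emit
 * "0 -> 1 -> 2 -> stop".  Note that next_demotion_node() only holds
 * rcu_read_lock() across a single lookup, so a walk like this should take
 * rcu_read_lock() itself if it needs a consistent view of node_demotion[]
 * while the array is being rewritten.
 *
 *	static void print_demotion_chain(int node)
 *	{
 *		pr_info("%d", node);
 *		while ((node = next_demotion_node(node)) != NUMA_NO_NODE)
 *			pr_cont(" -> %d", node);
 *		pr_cont(" -> stop\n");
 *	}
 */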

/*
 * Writes to this array occur without locking. Cycles are
 * not allowed: Node X demotes to Y which demotes to X...
 *
 * If multiple reads are performed, a single rcu_read_lock()
 * must be held over all reads to ensure that no cycles are
 * observed.
 */
#define DEFAULT_DEMOTION_TARGET_NODES 15

#if MAX_NUMNODES < DEFAULT_DEMOTION_TARGET_NODES
#define DEMOTION_TARGET_NODES	(MAX_NUMNODES - 1)
#else
#define DEMOTION_TARGET_NODES	DEFAULT_DEMOTION_TARGET_NODES
#endif

struct demotion_nodes {
	unsigned short nr;
	short nodes[DEMOTION_TARGET_NODES];
};

static struct demotion_nodes *node_demotion __read_mostly;

/**
 * next_demotion_node() - Get the next node in the demotion path
 * @node: The starting node to lookup the next node
 *
 * Return: node id for next memory node in the demotion path hierarchy
 * from @node; NUMA_NO_NODE if @node is terminal. This does not keep
 * @node online or guarantee that it *continues* to be the next demotion
 * target.
 */
int next_demotion_node(int node)
{
	struct demotion_nodes *nd;
	unsigned short target_nr, index;
	int target;

	if (!node_demotion)
		return NUMA_NO_NODE;

	nd = &node_demotion[node];

	/*
	 * node_demotion[] is updated without excluding this
	 * function from running. RCU doesn't provide any
	 * compiler barriers, so the READ_ONCE() is required
	 * to avoid compiler reordering or read merging.
	 *
	 * Make sure to use RCU over entire code blocks if
	 * node_demotion[] reads need to be consistent.
	 */
	rcu_read_lock();
	target_nr = READ_ONCE(nd->nr);

	switch (target_nr) {
	case 0:
		target = NUMA_NO_NODE;
		goto out;
	case 1:
		index = 0;
		break;
	default:
		/*
		 * If there are multiple target nodes, just select one
		 * target node randomly.
		 *
		 * We could also select the target node round-robin, but
		 * that would require another field in node_demotion[] to
		 * record the last selected target node, which may cause
		 * cache ping-pong as it keeps changing. Per-CPU state
		 * could avoid that caching issue, but seems more
		 * complicated. So selecting a target node randomly seems
		 * better for now.
		 */
		index = get_random_int() % target_nr;
		break;
	}

	target = READ_ONCE(nd->nodes[index]);

out:
	rcu_read_unlock();
	return target;
}

/* Disable reclaim-based migration. */
static void __disable_all_migrate_targets(void)
{
	int node, i;

	if (!node_demotion)
		return;

	for_each_online_node(node) {
		node_demotion[node].nr = 0;
		for (i = 0; i < DEMOTION_TARGET_NODES; i++)
			node_demotion[node].nodes[i] = NUMA_NO_NODE;
	}
}

static void disable_all_migrate_targets(void)
{
	__disable_all_migrate_targets();

	/*
	 * Ensure that the "disable" is visible across the system.
	 * Readers will see either a combination of before+disable
	 * state or disable+after. They will never see before and
	 * after state together.
	 *
	 * The before+after state together might have cycles and
	 * could cause readers to do things like loop until this
	 * function finishes. This ensures they can only see a
	 * single "bad" read and would, for instance, only loop
	 * once.
	 */
	synchronize_rcu();
}

/*
 * Find an automatic demotion target for 'node'.
 * Failing here is OK. It might just indicate
 * being at the end of a chain.
 */
static int establish_migrate_target(int node, nodemask_t *used,
				    int best_distance)
{
	int migration_target, index, val;
	struct demotion_nodes *nd;

	if (!node_demotion)
		return NUMA_NO_NODE;

	nd = &node_demotion[node];

	migration_target = find_next_best_node(node, used);
	if (migration_target == NUMA_NO_NODE)
		return NUMA_NO_NODE;

	/*
	 * If this node has already been given a migration target, that
	 * target is at the best possible distance from it. Still check
	 * whether this node can also be demoted to other target nodes
	 * that sit at the same best distance.
	 */
	if (best_distance != -1) {
		val = node_distance(node, migration_target);
		if (val > best_distance)
			goto out_clear;
	}

	index = nd->nr;
	if (WARN_ONCE(index >= DEMOTION_TARGET_NODES,
		      "Exceeds maximum demotion target nodes\n"))
		goto out_clear;

	nd->nodes[index] = migration_target;
	nd->nr++;

	return migration_target;
out_clear:
	node_clear(migration_target, *used);
	return NUMA_NO_NODE;
}

/*
 * When memory fills up on a node, memory contents can be
 * automatically migrated to another node instead of
 * discarded at reclaim.
 *
 * Establish a "migration path" which will start at nodes
 * with CPUs and will follow the priorities used to build the
 * page allocator zonelists.
 *
 * The difference here is that cycles must be avoided. If
 * node0 migrates to node1, then neither node1, nor anything
 * node1 migrates to can migrate to node0. Also one node can
 * be migrated to multiple nodes if the target nodes all have
 * the same best distance from the source node.
 *
 * This function can run simultaneously with readers of
 * node_demotion[]. However, it can not run simultaneously
 * with itself. Exclusion is provided by memory hotplug events
 * being single-threaded.
 */
static void __set_migration_target_nodes(void)
{
	nodemask_t next_pass;
	nodemask_t this_pass;
	nodemask_t used_targets = NODE_MASK_NONE;
	int node, best_distance;

	/*
	 * Avoid any oddities like cycles that could occur
	 * from changes in the topology. This will leave
	 * a momentary gap when migration is disabled.
	 */
	disable_all_migrate_targets();

	/*
	 * Allocations go close to CPUs, first. Assume that
	 * the migration path starts at the nodes with CPUs.
	 */
	next_pass = node_states[N_CPU];
again:
	this_pass = next_pass;
	next_pass = NODE_MASK_NONE;
	/*
	 * To avoid cycles in the migration "graph", ensure
	 * that migration sources are not future targets by
	 * setting them in 'used_targets'. Do this only
	 * once per pass so that multiple source nodes can
	 * share a target node.
	 *
	 * 'used_targets' will become unavailable in future
	 * passes. This limits some opportunities for
	 * multiple source nodes to share a destination.
	 */
	nodes_or(used_targets, used_targets, this_pass);

	for_each_node_mask(node, this_pass) {
		best_distance = -1;

		/*
		 * Try to set up the migration path for the node. There can
		 * be multiple target migration nodes, so loop to find all
		 * the targets that share the best node distance.
		 */
		do {
			int target_node =
				establish_migrate_target(node, &used_targets,
							 best_distance);

			if (target_node == NUMA_NO_NODE)
				break;

			if (best_distance == -1)
				best_distance = node_distance(node, target_node);

			/*
			 * Visit targets from this pass in the next pass.
			 * Eventually, every node will have been part of
			 * a pass, and will become set in 'used_targets'.
			 */
			node_set(target_node, next_pass);
		} while (1);
	}
	/*
	 * 'next_pass' contains nodes which became migration
	 * targets in this pass. Make additional passes until
	 * no more migration targets are available.
	 */
	if (!nodes_empty(next_pass))
		goto again;
}

/*
 * For callers that do not hold get_online_mems() already.
 */
void set_migration_target_nodes(void)
{
	get_online_mems();
	__set_migration_target_nodes();
	put_online_mems();
}

/*
 * This leaves migrate-on-reclaim transiently disabled between
 * the MEM_GOING_OFFLINE and MEM_OFFLINE events. This runs
 * whether reclaim-based migration is enabled or not, which
 * ensures that the user can turn reclaim-based migration on or
 * off at any time without needing to recalculate migration targets.
 *
 * These callbacks already hold get_online_mems(). That is why
 * __set_migration_target_nodes() can be used as opposed to
 * set_migration_target_nodes().
 */
#ifdef CONFIG_MEMORY_HOTPLUG
static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
						 unsigned long action, void *_arg)
{
	struct memory_notify *arg = _arg;

	/*
	 * Only update the node migration order when a node is
	 * changing status, like online->offline. This avoids
	 * the overhead of synchronize_rcu() in most cases.
	 */
	if (arg->status_change_nid < 0)
		return notifier_from_errno(0);

	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * Make sure there are not transient states where
		 * an offline node is a migration target. This
		 * will leave migration disabled until the offline
		 * completes and the MEM_OFFLINE case below runs.
		 */
		disable_all_migrate_targets();
		break;
	case MEM_OFFLINE:
	case MEM_ONLINE:
		/*
		 * Recalculate the target nodes once the node
		 * reaches its final state (online or offline).
		 */
		__set_migration_target_nodes();
		break;
	case MEM_CANCEL_OFFLINE:
		/*
		 * MEM_GOING_OFFLINE disabled all the migration
		 * targets. Reenable them.
		 */
		__set_migration_target_nodes();
		break;
	case MEM_GOING_ONLINE:
	case MEM_CANCEL_ONLINE:
		break;
	}

	return notifier_from_errno(0);
}
#endif

void __init migrate_on_reclaim_init(void)
{
	node_demotion = kcalloc(nr_node_ids,
				sizeof(struct demotion_nodes),
				GFP_KERNEL);
	WARN_ON(!node_demotion);
#ifdef CONFIG_MEMORY_HOTPLUG
	hotplug_memory_notifier(migrate_on_reclaim_callback, 100);
#endif
	/*
	 * At this point, all NUMA nodes with memory/CPUs have their state
	 * properly set, so we can build the demotion order now.
	 * Hold the cpu_hotplug lock just in case, as we could possibly
	 * have CPU hotplug events during boot.
	 */
	cpus_read_lock();
	set_migration_target_nodes();
	cpus_read_unlock();
}

bool numa_demotion_enabled = false;

#ifdef CONFIG_SYSFS
static ssize_t numa_demotion_enabled_show(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  numa_demotion_enabled ? "true" : "false");
}

static ssize_t numa_demotion_enabled_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	ssize_t ret;

	ret = kstrtobool(buf, &numa_demotion_enabled);
	if (ret)
		return ret;

	return count;
}

static struct kobj_attribute numa_demotion_enabled_attr =
	__ATTR(demotion_enabled, 0644, numa_demotion_enabled_show,
	       numa_demotion_enabled_store);

static struct attribute *numa_attrs[] = {
	&numa_demotion_enabled_attr.attr,
	NULL,
};

static const struct attribute_group numa_attr_group = {
	.attrs = numa_attrs,
};

static int __init numa_init_sysfs(void)
{
	int err;
	struct kobject *numa_kobj;

	numa_kobj = kobject_create_and_add("numa", mm_kobj);
	if (!numa_kobj) {
		pr_err("failed to create numa kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(numa_kobj, &numa_attr_group);
	if (err) {
		pr_err("failed to register numa group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(numa_kobj);
	return err;
}
subsys_initcall(numa_init_sysfs);
#endif /* CONFIG_SYSFS */
#endif /* CONFIG_NUMA */
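
/*
 * Illustration only (userspace, not part of this file): the attribute
 * registered above is exposed as /sys/kernel/mm/numa/demotion_enabled and
 * its store method parses the input with kstrtobool(), so values such as
 * "1"/"0", "y"/"n" or "on"/"off" are accepted.  A hypothetical helper that
 * enables reclaim-based demotion:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int enable_numa_demotion(void)
 *	{
 *		int fd = open("/sys/kernel/mm/numa/demotion_enabled", O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, "1", 1) != 1) {
 *			close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 */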