1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Memory Migration functionality - linux/mm/migrate.c
4 *
5 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6 *
7 * Page migration was first developed in the context of the memory hotplug
8 * project. The main authors of the migration code are:
9 *
10 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11 * Hirokazu Takahashi <taka@valinux.co.jp>
12 * Dave Hansen <haveblue@us.ibm.com>
13 * Christoph Lameter
14 */
15
16 #include <linux/migrate.h>
17 #include <linux/export.h>
18 #include <linux/swap.h>
19 #include <linux/swapops.h>
20 #include <linux/pagemap.h>
21 #include <linux/buffer_head.h>
22 #include <linux/mm_inline.h>
23 #include <linux/ksm.h>
24 #include <linux/rmap.h>
25 #include <linux/topology.h>
26 #include <linux/cpu.h>
27 #include <linux/cpuset.h>
28 #include <linux/writeback.h>
29 #include <linux/mempolicy.h>
30 #include <linux/vmalloc.h>
31 #include <linux/security.h>
32 #include <linux/backing-dev.h>
33 #include <linux/compaction.h>
34 #include <linux/syscalls.h>
35 #include <linux/compat.h>
36 #include <linux/hugetlb.h>
37 #include <linux/gfp.h>
38 #include <linux/pfn_t.h>
39 #include <linux/page_idle.h>
40 #include <linux/page_owner.h>
41 #include <linux/sched/mm.h>
42 #include <linux/ptrace.h>
43 #include <linux/memory.h>
44 #include <linux/sched/sysctl.h>
45 #include <linux/memory-tiers.h>
46 #include <linux/pagewalk.h>
47
48 #include <asm/tlbflush.h>
49
50 #include <trace/events/migrate.h>
51
52 #include "internal.h"
53
bool isolate_movable_page(struct page *page, isolate_mode_t mode)
55 {
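/* Take a reference, unless this is a tail page or its refcount is zero. */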
56 struct folio *folio = folio_get_nontail_page(page);
57 const struct movable_operations *mops;
58
/*
 * Avoid burning cycles on pages that are still under __free_pages(),
 * or that just got freed under us.
 *
 * In case we 'win' a race for a movable page being freed under us and
 * raise its refcount, preventing __free_pages() from doing its job,
 * the put_page() at the end of this block will take care of
 * releasing this page, thus avoiding a nasty leakage.
 */
68 if (!folio)
69 goto out;
70
/*
 * Check the movable flag before taking the page lock because
 * we use non-atomic bitops on newly allocated page flags, so
 * unconditionally grabbing the lock would corrupt the owner's
 * non-atomic flag updates.
 */
76 if (unlikely(!__folio_test_movable(folio)))
77 goto out_putfolio;
78
/*
 * As movable pages are not isolated from LRU lists, concurrent
 * compaction threads can race against page migration functions
 * as well as against the page being released.
 *
 * In order to avoid an already isolated movable page being
 * (wrongly) re-isolated while it is under migration, and to
 * avoid attempting to isolate pages being released, let's be
 * sure we have the page lock before proceeding with the
 * movable page isolation steps.
 */
90 if (unlikely(!folio_trylock(folio)))
91 goto out_putfolio;
92
93 if (!folio_test_movable(folio) || folio_test_isolated(folio))
94 goto out_no_isolated;
95
96 mops = folio_movable_ops(folio);
97 VM_BUG_ON_FOLIO(!mops, folio);
98
99 if (!mops->isolate_page(&folio->page, mode))
100 goto out_no_isolated;
101
102 /* Driver shouldn't use the isolated flag */
103 WARN_ON_ONCE(folio_test_isolated(folio));
104 folio_set_isolated(folio);
105 folio_unlock(folio);
106
107 return true;
108
109 out_no_isolated:
110 folio_unlock(folio);
111 out_putfolio:
112 folio_put(folio);
113 out:
114 return false;
115 }
116
static void putback_movable_folio(struct folio *folio)
118 {
119 const struct movable_operations *mops = folio_movable_ops(folio);
120
121 mops->putback_page(&folio->page);
122 folio_clear_isolated(folio);
123 }
124
/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlb pages. See isolate_migratepages_range()
 * and folio_isolate_hugetlb().
 */
void putback_movable_pages(struct list_head *l)
134 {
135 struct folio *folio;
136 struct folio *folio2;
137
138 list_for_each_entry_safe(folio, folio2, l, lru) {
139 if (unlikely(folio_test_hugetlb(folio))) {
140 folio_putback_hugetlb(folio);
141 continue;
142 }
143 list_del(&folio->lru);
/*
 * We isolated a non-LRU movable folio, so we can use
 * __folio_test_movable() here: an LRU folio's mapping can never
 * have PAGE_MAPPING_MOVABLE set.
 */
149 if (unlikely(__folio_test_movable(folio))) {
150 VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
151 folio_lock(folio);
152 if (folio_test_movable(folio))
153 putback_movable_folio(folio);
154 else
155 folio_clear_isolated(folio);
156 folio_unlock(folio);
157 folio_put(folio);
158 } else {
159 node_stat_mod_folio(folio, NR_ISOLATED_ANON +
160 folio_is_file_lru(folio), -folio_nr_pages(folio));
161 folio_putback_lru(folio);
162 }
163 }
164 }
165
166 /* Must be called with an elevated refcount on the non-hugetlb folio */
bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
168 {
169 bool isolated, lru;
170
171 if (folio_test_hugetlb(folio))
172 return folio_isolate_hugetlb(folio, list);
173
174 lru = !__folio_test_movable(folio);
175 if (lru)
176 isolated = folio_isolate_lru(folio);
177 else
178 isolated = isolate_movable_page(&folio->page,
179 ISOLATE_UNEVICTABLE);
180
181 if (!isolated)
182 return false;
183
184 list_add(&folio->lru, list);
185 if (lru)
186 node_stat_add_folio(folio, NR_ISOLATED_ANON +
187 folio_is_file_lru(folio));
188
189 return true;
190 }
191
static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
193 struct folio *folio,
194 unsigned long idx)
195 {
196 struct page *page = folio_page(folio, idx);
197 bool contains_data;
198 pte_t newpte;
199 void *addr;
200
201 if (PageCompound(page))
202 return false;
203 VM_BUG_ON_PAGE(!PageAnon(page), page);
204 VM_BUG_ON_PAGE(!PageLocked(page), page);
205 VM_BUG_ON_PAGE(pte_present(*pvmw->pte), page);
206
207 if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
208 mm_forbids_zeropage(pvmw->vma->vm_mm))
209 return false;
210
/*
 * The pmd entry mapping the old thp was flushed and the pte mapping
 * this subpage is no longer present. If the subpage contains only
 * zeroes, map it to the shared zeropage.
 */
216 addr = kmap_local_page(page);
217 contains_data = memchr_inv(addr, 0, PAGE_SIZE);
218 kunmap_local(addr);
219
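/* memchr_inv() returns NULL only if the whole page is zero-filled. */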
220 if (contains_data)
221 return false;
222
223 newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
224 pvmw->vma->vm_page_prot));
225 set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);
226
227 dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));
228 return true;
229 }
230
231 struct rmap_walk_arg {
232 struct folio *folio;
233 bool map_unused_to_zeropage;
234 };
235
236 /*
237 * Restore a potential migration pte to a working pte entry
238 */
static bool remove_migration_pte(struct folio *folio,
240 struct vm_area_struct *vma, unsigned long addr, void *arg)
241 {
242 struct rmap_walk_arg *rmap_walk_arg = arg;
243 DEFINE_FOLIO_VMA_WALK(pvmw, rmap_walk_arg->folio, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
244
245 while (page_vma_mapped_walk(&pvmw)) {
246 rmap_t rmap_flags = RMAP_NONE;
247 pte_t old_pte;
248 pte_t pte;
249 swp_entry_t entry;
250 struct page *new;
251 unsigned long idx = 0;
252
253 /* pgoff is invalid for ksm pages, but they are never large */
254 if (folio_test_large(folio) && !folio_test_hugetlb(folio))
255 idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
256 new = folio_page(folio, idx);
257
258 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
259 /* PMD-mapped THP migration entry */
260 if (!pvmw.pte) {
261 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
262 !folio_test_pmd_mappable(folio), folio);
263 remove_migration_pmd(&pvmw, new);
264 continue;
265 }
266 #endif
267 if (rmap_walk_arg->map_unused_to_zeropage &&
268 try_to_map_unused_to_zeropage(&pvmw, folio, idx))
269 continue;
270
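/*
 * Take a reference for the mapping about to be re-established; it
 * pairs with the reference dropped when the migration entry was
 * installed.
 */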
271 folio_get(folio);
272 pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
273 old_pte = ptep_get(pvmw.pte);
274
275 entry = pte_to_swp_entry(old_pte);
276 if (!is_migration_entry_young(entry))
277 pte = pte_mkold(pte);
278 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
279 pte = pte_mkdirty(pte);
280 if (pte_swp_soft_dirty(old_pte))
281 pte = pte_mksoft_dirty(pte);
282 else
283 pte = pte_clear_soft_dirty(pte);
284
285 if (is_writable_migration_entry(entry))
286 pte = pte_mkwrite(pte, vma);
287 else if (pte_swp_uffd_wp(old_pte))
288 pte = pte_mkuffd_wp(pte);
289
290 if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
291 rmap_flags |= RMAP_EXCLUSIVE;
292
293 if (unlikely(is_device_private_page(new))) {
294 if (pte_write(pte))
295 entry = make_writable_device_private_entry(
296 page_to_pfn(new));
297 else
298 entry = make_readable_device_private_entry(
299 page_to_pfn(new));
300 pte = swp_entry_to_pte(entry);
301 if (pte_swp_soft_dirty(old_pte))
302 pte = pte_swp_mksoft_dirty(pte);
303 if (pte_swp_uffd_wp(old_pte))
304 pte = pte_swp_mkuffd_wp(pte);
305 }
306
307 #ifdef CONFIG_HUGETLB_PAGE
308 if (folio_test_hugetlb(folio)) {
309 struct hstate *h = hstate_vma(vma);
310 unsigned int shift = huge_page_shift(h);
311 unsigned long psize = huge_page_size(h);
312
313 pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
314 if (folio_test_anon(folio))
315 hugetlb_add_anon_rmap(folio, vma, pvmw.address,
316 rmap_flags);
317 else
318 hugetlb_add_file_rmap(folio);
319 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
320 psize);
321 } else
322 #endif
323 {
324 if (folio_test_anon(folio))
325 folio_add_anon_rmap_pte(folio, new, vma,
326 pvmw.address, rmap_flags);
327 else
328 folio_add_file_rmap_pte(folio, new, vma);
329 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
330 }
331 if (vma->vm_flags & VM_LOCKED)
332 mlock_drain_local();
333
334 trace_remove_migration_pte(pvmw.address, pte_val(pte),
335 compound_order(new));
336
337 /* No need to invalidate - it was non-present before */
338 update_mmu_cache(vma, pvmw.address, pvmw.pte);
339 }
340
341 return true;
342 }
343
344 /*
345 * Get rid of all migration entries and replace them by
346 * references to the indicated page.
347 */
void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
349 {
350 struct rmap_walk_arg rmap_walk_arg = {
351 .folio = src,
352 .map_unused_to_zeropage = flags & RMP_USE_SHARED_ZEROPAGE,
353 };
354
355 struct rmap_walk_control rwc = {
356 .rmap_one = remove_migration_pte,
357 .arg = &rmap_walk_arg,
358 };
359
360 VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src);
361
362 if (flags & RMP_LOCKED)
363 rmap_walk_locked(dst, &rwc);
364 else
365 rmap_walk(dst, &rwc);
366 }
367
368 /*
369 * Something used the pte of a page under migration. We need to
370 * get to the page and wait until migration is finished.
371 * When we return from this function the fault will be retried.
372 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
374 unsigned long address)
375 {
376 spinlock_t *ptl;
377 pte_t *ptep;
378 pte_t pte;
379 swp_entry_t entry;
380
381 ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
382 if (!ptep)
383 return;
384
385 pte = ptep_get(ptep);
386 pte_unmap(ptep);
387
388 if (!is_swap_pte(pte))
389 goto out;
390
391 entry = pte_to_swp_entry(pte);
392 if (!is_migration_entry(entry))
393 goto out;
394
395 migration_entry_wait_on_locked(entry, ptl);
396 return;
397 out:
398 spin_unlock(ptl);
399 }
400
401 #ifdef CONFIG_HUGETLB_PAGE
402 /*
403 * The vma read lock must be held upon entry. Holding that lock prevents either
404 * the pte or the ptl from being freed.
405 *
406 * This function will release the vma lock before returning.
407 */
void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
409 {
410 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
411 pte_t pte;
412
413 hugetlb_vma_assert_locked(vma);
414 spin_lock(ptl);
415 pte = huge_ptep_get(vma->vm_mm, addr, ptep);
416
417 if (unlikely(!is_hugetlb_entry_migration(pte))) {
418 spin_unlock(ptl);
419 hugetlb_vma_unlock_read(vma);
420 } else {
/*
 * If a migration entry exists, it is safe to release the vma
 * lock here because the pgtable page won't be freed without the
 * pgtable lock being released. See the comment right above the
 * pgtable lock release in migration_entry_wait_on_locked().
 */
427 hugetlb_vma_unlock_read(vma);
428 migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
429 }
430 }
431 #endif
432
433 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
435 {
436 spinlock_t *ptl;
437
438 ptl = pmd_lock(mm, pmd);
439 if (!is_pmd_migration_entry(*pmd))
440 goto unlock;
441 migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
442 return;
443 unlock:
444 spin_unlock(ptl);
445 }
446 #endif
447
static int folio_expected_refs(struct address_space *mapping,
449 struct folio *folio)
450 {
451 int refs = 1;
452 if (!mapping)
453 return refs;
454
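/*
 * The page cache holds one reference per page; folios with buffer
 * heads or other fs-private data hold one more.
 */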
455 refs += folio_nr_pages(folio);
456 if (folio_test_private(folio))
457 refs++;
458
459 return refs;
460 }
461
462 /*
463 * Replace the folio in the mapping.
464 *
465 * The number of remaining references must be:
466 * 1 for anonymous folios without a mapping
467 * 2 for folios with a mapping
468 * 3 for folios with a mapping and the private flag set.
469 */
static int __folio_migrate_mapping(struct address_space *mapping,
471 struct folio *newfolio, struct folio *folio, int expected_count)
472 {
473 XA_STATE(xas, &mapping->i_pages, folio_index(folio));
474 struct zone *oldzone, *newzone;
475 int dirty;
476 long nr = folio_nr_pages(folio);
477 long entries, i;
478
479 if (!mapping) {
480 /* Take off deferred split queue while frozen and memcg set */
481 if (folio_test_large(folio) &&
482 folio_test_large_rmappable(folio)) {
483 if (!folio_ref_freeze(folio, expected_count))
484 return -EAGAIN;
485 folio_unqueue_deferred_split(folio);
486 folio_ref_unfreeze(folio, expected_count);
487 }
488
489 /* No turning back from here */
490 newfolio->index = folio->index;
491 newfolio->mapping = folio->mapping;
492 if (folio_test_anon(folio) && folio_test_large(folio))
493 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
494 if (folio_test_swapbacked(folio))
495 __folio_set_swapbacked(newfolio);
496
497 return MIGRATEPAGE_SUCCESS;
498 }
499
500 oldzone = folio_zone(folio);
501 newzone = folio_zone(newfolio);
502
503 xas_lock_irq(&xas);
504 if (!folio_ref_freeze(folio, expected_count)) {
505 xas_unlock_irq(&xas);
506 return -EAGAIN;
507 }
508
509 /* Take off deferred split queue while frozen and memcg set */
510 folio_unqueue_deferred_split(folio);
511
512 /*
513 * Now we know that no one else is looking at the folio:
514 * no turning back from here.
515 */
516 newfolio->index = folio->index;
517 newfolio->mapping = folio->mapping;
518 if (folio_test_anon(folio) && folio_test_large(folio))
519 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
520 folio_ref_add(newfolio, nr); /* add cache reference */
521 if (folio_test_swapbacked(folio)) {
522 __folio_set_swapbacked(newfolio);
523 if (folio_test_swapcache(folio)) {
524 folio_set_swapcache(newfolio);
525 newfolio->private = folio_get_private(folio);
526 }
527 entries = nr;
528 } else {
529 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
530 entries = 1;
531 }
532
533 /* Move dirty while folio refs frozen and newfolio not yet exposed */
534 dirty = folio_test_dirty(folio);
535 if (dirty) {
536 folio_clear_dirty(folio);
537 folio_set_dirty(newfolio);
538 }
539
540 /* Swap cache still stores N entries instead of a high-order entry */
541 for (i = 0; i < entries; i++) {
542 xas_store(&xas, newfolio);
543 xas_next(&xas);
544 }
545
546 /*
547 * Drop cache reference from old folio by unfreezing
548 * to one less reference.
549 * We know this isn't the last reference.
550 */
551 folio_ref_unfreeze(folio, expected_count - nr);
552
553 xas_unlock(&xas);
554 /* Leave irq disabled to prevent preemption while updating stats */
555
556 /*
557 * If moved to a different zone then also account
558 * the folio for that zone. Other VM counters will be
559 * taken care of when we establish references to the
560 * new folio and drop references to the old folio.
561 *
562 * Note that anonymous folios are accounted for
563 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
564 * are mapped to swap space.
565 */
566 if (newzone != oldzone) {
567 struct lruvec *old_lruvec, *new_lruvec;
568 struct mem_cgroup *memcg;
569
570 memcg = folio_memcg(folio);
571 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
572 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
573
574 __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
575 __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
576 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
577 __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
578 __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
579
580 if (folio_test_pmd_mappable(folio)) {
581 __mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
582 __mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
583 }
584 }
585 #ifdef CONFIG_SWAP
586 if (folio_test_swapcache(folio)) {
587 __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
588 __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
589 }
590 #endif
591 if (dirty && mapping_can_writeback(mapping)) {
592 __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
593 __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
594 __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
595 __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
596 }
597 }
598 local_irq_enable();
599
600 return MIGRATEPAGE_SUCCESS;
601 }
602
int folio_migrate_mapping(struct address_space *mapping,
604 struct folio *newfolio, struct folio *folio, int extra_count)
605 {
606 int expected_count = folio_expected_refs(mapping, folio) + extra_count;
607
608 if (folio_ref_count(folio) != expected_count)
609 return -EAGAIN;
610
611 return __folio_migrate_mapping(mapping, newfolio, folio, expected_count);
612 }
613 EXPORT_SYMBOL(folio_migrate_mapping);
614
615 /*
616 * The expected number of remaining references is the same as that
617 * of folio_migrate_mapping().
618 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
620 struct folio *dst, struct folio *src)
621 {
622 XA_STATE(xas, &mapping->i_pages, folio_index(src));
623 int rc, expected_count = folio_expected_refs(mapping, src);
624
625 if (folio_ref_count(src) != expected_count)
626 return -EAGAIN;
627
628 rc = folio_mc_copy(dst, src);
629 if (unlikely(rc))
630 return rc;
631
632 xas_lock_irq(&xas);
633 if (!folio_ref_freeze(src, expected_count)) {
634 xas_unlock_irq(&xas);
635 return -EAGAIN;
636 }
637
638 dst->index = src->index;
639 dst->mapping = src->mapping;
640
641 folio_ref_add(dst, folio_nr_pages(dst));
642
643 xas_store(&xas, dst);
644
645 folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));
646
647 xas_unlock_irq(&xas);
648
649 return MIGRATEPAGE_SUCCESS;
650 }
651
652 /*
653 * Copy the flags and some other ancillary information
654 */
void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
656 {
657 int cpupid;
658
659 if (folio_test_referenced(folio))
660 folio_set_referenced(newfolio);
661 if (folio_test_uptodate(folio))
662 folio_mark_uptodate(newfolio);
663 if (folio_test_clear_active(folio)) {
664 VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
665 folio_set_active(newfolio);
666 } else if (folio_test_clear_unevictable(folio))
667 folio_set_unevictable(newfolio);
668 if (folio_test_workingset(folio))
669 folio_set_workingset(newfolio);
670 if (folio_test_checked(folio))
671 folio_set_checked(newfolio);
/*
 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
 * migration entries. We can still have PG_anon_exclusive set on the
 * effectively unmapped and unreferenced first sub-page of an
 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
 */
678 if (folio_test_mappedtodisk(folio))
679 folio_set_mappedtodisk(newfolio);
680
681 /* Move dirty on pages not done by folio_migrate_mapping() */
682 if (folio_test_dirty(folio))
683 folio_set_dirty(newfolio);
684
685 if (folio_test_young(folio))
686 folio_set_young(newfolio);
687 if (folio_test_idle(folio))
688 folio_set_idle(newfolio);
689
690 folio_migrate_refs(newfolio, folio);
691 /*
692 * Copy NUMA information to the new page, to prevent over-eager
693 * future migrations of this same page.
694 */
695 cpupid = folio_xchg_last_cpupid(folio, -1);
/*
 * In memory tiering mode, when migrating between slow and fast
 * memory nodes, reset the cpupid, because it is used to record
 * page access time on slow memory nodes.
 */
701 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
702 bool f_toptier = node_is_toptier(folio_nid(folio));
703 bool t_toptier = node_is_toptier(folio_nid(newfolio));
704
705 if (f_toptier != t_toptier)
706 cpupid = -1;
707 }
708 folio_xchg_last_cpupid(newfolio, cpupid);
709
710 folio_migrate_ksm(newfolio, folio);
711 /*
712 * Please do not reorder this without considering how mm/ksm.c's
713 * ksm_get_folio() depends upon ksm_migrate_page() and the
714 * swapcache flag.
715 */
716 if (folio_test_swapcache(folio))
717 folio_clear_swapcache(folio);
718 folio_clear_private(folio);
719
720 /* page->private contains hugetlb specific flags */
721 if (!folio_test_hugetlb(folio))
722 folio->private = NULL;
723
724 /*
725 * If any waiters have accumulated on the new page then
726 * wake them up.
727 */
728 if (folio_test_writeback(newfolio))
729 folio_end_writeback(newfolio);
730
/*
 * PG_readahead shares the same bit with PG_reclaim. The above
 * folio_end_writeback() may clear PG_readahead mistakenly, so set
 * the bit after that.
 */
736 if (folio_test_readahead(folio))
737 folio_set_readahead(newfolio);
738
739 folio_copy_owner(newfolio, folio);
740 pgalloc_tag_swap(newfolio, folio);
741
742 mem_cgroup_migrate(folio, newfolio);
743 }
744 EXPORT_SYMBOL(folio_migrate_flags);
745
746 /************************************************************
747 * Migration functions
748 ***********************************************************/
749
static int __migrate_folio(struct address_space *mapping, struct folio *dst,
751 struct folio *src, void *src_private,
752 enum migrate_mode mode)
753 {
754 int rc, expected_count = folio_expected_refs(mapping, src);
755
756 /* Check whether src does not have extra refs before we do more work */
757 if (folio_ref_count(src) != expected_count)
758 return -EAGAIN;
759
760 rc = folio_mc_copy(dst, src);
761 if (unlikely(rc))
762 return rc;
763
764 rc = __folio_migrate_mapping(mapping, dst, src, expected_count);
765 if (rc != MIGRATEPAGE_SUCCESS)
766 return rc;
767
768 if (src_private)
769 folio_attach_private(dst, folio_detach_private(src));
770
771 folio_migrate_flags(dst, src);
772 return MIGRATEPAGE_SUCCESS;
773 }
774
775 /**
776 * migrate_folio() - Simple folio migration.
777 * @mapping: The address_space containing the folio.
778 * @dst: The folio to migrate the data to.
779 * @src: The folio containing the current data.
780 * @mode: How to migrate the page.
781 *
782 * Common logic to directly migrate a single LRU folio suitable for
783 * folios that do not have private data.
784 *
785 * Folios are locked upon entry and exit.
786 */
int migrate_folio(struct address_space *mapping, struct folio *dst,
788 struct folio *src, enum migrate_mode mode)
789 {
790 BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */
791 return __migrate_folio(mapping, dst, src, NULL, mode);
792 }
793 EXPORT_SYMBOL(migrate_folio);
794
795 #ifdef CONFIG_BUFFER_HEAD
796 /* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
798 enum migrate_mode mode)
799 {
800 struct buffer_head *bh = head;
801 struct buffer_head *failed_bh;
802
803 do {
804 if (!trylock_buffer(bh)) {
805 if (mode == MIGRATE_ASYNC)
806 goto unlock;
807 if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
808 goto unlock;
809 lock_buffer(bh);
810 }
811
812 bh = bh->b_this_page;
813 } while (bh != head);
814
815 return true;
816
817 unlock:
818 /* We failed to lock the buffer and cannot stall. */
819 failed_bh = bh;
820 bh = head;
821 while (bh != failed_bh) {
822 unlock_buffer(bh);
823 bh = bh->b_this_page;
824 }
825
826 return false;
827 }
828
static int __buffer_migrate_folio(struct address_space *mapping,
830 struct folio *dst, struct folio *src, enum migrate_mode mode,
831 bool check_refs)
832 {
833 struct buffer_head *bh, *head;
834 int rc;
835 int expected_count;
836
837 head = folio_buffers(src);
838 if (!head)
839 return migrate_folio(mapping, dst, src, mode);
840
841 /* Check whether page does not have extra refs before we do more work */
842 expected_count = folio_expected_refs(mapping, src);
843 if (folio_ref_count(src) != expected_count)
844 return -EAGAIN;
845
846 if (!buffer_migrate_lock_buffers(head, mode))
847 return -EAGAIN;
848
849 if (check_refs) {
850 bool busy;
851 bool invalidated = false;
852
853 recheck_buffers:
854 busy = false;
855 spin_lock(&mapping->i_private_lock);
856 bh = head;
857 do {
858 if (atomic_read(&bh->b_count)) {
859 busy = true;
860 break;
861 }
862 bh = bh->b_this_page;
863 } while (bh != head);
864 if (busy) {
865 if (invalidated) {
866 rc = -EAGAIN;
867 goto unlock_buffers;
868 }
869 spin_unlock(&mapping->i_private_lock);
870 invalidate_bh_lrus();
871 invalidated = true;
872 goto recheck_buffers;
873 }
874 }
875
876 rc = filemap_migrate_folio(mapping, dst, src, mode);
877 if (rc != MIGRATEPAGE_SUCCESS)
878 goto unlock_buffers;
879
880 bh = head;
881 do {
882 folio_set_bh(bh, dst, bh_offset(bh));
883 bh = bh->b_this_page;
884 } while (bh != head);
885
886 unlock_buffers:
887 if (check_refs)
888 spin_unlock(&mapping->i_private_lock);
889 bh = head;
890 do {
891 unlock_buffer(bh);
892 bh = bh->b_this_page;
893 } while (bh != head);
894
895 return rc;
896 }
897
898 /**
899 * buffer_migrate_folio() - Migration function for folios with buffers.
900 * @mapping: The address space containing @src.
901 * @dst: The folio to migrate to.
902 * @src: The folio to migrate from.
903 * @mode: How to migrate the folio.
904 *
905 * This function can only be used if the underlying filesystem guarantees
906 * that no other references to @src exist. For example attached buffer
907 * heads are accessed only under the folio lock. If your filesystem cannot
908 * provide this guarantee, buffer_migrate_folio_norefs() may be more
909 * appropriate.
910 *
911 * Return: 0 on success or a negative errno on failure.
912 */
int buffer_migrate_folio(struct address_space *mapping,
914 struct folio *dst, struct folio *src, enum migrate_mode mode)
915 {
916 return __buffer_migrate_folio(mapping, dst, src, mode, false);
917 }
918 EXPORT_SYMBOL(buffer_migrate_folio);
919
920 /**
921 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
922 * @mapping: The address space containing @src.
923 * @dst: The folio to migrate to.
924 * @src: The folio to migrate from.
925 * @mode: How to migrate the folio.
926 *
927 * Like buffer_migrate_folio() except that this variant is more careful
928 * and checks that there are also no buffer head references. This function
929 * is the right one for mappings where buffer heads are directly looked
930 * up and referenced (such as block device mappings).
931 *
932 * Return: 0 on success or a negative errno on failure.
933 */
int buffer_migrate_folio_norefs(struct address_space *mapping,
935 struct folio *dst, struct folio *src, enum migrate_mode mode)
936 {
937 return __buffer_migrate_folio(mapping, dst, src, mode, true);
938 }
939 EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
940 #endif /* CONFIG_BUFFER_HEAD */
941
int filemap_migrate_folio(struct address_space *mapping,
943 struct folio *dst, struct folio *src, enum migrate_mode mode)
944 {
945 return __migrate_folio(mapping, dst, src, folio_get_private(src), mode);
946 }
947 EXPORT_SYMBOL_GPL(filemap_migrate_folio);
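/*
 * A minimal sketch (not from this file): a filesystem whose folios carry
 * fs-private data but need no special migration handling would typically
 * wire this helper into its address_space_operations; "foo_aops" here is
 * hypothetical:
 *
 *	static const struct address_space_operations foo_aops = {
 *		.migrate_folio	= filemap_migrate_folio,
 *	};
 */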
948
/*
 * Write back a folio to clean its dirty state.
 */
static int writeout(struct address_space *mapping, struct folio *folio)
953 {
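/* Write out just this folio, without blocking, much as reclaim would. */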
954 struct writeback_control wbc = {
955 .sync_mode = WB_SYNC_NONE,
956 .nr_to_write = 1,
957 .range_start = 0,
958 .range_end = LLONG_MAX,
959 .for_reclaim = 1
960 };
961 int rc;
962
963 if (!mapping->a_ops->writepage)
964 /* No write method for the address space */
965 return -EINVAL;
966
967 if (!folio_clear_dirty_for_io(folio))
968 /* Someone else already triggered a write */
969 return -EAGAIN;
970
971 /*
972 * A dirty folio may imply that the underlying filesystem has
973 * the folio on some queue. So the folio must be clean for
974 * migration. Writeout may mean we lose the lock and the
975 * folio state is no longer what we checked for earlier.
976 * At this point we know that the migration attempt cannot
977 * be successful.
978 */
979 remove_migration_ptes(folio, folio, 0);
980
981 rc = mapping->a_ops->writepage(&folio->page, &wbc);
982
983 if (rc != AOP_WRITEPAGE_ACTIVATE)
/* ->writepage unlocked the folio; relock it */
985 folio_lock(folio);
986
987 return (rc < 0) ? -EIO : -EAGAIN;
988 }
989
990 /*
991 * Default handling if a filesystem does not provide a migration function.
992 */
static int fallback_migrate_folio(struct address_space *mapping,
994 struct folio *dst, struct folio *src, enum migrate_mode mode)
995 {
996 if (folio_test_dirty(src)) {
/* Only write back folios in full synchronous migration */
998 switch (mode) {
999 case MIGRATE_SYNC:
1000 break;
1001 default:
1002 return -EBUSY;
1003 }
1004 return writeout(mapping, src);
1005 }
1006
1007 /*
1008 * Buffers may be managed in a filesystem specific way.
1009 * We must have no buffers or drop them.
1010 */
1011 if (!filemap_release_folio(src, GFP_KERNEL))
1012 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
1013
1014 return migrate_folio(mapping, dst, src, mode);
1015 }
1016
1017 /*
1018 * Move a page to a newly allocated page
1019 * The page is locked and all ptes have been successfully removed.
1020 *
1021 * The new page will have replaced the old page if this function
1022 * is successful.
1023 *
1024 * Return value:
1025 * < 0 - error code
1026 * MIGRATEPAGE_SUCCESS - success
1027 */
static int move_to_new_folio(struct folio *dst, struct folio *src,
1029 enum migrate_mode mode)
1030 {
1031 int rc = -EAGAIN;
1032 bool is_lru = !__folio_test_movable(src);
1033
1034 VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
1035 VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
1036
1037 if (likely(is_lru)) {
1038 struct address_space *mapping = folio_mapping(src);
1039
1040 if (!mapping)
1041 rc = migrate_folio(mapping, dst, src, mode);
1042 else if (mapping_inaccessible(mapping))
1043 rc = -EOPNOTSUPP;
1044 else if (mapping->a_ops->migrate_folio)
1045 /*
1046 * Most folios have a mapping and most filesystems
1047 * provide a migrate_folio callback. Anonymous folios
1048 * are part of swap space which also has its own
1049 * migrate_folio callback. This is the most common path
1050 * for page migration.
1051 */
1052 rc = mapping->a_ops->migrate_folio(mapping, dst, src,
1053 mode);
1054 else
1055 rc = fallback_migrate_folio(mapping, dst, src, mode);
1056 } else {
1057 const struct movable_operations *mops;
1058
/*
 * A non-LRU page could have been released after the isolation
 * step. In that case, we shouldn't try migration.
 */
1063 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1064 if (!folio_test_movable(src)) {
1065 rc = MIGRATEPAGE_SUCCESS;
1066 folio_clear_isolated(src);
1067 goto out;
1068 }
1069
1070 mops = folio_movable_ops(src);
1071 rc = mops->migrate_page(&dst->page, &src->page, mode);
1072 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
1073 !folio_test_isolated(src));
1074 }
1075
1076 /*
1077 * When successful, old pagecache src->mapping must be cleared before
1078 * src is freed; but stats require that PageAnon be left as PageAnon.
1079 */
1080 if (rc == MIGRATEPAGE_SUCCESS) {
1081 if (__folio_test_movable(src)) {
1082 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1083
/*
 * We clear the isolated flag under the page lock so that no
 * compactor can try to migrate this page.
 */
1088 folio_clear_isolated(src);
1089 }
1090
/*
 * For anonymous and movable folios, src->mapping will be cleared
 * by free_pages_prepare(), so don't reset it here: keeping it
 * preserves the type, so that e.g. PageAnon() still works.
 */
1096 if (!folio_mapping_flags(src))
1097 src->mapping = NULL;
1098
1099 if (likely(!folio_is_zone_device(dst)))
1100 flush_dcache_folio(dst);
1101 }
1102 out:
1103 return rc;
1104 }
1105
/*
 * To record some information during migration, we use the otherwise
 * unused private field of the newly allocated destination folio.
 * This is safe because nobody else is using it.
 */
1111 enum {
1112 PAGE_WAS_MAPPED = BIT(0),
1113 PAGE_WAS_MLOCKED = BIT(1),
1114 PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
1115 };
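/*
 * These state bits are stashed in the low bits of dst->private
 * alongside the anon_vma pointer: anon_vma objects are allocated
 * from a slab cache and at least word-aligned, so those bits are
 * always zero in the pointer itself.
 */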
1116
static void __migrate_folio_record(struct folio *dst,
1118 int old_page_state,
1119 struct anon_vma *anon_vma)
1120 {
1121 dst->private = (void *)anon_vma + old_page_state;
1122 }
1123
static void __migrate_folio_extract(struct folio *dst,
1125 int *old_page_state,
1126 struct anon_vma **anon_vmap)
1127 {
1128 unsigned long private = (unsigned long)dst->private;
1129
1130 *anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
1131 *old_page_state = private & PAGE_OLD_STATES;
1132 dst->private = NULL;
1133 }
1134
1135 /* Restore the source folio to the original state upon failure */
static void migrate_folio_undo_src(struct folio *src,
1137 int page_was_mapped,
1138 struct anon_vma *anon_vma,
1139 bool locked,
1140 struct list_head *ret)
1141 {
1142 if (page_was_mapped)
1143 remove_migration_ptes(src, src, 0);
1144 /* Drop an anon_vma reference if we took one */
1145 if (anon_vma)
1146 put_anon_vma(anon_vma);
1147 if (locked)
1148 folio_unlock(src);
1149 if (ret)
1150 list_move_tail(&src->lru, ret);
1151 }
1152
1153 /* Restore the destination folio to the original state upon failure */
static void migrate_folio_undo_dst(struct folio *dst, bool locked,
1155 free_folio_t put_new_folio, unsigned long private)
1156 {
1157 if (locked)
1158 folio_unlock(dst);
1159 if (put_new_folio)
1160 put_new_folio(dst, private);
1161 else
1162 folio_put(dst);
1163 }
1164
1165 /* Cleanup src folio upon migration success */
static void migrate_folio_done(struct folio *src,
1167 enum migrate_reason reason)
1168 {
/*
 * Compaction can also migrate non-LRU pages, which are not
 * accounted to NR_ISOLATED_*. They can be recognized via
 * __folio_test_movable().
 */
1174 if (likely(!__folio_test_movable(src)) && reason != MR_DEMOTION)
1175 mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1176 folio_is_file_lru(src), -folio_nr_pages(src));
1177
1178 if (reason != MR_MEMORY_FAILURE)
1179 /* We release the page in page_handle_poison. */
1180 folio_put(src);
1181 }
1182
1183 /* Obtain the lock on page, remove all ptes. */
static int migrate_folio_unmap(new_folio_t get_new_folio,
1185 free_folio_t put_new_folio, unsigned long private,
1186 struct folio *src, struct folio **dstp, enum migrate_mode mode,
1187 enum migrate_reason reason, struct list_head *ret)
1188 {
1189 struct folio *dst;
1190 int rc = -EAGAIN;
1191 int old_page_state = 0;
1192 struct anon_vma *anon_vma = NULL;
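/*
 * Racy read of the movable flag, hence the data_race() annotation;
 * it is re-checked under the folio lock before the folio is moved
 * (see move_to_new_folio()).
 */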
1193 bool is_lru = data_race(!__folio_test_movable(src));
1194 bool locked = false;
1195 bool dst_locked = false;
1196
1197 if (folio_ref_count(src) == 1) {
1198 /* Folio was freed from under us. So we are done. */
1199 folio_clear_active(src);
1200 folio_clear_unevictable(src);
1201 /* free_pages_prepare() will clear PG_isolated. */
1202 list_del(&src->lru);
1203 migrate_folio_done(src, reason);
1204 return MIGRATEPAGE_SUCCESS;
1205 }
1206
1207 dst = get_new_folio(src, private);
1208 if (!dst)
1209 return -ENOMEM;
1210 *dstp = dst;
1211
1212 dst->private = NULL;
1213
1214 if (!folio_trylock(src)) {
1215 if (mode == MIGRATE_ASYNC)
1216 goto out;
1217
1218 /*
1219 * It's not safe for direct compaction to call lock_page.
1220 * For example, during page readahead pages are added locked
1221 * to the LRU. Later, when the IO completes the pages are
1222 * marked uptodate and unlocked. However, the queueing
1223 * could be merging multiple pages for one bio (e.g.
1224 * mpage_readahead). If an allocation happens for the
1225 * second or third page, the process can end up locking
1226 * the same page twice and deadlocking. Rather than
1227 * trying to be clever about what pages can be locked,
1228 * avoid the use of lock_page for direct compaction
1229 * altogether.
1230 */
1231 if (current->flags & PF_MEMALLOC)
1232 goto out;
1233
1234 /*
1235 * In "light" mode, we can wait for transient locks (eg
1236 * inserting a page into the page table), but it's not
1237 * worth waiting for I/O.
1238 */
1239 if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
1240 goto out;
1241
1242 folio_lock(src);
1243 }
1244 locked = true;
1245 if (folio_test_mlocked(src))
1246 old_page_state |= PAGE_WAS_MLOCKED;
1247
1248 if (folio_test_writeback(src)) {
/*
 * Only in the case of a full synchronous migration is it
 * necessary to wait for PageWriteback. In the async case,
 * the retry loop is too short and in the sync-light case,
 * the overhead of stalling is too much.
 */
1255 switch (mode) {
1256 case MIGRATE_SYNC:
1257 break;
1258 default:
1259 rc = -EBUSY;
1260 goto out;
1261 }
1262 folio_wait_writeback(src);
1263 }
1264
/*
 * try_to_migrate() drops src->mapcount to 0 here, and once that
 * happens we could not notice if the anon_vma were freed while we
 * migrate the page. Taking a reference here delays freeing the
 * anon_vma until the end of migration. File cache pages are no
 * problem: their mapping stays pinned by the page lock during
 * migration, so only anonymous pages need this care.
 *
 * Only folio_get_anon_vma() understands the subtleties of
 * getting a hold on an anon_vma from outside one of its mms.
 * But if we cannot get the anon_vma, then we won't need it anyway,
 * because that implies that the anon page is no longer mapped
 * (and cannot be remapped so long as we hold the page lock).
 */
1279 if (folio_test_anon(src) && !folio_test_ksm(src))
1280 anon_vma = folio_get_anon_vma(src);
1281
1282 /*
1283 * Block others from accessing the new page when we get around to
1284 * establishing additional references. We are usually the only one
1285 * holding a reference to dst at this point. We used to have a BUG
1286 * here if folio_trylock(dst) fails, but would like to allow for
1287 * cases where there might be a race with the previous use of dst.
1288 * This is much like races on refcount of oldpage: just don't BUG().
1289 */
1290 if (unlikely(!folio_trylock(dst)))
1291 goto out;
1292 dst_locked = true;
1293
1294 if (unlikely(!is_lru)) {
1295 __migrate_folio_record(dst, old_page_state, anon_vma);
1296 return MIGRATEPAGE_UNMAP;
1297 }
1298
/*
 * Corner case handling:
 * 1. When a new swap-cache page is read in, it is added to the LRU
 * and treated as swapcache, but it has no rmap yet.
 * Calling try_to_unmap() against a src->mapping==NULL page will
 * trigger a BUG. So handle it here.
 * 2. An orphaned page (see truncate_cleanup_page) might have
 * fs-private metadata. The page can be picked up due to memory
 * offlining. Everywhere except in page reclaim, the page is
 * invisible to the vm, so the page cannot be migrated. So try to
 * free the metadata, so the page can be freed.
 */
1311 if (!src->mapping) {
1312 if (folio_test_private(src)) {
1313 try_to_free_buffers(src);
1314 goto out;
1315 }
1316 } else if (folio_mapped(src)) {
1317 /* Establish migration ptes */
1318 VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1319 !folio_test_ksm(src) && !anon_vma, src);
1320 try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
1321 old_page_state |= PAGE_WAS_MAPPED;
1322 }
1323
1324 if (!folio_mapped(src)) {
1325 __migrate_folio_record(dst, old_page_state, anon_vma);
1326 return MIGRATEPAGE_UNMAP;
1327 }
1328
1329 out:
/*
 * A folio that has not been unmapped will be restored to the
 * right list unless we want to retry.
 */
1334 if (rc == -EAGAIN)
1335 ret = NULL;
1336
1337 migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1338 anon_vma, locked, ret);
1339 migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
1340
1341 return rc;
1342 }
1343
1344 /* Migrate the folio to the newly allocated folio in dst. */
static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
1346 struct folio *src, struct folio *dst,
1347 enum migrate_mode mode, enum migrate_reason reason,
1348 struct list_head *ret)
1349 {
1350 int rc;
1351 int old_page_state = 0;
1352 struct anon_vma *anon_vma = NULL;
1353 bool is_lru = !__folio_test_movable(src);
1354 struct list_head *prev;
1355
1356 __migrate_folio_extract(dst, &old_page_state, &anon_vma);
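/* Remember dst's place in the list so -EAGAIN can re-insert it (see out:). */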
1357 prev = dst->lru.prev;
1358 list_del(&dst->lru);
1359
1360 rc = move_to_new_folio(dst, src, mode);
1361 if (rc)
1362 goto out;
1363
1364 if (unlikely(!is_lru))
1365 goto out_unlock_both;
1366
1367 /*
1368 * When successful, push dst to LRU immediately: so that if it
1369 * turns out to be an mlocked page, remove_migration_ptes() will
1370 * automatically build up the correct dst->mlock_count for it.
1371 *
1372 * We would like to do something similar for the old page, when
1373 * unsuccessful, and other cases when a page has been temporarily
1374 * isolated from the unevictable LRU: but this case is the easiest.
1375 */
1376 folio_add_lru(dst);
1377 if (old_page_state & PAGE_WAS_MLOCKED)
1378 lru_add_drain();
1379
1380 if (old_page_state & PAGE_WAS_MAPPED)
1381 remove_migration_ptes(src, dst, 0);
1382
1383 out_unlock_both:
1384 folio_unlock(dst);
1385 set_page_owner_migrate_reason(&dst->page, reason);
/*
 * If migration succeeded, drop our reference to dst. This will
 * not free the page, because the new page owner holds a reference.
 */
1391 folio_put(dst);
1392
1393 /*
1394 * A folio that has been migrated has all references removed
1395 * and will be freed.
1396 */
1397 list_del(&src->lru);
1398 /* Drop an anon_vma reference if we took one */
1399 if (anon_vma)
1400 put_anon_vma(anon_vma);
1401 folio_unlock(src);
1402 migrate_folio_done(src, reason);
1403
1404 return rc;
1405 out:
/*
 * A folio that has not been migrated will be restored to the
 * right list unless we want to retry.
 */
1410 if (rc == -EAGAIN) {
1411 list_add(&dst->lru, prev);
1412 __migrate_folio_record(dst, old_page_state, anon_vma);
1413 return rc;
1414 }
1415
1416 migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1417 anon_vma, true, ret);
1418 migrate_folio_undo_dst(dst, true, put_new_folio, private);
1419
1420 return rc;
1421 }
1422
/*
 * Counterpart of unmap_and_move_page() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepages.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference count of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on a page under migration,
 * because then the pte is replaced with a migration swap entry and the
 * direct I/O code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_folio_t get_new_folio,
1442 free_folio_t put_new_folio, unsigned long private,
1443 struct folio *src, int force, enum migrate_mode mode,
1444 int reason, struct list_head *ret)
1445 {
1446 struct folio *dst;
1447 int rc = -EAGAIN;
1448 int page_was_mapped = 0;
1449 struct anon_vma *anon_vma = NULL;
1450 struct address_space *mapping = NULL;
1451
1452 if (folio_ref_count(src) == 1) {
1453 /* page was freed from under us. So we are done. */
1454 folio_putback_hugetlb(src);
1455 return MIGRATEPAGE_SUCCESS;
1456 }
1457
1458 dst = get_new_folio(src, private);
1459 if (!dst)
1460 return -ENOMEM;
1461
1462 if (!folio_trylock(src)) {
1463 if (!force)
1464 goto out;
1465 switch (mode) {
1466 case MIGRATE_SYNC:
1467 break;
1468 default:
1469 goto out;
1470 }
1471 folio_lock(src);
1472 }
1473
1474 /*
1475 * Check for pages which are in the process of being freed. Without
1476 * folio_mapping() set, hugetlbfs specific move page routine will not
1477 * be called and we could leak usage counts for subpools.
1478 */
1479 if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1480 rc = -EBUSY;
1481 goto out_unlock;
1482 }
1483
1484 if (folio_test_anon(src))
1485 anon_vma = folio_get_anon_vma(src);
1486
1487 if (unlikely(!folio_trylock(dst)))
1488 goto put_anon;
1489
1490 if (folio_mapped(src)) {
1491 enum ttu_flags ttu = 0;
1492
1493 if (!folio_test_anon(src)) {
1494 /*
1495 * In shared mappings, try_to_unmap could potentially
1496 * call huge_pmd_unshare. Because of this, take
1497 * semaphore in write mode here and set TTU_RMAP_LOCKED
1498 * to let lower levels know we have taken the lock.
1499 */
1500 mapping = hugetlb_folio_mapping_lock_write(src);
1501 if (unlikely(!mapping))
1502 goto unlock_put_anon;
1503
1504 ttu = TTU_RMAP_LOCKED;
1505 }
1506
1507 try_to_migrate(src, ttu);
1508 page_was_mapped = 1;
1509
1510 if (ttu & TTU_RMAP_LOCKED)
1511 i_mmap_unlock_write(mapping);
1512 }
1513
1514 if (!folio_mapped(src))
1515 rc = move_to_new_folio(dst, src, mode);
1516
1517 if (page_was_mapped)
1518 remove_migration_ptes(src,
1519 rc == MIGRATEPAGE_SUCCESS ? dst : src, 0);
1520
1521 unlock_put_anon:
1522 folio_unlock(dst);
1523
1524 put_anon:
1525 if (anon_vma)
1526 put_anon_vma(anon_vma);
1527
1528 if (rc == MIGRATEPAGE_SUCCESS) {
1529 move_hugetlb_state(src, dst, reason);
1530 put_new_folio = NULL;
1531 }
1532
1533 out_unlock:
1534 folio_unlock(src);
1535 out:
1536 if (rc == MIGRATEPAGE_SUCCESS)
1537 folio_putback_hugetlb(src);
1538 else if (rc != -EAGAIN)
1539 list_move_tail(&src->lru, ret);
1540
1541 /*
1542 * If migration was not successful and there's a freeing callback,
1543 * return the folio to that special allocator. Otherwise, simply drop
1544 * our additional reference.
1545 */
1546 if (put_new_folio)
1547 put_new_folio(dst, private);
1548 else
1549 folio_put(dst);
1550
1551 return rc;
1552 }
1553
static inline int try_split_folio(struct folio *folio, struct list_head *split_folios,
1555 enum migrate_mode mode)
1556 {
1557 int rc;
1558
1559 if (mode == MIGRATE_ASYNC) {
1560 if (!folio_trylock(folio))
1561 return -EAGAIN;
1562 } else {
1563 folio_lock(folio);
1564 }
1565 rc = split_folio_to_list(folio, split_folios);
1566 folio_unlock(folio);
1567 if (!rc)
1568 list_move_tail(&folio->lru, split_folios);
1569
1570 return rc;
1571 }
1572
1573 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1574 #define NR_MAX_BATCHED_MIGRATION HPAGE_PMD_NR
1575 #else
1576 #define NR_MAX_BATCHED_MIGRATION 512
1577 #endif
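/*
 * A batch is capped at one PMD-sized THP's worth of base pages, or
 * 512 base pages when THP is not configured.
 */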
1578 #define NR_MAX_MIGRATE_PAGES_RETRY 10
1579 #define NR_MAX_MIGRATE_ASYNC_RETRY 3
1580 #define NR_MAX_MIGRATE_SYNC_RETRY \
1581 (NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
1582
1583 struct migrate_pages_stats {
1584 int nr_succeeded; /* Normal and large folios migrated successfully, in
1585 units of base pages */
1586 int nr_failed_pages; /* Normal and large folios failed to be migrated, in
1587 units of base pages. Untried folios aren't counted */
1588 int nr_thp_succeeded; /* THP migrated successfully */
1589 int nr_thp_failed; /* THP failed to be migrated */
1590 int nr_thp_split; /* THP split before migrating */
1591 int nr_split; /* Large folio (include THP) split before migrating */
1592 };
1593
/*
 * Returns the number of hugetlb folios that were not migrated, or an error
 * code after NR_MAX_MIGRATE_PAGES_RETRY attempts, or once no movable hugetlb
 * folios remain (the list has become empty, or no retryable hugetlb folios
 * are left on it). It is the caller's responsibility to call
 * putback_movable_pages() only if ret != 0.
 */
static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
1602 free_folio_t put_new_folio, unsigned long private,
1603 enum migrate_mode mode, int reason,
1604 struct migrate_pages_stats *stats,
1605 struct list_head *ret_folios)
1606 {
1607 int retry = 1;
1608 int nr_failed = 0;
1609 int nr_retry_pages = 0;
1610 int pass = 0;
1611 struct folio *folio, *folio2;
1612 int rc, nr_pages;
1613
1614 for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1615 retry = 0;
1616 nr_retry_pages = 0;
1617
1618 list_for_each_entry_safe(folio, folio2, from, lru) {
1619 if (!folio_test_hugetlb(folio))
1620 continue;
1621
1622 nr_pages = folio_nr_pages(folio);
1623
1624 cond_resched();
1625
/*
 * The migratability of hugepages depends on the architecture and
 * their size. This check is necessary because some callers of
 * hugepage migration, like soft offline and memory hotremove,
 * don't walk through page tables or check whether the hugepage is
 * pmd-based or not before kicking migration.
 */
1633 if (!hugepage_migration_supported(folio_hstate(folio))) {
1634 nr_failed++;
1635 stats->nr_failed_pages += nr_pages;
1636 list_move_tail(&folio->lru, ret_folios);
1637 continue;
1638 }
1639
1640 rc = unmap_and_move_huge_page(get_new_folio,
1641 put_new_folio, private,
1642 folio, pass > 2, mode,
1643 reason, ret_folios);
1644 /*
1645 * The rules are:
1646 * Success: hugetlb folio will be put back
1647 * -EAGAIN: stay on the from list
1648 * -ENOMEM: stay on the from list
1649 * Other errno: put on ret_folios list
1650 */
1651 switch(rc) {
1652 case -ENOMEM:
1653 /*
1654 * When memory is low, don't bother to try to migrate
1655 * other folios, just exit.
1656 */
1657 stats->nr_failed_pages += nr_pages + nr_retry_pages;
1658 return -ENOMEM;
1659 case -EAGAIN:
1660 retry++;
1661 nr_retry_pages += nr_pages;
1662 break;
1663 case MIGRATEPAGE_SUCCESS:
1664 stats->nr_succeeded += nr_pages;
1665 break;
1666 default:
1667 /*
1668 * Permanent failure (-EBUSY, etc.):
1669 * unlike -EAGAIN case, the failed folio is
1670 * removed from migration folio list and not
1671 * retried in the next outer loop.
1672 */
1673 nr_failed++;
1674 stats->nr_failed_pages += nr_pages;
1675 break;
1676 }
1677 }
1678 }
1679 /*
1680 * nr_failed is number of hugetlb folios failed to be migrated. After
1681 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
1682 * folios as failed.
1683 */
1684 nr_failed += retry;
1685 stats->nr_failed_pages += nr_retry_pages;
1686
1687 return nr_failed;
1688 }
1689
static void migrate_folios_move(struct list_head *src_folios,
1691 struct list_head *dst_folios,
1692 free_folio_t put_new_folio, unsigned long private,
1693 enum migrate_mode mode, int reason,
1694 struct list_head *ret_folios,
1695 struct migrate_pages_stats *stats,
1696 int *retry, int *thp_retry, int *nr_failed,
1697 int *nr_retry_pages)
1698 {
1699 struct folio *folio, *folio2, *dst, *dst2;
1700 bool is_thp;
1701 int nr_pages;
1702 int rc;
1703
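/*
 * The dst list was built in the same order as the src list, so the
 * two lists can be walked in lockstep.
 */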
1704 dst = list_first_entry(dst_folios, struct folio, lru);
1705 dst2 = list_next_entry(dst, lru);
1706 list_for_each_entry_safe(folio, folio2, src_folios, lru) {
1707 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1708 nr_pages = folio_nr_pages(folio);
1709
1710 cond_resched();
1711
1712 rc = migrate_folio_move(put_new_folio, private,
1713 folio, dst, mode,
1714 reason, ret_folios);
1715 /*
1716 * The rules are:
1717 * Success: folio will be freed
1718 * -EAGAIN: stay on the unmap_folios list
1719 * Other errno: put on ret_folios list
1720 */
1721 switch (rc) {
1722 case -EAGAIN:
1723 *retry += 1;
1724 *thp_retry += is_thp;
1725 *nr_retry_pages += nr_pages;
1726 break;
1727 case MIGRATEPAGE_SUCCESS:
1728 stats->nr_succeeded += nr_pages;
1729 stats->nr_thp_succeeded += is_thp;
1730 break;
1731 default:
1732 *nr_failed += 1;
1733 stats->nr_thp_failed += is_thp;
1734 stats->nr_failed_pages += nr_pages;
1735 break;
1736 }
1737 dst = dst2;
1738 dst2 = list_next_entry(dst, lru);
1739 }
1740 }
1741
static void migrate_folios_undo(struct list_head *src_folios,
1743 struct list_head *dst_folios,
1744 free_folio_t put_new_folio, unsigned long private,
1745 struct list_head *ret_folios)
1746 {
1747 struct folio *folio, *folio2, *dst, *dst2;
1748
1749 dst = list_first_entry(dst_folios, struct folio, lru);
1750 dst2 = list_next_entry(dst, lru);
1751 list_for_each_entry_safe(folio, folio2, src_folios, lru) {
1752 int old_page_state = 0;
1753 struct anon_vma *anon_vma = NULL;
1754
1755 __migrate_folio_extract(dst, &old_page_state, &anon_vma);
1756 migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1757 anon_vma, true, ret_folios);
1758 list_del(&dst->lru);
1759 migrate_folio_undo_dst(dst, true, put_new_folio, private);
1760 dst = dst2;
1761 dst2 = list_next_entry(dst, lru);
1762 }
1763 }
1764
1765 /*
1766  * migrate_pages_batch() first unmaps as many folios in the from list as
1767  * possible, then moves the unmapped folios.
1768  *
1769  * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on
1770  * a lock or bit while we have more than one folio locked, which may
1771  * cause deadlock (e.g., for the loop device). So, if mode !=
1772  * MIGRATE_ASYNC, the length of the from list must be <= 1.
1773  */
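/*
 * A simplified sketch of the flow below (illustrative only; the real code
 * additionally handles folio splitting, retry accounting and rc_saved):
 *
 *	for (pass = 0; pass < nr_pass && retry; pass++)
 *		for each folio on @from:
 *			migrate_folio_unmap()
 *			  MIGRATEPAGE_UNMAP -> unmap_folios + dst_folios
 *	try_to_unmap_flush();	 (one TLB flush for the whole batch)
 *	for (pass = 0; pass < nr_pass && retry; pass++)
 *		migrate_folios_move(&unmap_folios, &dst_folios, ...);
 *	migrate_folios_undo(&unmap_folios, &dst_folios, ...);	 (leftovers)
 */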
1774 static int migrate_pages_batch(struct list_head *from,
1775 new_folio_t get_new_folio, free_folio_t put_new_folio,
1776 unsigned long private, enum migrate_mode mode, int reason,
1777 struct list_head *ret_folios, struct list_head *split_folios,
1778 struct migrate_pages_stats *stats, int nr_pass)
1779 {
1780 int retry = 1;
1781 int thp_retry = 1;
1782 int nr_failed = 0;
1783 int nr_retry_pages = 0;
1784 int pass = 0;
1785 bool is_thp = false;
1786 bool is_large = false;
1787 struct folio *folio, *folio2, *dst = NULL;
1788 int rc, rc_saved = 0, nr_pages;
1789 LIST_HEAD(unmap_folios);
1790 LIST_HEAD(dst_folios);
1791 bool nosplit = (reason == MR_NUMA_MISPLACED);
1792
1793 VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1794 !list_empty(from) && !list_is_singular(from));
1795
1796 for (pass = 0; pass < nr_pass && retry; pass++) {
1797 retry = 0;
1798 thp_retry = 0;
1799 nr_retry_pages = 0;
1800
1801 list_for_each_entry_safe(folio, folio2, from, lru) {
1802 is_large = folio_test_large(folio);
1803 is_thp = folio_test_pmd_mappable(folio);
1804 nr_pages = folio_nr_pages(folio);
1805
1806 cond_resched();
1807
1808 /*
1809 * The rare folio on the deferred split list should
1810 			 * be split now. It should not count as a failure,
1811 			 * but we increment nr_failed because, without doing so,
1812 * migrate_pages() may report success with (split but
1813 * unmigrated) pages still on its fromlist; whereas it
1814 * always reports success when its fromlist is empty.
1815 * stats->nr_thp_failed should be increased too,
1816 * otherwise stats inconsistency will happen when
1817 * migrate_pages_batch is called via migrate_pages()
1818 * with MIGRATE_SYNC and MIGRATE_ASYNC.
1819 *
1820 			 * Only check the list without removing the folio
1821 			 * from it, since the folio can be on a
1822 			 * deferred_split_scan() local list and removing it
1823 			 * there could corrupt that local list. The folio split
1824 			 * below can handle it with the help of folio_ref_freeze().
1825 *
1826 * nr_pages > 2 is needed to avoid checking order-1
1827 * page cache folios. They exist, in contrast to
1828 * non-existent order-1 anonymous folios, and do not
1829 * use _deferred_list.
1830 */
1831 if (nr_pages > 2 &&
1832 !list_empty(&folio->_deferred_list) &&
1833 folio_test_partially_mapped(folio)) {
1834 if (!try_split_folio(folio, split_folios, mode)) {
1835 nr_failed++;
1836 stats->nr_thp_failed += is_thp;
1837 stats->nr_thp_split += is_thp;
1838 stats->nr_split++;
1839 continue;
1840 }
1841 }
1842
1843 /*
1844 			 * Large folio migration might be unsupported or
1845 			 * the allocation might fail, so we should retry
1846 			 * the same folio after splitting the large folio
1847 			 * into normal folios.
1848 *
1849 * Split folios are put in split_folios, and
1850 * we will migrate them after the rest of the
1851 * list is processed.
1852 */
1853 if (!thp_migration_supported() && is_thp) {
1854 nr_failed++;
1855 stats->nr_thp_failed++;
1856 if (!try_split_folio(folio, split_folios, mode)) {
1857 stats->nr_thp_split++;
1858 stats->nr_split++;
1859 continue;
1860 }
1861 stats->nr_failed_pages += nr_pages;
1862 list_move_tail(&folio->lru, ret_folios);
1863 continue;
1864 }
1865
1866 rc = migrate_folio_unmap(get_new_folio, put_new_folio,
1867 private, folio, &dst, mode, reason,
1868 ret_folios);
1869 /*
1870 * The rules are:
1871 * Success: folio will be freed
1872 * Unmap: folio will be put on unmap_folios list,
1873 * dst folio put on dst_folios list
1874 * -EAGAIN: stay on the from list
1875 * -ENOMEM: stay on the from list
1876 * Other errno: put on ret_folios list
1877 */
1878 			switch (rc) {
1879 case -ENOMEM:
1880 /*
1881 				 * When memory is low, don't bother trying to migrate
1882 				 * other folios; move the unmapped folios, then exit.
1883 */
1884 nr_failed++;
1885 stats->nr_thp_failed += is_thp;
1886 /* Large folio NUMA faulting doesn't split to retry. */
1887 if (is_large && !nosplit) {
1888 int ret = try_split_folio(folio, split_folios, mode);
1889
1890 if (!ret) {
1891 stats->nr_thp_split += is_thp;
1892 stats->nr_split++;
1893 break;
1894 } else if (reason == MR_LONGTERM_PIN &&
1895 ret == -EAGAIN) {
1896 /*
1897 * Try again to split large folio to
1898 * mitigate the failure of longterm pinning.
1899 */
1900 retry++;
1901 thp_retry += is_thp;
1902 nr_retry_pages += nr_pages;
1903 /* Undo duplicated failure counting. */
1904 nr_failed--;
1905 stats->nr_thp_failed -= is_thp;
1906 break;
1907 }
1908 }
1909
1910 stats->nr_failed_pages += nr_pages + nr_retry_pages;
1911 				/* nr_failed isn't updated here because it won't be used */
1912 stats->nr_thp_failed += thp_retry;
1913 rc_saved = rc;
1914 if (list_empty(&unmap_folios))
1915 goto out;
1916 else
1917 goto move;
1918 case -EAGAIN:
1919 retry++;
1920 thp_retry += is_thp;
1921 nr_retry_pages += nr_pages;
1922 break;
1923 case MIGRATEPAGE_SUCCESS:
1924 stats->nr_succeeded += nr_pages;
1925 stats->nr_thp_succeeded += is_thp;
1926 break;
1927 case MIGRATEPAGE_UNMAP:
1928 list_move_tail(&folio->lru, &unmap_folios);
1929 list_add_tail(&dst->lru, &dst_folios);
1930 break;
1931 default:
1932 /*
1933 * Permanent failure (-EBUSY, etc.):
1934 * unlike -EAGAIN case, the failed folio is
1935 * removed from migration folio list and not
1936 * retried in the next outer loop.
1937 */
1938 nr_failed++;
1939 stats->nr_thp_failed += is_thp;
1940 stats->nr_failed_pages += nr_pages;
1941 break;
1942 }
1943 }
1944 }
1945 nr_failed += retry;
1946 stats->nr_thp_failed += thp_retry;
1947 stats->nr_failed_pages += nr_retry_pages;
1948 move:
1949 /* Flush TLBs for all unmapped folios */
1950 try_to_unmap_flush();
1951
1952 retry = 1;
1953 for (pass = 0; pass < nr_pass && retry; pass++) {
1954 retry = 0;
1955 thp_retry = 0;
1956 nr_retry_pages = 0;
1957
1958 /* Move the unmapped folios */
1959 migrate_folios_move(&unmap_folios, &dst_folios,
1960 put_new_folio, private, mode, reason,
1961 ret_folios, stats, &retry, &thp_retry,
1962 &nr_failed, &nr_retry_pages);
1963 }
1964 nr_failed += retry;
1965 stats->nr_thp_failed += thp_retry;
1966 stats->nr_failed_pages += nr_retry_pages;
1967
1968 rc = rc_saved ? : nr_failed;
1969 out:
1970 /* Cleanup remaining folios */
1971 migrate_folios_undo(&unmap_folios, &dst_folios,
1972 put_new_folio, private, ret_folios);
1973
1974 return rc;
1975 }
1976
1977 static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
1978 free_folio_t put_new_folio, unsigned long private,
1979 enum migrate_mode mode, int reason,
1980 struct list_head *ret_folios, struct list_head *split_folios,
1981 struct migrate_pages_stats *stats)
1982 {
1983 int rc, nr_failed = 0;
1984 LIST_HEAD(folios);
1985 struct migrate_pages_stats astats;
1986
1987 memset(&astats, 0, sizeof(astats));
1988 /* Try to migrate in batch with MIGRATE_ASYNC mode firstly */
1989 rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
1990 reason, &folios, split_folios, &astats,
1991 NR_MAX_MIGRATE_ASYNC_RETRY);
1992 stats->nr_succeeded += astats.nr_succeeded;
1993 stats->nr_thp_succeeded += astats.nr_thp_succeeded;
1994 stats->nr_thp_split += astats.nr_thp_split;
1995 stats->nr_split += astats.nr_split;
1996 if (rc < 0) {
1997 stats->nr_failed_pages += astats.nr_failed_pages;
1998 stats->nr_thp_failed += astats.nr_thp_failed;
1999 list_splice_tail(&folios, ret_folios);
2000 return rc;
2001 }
2002 stats->nr_thp_failed += astats.nr_thp_split;
2003 /*
2004 * Do not count rc, as pages will be retried below.
2005 * Count nr_split only, since it includes nr_thp_split.
2006 */
2007 nr_failed += astats.nr_split;
2008 /*
2009 	 * Fall back to migrating all failed folios one by one synchronously.
2010 	 * All failed folios except split THPs will be retried, so their
2011 	 * failure isn't counted.
2012 */
2013 list_splice_tail_init(&folios, from);
2014 while (!list_empty(from)) {
2015 list_move(from->next, &folios);
2016 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
2017 private, mode, reason, ret_folios,
2018 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
2019 list_splice_tail_init(&folios, ret_folios);
2020 if (rc < 0)
2021 return rc;
2022 nr_failed += rc;
2023 }
2024
2025 return nr_failed;
2026 }
2027
2028 /*
2029 * migrate_pages - migrate the folios specified in a list, to the free folios
2030 * supplied as the target for the page migration
2031 *
2032 * @from: The list of folios to be migrated.
2033 * @get_new_folio: The function used to allocate free folios to be used
2034 * as the target of the folio migration.
2035 * @put_new_folio: The function used to free target folios if migration
2036 * fails, or NULL if no special handling is necessary.
2037 * @private: Private data to be passed on to get_new_folio()
2038 * @mode: The migration mode that specifies the constraints for
2039 * folio migration, if any.
2040 * @reason: The reason for folio migration.
2041 * @ret_succeeded: Set to the number of folios migrated successfully if
2042 * the caller passes a non-NULL pointer.
2043 *
2044  * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
2045  * are movable any more because the list has become empty or because no
2046  * retryable folios remain. It is the caller's responsibility to call
2047  * putback_movable_pages() only if ret != 0.
2048  *
2049  * Returns the number of {normal folio, large folio, hugetlb} folios that were
2050  * not migrated, or an error code. Each split large folio is counted as one
2051  * non-migrated large folio, no matter how many of its split folios are
2052  * migrated successfully.
2053 */
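/*
 * Minimal usage sketch (illustrative only; it mirrors the pattern of
 * do_move_pages_to_node() below, where "mtc" is a caller-provided
 * struct migration_target_control):
 *
 *	LIST_HEAD(pagelist);
 *
 *	... isolate the folios to be migrated onto &pagelist ...
 *	err = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL,
 *			    NULL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */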
2054 int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
2055 free_folio_t put_new_folio, unsigned long private,
2056 enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
2057 {
2058 int rc, rc_gather;
2059 int nr_pages;
2060 struct folio *folio, *folio2;
2061 LIST_HEAD(folios);
2062 LIST_HEAD(ret_folios);
2063 LIST_HEAD(split_folios);
2064 struct migrate_pages_stats stats;
2065
2066 trace_mm_migrate_pages_start(mode, reason);
2067
2068 memset(&stats, 0, sizeof(stats));
2069
2070 rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
2071 mode, reason, &stats, &ret_folios);
2072 if (rc_gather < 0)
2073 goto out;
2074
2075 again:
2076 nr_pages = 0;
2077 list_for_each_entry_safe(folio, folio2, from, lru) {
2078 		/* Retried hugetlb folios will be kept in the list */
2079 if (folio_test_hugetlb(folio)) {
2080 list_move_tail(&folio->lru, &ret_folios);
2081 continue;
2082 }
2083
2084 nr_pages += folio_nr_pages(folio);
2085 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
2086 break;
2087 }
2088 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
2089 list_cut_before(&folios, from, &folio2->lru);
2090 else
2091 list_splice_init(from, &folios);
2092 if (mode == MIGRATE_ASYNC)
2093 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
2094 private, mode, reason, &ret_folios,
2095 &split_folios, &stats,
2096 NR_MAX_MIGRATE_PAGES_RETRY);
2097 else
2098 rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
2099 private, mode, reason, &ret_folios,
2100 &split_folios, &stats);
2101 list_splice_tail_init(&folios, &ret_folios);
2102 if (rc < 0) {
2103 rc_gather = rc;
2104 list_splice_tail(&split_folios, &ret_folios);
2105 goto out;
2106 }
2107 if (!list_empty(&split_folios)) {
2108 /*
2109 		 * Failure isn't counted since all split folios of a large folio
2110 		 * are counted as 1 failure already. And we only try to migrate
2111 		 * with minimal effort, forcing MIGRATE_ASYNC mode and one retry.
2112 */
2113 migrate_pages_batch(&split_folios, get_new_folio,
2114 put_new_folio, private, MIGRATE_ASYNC, reason,
2115 &ret_folios, NULL, &stats, 1);
2116 list_splice_tail_init(&split_folios, &ret_folios);
2117 }
2118 rc_gather += rc;
2119 if (!list_empty(from))
2120 goto again;
2121 out:
2122 /*
2123 	 * Put the permanently failed folios back on the migration list;
2124 	 * they will be put back on the right list by the caller.
2125 */
2126 list_splice(&ret_folios, from);
2127
2128 /*
2129 * Return 0 in case all split folios of fail-to-migrate large folios
2130 * are migrated successfully.
2131 */
2132 if (list_empty(from))
2133 rc_gather = 0;
2134
2135 count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
2136 count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
2137 count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
2138 count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
2139 count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
2140 trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
2141 stats.nr_thp_succeeded, stats.nr_thp_failed,
2142 stats.nr_thp_split, stats.nr_split, mode,
2143 reason);
2144
2145 if (ret_succeeded)
2146 *ret_succeeded = stats.nr_succeeded;
2147
2148 return rc_gather;
2149 }
2150
2151 struct folio *alloc_migration_target(struct folio *src, unsigned long private)
2152 {
2153 struct migration_target_control *mtc;
2154 gfp_t gfp_mask;
2155 unsigned int order = 0;
2156 int nid;
2157 int zidx;
2158
2159 mtc = (struct migration_target_control *)private;
2160 gfp_mask = mtc->gfp_mask;
2161 nid = mtc->nid;
2162 if (nid == NUMA_NO_NODE)
2163 nid = folio_nid(src);
2164
2165 if (folio_test_hugetlb(src)) {
2166 struct hstate *h = folio_hstate(src);
2167
2168 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
2169 return alloc_hugetlb_folio_nodemask(h, nid,
2170 mtc->nmask, gfp_mask,
2171 htlb_allow_alloc_fallback(mtc->reason));
2172 }
2173
2174 if (folio_test_large(src)) {
2175 /*
2176 * clear __GFP_RECLAIM to make the migration callback
2177 * consistent with regular THP allocations.
2178 */
2179 gfp_mask &= ~__GFP_RECLAIM;
2180 gfp_mask |= GFP_TRANSHUGE;
2181 order = folio_order(src);
2182 }
2183 zidx = zone_idx(folio_zone(src));
2184 if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
2185 gfp_mask |= __GFP_HIGHMEM;
2186
2187 return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
2188 }
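/*
 * Callers hand a struct migration_target_control to this callback through
 * the opaque @private argument, e.g. (sketch based on
 * do_move_pages_to_node() below):
 *
 *	struct migration_target_control mtc = {
 *		.nid = node,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
 *		.reason = MR_SYSCALL,
 *	};
 *
 *	migrate_pages(pagelist, alloc_migration_target, NULL,
 *		      (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
 */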
2189
2190 #ifdef CONFIG_NUMA
2191
2192 static int store_status(int __user *status, int start, int value, int nr)
2193 {
2194 while (nr-- > 0) {
2195 if (put_user(value, status + start))
2196 return -EFAULT;
2197 start++;
2198 }
2199
2200 return 0;
2201 }
2202
2203 static int do_move_pages_to_node(struct list_head *pagelist, int node)
2204 {
2205 int err;
2206 struct migration_target_control mtc = {
2207 .nid = node,
2208 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2209 .reason = MR_SYSCALL,
2210 };
2211
2212 err = migrate_pages(pagelist, alloc_migration_target, NULL,
2213 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
2214 if (err)
2215 putback_movable_pages(pagelist);
2216 return err;
2217 }
2218
2219 static int __add_folio_for_migration(struct folio *folio, int node,
2220 struct list_head *pagelist, bool migrate_all)
2221 {
2222 if (is_zero_folio(folio) || is_huge_zero_folio(folio))
2223 return -EFAULT;
2224
2225 if (folio_is_zone_device(folio))
2226 return -ENOENT;
2227
2228 if (folio_nid(folio) == node)
2229 return 0;
2230
2231 if (folio_likely_mapped_shared(folio) && !migrate_all)
2232 return -EACCES;
2233
2234 if (folio_test_hugetlb(folio)) {
2235 if (folio_isolate_hugetlb(folio, pagelist))
2236 return 1;
2237 } else if (folio_isolate_lru(folio)) {
2238 list_add_tail(&folio->lru, pagelist);
2239 node_stat_mod_folio(folio,
2240 NR_ISOLATED_ANON + folio_is_file_lru(folio),
2241 folio_nr_pages(folio));
2242 return 1;
2243 }
2244 return -EBUSY;
2245 }
2246
2247 /*
2248 * Resolves the given address to a struct folio, isolates it from the LRU and
2249  * puts it on the given pagelist.
2250 * Returns:
2251 * errno - if the folio cannot be found/isolated
2252 * 0 - when it doesn't have to be migrated because it is already on the
2253 * target node
2254 * 1 - when it has been queued
2255 */
2256 static int add_folio_for_migration(struct mm_struct *mm, const void __user *p,
2257 int node, struct list_head *pagelist, bool migrate_all)
2258 {
2259 struct vm_area_struct *vma;
2260 struct folio_walk fw;
2261 struct folio *folio;
2262 unsigned long addr;
2263 int err = -EFAULT;
2264
2265 mmap_read_lock(mm);
2266 addr = (unsigned long)untagged_addr_remote(mm, p);
2267
2268 vma = vma_lookup(mm, addr);
2269 if (vma && vma_migratable(vma)) {
2270 folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
2271 if (folio) {
2272 err = __add_folio_for_migration(folio, node, pagelist,
2273 migrate_all);
2274 folio_walk_end(&fw, vma);
2275 } else {
2276 err = -ENOENT;
2277 }
2278 }
2279 mmap_read_unlock(mm);
2280 return err;
2281 }
2282
2283 static int move_pages_and_store_status(int node,
2284 struct list_head *pagelist, int __user *status,
2285 int start, int i, unsigned long nr_pages)
2286 {
2287 int err;
2288
2289 if (list_empty(pagelist))
2290 return 0;
2291
2292 err = do_move_pages_to_node(pagelist, node);
2293 if (err) {
2294 /*
2295 		 * A positive err means the number of pages
2296 		 * that failed to migrate. Since we are going
2297 		 * to abort and return the number of
2298 		 * non-migrated pages, we need to include the
2299 		 * rest of the nr_pages that have not been
2300 		 * attempted as well.
2301 */
2302 if (err > 0)
2303 err += nr_pages - i;
2304 return err;
2305 }
2306 return store_status(status, start, node, i - start);
2307 }
2308
2309 /*
2310  * Migrate an array of page addresses onto an array of nodes and fill
2311  * the corresponding status array.
2312 */
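/*
 * For example (illustrative values): with nr_pages == 3,
 * pages == {p0, p1, p2} and nodes == {1, 1, 0}, p0 and p1 are queued and
 * migrated to node 1 in one batch, then p2 to node 0. On return, each
 * status[i] holds the node the page resides on, or a negative errno if
 * that page could not be looked up, isolated or migrated.
 */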
2313 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
2314 unsigned long nr_pages,
2315 const void __user * __user *pages,
2316 const int __user *nodes,
2317 int __user *status, int flags)
2318 {
2319 compat_uptr_t __user *compat_pages = (void __user *)pages;
2320 int current_node = NUMA_NO_NODE;
2321 LIST_HEAD(pagelist);
2322 int start, i;
2323 int err = 0, err1;
2324
2325 lru_cache_disable();
2326
2327 for (i = start = 0; i < nr_pages; i++) {
2328 const void __user *p;
2329 int node;
2330
2331 err = -EFAULT;
2332 if (in_compat_syscall()) {
2333 compat_uptr_t cp;
2334
2335 if (get_user(cp, compat_pages + i))
2336 goto out_flush;
2337
2338 p = compat_ptr(cp);
2339 } else {
2340 if (get_user(p, pages + i))
2341 goto out_flush;
2342 }
2343 if (get_user(node, nodes + i))
2344 goto out_flush;
2345
2346 err = -ENODEV;
2347 if (node < 0 || node >= MAX_NUMNODES)
2348 goto out_flush;
2349 if (!node_state(node, N_MEMORY))
2350 goto out_flush;
2351
2352 err = -EACCES;
2353 if (!node_isset(node, task_nodes))
2354 goto out_flush;
2355
2356 if (current_node == NUMA_NO_NODE) {
2357 current_node = node;
2358 start = i;
2359 } else if (node != current_node) {
2360 err = move_pages_and_store_status(current_node,
2361 &pagelist, status, start, i, nr_pages);
2362 if (err)
2363 goto out;
2364 start = i;
2365 current_node = node;
2366 }
2367
2368 /*
2369 		 * Errors in the page lookup or isolation are not fatal and we
2370 		 * simply report them via status.
2371 */
2372 err = add_folio_for_migration(mm, p, current_node, &pagelist,
2373 flags & MPOL_MF_MOVE_ALL);
2374
2375 if (err > 0) {
2376 /* The page is successfully queued for migration */
2377 continue;
2378 }
2379
2380 /*
2381 * The move_pages() man page does not have an -EEXIST choice, so
2382 * use -EFAULT instead.
2383 */
2384 if (err == -EEXIST)
2385 err = -EFAULT;
2386
2387 /*
2388 * If the page is already on the target node (!err), store the
2389 * node, otherwise, store the err.
2390 */
2391 err = store_status(status, i, err ? : current_node, 1);
2392 if (err)
2393 goto out_flush;
2394
2395 err = move_pages_and_store_status(current_node, &pagelist,
2396 status, start, i, nr_pages);
2397 if (err) {
2398 /* We have accounted for page i */
2399 if (err > 0)
2400 err--;
2401 goto out;
2402 }
2403 current_node = NUMA_NO_NODE;
2404 }
2405 out_flush:
2406 /* Make sure we do not overwrite the existing error */
2407 err1 = move_pages_and_store_status(current_node, &pagelist,
2408 status, start, i, nr_pages);
2409 if (err >= 0)
2410 err = err1;
2411 out:
2412 lru_cache_enable();
2413 return err;
2414 }
2415
2416 /*
2417  * Determine the nodes of an array of pages and store them in a status array.
2418 */
2419 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2420 const void __user **pages, int *status)
2421 {
2422 unsigned long i;
2423
2424 mmap_read_lock(mm);
2425
2426 for (i = 0; i < nr_pages; i++) {
2427 unsigned long addr = (unsigned long)(*pages);
2428 struct vm_area_struct *vma;
2429 struct folio_walk fw;
2430 struct folio *folio;
2431 int err = -EFAULT;
2432
2433 vma = vma_lookup(mm, addr);
2434 if (!vma)
2435 goto set_status;
2436
2437 folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
2438 if (folio) {
2439 if (is_zero_folio(folio) || is_huge_zero_folio(folio))
2440 err = -EFAULT;
2441 else if (folio_is_zone_device(folio))
2442 err = -ENOENT;
2443 else
2444 err = folio_nid(folio);
2445 folio_walk_end(&fw, vma);
2446 } else {
2447 err = -ENOENT;
2448 }
2449 set_status:
2450 *status = err;
2451
2452 pages++;
2453 status++;
2454 }
2455
2456 mmap_read_unlock(mm);
2457 }
2458
2459 static int get_compat_pages_array(const void __user *chunk_pages[],
2460 const void __user * __user *pages,
2461 unsigned long chunk_nr)
2462 {
2463 compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2464 compat_uptr_t p;
2465 int i;
2466
2467 for (i = 0; i < chunk_nr; i++) {
2468 if (get_user(p, pages32 + i))
2469 return -EFAULT;
2470 chunk_pages[i] = compat_ptr(p);
2471 }
2472
2473 return 0;
2474 }
2475
2476 /*
2477  * Determine the nodes of a user array of pages and store them in
2478  * a user status array.
2479 */
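/*
 * The user array is processed in chunks of DO_PAGES_STAT_CHUNK_NR
 * entries; e.g. (illustrative) nr_pages == 40 is handled as chunks of
 * 16, 16 and 8, each bounced through the on-stack arrays below.
 */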
2480 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2481 const void __user * __user *pages,
2482 int __user *status)
2483 {
2484 #define DO_PAGES_STAT_CHUNK_NR 16UL
2485 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2486 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
2487
2488 while (nr_pages) {
2489 unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
2490
2491 if (in_compat_syscall()) {
2492 if (get_compat_pages_array(chunk_pages, pages,
2493 chunk_nr))
2494 break;
2495 } else {
2496 if (copy_from_user(chunk_pages, pages,
2497 chunk_nr * sizeof(*chunk_pages)))
2498 break;
2499 }
2500
2501 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2502
2503 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2504 break;
2505
2506 pages += chunk_nr;
2507 status += chunk_nr;
2508 nr_pages -= chunk_nr;
2509 }
2510 return nr_pages ? -EFAULT : 0;
2511 }
2512
2513 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
2514 {
2515 struct task_struct *task;
2516 struct mm_struct *mm;
2517
2518 /*
2519 	 * There is no need to check whether the current process has the right
2520 	 * to modify the specified process when they are the same.
2521 */
2522 if (!pid) {
2523 mmget(current->mm);
2524 *mem_nodes = cpuset_mems_allowed(current);
2525 return current->mm;
2526 }
2527
2528 task = find_get_task_by_vpid(pid);
2529 	if (!task)
2530 		return ERR_PTR(-ESRCH);
2532
2533 /*
2534 * Check if this process has the right to modify the specified
2535 * process. Use the regular "ptrace_may_access()" checks.
2536 */
2537 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2538 mm = ERR_PTR(-EPERM);
2539 goto out;
2540 }
2541
2542 mm = ERR_PTR(security_task_movememory(task));
2543 if (IS_ERR(mm))
2544 goto out;
2545 *mem_nodes = cpuset_mems_allowed(task);
2546 mm = get_task_mm(task);
2547 out:
2548 put_task_struct(task);
2549 if (!mm)
2550 mm = ERR_PTR(-EINVAL);
2551 return mm;
2552 }
2553
2554 /*
2555  * Move a list of pages in the address space of the process specified by
2556  * @pid (or of the current process when @pid is 0).
2557 */
2558 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2559 const void __user * __user *pages,
2560 const int __user *nodes,
2561 int __user *status, int flags)
2562 {
2563 struct mm_struct *mm;
2564 int err;
2565 nodemask_t task_nodes;
2566
2567 /* Check flags */
2568 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2569 return -EINVAL;
2570
2571 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2572 return -EPERM;
2573
2574 mm = find_mm_struct(pid, &task_nodes);
2575 if (IS_ERR(mm))
2576 return PTR_ERR(mm);
2577
2578 if (nodes)
2579 err = do_pages_move(mm, task_nodes, nr_pages, pages,
2580 nodes, status, flags);
2581 else
2582 err = do_pages_stat(mm, nr_pages, pages, status);
2583
2584 mmput(mm);
2585 return err;
2586 }
2587
2588 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2589 const void __user * __user *, pages,
2590 const int __user *, nodes,
2591 int __user *, status, int, flags)
2592 {
2593 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2594 }
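/*
 * Userspace usage sketch (illustrative only; see the move_pages(2) man
 * page for the full interface). With pid == 0, the calling process's own
 * pages are moved:
 *
 *	void *pages[1] = { addr };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *	long rc = syscall(__NR_move_pages, 0, 1, pages, nodes, status,
 *			  MPOL_MF_MOVE);
 *
 * On success rc is 0 and status[0] holds the node the page now resides
 * on (here 1), or a negative errno for that particular page.
 */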
2595
2596 #ifdef CONFIG_NUMA_BALANCING
2597 /*
2598 * Returns true if this is a safe migration target node for misplaced NUMA
2599  * pages. Currently it only checks the watermarks, which is crude.
2600 */
2601 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2602 unsigned long nr_migrate_pages)
2603 {
2604 int z;
2605
2606 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2607 struct zone *zone = pgdat->node_zones + z;
2608
2609 if (!managed_zone(zone))
2610 continue;
2611
2612 		/* Avoid waking kswapd by allocating nr_migrate_pages pages. */
2613 if (!zone_watermark_ok(zone, 0,
2614 high_wmark_pages(zone) +
2615 nr_migrate_pages,
2616 ZONE_MOVABLE, ALLOC_CMA))
2617 continue;
2618 return true;
2619 }
2620 return false;
2621 }
2622
2623 static struct folio *alloc_misplaced_dst_folio(struct folio *src,
2624 unsigned long data)
2625 {
2626 int nid = (int) data;
2627 int order = folio_order(src);
2628 gfp_t gfp = __GFP_THISNODE;
2629
2630 if (order > 0)
2631 gfp |= GFP_TRANSHUGE_LIGHT;
2632 else {
2633 gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2634 __GFP_NOWARN;
2635 gfp &= ~__GFP_RECLAIM;
2636 }
2637 return __folio_alloc_node(gfp, order, nid);
2638 }
2639
2640 /*
2641 * Prepare for calling migrate_misplaced_folio() by isolating the folio if
2642 * permitted. Must be called with the PTL still held.
2643 */
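/*
 * Typical caller pairing (illustrative sketch of how a NUMA hinting
 * fault handler might use it; "ptep"/"ptl" are placeholder names and the
 * PTL is dropped between the two calls):
 *
 *	if (!migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
 *		pte_unmap_unlock(ptep, ptl);
 *		migrate_misplaced_folio(folio, target_nid);
 *	}
 */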
2644 int migrate_misplaced_folio_prepare(struct folio *folio,
2645 struct vm_area_struct *vma, int node)
2646 {
2647 int nr_pages = folio_nr_pages(folio);
2648 pg_data_t *pgdat = NODE_DATA(node);
2649
2650 if (folio_is_file_lru(folio)) {
2651 /*
2652 * Do not migrate file folios that are mapped in multiple
2653 * processes with execute permissions as they are probably
2654 * shared libraries.
2655 *
2656 * See folio_likely_mapped_shared() on possible imprecision
2657 * when we cannot easily detect if a folio is shared.
2658 */
2659 if ((vma->vm_flags & VM_EXEC) &&
2660 folio_likely_mapped_shared(folio))
2661 return -EACCES;
2662
2663 /*
2664 * Do not migrate dirty folios as not all filesystems can move
2665 		 * dirty folios in MIGRATE_ASYNC mode, which is a waste of
2666 * cycles.
2667 */
2668 if (folio_test_dirty(folio))
2669 return -EAGAIN;
2670 }
2671
2672 /* Avoid migrating to a node that is nearly full */
2673 if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2674 int z;
2675
2676 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2677 return -EAGAIN;
2678 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2679 if (managed_zone(pgdat->node_zones + z))
2680 break;
2681 }
2682
2683 /*
2684 * If there are no managed zones, it should not proceed
2685 * further.
2686 */
2687 if (z < 0)
2688 return -EAGAIN;
2689
2690 wakeup_kswapd(pgdat->node_zones + z, 0,
2691 folio_order(folio), ZONE_MOVABLE);
2692 return -EAGAIN;
2693 }
2694
2695 if (!folio_isolate_lru(folio))
2696 return -EAGAIN;
2697
2698 node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
2699 nr_pages);
2700 return 0;
2701 }
2702
2703 /*
2704 * Attempt to migrate a misplaced folio to the specified destination
2705  * node. The caller is expected to have isolated the folio by calling
2706  * migrate_misplaced_folio_prepare(), which will result in an
2707  * elevated reference count on the folio. This function will un-isolate
2708  * the folio, dropping that reference before returning.
2709 */
2710 int migrate_misplaced_folio(struct folio *folio, int node)
2711 {
2712 pg_data_t *pgdat = NODE_DATA(node);
2713 int nr_remaining;
2714 unsigned int nr_succeeded;
2715 LIST_HEAD(migratepages);
2716 struct mem_cgroup *memcg = get_mem_cgroup_from_folio(folio);
2717 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
2718
2719 list_add(&folio->lru, &migratepages);
2720 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
2721 NULL, node, MIGRATE_ASYNC,
2722 MR_NUMA_MISPLACED, &nr_succeeded);
2723 if (nr_remaining && !list_empty(&migratepages))
2724 putback_movable_pages(&migratepages);
2725 if (nr_succeeded) {
2726 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2727 count_memcg_events(memcg, NUMA_PAGE_MIGRATE, nr_succeeded);
2728 if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)
2729 && !node_is_toptier(folio_nid(folio))
2730 && node_is_toptier(node))
2731 mod_lruvec_state(lruvec, PGPROMOTE_SUCCESS, nr_succeeded);
2732 }
2733 mem_cgroup_put(memcg);
2734 BUG_ON(!list_empty(&migratepages));
2735 return nr_remaining ? -EAGAIN : 0;
2736 }
2737 #endif /* CONFIG_NUMA_BALANCING */
2738 #endif /* CONFIG_NUMA */
2739