// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/gfp.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
#include <linux/memory.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>
#include <linux/pagewalk.h>

#include <asm/tlbflush.h>

#include <trace/events/migrate.h>

#include "internal.h"
#include "swap.h"

static const struct movable_operations *offline_movable_ops;
static const struct movable_operations *zsmalloc_movable_ops;

int set_movable_ops(const struct movable_operations *ops, enum pagetype type)
{
        /*
         * We only allow for selected types and don't handle concurrent
         * registration attempts yet.
         */
        switch (type) {
        case PGTY_offline:
                if (offline_movable_ops && ops)
                        return -EBUSY;
                offline_movable_ops = ops;
                break;
        case PGTY_zsmalloc:
                if (zsmalloc_movable_ops && ops)
                        return -EBUSY;
                zsmalloc_movable_ops = ops;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(set_movable_ops);
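
/*
 * Example (sketch, not taken from an in-tree user; the names below are
 * hypothetical): a driver registers its ops once at init and unregisters
 * by passing NULL on teardown, since the assignment above is unconditional
 * when ops == NULL:
 *
 *	err = set_movable_ops(&balloon_mops, PGTY_offline);
 *	...
 *	set_movable_ops(NULL, PGTY_offline);
 */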

static const struct movable_operations *page_movable_ops(struct page *page)
{
        VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);

        /*
         * If we enable page migration for a page of a certain type by marking
         * it as movable, the page type must be sticky until the page gets freed
         * back to the buddy.
         */
        if (PageOffline(page))
                /* Only balloon compaction sets PageOffline pages movable. */
                return offline_movable_ops;
        if (PageZsmalloc(page))
                return zsmalloc_movable_ops;

        return NULL;
}

/**
 * isolate_movable_ops_page - isolate a movable_ops page for migration
 * @page: The page.
 * @mode: The isolation mode.
 *
 * Try to isolate a movable_ops page for migration. Will fail if the page is
 * not a movable_ops page, if the page is already isolated for migration
 * or if the page was just released by its owner.
 *
 * Once isolated, the page cannot get freed until it is either putback
 * or migrated.
 *
 * Returns true if isolation succeeded, otherwise false.
 */
bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
{
        /*
         * TODO: these pages will not be folios in the future. All
         * folio dependencies will have to be removed.
         */
        struct folio *folio = folio_get_nontail_page(page);
        const struct movable_operations *mops;

        /*
         * Avoid burning cycles with pages that are yet under __free_pages(),
         * or just got freed under us.
         *
         * In case we 'win' a race for a movable page being freed under us and
         * raise its refcount preventing __free_pages() from doing its job,
         * the folio_put() at the end of this block will take care of
         * releasing this page, thus avoiding a nasty leakage.
         */
        if (!folio)
                goto out;

        /*
         * Check for movable_ops pages before taking the page lock because
         * we use non-atomic bitops on newly allocated page flags, so
         * unconditionally grabbing the lock ruins the page owner's side.
         *
         * Note that once a page has movable_ops, it will stay that way
         * until the page was freed.
         */
        if (unlikely(!page_has_movable_ops(page)))
                goto out_putfolio;

        /*
         * As movable pages are not isolated from LRU lists, concurrent
         * compaction threads can race against page migration functions
         * as well as race against a page being released.
         *
         * In order to avoid having an already isolated movable page
         * being (wrongly) re-isolated while it is under migration,
         * or to avoid attempting to isolate pages being released,
         * let's be sure we have the page lock
         * before proceeding with the movable page isolation steps.
         */
        if (unlikely(!folio_trylock(folio)))
                goto out_putfolio;

        VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
        if (PageMovableOpsIsolated(page))
                goto out_no_isolated;

        mops = page_movable_ops(page);
        if (WARN_ON_ONCE(!mops))
                goto out_no_isolated;

        if (!mops->isolate_page(page, mode))
                goto out_no_isolated;

        /* Driver shouldn't use the isolated flag */
        VM_WARN_ON_ONCE_PAGE(PageMovableOpsIsolated(page), page);
        SetPageMovableOpsIsolated(page);
        folio_unlock(folio);

        return true;

out_no_isolated:
        folio_unlock(folio);
out_putfolio:
        folio_put(folio);
out:
        return false;
}

/**
 * putback_movable_ops_page - putback an isolated movable_ops page
 * @page: The isolated page.
 *
 * Putback an isolated movable_ops page.
 *
 * After the page was putback, it might get freed instantly.
 */
static void putback_movable_ops_page(struct page *page)
{
        /*
         * TODO: these pages will not be folios in the future. All
         * folio dependencies will have to be removed.
         */
        struct folio *folio = page_folio(page);

        VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
        VM_WARN_ON_ONCE_PAGE(!PageMovableOpsIsolated(page), page);
        folio_lock(folio);
        page_movable_ops(page)->putback_page(page);
        ClearPageMovableOpsIsolated(page);
        folio_unlock(folio);
        folio_put(folio);
}

/**
 * migrate_movable_ops_page - migrate an isolated movable_ops page
 * @dst: The destination page.
 * @src: The source page.
 * @mode: The migration mode.
 *
 * Migrate an isolated movable_ops page.
 *
 * If the src page was already released by its owner, the src page is
 * un-isolated (putback) and migration succeeds; the migration core will be the
 * owner of both pages.
 *
 * If the src page was not released by its owner and the migration was
 * successful, the owner of the src page and the dst page are swapped and
 * the src page is un-isolated.
 *
 * If migration fails, the ownership stays unmodified and the src page
 * remains isolated: migration may be retried later or the page can be putback.
 *
 * TODO: migration core will treat both pages as folios and lock them before
 * this call to unlock them after this call. Further, the folio refcounts on
 * src and dst are also released by migration core. These pages will not be
 * folios in the future, so that must be reworked.
 *
 * Returns 0 on success, otherwise a negative error code.
 */
static int migrate_movable_ops_page(struct page *dst, struct page *src,
                enum migrate_mode mode)
{
        int rc;

        VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(src), src);
        VM_WARN_ON_ONCE_PAGE(!PageMovableOpsIsolated(src), src);
        rc = page_movable_ops(src)->migrate_page(dst, src, mode);
        if (!rc)
                ClearPageMovableOpsIsolated(src);
        return rc;
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and folio_isolate_hugetlb().
 */
void putback_movable_pages(struct list_head *l)
{
        struct folio *folio;
        struct folio *folio2;

        list_for_each_entry_safe(folio, folio2, l, lru) {
                if (unlikely(folio_test_hugetlb(folio))) {
                        folio_putback_hugetlb(folio);
                        continue;
                }
                list_del(&folio->lru);
                if (unlikely(page_has_movable_ops(&folio->page))) {
                        putback_movable_ops_page(&folio->page);
                } else {
                        node_stat_mod_folio(folio, NR_ISOLATED_ANON +
                                        folio_is_file_lru(folio), -folio_nr_pages(folio));
                        folio_putback_lru(folio);
                }
        }
}

/* Must be called with an elevated refcount on the non-hugetlb folio */
bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
{
        if (folio_test_hugetlb(folio))
                return folio_isolate_hugetlb(folio, list);

        if (page_has_movable_ops(&folio->page)) {
                if (!isolate_movable_ops_page(&folio->page,
                                        ISOLATE_UNEVICTABLE))
                        return false;
        } else {
                if (!folio_isolate_lru(folio))
                        return false;
                node_stat_add_folio(folio, NR_ISOLATED_ANON +
                                folio_is_file_lru(folio));
        }
        list_add(&folio->lru, list);
        return true;
}
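
/*
 * Note (added for clarity): the NR_ISOLATED_* accounting taken above for LRU
 * folios is dropped again either in putback_movable_pages() on putback or,
 * once migration finishes, in migrate_folio_done() below (demotion is
 * handled specially there).
 */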

static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
                struct folio *folio,
                unsigned long idx)
{
        struct page *page = folio_page(folio, idx);
        bool contains_data;
        pte_t newpte;
        void *addr;

        if (PageCompound(page))
                return false;
        VM_BUG_ON_PAGE(!PageAnon(page), page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(pte_present(ptep_get(pvmw->pte)), page);

        if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
            mm_forbids_zeropage(pvmw->vma->vm_mm))
                return false;

        /*
         * The pmd entry mapping the old thp was flushed and the pte mapping
         * this subpage is non-present. If the subpage is only zero-filled,
         * map it to the shared zeropage.
         */
        addr = kmap_local_page(page);
        contains_data = memchr_inv(addr, 0, PAGE_SIZE);
        kunmap_local(addr);

        if (contains_data)
                return false;

        newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
                                pvmw->vma->vm_page_prot));
        set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);

        dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));
        return true;
}

struct rmap_walk_arg {
        struct folio *folio;
        bool map_unused_to_zeropage;
};

/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct folio *folio,
                struct vm_area_struct *vma, unsigned long addr, void *arg)
{
        struct rmap_walk_arg *rmap_walk_arg = arg;
        DEFINE_FOLIO_VMA_WALK(pvmw, rmap_walk_arg->folio, vma, addr, PVMW_SYNC | PVMW_MIGRATION);

        while (page_vma_mapped_walk(&pvmw)) {
                rmap_t rmap_flags = RMAP_NONE;
                pte_t old_pte;
                pte_t pte;
                swp_entry_t entry;
                struct page *new;
                unsigned long idx = 0;

                /* pgoff is invalid for ksm pages, but they are never large */
                if (folio_test_large(folio) && !folio_test_hugetlb(folio))
                        idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
                new = folio_page(folio, idx);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
                /* PMD-mapped THP migration entry */
                if (!pvmw.pte) {
                        VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
                                        !folio_test_pmd_mappable(folio), folio);
                        remove_migration_pmd(&pvmw, new);
                        continue;
                }
#endif
                if (rmap_walk_arg->map_unused_to_zeropage &&
                    try_to_map_unused_to_zeropage(&pvmw, folio, idx))
                        continue;

                folio_get(folio);
                pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
                old_pte = ptep_get(pvmw.pte);

                entry = pte_to_swp_entry(old_pte);
                if (!is_migration_entry_young(entry))
                        pte = pte_mkold(pte);
                if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
                        pte = pte_mkdirty(pte);
                if (pte_swp_soft_dirty(old_pte))
                        pte = pte_mksoft_dirty(pte);
                else
                        pte = pte_clear_soft_dirty(pte);

                if (is_writable_migration_entry(entry))
                        pte = pte_mkwrite(pte, vma);
                else if (pte_swp_uffd_wp(old_pte))
                        pte = pte_mkuffd_wp(pte);

                if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
                        rmap_flags |= RMAP_EXCLUSIVE;

                if (unlikely(is_device_private_page(new))) {
                        if (pte_write(pte))
                                entry = make_writable_device_private_entry(
                                                        page_to_pfn(new));
                        else
                                entry = make_readable_device_private_entry(
                                                        page_to_pfn(new));
                        pte = swp_entry_to_pte(entry);
                        if (pte_swp_soft_dirty(old_pte))
                                pte = pte_swp_mksoft_dirty(pte);
                        if (pte_swp_uffd_wp(old_pte))
                                pte = pte_swp_mkuffd_wp(pte);
                }

#ifdef CONFIG_HUGETLB_PAGE
                if (folio_test_hugetlb(folio)) {
                        struct hstate *h = hstate_vma(vma);
                        unsigned int shift = huge_page_shift(h);
                        unsigned long psize = huge_page_size(h);

                        pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
                        if (folio_test_anon(folio))
                                hugetlb_add_anon_rmap(folio, vma, pvmw.address,
                                                      rmap_flags);
                        else
                                hugetlb_add_file_rmap(folio);
                        set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
                                        psize);
                } else
#endif
                {
                        if (folio_test_anon(folio))
                                folio_add_anon_rmap_pte(folio, new, vma,
                                                        pvmw.address, rmap_flags);
                        else
                                folio_add_file_rmap_pte(folio, new, vma);
                        set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
                }
                if (READ_ONCE(vma->vm_flags) & VM_LOCKED)
                        mlock_drain_local();

                trace_remove_migration_pte(pvmw.address, pte_val(pte),
                                           compound_order(new));

                /* No need to invalidate - it was non-present before */
                update_mmu_cache(vma, pvmw.address, pvmw.pte);
        }

        return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
{
        struct rmap_walk_arg rmap_walk_arg = {
                .folio = src,
                .map_unused_to_zeropage = flags & RMP_USE_SHARED_ZEROPAGE,
        };

        struct rmap_walk_control rwc = {
                .rmap_one = remove_migration_pte,
                .arg = &rmap_walk_arg,
        };

        VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src);

        if (flags & RMP_LOCKED)
                rmap_walk_locked(dst, &rwc);
        else
                rmap_walk(dst, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                          unsigned long address)
{
        spinlock_t *ptl;
        pte_t *ptep;
        pte_t pte;
        swp_entry_t entry;

        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (!ptep)
                return;

        pte = ptep_get(ptep);
        pte_unmap(ptep);

        if (!is_swap_pte(pte))
                goto out;

        entry = pte_to_swp_entry(pte);
        if (!is_migration_entry(entry))
                goto out;

        migration_entry_wait_on_locked(entry, ptl);
        return;
out:
        spin_unlock(ptl);
}
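
/*
 * Sketch of the calling pattern (as in the page fault path): on finding a
 * migration entry in a swap pte, the fault handler waits here and then lets
 * the fault be retried once migration has finished:
 *
 *	entry = pte_to_swp_entry(vmf->orig_pte);
 *	if (is_migration_entry(entry))
 *		migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
 */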

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The vma read lock must be held upon entry. Holding that lock prevents either
 * the pte or the ptl from being freed.
 *
 * This function will release the vma lock before returning.
 */
void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
        spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
        pte_t pte;

        hugetlb_vma_assert_locked(vma);
        spin_lock(ptl);
        pte = huge_ptep_get(vma->vm_mm, addr, ptep);

        if (unlikely(!is_hugetlb_entry_migration(pte))) {
                spin_unlock(ptl);
                hugetlb_vma_unlock_read(vma);
        } else {
                /*
                 * If a migration entry existed, it is safe to release the vma
                 * lock here because the pgtable page won't be freed without the
                 * pgtable lock released. See comment right above pgtable
                 * lock release in migration_entry_wait_on_locked().
                 */
                hugetlb_vma_unlock_read(vma);
                migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
        }
}
#endif

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
        spinlock_t *ptl;

        ptl = pmd_lock(mm, pmd);
        if (!is_pmd_migration_entry(*pmd))
                goto unlock;
        migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
        return;
unlock:
        spin_unlock(ptl);
}
#endif

/*
 * Replace the folio in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous folios without a mapping
 * 2 for folios with a mapping
 * 3 for folios with a mapping and the private flag set.
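 *
 * Worked example (illustrative, order-0 case): a pagecache folio with
 * private data attached is pinned by the caller, by the page cache and by
 * the private data, giving expected_count == 3. Large folios hold one cache
 * reference per base page, matching the folio_ref_add(newfolio, nr) below.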
 */
static int __folio_migrate_mapping(struct address_space *mapping,
                struct folio *newfolio, struct folio *folio, int expected_count)
{
        XA_STATE(xas, &mapping->i_pages, folio_index(folio));
        struct swap_cluster_info *ci = NULL;
        struct zone *oldzone, *newzone;
        int dirty;
        long nr = folio_nr_pages(folio);

        if (!mapping) {
                /* Take off deferred split queue while frozen and memcg set */
                if (folio_test_large(folio) &&
                    folio_test_large_rmappable(folio)) {
                        if (!folio_ref_freeze(folio, expected_count))
                                return -EAGAIN;
                        folio_unqueue_deferred_split(folio);
                        folio_ref_unfreeze(folio, expected_count);
                }

                /* No turning back from here */
                newfolio->index = folio->index;
                newfolio->mapping = folio->mapping;
                if (folio_test_anon(folio) && folio_test_large(folio))
                        mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
                if (folio_test_swapbacked(folio))
                        __folio_set_swapbacked(newfolio);

                return 0;
        }

        oldzone = folio_zone(folio);
        newzone = folio_zone(newfolio);

        if (folio_test_swapcache(folio))
                ci = swap_cluster_get_and_lock_irq(folio);
        else
                xas_lock_irq(&xas);

        if (!folio_ref_freeze(folio, expected_count)) {
                if (ci)
                        swap_cluster_unlock_irq(ci);
                else
                        xas_unlock_irq(&xas);
                return -EAGAIN;
        }

        /* Take off deferred split queue while frozen and memcg set */
        folio_unqueue_deferred_split(folio);

        /*
         * Now we know that no one else is looking at the folio:
         * no turning back from here.
         */
        newfolio->index = folio->index;
        newfolio->mapping = folio->mapping;
        if (folio_test_anon(folio) && folio_test_large(folio))
                mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
        folio_ref_add(newfolio, nr); /* add cache reference */
        if (folio_test_swapbacked(folio))
                __folio_set_swapbacked(newfolio);
        if (folio_test_swapcache(folio)) {
                folio_set_swapcache(newfolio);
                newfolio->private = folio_get_private(folio);
        }

        /* Move dirty while folio refs frozen and newfolio not yet exposed */
        dirty = folio_test_dirty(folio);
        if (dirty) {
                folio_clear_dirty(folio);
                folio_set_dirty(newfolio);
        }

        if (folio_test_swapcache(folio))
                __swap_cache_replace_folio(ci, folio, newfolio);
        else
                xas_store(&xas, newfolio);

        /*
         * Drop cache reference from old folio by unfreezing
         * to one less reference.
         * We know this isn't the last reference.
         */
        folio_ref_unfreeze(folio, expected_count - nr);

        /* Leave irq disabled to prevent preemption while updating stats */
        if (ci)
                swap_cluster_unlock(ci);
        else
                xas_unlock(&xas);

        /*
         * If moved to a different zone then also account
         * the folio for that zone. Other VM counters will be
         * taken care of when we establish references to the
         * new folio and drop references to the old folio.
         *
         * Note that anonymous folios are accounted for
         * via NR_FILE_PAGES and NR_ANON_MAPPED if they
         * are mapped to swap space.
         */
        if (newzone != oldzone) {
                struct lruvec *old_lruvec, *new_lruvec;
                struct mem_cgroup *memcg;

                memcg = folio_memcg(folio);
                old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
                new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);

                __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
                __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
                if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
                        __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
                        __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);

                        if (folio_test_pmd_mappable(folio)) {
                                __mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
                                __mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
                        }
                }
#ifdef CONFIG_SWAP
                if (folio_test_swapcache(folio)) {
                        __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
                        __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
                }
#endif
                if (dirty && mapping_can_writeback(mapping)) {
                        __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
                        __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
                        __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
                        __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
                }
        }
        local_irq_enable();

        return 0;
}

int folio_migrate_mapping(struct address_space *mapping,
                struct folio *newfolio, struct folio *folio, int extra_count)
{
        int expected_count = folio_expected_ref_count(folio) + extra_count + 1;

        if (folio_ref_count(folio) != expected_count)
                return -EAGAIN;

        return __folio_migrate_mapping(mapping, newfolio, folio, expected_count);
}
EXPORT_SYMBOL(folio_migrate_mapping);

/*
 * The expected number of remaining references is the same as that
 * of folio_migrate_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
                struct folio *dst, struct folio *src)
{
        XA_STATE(xas, &mapping->i_pages, folio_index(src));
        int rc, expected_count = folio_expected_ref_count(src) + 1;

        if (folio_ref_count(src) != expected_count)
                return -EAGAIN;

        rc = folio_mc_copy(dst, src);
        if (unlikely(rc))
                return rc;

        xas_lock_irq(&xas);
        if (!folio_ref_freeze(src, expected_count)) {
                xas_unlock_irq(&xas);
                return -EAGAIN;
        }

        dst->index = src->index;
        dst->mapping = src->mapping;

        folio_ref_add(dst, folio_nr_pages(dst));

        xas_store(&xas, dst);

        folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));

        xas_unlock_irq(&xas);

        return 0;
}

/*
 * Copy the flags and some other ancillary information
 */
void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
{
        int cpupid;

        if (folio_test_referenced(folio))
                folio_set_referenced(newfolio);
        if (folio_test_uptodate(folio))
                folio_mark_uptodate(newfolio);
        if (folio_test_clear_active(folio)) {
                VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
                folio_set_active(newfolio);
        } else if (folio_test_clear_unevictable(folio))
                folio_set_unevictable(newfolio);
        if (folio_test_workingset(folio))
                folio_set_workingset(newfolio);
        if (folio_test_checked(folio))
                folio_set_checked(newfolio);
        /*
         * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
         * migration entries. We can still have PG_anon_exclusive set on the
         * effectively unmapped and unreferenced first sub-page of an
         * anonymous THP: we can simply copy it here via PG_mappedtodisk.
         */
        if (folio_test_mappedtodisk(folio))
                folio_set_mappedtodisk(newfolio);

        /* Move dirty on pages not done by folio_migrate_mapping() */
        if (folio_test_dirty(folio))
                folio_set_dirty(newfolio);

        if (folio_test_young(folio))
                folio_set_young(newfolio);
        if (folio_test_idle(folio))
                folio_set_idle(newfolio);

        folio_migrate_refs(newfolio, folio);
        /*
         * Copy NUMA information to the new page, to prevent over-eager
         * future migrations of this same page.
         */
        cpupid = folio_xchg_last_cpupid(folio, -1);
        /*
         * For memory tiering mode, when migrating between slow and fast
         * memory nodes, reset cpupid, because that is used to record
         * page access time in slow memory nodes.
         */
        if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
                bool f_toptier = node_is_toptier(folio_nid(folio));
                bool t_toptier = node_is_toptier(folio_nid(newfolio));

                if (f_toptier != t_toptier)
                        cpupid = -1;
        }
        folio_xchg_last_cpupid(newfolio, cpupid);

        folio_migrate_ksm(newfolio, folio);
        /*
         * Please do not reorder this without considering how mm/ksm.c's
         * ksm_get_folio() depends upon ksm_migrate_page() and the
         * swapcache flag.
         */
        if (folio_test_swapcache(folio))
                folio_clear_swapcache(folio);
        folio_clear_private(folio);

        /* page->private contains hugetlb specific flags */
        if (!folio_test_hugetlb(folio))
                folio->private = NULL;

        /*
         * If any waiters have accumulated on the new page then
         * wake them up.
         */
        if (folio_test_writeback(newfolio))
                folio_end_writeback(newfolio);

        /*
         * PG_readahead shares the same bit with PG_reclaim. The above
         * folio_end_writeback() may clear PG_readahead mistakenly, so set
         * the bit after that.
         */
        if (folio_test_readahead(folio))
                folio_set_readahead(newfolio);

        folio_copy_owner(newfolio, folio);
        pgalloc_tag_swap(newfolio, folio);

        mem_cgroup_migrate(folio, newfolio);
}
EXPORT_SYMBOL(folio_migrate_flags);

/************************************************************
 *                    Migration functions
 ***********************************************************/

static int __migrate_folio(struct address_space *mapping, struct folio *dst,
                struct folio *src, void *src_private,
                enum migrate_mode mode)
{
        int rc, expected_count = folio_expected_ref_count(src) + 1;

        /* Check whether src does not have extra refs before we do more work */
        if (folio_ref_count(src) != expected_count)
                return -EAGAIN;

        rc = folio_mc_copy(dst, src);
        if (unlikely(rc))
                return rc;

        rc = __folio_migrate_mapping(mapping, dst, src, expected_count);
        if (rc)
                return rc;

        if (src_private)
                folio_attach_private(dst, folio_detach_private(src));

        folio_migrate_flags(dst, src);
        return 0;
}

/**
 * migrate_folio() - Simple folio migration.
 * @mapping: The address_space containing the folio.
 * @dst: The folio to migrate the data to.
 * @src: The folio containing the current data.
 * @mode: How to migrate the page.
 *
 * Common logic to directly migrate a single LRU folio suitable for
 * folios that do not have private data.
 *
 * Folios are locked upon entry and exit.
 */
int migrate_folio(struct address_space *mapping, struct folio *dst,
                struct folio *src, enum migrate_mode mode)
{
        BUG_ON(folio_test_writeback(src));      /* Writeback must be complete */
        return __migrate_folio(mapping, dst, src, NULL, mode);
}
EXPORT_SYMBOL(migrate_folio);
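
/*
 * Example (sketch): filesystems whose folios carry no private data can wire
 * this up directly in their address_space_operations; foo_aops below is a
 * hypothetical name:
 *
 *	static const struct address_space_operations foo_aops = {
 *		...
 *		.migrate_folio	= migrate_folio,
 *	};
 */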

#ifdef CONFIG_BUFFER_HEAD
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
                enum migrate_mode mode)
{
        struct buffer_head *bh = head;
        struct buffer_head *failed_bh;

        do {
                if (!trylock_buffer(bh)) {
                        if (mode == MIGRATE_ASYNC)
                                goto unlock;
                        if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
                                goto unlock;
                        lock_buffer(bh);
                }

                bh = bh->b_this_page;
        } while (bh != head);

        return true;

unlock:
        /* We failed to lock the buffer and cannot stall. */
        failed_bh = bh;
        bh = head;
        while (bh != failed_bh) {
                unlock_buffer(bh);
                bh = bh->b_this_page;
        }

        return false;
}

static int __buffer_migrate_folio(struct address_space *mapping,
                struct folio *dst, struct folio *src, enum migrate_mode mode,
                bool check_refs)
{
        struct buffer_head *bh, *head;
        int rc;
        int expected_count;

        head = folio_buffers(src);
        if (!head)
                return migrate_folio(mapping, dst, src, mode);

        /* Check whether page does not have extra refs before we do more work */
        expected_count = folio_expected_ref_count(src) + 1;
        if (folio_ref_count(src) != expected_count)
                return -EAGAIN;

        if (!buffer_migrate_lock_buffers(head, mode))
                return -EAGAIN;

        if (check_refs) {
                bool busy, migrating;
                bool invalidated = false;

                migrating = test_and_set_bit_lock(BH_Migrate, &head->b_state);
                VM_WARN_ON_ONCE(migrating);
recheck_buffers:
                busy = false;
                spin_lock(&mapping->i_private_lock);
                bh = head;
                do {
                        if (atomic_read(&bh->b_count)) {
                                busy = true;
                                break;
                        }
                        bh = bh->b_this_page;
                } while (bh != head);
                spin_unlock(&mapping->i_private_lock);
                if (busy) {
                        if (invalidated) {
                                rc = -EAGAIN;
                                goto unlock_buffers;
                        }
                        invalidate_bh_lrus();
                        invalidated = true;
                        goto recheck_buffers;
                }
        }

        rc = filemap_migrate_folio(mapping, dst, src, mode);
        if (rc)
                goto unlock_buffers;

        bh = head;
        do {
                folio_set_bh(bh, dst, bh_offset(bh));
                bh = bh->b_this_page;
        } while (bh != head);

unlock_buffers:
        if (check_refs)
                clear_bit_unlock(BH_Migrate, &head->b_state);
        bh = head;
        do {
                unlock_buffer(bh);
                bh = bh->b_this_page;
        } while (bh != head);

        return rc;
}

/**
 * buffer_migrate_folio() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * This function can only be used if the underlying filesystem guarantees
 * that no other references to @src exist. For example attached buffer
 * heads are accessed only under the folio lock. If your filesystem cannot
 * provide this guarantee, buffer_migrate_folio_norefs() may be more
 * appropriate.
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio(struct address_space *mapping,
                struct folio *dst, struct folio *src, enum migrate_mode mode)
{
        return __buffer_migrate_folio(mapping, dst, src, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_folio);
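
/*
 * Example (sketch): a block-based filesystem that only touches its buffer
 * heads under the folio lock would typically set
 * ".migrate_folio = buffer_migrate_folio" in its address_space_operations.
 */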

/**
 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * Like buffer_migrate_folio() except that this variant is more careful
 * and checks that there are also no buffer head references. This function
 * is the right one for mappings where buffer heads are directly looked
 * up and referenced (such as block device mappings).
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio_norefs(struct address_space *mapping,
                struct folio *dst, struct folio *src, enum migrate_mode mode)
{
        return __buffer_migrate_folio(mapping, dst, src, mode, true);
}
EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
#endif /* CONFIG_BUFFER_HEAD */

int filemap_migrate_folio(struct address_space *mapping,
                struct folio *dst, struct folio *src, enum migrate_mode mode)
{
        return __migrate_folio(mapping, dst, src, folio_get_private(src), mode);
}
EXPORT_SYMBOL_GPL(filemap_migrate_folio);
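
/*
 * Note (added for clarity): unlike migrate_folio(), this variant also moves
 * folio->private across to @dst (via __migrate_folio() above), so it suits
 * mappings that attach private data to their folios.
 */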

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_folio(struct address_space *mapping,
                struct folio *dst, struct folio *src, enum migrate_mode mode)
{
        WARN_ONCE(mapping->a_ops->writepages,
                  "%ps does not implement migrate_folio\n",
                  mapping->a_ops);
        if (folio_test_dirty(src))
                return -EBUSY;

        /*
         * Filesystem may have private data at folio->private that we
         * can't migrate automatically.
         */
        if (!filemap_release_folio(src, GFP_KERNEL))
                return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

        return migrate_folio(mapping, dst, src, mode);
}

/*
 * Move a src folio to a newly allocated dst folio.
 *
 * The src and dst folios are locked and the src folio was unmapped from
 * the page tables.
 *
 * On success, the src folio was replaced by the dst folio.
 *
 * Return value:
 *   < 0 - error code
 *     0 - success
 */
static int move_to_new_folio(struct folio *dst, struct folio *src,
                enum migrate_mode mode)
{
        struct address_space *mapping = folio_mapping(src);
        int rc = -EAGAIN;

        VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
        VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);

        if (!mapping)
                rc = migrate_folio(mapping, dst, src, mode);
        else if (mapping_inaccessible(mapping))
                rc = -EOPNOTSUPP;
        else if (mapping->a_ops->migrate_folio)
                /*
                 * Most folios have a mapping and most filesystems
                 * provide a migrate_folio callback. Anonymous folios
                 * are part of swap space which also has its own
                 * migrate_folio callback. This is the most common path
                 * for page migration.
                 */
                rc = mapping->a_ops->migrate_folio(mapping, dst, src,
                                                   mode);
        else
                rc = fallback_migrate_folio(mapping, dst, src, mode);

        if (!rc) {
                /*
                 * For pagecache folios, src->mapping must be cleared before src
                 * is freed. Anonymous folios must stay anonymous until freed.
                 */
                if (!folio_test_anon(src))
                        src->mapping = NULL;

                if (likely(!folio_is_zone_device(dst)))
                        flush_dcache_folio(dst);
        }
        return rc;
}

/*
 * To record some information during migration, we use the unused private
 * field of the newly allocated destination folio.
 * This is safe because nobody is using it except us.
 */
enum {
        PAGE_WAS_MAPPED = BIT(0),
        PAGE_WAS_MLOCKED = BIT(1),
        PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
};
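
/*
 * Note (added for clarity): the helpers below stash these state bits in the
 * low bits of dst->private alongside the anon_vma pointer. This works
 * because anon_vma structures come from the slab allocator and are at least
 * word-aligned, leaving bits 0-1 free for tagging.
 */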

static void __migrate_folio_record(struct folio *dst,
                int old_page_state,
                struct anon_vma *anon_vma)
{
        dst->private = (void *)anon_vma + old_page_state;
}

static void __migrate_folio_extract(struct folio *dst,
                int *old_page_state,
                struct anon_vma **anon_vmap)
{
        unsigned long private = (unsigned long)dst->private;

        *anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
        *old_page_state = private & PAGE_OLD_STATES;
        dst->private = NULL;
}

/* Restore the source folio to the original state upon failure */
static void migrate_folio_undo_src(struct folio *src,
                int page_was_mapped,
                struct anon_vma *anon_vma,
                bool locked,
                struct list_head *ret)
{
        if (page_was_mapped)
                remove_migration_ptes(src, src, 0);
        /* Drop an anon_vma reference if we took one */
        if (anon_vma)
                put_anon_vma(anon_vma);
        if (locked)
                folio_unlock(src);
        if (ret)
                list_move_tail(&src->lru, ret);
}

/* Restore the destination folio to the original state upon failure */
static void migrate_folio_undo_dst(struct folio *dst, bool locked,
                free_folio_t put_new_folio, unsigned long private)
{
        if (locked)
                folio_unlock(dst);
        if (put_new_folio)
                put_new_folio(dst, private);
        else
                folio_put(dst);
}

/* Cleanup src folio upon migration success */
static void migrate_folio_done(struct folio *src,
                enum migrate_reason reason)
{
        if (likely(!page_has_movable_ops(&src->page)) && reason != MR_DEMOTION)
                mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
                                    folio_is_file_lru(src), -folio_nr_pages(src));

        if (reason != MR_MEMORY_FAILURE)
                /* We release the page in page_handle_poison. */
                folio_put(src);
}

/* Obtain the lock on page, remove all ptes. */
static int migrate_folio_unmap(new_folio_t get_new_folio,
                free_folio_t put_new_folio, unsigned long private,
                struct folio *src, struct folio **dstp, enum migrate_mode mode,
                struct list_head *ret)
{
        struct folio *dst;
        int rc = -EAGAIN;
        int old_page_state = 0;
        struct anon_vma *anon_vma = NULL;
        bool locked = false;
        bool dst_locked = false;

        dst = get_new_folio(src, private);
        if (!dst)
                return -ENOMEM;
        *dstp = dst;

        dst->private = NULL;

        if (!folio_trylock(src)) {
                if (mode == MIGRATE_ASYNC)
                        goto out;

                /*
                 * It's not safe for direct compaction to call lock_page.
                 * For example, during page readahead pages are added locked
                 * to the LRU. Later, when the IO completes the pages are
                 * marked uptodate and unlocked. However, the queueing
                 * could be merging multiple pages for one bio (e.g.
                 * mpage_readahead). If an allocation happens for the
                 * second or third page, the process can end up locking
                 * the same page twice and deadlocking. Rather than
                 * trying to be clever about what pages can be locked,
                 * avoid the use of lock_page for direct compaction
                 * altogether.
                 */
                if (current->flags & PF_MEMALLOC)
                        goto out;

                /*
                 * In "light" mode, we can wait for transient locks (eg
                 * inserting a page into the page table), but it's not
                 * worth waiting for I/O.
                 */
                if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
                        goto out;

                folio_lock(src);
        }
        locked = true;
        if (folio_test_mlocked(src))
                old_page_state |= PAGE_WAS_MLOCKED;

        if (folio_test_writeback(src)) {
                /*
                 * Only in the case of a full synchronous migration is it
                 * necessary to wait for PageWriteback. In the async case,
                 * the retry loop is too short and in the sync-light case,
                 * the overhead of stalling is too much.
                 */
                switch (mode) {
                case MIGRATE_SYNC:
                        break;
                default:
                        rc = -EBUSY;
                        goto out;
                }
                folio_wait_writeback(src);
        }

        /*
         * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
         * we cannot notice that anon_vma is freed while we migrate a page.
         * This get_anon_vma() delays freeing the anon_vma pointer until the
         * end of migration. File cache pages are no problem because they are
         * protected by the page lock during migration, so we only need to
         * care about anon pages here.
         *
         * Only folio_get_anon_vma() understands the subtleties of
         * getting a hold on an anon_vma from outside one of its mms.
         * But if we cannot get anon_vma, then we won't need it anyway,
         * because that implies that the anon page is no longer mapped
         * (and cannot be remapped so long as we hold the page lock).
         */
        if (folio_test_anon(src) && !folio_test_ksm(src))
                anon_vma = folio_get_anon_vma(src);

        /*
         * Block others from accessing the new page when we get around to
         * establishing additional references. We are usually the only one
         * holding a reference to dst at this point. We used to have a BUG
         * here if folio_trylock(dst) fails, but would like to allow for
         * cases where there might be a race with the previous use of dst.
         * This is much like races on the refcount of the old page: just don't
         * BUG().
         */
        if (unlikely(!folio_trylock(dst)))
                goto out;
        dst_locked = true;

        if (unlikely(page_has_movable_ops(&src->page))) {
                __migrate_folio_record(dst, old_page_state, anon_vma);
                return 0;
        }

        /*
         * Corner case handling:
         * 1. When a new swap-cache page is read in, it is added to the LRU
         *    and treated as swapcache but it has no rmap yet.
         *    Calling try_to_unmap() against a src->mapping==NULL page will
         *    trigger a BUG. So handle it here.
         * 2. An orphaned page (see truncate_cleanup_page) might have
         *    fs-private metadata. The page can be picked up due to memory
         *    offlining. Everywhere else except page reclaim, the page is
         *    invisible to the vm, so the page can not be migrated. So try to
         *    free the metadata, so the page can be freed.
         */
        if (!src->mapping) {
                if (folio_test_private(src)) {
                        try_to_free_buffers(src);
                        goto out;
                }
        } else if (folio_mapped(src)) {
                /* Establish migration ptes */
                VM_BUG_ON_FOLIO(folio_test_anon(src) &&
                                !folio_test_ksm(src) && !anon_vma, src);
                try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
                old_page_state |= PAGE_WAS_MAPPED;
        }

        if (!folio_mapped(src)) {
                __migrate_folio_record(dst, old_page_state, anon_vma);
                return 0;
        }

out:
        /*
         * A folio that has not been unmapped will be restored to
         * the right list unless we want to retry.
         */
        if (rc == -EAGAIN)
                ret = NULL;

        migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
                               anon_vma, locked, ret);
        migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);

        return rc;
}

/* Migrate the folio to the newly allocated folio in dst. */
static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
                struct folio *src, struct folio *dst,
                enum migrate_mode mode, enum migrate_reason reason,
                struct list_head *ret)
{
        int rc;
        int old_page_state = 0;
        struct anon_vma *anon_vma = NULL;
        struct list_head *prev;

        __migrate_folio_extract(dst, &old_page_state, &anon_vma);
        prev = dst->lru.prev;
        list_del(&dst->lru);

        if (unlikely(page_has_movable_ops(&src->page))) {
                rc = migrate_movable_ops_page(&dst->page, &src->page, mode);
                if (rc)
                        goto out;
                goto out_unlock_both;
        }

        rc = move_to_new_folio(dst, src, mode);
        if (rc)
                goto out;

        /*
         * When successful, push dst to LRU immediately: so that if it
         * turns out to be an mlocked page, remove_migration_ptes() will
         * automatically build up the correct dst->mlock_count for it.
         *
         * We would like to do something similar for the old page, when
         * unsuccessful, and other cases when a page has been temporarily
         * isolated from the unevictable LRU: but this case is the easiest.
         */
        folio_add_lru(dst);
        if (old_page_state & PAGE_WAS_MLOCKED)
                lru_add_drain();

        if (old_page_state & PAGE_WAS_MAPPED)
                remove_migration_ptes(src, dst, 0);

out_unlock_both:
        folio_unlock(dst);
        folio_set_owner_migrate_reason(dst, reason);
        /*
         * If migration is successful, decrease the refcount of dst,
         * which will not free the page because the new page owner increased
         * the refcount.
         */
        folio_put(dst);

        /*
         * A folio that has been migrated has all references removed
         * and will be freed.
         */
        list_del(&src->lru);
        /* Drop an anon_vma reference if we took one */
        if (anon_vma)
                put_anon_vma(anon_vma);
        folio_unlock(src);
        migrate_folio_done(src, reason);

        return rc;
out:
        /*
         * A folio that has not been migrated will be restored to
         * the right list unless we want to retry.
         */
        if (rc == -EAGAIN) {
                list_add(&dst->lru, prev);
                __migrate_folio_record(dst, old_page_state, anon_vma);
                return rc;
        }

        migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
                               anon_vma, true, ret);
        migrate_folio_undo_dst(dst, true, put_new_folio, private);

        return rc;
}

/*
 * Counterpart of unmap_and_move_page() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and direct I/O
 * code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_folio_t get_new_folio,
                free_folio_t put_new_folio, unsigned long private,
                struct folio *src, int force, enum migrate_mode mode,
                int reason, struct list_head *ret)
{
        struct folio *dst;
        int rc = -EAGAIN;
        int page_was_mapped = 0;
        struct anon_vma *anon_vma = NULL;
        struct address_space *mapping = NULL;

        if (folio_ref_count(src) == 1) {
                /* page was freed from under us. So we are done. */
                folio_putback_hugetlb(src);
                return 0;
        }

        dst = get_new_folio(src, private);
        if (!dst)
                return -ENOMEM;

        if (!folio_trylock(src)) {
                if (!force)
                        goto out;
                switch (mode) {
                case MIGRATE_SYNC:
                        break;
                default:
                        goto out;
                }
                folio_lock(src);
        }

        /*
         * Check for pages which are in the process of being freed. Without
         * folio_mapping() set, the hugetlbfs specific move page routine will
         * not be called and we could leak usage counts for subpools.
         */
        if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
                rc = -EBUSY;
                goto out_unlock;
        }

        if (folio_test_anon(src))
                anon_vma = folio_get_anon_vma(src);

        if (unlikely(!folio_trylock(dst)))
                goto put_anon;

        if (folio_mapped(src)) {
                enum ttu_flags ttu = 0;

                if (!folio_test_anon(src)) {
                        /*
                         * In shared mappings, try_to_unmap could potentially
                         * call huge_pmd_unshare. Because of this, take
                         * the semaphore in write mode here and set
                         * TTU_RMAP_LOCKED to let lower levels know we have
                         * taken the lock.
                         */
                        mapping = hugetlb_folio_mapping_lock_write(src);
                        if (unlikely(!mapping))
                                goto unlock_put_anon;

                        ttu = TTU_RMAP_LOCKED;
                }

                try_to_migrate(src, ttu);
                page_was_mapped = 1;

                if (ttu & TTU_RMAP_LOCKED)
                        i_mmap_unlock_write(mapping);
        }

        if (!folio_mapped(src))
                rc = move_to_new_folio(dst, src, mode);

        if (page_was_mapped)
                remove_migration_ptes(src, !rc ? dst : src, 0);

unlock_put_anon:
        folio_unlock(dst);

put_anon:
        if (anon_vma)
                put_anon_vma(anon_vma);

        if (!rc) {
                move_hugetlb_state(src, dst, reason);
                put_new_folio = NULL;
        }

out_unlock:
        folio_unlock(src);
out:
        if (!rc)
                folio_putback_hugetlb(src);
        else if (rc != -EAGAIN)
                list_move_tail(&src->lru, ret);

        /*
         * If migration was not successful and there's a freeing callback,
         * return the folio to that special allocator. Otherwise, simply drop
         * our additional reference.
         */
        if (put_new_folio)
                put_new_folio(dst, private);
        else
                folio_put(dst);

        return rc;
}

static inline int try_split_folio(struct folio *folio, struct list_head *split_folios,
                enum migrate_mode mode)
{
        int rc;

        if (mode == MIGRATE_ASYNC) {
                if (!folio_trylock(folio))
                        return -EAGAIN;
        } else {
                folio_lock(folio);
        }
        rc = split_folio_to_list(folio, split_folios);
        folio_unlock(folio);
        if (!rc)
                list_move_tail(&folio->lru, split_folios);

        return rc;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define NR_MAX_BATCHED_MIGRATION	HPAGE_PMD_NR
#else
#define NR_MAX_BATCHED_MIGRATION	512
#endif
#define NR_MAX_MIGRATE_PAGES_RETRY	10
#define NR_MAX_MIGRATE_ASYNC_RETRY	3
#define NR_MAX_MIGRATE_SYNC_RETRY \
	(NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
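
/*
 * Note (added for clarity): the async and sync retry budgets are defined so
 * that they sum to NR_MAX_MIGRATE_PAGES_RETRY (3 + 7 == 10), keeping the
 * total number of passes bounded when async attempts are followed by
 * synchronous ones.
 */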
1583
1584 struct migrate_pages_stats {
1585 int nr_succeeded; /* Normal and large folios migrated successfully, in
1586 units of base pages */
1587 int nr_failed_pages; /* Normal and large folios failed to be migrated, in
1588 units of base pages. Untried folios aren't counted */
1589 int nr_thp_succeeded; /* THP migrated successfully */
1590 int nr_thp_failed; /* THP failed to be migrated */
1591 int nr_thp_split; /* THP split before migrating */
1592 int nr_split; /* Large folio (include THP) split before migrating */
1593 };
1594
1595 /*
1596 * Returns the number of hugetlb folios that were not migrated, or an error code
1597 * after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no hugetlb folios are movable
1598 * any more because the list has become empty or no retryable hugetlb folios
1599 * exist any more. It is caller's responsibility to call putback_movable_pages()
1600 * only if ret != 0.
1601 */
migrate_hugetlbs(struct list_head * from,new_folio_t get_new_folio,free_folio_t put_new_folio,unsigned long private,enum migrate_mode mode,int reason,struct migrate_pages_stats * stats,struct list_head * ret_folios)1602 static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
1603 free_folio_t put_new_folio, unsigned long private,
1604 enum migrate_mode mode, int reason,
1605 struct migrate_pages_stats *stats,
1606 struct list_head *ret_folios)
1607 {
1608 int retry = 1;
1609 int nr_failed = 0;
1610 int nr_retry_pages = 0;
1611 int pass = 0;
1612 struct folio *folio, *folio2;
1613 int rc, nr_pages;
1614
1615 for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1616 retry = 0;
1617 nr_retry_pages = 0;
1618
1619 list_for_each_entry_safe(folio, folio2, from, lru) {
1620 if (!folio_test_hugetlb(folio))
1621 continue;
1622
1623 nr_pages = folio_nr_pages(folio);
1624
1625 cond_resched();
1626
1627 /*
1628 * Migratability of hugepages depends on architectures and
1629 * their size. This check is necessary because some callers
1630 * of hugepage migration like soft offline and memory
1631 * hotremove don't walk through page tables or check whether
1632 * the hugepage is pmd-based or not before kicking migration.
1633 */
1634 if (!hugepage_migration_supported(folio_hstate(folio))) {
1635 nr_failed++;
1636 stats->nr_failed_pages += nr_pages;
1637 list_move_tail(&folio->lru, ret_folios);
1638 continue;
1639 }
1640
1641 rc = unmap_and_move_huge_page(get_new_folio,
1642 put_new_folio, private,
1643 folio, pass > 2, mode,
1644 reason, ret_folios);
1645 /*
1646 * The rules are:
1647 * 0: hugetlb folio will be put back
1648 * -EAGAIN: stay on the from list
1649 * -ENOMEM: stay on the from list
1650 * Other errno: put on ret_folios list
1651 */
1652 switch(rc) {
1653 case -ENOMEM:
1654 /*
1655 * When memory is low, don't bother to try to migrate
1656 * other folios, just exit.
1657 */
1658 stats->nr_failed_pages += nr_pages + nr_retry_pages;
1659 return -ENOMEM;
1660 case -EAGAIN:
1661 retry++;
1662 nr_retry_pages += nr_pages;
1663 break;
1664 case 0:
1665 stats->nr_succeeded += nr_pages;
1666 break;
1667 default:
1668 /*
1669 * Permanent failure (-EBUSY, etc.):
1670 * unlike -EAGAIN case, the failed folio is
1671 * removed from migration folio list and not
1672 * retried in the next outer loop.
1673 */
1674 nr_failed++;
1675 stats->nr_failed_pages += nr_pages;
1676 break;
1677 }
1678 }
1679 }
1680 /*
1681 * nr_failed is number of hugetlb folios failed to be migrated. After
1682 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
1683 * folios as failed.
1684 */
1685 nr_failed += retry;
1686 stats->nr_failed_pages += nr_retry_pages;
1687
1688 return nr_failed;
1689 }
1690
migrate_folios_move(struct list_head * src_folios,struct list_head * dst_folios,free_folio_t put_new_folio,unsigned long private,enum migrate_mode mode,int reason,struct list_head * ret_folios,struct migrate_pages_stats * stats,int * retry,int * thp_retry,int * nr_failed,int * nr_retry_pages)1691 static void migrate_folios_move(struct list_head *src_folios,
1692 struct list_head *dst_folios,
1693 free_folio_t put_new_folio, unsigned long private,
1694 enum migrate_mode mode, int reason,
1695 struct list_head *ret_folios,
1696 struct migrate_pages_stats *stats,
1697 int *retry, int *thp_retry, int *nr_failed,
1698 int *nr_retry_pages)
1699 {
1700 struct folio *folio, *folio2, *dst, *dst2;
1701 bool is_thp;
1702 int nr_pages;
1703 int rc;
1704
1705 dst = list_first_entry(dst_folios, struct folio, lru);
1706 dst2 = list_next_entry(dst, lru);
1707 list_for_each_entry_safe(folio, folio2, src_folios, lru) {
1708 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1709 nr_pages = folio_nr_pages(folio);
1710
1711 cond_resched();
1712
1713 rc = migrate_folio_move(put_new_folio, private,
1714 folio, dst, mode,
1715 reason, ret_folios);
1716 /*
1717 * The rules are:
1718 * 0: folio will be freed
1719 * -EAGAIN: stay on the unmap_folios list
1720 * Other errno: put on ret_folios list
1721 */
1722 switch (rc) {
1723 case -EAGAIN:
1724 *retry += 1;
1725 *thp_retry += is_thp;
1726 *nr_retry_pages += nr_pages;
1727 break;
1728 case 0:
1729 stats->nr_succeeded += nr_pages;
1730 stats->nr_thp_succeeded += is_thp;
1731 break;
1732 default:
1733 *nr_failed += 1;
1734 stats->nr_thp_failed += is_thp;
1735 stats->nr_failed_pages += nr_pages;
1736 break;
1737 }
1738 dst = dst2;
1739 dst2 = list_next_entry(dst, lru);
1740 }
1741 }
1742
1743 static void migrate_folios_undo(struct list_head *src_folios,
1744 struct list_head *dst_folios,
1745 free_folio_t put_new_folio, unsigned long private,
1746 struct list_head *ret_folios)
1747 {
1748 struct folio *folio, *folio2, *dst, *dst2;
1749
1750 dst = list_first_entry(dst_folios, struct folio, lru);
1751 dst2 = list_next_entry(dst, lru);
1752 list_for_each_entry_safe(folio, folio2, src_folios, lru) {
1753 int old_page_state = 0;
1754 struct anon_vma *anon_vma = NULL;
1755
1756 __migrate_folio_extract(dst, &old_page_state, &anon_vma);
1757 migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1758 anon_vma, true, ret_folios);
1759 list_del(&dst->lru);
1760 migrate_folio_undo_dst(dst, true, put_new_folio, private);
1761 dst = dst2;
1762 dst2 = list_next_entry(dst, lru);
1763 }
1764 }
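/*
 * Both helpers above rely on the source and destination lists being exact
 * lockstep lists: entry N of dst_folios is the destination for entry N of
 * src_folios. A condensed sketch of the pairing idiom (illustrative only):
 *
 *	dst = list_first_entry(dst_folios, struct folio, lru);
 *	dst2 = list_next_entry(dst, lru);
 *	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
 *		... operate on the (folio, dst) pair ...
 *		dst = dst2;
 *		dst2 = list_next_entry(dst, lru);
 *	}
 *
 * dst2 is fetched before the pair is consumed, so removing dst from the
 * list (as migrate_folios_undo() does) cannot invalidate the cursor.
 */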
1765
1766 /*
1767 * migrate_pages_batch() first unmaps as many folios in the from list as
1768 * possible, then moves the unmapped folios.
1769 *
1770 * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on
1771 * a lock or bit while we have locked more than one folio, which may
1772 * cause a deadlock (e.g., for the loop device). So, if mode !=
1773 * MIGRATE_ASYNC, the length of the from list must be <= 1.
1774 */
1775 static int migrate_pages_batch(struct list_head *from,
1776 new_folio_t get_new_folio, free_folio_t put_new_folio,
1777 unsigned long private, enum migrate_mode mode, int reason,
1778 struct list_head *ret_folios, struct list_head *split_folios,
1779 struct migrate_pages_stats *stats, int nr_pass)
1780 {
1781 int retry = 1;
1782 int thp_retry = 1;
1783 int nr_failed = 0;
1784 int nr_retry_pages = 0;
1785 int pass = 0;
1786 bool is_thp = false;
1787 bool is_large = false;
1788 struct folio *folio, *folio2, *dst = NULL;
1789 int rc, rc_saved = 0, nr_pages;
1790 LIST_HEAD(unmap_folios);
1791 LIST_HEAD(dst_folios);
1792 bool nosplit = (reason == MR_NUMA_MISPLACED);
1793
1794 VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1795 !list_empty(from) && !list_is_singular(from));
1796
1797 for (pass = 0; pass < nr_pass && retry; pass++) {
1798 retry = 0;
1799 thp_retry = 0;
1800 nr_retry_pages = 0;
1801
1802 list_for_each_entry_safe(folio, folio2, from, lru) {
1803 is_large = folio_test_large(folio);
1804 is_thp = folio_test_pmd_mappable(folio);
1805 nr_pages = folio_nr_pages(folio);
1806
1807 cond_resched();
1808
1809 /*
1810 * The rare folio on the deferred split list should
1811 * be split now. It should not count as a failure,
1812 * but we increment nr_failed because, without doing so,
1813 * migrate_pages() may report success with (split but
1814 * unmigrated) pages still on its fromlist; whereas it
1815 * always reports success when its fromlist is empty.
1816 * stats->nr_thp_failed should be increased too,
1817 * otherwise a stats inconsistency will happen when
1818 * migrate_pages_batch is called via migrate_pages()
1819 * with MIGRATE_SYNC and MIGRATE_ASYNC.
1820 *
1821 * Only check it without removing it from the list,
1822 * since the folio can be on a deferred_split_scan()
1823 * local list and removing it could corrupt that
1824 * local list. The folio split process below can handle
1825 * it with the help of folio_ref_freeze().
1826 *
1827 * nr_pages > 2 is needed to avoid checking order-1
1828 * page cache folios. They exist, in contrast to
1829 * non-existent order-1 anonymous folios, and do not
1830 * use _deferred_list.
1831 */
1832 if (nr_pages > 2 &&
1833 !list_empty(&folio->_deferred_list) &&
1834 folio_test_partially_mapped(folio)) {
1835 if (!try_split_folio(folio, split_folios, mode)) {
1836 nr_failed++;
1837 stats->nr_thp_failed += is_thp;
1838 stats->nr_thp_split += is_thp;
1839 stats->nr_split++;
1840 continue;
1841 }
1842 }
1843
1844 /*
1845 * Large folio migration might be unsupported or
1846 * the allocation might fail, so we should retry
1847 * the same folio after splitting the large folio
1848 * into normal folios.
1849 *
1850 * Split folios are put in split_folios, and
1851 * we will migrate them after the rest of the
1852 * list is processed.
1853 */
1854 if (!thp_migration_supported() && is_thp) {
1855 nr_failed++;
1856 stats->nr_thp_failed++;
1857 if (!try_split_folio(folio, split_folios, mode)) {
1858 stats->nr_thp_split++;
1859 stats->nr_split++;
1860 continue;
1861 }
1862 stats->nr_failed_pages += nr_pages;
1863 list_move_tail(&folio->lru, ret_folios);
1864 continue;
1865 }
1866
1867 /*
1868 * If we are holding the last folio reference, the folio
1869 * was freed from under us, so just drop our reference.
1870 */
1871 if (likely(!page_has_movable_ops(&folio->page)) &&
1872 folio_ref_count(folio) == 1) {
1873 folio_clear_active(folio);
1874 folio_clear_unevictable(folio);
1875 list_del(&folio->lru);
1876 migrate_folio_done(folio, reason);
1877 stats->nr_succeeded += nr_pages;
1878 stats->nr_thp_succeeded += is_thp;
1879 continue;
1880 }
1881
1882 rc = migrate_folio_unmap(get_new_folio, put_new_folio,
1883 private, folio, &dst, mode, ret_folios);
1884 /*
1885 * The rules are:
1886 * 0: folio will be put on unmap_folios list,
1887 * dst folio put on dst_folios list
1888 * -EAGAIN: stay on the from list
1889 * -ENOMEM: stay on the from list
1890 * Other errno: put on ret_folios list
1891 */
1892 switch (rc) {
1893 case -ENOMEM:
1894 /*
1895 * When memory is low, don't bother trying to migrate
1896 * other folios; move the unmapped folios, then exit.
1897 */
1898 nr_failed++;
1899 stats->nr_thp_failed += is_thp;
1900 /* Large folio NUMA faulting doesn't split to retry. */
1901 if (is_large && !nosplit) {
1902 int ret = try_split_folio(folio, split_folios, mode);
1903
1904 if (!ret) {
1905 stats->nr_thp_split += is_thp;
1906 stats->nr_split++;
1907 break;
1908 } else if (reason == MR_LONGTERM_PIN &&
1909 ret == -EAGAIN) {
1910 /*
1911 * Try again to split the large folio to
1912 * mitigate the failure of longterm pinning.
1913 */
1914 retry++;
1915 thp_retry += is_thp;
1916 nr_retry_pages += nr_pages;
1917 /* Undo duplicated failure counting. */
1918 nr_failed--;
1919 stats->nr_thp_failed -= is_thp;
1920 break;
1921 }
1922 }
1923
1924 stats->nr_failed_pages += nr_pages + nr_retry_pages;
1925 /* nr_failed isn't updated, since it won't be used (rc_saved is returned) */
1926 stats->nr_thp_failed += thp_retry;
1927 rc_saved = rc;
1928 if (list_empty(&unmap_folios))
1929 goto out;
1930 else
1931 goto move;
1932 case -EAGAIN:
1933 retry++;
1934 thp_retry += is_thp;
1935 nr_retry_pages += nr_pages;
1936 break;
1937 case 0:
1938 list_move_tail(&folio->lru, &unmap_folios);
1939 list_add_tail(&dst->lru, &dst_folios);
1940 break;
1941 default:
1942 /*
1943 * Permanent failure (-EBUSY, etc.):
1944 * unlike -EAGAIN case, the failed folio is
1945 * removed from migration folio list and not
1946 * retried in the next outer loop.
1947 */
1948 nr_failed++;
1949 stats->nr_thp_failed += is_thp;
1950 stats->nr_failed_pages += nr_pages;
1951 break;
1952 }
1953 }
1954 }
1955 nr_failed += retry;
1956 stats->nr_thp_failed += thp_retry;
1957 stats->nr_failed_pages += nr_retry_pages;
1958 move:
1959 /* Flush TLBs for all unmapped folios */
1960 try_to_unmap_flush();
1961
1962 retry = 1;
1963 for (pass = 0; pass < nr_pass && retry; pass++) {
1964 retry = 0;
1965 thp_retry = 0;
1966 nr_retry_pages = 0;
1967
1968 /* Move the unmapped folios */
1969 migrate_folios_move(&unmap_folios, &dst_folios,
1970 put_new_folio, private, mode, reason,
1971 ret_folios, stats, &retry, &thp_retry,
1972 &nr_failed, &nr_retry_pages);
1973 }
1974 nr_failed += retry;
1975 stats->nr_thp_failed += thp_retry;
1976 stats->nr_failed_pages += nr_retry_pages;
1977
1978 rc = rc_saved ? : nr_failed;
1979 out:
1980 /* Cleanup remaining folios */
1981 migrate_folios_undo(&unmap_folios, &dst_folios,
1982 put_new_folio, private, ret_folios);
1983
1984 return rc;
1985 }
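/*
 * A note on "rc_saved ? : nr_failed" above: this is the GNU "a ?: b"
 * conditional expression, equivalent to
 *
 *	rc = rc_saved ? rc_saved : nr_failed;
 *
 * so an -ENOMEM saved during the unmap phase takes precedence over the
 * plain failure count as the return value.
 */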
1986
1987 static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
1988 free_folio_t put_new_folio, unsigned long private,
1989 enum migrate_mode mode, int reason,
1990 struct list_head *ret_folios, struct list_head *split_folios,
1991 struct migrate_pages_stats *stats)
1992 {
1993 int rc, nr_failed = 0;
1994 LIST_HEAD(folios);
1995 struct migrate_pages_stats astats;
1996
1997 memset(&astats, 0, sizeof(astats));
1998 /* Try to migrate in batch with MIGRATE_ASYNC mode firstly */
1999 rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
2000 reason, &folios, split_folios, &astats,
2001 NR_MAX_MIGRATE_ASYNC_RETRY);
2002 stats->nr_succeeded += astats.nr_succeeded;
2003 stats->nr_thp_succeeded += astats.nr_thp_succeeded;
2004 stats->nr_thp_split += astats.nr_thp_split;
2005 stats->nr_split += astats.nr_split;
2006 if (rc < 0) {
2007 stats->nr_failed_pages += astats.nr_failed_pages;
2008 stats->nr_thp_failed += astats.nr_thp_failed;
2009 list_splice_tail(&folios, ret_folios);
2010 return rc;
2011 }
2012 stats->nr_thp_failed += astats.nr_thp_split;
2013 /*
2014 * Do not count rc, as those folios will be retried below.
2015 * Count only nr_split, which already includes nr_thp_split.
2016 */
2017 nr_failed += astats.nr_split;
2018 /*
2019 * Fall back to migrating all failed folios one by one synchronously. All
2020 * failed folios except split THPs will be retried, so their failure
2021 * isn't counted.
2022 */
2023 list_splice_tail_init(&folios, from);
2024 while (!list_empty(from)) {
2025 list_move(from->next, &folios);
2026 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
2027 private, mode, reason, ret_folios,
2028 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
2029 list_splice_tail_init(&folios, ret_folios);
2030 if (rc < 0)
2031 return rc;
2032 nr_failed += rc;
2033 }
2034
2035 return nr_failed;
2036 }
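/*
 * A worked example of the accounting above (illustrative): if the async
 * pass split exactly one THP, astats.nr_split == astats.nr_thp_split == 1.
 * That split is charged here once, as one THP failure and one non-migrated
 * folio (nr_failed), while folios that merely failed the async pass stay
 * on "from" and are retried synchronously, so their async failure is never
 * counted.
 */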
2037
2038 /*
2039 * migrate_pages - migrate the folios specified in a list to the free
2040 * folios supplied as the target for the page migration
2041 *
2042 * @from: The list of folios to be migrated.
2043 * @get_new_folio: The function used to allocate free folios to be used
2044 * as the target of the folio migration.
2045 * @put_new_folio: The function used to free target folios if migration
2046 * fails, or NULL if no special handling is necessary.
2047 * @private: Private data to be passed on to get_new_folio()
2048 * @mode: The migration mode that specifies the constraints for
2049 * folio migration, if any.
2050 * @reason: The reason for folio migration.
2051 * @ret_succeeded: Set to the number of folios migrated successfully if
2052 * the caller passes a non-NULL pointer.
2053 *
2054 * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
2055 * are movable any more because the list has become empty or no retryable folios
2056 * exist any more. It is the caller's responsibility to call putback_movable_pages()
2057 * only if ret != 0.
2058 *
2059 * Returns the number of {normal folios, large folios, hugetlb folios} that were
2060 * not migrated, or an error code. Each split large folio is counted as one
2061 * non-migrated large folio, no matter how many of its resulting split folios
2062 * are migrated successfully.
2063 */
2064 int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
2065 free_folio_t put_new_folio, unsigned long private,
2066 enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
2067 {
2068 int rc, rc_gather;
2069 int nr_pages;
2070 struct folio *folio, *folio2;
2071 LIST_HEAD(folios);
2072 LIST_HEAD(ret_folios);
2073 LIST_HEAD(split_folios);
2074 struct migrate_pages_stats stats;
2075
2076 trace_mm_migrate_pages_start(mode, reason);
2077
2078 memset(&stats, 0, sizeof(stats));
2079
2080 rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
2081 mode, reason, &stats, &ret_folios);
2082 if (rc_gather < 0)
2083 goto out;
2084
2085 again:
2086 nr_pages = 0;
2087 list_for_each_entry_safe(folio, folio2, from, lru) {
2088 /* Retried hugetlb folios will be kept in the list */
2089 if (folio_test_hugetlb(folio)) {
2090 list_move_tail(&folio->lru, &ret_folios);
2091 continue;
2092 }
2093
2094 nr_pages += folio_nr_pages(folio);
2095 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
2096 break;
2097 }
2098 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
2099 list_cut_before(&folios, from, &folio2->lru);
2100 else
2101 list_splice_init(from, &folios);
2102 if (mode == MIGRATE_ASYNC)
2103 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
2104 private, mode, reason, &ret_folios,
2105 &split_folios, &stats,
2106 NR_MAX_MIGRATE_PAGES_RETRY);
2107 else
2108 rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
2109 private, mode, reason, &ret_folios,
2110 &split_folios, &stats);
2111 list_splice_tail_init(&folios, &ret_folios);
2112 if (rc < 0) {
2113 rc_gather = rc;
2114 list_splice_tail(&split_folios, &ret_folios);
2115 goto out;
2116 }
2117 if (!list_empty(&split_folios)) {
2118 /*
2119 * Failure isn't counted, since all split folios of a large folio
2120 * are counted as 1 failure already. And we only try to migrate
2121 * with minimal effort: force MIGRATE_ASYNC mode and retry once.
2122 */
2123 migrate_pages_batch(&split_folios, get_new_folio,
2124 put_new_folio, private, MIGRATE_ASYNC, reason,
2125 &ret_folios, NULL, &stats, 1);
2126 list_splice_tail_init(&split_folios, &ret_folios);
2127 }
2128 rc_gather += rc;
2129 if (!list_empty(from))
2130 goto again;
2131 out:
2132 /*
2133 * Put the permanently failed folios back on the migration list;
2134 * the caller will put them back on the right list.
2135 */
2136 list_splice(&ret_folios, from);
2137
2138 /*
2139 * Return 0 in case all split folios of failed-to-migrate large folios
2140 * were migrated successfully.
2141 */
2142 if (list_empty(from))
2143 rc_gather = 0;
2144
2145 count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
2146 count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
2147 count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
2148 count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
2149 count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
2150 trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
2151 stats.nr_thp_succeeded, stats.nr_thp_failed,
2152 stats.nr_thp_split, stats.nr_split, mode,
2153 reason);
2154
2155 if (ret_succeeded)
2156 *ret_succeeded = stats.nr_succeeded;
2157
2158 return rc_gather;
2159 }
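/*
 * A minimal in-kernel usage sketch (illustrative only; compare
 * do_move_pages_to_node() below, and "target_nid" is a placeholder):
 * isolate folios onto a private list, migrate them, and put back whatever
 * could not be migrated:
 *
 *	LIST_HEAD(pagelist);
 *	struct migration_target_control mtc = {
 *		.nid = target_nid,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
 *		.reason = MR_SYSCALL,
 *	};
 *
 *	... isolate folios onto &pagelist, e.g. via folio_isolate_lru() ...
 *
 *	err = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL,
 *			    NULL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */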
2160
2161 struct folio *alloc_migration_target(struct folio *src, unsigned long private)
2162 {
2163 struct migration_target_control *mtc;
2164 gfp_t gfp_mask;
2165 unsigned int order = 0;
2166 int nid;
2167 int zidx;
2168
2169 mtc = (struct migration_target_control *)private;
2170 gfp_mask = mtc->gfp_mask;
2171 nid = mtc->nid;
2172 if (nid == NUMA_NO_NODE)
2173 nid = folio_nid(src);
2174
2175 if (folio_test_hugetlb(src)) {
2176 struct hstate *h = folio_hstate(src);
2177
2178 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
2179 return alloc_hugetlb_folio_nodemask(h, nid,
2180 mtc->nmask, gfp_mask,
2181 htlb_allow_alloc_fallback(mtc->reason));
2182 }
2183
2184 if (folio_test_large(src)) {
2185 /*
2186 * Clear __GFP_RECLAIM to make the migration callback
2187 * consistent with regular THP allocations.
2188 */
2189 gfp_mask &= ~__GFP_RECLAIM;
2190 gfp_mask |= GFP_TRANSHUGE;
2191 order = folio_order(src);
2192 }
2193 zidx = zone_idx(folio_zone(src));
2194 if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
2195 gfp_mask |= __GFP_HIGHMEM;
2196
2197 return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
2198 }
2199
2200 #ifdef CONFIG_NUMA
2201
2202 static int store_status(int __user *status, int start, int value, int nr)
2203 {
2204 while (nr-- > 0) {
2205 if (put_user(value, status + start))
2206 return -EFAULT;
2207 start++;
2208 }
2209
2210 return 0;
2211 }
2212
2213 static int do_move_pages_to_node(struct list_head *pagelist, int node)
2214 {
2215 int err;
2216 struct migration_target_control mtc = {
2217 .nid = node,
2218 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2219 .reason = MR_SYSCALL,
2220 };
2221
2222 err = migrate_pages(pagelist, alloc_migration_target, NULL,
2223 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
2224 if (err)
2225 putback_movable_pages(pagelist);
2226 return err;
2227 }
2228
2229 static int __add_folio_for_migration(struct folio *folio, int node,
2230 struct list_head *pagelist, bool migrate_all)
2231 {
2232 if (is_zero_folio(folio) || is_huge_zero_folio(folio))
2233 return -EFAULT;
2234
2235 if (folio_is_zone_device(folio))
2236 return -ENOENT;
2237
2238 if (folio_nid(folio) == node)
2239 return 0;
2240
2241 if (folio_maybe_mapped_shared(folio) && !migrate_all)
2242 return -EACCES;
2243
2244 if (folio_test_hugetlb(folio)) {
2245 if (folio_isolate_hugetlb(folio, pagelist))
2246 return 1;
2247 } else if (folio_isolate_lru(folio)) {
2248 list_add_tail(&folio->lru, pagelist);
2249 node_stat_mod_folio(folio,
2250 NR_ISOLATED_ANON + folio_is_file_lru(folio),
2251 folio_nr_pages(folio));
2252 return 1;
2253 }
2254 return -EBUSY;
2255 }
2256
2257 /*
2258 * Resolves the given address to a struct folio, isolates it from the LRU and
2259 * puts it on the given pagelist.
2260 * Returns:
2261 * errno - if the folio cannot be found/isolated
2262 * 0 - when it doesn't have to be migrated because it is already on the
2263 * target node
2264 * 1 - when it has been queued
2265 */
2266 static int add_folio_for_migration(struct mm_struct *mm, const void __user *p,
2267 int node, struct list_head *pagelist, bool migrate_all)
2268 {
2269 struct vm_area_struct *vma;
2270 struct folio_walk fw;
2271 struct folio *folio;
2272 unsigned long addr;
2273 int err = -EFAULT;
2274
2275 mmap_read_lock(mm);
2276 addr = (unsigned long)untagged_addr_remote(mm, p);
2277
2278 vma = vma_lookup(mm, addr);
2279 if (vma && vma_migratable(vma)) {
2280 folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
2281 if (folio) {
2282 err = __add_folio_for_migration(folio, node, pagelist,
2283 migrate_all);
2284 folio_walk_end(&fw, vma);
2285 } else {
2286 err = -ENOENT;
2287 }
2288 }
2289 mmap_read_unlock(mm);
2290 return err;
2291 }
2292
2293 static int move_pages_and_store_status(int node,
2294 struct list_head *pagelist, int __user *status,
2295 int start, int i, unsigned long nr_pages)
2296 {
2297 int err;
2298
2299 if (list_empty(pagelist))
2300 return 0;
2301
2302 err = do_move_pages_to_node(pagelist, node);
2303 if (err) {
2304 /*
2305 * A positive err means the number of pages that
2306 * failed to migrate. Since we are going to
2307 * abort and return the number of non-migrated
2308 * pages, we need to include the rest of the
2309 * nr_pages that have not been attempted as
2310 * well.
2311 */
2312 if (err > 0)
2313 err += nr_pages - i;
2314 return err;
2315 }
2316 return store_status(status, start, node, i - start);
2317 }
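/*
 * A worked example (illustrative): with nr_pages == 8, suppose migration of
 * the current batch fails at i == 5 with err == 2. Two pages of the batch
 * failed and pages 5..7 were never attempted, so the reported count becomes
 * err + (nr_pages - i) == 2 + 3 == 5 non-migrated pages.
 */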
2318
2319 /*
2320 * Migrate an array of page addresses onto an array of nodes and fill
2321 * in the corresponding array of status values.
2322 */
2323 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
2324 unsigned long nr_pages,
2325 const void __user * __user *pages,
2326 const int __user *nodes,
2327 int __user *status, int flags)
2328 {
2329 compat_uptr_t __user *compat_pages = (void __user *)pages;
2330 int current_node = NUMA_NO_NODE;
2331 LIST_HEAD(pagelist);
2332 int start, i;
2333 int err = 0, err1;
2334
2335 lru_cache_disable();
2336
2337 for (i = start = 0; i < nr_pages; i++) {
2338 const void __user *p;
2339 int node;
2340
2341 err = -EFAULT;
2342 if (in_compat_syscall()) {
2343 compat_uptr_t cp;
2344
2345 if (get_user(cp, compat_pages + i))
2346 goto out_flush;
2347
2348 p = compat_ptr(cp);
2349 } else {
2350 if (get_user(p, pages + i))
2351 goto out_flush;
2352 }
2353 if (get_user(node, nodes + i))
2354 goto out_flush;
2355
2356 err = -ENODEV;
2357 if (node < 0 || node >= MAX_NUMNODES)
2358 goto out_flush;
2359 if (!node_state(node, N_MEMORY))
2360 goto out_flush;
2361
2362 err = -EACCES;
2363 if (!node_isset(node, task_nodes))
2364 goto out_flush;
2365
2366 if (current_node == NUMA_NO_NODE) {
2367 current_node = node;
2368 start = i;
2369 } else if (node != current_node) {
2370 err = move_pages_and_store_status(current_node,
2371 &pagelist, status, start, i, nr_pages);
2372 if (err)
2373 goto out;
2374 start = i;
2375 current_node = node;
2376 }
2377
2378 /*
2379 * Errors in the page lookup or isolation are not fatal; we simply
2380 * report them via status.
2381 */
2382 err = add_folio_for_migration(mm, p, current_node, &pagelist,
2383 flags & MPOL_MF_MOVE_ALL);
2384
2385 if (err > 0) {
2386 /* The page is successfully queued for migration */
2387 continue;
2388 }
2389
2390 /*
2391 * If the page is already on the target node (!err), store the
2392 * node; otherwise, store the err.
2393 */
2394 err = store_status(status, i, err ? : current_node, 1);
2395 if (err)
2396 goto out_flush;
2397
2398 err = move_pages_and_store_status(current_node, &pagelist,
2399 status, start, i, nr_pages);
2400 if (err) {
2401 /* We have accounted for page i */
2402 if (err > 0)
2403 err--;
2404 goto out;
2405 }
2406 current_node = NUMA_NO_NODE;
2407 }
2408 out_flush:
2409 /* Make sure we do not overwrite the existing error */
2410 err1 = move_pages_and_store_status(current_node, &pagelist,
2411 status, start, i, nr_pages);
2412 if (err >= 0)
2413 err = err1;
2414 out:
2415 lru_cache_enable();
2416 return err;
2417 }
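/*
 * An illustrative example of the batching above: for nodes[] = {1, 1, 2},
 * pages 0 and 1 are queued onto pagelist for node 1; when node 2 is seen
 * at i == 2, the queued batch is flushed through
 * move_pages_and_store_status() and a new batch is started for node 2.
 * Consecutive runs of the same target node are therefore migrated with a
 * single migrate_pages() call.
 */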
2418
2419 /*
2420 * Determine the nodes of an array of pages and store them in an array of status.
2421 */
2422 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2423 const void __user **pages, int *status)
2424 {
2425 unsigned long i;
2426
2427 mmap_read_lock(mm);
2428
2429 for (i = 0; i < nr_pages; i++) {
2430 unsigned long addr = (unsigned long)(*pages);
2431 struct vm_area_struct *vma;
2432 struct folio_walk fw;
2433 struct folio *folio;
2434 int err = -EFAULT;
2435
2436 vma = vma_lookup(mm, addr);
2437 if (!vma)
2438 goto set_status;
2439
2440 folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
2441 if (folio) {
2442 if (is_zero_folio(folio) || is_huge_zero_folio(folio))
2443 err = -EFAULT;
2444 else if (folio_is_zone_device(folio))
2445 err = -ENOENT;
2446 else
2447 err = folio_nid(folio);
2448 folio_walk_end(&fw, vma);
2449 } else {
2450 err = -ENOENT;
2451 }
2452 set_status:
2453 *status = err;
2454
2455 pages++;
2456 status++;
2457 }
2458
2459 mmap_read_unlock(mm);
2460 }
2461
2462 static int get_compat_pages_array(const void __user *chunk_pages[],
2463 const void __user * __user *pages,
2464 unsigned long chunk_offset,
2465 unsigned long chunk_nr)
2466 {
2467 compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2468 compat_uptr_t p;
2469 int i;
2470
2471 for (i = 0; i < chunk_nr; i++) {
2472 if (get_user(p, pages32 + chunk_offset + i))
2473 return -EFAULT;
2474 chunk_pages[i] = compat_ptr(p);
2475 }
2476
2477 return 0;
2478 }
2479
2480 /*
2481 * Determine the nodes of a user array of pages and store them in
2482 * a user array of status.
2483 */
2484 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2485 const void __user * __user *pages,
2486 int __user *status)
2487 {
2488 #define DO_PAGES_STAT_CHUNK_NR 16UL
2489 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2490 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
2491 unsigned long chunk_offset = 0;
2492
2493 while (nr_pages) {
2494 unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
2495
2496 if (in_compat_syscall()) {
2497 if (get_compat_pages_array(chunk_pages, pages,
2498 chunk_offset, chunk_nr))
2499 break;
2500 } else {
2501 if (copy_from_user(chunk_pages, pages + chunk_offset,
2502 chunk_nr * sizeof(*chunk_pages)))
2503 break;
2504 }
2505
2506 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2507
2508 if (copy_to_user(status + chunk_offset, chunk_status,
2509 chunk_nr * sizeof(*status)))
2510 break;
2511
2512 chunk_offset += chunk_nr;
2513 nr_pages -= chunk_nr;
2514 }
2515 return nr_pages ? -EFAULT : 0;
2516 }
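/*
 * An illustrative example: for nr_pages == 35 the loop above runs three
 * chunks of 16, 16 and 3 entries. A fault while copying any chunk breaks
 * out early, and the leftover nr_pages turns the return value into
 * -EFAULT.
 */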
2517
2518 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
2519 {
2520 struct task_struct *task;
2521 struct mm_struct *mm;
2522
2523 /*
2524 * There is no need to check if the current process has the right to
2525 * modify the specified process when they are the same.
2526 */
2527 if (!pid) {
2528 mmget(current->mm);
2529 *mem_nodes = cpuset_mems_allowed(current);
2530 return current->mm;
2531 }
2532
2533 task = find_get_task_by_vpid(pid);
2534 if (!task) {
2535 return ERR_PTR(-ESRCH);
2536 }
2537
2538 /*
2539 * Check if this process has the right to modify the specified
2540 * process. Use the regular "ptrace_may_access()" checks.
2541 */
2542 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2543 mm = ERR_PTR(-EPERM);
2544 goto out;
2545 }
2546
2547 mm = ERR_PTR(security_task_movememory(task));
2548 if (IS_ERR(mm))
2549 goto out;
2550 *mem_nodes = cpuset_mems_allowed(task);
2551 mm = get_task_mm(task);
2552 out:
2553 put_task_struct(task);
2554 if (!mm)
2555 mm = ERR_PTR(-EINVAL);
2556 return mm;
2557 }
2558
2559 /*
2560 * Move a list of pages in the address space of the process identified
2561 * by pid (or of the currently executing process when pid is 0).
2562 */
2563 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2564 const void __user * __user *pages,
2565 const int __user *nodes,
2566 int __user *status, int flags)
2567 {
2568 struct mm_struct *mm;
2569 int err;
2570 nodemask_t task_nodes;
2571
2572 /* Check flags */
2573 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2574 return -EINVAL;
2575
2576 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2577 return -EPERM;
2578
2579 mm = find_mm_struct(pid, &task_nodes);
2580 if (IS_ERR(mm))
2581 return PTR_ERR(mm);
2582
2583 if (nodes)
2584 err = do_pages_move(mm, task_nodes, nr_pages, pages,
2585 nodes, status, flags);
2586 else
2587 err = do_pages_stat(mm, nr_pages, pages, status);
2588
2589 mmput(mm);
2590 return err;
2591 }
2592
2593 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2594 const void __user * __user *, pages,
2595 const int __user *, nodes,
2596 int __user *, status, int, flags)
2597 {
2598 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2599 }
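/*
 * A hedged userspace sketch of this syscall (illustrative only, not part
 * of the kernel build; it uses the libnuma wrapper, and "some_addr" is a
 * placeholder for a page-aligned address in the calling process):
 *
 *	#include <numaif.h>
 *
 *	void *pages[1] = { some_addr };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *	long rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *
 * rc < 0 indicates an error, rc > 0 the number of non-migrated pages, and
 * status[0] then holds either the node the page ended up on or a negative
 * errno for that page. Passing nodes == NULL instead queries the current
 * node of each page (the do_pages_stat() path above) without migrating.
 */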
2600
2601 #ifdef CONFIG_NUMA_BALANCING
2602 /*
2603 * Returns true if this is a safe migration target node for misplaced NUMA
2604 * pages. Currently it only checks the watermarks, which is crude.
2605 */
2606 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2607 unsigned long nr_migrate_pages)
2608 {
2609 int z;
2610
2611 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2612 struct zone *zone = pgdat->node_zones + z;
2613
2614 if (!managed_zone(zone))
2615 continue;
2616
2617 /* Avoid waking kswapd by allocating nr_migrate_pages pages. */
2618 if (!zone_watermark_ok(zone, 0,
2619 high_wmark_pages(zone) +
2620 nr_migrate_pages,
2621 ZONE_MOVABLE, ALLOC_CMA))
2622 continue;
2623 return true;
2624 }
2625 return false;
2626 }
2627
2628 static struct folio *alloc_misplaced_dst_folio(struct folio *src,
2629 unsigned long data)
2630 {
2631 int nid = (int) data;
2632 int order = folio_order(src);
2633 gfp_t gfp = __GFP_THISNODE;
2634
2635 if (order > 0)
2636 gfp |= GFP_TRANSHUGE_LIGHT;
2637 else {
2638 gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2639 __GFP_NOWARN;
2640 gfp &= ~__GFP_RECLAIM;
2641 }
2642 return __folio_alloc_node(gfp, order, nid);
2643 }
2644
2645 /*
2646 * Prepare for calling migrate_misplaced_folio() by isolating the folio if
2647 * permitted. Must be called with the PTL still held.
2648 */
2649 int migrate_misplaced_folio_prepare(struct folio *folio,
2650 struct vm_area_struct *vma, int node)
2651 {
2652 int nr_pages = folio_nr_pages(folio);
2653 pg_data_t *pgdat = NODE_DATA(node);
2654
2655 if (folio_is_file_lru(folio)) {
2656 /*
2657 * Do not migrate file folios that are mapped in multiple
2658 * processes with execute permissions as they are probably
2659 * shared libraries.
2660 *
2661 * See folio_maybe_mapped_shared() on possible imprecision
2662 * when we cannot easily detect if a folio is shared.
2663 */
2664 if ((vma->vm_flags & VM_EXEC) && folio_maybe_mapped_shared(folio))
2665 return -EACCES;
2666
2667 /*
2668 * Do not migrate dirty folios, as not all filesystems can move
2669 * dirty folios in MIGRATE_ASYNC mode, which would be a waste of
2670 * cycles.
2671 */
2672 if (folio_test_dirty(folio))
2673 return -EAGAIN;
2674 }
2675
2676 /* Avoid migrating to a node that is nearly full */
2677 if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2678 int z;
2679
2680 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2681 return -EAGAIN;
2682 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2683 if (managed_zone(pgdat->node_zones + z))
2684 break;
2685 }
2686
2687 /*
2688 * If there are no managed zones, it should not proceed
2689 * further.
2690 */
2691 if (z < 0)
2692 return -EAGAIN;
2693
2694 wakeup_kswapd(pgdat->node_zones + z, 0,
2695 folio_order(folio), ZONE_MOVABLE);
2696 return -EAGAIN;
2697 }
2698
2699 if (!folio_isolate_lru(folio))
2700 return -EAGAIN;
2701
2702 node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
2703 nr_pages);
2704 return 0;
2705 }
2706
2707 /*
2708 * Attempt to migrate a misplaced folio to the specified destination
2709 * node. The caller is expected to have isolated the folio by calling
2710 * migrate_misplaced_folio_prepare(), which will result in an
2711 * elevated reference count on the folio. This function will un-isolate
2712 * the folio, dropping that reference before returning.
2713 */
2714 int migrate_misplaced_folio(struct folio *folio, int node)
2715 {
2716 pg_data_t *pgdat = NODE_DATA(node);
2717 int nr_remaining;
2718 unsigned int nr_succeeded;
2719 LIST_HEAD(migratepages);
2720 struct mem_cgroup *memcg = get_mem_cgroup_from_folio(folio);
2721 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
2722
2723 list_add(&folio->lru, &migratepages);
2724 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
2725 NULL, node, MIGRATE_ASYNC,
2726 MR_NUMA_MISPLACED, &nr_succeeded);
2727 if (nr_remaining && !list_empty(&migratepages))
2728 putback_movable_pages(&migratepages);
2729 if (nr_succeeded) {
2730 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2731 count_memcg_events(memcg, NUMA_PAGE_MIGRATE, nr_succeeded);
2732 if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)
2733 && !node_is_toptier(folio_nid(folio))
2734 && node_is_toptier(node))
2735 mod_lruvec_state(lruvec, PGPROMOTE_SUCCESS, nr_succeeded);
2736 }
2737 mem_cgroup_put(memcg);
2738 BUG_ON(!list_empty(&migratepages));
2739 return nr_remaining ? -EAGAIN : 0;
2740 }
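/*
 * A condensed sketch of how the NUMA hinting fault path is expected to use
 * the two functions above (illustrative; modelled on do_numa_page()-style
 * callers, with locking details elided):
 *
 *	... PTL held, target_nid chosen ...
 *	if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
 *		... leave the folio where it is ...
 *	} else {
 *		pte_unmap_unlock(ptep, ptl);
 *		migrate_misplaced_folio(folio, target_nid);
 *	}
 *
 * The prepare step must run under the PTL; the migration itself runs after
 * the lock has been dropped.
 */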
2741 #endif /* CONFIG_NUMA_BALANCING */
2742 #endif /* CONFIG_NUMA */
2743