1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Memory Migration functionality - linux/mm/migrate.c
4 *
5 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6 *
7 * Page migration was first developed in the context of the memory hotplug
8 * project. The main authors of the migration code are:
9 *
10 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11 * Hirokazu Takahashi <taka@valinux.co.jp>
12 * Dave Hansen <haveblue@us.ibm.com>
13 * Christoph Lameter
14 */
15
16 #include <linux/migrate.h>
17 #include <linux/export.h>
18 #include <linux/swap.h>
19 #include <linux/leafops.h>
20 #include <linux/pagemap.h>
21 #include <linux/buffer_head.h>
22 #include <linux/mm_inline.h>
23 #include <linux/ksm.h>
24 #include <linux/rmap.h>
25 #include <linux/topology.h>
26 #include <linux/cpu.h>
27 #include <linux/cpuset.h>
28 #include <linux/writeback.h>
29 #include <linux/mempolicy.h>
30 #include <linux/vmalloc.h>
31 #include <linux/security.h>
32 #include <linux/backing-dev.h>
33 #include <linux/compaction.h>
34 #include <linux/syscalls.h>
35 #include <linux/compat.h>
36 #include <linux/hugetlb.h>
37 #include <linux/gfp.h>
38 #include <linux/page_idle.h>
39 #include <linux/page_owner.h>
40 #include <linux/sched/mm.h>
41 #include <linux/ptrace.h>
42 #include <linux/memory.h>
43 #include <linux/sched/sysctl.h>
44 #include <linux/memory-tiers.h>
45 #include <linux/pagewalk.h>
46
47 #include <asm/tlbflush.h>
48
49 #include <trace/events/migrate.h>
50
51 #include "internal.h"
52 #include "swap.h"
53
54 static const struct movable_operations *offline_movable_ops;
55 static const struct movable_operations *zsmalloc_movable_ops;
56
57 int set_movable_ops(const struct movable_operations *ops, enum pagetype type)
58 {
59 /*
60 * We only allow for selected types and don't handle concurrent
61 * registration attempts yet.
62 */
63 switch (type) {
64 case PGTY_offline:
65 if (offline_movable_ops && ops)
66 return -EBUSY;
67 offline_movable_ops = ops;
68 break;
69 case PGTY_zsmalloc:
70 if (zsmalloc_movable_ops && ops)
71 return -EBUSY;
72 zsmalloc_movable_ops = ops;
73 break;
74 default:
75 return -EINVAL;
76 }
77 return 0;
78 }
79 EXPORT_SYMBOL_GPL(set_movable_ops);
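/*
 * Illustrative registration sketch (not part of this file): a driver that
 * owns PGTY_zsmalloc pages could register and later unregister its
 * movable_operations roughly as follows. The callback names are
 * hypothetical; only set_movable_ops() and the page type are taken from the
 * code above.
 *
 *	static const struct movable_operations my_mops = {
 *		.isolate_page	= my_isolate_page,
 *		.migrate_page	= my_migrate_page,
 *		.putback_page	= my_putback_page,
 *	};
 *
 *	// init: fails with -EBUSY if another owner is already registered
 *	err = set_movable_ops(&my_mops, PGTY_zsmalloc);
 *
 *	// teardown: passing NULL unregisters the ops
 *	set_movable_ops(NULL, PGTY_zsmalloc);
 */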
80
81 static const struct movable_operations *page_movable_ops(struct page *page)
82 {
83 VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
84
85 /*
86 * If we enable page migration for a page of a certain type by marking
87 * it as movable, the page type must be sticky until the page gets freed
88 * back to the buddy.
89 */
90 if (PageOffline(page))
91 /* Only balloon page migration sets PageOffline pages movable. */
92 return offline_movable_ops;
93 if (PageZsmalloc(page))
94 return zsmalloc_movable_ops;
95
96 return NULL;
97 }
98
99 /**
100 * isolate_movable_ops_page - isolate a movable_ops page for migration
101 * @page: The page.
102 * @mode: The isolation mode.
103 *
104 * Try to isolate a movable_ops page for migration. Will fail if the page is
105 * not a movable_ops page, if the page is already isolated for migration
106 * or if the page was just released by its owner.
107 *
108 * Once isolated, the page cannot get freed until it is either putback
109 * or migrated.
110 *
111 * Returns true if isolation succeeded, otherwise false.
112 */
113 bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
114 {
115 /*
116 * TODO: these pages will not be folios in the future. All
117 * folio dependencies will have to be removed.
118 */
119 struct folio *folio = folio_get_nontail_page(page);
120 const struct movable_operations *mops;
121
122 /*
123 * Avoid burning cycles with pages that are still under __free_pages(),
124 * or just got freed under us.
125 *
126 * In case we 'win' a race for a movable page being freed under us and
127 * raise its refcount, preventing __free_pages() from doing its job,
128 * the put_page() at the end of this block will take care of
129 * releasing this page, thus avoiding a nasty leakage.
130 */
131 if (!folio)
132 goto out;
133
134 /*
135 * Check for movable_ops pages before taking the page lock because
136 * we use non-atomic bitops on newly allocated page flags so
137 * unconditionally grabbing the lock ruins page's owner side.
138 *
139 * Note that once a page has movable_ops, it will stay that way
140 * until the page was freed.
141 */
142 if (unlikely(!page_has_movable_ops(page)))
143 goto out_putfolio;
144
145 /*
146 * As movable pages are not isolated from LRU lists, concurrent
147 * compaction threads can race against page migration functions
148 * as well as race against the release of a page.
149 *
150 * In order to avoid having an already isolated movable page
151 * being (wrongly) re-isolated while it is under migration,
152 * or to avoid attempting to isolate pages being released,
153 * let's be sure we have the page lock
154 * before proceeding with the movable page isolation steps.
155 */
156 if (unlikely(!folio_trylock(folio)))
157 goto out_putfolio;
158
159 VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
160 if (PageMovableOpsIsolated(page))
161 goto out_no_isolated;
162
163 mops = page_movable_ops(page);
164 if (WARN_ON_ONCE(!mops))
165 goto out_no_isolated;
166
167 if (!mops->isolate_page(page, mode))
168 goto out_no_isolated;
169
170 /* Driver shouldn't use the isolated flag */
171 VM_WARN_ON_ONCE_PAGE(PageMovableOpsIsolated(page), page);
172 SetPageMovableOpsIsolated(page);
173 folio_unlock(folio);
174
175 return true;
176
177 out_no_isolated:
178 folio_unlock(folio);
179 out_putfolio:
180 folio_put(folio);
181 out:
182 return false;
183 }
184
185 /**
186 * putback_movable_ops_page - putback an isolated movable_ops page
187 * @page: The isolated page.
188 *
189 * Putback an isolated movable_ops page.
190 *
191 * After the page was put back, it might get freed instantly.
192 */
193 static void putback_movable_ops_page(struct page *page)
194 {
195 /*
196 * TODO: these pages will not be folios in the future. All
197 * folio dependencies will have to be removed.
198 */
199 struct folio *folio = page_folio(page);
200
201 VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
202 VM_WARN_ON_ONCE_PAGE(!PageMovableOpsIsolated(page), page);
203 folio_lock(folio);
204 page_movable_ops(page)->putback_page(page);
205 ClearPageMovableOpsIsolated(page);
206 folio_unlock(folio);
207 folio_put(folio);
208 }
209
210 /**
211 * migrate_movable_ops_page - migrate an isolated movable_ops page
212 * @dst: The destination page.
213 * @src: The source page.
214 * @mode: The migration mode.
215 *
216 * Migrate an isolated movable_ops page.
217 *
218 * If the src page was already released by its owner, the src page is
219 * un-isolated (putback) and migration succeeds; the migration core will be the
220 * owner of both pages.
221 *
222 * If the src page was not released by its owner and the migration was
223 * successful, the owner of the src page and the dst page are swapped and
224 * the src page is un-isolated.
225 *
226 * If migration fails, the ownership stays unmodified and the src page
227 * remains isolated: migration may be retried later or the page can be putback.
228 *
229 * TODO: migration core will treat both pages as folios and lock them before
230 * this call and unlock them after it. Further, the folio refcounts on
231 * src and dst are also released by migration core. These pages will not be
232 * folios in the future, so that must be reworked.
233 *
234 * Returns 0 on success, otherwise a negative error code.
235 */
236 static int migrate_movable_ops_page(struct page *dst, struct page *src,
237 enum migrate_mode mode)
238 {
239 int rc;
240
241 VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(src), src);
242 VM_WARN_ON_ONCE_PAGE(!PageMovableOpsIsolated(src), src);
243 rc = page_movable_ops(src)->migrate_page(dst, src, mode);
244 if (!rc)
245 ClearPageMovableOpsIsolated(src);
246 return rc;
247 }
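/*
 * Rough sketch (simplified, assumptions noted) of how the three helpers
 * above fit together from the migration core's point of view; the real
 * callers in this file also handle folio locking, refcounts and accounting:
 *
 *	if (isolate_movable_ops_page(page, mode)) {
 *		if (migrate_movable_ops_page(dst, page, migrate_mode))
 *			// failed: page stays isolated, retry later or
 *			// give it back to its owner
 *			putback_movable_ops_page(page);
 *	}
 */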
248
249 /*
250 * Put previously isolated pages back onto the appropriate lists
251 * from where they were once taken off for compaction/migration.
252 *
253 * This function shall be used whenever the isolated pageset has been
254 * built from lru, balloon or hugetlbfs pages. See isolate_migratepages_range()
255 * and folio_isolate_hugetlb().
256 */
257 void putback_movable_pages(struct list_head *l)
258 {
259 struct folio *folio;
260 struct folio *folio2;
261
262 list_for_each_entry_safe(folio, folio2, l, lru) {
263 if (unlikely(folio_test_hugetlb(folio))) {
264 folio_putback_hugetlb(folio);
265 continue;
266 }
267 list_del(&folio->lru);
268 if (unlikely(page_has_movable_ops(&folio->page))) {
269 putback_movable_ops_page(&folio->page);
270 } else {
271 node_stat_mod_folio(folio, NR_ISOLATED_ANON +
272 folio_is_file_lru(folio), -folio_nr_pages(folio));
273 folio_putback_lru(folio);
274 }
275 }
276 }
277
278 /* Must be called with an elevated refcount on the non-hugetlb folio */
279 bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
280 {
281 if (folio_test_hugetlb(folio))
282 return folio_isolate_hugetlb(folio, list);
283
284 if (page_has_movable_ops(&folio->page)) {
285 if (!isolate_movable_ops_page(&folio->page,
286 ISOLATE_UNEVICTABLE))
287 return false;
288 } else {
289 if (!folio_isolate_lru(folio))
290 return false;
291 node_stat_add_folio(folio, NR_ISOLATED_ANON +
292 folio_is_file_lru(folio));
293 }
294 list_add(&folio->lru, list);
295 return true;
296 }
297
298 static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
299 struct folio *folio, pte_t old_pte, unsigned long idx)
300 {
301 struct page *page = folio_page(folio, idx);
302 pte_t newpte;
303
304 if (PageCompound(page) || PageHWPoison(page))
305 return false;
306
307 VM_BUG_ON_PAGE(!PageAnon(page), page);
308 VM_BUG_ON_PAGE(!PageLocked(page), page);
309 VM_BUG_ON_PAGE(pte_present(old_pte), page);
310 VM_WARN_ON_ONCE_FOLIO(folio_is_device_private(folio), folio);
311
312 if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
313 mm_forbids_zeropage(pvmw->vma->vm_mm))
314 return false;
315
316 /*
317 * The pmd entry mapping the old thp was flushed and the pte mapping
318 * this subpage is now non-present. If the subpage is only zero-filled
319 * then map it to the shared zeropage.
320 */
321 if (!pages_identical(page, ZERO_PAGE(0)))
322 return false;
323
324 newpte = pte_mkspecial(pfn_pte(zero_pfn(pvmw->address),
325 pvmw->vma->vm_page_prot));
326
327 if (pte_swp_soft_dirty(old_pte))
328 newpte = pte_mksoft_dirty(newpte);
329 if (pte_swp_uffd_wp(old_pte))
330 newpte = pte_mkuffd_wp(newpte);
331
332 set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);
333
334 dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));
335 return true;
336 }
337
338 struct rmap_walk_arg {
339 struct folio *folio;
340 bool map_unused_to_zeropage;
341 };
342
343 /*
344 * Restore a potential migration pte to a working pte entry
345 */
346 static bool remove_migration_pte(struct folio *folio,
347 struct vm_area_struct *vma, unsigned long addr, void *arg)
348 {
349 struct rmap_walk_arg *rmap_walk_arg = arg;
350 DEFINE_FOLIO_VMA_WALK(pvmw, rmap_walk_arg->folio, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
351
352 while (page_vma_mapped_walk(&pvmw)) {
353 rmap_t rmap_flags = RMAP_NONE;
354 pte_t old_pte;
355 pte_t pte;
356 softleaf_t entry;
357 struct page *new;
358 unsigned long idx = 0;
359
360 /* pgoff is invalid for ksm pages, but they are never large */
361 if (folio_test_large(folio) && !folio_test_hugetlb(folio))
362 idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
363 new = folio_page(folio, idx);
364
365 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
366 /* PMD-mapped THP migration entry */
367 if (!pvmw.pte) {
368 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
369 !folio_test_pmd_mappable(folio), folio);
370 remove_migration_pmd(&pvmw, new);
371 continue;
372 }
373 #endif
374 old_pte = ptep_get(pvmw.pte);
375 if (rmap_walk_arg->map_unused_to_zeropage &&
376 try_to_map_unused_to_zeropage(&pvmw, folio, old_pte, idx))
377 continue;
378
379 folio_get(folio);
380 pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
381
382 entry = softleaf_from_pte(old_pte);
383 if (!softleaf_is_migration_young(entry))
384 pte = pte_mkold(pte);
385 if (folio_test_dirty(folio) && softleaf_is_migration_dirty(entry))
386 pte = pte_mkdirty(pte);
387 if (pte_swp_soft_dirty(old_pte))
388 pte = pte_mksoft_dirty(pte);
389 else
390 pte = pte_clear_soft_dirty(pte);
391
392 if (softleaf_is_migration_write(entry))
393 pte = pte_mkwrite(pte, vma);
394 else if (pte_swp_uffd_wp(old_pte))
395 pte = pte_mkuffd_wp(pte);
396
397 if (folio_test_anon(folio) && !softleaf_is_migration_read(entry))
398 rmap_flags |= RMAP_EXCLUSIVE;
399
400 if (unlikely(is_device_private_page(new))) {
401 if (pte_write(pte))
402 entry = make_writable_device_private_entry(
403 page_to_pfn(new));
404 else
405 entry = make_readable_device_private_entry(
406 page_to_pfn(new));
407 pte = softleaf_to_pte(entry);
408 if (pte_swp_soft_dirty(old_pte))
409 pte = pte_swp_mksoft_dirty(pte);
410 if (pte_swp_uffd_wp(old_pte))
411 pte = pte_swp_mkuffd_wp(pte);
412 }
413
414 #ifdef CONFIG_HUGETLB_PAGE
415 if (folio_test_hugetlb(folio)) {
416 struct hstate *h = hstate_vma(vma);
417 unsigned int shift = huge_page_shift(h);
418 unsigned long psize = huge_page_size(h);
419
420 pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
421 if (folio_test_anon(folio))
422 hugetlb_add_anon_rmap(folio, vma, pvmw.address,
423 rmap_flags);
424 else
425 hugetlb_add_file_rmap(folio);
426 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
427 psize);
428 } else
429 #endif
430 {
431 if (folio_test_anon(folio))
432 folio_add_anon_rmap_pte(folio, new, vma,
433 pvmw.address, rmap_flags);
434 else
435 folio_add_file_rmap_pte(folio, new, vma);
436 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
437 }
438 if (READ_ONCE(vma->vm_flags) & VM_LOCKED)
439 mlock_drain_local();
440
441 trace_remove_migration_pte(pvmw.address, pte_val(pte),
442 compound_order(new));
443
444 /* No need to invalidate - it was non-present before */
445 update_mmu_cache(vma, pvmw.address, pvmw.pte);
446 }
447
448 return true;
449 }
450
451 /*
452 * Get rid of all migration entries and replace them by
453 * references to the indicated page.
454 */
455 void remove_migration_ptes(struct folio *src, struct folio *dst,
456 enum ttu_flags flags)
457 {
458 struct rmap_walk_arg rmap_walk_arg = {
459 .folio = src,
460 .map_unused_to_zeropage = flags & TTU_USE_SHARED_ZEROPAGE,
461 };
462
463 struct rmap_walk_control rwc = {
464 .rmap_one = remove_migration_pte,
465 .arg = &rmap_walk_arg,
466 };
467
468 VM_BUG_ON_FOLIO((flags & TTU_USE_SHARED_ZEROPAGE) && (src != dst), src);
469
470 if (flags & TTU_RMAP_LOCKED)
471 rmap_walk_locked(dst, &rwc);
472 else
473 rmap_walk(dst, &rwc);
474 }
475
476 /*
477 * Something used the pte of a page under migration. We need to
478 * get to the page and wait until migration is finished.
479 * When we return from this function the fault will be retried.
480 */
481 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
482 unsigned long address)
483 {
484 spinlock_t *ptl;
485 pte_t *ptep;
486 pte_t pte;
487 softleaf_t entry;
488
489 ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
490 if (!ptep)
491 return;
492
493 pte = ptep_get(ptep);
494 pte_unmap(ptep);
495
496 if (pte_none(pte) || pte_present(pte))
497 goto out;
498
499 entry = softleaf_from_pte(pte);
500 if (!softleaf_is_migration(entry))
501 goto out;
502
503 softleaf_entry_wait_on_locked(entry, ptl);
504 return;
505 out:
506 spin_unlock(ptl);
507 }
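/*
 * Typical caller-side sketch: a page fault path that finds a non-present
 * pte encoding a migration entry waits for the migration to finish and lets
 * the fault be retried. The surrounding fault-handler plumbing here is
 * hypothetical; only the softleaf helpers and migration_entry_wait() match
 * this file:
 *
 *	entry = softleaf_from_pte(vmf->orig_pte);
 *	if (softleaf_is_migration(entry)) {
 *		migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
 *		return 0;	// fault will be retried
 *	}
 */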
508
509 #ifdef CONFIG_HUGETLB_PAGE
510 /*
511 * The vma read lock must be held upon entry. Holding that lock prevents either
512 * the pte or the ptl from being freed.
513 *
514 * This function will release the vma lock before returning.
515 */
516 void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
517 {
518 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
519 softleaf_t entry;
520 pte_t pte;
521
522 hugetlb_vma_assert_locked(vma);
523 spin_lock(ptl);
524 pte = huge_ptep_get(vma->vm_mm, addr, ptep);
525
526 if (huge_pte_none(pte))
527 goto fail;
528
529 entry = softleaf_from_pte(pte);
530 if (softleaf_is_migration(entry)) {
531 /*
532 * If migration entry existed, safe to release vma lock
533 * here because the pgtable page won't be freed without the
534 * pgtable lock released. See comment right above pgtable
535 * lock release in softleaf_entry_wait_on_locked().
536 */
537 hugetlb_vma_unlock_read(vma);
538 softleaf_entry_wait_on_locked(entry, ptl);
539 return;
540 }
541
542 fail:
543 spin_unlock(ptl);
544 hugetlb_vma_unlock_read(vma);
545 }
546 #endif
547
548 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
549 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
550 {
551 spinlock_t *ptl;
552
553 ptl = pmd_lock(mm, pmd);
554 if (!pmd_is_migration_entry(*pmd))
555 goto unlock;
556 softleaf_entry_wait_on_locked(softleaf_from_pmd(*pmd), ptl);
557 return;
558 unlock:
559 spin_unlock(ptl);
560 }
561 #endif
562
563 /*
564 * Replace the folio in the mapping.
565 *
566 * The number of remaining references must be:
567 * 1 for anonymous folios without a mapping
568 * 2 for folios with a mapping
569 * 3 for folios with a mapping and the private flag set.
570 */
571 static int __folio_migrate_mapping(struct address_space *mapping,
572 struct folio *newfolio, struct folio *folio, int expected_count)
573 {
574 XA_STATE(xas, &mapping->i_pages, folio->index);
575 struct swap_cluster_info *ci = NULL;
576 struct zone *oldzone, *newzone;
577 int dirty;
578 long nr = folio_nr_pages(folio);
579
580 if (!mapping) {
581 /* Take off deferred split queue while frozen and memcg set */
582 if (folio_test_large(folio) &&
583 folio_test_large_rmappable(folio)) {
584 if (!folio_ref_freeze(folio, expected_count))
585 return -EAGAIN;
586 folio_unqueue_deferred_split(folio);
587 folio_ref_unfreeze(folio, expected_count);
588 }
589
590 /* No turning back from here */
591 newfolio->index = folio->index;
592 newfolio->mapping = folio->mapping;
593 if (folio_test_anon(folio) && folio_test_large(folio))
594 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
595 if (folio_test_swapbacked(folio))
596 __folio_set_swapbacked(newfolio);
597
598 return 0;
599 }
600
601 oldzone = folio_zone(folio);
602 newzone = folio_zone(newfolio);
603
604 if (folio_test_swapcache(folio))
605 ci = swap_cluster_get_and_lock_irq(folio);
606 else
607 xas_lock_irq(&xas);
608
609 if (!folio_ref_freeze(folio, expected_count)) {
610 if (ci)
611 swap_cluster_unlock_irq(ci);
612 else
613 xas_unlock_irq(&xas);
614 return -EAGAIN;
615 }
616
617 /* Take off deferred split queue while frozen and memcg set */
618 folio_unqueue_deferred_split(folio);
619
620 /*
621 * Now we know that no one else is looking at the folio:
622 * no turning back from here.
623 */
624 newfolio->index = folio->index;
625 newfolio->mapping = folio->mapping;
626 if (folio_test_anon(folio) && folio_test_large(folio))
627 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
628 folio_ref_add(newfolio, nr); /* add cache reference */
629 if (folio_test_swapbacked(folio))
630 __folio_set_swapbacked(newfolio);
631 if (folio_test_swapcache(folio)) {
632 folio_set_swapcache(newfolio);
633 newfolio->private = folio_get_private(folio);
634 }
635
636 /* Move dirty while folio refs frozen and newfolio not yet exposed */
637 dirty = folio_test_dirty(folio);
638 if (dirty) {
639 folio_clear_dirty(folio);
640 folio_set_dirty(newfolio);
641 }
642
643 if (folio_test_swapcache(folio))
644 __swap_cache_replace_folio(ci, folio, newfolio);
645 else
646 xas_store(&xas, newfolio);
647
648 /*
649 * Drop cache reference from old folio by unfreezing
650 * to one less reference.
651 * We know this isn't the last reference.
652 */
653 folio_ref_unfreeze(folio, expected_count - nr);
654
655 /* Leave irq disabled to prevent preemption while updating stats */
656 if (ci)
657 swap_cluster_unlock(ci);
658 else
659 xas_unlock(&xas);
660
661 /*
662 * If moved to a different zone then also account
663 * the folio for that zone. Other VM counters will be
664 * taken care of when we establish references to the
665 * new folio and drop references to the old folio.
666 *
667 * Note that anonymous folios are accounted for
668 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
669 * are mapped to swap space.
670 */
671 if (newzone != oldzone) {
672 struct lruvec *old_lruvec, *new_lruvec;
673 struct mem_cgroup *memcg;
674
675 rcu_read_lock();
676 memcg = folio_memcg(folio);
677 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
678 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
679
680 mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
681 mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
682 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
683 mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
684 mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
685
686 if (folio_test_pmd_mappable(folio)) {
687 mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
688 mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
689 }
690 }
691 #ifdef CONFIG_SWAP
692 if (folio_test_swapcache(folio)) {
693 mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
694 mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
695 }
696 #endif
697 if (dirty && mapping_can_writeback(mapping)) {
698 mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
699 __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
700 mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
701 __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
702 }
703 rcu_read_unlock();
704 }
705 local_irq_enable();
706
707 return 0;
708 }
709
710 int folio_migrate_mapping(struct address_space *mapping,
711 struct folio *newfolio, struct folio *folio, int extra_count)
712 {
713 int expected_count = folio_expected_ref_count(folio) + extra_count + 1;
714
715 if (folio_ref_count(folio) != expected_count)
716 return -EAGAIN;
717
718 return __folio_migrate_mapping(mapping, newfolio, folio, expected_count);
719 }
720 EXPORT_SYMBOL(folio_migrate_mapping);
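/*
 * Usage sketch: a filesystem or driver implementing its own ->migrate_folio
 * callback would typically move the mapping first, then transfer private
 * state, copy the data and migrate the flags. my_migrate_folio() and the
 * private-state handling are hypothetical; whether this shape fits a given
 * filesystem is that filesystem's call:
 *
 *	static int my_migrate_folio(struct address_space *mapping,
 *			struct folio *dst, struct folio *src,
 *			enum migrate_mode mode)
 *	{
 *		int rc;
 *
 *		rc = folio_migrate_mapping(mapping, dst, src, 0);
 *		if (rc)
 *			return rc;
 *
 *		folio_attach_private(dst, folio_detach_private(src));
 *		folio_copy(dst, src);
 *		folio_migrate_flags(dst, src);
 *		return 0;
 *	}
 */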
721
722 /*
723 * The expected number of remaining references is the same as that
724 * of folio_migrate_mapping().
725 */
726 int migrate_huge_page_move_mapping(struct address_space *mapping,
727 struct folio *dst, struct folio *src)
728 {
729 XA_STATE(xas, &mapping->i_pages, src->index);
730 int rc, expected_count = folio_expected_ref_count(src) + 1;
731
732 if (folio_ref_count(src) != expected_count)
733 return -EAGAIN;
734
735 rc = folio_mc_copy(dst, src);
736 if (unlikely(rc))
737 return rc;
738
739 xas_lock_irq(&xas);
740 if (!folio_ref_freeze(src, expected_count)) {
741 xas_unlock_irq(&xas);
742 return -EAGAIN;
743 }
744
745 dst->index = src->index;
746 dst->mapping = src->mapping;
747
748 folio_ref_add(dst, folio_nr_pages(dst));
749
750 xas_store(&xas, dst);
751
752 folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));
753
754 xas_unlock_irq(&xas);
755
756 return 0;
757 }
758
759 /*
760 * Copy the flags and some other ancillary information
761 */
762 void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
763 {
764 int cpupid;
765
766 if (folio_test_referenced(folio))
767 folio_set_referenced(newfolio);
768 if (folio_test_uptodate(folio))
769 folio_mark_uptodate(newfolio);
770 if (folio_test_clear_active(folio)) {
771 VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
772 folio_set_active(newfolio);
773 } else if (folio_test_clear_unevictable(folio))
774 folio_set_unevictable(newfolio);
775 if (folio_test_workingset(folio))
776 folio_set_workingset(newfolio);
777 if (folio_test_checked(folio))
778 folio_set_checked(newfolio);
779 /*
780 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
781 * migration entries. We can still have PG_anon_exclusive set on
782 * effectively unmapped and unreferenced first sub-pages of an
783 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
784 */
785 if (folio_test_mappedtodisk(folio))
786 folio_set_mappedtodisk(newfolio);
787
788 /* Move dirty on pages not done by folio_migrate_mapping() */
789 if (folio_test_dirty(folio))
790 folio_set_dirty(newfolio);
791
792 if (folio_test_young(folio))
793 folio_set_young(newfolio);
794 if (folio_test_idle(folio))
795 folio_set_idle(newfolio);
796
797 folio_migrate_refs(newfolio, folio);
798 /*
799 * Copy NUMA information to the new page, to prevent over-eager
800 * future migrations of this same page.
801 */
802 cpupid = folio_xchg_last_cpupid(folio, -1);
803 /*
804 * For memory tiering mode, when migrating between slow and fast
805 * memory nodes, reset cpupid, because it is used to record
806 * page access time in slow memory nodes.
807 */
808 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
809 bool f_toptier = node_is_toptier(folio_nid(folio));
810 bool t_toptier = node_is_toptier(folio_nid(newfolio));
811
812 if (f_toptier != t_toptier)
813 cpupid = -1;
814 }
815 folio_xchg_last_cpupid(newfolio, cpupid);
816
817 folio_migrate_ksm(newfolio, folio);
818 /*
819 * Please do not reorder this without considering how mm/ksm.c's
820 * ksm_get_folio() depends upon ksm_migrate_page() and the
821 * swapcache flag.
822 */
823 if (folio_test_swapcache(folio))
824 folio_clear_swapcache(folio);
825 folio_clear_private(folio);
826
827 /* page->private contains hugetlb specific flags */
828 if (!folio_test_hugetlb(folio))
829 folio->private = NULL;
830
831 /*
832 * If any waiters have accumulated on the new page then
833 * wake them up.
834 */
835 if (folio_test_writeback(newfolio))
836 folio_end_writeback(newfolio);
837
838 /*
839 * PG_readahead shares the same bit with PG_reclaim. The above
840 * folio_end_writeback() may clear PG_readahead mistakenly, so set the
841 * bit after that.
842 */
843 if (folio_test_readahead(folio))
844 folio_set_readahead(newfolio);
845
846 folio_copy_owner(newfolio, folio);
847 pgalloc_tag_swap(newfolio, folio);
848
849 mem_cgroup_migrate(folio, newfolio);
850 }
851 EXPORT_SYMBOL(folio_migrate_flags);
852
853 /************************************************************
854 * Migration functions
855 ***********************************************************/
856
857 static int __migrate_folio(struct address_space *mapping, struct folio *dst,
858 struct folio *src, void *src_private,
859 enum migrate_mode mode)
860 {
861 int rc, expected_count = folio_expected_ref_count(src) + 1;
862
863 /* Check whether src does not have extra refs before we do more work */
864 if (folio_ref_count(src) != expected_count)
865 return -EAGAIN;
866
867 rc = folio_mc_copy(dst, src);
868 if (unlikely(rc))
869 return rc;
870
871 rc = __folio_migrate_mapping(mapping, dst, src, expected_count);
872 if (rc)
873 return rc;
874
875 if (src_private)
876 folio_attach_private(dst, folio_detach_private(src));
877
878 folio_migrate_flags(dst, src);
879 return 0;
880 }
881
882 /**
883 * migrate_folio() - Simple folio migration.
884 * @mapping: The address_space containing the folio.
885 * @dst: The folio to migrate the data to.
886 * @src: The folio containing the current data.
887 * @mode: How to migrate the page.
888 *
889 * Common logic to directly migrate a single LRU folio suitable for
890 * folios that do not have private data.
891 *
892 * Folios are locked upon entry and exit.
893 */
894 int migrate_folio(struct address_space *mapping, struct folio *dst,
895 struct folio *src, enum migrate_mode mode)
896 {
897 BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */
898 return __migrate_folio(mapping, dst, src, NULL, mode);
899 }
900 EXPORT_SYMBOL(migrate_folio);
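/*
 * Usage sketch: filesystems whose folios never carry private data can point
 * their address_space_operations straight at migrate_folio(). The aops
 * below is illustrative only:
 *
 *	static const struct address_space_operations my_aops = {
 *		.read_folio	= my_read_folio,
 *		.writepages	= my_writepages,
 *		.migrate_folio	= migrate_folio,
 *	};
 */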
901
902 #ifdef CONFIG_BUFFER_HEAD
903 /* Returns true if all buffers are successfully locked */
904 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
905 enum migrate_mode mode)
906 {
907 struct buffer_head *bh = head;
908 struct buffer_head *failed_bh;
909
910 do {
911 if (!trylock_buffer(bh)) {
912 if (mode == MIGRATE_ASYNC)
913 goto unlock;
914 if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
915 goto unlock;
916 lock_buffer(bh);
917 }
918
919 bh = bh->b_this_page;
920 } while (bh != head);
921
922 return true;
923
924 unlock:
925 /* We failed to lock the buffer and cannot stall. */
926 failed_bh = bh;
927 bh = head;
928 while (bh != failed_bh) {
929 unlock_buffer(bh);
930 bh = bh->b_this_page;
931 }
932
933 return false;
934 }
935
936 static int __buffer_migrate_folio(struct address_space *mapping,
937 struct folio *dst, struct folio *src, enum migrate_mode mode,
938 bool check_refs)
939 {
940 struct buffer_head *bh, *head;
941 int rc;
942 int expected_count;
943
944 head = folio_buffers(src);
945 if (!head)
946 return migrate_folio(mapping, dst, src, mode);
947
948 /* Check whether page does not have extra refs before we do more work */
949 expected_count = folio_expected_ref_count(src) + 1;
950 if (folio_ref_count(src) != expected_count)
951 return -EAGAIN;
952
953 if (!buffer_migrate_lock_buffers(head, mode))
954 return -EAGAIN;
955
956 if (check_refs) {
957 bool busy, migrating;
958 bool invalidated = false;
959
960 migrating = test_and_set_bit_lock(BH_Migrate, &head->b_state);
961 VM_WARN_ON_ONCE(migrating);
962 recheck_buffers:
963 busy = false;
964 spin_lock(&mapping->i_private_lock);
965 bh = head;
966 do {
967 if (atomic_read(&bh->b_count)) {
968 busy = true;
969 break;
970 }
971 bh = bh->b_this_page;
972 } while (bh != head);
973 spin_unlock(&mapping->i_private_lock);
974 if (busy) {
975 if (invalidated) {
976 rc = -EAGAIN;
977 goto unlock_buffers;
978 }
979 invalidate_bh_lrus();
980 invalidated = true;
981 goto recheck_buffers;
982 }
983 }
984
985 rc = filemap_migrate_folio(mapping, dst, src, mode);
986 if (rc)
987 goto unlock_buffers;
988
989 bh = head;
990 do {
991 folio_set_bh(bh, dst, bh_offset(bh));
992 bh = bh->b_this_page;
993 } while (bh != head);
994
995 unlock_buffers:
996 if (check_refs)
997 clear_bit_unlock(BH_Migrate, &head->b_state);
998 bh = head;
999 do {
1000 unlock_buffer(bh);
1001 bh = bh->b_this_page;
1002 } while (bh != head);
1003
1004 return rc;
1005 }
1006
1007 /**
1008 * buffer_migrate_folio() - Migration function for folios with buffers.
1009 * @mapping: The address space containing @src.
1010 * @dst: The folio to migrate to.
1011 * @src: The folio to migrate from.
1012 * @mode: How to migrate the folio.
1013 *
1014 * This function can only be used if the underlying filesystem guarantees
1015 * that no other references to @src exist. For example attached buffer
1016 * heads are accessed only under the folio lock. If your filesystem cannot
1017 * provide this guarantee, buffer_migrate_folio_norefs() may be more
1018 * appropriate.
1019 *
1020 * Return: 0 on success or a negative errno on failure.
1021 */
1022 int buffer_migrate_folio(struct address_space *mapping,
1023 struct folio *dst, struct folio *src, enum migrate_mode mode)
1024 {
1025 return __buffer_migrate_folio(mapping, dst, src, mode, false);
1026 }
1027 EXPORT_SYMBOL(buffer_migrate_folio);
1028
1029 /**
1030 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
1031 * @mapping: The address space containing @src.
1032 * @dst: The folio to migrate to.
1033 * @src: The folio to migrate from.
1034 * @mode: How to migrate the folio.
1035 *
1036 * Like buffer_migrate_folio() except that this variant is more careful
1037 * and checks that there are also no buffer head references. This function
1038 * is the right one for mappings where buffer heads are directly looked
1039 * up and referenced (such as block device mappings).
1040 *
1041 * Return: 0 on success or a negative errno on failure.
1042 */
1043 int buffer_migrate_folio_norefs(struct address_space *mapping,
1044 struct folio *dst, struct folio *src, enum migrate_mode mode)
1045 {
1046 return __buffer_migrate_folio(mapping, dst, src, mode, true);
1047 }
1048 EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
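/*
 * Usage sketch: a buffer_head based filesystem would normally wire up
 * buffer_migrate_folio(), while a mapping whose buffer heads are looked up
 * and referenced directly (such as a block device mapping) would use the
 * stricter buffer_migrate_folio_norefs(). Both aops below are illustrative:
 *
 *	static const struct address_space_operations my_fs_aops = {
 *		.migrate_folio	= buffer_migrate_folio,
 *	};
 *
 *	static const struct address_space_operations my_bdev_aops = {
 *		.migrate_folio	= buffer_migrate_folio_norefs,
 *	};
 */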
1049 #endif /* CONFIG_BUFFER_HEAD */
1050
1051 int filemap_migrate_folio(struct address_space *mapping,
1052 struct folio *dst, struct folio *src, enum migrate_mode mode)
1053 {
1054 return __migrate_folio(mapping, dst, src, folio_get_private(src), mode);
1055 }
1056 EXPORT_SYMBOL_GPL(filemap_migrate_folio);
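/*
 * Usage sketch: filesystems that keep fs-private data in folio->private but
 * need no other special handling can point ->migrate_folio at
 * filemap_migrate_folio(), which carries that private data across
 * (illustrative, see the aops sketch after migrate_folio() above):
 *
 *	.migrate_folio	= filemap_migrate_folio,
 */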
1057
1058 /*
1059 * Default handling if a filesystem does not provide a migration function.
1060 */
1061 static int fallback_migrate_folio(struct address_space *mapping,
1062 struct folio *dst, struct folio *src, enum migrate_mode mode)
1063 {
1064 WARN_ONCE(mapping->a_ops->writepages,
1065 "%ps does not implement migrate_folio\n",
1066 mapping->a_ops);
1067 if (folio_test_dirty(src))
1068 return -EBUSY;
1069
1070 /*
1071 * Filesystem may have private data at folio->private that we
1072 * can't migrate automatically.
1073 */
1074 if (!filemap_release_folio(src, GFP_KERNEL))
1075 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
1076
1077 return migrate_folio(mapping, dst, src, mode);
1078 }
1079
1080 /*
1081 * Move a src folio to a newly allocated dst folio.
1082 *
1083 * The src and dst folios are locked and the src folio was unmapped from
1084 * the page tables.
1085 *
1086 * On success, the src folio was replaced by the dst folio.
1087 *
1088 * Return value:
1089 * < 0 - error code
1090 * 0 - success
1091 */
1092 static int move_to_new_folio(struct folio *dst, struct folio *src,
1093 enum migrate_mode mode)
1094 {
1095 struct address_space *mapping = folio_mapping(src);
1096 int rc = -EAGAIN;
1097
1098 VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
1099 VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
1100
1101 if (!mapping)
1102 rc = migrate_folio(mapping, dst, src, mode);
1103 else if (mapping_inaccessible(mapping))
1104 rc = -EOPNOTSUPP;
1105 else if (mapping->a_ops->migrate_folio)
1106 /*
1107 * Most folios have a mapping and most filesystems
1108 * provide a migrate_folio callback. Anonymous folios
1109 * are part of swap space which also has its own
1110 * migrate_folio callback. This is the most common path
1111 * for page migration.
1112 */
1113 rc = mapping->a_ops->migrate_folio(mapping, dst, src,
1114 mode);
1115 else
1116 rc = fallback_migrate_folio(mapping, dst, src, mode);
1117
1118 if (!rc) {
1119 /*
1120 * For pagecache folios, src->mapping must be cleared before src
1121 * is freed. Anonymous folios must stay anonymous until freed.
1122 */
1123 if (!folio_test_anon(src))
1124 src->mapping = NULL;
1125
1126 if (likely(!folio_is_zone_device(dst)))
1127 flush_dcache_folio(dst);
1128 }
1129 return rc;
1130 }
1131
1132 /*
1133 * To record some information during migration, we use the otherwise unused
1134 * private field of struct folio of the newly allocated destination folio.
1135 * This is safe because nobody is using it except us.
1136 */
1137 enum {
1138 PAGE_WAS_MAPPED = BIT(0),
1139 PAGE_WAS_MLOCKED = BIT(1),
1140 PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
1141 };
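/*
 * __migrate_folio_record()/__migrate_folio_extract() below stash the
 * anon_vma pointer and the PAGE_OLD_STATES bits together in dst->private.
 * This relies on struct anon_vma being at least 4-byte aligned, so the two
 * low bits of the pointer are free to carry the state flags.
 */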
1142
1143 static void __migrate_folio_record(struct folio *dst,
1144 int old_page_state,
1145 struct anon_vma *anon_vma)
1146 {
1147 dst->private = (void *)anon_vma + old_page_state;
1148 }
1149
1150 static void __migrate_folio_extract(struct folio *dst,
1151 int *old_page_state,
1152 struct anon_vma **anon_vmap)
1153 {
1154 unsigned long private = (unsigned long)dst->private;
1155
1156 *anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
1157 *old_page_state = private & PAGE_OLD_STATES;
1158 dst->private = NULL;
1159 }
1160
1161 /* Restore the source folio to the original state upon failure */
1162 static void migrate_folio_undo_src(struct folio *src,
1163 int page_was_mapped,
1164 struct anon_vma *anon_vma,
1165 bool locked,
1166 struct list_head *ret)
1167 {
1168 if (page_was_mapped)
1169 remove_migration_ptes(src, src, 0);
1170 /* Drop an anon_vma reference if we took one */
1171 if (anon_vma)
1172 put_anon_vma(anon_vma);
1173 if (locked)
1174 folio_unlock(src);
1175 if (ret)
1176 list_move_tail(&src->lru, ret);
1177 }
1178
1179 /* Restore the destination folio to the original state upon failure */
1180 static void migrate_folio_undo_dst(struct folio *dst, bool locked,
1181 free_folio_t put_new_folio, unsigned long private)
1182 {
1183 if (locked)
1184 folio_unlock(dst);
1185 if (put_new_folio)
1186 put_new_folio(dst, private);
1187 else
1188 folio_put(dst);
1189 }
1190
1191 /* Cleanup src folio upon migration success */
1192 static void migrate_folio_done(struct folio *src,
1193 enum migrate_reason reason)
1194 {
1195 if (likely(!page_has_movable_ops(&src->page)) && reason != MR_DEMOTION)
1196 mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1197 folio_is_file_lru(src), -folio_nr_pages(src));
1198
1199 if (reason != MR_MEMORY_FAILURE)
1200 /* We release the page in page_handle_poison. */
1201 folio_put(src);
1202 }
1203
1204 /* Obtain the lock on page, remove all ptes. */
1205 static int migrate_folio_unmap(new_folio_t get_new_folio,
1206 free_folio_t put_new_folio, unsigned long private,
1207 struct folio *src, struct folio **dstp, enum migrate_mode mode,
1208 struct list_head *ret)
1209 {
1210 struct folio *dst;
1211 int rc = -EAGAIN;
1212 int old_page_state = 0;
1213 struct anon_vma *anon_vma = NULL;
1214 bool locked = false;
1215 bool dst_locked = false;
1216
1217 dst = get_new_folio(src, private);
1218 if (!dst)
1219 return -ENOMEM;
1220 *dstp = dst;
1221
1222 dst->private = NULL;
1223
1224 if (!folio_trylock(src)) {
1225 if (mode == MIGRATE_ASYNC)
1226 goto out;
1227
1228 /*
1229 * It's not safe for direct compaction to call lock_page.
1230 * For example, during page readahead pages are added locked
1231 * to the LRU. Later, when the IO completes the pages are
1232 * marked uptodate and unlocked. However, the queueing
1233 * could be merging multiple pages for one bio (e.g.
1234 * mpage_readahead). If an allocation happens for the
1235 * second or third page, the process can end up locking
1236 * the same page twice and deadlocking. Rather than
1237 * trying to be clever about what pages can be locked,
1238 * avoid the use of lock_page for direct compaction
1239 * altogether.
1240 */
1241 if (current->flags & PF_MEMALLOC)
1242 goto out;
1243
1244 /*
1245 * In "light" mode, we can wait for transient locks (eg
1246 * inserting a page into the page table), but it's not
1247 * worth waiting for I/O.
1248 */
1249 if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
1250 goto out;
1251
1252 folio_lock(src);
1253 }
1254 locked = true;
1255 if (folio_test_mlocked(src))
1256 old_page_state |= PAGE_WAS_MLOCKED;
1257
1258 if (folio_test_writeback(src)) {
1259 /*
1260 * Only in the case of a full synchronous migration is it
1261 * necessary to wait for PageWriteback. In the async case,
1262 * the retry loop is too short and in the sync-light case,
1263 * the overhead of stalling is too much.
1264 */
1265 switch (mode) {
1266 case MIGRATE_SYNC:
1267 break;
1268 default:
1269 rc = -EBUSY;
1270 goto out;
1271 }
1272 folio_wait_writeback(src);
1273 }
1274
1275 /*
1276 * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
1277 * we cannot notice that anon_vma is freed while we migrate a page.
1278 * This get_anon_vma() delays freeing the anon_vma pointer until the end
1279 * of migration. File cache pages are no problem because of the page lock:
1280 * file caches may use write_page() or lock_page() in migration, so we only
1281 * need to care about anon pages here.
1282 *
1283 * Only folio_get_anon_vma() understands the subtleties of
1284 * getting a hold on an anon_vma from outside one of its mms.
1285 * But if we cannot get anon_vma, then we won't need it anyway,
1286 * because that implies that the anon page is no longer mapped
1287 * (and cannot be remapped so long as we hold the page lock).
1288 */
1289 if (folio_test_anon(src) && !folio_test_ksm(src))
1290 anon_vma = folio_get_anon_vma(src);
1291
1292 /*
1293 * Block others from accessing the new page when we get around to
1294 * establishing additional references. We are usually the only one
1295 * holding a reference to dst at this point. We used to have a BUG
1296 * here if folio_trylock(dst) fails, but would like to allow for
1297 * cases where there might be a race with the previous use of dst.
1298 * This is much like races on refcount of oldpage: just don't BUG().
1299 */
1300 if (unlikely(!folio_trylock(dst)))
1301 goto out;
1302 dst_locked = true;
1303
1304 if (unlikely(page_has_movable_ops(&src->page))) {
1305 __migrate_folio_record(dst, old_page_state, anon_vma);
1306 return 0;
1307 }
1308
1309 /*
1310 * Corner case handling:
1311 * 1. When a new swap-cache page is read into, it is added to the LRU
1312 * and treated as swapcache but it has no rmap yet.
1313 * Calling try_to_unmap() against a src->mapping==NULL page will
1314 * trigger a BUG. So handle it here.
1315 * 2. An orphaned page (see truncate_cleanup_page) might have
1316 * fs-private metadata. The page can be picked up due to memory
1317 * offlining. Everywhere else except page reclaim, the page is
1318 * invisible to the vm, so the page can not be migrated. So try to
1319 * free the metadata, so the page can be freed.
1320 */
1321 if (!src->mapping) {
1322 if (folio_test_private(src)) {
1323 try_to_free_buffers(src);
1324 goto out;
1325 }
1326 } else if (folio_mapped(src)) {
1327 /* Establish migration ptes */
1328 VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1329 !folio_test_ksm(src) && !anon_vma, src);
1330 try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
1331 old_page_state |= PAGE_WAS_MAPPED;
1332 }
1333
1334 if (!folio_mapped(src)) {
1335 __migrate_folio_record(dst, old_page_state, anon_vma);
1336 return 0;
1337 }
1338
1339 out:
1340 /*
1341 * A folio that has not been unmapped will be restored to
1342 * the right list unless we want to retry.
1343 */
1344 if (rc == -EAGAIN)
1345 ret = NULL;
1346
1347 migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1348 anon_vma, locked, ret);
1349 migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
1350
1351 return rc;
1352 }
1353
1354 /* Migrate the folio to the newly allocated folio in dst. */
1355 static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
1356 struct folio *src, struct folio *dst,
1357 enum migrate_mode mode, enum migrate_reason reason,
1358 struct list_head *ret)
1359 {
1360 int rc;
1361 int old_page_state = 0;
1362 struct anon_vma *anon_vma = NULL;
1363 bool src_deferred_split = false;
1364 bool src_partially_mapped = false;
1365 struct list_head *prev;
1366
1367 __migrate_folio_extract(dst, &old_page_state, &anon_vma);
1368 prev = dst->lru.prev;
1369 list_del(&dst->lru);
1370
1371 if (unlikely(page_has_movable_ops(&src->page))) {
1372 rc = migrate_movable_ops_page(&dst->page, &src->page, mode);
1373 if (rc)
1374 goto out;
1375 goto out_unlock_both;
1376 }
1377
1378 if (folio_order(src) > 1 &&
1379 !data_race(list_empty(&src->_deferred_list))) {
1380 src_deferred_split = true;
1381 src_partially_mapped = folio_test_partially_mapped(src);
1382 }
1383
1384 rc = move_to_new_folio(dst, src, mode);
1385 if (rc)
1386 goto out;
1387
1388 /*
1389 * Requeue the destination folio on the deferred split queue if
1390 * the source was on the queue. The source is unqueued in
1391 * __folio_migrate_mapping(), so we recorded the state from
1392 * before move_to_new_folio().
1393 */
1394 if (src_deferred_split)
1395 deferred_split_folio(dst, src_partially_mapped);
1396
1397 /*
1398 * When successful, push dst to LRU immediately: so that if it
1399 * turns out to be an mlocked page, remove_migration_ptes() will
1400 * automatically build up the correct dst->mlock_count for it.
1401 *
1402 * We would like to do something similar for the old page, when
1403 * unsuccessful, and other cases when a page has been temporarily
1404 * isolated from the unevictable LRU: but this case is the easiest.
1405 */
1406 folio_add_lru(dst);
1407 if (old_page_state & PAGE_WAS_MLOCKED)
1408 lru_add_drain();
1409
1410 if (old_page_state & PAGE_WAS_MAPPED)
1411 remove_migration_ptes(src, dst, 0);
1412
1413 out_unlock_both:
1414 folio_unlock(dst);
1415 folio_set_owner_migrate_reason(dst, reason);
1416 /*
1417 * If migration is successful, decrease refcount of dst,
1418 * which will not free the page because the new page owner increased
1419 * the refcount.
1420 */
1421 folio_put(dst);
1422
1423 /*
1424 * A folio that has been migrated has all references removed
1425 * and will be freed.
1426 */
1427 list_del(&src->lru);
1428 /* Drop an anon_vma reference if we took one */
1429 if (anon_vma)
1430 put_anon_vma(anon_vma);
1431 folio_unlock(src);
1432 migrate_folio_done(src, reason);
1433
1434 return rc;
1435 out:
1436 /*
1437 * A folio that has not been migrated will be restored to
1438 * the right list unless we want to retry.
1439 */
1440 if (rc == -EAGAIN) {
1441 list_add(&dst->lru, prev);
1442 __migrate_folio_record(dst, old_page_state, anon_vma);
1443 return rc;
1444 }
1445
1446 migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1447 anon_vma, true, ret);
1448 migrate_folio_undo_dst(dst, true, put_new_folio, private);
1449
1450 return rc;
1451 }
1452
1453 /*
1454 * Counterpart of unmap_and_move_page() for hugepage migration.
1455 *
1456 * This function doesn't wait for the completion of hugepage I/O
1457 * because there is no race between I/O and migration for hugepage.
1458 * Note that currently hugepage I/O occurs only in direct I/O
1459 * where no lock is held and PG_writeback is irrelevant,
1460 * and the writeback status of all subpages is counted in the reference
1461 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1462 * under direct I/O, the reference of the head page is 512 and a bit more.)
1463 * This means that when we try to migrate a hugepage whose subpages are
1464 * doing direct I/O, some references remain after try_to_unmap() and
1465 * hugepage migration fails without data corruption.
1466 *
1467 * There is also no race when direct I/O is issued on the page under migration,
1468 * because then pte is replaced with migration swap entry and direct I/O code
1469 * will wait in the page fault for migration to complete.
1470 */
1471 static int unmap_and_move_huge_page(new_folio_t get_new_folio,
1472 free_folio_t put_new_folio, unsigned long private,
1473 struct folio *src, int force, enum migrate_mode mode,
1474 int reason, struct list_head *ret)
1475 {
1476 struct folio *dst;
1477 int rc = -EAGAIN;
1478 int page_was_mapped = 0;
1479 struct anon_vma *anon_vma = NULL;
1480 struct address_space *mapping = NULL;
1481 enum ttu_flags ttu = 0;
1482
1483 if (folio_ref_count(src) == 1) {
1484 /* page was freed from under us. So we are done. */
1485 folio_putback_hugetlb(src);
1486 return 0;
1487 }
1488
1489 dst = get_new_folio(src, private);
1490 if (!dst)
1491 return -ENOMEM;
1492
1493 if (!folio_trylock(src)) {
1494 if (!force)
1495 goto out;
1496 switch (mode) {
1497 case MIGRATE_SYNC:
1498 break;
1499 default:
1500 goto out;
1501 }
1502 folio_lock(src);
1503 }
1504
1505 /*
1506 * Check for pages which are in the process of being freed. Without
1507 * folio_mapping() set, hugetlbfs specific move page routine will not
1508 * be called and we could leak usage counts for subpools.
1509 */
1510 if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1511 rc = -EBUSY;
1512 goto out_unlock;
1513 }
1514
1515 if (folio_test_anon(src))
1516 anon_vma = folio_get_anon_vma(src);
1517
1518 if (unlikely(!folio_trylock(dst)))
1519 goto put_anon;
1520
1521 if (folio_mapped(src)) {
1522 if (!folio_test_anon(src)) {
1523 /*
1524 * In shared mappings, try_to_unmap could potentially
1525 * call huge_pmd_unshare. Because of this, take
1526 * semaphore in write mode here and set TTU_RMAP_LOCKED
1527 * to let lower levels know we have taken the lock.
1528 */
1529 mapping = hugetlb_folio_mapping_lock_write(src);
1530 if (unlikely(!mapping))
1531 goto unlock_put_anon;
1532
1533 ttu = TTU_RMAP_LOCKED;
1534 }
1535
1536 try_to_migrate(src, ttu);
1537 page_was_mapped = 1;
1538 }
1539
1540 if (!folio_mapped(src))
1541 rc = move_to_new_folio(dst, src, mode);
1542
1543 if (page_was_mapped)
1544 remove_migration_ptes(src, !rc ? dst : src, ttu);
1545
1546 if (ttu & TTU_RMAP_LOCKED)
1547 i_mmap_unlock_write(mapping);
1548
1549 unlock_put_anon:
1550 folio_unlock(dst);
1551
1552 put_anon:
1553 if (anon_vma)
1554 put_anon_vma(anon_vma);
1555
1556 if (!rc) {
1557 move_hugetlb_state(src, dst, reason);
1558 put_new_folio = NULL;
1559 }
1560
1561 out_unlock:
1562 folio_unlock(src);
1563 out:
1564 if (!rc)
1565 folio_putback_hugetlb(src);
1566 else if (rc != -EAGAIN)
1567 list_move_tail(&src->lru, ret);
1568
1569 /*
1570 * If migration was not successful and there's a freeing callback,
1571 * return the folio to that special allocator. Otherwise, simply drop
1572 * our additional reference.
1573 */
1574 if (put_new_folio)
1575 put_new_folio(dst, private);
1576 else
1577 folio_put(dst);
1578
1579 return rc;
1580 }
1581
1582 static inline int try_split_folio(struct folio *folio, struct list_head *split_folios,
1583 enum migrate_mode mode)
1584 {
1585 int rc;
1586
1587 if (mode == MIGRATE_ASYNC) {
1588 if (!folio_trylock(folio))
1589 return -EAGAIN;
1590 } else {
1591 folio_lock(folio);
1592 }
1593 rc = split_folio_to_list(folio, split_folios);
1594 folio_unlock(folio);
1595 if (!rc)
1596 list_move_tail(&folio->lru, split_folios);
1597
1598 return rc;
1599 }
1600
1601 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1602 #define NR_MAX_BATCHED_MIGRATION HPAGE_PMD_NR
1603 #else
1604 #define NR_MAX_BATCHED_MIGRATION 512
1605 #endif
1606 #define NR_MAX_MIGRATE_PAGES_RETRY 10
1607 #define NR_MAX_MIGRATE_ASYNC_RETRY 3
1608 #define NR_MAX_MIGRATE_SYNC_RETRY \
1609 (NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
1610
1611 struct migrate_pages_stats {
1612 int nr_succeeded; /* Normal and large folios migrated successfully, in
1613 units of base pages */
1614 int nr_failed_pages; /* Normal and large folios failed to be migrated, in
1615 units of base pages. Untried folios aren't counted */
1616 int nr_thp_succeeded; /* THP migrated successfully */
1617 int nr_thp_failed; /* THP failed to be migrated */
1618 int nr_thp_split; /* THP split before migrating */
1619 int nr_split; /* Large folio (include THP) split before migrating */
1620 };
1621
1622 /*
1623 * Returns the number of hugetlb folios that were not migrated, or an error code
1624 * after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no hugetlb folios are movable
1625 * any more because the list has become empty or no retryable hugetlb folios
1626 * exist any more. It is the caller's responsibility to call putback_movable_pages()
1627 * only if ret != 0.
1628 */
1629 static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
1630 free_folio_t put_new_folio, unsigned long private,
1631 enum migrate_mode mode, int reason,
1632 struct migrate_pages_stats *stats,
1633 struct list_head *ret_folios)
1634 {
1635 int retry = 1;
1636 int nr_failed = 0;
1637 int nr_retry_pages = 0;
1638 int pass = 0;
1639 struct folio *folio, *folio2;
1640 int rc, nr_pages;
1641
1642 for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1643 retry = 0;
1644 nr_retry_pages = 0;
1645
1646 list_for_each_entry_safe(folio, folio2, from, lru) {
1647 if (!folio_test_hugetlb(folio))
1648 continue;
1649
1650 nr_pages = folio_nr_pages(folio);
1651
1652 cond_resched();
1653
1654 /*
1655 * Migratability of hugepages depends on the architecture and
1656 * the hugepage size. This check is necessary because some callers
1657 * of hugepage migration like soft offline and memory
1658 * hotremove don't walk through page tables or check whether
1659 * the hugepage is pmd-based or not before kicking migration.
1660 */
1661 if (!hugepage_migration_supported(folio_hstate(folio))) {
1662 nr_failed++;
1663 stats->nr_failed_pages += nr_pages;
1664 list_move_tail(&folio->lru, ret_folios);
1665 continue;
1666 }
1667
1668 rc = unmap_and_move_huge_page(get_new_folio,
1669 put_new_folio, private,
1670 folio, pass > 2, mode,
1671 reason, ret_folios);
1672 /*
1673 * The rules are:
1674 * 0: hugetlb folio will be put back
1675 * -EAGAIN: stay on the from list
1676 * -ENOMEM: stay on the from list
1677 * Other errno: put on ret_folios list
1678 */
1679 switch(rc) {
1680 case -ENOMEM:
1681 /*
1682 * When memory is low, don't bother to try to migrate
1683 * other folios, just exit.
1684 */
1685 stats->nr_failed_pages += nr_pages + nr_retry_pages;
1686 return -ENOMEM;
1687 case -EAGAIN:
1688 retry++;
1689 nr_retry_pages += nr_pages;
1690 break;
1691 case 0:
1692 stats->nr_succeeded += nr_pages;
1693 break;
1694 default:
1695 /*
1696 * Permanent failure (-EBUSY, etc.):
1697 * unlike -EAGAIN case, the failed folio is
1698 * removed from migration folio list and not
1699 * retried in the next outer loop.
1700 */
1701 nr_failed++;
1702 stats->nr_failed_pages += nr_pages;
1703 break;
1704 }
1705 }
1706 }
1707 /*
1708 * nr_failed is number of hugetlb folios failed to be migrated. After
1709 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
1710 * folios as failed.
1711 */
1712 nr_failed += retry;
1713 stats->nr_failed_pages += nr_retry_pages;
1714
1715 return nr_failed;
1716 }
1717
1718 static void migrate_folios_move(struct list_head *src_folios,
1719 struct list_head *dst_folios,
1720 free_folio_t put_new_folio, unsigned long private,
1721 enum migrate_mode mode, int reason,
1722 struct list_head *ret_folios,
1723 struct migrate_pages_stats *stats,
1724 int *retry, int *thp_retry, int *nr_failed,
1725 int *nr_retry_pages)
1726 {
1727 struct folio *folio, *folio2, *dst, *dst2;
1728 bool is_thp;
1729 int nr_pages;
1730 int rc;
1731
1732 dst = list_first_entry(dst_folios, struct folio, lru);
1733 dst2 = list_next_entry(dst, lru);
1734 list_for_each_entry_safe(folio, folio2, src_folios, lru) {
1735 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1736 nr_pages = folio_nr_pages(folio);
1737
1738 cond_resched();
1739
1740 rc = migrate_folio_move(put_new_folio, private,
1741 folio, dst, mode,
1742 reason, ret_folios);
1743 /*
1744 * The rules are:
1745 * 0: folio will be freed
1746 * -EAGAIN: stay on the unmap_folios list
1747 * Other errno: put on ret_folios list
1748 */
1749 switch (rc) {
1750 case -EAGAIN:
1751 *retry += 1;
1752 *thp_retry += is_thp;
1753 *nr_retry_pages += nr_pages;
1754 break;
1755 case 0:
1756 stats->nr_succeeded += nr_pages;
1757 stats->nr_thp_succeeded += is_thp;
1758 break;
1759 default:
1760 *nr_failed += 1;
1761 stats->nr_thp_failed += is_thp;
1762 stats->nr_failed_pages += nr_pages;
1763 break;
1764 }
1765 dst = dst2;
1766 dst2 = list_next_entry(dst, lru);
1767 }
1768 }
1769
1770 static void migrate_folios_undo(struct list_head *src_folios,
1771 struct list_head *dst_folios,
1772 free_folio_t put_new_folio, unsigned long private,
1773 struct list_head *ret_folios)
1774 {
1775 struct folio *folio, *folio2, *dst, *dst2;
1776
1777 dst = list_first_entry(dst_folios, struct folio, lru);
1778 dst2 = list_next_entry(dst, lru);
1779 list_for_each_entry_safe(folio, folio2, src_folios, lru) {
1780 int old_page_state = 0;
1781 struct anon_vma *anon_vma = NULL;
1782
1783 __migrate_folio_extract(dst, &old_page_state, &anon_vma);
1784 migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1785 anon_vma, true, ret_folios);
1786 list_del(&dst->lru);
1787 migrate_folio_undo_dst(dst, true, put_new_folio, private);
1788 dst = dst2;
1789 dst2 = list_next_entry(dst, lru);
1790 }
1791 }
1792
1793 /*
1794 * migrate_pages_batch() first unmaps folios in the from list as many as
1795 * possible, then move the unmapped folios.
1796 *
1797 * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on a
1798 * lock or bit while we have locked more than one folio, which may cause a
1799 * deadlock (e.g., for the loop device). So, if mode != MIGRATE_ASYNC, the
1800 * length of the from list must be <= 1.
1801 */
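/*
 * For illustration (not part of the original source): callers that use a
 * non-async mode satisfy this constraint by feeding folios in one at a
 * time, as migrate_pages_sync() below does in its fallback loop, so the
 * VM_WARN_ON_ONCE() at the top of this function does not trigger:
 *
 *	while (!list_empty(from)) {
 *		list_move(from->next, &folios);
 *		rc = migrate_pages_batch(&folios, ..., mode, ...,
 *					 NR_MAX_MIGRATE_SYNC_RETRY);
 *		...
 *	}
 */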
1802 static int migrate_pages_batch(struct list_head *from,
1803 new_folio_t get_new_folio, free_folio_t put_new_folio,
1804 unsigned long private, enum migrate_mode mode, int reason,
1805 struct list_head *ret_folios, struct list_head *split_folios,
1806 struct migrate_pages_stats *stats, int nr_pass)
1807 {
1808 int retry = 1;
1809 int thp_retry = 1;
1810 int nr_failed = 0;
1811 int nr_retry_pages = 0;
1812 int pass = 0;
1813 bool is_thp = false;
1814 bool is_large = false;
1815 struct folio *folio, *folio2, *dst = NULL;
1816 int rc, rc_saved = 0, nr_pages;
1817 LIST_HEAD(unmap_folios);
1818 LIST_HEAD(dst_folios);
1819 bool nosplit = (reason == MR_NUMA_MISPLACED);
1820
1821 VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1822 !list_empty(from) && !list_is_singular(from));
1823
1824 for (pass = 0; pass < nr_pass && retry; pass++) {
1825 retry = 0;
1826 thp_retry = 0;
1827 nr_retry_pages = 0;
1828
1829 list_for_each_entry_safe(folio, folio2, from, lru) {
1830 is_large = folio_test_large(folio);
1831 is_thp = folio_test_pmd_mappable(folio);
1832 nr_pages = folio_nr_pages(folio);
1833
1834 cond_resched();
1835
1836 /*
1837 * The rare folio on the deferred split list should
1838 * be split now. It should not count as a failure:
1839 * but increment nr_failed because, without doing so,
1840 * migrate_pages() may report success with (split but
1841 * unmigrated) pages still on its fromlist; whereas it
1842 * always reports success when its fromlist is empty.
1843 * stats->nr_thp_failed should be increased too,
1844 * otherwise stats inconsistency will happen when
1845 * migrate_pages_batch is called via migrate_pages()
1846 * with MIGRATE_SYNC and MIGRATE_ASYNC.
1847 *
1848 * Only check the flag without removing the folio from the
1849 * list, since the folio can be on a deferred_split_scan()
1850 * local list and removing it there could corrupt that local
1851 * list. The folio split process below can handle it
1852 * with the help of folio_ref_freeze().
1853 *
1854 * nr_pages > 2 is needed to avoid checking order-1
1855 * page cache folios. They exist, in contrast to
1856 * non-existent order-1 anonymous folios, and do not
1857 * use _deferred_list.
1858 */
1859 if (nr_pages > 2 &&
1860 !list_empty(&folio->_deferred_list) &&
1861 folio_test_partially_mapped(folio)) {
1862 if (!try_split_folio(folio, split_folios, mode)) {
1863 nr_failed++;
1864 stats->nr_thp_failed += is_thp;
1865 stats->nr_thp_split += is_thp;
1866 stats->nr_split++;
1867 continue;
1868 }
1869 }
1870
1871 /*
1872 * Large folio migration might be unsupported or
1873 * the allocation might be failed so we should retry
1874 * on the same folio with the large folio split
1875 * to normal folios.
1876 *
1877 * Split folios are put in split_folios, and
1878 * we will migrate them after the rest of the
1879 * list is processed.
1880 */
1881 if (!thp_migration_supported() && is_thp) {
1882 nr_failed++;
1883 stats->nr_thp_failed++;
1884 if (!try_split_folio(folio, split_folios, mode)) {
1885 stats->nr_thp_split++;
1886 stats->nr_split++;
1887 continue;
1888 }
1889 stats->nr_failed_pages += nr_pages;
1890 list_move_tail(&folio->lru, ret_folios);
1891 continue;
1892 }
1893
1894 /*
1895 * If we are holding the last folio reference, the folio
1896 * was freed from under us, so just drop our reference.
1897 */
1898 if (likely(!page_has_movable_ops(&folio->page)) &&
1899 folio_ref_count(folio) == 1) {
1900 folio_clear_active(folio);
1901 folio_clear_unevictable(folio);
1902 list_del(&folio->lru);
1903 migrate_folio_done(folio, reason);
1904 stats->nr_succeeded += nr_pages;
1905 stats->nr_thp_succeeded += is_thp;
1906 continue;
1907 }
1908
1909 rc = migrate_folio_unmap(get_new_folio, put_new_folio,
1910 private, folio, &dst, mode, ret_folios);
1911 /*
1912 * The rules are:
1913 * 0: folio will be put on unmap_folios list,
1914 * dst folio put on dst_folios list
1915 * -EAGAIN: stay on the from list
1916 * -ENOMEM: stay on the from list
1917 * Other errno: put on ret_folios list
1918 */
1919 switch(rc) {
1920 case -ENOMEM:
1921 /*
1922 * When memory is low, don't bother to try to migrate
1923 * other folios, move unmapped folios, then exit.
1924 */
1925 nr_failed++;
1926 stats->nr_thp_failed += is_thp;
1927 /* Large folio NUMA faulting doesn't split to retry. */
1928 if (is_large && !nosplit) {
1929 int ret = try_split_folio(folio, split_folios, mode);
1930
1931 if (!ret) {
1932 stats->nr_thp_split += is_thp;
1933 stats->nr_split++;
1934 break;
1935 } else if (reason == MR_LONGTERM_PIN &&
1936 ret == -EAGAIN) {
1937 /*
1938 * Try again to split large folio to
1939 * mitigate the failure of longterm pinning.
1940 */
1941 retry++;
1942 thp_retry += is_thp;
1943 nr_retry_pages += nr_pages;
1944 /* Undo duplicated failure counting. */
1945 nr_failed--;
1946 stats->nr_thp_failed -= is_thp;
1947 break;
1948 }
1949 }
1950
1951 stats->nr_failed_pages += nr_pages + nr_retry_pages;
1952 /* nr_failed isn't updated since it won't be used: rc_saved is returned instead */
1953 stats->nr_thp_failed += thp_retry;
1954 rc_saved = rc;
1955 if (list_empty(&unmap_folios))
1956 goto out;
1957 else
1958 goto move;
1959 case -EAGAIN:
1960 retry++;
1961 thp_retry += is_thp;
1962 nr_retry_pages += nr_pages;
1963 break;
1964 case 0:
1965 list_move_tail(&folio->lru, &unmap_folios);
1966 list_add_tail(&dst->lru, &dst_folios);
1967 break;
1968 default:
1969 /*
1970 * Permanent failure (-EBUSY, etc.):
1971 * unlike -EAGAIN case, the failed folio is
1972 * removed from migration folio list and not
1973 * retried in the next outer loop.
1974 */
1975 nr_failed++;
1976 stats->nr_thp_failed += is_thp;
1977 stats->nr_failed_pages += nr_pages;
1978 break;
1979 }
1980 }
1981 }
1982 nr_failed += retry;
1983 stats->nr_thp_failed += thp_retry;
1984 stats->nr_failed_pages += nr_retry_pages;
1985 move:
1986 /* Flush TLBs for all unmapped folios */
1987 try_to_unmap_flush();
1988
1989 retry = 1;
1990 for (pass = 0; pass < nr_pass && retry; pass++) {
1991 retry = 0;
1992 thp_retry = 0;
1993 nr_retry_pages = 0;
1994
1995 /* Move the unmapped folios */
1996 migrate_folios_move(&unmap_folios, &dst_folios,
1997 put_new_folio, private, mode, reason,
1998 ret_folios, stats, &retry, &thp_retry,
1999 &nr_failed, &nr_retry_pages);
2000 }
2001 nr_failed += retry;
2002 stats->nr_thp_failed += thp_retry;
2003 stats->nr_failed_pages += nr_retry_pages;
2004
2005 rc = rc_saved ? : nr_failed;
2006 out:
2007 /* Cleanup remaining folios */
2008 migrate_folios_undo(&unmap_folios, &dst_folios,
2009 put_new_folio, private, ret_folios);
2010
2011 return rc;
2012 }
2013
2014 static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
2015 free_folio_t put_new_folio, unsigned long private,
2016 enum migrate_mode mode, int reason,
2017 struct list_head *ret_folios, struct list_head *split_folios,
2018 struct migrate_pages_stats *stats)
2019 {
2020 int rc, nr_failed = 0;
2021 LIST_HEAD(folios);
2022 struct migrate_pages_stats astats;
2023
2024 memset(&astats, 0, sizeof(astats));
2025 /* Try to migrate in batch with MIGRATE_ASYNC mode first */
2026 rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
2027 reason, &folios, split_folios, &astats,
2028 NR_MAX_MIGRATE_ASYNC_RETRY);
2029 stats->nr_succeeded += astats.nr_succeeded;
2030 stats->nr_thp_succeeded += astats.nr_thp_succeeded;
2031 stats->nr_thp_split += astats.nr_thp_split;
2032 stats->nr_split += astats.nr_split;
2033 if (rc < 0) {
2034 stats->nr_failed_pages += astats.nr_failed_pages;
2035 stats->nr_thp_failed += astats.nr_thp_failed;
2036 list_splice_tail(&folios, ret_folios);
2037 return rc;
2038 }
2039 stats->nr_thp_failed += astats.nr_thp_split;
2040 /*
2041 * Do not count rc, as pages will be retried below.
2042 * Count nr_split only, since it includes nr_thp_split.
2043 */
2044 nr_failed += astats.nr_split;
2045 /*
2046 * Fall back to migrate all failed folios one by one synchronously. All
2047 * failed folios except split THPs will be retried, so their failure
2048 * isn't counted.
2049 */
2050 list_splice_tail_init(&folios, from);
2051 while (!list_empty(from)) {
2052 list_move(from->next, &folios);
2053 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
2054 private, mode, reason, ret_folios,
2055 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
2056 list_splice_tail_init(&folios, ret_folios);
2057 if (rc < 0)
2058 return rc;
2059 nr_failed += rc;
2060 }
2061
2062 return nr_failed;
2063 }
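/*
 * Worked example of the accounting above (illustrative only): suppose a
 * single PMD-sized THP fails the async pass and gets split. Then
 * astats.nr_split and astats.nr_thp_split are both 1, so nr_failed starts
 * at 1 and stats->nr_thp_failed is bumped by 1, even if every split folio
 * is later migrated by the final minimal-effort pass in migrate_pages();
 * the THP itself still counts as one non-migrated large folio in the
 * return value.
 */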
2064
2065 /*
2066 * migrate_pages - migrate the folios specified in a list, to the free folios
2067 * supplied as the target for the page migration
2068 *
2069 * @from: The list of folios to be migrated.
2070 * @get_new_folio: The function used to allocate free folios to be used
2071 * as the target of the folio migration.
2072 * @put_new_folio: The function used to free target folios if migration
2073 * fails, or NULL if no special handling is necessary.
2074 * @private: Private data to be passed on to get_new_folio()
2075 * @mode: The migration mode that specifies the constraints for
2076 * folio migration, if any.
2077 * @reason: The reason for folio migration.
2078 * @ret_succeeded: Set to the number of folios migrated successfully if
2079 * the caller passes a non-NULL pointer.
2080 *
2081 * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
2082 * are movable any more because the list has become empty or no retryable folios
2083 * exist any more. It is the caller's responsibility to call putback_movable_pages()
2084 * only if ret != 0.
2085 *
2086 * Returns the number of {normal folio, large folio, hugetlb} folios that were
2087 * not migrated, or an error code. A split large folio is counted as one
2088 * non-migrated large folio, no matter how many of its split folios are
2089 * migrated successfully.
2090 */
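/*
 * A small worked example of this convention (illustrative only): if three
 * folios are passed in, two migrate and one fails permanently, the function
 * returns 1 and the failed folio is left on @from for the caller to hand to
 * putback_movable_pages(). @ret_succeeded counts base pages, not folios, so
 * a successfully migrated PMD-sized THP contributes HPAGE_PMD_NR pages
 * rather than 1.
 */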
2091 int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
2092 free_folio_t put_new_folio, unsigned long private,
2093 enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
2094 {
2095 int rc, rc_gather;
2096 int nr_pages;
2097 struct folio *folio, *folio2;
2098 LIST_HEAD(folios);
2099 LIST_HEAD(ret_folios);
2100 LIST_HEAD(split_folios);
2101 struct migrate_pages_stats stats;
2102
2103 trace_mm_migrate_pages_start(mode, reason);
2104
2105 memset(&stats, 0, sizeof(stats));
2106
2107 rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
2108 mode, reason, &stats, &ret_folios);
2109 if (rc_gather < 0)
2110 goto out;
2111
2112 again:
2113 nr_pages = 0;
2114 list_for_each_entry_safe(folio, folio2, from, lru) {
2115 /* Retried hugetlb folios will be kept in list */
2116 if (folio_test_hugetlb(folio)) {
2117 list_move_tail(&folio->lru, &ret_folios);
2118 continue;
2119 }
2120
2121 nr_pages += folio_nr_pages(folio);
2122 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
2123 break;
2124 }
2125 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
2126 list_cut_before(&folios, from, &folio2->lru);
2127 else
2128 list_splice_init(from, &folios);
2129 if (mode == MIGRATE_ASYNC)
2130 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
2131 private, mode, reason, &ret_folios,
2132 &split_folios, &stats,
2133 NR_MAX_MIGRATE_PAGES_RETRY);
2134 else
2135 rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
2136 private, mode, reason, &ret_folios,
2137 &split_folios, &stats);
2138 list_splice_tail_init(&folios, &ret_folios);
2139 if (rc < 0) {
2140 rc_gather = rc;
2141 list_splice_tail(&split_folios, &ret_folios);
2142 goto out;
2143 }
2144 if (!list_empty(&split_folios)) {
2145 /*
2146 * Failure isn't counted, since all split folios of a large folio
2147 * are already counted as 1 failure. And we only try to migrate
2148 * with minimal effort, forcing MIGRATE_ASYNC mode and retrying once.
2149 */
2150 migrate_pages_batch(&split_folios, get_new_folio,
2151 put_new_folio, private, MIGRATE_ASYNC, reason,
2152 &ret_folios, NULL, &stats, 1);
2153 list_splice_tail_init(&split_folios, &ret_folios);
2154 }
2155 rc_gather += rc;
2156 if (!list_empty(from))
2157 goto again;
2158 out:
2159 /*
2160 * Put the permanently failed folios back on the migration list; they
2161 * will be put back on the right list by the caller.
2162 */
2163 list_splice(&ret_folios, from);
2164
2165 /*
2166 * Return 0 in case all split folios of fail-to-migrate large folios
2167 * are migrated successfully.
2168 */
2169 if (list_empty(from))
2170 rc_gather = 0;
2171
2172 count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
2173 count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
2174 count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
2175 count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
2176 count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
2177 trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
2178 stats.nr_thp_succeeded, stats.nr_thp_failed,
2179 stats.nr_thp_split, stats.nr_split, mode,
2180 reason);
2181
2182 if (ret_succeeded)
2183 *ret_succeeded = stats.nr_succeeded;
2184
2185 return rc_gather;
2186 }
2187
2188 struct folio *alloc_migration_target(struct folio *src, unsigned long private)
2189 {
2190 struct migration_target_control *mtc;
2191 gfp_t gfp_mask;
2192 unsigned int order = 0;
2193 int nid;
2194 enum zone_type zidx;
2195
2196 mtc = (struct migration_target_control *)private;
2197 gfp_mask = mtc->gfp_mask;
2198 nid = mtc->nid;
2199 if (nid == NUMA_NO_NODE)
2200 nid = folio_nid(src);
2201
2202 if (folio_test_hugetlb(src)) {
2203 struct hstate *h = folio_hstate(src);
2204
2205 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
2206 return alloc_hugetlb_folio_nodemask(h, nid,
2207 mtc->nmask, gfp_mask,
2208 htlb_allow_alloc_fallback(mtc->reason));
2209 }
2210
2211 if (folio_test_large(src)) {
2212 /*
2213 * clear __GFP_RECLAIM to make the migration callback
2214 * consistent with regular THP allocations.
2215 */
2216 gfp_mask &= ~__GFP_RECLAIM;
2217 gfp_mask |= GFP_TRANSHUGE;
2218 order = folio_order(src);
2219 }
2220 zidx = folio_zonenum(src);
2221 if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
2222 gfp_mask |= __GFP_HIGHMEM;
2223
2224 return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
2225 }
2226
2227 #ifdef CONFIG_NUMA
2228 static int store_status(int __user *status, int start, int value, int nr)
2229 {
2230 while (nr-- > 0) {
2231 if (put_user(value, status + start))
2232 return -EFAULT;
2233 start++;
2234 }
2235
2236 return 0;
2237 }
2238
2239 static int do_move_pages_to_node(struct list_head *pagelist, int node)
2240 {
2241 int err;
2242 struct migration_target_control mtc = {
2243 .nid = node,
2244 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2245 .reason = MR_SYSCALL,
2246 };
2247
2248 err = migrate_pages(pagelist, alloc_migration_target, NULL,
2249 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
2250 if (err)
2251 putback_movable_pages(pagelist);
2252 return err;
2253 }
2254
2255 static int __add_folio_for_migration(struct folio *folio, int node,
2256 struct list_head *pagelist, bool migrate_all)
2257 {
2258 if (is_zero_folio(folio) || is_huge_zero_folio(folio))
2259 return -EFAULT;
2260
2261 if (folio_is_zone_device(folio))
2262 return -ENOENT;
2263
2264 if (folio_nid(folio) == node)
2265 return 0;
2266
2267 if (folio_maybe_mapped_shared(folio) && !migrate_all)
2268 return -EACCES;
2269
2270 if (folio_test_hugetlb(folio)) {
2271 if (folio_isolate_hugetlb(folio, pagelist))
2272 return 1;
2273 } else if (folio_isolate_lru(folio)) {
2274 list_add_tail(&folio->lru, pagelist);
2275 node_stat_mod_folio(folio,
2276 NR_ISOLATED_ANON + folio_is_file_lru(folio),
2277 folio_nr_pages(folio));
2278 return 1;
2279 }
2280 return -EBUSY;
2281 }
2282
2283 /*
2284 * Resolves the given address to a struct folio, isolates it from the LRU and
2285 * puts it on the given pagelist.
2286 * Returns:
2287 * errno - if the folio cannot be found/isolated
2288 * 0 - when it doesn't have to be migrated because it is already on the
2289 * target node
2290 * 1 - when it has been queued
2291 */
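/*
 * For illustration: do_pages_move() below consumes these return values
 * roughly as follows. A positive value means the folio was queued, so the
 * loop simply continues; otherwise either the target node (err == 0) or
 * the -errno is written to the user status array:
 *
 *	err = add_folio_for_migration(mm, p, node, &pagelist, ...);
 *	if (err > 0)
 *		continue;
 *	store_status(status, i, err ? : node, 1);
 */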
2292 static int add_folio_for_migration(struct mm_struct *mm, const void __user *p,
2293 int node, struct list_head *pagelist, bool migrate_all)
2294 {
2295 struct vm_area_struct *vma;
2296 struct folio_walk fw;
2297 struct folio *folio;
2298 unsigned long addr;
2299 int err = -EFAULT;
2300
2301 mmap_read_lock(mm);
2302 addr = (unsigned long)untagged_addr_remote(mm, p);
2303
2304 vma = vma_lookup(mm, addr);
2305 if (vma && vma_migratable(vma)) {
2306 folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
2307 if (folio) {
2308 err = __add_folio_for_migration(folio, node, pagelist,
2309 migrate_all);
2310 folio_walk_end(&fw, vma);
2311 } else {
2312 err = -ENOENT;
2313 }
2314 }
2315 mmap_read_unlock(mm);
2316 return err;
2317 }
2318
2319 static int move_pages_and_store_status(int node,
2320 struct list_head *pagelist, int __user *status,
2321 int start, int i, unsigned long nr_pages)
2322 {
2323 int err;
2324
2325 if (list_empty(pagelist))
2326 return 0;
2327
2328 err = do_move_pages_to_node(pagelist, node);
2329 if (err) {
2330 /*
2331 * A positive err means the number of pages that
2332 * failed to migrate. Since we are going to
2333 * abort and return the number of non-migrated
2334 * pages, we need to include the rest of the
2335 * nr_pages that have not been attempted as
2336 * well.
2337 */
2338 if (err > 0)
2339 err += nr_pages - i;
2340 return err;
2341 }
2342 return store_status(status, start, node, i - start);
2343 }
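/*
 * Worked example (illustrative only): with nr_pages == 10, if the batch
 * ending at i == 6 reports err == 2 (two folios could not be migrated),
 * the caller aborts and the returned count becomes 2 + (10 - 6) = 6,
 * i.e. the failed folios plus the pages that were never attempted.
 */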
2344
2345 /*
2346 * Migrate an array of page addresses onto an array of nodes and fill
2347 * the corresponding array of status values.
2348 */
2349 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
2350 unsigned long nr_pages,
2351 const void __user * __user *pages,
2352 const int __user *nodes,
2353 int __user *status, int flags)
2354 {
2355 compat_uptr_t __user *compat_pages = (void __user *)pages;
2356 int current_node = NUMA_NO_NODE;
2357 LIST_HEAD(pagelist);
2358 int start, i;
2359 int err = 0, err1;
2360
2361 lru_cache_disable();
2362
2363 for (i = start = 0; i < nr_pages; i++) {
2364 const void __user *p;
2365 int node;
2366
2367 err = -EFAULT;
2368 if (in_compat_syscall()) {
2369 compat_uptr_t cp;
2370
2371 if (get_user(cp, compat_pages + i))
2372 goto out_flush;
2373
2374 p = compat_ptr(cp);
2375 } else {
2376 if (get_user(p, pages + i))
2377 goto out_flush;
2378 }
2379 if (get_user(node, nodes + i))
2380 goto out_flush;
2381
2382 err = -ENODEV;
2383 if (node < 0 || node >= MAX_NUMNODES)
2384 goto out_flush;
2385 if (!node_state(node, N_MEMORY))
2386 goto out_flush;
2387
2388 err = -EACCES;
2389 if (!node_isset(node, task_nodes))
2390 goto out_flush;
2391
2392 if (current_node == NUMA_NO_NODE) {
2393 current_node = node;
2394 start = i;
2395 } else if (node != current_node) {
2396 err = move_pages_and_store_status(current_node,
2397 &pagelist, status, start, i, nr_pages);
2398 if (err)
2399 goto out;
2400 start = i;
2401 current_node = node;
2402 }
2403
2404 /*
2405 * Errors in the page lookup or isolation are not fatal and we simply
2406 * report them via status.
2407 */
2408 err = add_folio_for_migration(mm, p, current_node, &pagelist,
2409 flags & MPOL_MF_MOVE_ALL);
2410
2411 if (err > 0) {
2412 /* The page is successfully queued for migration */
2413 continue;
2414 }
2415
2416 /*
2417 * If the page is already on the target node (!err), store the
2418 * node, otherwise, store the err.
2419 */
2420 err = store_status(status, i, err ? : current_node, 1);
2421 if (err)
2422 goto out_flush;
2423
2424 err = move_pages_and_store_status(current_node, &pagelist,
2425 status, start, i, nr_pages);
2426 if (err) {
2427 /* We have accounted for page i */
2428 if (err > 0)
2429 err--;
2430 goto out;
2431 }
2432 current_node = NUMA_NO_NODE;
2433 }
2434 out_flush:
2435 /* Make sure we do not overwrite the existing error */
2436 err1 = move_pages_and_store_status(current_node, &pagelist,
2437 status, start, i, nr_pages);
2438 if (err >= 0)
2439 err = err1;
2440 out:
2441 lru_cache_enable();
2442 return err;
2443 }
2444
2445 /*
2446 * Determine the nodes of an array of pages and store them in an array of status values.
2447 */
2448 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2449 const void __user **pages, int *status)
2450 {
2451 unsigned long i;
2452
2453 mmap_read_lock(mm);
2454
2455 for (i = 0; i < nr_pages; i++) {
2456 unsigned long addr = (unsigned long)(*pages);
2457 struct vm_area_struct *vma;
2458 struct folio_walk fw;
2459 struct folio *folio;
2460 int err = -EFAULT;
2461
2462 vma = vma_lookup(mm, addr);
2463 if (!vma)
2464 goto set_status;
2465
2466 folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
2467 if (folio) {
2468 if (is_zero_folio(folio) || is_huge_zero_folio(folio))
2469 err = -EFAULT;
2470 else if (folio_is_zone_device(folio))
2471 err = -ENOENT;
2472 else
2473 err = folio_nid(folio);
2474 folio_walk_end(&fw, vma);
2475 } else {
2476 err = -ENOENT;
2477 }
2478 set_status:
2479 *status = err;
2480
2481 pages++;
2482 status++;
2483 }
2484
2485 mmap_read_unlock(mm);
2486 }
2487
2488 static int get_compat_pages_array(const void __user *chunk_pages[],
2489 const void __user * __user *pages,
2490 unsigned long chunk_offset,
2491 unsigned long chunk_nr)
2492 {
2493 compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2494 compat_uptr_t p;
2495 int i;
2496
2497 for (i = 0; i < chunk_nr; i++) {
2498 if (get_user(p, pages32 + chunk_offset + i))
2499 return -EFAULT;
2500 chunk_pages[i] = compat_ptr(p);
2501 }
2502
2503 return 0;
2504 }
2505
2506 /*
2507 * Determine the nodes of a user array of pages and store them in
2508 * a user array of status values.
2509 */
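/*
 * For example (illustrative only): with nr_pages == 40 the loop below runs
 * three times, resolving chunks of 16, 16 and 8 page addresses via
 * do_pages_stat_array() and copying each chunk of node ids or -errno values
 * back to the user status array before moving on to the next chunk.
 */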
2510 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2511 const void __user * __user *pages,
2512 int __user *status)
2513 {
2514 #define DO_PAGES_STAT_CHUNK_NR 16UL
2515 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2516 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
2517 unsigned long chunk_offset = 0;
2518
2519 while (nr_pages) {
2520 unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
2521
2522 if (in_compat_syscall()) {
2523 if (get_compat_pages_array(chunk_pages, pages,
2524 chunk_offset, chunk_nr))
2525 break;
2526 } else {
2527 if (copy_from_user(chunk_pages, pages + chunk_offset,
2528 chunk_nr * sizeof(*chunk_pages)))
2529 break;
2530 }
2531
2532 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2533
2534 if (copy_to_user(status + chunk_offset, chunk_status,
2535 chunk_nr * sizeof(*status)))
2536 break;
2537
2538 chunk_offset += chunk_nr;
2539 nr_pages -= chunk_nr;
2540 }
2541 return nr_pages ? -EFAULT : 0;
2542 }
2543
2544 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
2545 {
2546 struct task_struct *task;
2547 struct mm_struct *mm;
2548
2549 /*
2550 * There is no need to check if the current process has the right to modify
2551 * the specified process when they are the same.
2552 */
2553 if (!pid) {
2554 mmget(current->mm);
2555 *mem_nodes = cpuset_mems_allowed(current);
2556 return current->mm;
2557 }
2558
2559 task = find_get_task_by_vpid(pid);
2560 	if (!task)
2561 		return ERR_PTR(-ESRCH);
2563
2564 /*
2565 * Check if this process has the right to modify the specified
2566 * process. Use the regular "ptrace_may_access()" checks.
2567 */
2568 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2569 mm = ERR_PTR(-EPERM);
2570 goto out;
2571 }
2572
2573 mm = ERR_PTR(security_task_movememory(task));
2574 if (IS_ERR(mm))
2575 goto out;
2576 *mem_nodes = cpuset_mems_allowed(task);
2577 mm = get_task_mm(task);
2578 out:
2579 put_task_struct(task);
2580 if (!mm)
2581 mm = ERR_PTR(-EINVAL);
2582 return mm;
2583 }
2584
2585 /*
2586 * Move a list of pages in the address space of the currently executing
2587 * process.
2588 */
2589 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2590 const void __user * __user *pages,
2591 const int __user *nodes,
2592 int __user *status, int flags)
2593 {
2594 struct mm_struct *mm;
2595 int err;
2596 nodemask_t task_nodes;
2597
2598 /* Check flags */
2599 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2600 return -EINVAL;
2601
2602 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2603 return -EPERM;
2604
2605 mm = find_mm_struct(pid, &task_nodes);
2606 if (IS_ERR(mm))
2607 return PTR_ERR(mm);
2608
2609 if (nodes)
2610 err = do_pages_move(mm, task_nodes, nr_pages, pages,
2611 nodes, status, flags);
2612 else
2613 err = do_pages_stat(mm, nr_pages, pages, status);
2614
2615 mmput(mm);
2616 return err;
2617 }
2618
2619 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2620 const void __user * __user *, pages,
2621 const int __user *, nodes,
2622 int __user *, status, int, flags)
2623 {
2624 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2625 }
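/*
 * Userspace view (illustrative sketch, not taken from this file): the
 * syscall is normally reached through the libnuma wrapper declared in
 * <numaif.h>. A hypothetical caller moving one page of the current process
 * to node 1 might look like:
 *
 *	void *pages[1] = { addr };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *
 *	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) == 0)
 *		printf("page now on node (or -errno) %d\n", status[0]);
 *
 * Passing nodes == NULL instead queries the current placement of each page
 * (the do_pages_stat() path above).
 */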
2626 #endif /* CONFIG_NUMA */
2627
2628 #ifdef CONFIG_NUMA_BALANCING
2629 /*
2630 * Returns true if this is a safe migration target node for misplaced NUMA
2631 * pages. Currently it only checks the watermarks which is crude.
2632 */
2633 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2634 unsigned long nr_migrate_pages)
2635 {
2636 int z;
2637
2638 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2639 struct zone *zone = pgdat->node_zones + z;
2640
2641 if (!managed_zone(zone))
2642 continue;
2643
2644 /* Avoid waking kswapd by allocating pages_to_migrate pages. */
2645 if (!zone_watermark_ok(zone, 0,
2646 high_wmark_pages(zone) +
2647 nr_migrate_pages,
2648 ZONE_MOVABLE, ALLOC_CMA))
2649 continue;
2650 return true;
2651 }
2652 return false;
2653 }
2654
2655 static struct folio *alloc_misplaced_dst_folio(struct folio *src,
2656 unsigned long data)
2657 {
2658 int nid = (int) data;
2659 int order = folio_order(src);
2660 gfp_t gfp = __GFP_THISNODE;
2661
2662 if (order > 0)
2663 gfp |= GFP_TRANSHUGE_LIGHT;
2664 else {
2665 gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2666 __GFP_NOWARN;
2667 gfp &= ~__GFP_RECLAIM;
2668 }
2669 return __folio_alloc_node(gfp, order, nid);
2670 }
2671
2672 /*
2673 * Prepare for calling migrate_misplaced_folio() by isolating the folio if
2674 * permitted. Must be called with the PTL still held.
2675 */
2676 int migrate_misplaced_folio_prepare(struct folio *folio,
2677 struct vm_area_struct *vma, int node)
2678 {
2679 int nr_pages = folio_nr_pages(folio);
2680 pg_data_t *pgdat = NODE_DATA(node);
2681
2682 if (folio_is_file_lru(folio)) {
2683 /*
2684 * Do not migrate file folios that are mapped in multiple
2685 * processes with execute permissions as they are probably
2686 * shared libraries.
2687 *
2688 * See folio_maybe_mapped_shared() on possible imprecision
2689 * when we cannot easily detect if a folio is shared.
2690 */
2691 if ((vma->vm_flags & VM_EXEC) && folio_maybe_mapped_shared(folio))
2692 return -EACCES;
2693
2694 /*
2695 * Do not migrate dirty folios as not all filesystems can move
2696 * dirty folios in MIGRATE_ASYNC mode, which is a waste of
2697 * cycles.
2698 */
2699 if (folio_test_dirty(folio))
2700 return -EAGAIN;
2701 }
2702
2703 /* Avoid migrating to a node that is nearly full */
2704 if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2705 int z;
2706
2707 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2708 return -EAGAIN;
2709 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2710 if (managed_zone(pgdat->node_zones + z))
2711 break;
2712 }
2713
2714 /*
2715 * If there are no managed zones, it should not proceed
2716 * further.
2717 */
2718 if (z < 0)
2719 return -EAGAIN;
2720
2721 wakeup_kswapd(pgdat->node_zones + z, 0,
2722 folio_order(folio), ZONE_MOVABLE);
2723 return -EAGAIN;
2724 }
2725
2726 if (!folio_isolate_lru(folio))
2727 return -EAGAIN;
2728
2729 node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
2730 nr_pages);
2731 return 0;
2732 }
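/*
 * Typical usage (hedged sketch, loosely modelled on the NUMA hinting fault
 * path; not a verbatim copy of any caller): the folio is isolated while the
 * page table lock is still held, the lock is dropped, and only then is the
 * migration attempted:
 *
 *	if (migrate_misplaced_folio_prepare(folio, vma, target_nid))
 *		goto out_map;
 *	pte_unmap_unlock(ptep, ptl);
 *	migrate_misplaced_folio(folio, target_nid);
 */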
2733
2734 /*
2735 * Attempt to migrate a misplaced folio to the specified destination
2736 * node. Caller is expected to have isolated the folio by calling
2737 * migrate_misplaced_folio_prepare(), which will result in an
2738 * elevated reference count on the folio. This function will un-isolate the
2739 * folio, dropping that extra reference before returning.
2740 */
2741 int migrate_misplaced_folio(struct folio *folio, int node)
2742 {
2743 pg_data_t *pgdat = NODE_DATA(node);
2744 int nr_remaining;
2745 unsigned int nr_succeeded;
2746 LIST_HEAD(migratepages);
2747 struct mem_cgroup *memcg = get_mem_cgroup_from_folio(folio);
2748 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
2749
2750 list_add(&folio->lru, &migratepages);
2751 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
2752 NULL, node, MIGRATE_ASYNC,
2753 MR_NUMA_MISPLACED, &nr_succeeded);
2754 if (nr_remaining && !list_empty(&migratepages))
2755 putback_movable_pages(&migratepages);
2756 if (nr_succeeded) {
2757 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2758 count_memcg_events(memcg, NUMA_PAGE_MIGRATE, nr_succeeded);
2759 if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)
2760 && !node_is_toptier(folio_nid(folio))
2761 && node_is_toptier(node))
2762 mod_lruvec_state(lruvec, PGPROMOTE_SUCCESS, nr_succeeded);
2763 }
2764 mem_cgroup_put(memcg);
2765 BUG_ON(!list_empty(&migratepages));
2766 return nr_remaining ? -EAGAIN : 0;
2767 }
2768 #endif /* CONFIG_NUMA_BALANCING */
2769