xref: /linux/mm/migrate.c (revision a06247c6804f1a7c86a2e5398a4c1f1db1471848)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Memory Migration functionality - linux/mm/migrate.c
4  *
5  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6  *
7  * Page migration was first developed in the context of the memory hotplug
8  * project. The main authors of the migration code are:
9  *
10  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11  * Hirokazu Takahashi <taka@valinux.co.jp>
12  * Dave Hansen <haveblue@us.ibm.com>
13  * Christoph Lameter
14  */
15 
16 #include <linux/migrate.h>
17 #include <linux/export.h>
18 #include <linux/swap.h>
19 #include <linux/swapops.h>
20 #include <linux/pagemap.h>
21 #include <linux/buffer_head.h>
22 #include <linux/mm_inline.h>
23 #include <linux/nsproxy.h>
24 #include <linux/pagevec.h>
25 #include <linux/ksm.h>
26 #include <linux/rmap.h>
27 #include <linux/topology.h>
28 #include <linux/cpu.h>
29 #include <linux/cpuset.h>
30 #include <linux/writeback.h>
31 #include <linux/mempolicy.h>
32 #include <linux/vmalloc.h>
33 #include <linux/security.h>
34 #include <linux/backing-dev.h>
35 #include <linux/compaction.h>
36 #include <linux/syscalls.h>
37 #include <linux/compat.h>
38 #include <linux/hugetlb.h>
39 #include <linux/hugetlb_cgroup.h>
40 #include <linux/gfp.h>
41 #include <linux/pagewalk.h>
42 #include <linux/pfn_t.h>
43 #include <linux/memremap.h>
44 #include <linux/userfaultfd_k.h>
45 #include <linux/balloon_compaction.h>
46 #include <linux/mmu_notifier.h>
47 #include <linux/page_idle.h>
48 #include <linux/page_owner.h>
49 #include <linux/sched/mm.h>
50 #include <linux/ptrace.h>
51 #include <linux/oom.h>
52 #include <linux/memory.h>
53 
54 #include <asm/tlbflush.h>
55 
56 #define CREATE_TRACE_POINTS
57 #include <trace/events/migrate.h>
58 
59 #include "internal.h"
60 
61 int isolate_movable_page(struct page *page, isolate_mode_t mode)
62 {
63 	struct address_space *mapping;
64 
65 	/*
66 	 * Avoid burning cycles with pages that are yet under __free_pages(),
67 	 * or just got freed under us.
68 	 *
69 	 * In case we 'win' a race for a movable page being freed under us and
70 	 * raise its refcount preventing __free_pages() from doing its job
71 	 * the put_page() at the end of this block will take care of
72 	 * release this page, thus avoiding a nasty leakage.
73 	 */
74 	if (unlikely(!get_page_unless_zero(page)))
75 		goto out;
76 
77 	/*
78 	 * Check PageMovable before holding a PG_lock because page's owner
79 	 * assumes anybody doesn't touch PG_lock of newly allocated page
80 	 * so unconditionally grabbing the lock ruins page's owner side.
81 	 */
82 	if (unlikely(!__PageMovable(page)))
83 		goto out_putpage;
84 	/*
85 	 * As movable pages are not isolated from LRU lists, concurrent
86 	 * compaction threads can race against page migration functions
87 	 * as well as race against the releasing a page.
88 	 *
89 	 * In order to avoid having an already isolated movable page
90 	 * being (wrongly) re-isolated while it is under migration,
91 	 * or to avoid attempting to isolate pages being released,
92 	 * lets be sure we have the page lock
93 	 * before proceeding with the movable page isolation steps.
94 	 */
95 	if (unlikely(!trylock_page(page)))
96 		goto out_putpage;
97 
98 	if (!PageMovable(page) || PageIsolated(page))
99 		goto out_no_isolated;
100 
101 	mapping = page_mapping(page);
102 	VM_BUG_ON_PAGE(!mapping, page);
103 
104 	if (!mapping->a_ops->isolate_page(page, mode))
105 		goto out_no_isolated;
106 
107 	/* Driver shouldn't use PG_isolated bit of page->flags */
108 	WARN_ON_ONCE(PageIsolated(page));
109 	__SetPageIsolated(page);
110 	unlock_page(page);
111 
112 	return 0;
113 
114 out_no_isolated:
115 	unlock_page(page);
116 out_putpage:
117 	put_page(page);
118 out:
119 	return -EBUSY;
120 }
121 
122 static void putback_movable_page(struct page *page)
123 {
124 	struct address_space *mapping;
125 
126 	mapping = page_mapping(page);
127 	mapping->a_ops->putback_page(page);
128 	__ClearPageIsolated(page);
129 }
130 
131 /*
132  * Put previously isolated pages back onto the appropriate lists
133  * from where they were once taken off for compaction/migration.
134  *
135  * This function shall be used whenever the isolated pageset has been
136  * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
137  * and isolate_huge_page().
138  */
139 void putback_movable_pages(struct list_head *l)
140 {
141 	struct page *page;
142 	struct page *page2;
143 
144 	list_for_each_entry_safe(page, page2, l, lru) {
145 		if (unlikely(PageHuge(page))) {
146 			putback_active_hugepage(page);
147 			continue;
148 		}
149 		list_del(&page->lru);
150 		/*
151 		 * We isolated non-lru movable page so here we can use
152 		 * __PageMovable because LRU page's mapping cannot have
153 		 * PAGE_MAPPING_MOVABLE.
154 		 */
155 		if (unlikely(__PageMovable(page))) {
156 			VM_BUG_ON_PAGE(!PageIsolated(page), page);
157 			lock_page(page);
158 			if (PageMovable(page))
159 				putback_movable_page(page);
160 			else
161 				__ClearPageIsolated(page);
162 			unlock_page(page);
163 			put_page(page);
164 		} else {
165 			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
166 					page_is_file_lru(page), -thp_nr_pages(page));
167 			putback_lru_page(page);
168 		}
169 	}
170 }
171 
172 /*
173  * Restore a potential migration pte to a working pte entry
174  */
175 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
176 				 unsigned long addr, void *old)
177 {
178 	struct page_vma_mapped_walk pvmw = {
179 		.page = old,
180 		.vma = vma,
181 		.address = addr,
182 		.flags = PVMW_SYNC | PVMW_MIGRATION,
183 	};
184 	struct page *new;
185 	pte_t pte;
186 	swp_entry_t entry;
187 
188 	VM_BUG_ON_PAGE(PageTail(page), page);
189 	while (page_vma_mapped_walk(&pvmw)) {
190 		if (PageKsm(page))
191 			new = page;
192 		else
193 			new = page - pvmw.page->index +
194 				linear_page_index(vma, pvmw.address);
195 
196 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
197 		/* PMD-mapped THP migration entry */
198 		if (!pvmw.pte) {
199 			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
200 			remove_migration_pmd(&pvmw, new);
201 			continue;
202 		}
203 #endif
204 
205 		get_page(new);
206 		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
207 		if (pte_swp_soft_dirty(*pvmw.pte))
208 			pte = pte_mksoft_dirty(pte);
209 
210 		/*
211 		 * Recheck VMA as permissions can change since migration started
212 		 */
213 		entry = pte_to_swp_entry(*pvmw.pte);
214 		if (is_writable_migration_entry(entry))
215 			pte = maybe_mkwrite(pte, vma);
216 		else if (pte_swp_uffd_wp(*pvmw.pte))
217 			pte = pte_mkuffd_wp(pte);
218 
219 		if (unlikely(is_device_private_page(new))) {
220 			if (pte_write(pte))
221 				entry = make_writable_device_private_entry(
222 							page_to_pfn(new));
223 			else
224 				entry = make_readable_device_private_entry(
225 							page_to_pfn(new));
226 			pte = swp_entry_to_pte(entry);
227 			if (pte_swp_soft_dirty(*pvmw.pte))
228 				pte = pte_swp_mksoft_dirty(pte);
229 			if (pte_swp_uffd_wp(*pvmw.pte))
230 				pte = pte_swp_mkuffd_wp(pte);
231 		}
232 
233 #ifdef CONFIG_HUGETLB_PAGE
234 		if (PageHuge(new)) {
235 			unsigned int shift = huge_page_shift(hstate_vma(vma));
236 
237 			pte = pte_mkhuge(pte);
238 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
239 			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
240 			if (PageAnon(new))
241 				hugepage_add_anon_rmap(new, vma, pvmw.address);
242 			else
243 				page_dup_rmap(new, true);
244 		} else
245 #endif
246 		{
247 			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
248 
249 			if (PageAnon(new))
250 				page_add_anon_rmap(new, vma, pvmw.address, false);
251 			else
252 				page_add_file_rmap(new, false);
253 		}
254 		if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
255 			mlock_vma_page(new);
256 
257 		if (PageTransHuge(page) && PageMlocked(page))
258 			clear_page_mlock(page);
259 
260 		/* No need to invalidate - it was non-present before */
261 		update_mmu_cache(vma, pvmw.address, pvmw.pte);
262 	}
263 
264 	return true;
265 }
266 
267 /*
268  * Get rid of all migration entries and replace them by
269  * references to the indicated page.
270  */
271 void remove_migration_ptes(struct page *old, struct page *new, bool locked)
272 {
273 	struct rmap_walk_control rwc = {
274 		.rmap_one = remove_migration_pte,
275 		.arg = old,
276 	};
277 
278 	if (locked)
279 		rmap_walk_locked(new, &rwc);
280 	else
281 		rmap_walk(new, &rwc);
282 }
283 
284 /*
285  * Something used the pte of a page under migration. We need to
286  * get to the page and wait until migration is finished.
287  * When we return from this function the fault will be retried.
288  */
289 void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
290 				spinlock_t *ptl)
291 {
292 	pte_t pte;
293 	swp_entry_t entry;
294 	struct folio *folio;
295 
296 	spin_lock(ptl);
297 	pte = *ptep;
298 	if (!is_swap_pte(pte))
299 		goto out;
300 
301 	entry = pte_to_swp_entry(pte);
302 	if (!is_migration_entry(entry))
303 		goto out;
304 
305 	folio = page_folio(pfn_swap_entry_to_page(entry));
306 
307 	/*
308 	 * Once page cache replacement of page migration started, page_count
309 	 * is zero; but we must not call folio_put_wait_locked() without
310 	 * a ref. Use folio_try_get(), and just fault again if it fails.
311 	 */
312 	if (!folio_try_get(folio))
313 		goto out;
314 	pte_unmap_unlock(ptep, ptl);
315 	folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
316 	return;
317 out:
318 	pte_unmap_unlock(ptep, ptl);
319 }
320 
321 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
322 				unsigned long address)
323 {
324 	spinlock_t *ptl = pte_lockptr(mm, pmd);
325 	pte_t *ptep = pte_offset_map(pmd, address);
326 	__migration_entry_wait(mm, ptep, ptl);
327 }
328 
329 void migration_entry_wait_huge(struct vm_area_struct *vma,
330 		struct mm_struct *mm, pte_t *pte)
331 {
332 	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
333 	__migration_entry_wait(mm, pte, ptl);
334 }
335 
336 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
337 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
338 {
339 	spinlock_t *ptl;
340 	struct folio *folio;
341 
342 	ptl = pmd_lock(mm, pmd);
343 	if (!is_pmd_migration_entry(*pmd))
344 		goto unlock;
345 	folio = page_folio(pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd)));
346 	if (!folio_try_get(folio))
347 		goto unlock;
348 	spin_unlock(ptl);
349 	folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
350 	return;
351 unlock:
352 	spin_unlock(ptl);
353 }
354 #endif
355 
356 static int expected_page_refs(struct address_space *mapping, struct page *page)
357 {
358 	int expected_count = 1;
359 
360 	/*
361 	 * Device private pages have an extra refcount as they are
362 	 * ZONE_DEVICE pages.
363 	 */
364 	expected_count += is_device_private_page(page);
365 	if (mapping)
366 		expected_count += compound_nr(page) + page_has_private(page);
367 
368 	return expected_count;
369 }
370 
371 /*
372  * Replace the page in the mapping.
373  *
374  * The number of remaining references must be:
375  * 1 for anonymous pages without a mapping
376  * 2 for pages with a mapping
377  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
378  */
379 int folio_migrate_mapping(struct address_space *mapping,
380 		struct folio *newfolio, struct folio *folio, int extra_count)
381 {
382 	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
383 	struct zone *oldzone, *newzone;
384 	int dirty;
385 	int expected_count = expected_page_refs(mapping, &folio->page) + extra_count;
386 	long nr = folio_nr_pages(folio);
387 
388 	if (!mapping) {
389 		/* Anonymous page without mapping */
390 		if (folio_ref_count(folio) != expected_count)
391 			return -EAGAIN;
392 
393 		/* No turning back from here */
394 		newfolio->index = folio->index;
395 		newfolio->mapping = folio->mapping;
396 		if (folio_test_swapbacked(folio))
397 			__folio_set_swapbacked(newfolio);
398 
399 		return MIGRATEPAGE_SUCCESS;
400 	}
401 
402 	oldzone = folio_zone(folio);
403 	newzone = folio_zone(newfolio);
404 
405 	xas_lock_irq(&xas);
406 	if (!folio_ref_freeze(folio, expected_count)) {
407 		xas_unlock_irq(&xas);
408 		return -EAGAIN;
409 	}
410 
411 	/*
412 	 * Now we know that no one else is looking at the folio:
413 	 * no turning back from here.
414 	 */
415 	newfolio->index = folio->index;
416 	newfolio->mapping = folio->mapping;
417 	folio_ref_add(newfolio, nr); /* add cache reference */
418 	if (folio_test_swapbacked(folio)) {
419 		__folio_set_swapbacked(newfolio);
420 		if (folio_test_swapcache(folio)) {
421 			folio_set_swapcache(newfolio);
422 			newfolio->private = folio_get_private(folio);
423 		}
424 	} else {
425 		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
426 	}
427 
428 	/* Move dirty while page refs frozen and newpage not yet exposed */
429 	dirty = folio_test_dirty(folio);
430 	if (dirty) {
431 		folio_clear_dirty(folio);
432 		folio_set_dirty(newfolio);
433 	}
434 
435 	xas_store(&xas, newfolio);
436 
437 	/*
438 	 * Drop cache reference from old page by unfreezing
439 	 * to one less reference.
440 	 * We know this isn't the last reference.
441 	 */
442 	folio_ref_unfreeze(folio, expected_count - nr);
443 
444 	xas_unlock(&xas);
445 	/* Leave irq disabled to prevent preemption while updating stats */
446 
447 	/*
448 	 * If moved to a different zone then also account
449 	 * the page for that zone. Other VM counters will be
450 	 * taken care of when we establish references to the
451 	 * new page and drop references to the old page.
452 	 *
453 	 * Note that anonymous pages are accounted for
454 	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
455 	 * are mapped to swap space.
456 	 */
457 	if (newzone != oldzone) {
458 		struct lruvec *old_lruvec, *new_lruvec;
459 		struct mem_cgroup *memcg;
460 
461 		memcg = folio_memcg(folio);
462 		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
463 		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
464 
465 		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
466 		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
467 		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
468 			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
469 			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
470 		}
471 #ifdef CONFIG_SWAP
472 		if (folio_test_swapcache(folio)) {
473 			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
474 			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
475 		}
476 #endif
477 		if (dirty && mapping_can_writeback(mapping)) {
478 			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
479 			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
480 			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
481 			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
482 		}
483 	}
484 	local_irq_enable();
485 
486 	return MIGRATEPAGE_SUCCESS;
487 }
488 EXPORT_SYMBOL(folio_migrate_mapping);
489 
490 /*
491  * The expected number of remaining references is the same as that
492  * of folio_migrate_mapping().
493  */
494 int migrate_huge_page_move_mapping(struct address_space *mapping,
495 				   struct page *newpage, struct page *page)
496 {
497 	XA_STATE(xas, &mapping->i_pages, page_index(page));
498 	int expected_count;
499 
500 	xas_lock_irq(&xas);
501 	expected_count = 2 + page_has_private(page);
502 	if (page_count(page) != expected_count || xas_load(&xas) != page) {
503 		xas_unlock_irq(&xas);
504 		return -EAGAIN;
505 	}
506 
507 	if (!page_ref_freeze(page, expected_count)) {
508 		xas_unlock_irq(&xas);
509 		return -EAGAIN;
510 	}
511 
512 	newpage->index = page->index;
513 	newpage->mapping = page->mapping;
514 
515 	get_page(newpage);
516 
517 	xas_store(&xas, newpage);
518 
519 	page_ref_unfreeze(page, expected_count - 1);
520 
521 	xas_unlock_irq(&xas);
522 
523 	return MIGRATEPAGE_SUCCESS;
524 }
525 
526 /*
527  * Copy the flags and some other ancillary information
528  */
529 void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
530 {
531 	int cpupid;
532 
533 	if (folio_test_error(folio))
534 		folio_set_error(newfolio);
535 	if (folio_test_referenced(folio))
536 		folio_set_referenced(newfolio);
537 	if (folio_test_uptodate(folio))
538 		folio_mark_uptodate(newfolio);
539 	if (folio_test_clear_active(folio)) {
540 		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
541 		folio_set_active(newfolio);
542 	} else if (folio_test_clear_unevictable(folio))
543 		folio_set_unevictable(newfolio);
544 	if (folio_test_workingset(folio))
545 		folio_set_workingset(newfolio);
546 	if (folio_test_checked(folio))
547 		folio_set_checked(newfolio);
548 	if (folio_test_mappedtodisk(folio))
549 		folio_set_mappedtodisk(newfolio);
550 
551 	/* Move dirty on pages not done by folio_migrate_mapping() */
552 	if (folio_test_dirty(folio))
553 		folio_set_dirty(newfolio);
554 
555 	if (folio_test_young(folio))
556 		folio_set_young(newfolio);
557 	if (folio_test_idle(folio))
558 		folio_set_idle(newfolio);
559 
560 	/*
561 	 * Copy NUMA information to the new page, to prevent over-eager
562 	 * future migrations of this same page.
563 	 */
564 	cpupid = page_cpupid_xchg_last(&folio->page, -1);
565 	page_cpupid_xchg_last(&newfolio->page, cpupid);
566 
567 	folio_migrate_ksm(newfolio, folio);
568 	/*
569 	 * Please do not reorder this without considering how mm/ksm.c's
570 	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
571 	 */
572 	if (folio_test_swapcache(folio))
573 		folio_clear_swapcache(folio);
574 	folio_clear_private(folio);
575 
576 	/* page->private contains hugetlb specific flags */
577 	if (!folio_test_hugetlb(folio))
578 		folio->private = NULL;
579 
580 	/*
581 	 * If any waiters have accumulated on the new page then
582 	 * wake them up.
583 	 */
584 	if (folio_test_writeback(newfolio))
585 		folio_end_writeback(newfolio);
586 
587 	/*
588 	 * PG_readahead shares the same bit with PG_reclaim.  The above
589 	 * end_page_writeback() may clear PG_readahead mistakenly, so set the
590 	 * bit after that.
591 	 */
592 	if (folio_test_readahead(folio))
593 		folio_set_readahead(newfolio);
594 
595 	folio_copy_owner(newfolio, folio);
596 
597 	if (!folio_test_hugetlb(folio))
598 		mem_cgroup_migrate(folio, newfolio);
599 }
600 EXPORT_SYMBOL(folio_migrate_flags);
601 
602 void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
603 {
604 	folio_copy(newfolio, folio);
605 	folio_migrate_flags(newfolio, folio);
606 }
607 EXPORT_SYMBOL(folio_migrate_copy);
608 
609 /************************************************************
610  *                    Migration functions
611  ***********************************************************/
612 
613 /*
614  * Common logic to directly migrate a single LRU page suitable for
615  * pages that do not use PagePrivate/PagePrivate2.
616  *
617  * Pages are locked upon entry and exit.
618  */
619 int migrate_page(struct address_space *mapping,
620 		struct page *newpage, struct page *page,
621 		enum migrate_mode mode)
622 {
623 	struct folio *newfolio = page_folio(newpage);
624 	struct folio *folio = page_folio(page);
625 	int rc;
626 
627 	BUG_ON(folio_test_writeback(folio));	/* Writeback must be complete */
628 
629 	rc = folio_migrate_mapping(mapping, newfolio, folio, 0);
630 
631 	if (rc != MIGRATEPAGE_SUCCESS)
632 		return rc;
633 
634 	if (mode != MIGRATE_SYNC_NO_COPY)
635 		folio_migrate_copy(newfolio, folio);
636 	else
637 		folio_migrate_flags(newfolio, folio);
638 	return MIGRATEPAGE_SUCCESS;
639 }
640 EXPORT_SYMBOL(migrate_page);
641 
642 #ifdef CONFIG_BLOCK
643 /* Returns true if all buffers are successfully locked */
644 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
645 							enum migrate_mode mode)
646 {
647 	struct buffer_head *bh = head;
648 
649 	/* Simple case, sync compaction */
650 	if (mode != MIGRATE_ASYNC) {
651 		do {
652 			lock_buffer(bh);
653 			bh = bh->b_this_page;
654 
655 		} while (bh != head);
656 
657 		return true;
658 	}
659 
660 	/* async case, we cannot block on lock_buffer so use trylock_buffer */
661 	do {
662 		if (!trylock_buffer(bh)) {
663 			/*
664 			 * We failed to lock the buffer and cannot stall in
665 			 * async migration. Release the taken locks
666 			 */
667 			struct buffer_head *failed_bh = bh;
668 			bh = head;
669 			while (bh != failed_bh) {
670 				unlock_buffer(bh);
671 				bh = bh->b_this_page;
672 			}
673 			return false;
674 		}
675 
676 		bh = bh->b_this_page;
677 	} while (bh != head);
678 	return true;
679 }
680 
681 static int __buffer_migrate_page(struct address_space *mapping,
682 		struct page *newpage, struct page *page, enum migrate_mode mode,
683 		bool check_refs)
684 {
685 	struct buffer_head *bh, *head;
686 	int rc;
687 	int expected_count;
688 
689 	if (!page_has_buffers(page))
690 		return migrate_page(mapping, newpage, page, mode);
691 
692 	/* Check whether page does not have extra refs before we do more work */
693 	expected_count = expected_page_refs(mapping, page);
694 	if (page_count(page) != expected_count)
695 		return -EAGAIN;
696 
697 	head = page_buffers(page);
698 	if (!buffer_migrate_lock_buffers(head, mode))
699 		return -EAGAIN;
700 
701 	if (check_refs) {
702 		bool busy;
703 		bool invalidated = false;
704 
705 recheck_buffers:
706 		busy = false;
707 		spin_lock(&mapping->private_lock);
708 		bh = head;
709 		do {
710 			if (atomic_read(&bh->b_count)) {
711 				busy = true;
712 				break;
713 			}
714 			bh = bh->b_this_page;
715 		} while (bh != head);
716 		if (busy) {
717 			if (invalidated) {
718 				rc = -EAGAIN;
719 				goto unlock_buffers;
720 			}
721 			spin_unlock(&mapping->private_lock);
722 			invalidate_bh_lrus();
723 			invalidated = true;
724 			goto recheck_buffers;
725 		}
726 	}
727 
728 	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
729 	if (rc != MIGRATEPAGE_SUCCESS)
730 		goto unlock_buffers;
731 
732 	attach_page_private(newpage, detach_page_private(page));
733 
734 	bh = head;
735 	do {
736 		set_bh_page(bh, newpage, bh_offset(bh));
737 		bh = bh->b_this_page;
738 
739 	} while (bh != head);
740 
741 	if (mode != MIGRATE_SYNC_NO_COPY)
742 		migrate_page_copy(newpage, page);
743 	else
744 		migrate_page_states(newpage, page);
745 
746 	rc = MIGRATEPAGE_SUCCESS;
747 unlock_buffers:
748 	if (check_refs)
749 		spin_unlock(&mapping->private_lock);
750 	bh = head;
751 	do {
752 		unlock_buffer(bh);
753 		bh = bh->b_this_page;
754 
755 	} while (bh != head);
756 
757 	return rc;
758 }
759 
760 /*
761  * Migration function for pages with buffers. This function can only be used
762  * if the underlying filesystem guarantees that no other references to "page"
763  * exist. For example attached buffer heads are accessed only under page lock.
764  */
765 int buffer_migrate_page(struct address_space *mapping,
766 		struct page *newpage, struct page *page, enum migrate_mode mode)
767 {
768 	return __buffer_migrate_page(mapping, newpage, page, mode, false);
769 }
770 EXPORT_SYMBOL(buffer_migrate_page);
771 
772 /*
773  * Same as above except that this variant is more careful and checks that there
774  * are also no buffer head references. This function is the right one for
775  * mappings where buffer heads are directly looked up and referenced (such as
776  * block device mappings).
777  */
778 int buffer_migrate_page_norefs(struct address_space *mapping,
779 		struct page *newpage, struct page *page, enum migrate_mode mode)
780 {
781 	return __buffer_migrate_page(mapping, newpage, page, mode, true);
782 }
783 #endif
784 
785 /*
786  * Writeback a page to clean the dirty state
787  */
788 static int writeout(struct address_space *mapping, struct page *page)
789 {
790 	struct writeback_control wbc = {
791 		.sync_mode = WB_SYNC_NONE,
792 		.nr_to_write = 1,
793 		.range_start = 0,
794 		.range_end = LLONG_MAX,
795 		.for_reclaim = 1
796 	};
797 	int rc;
798 
799 	if (!mapping->a_ops->writepage)
800 		/* No write method for the address space */
801 		return -EINVAL;
802 
803 	if (!clear_page_dirty_for_io(page))
804 		/* Someone else already triggered a write */
805 		return -EAGAIN;
806 
807 	/*
808 	 * A dirty page may imply that the underlying filesystem has
809 	 * the page on some queue. So the page must be clean for
810 	 * migration. Writeout may mean we loose the lock and the
811 	 * page state is no longer what we checked for earlier.
812 	 * At this point we know that the migration attempt cannot
813 	 * be successful.
814 	 */
815 	remove_migration_ptes(page, page, false);
816 
817 	rc = mapping->a_ops->writepage(page, &wbc);
818 
819 	if (rc != AOP_WRITEPAGE_ACTIVATE)
820 		/* unlocked. Relock */
821 		lock_page(page);
822 
823 	return (rc < 0) ? -EIO : -EAGAIN;
824 }
825 
826 /*
827  * Default handling if a filesystem does not provide a migration function.
828  */
829 static int fallback_migrate_page(struct address_space *mapping,
830 	struct page *newpage, struct page *page, enum migrate_mode mode)
831 {
832 	if (PageDirty(page)) {
833 		/* Only writeback pages in full synchronous migration */
834 		switch (mode) {
835 		case MIGRATE_SYNC:
836 		case MIGRATE_SYNC_NO_COPY:
837 			break;
838 		default:
839 			return -EBUSY;
840 		}
841 		return writeout(mapping, page);
842 	}
843 
844 	/*
845 	 * Buffers may be managed in a filesystem specific way.
846 	 * We must have no buffers or drop them.
847 	 */
848 	if (page_has_private(page) &&
849 	    !try_to_release_page(page, GFP_KERNEL))
850 		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
851 
852 	return migrate_page(mapping, newpage, page, mode);
853 }
854 
855 /*
856  * Move a page to a newly allocated page
857  * The page is locked and all ptes have been successfully removed.
858  *
859  * The new page will have replaced the old page if this function
860  * is successful.
861  *
862  * Return value:
863  *   < 0 - error code
864  *  MIGRATEPAGE_SUCCESS - success
865  */
866 static int move_to_new_page(struct page *newpage, struct page *page,
867 				enum migrate_mode mode)
868 {
869 	struct address_space *mapping;
870 	int rc = -EAGAIN;
871 	bool is_lru = !__PageMovable(page);
872 
873 	VM_BUG_ON_PAGE(!PageLocked(page), page);
874 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
875 
876 	mapping = page_mapping(page);
877 
878 	if (likely(is_lru)) {
879 		if (!mapping)
880 			rc = migrate_page(mapping, newpage, page, mode);
881 		else if (mapping->a_ops->migratepage)
882 			/*
883 			 * Most pages have a mapping and most filesystems
884 			 * provide a migratepage callback. Anonymous pages
885 			 * are part of swap space which also has its own
886 			 * migratepage callback. This is the most common path
887 			 * for page migration.
888 			 */
889 			rc = mapping->a_ops->migratepage(mapping, newpage,
890 							page, mode);
891 		else
892 			rc = fallback_migrate_page(mapping, newpage,
893 							page, mode);
894 	} else {
895 		/*
896 		 * In case of non-lru page, it could be released after
897 		 * isolation step. In that case, we shouldn't try migration.
898 		 */
899 		VM_BUG_ON_PAGE(!PageIsolated(page), page);
900 		if (!PageMovable(page)) {
901 			rc = MIGRATEPAGE_SUCCESS;
902 			__ClearPageIsolated(page);
903 			goto out;
904 		}
905 
906 		rc = mapping->a_ops->migratepage(mapping, newpage,
907 						page, mode);
908 		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
909 			!PageIsolated(page));
910 	}
911 
912 	/*
913 	 * When successful, old pagecache page->mapping must be cleared before
914 	 * page is freed; but stats require that PageAnon be left as PageAnon.
915 	 */
916 	if (rc == MIGRATEPAGE_SUCCESS) {
917 		if (__PageMovable(page)) {
918 			VM_BUG_ON_PAGE(!PageIsolated(page), page);
919 
920 			/*
921 			 * We clear PG_movable under page_lock so any compactor
922 			 * cannot try to migrate this page.
923 			 */
924 			__ClearPageIsolated(page);
925 		}
926 
927 		/*
928 		 * Anonymous and movable page->mapping will be cleared by
929 		 * free_pages_prepare so don't reset it here for keeping
930 		 * the type to work PageAnon, for example.
931 		 */
932 		if (!PageMappingFlags(page))
933 			page->mapping = NULL;
934 
935 		if (likely(!is_zone_device_page(newpage)))
936 			flush_dcache_page(newpage);
937 
938 	}
939 out:
940 	return rc;
941 }
942 
943 static int __unmap_and_move(struct page *page, struct page *newpage,
944 				int force, enum migrate_mode mode)
945 {
946 	int rc = -EAGAIN;
947 	bool page_was_mapped = false;
948 	struct anon_vma *anon_vma = NULL;
949 	bool is_lru = !__PageMovable(page);
950 
951 	if (!trylock_page(page)) {
952 		if (!force || mode == MIGRATE_ASYNC)
953 			goto out;
954 
955 		/*
956 		 * It's not safe for direct compaction to call lock_page.
957 		 * For example, during page readahead pages are added locked
958 		 * to the LRU. Later, when the IO completes the pages are
959 		 * marked uptodate and unlocked. However, the queueing
960 		 * could be merging multiple pages for one bio (e.g.
961 		 * mpage_readahead). If an allocation happens for the
962 		 * second or third page, the process can end up locking
963 		 * the same page twice and deadlocking. Rather than
964 		 * trying to be clever about what pages can be locked,
965 		 * avoid the use of lock_page for direct compaction
966 		 * altogether.
967 		 */
968 		if (current->flags & PF_MEMALLOC)
969 			goto out;
970 
971 		lock_page(page);
972 	}
973 
974 	if (PageWriteback(page)) {
975 		/*
976 		 * Only in the case of a full synchronous migration is it
977 		 * necessary to wait for PageWriteback. In the async case,
978 		 * the retry loop is too short and in the sync-light case,
979 		 * the overhead of stalling is too much
980 		 */
981 		switch (mode) {
982 		case MIGRATE_SYNC:
983 		case MIGRATE_SYNC_NO_COPY:
984 			break;
985 		default:
986 			rc = -EBUSY;
987 			goto out_unlock;
988 		}
989 		if (!force)
990 			goto out_unlock;
991 		wait_on_page_writeback(page);
992 	}
993 
994 	/*
995 	 * By try_to_migrate(), page->mapcount goes down to 0 here. In this case,
996 	 * we cannot notice that anon_vma is freed while we migrates a page.
997 	 * This get_anon_vma() delays freeing anon_vma pointer until the end
998 	 * of migration. File cache pages are no problem because of page_lock()
999 	 * File Caches may use write_page() or lock_page() in migration, then,
1000 	 * just care Anon page here.
1001 	 *
1002 	 * Only page_get_anon_vma() understands the subtleties of
1003 	 * getting a hold on an anon_vma from outside one of its mms.
1004 	 * But if we cannot get anon_vma, then we won't need it anyway,
1005 	 * because that implies that the anon page is no longer mapped
1006 	 * (and cannot be remapped so long as we hold the page lock).
1007 	 */
1008 	if (PageAnon(page) && !PageKsm(page))
1009 		anon_vma = page_get_anon_vma(page);
1010 
1011 	/*
1012 	 * Block others from accessing the new page when we get around to
1013 	 * establishing additional references. We are usually the only one
1014 	 * holding a reference to newpage at this point. We used to have a BUG
1015 	 * here if trylock_page(newpage) fails, but would like to allow for
1016 	 * cases where there might be a race with the previous use of newpage.
1017 	 * This is much like races on refcount of oldpage: just don't BUG().
1018 	 */
1019 	if (unlikely(!trylock_page(newpage)))
1020 		goto out_unlock;
1021 
1022 	if (unlikely(!is_lru)) {
1023 		rc = move_to_new_page(newpage, page, mode);
1024 		goto out_unlock_both;
1025 	}
1026 
1027 	/*
1028 	 * Corner case handling:
1029 	 * 1. When a new swap-cache page is read into, it is added to the LRU
1030 	 * and treated as swapcache but it has no rmap yet.
1031 	 * Calling try_to_unmap() against a page->mapping==NULL page will
1032 	 * trigger a BUG.  So handle it here.
1033 	 * 2. An orphaned page (see truncate_cleanup_page) might have
1034 	 * fs-private metadata. The page can be picked up due to memory
1035 	 * offlining.  Everywhere else except page reclaim, the page is
1036 	 * invisible to the vm, so the page can not be migrated.  So try to
1037 	 * free the metadata, so the page can be freed.
1038 	 */
1039 	if (!page->mapping) {
1040 		VM_BUG_ON_PAGE(PageAnon(page), page);
1041 		if (page_has_private(page)) {
1042 			try_to_free_buffers(page);
1043 			goto out_unlock_both;
1044 		}
1045 	} else if (page_mapped(page)) {
1046 		/* Establish migration ptes */
1047 		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
1048 				page);
1049 		try_to_migrate(page, 0);
1050 		page_was_mapped = true;
1051 	}
1052 
1053 	if (!page_mapped(page))
1054 		rc = move_to_new_page(newpage, page, mode);
1055 
1056 	if (page_was_mapped)
1057 		remove_migration_ptes(page,
1058 			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
1059 
1060 out_unlock_both:
1061 	unlock_page(newpage);
1062 out_unlock:
1063 	/* Drop an anon_vma reference if we took one */
1064 	if (anon_vma)
1065 		put_anon_vma(anon_vma);
1066 	unlock_page(page);
1067 out:
1068 	/*
1069 	 * If migration is successful, decrease refcount of the newpage
1070 	 * which will not free the page because new page owner increased
1071 	 * refcounter. As well, if it is LRU page, add the page to LRU
1072 	 * list in here. Use the old state of the isolated source page to
1073 	 * determine if we migrated a LRU page. newpage was already unlocked
1074 	 * and possibly modified by its owner - don't rely on the page
1075 	 * state.
1076 	 */
1077 	if (rc == MIGRATEPAGE_SUCCESS) {
1078 		if (unlikely(!is_lru))
1079 			put_page(newpage);
1080 		else
1081 			putback_lru_page(newpage);
1082 	}
1083 
1084 	return rc;
1085 }
1086 
1087 
1088 /*
1089  * node_demotion[] example:
1090  *
1091  * Consider a system with two sockets.  Each socket has
1092  * three classes of memory attached: fast, medium and slow.
1093  * Each memory class is placed in its own NUMA node.  The
1094  * CPUs are placed in the node with the "fast" memory.  The
1095  * 6 NUMA nodes (0-5) might be split among the sockets like
1096  * this:
1097  *
1098  *	Socket A: 0, 1, 2
1099  *	Socket B: 3, 4, 5
1100  *
1101  * When Node 0 fills up, its memory should be migrated to
1102  * Node 1.  When Node 1 fills up, it should be migrated to
1103  * Node 2.  The migration path start on the nodes with the
1104  * processors (since allocations default to this node) and
1105  * fast memory, progress through medium and end with the
1106  * slow memory:
1107  *
1108  *	0 -> 1 -> 2 -> stop
1109  *	3 -> 4 -> 5 -> stop
1110  *
1111  * This is represented in the node_demotion[] like this:
1112  *
1113  *	{  1, // Node 0 migrates to 1
1114  *	   2, // Node 1 migrates to 2
1115  *	  -1, // Node 2 does not migrate
1116  *	   4, // Node 3 migrates to 4
1117  *	   5, // Node 4 migrates to 5
1118  *	  -1} // Node 5 does not migrate
1119  */
1120 
1121 /*
1122  * Writes to this array occur without locking.  Cycles are
1123  * not allowed: Node X demotes to Y which demotes to X...
1124  *
1125  * If multiple reads are performed, a single rcu_read_lock()
1126  * must be held over all reads to ensure that no cycles are
1127  * observed.
1128  */
1129 static int node_demotion[MAX_NUMNODES] __read_mostly =
1130 	{[0 ...  MAX_NUMNODES - 1] = NUMA_NO_NODE};
1131 
1132 /**
1133  * next_demotion_node() - Get the next node in the demotion path
1134  * @node: The starting node to lookup the next node
1135  *
1136  * Return: node id for next memory node in the demotion path hierarchy
1137  * from @node; NUMA_NO_NODE if @node is terminal.  This does not keep
1138  * @node online or guarantee that it *continues* to be the next demotion
1139  * target.
1140  */
1141 int next_demotion_node(int node)
1142 {
1143 	int target;
1144 
1145 	/*
1146 	 * node_demotion[] is updated without excluding this
1147 	 * function from running.  RCU doesn't provide any
1148 	 * compiler barriers, so the READ_ONCE() is required
1149 	 * to avoid compiler reordering or read merging.
1150 	 *
1151 	 * Make sure to use RCU over entire code blocks if
1152 	 * node_demotion[] reads need to be consistent.
1153 	 */
1154 	rcu_read_lock();
1155 	target = READ_ONCE(node_demotion[node]);
1156 	rcu_read_unlock();
1157 
1158 	return target;
1159 }
1160 
1161 /*
1162  * Obtain the lock on page, remove all ptes and migrate the page
1163  * to the newly allocated page in newpage.
1164  */
1165 static int unmap_and_move(new_page_t get_new_page,
1166 				   free_page_t put_new_page,
1167 				   unsigned long private, struct page *page,
1168 				   int force, enum migrate_mode mode,
1169 				   enum migrate_reason reason,
1170 				   struct list_head *ret)
1171 {
1172 	int rc = MIGRATEPAGE_SUCCESS;
1173 	struct page *newpage = NULL;
1174 
1175 	if (!thp_migration_supported() && PageTransHuge(page))
1176 		return -ENOSYS;
1177 
1178 	if (page_count(page) == 1) {
1179 		/* page was freed from under us. So we are done. */
1180 		ClearPageActive(page);
1181 		ClearPageUnevictable(page);
1182 		if (unlikely(__PageMovable(page))) {
1183 			lock_page(page);
1184 			if (!PageMovable(page))
1185 				__ClearPageIsolated(page);
1186 			unlock_page(page);
1187 		}
1188 		goto out;
1189 	}
1190 
1191 	newpage = get_new_page(page, private);
1192 	if (!newpage)
1193 		return -ENOMEM;
1194 
1195 	rc = __unmap_and_move(page, newpage, force, mode);
1196 	if (rc == MIGRATEPAGE_SUCCESS)
1197 		set_page_owner_migrate_reason(newpage, reason);
1198 
1199 out:
1200 	if (rc != -EAGAIN) {
1201 		/*
1202 		 * A page that has been migrated has all references
1203 		 * removed and will be freed. A page that has not been
1204 		 * migrated will have kept its references and be restored.
1205 		 */
1206 		list_del(&page->lru);
1207 	}
1208 
1209 	/*
1210 	 * If migration is successful, releases reference grabbed during
1211 	 * isolation. Otherwise, restore the page to right list unless
1212 	 * we want to retry.
1213 	 */
1214 	if (rc == MIGRATEPAGE_SUCCESS) {
1215 		/*
1216 		 * Compaction can migrate also non-LRU pages which are
1217 		 * not accounted to NR_ISOLATED_*. They can be recognized
1218 		 * as __PageMovable
1219 		 */
1220 		if (likely(!__PageMovable(page)))
1221 			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
1222 					page_is_file_lru(page), -thp_nr_pages(page));
1223 
1224 		if (reason != MR_MEMORY_FAILURE)
1225 			/*
1226 			 * We release the page in page_handle_poison.
1227 			 */
1228 			put_page(page);
1229 	} else {
1230 		if (rc != -EAGAIN)
1231 			list_add_tail(&page->lru, ret);
1232 
1233 		if (put_new_page)
1234 			put_new_page(newpage, private);
1235 		else
1236 			put_page(newpage);
1237 	}
1238 
1239 	return rc;
1240 }
1241 
1242 /*
1243  * Counterpart of unmap_and_move_page() for hugepage migration.
1244  *
1245  * This function doesn't wait the completion of hugepage I/O
1246  * because there is no race between I/O and migration for hugepage.
1247  * Note that currently hugepage I/O occurs only in direct I/O
1248  * where no lock is held and PG_writeback is irrelevant,
1249  * and writeback status of all subpages are counted in the reference
1250  * count of the head page (i.e. if all subpages of a 2MB hugepage are
1251  * under direct I/O, the reference of the head page is 512 and a bit more.)
1252  * This means that when we try to migrate hugepage whose subpages are
1253  * doing direct I/O, some references remain after try_to_unmap() and
1254  * hugepage migration fails without data corruption.
1255  *
1256  * There is also no race when direct I/O is issued on the page under migration,
1257  * because then pte is replaced with migration swap entry and direct I/O code
1258  * will wait in the page fault for migration to complete.
1259  */
1260 static int unmap_and_move_huge_page(new_page_t get_new_page,
1261 				free_page_t put_new_page, unsigned long private,
1262 				struct page *hpage, int force,
1263 				enum migrate_mode mode, int reason,
1264 				struct list_head *ret)
1265 {
1266 	int rc = -EAGAIN;
1267 	int page_was_mapped = 0;
1268 	struct page *new_hpage;
1269 	struct anon_vma *anon_vma = NULL;
1270 	struct address_space *mapping = NULL;
1271 
1272 	/*
1273 	 * Migratability of hugepages depends on architectures and their size.
1274 	 * This check is necessary because some callers of hugepage migration
1275 	 * like soft offline and memory hotremove don't walk through page
1276 	 * tables or check whether the hugepage is pmd-based or not before
1277 	 * kicking migration.
1278 	 */
1279 	if (!hugepage_migration_supported(page_hstate(hpage))) {
1280 		list_move_tail(&hpage->lru, ret);
1281 		return -ENOSYS;
1282 	}
1283 
1284 	if (page_count(hpage) == 1) {
1285 		/* page was freed from under us. So we are done. */
1286 		putback_active_hugepage(hpage);
1287 		return MIGRATEPAGE_SUCCESS;
1288 	}
1289 
1290 	new_hpage = get_new_page(hpage, private);
1291 	if (!new_hpage)
1292 		return -ENOMEM;
1293 
1294 	if (!trylock_page(hpage)) {
1295 		if (!force)
1296 			goto out;
1297 		switch (mode) {
1298 		case MIGRATE_SYNC:
1299 		case MIGRATE_SYNC_NO_COPY:
1300 			break;
1301 		default:
1302 			goto out;
1303 		}
1304 		lock_page(hpage);
1305 	}
1306 
1307 	/*
1308 	 * Check for pages which are in the process of being freed.  Without
1309 	 * page_mapping() set, hugetlbfs specific move page routine will not
1310 	 * be called and we could leak usage counts for subpools.
1311 	 */
1312 	if (hugetlb_page_subpool(hpage) && !page_mapping(hpage)) {
1313 		rc = -EBUSY;
1314 		goto out_unlock;
1315 	}
1316 
1317 	if (PageAnon(hpage))
1318 		anon_vma = page_get_anon_vma(hpage);
1319 
1320 	if (unlikely(!trylock_page(new_hpage)))
1321 		goto put_anon;
1322 
1323 	if (page_mapped(hpage)) {
1324 		bool mapping_locked = false;
1325 		enum ttu_flags ttu = 0;
1326 
1327 		if (!PageAnon(hpage)) {
1328 			/*
1329 			 * In shared mappings, try_to_unmap could potentially
1330 			 * call huge_pmd_unshare.  Because of this, take
1331 			 * semaphore in write mode here and set TTU_RMAP_LOCKED
1332 			 * to let lower levels know we have taken the lock.
1333 			 */
1334 			mapping = hugetlb_page_mapping_lock_write(hpage);
1335 			if (unlikely(!mapping))
1336 				goto unlock_put_anon;
1337 
1338 			mapping_locked = true;
1339 			ttu |= TTU_RMAP_LOCKED;
1340 		}
1341 
1342 		try_to_migrate(hpage, ttu);
1343 		page_was_mapped = 1;
1344 
1345 		if (mapping_locked)
1346 			i_mmap_unlock_write(mapping);
1347 	}
1348 
1349 	if (!page_mapped(hpage))
1350 		rc = move_to_new_page(new_hpage, hpage, mode);
1351 
1352 	if (page_was_mapped)
1353 		remove_migration_ptes(hpage,
1354 			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
1355 
1356 unlock_put_anon:
1357 	unlock_page(new_hpage);
1358 
1359 put_anon:
1360 	if (anon_vma)
1361 		put_anon_vma(anon_vma);
1362 
1363 	if (rc == MIGRATEPAGE_SUCCESS) {
1364 		move_hugetlb_state(hpage, new_hpage, reason);
1365 		put_new_page = NULL;
1366 	}
1367 
1368 out_unlock:
1369 	unlock_page(hpage);
1370 out:
1371 	if (rc == MIGRATEPAGE_SUCCESS)
1372 		putback_active_hugepage(hpage);
1373 	else if (rc != -EAGAIN)
1374 		list_move_tail(&hpage->lru, ret);
1375 
1376 	/*
1377 	 * If migration was not successful and there's a freeing callback, use
1378 	 * it.  Otherwise, put_page() will drop the reference grabbed during
1379 	 * isolation.
1380 	 */
1381 	if (put_new_page)
1382 		put_new_page(new_hpage, private);
1383 	else
1384 		putback_active_hugepage(new_hpage);
1385 
1386 	return rc;
1387 }
1388 
1389 static inline int try_split_thp(struct page *page, struct page **page2,
1390 				struct list_head *from)
1391 {
1392 	int rc = 0;
1393 
1394 	lock_page(page);
1395 	rc = split_huge_page_to_list(page, from);
1396 	unlock_page(page);
1397 	if (!rc)
1398 		list_safe_reset_next(page, *page2, lru);
1399 
1400 	return rc;
1401 }
1402 
1403 /*
1404  * migrate_pages - migrate the pages specified in a list, to the free pages
1405  *		   supplied as the target for the page migration
1406  *
1407  * @from:		The list of pages to be migrated.
1408  * @get_new_page:	The function used to allocate free pages to be used
1409  *			as the target of the page migration.
1410  * @put_new_page:	The function used to free target pages if migration
1411  *			fails, or NULL if no special handling is necessary.
1412  * @private:		Private data to be passed on to get_new_page()
1413  * @mode:		The migration mode that specifies the constraints for
1414  *			page migration, if any.
1415  * @reason:		The reason for page migration.
1416  * @ret_succeeded:	Set to the number of pages migrated successfully if
1417  *			the caller passes a non-NULL pointer.
1418  *
1419  * The function returns after 10 attempts or if no pages are movable any more
1420  * because the list has become empty or no retryable pages exist any more.
1421  * It is caller's responsibility to call putback_movable_pages() to return pages
1422  * to the LRU or free list only if ret != 0.
1423  *
1424  * Returns the number of pages that were not migrated, or an error code.
1425  */
1426 int migrate_pages(struct list_head *from, new_page_t get_new_page,
1427 		free_page_t put_new_page, unsigned long private,
1428 		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1429 {
1430 	int retry = 1;
1431 	int thp_retry = 1;
1432 	int nr_failed = 0;
1433 	int nr_succeeded = 0;
1434 	int nr_thp_succeeded = 0;
1435 	int nr_thp_failed = 0;
1436 	int nr_thp_split = 0;
1437 	int pass = 0;
1438 	bool is_thp = false;
1439 	struct page *page;
1440 	struct page *page2;
1441 	int swapwrite = current->flags & PF_SWAPWRITE;
1442 	int rc, nr_subpages;
1443 	LIST_HEAD(ret_pages);
1444 	bool nosplit = (reason == MR_NUMA_MISPLACED);
1445 
1446 	trace_mm_migrate_pages_start(mode, reason);
1447 
1448 	if (!swapwrite)
1449 		current->flags |= PF_SWAPWRITE;
1450 
1451 	for (pass = 0; pass < 10 && (retry || thp_retry); pass++) {
1452 		retry = 0;
1453 		thp_retry = 0;
1454 
1455 		list_for_each_entry_safe(page, page2, from, lru) {
1456 retry:
1457 			/*
1458 			 * THP statistics is based on the source huge page.
1459 			 * Capture required information that might get lost
1460 			 * during migration.
1461 			 */
1462 			is_thp = PageTransHuge(page) && !PageHuge(page);
1463 			nr_subpages = thp_nr_pages(page);
1464 			cond_resched();
1465 
1466 			if (PageHuge(page))
1467 				rc = unmap_and_move_huge_page(get_new_page,
1468 						put_new_page, private, page,
1469 						pass > 2, mode, reason,
1470 						&ret_pages);
1471 			else
1472 				rc = unmap_and_move(get_new_page, put_new_page,
1473 						private, page, pass > 2, mode,
1474 						reason, &ret_pages);
1475 			/*
1476 			 * The rules are:
1477 			 *	Success: non hugetlb page will be freed, hugetlb
1478 			 *		 page will be put back
1479 			 *	-EAGAIN: stay on the from list
1480 			 *	-ENOMEM: stay on the from list
1481 			 *	Other errno: put on ret_pages list then splice to
1482 			 *		     from list
1483 			 */
1484 			switch(rc) {
1485 			/*
1486 			 * THP migration might be unsupported or the
1487 			 * allocation could've failed so we should
1488 			 * retry on the same page with the THP split
1489 			 * to base pages.
1490 			 *
1491 			 * Head page is retried immediately and tail
1492 			 * pages are added to the tail of the list so
1493 			 * we encounter them after the rest of the list
1494 			 * is processed.
1495 			 */
1496 			case -ENOSYS:
1497 				/* THP migration is unsupported */
1498 				if (is_thp) {
1499 					if (!try_split_thp(page, &page2, from)) {
1500 						nr_thp_split++;
1501 						goto retry;
1502 					}
1503 
1504 					nr_thp_failed++;
1505 					nr_failed += nr_subpages;
1506 					break;
1507 				}
1508 
1509 				/* Hugetlb migration is unsupported */
1510 				nr_failed++;
1511 				break;
1512 			case -ENOMEM:
1513 				/*
1514 				 * When memory is low, don't bother to try to migrate
1515 				 * other pages, just exit.
1516 				 * THP NUMA faulting doesn't split THP to retry.
1517 				 */
1518 				if (is_thp && !nosplit) {
1519 					if (!try_split_thp(page, &page2, from)) {
1520 						nr_thp_split++;
1521 						goto retry;
1522 					}
1523 
1524 					nr_thp_failed++;
1525 					nr_failed += nr_subpages;
1526 					goto out;
1527 				}
1528 				nr_failed++;
1529 				goto out;
1530 			case -EAGAIN:
1531 				if (is_thp) {
1532 					thp_retry++;
1533 					break;
1534 				}
1535 				retry++;
1536 				break;
1537 			case MIGRATEPAGE_SUCCESS:
1538 				if (is_thp) {
1539 					nr_thp_succeeded++;
1540 					nr_succeeded += nr_subpages;
1541 					break;
1542 				}
1543 				nr_succeeded++;
1544 				break;
1545 			default:
1546 				/*
1547 				 * Permanent failure (-EBUSY, etc.):
1548 				 * unlike -EAGAIN case, the failed page is
1549 				 * removed from migration page list and not
1550 				 * retried in the next outer loop.
1551 				 */
1552 				if (is_thp) {
1553 					nr_thp_failed++;
1554 					nr_failed += nr_subpages;
1555 					break;
1556 				}
1557 				nr_failed++;
1558 				break;
1559 			}
1560 		}
1561 	}
1562 	nr_failed += retry + thp_retry;
1563 	nr_thp_failed += thp_retry;
1564 	rc = nr_failed;
1565 out:
1566 	/*
1567 	 * Put the permanent failure page back to migration list, they
1568 	 * will be put back to the right list by the caller.
1569 	 */
1570 	list_splice(&ret_pages, from);
1571 
1572 	count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1573 	count_vm_events(PGMIGRATE_FAIL, nr_failed);
1574 	count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
1575 	count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed);
1576 	count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split);
1577 	trace_mm_migrate_pages(nr_succeeded, nr_failed, nr_thp_succeeded,
1578 			       nr_thp_failed, nr_thp_split, mode, reason);
1579 
1580 	if (!swapwrite)
1581 		current->flags &= ~PF_SWAPWRITE;
1582 
1583 	if (ret_succeeded)
1584 		*ret_succeeded = nr_succeeded;
1585 
1586 	return rc;
1587 }
1588 
1589 struct page *alloc_migration_target(struct page *page, unsigned long private)
1590 {
1591 	struct migration_target_control *mtc;
1592 	gfp_t gfp_mask;
1593 	unsigned int order = 0;
1594 	struct page *new_page = NULL;
1595 	int nid;
1596 	int zidx;
1597 
1598 	mtc = (struct migration_target_control *)private;
1599 	gfp_mask = mtc->gfp_mask;
1600 	nid = mtc->nid;
1601 	if (nid == NUMA_NO_NODE)
1602 		nid = page_to_nid(page);
1603 
1604 	if (PageHuge(page)) {
1605 		struct hstate *h = page_hstate(compound_head(page));
1606 
1607 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
1608 		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
1609 	}
1610 
1611 	if (PageTransHuge(page)) {
1612 		/*
1613 		 * clear __GFP_RECLAIM to make the migration callback
1614 		 * consistent with regular THP allocations.
1615 		 */
1616 		gfp_mask &= ~__GFP_RECLAIM;
1617 		gfp_mask |= GFP_TRANSHUGE;
1618 		order = HPAGE_PMD_ORDER;
1619 	}
1620 	zidx = zone_idx(page_zone(page));
1621 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
1622 		gfp_mask |= __GFP_HIGHMEM;
1623 
1624 	new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
1625 
1626 	if (new_page && PageTransHuge(new_page))
1627 		prep_transhuge_page(new_page);
1628 
1629 	return new_page;
1630 }
1631 
1632 #ifdef CONFIG_NUMA
1633 
1634 static int store_status(int __user *status, int start, int value, int nr)
1635 {
1636 	while (nr-- > 0) {
1637 		if (put_user(value, status + start))
1638 			return -EFAULT;
1639 		start++;
1640 	}
1641 
1642 	return 0;
1643 }
1644 
1645 static int do_move_pages_to_node(struct mm_struct *mm,
1646 		struct list_head *pagelist, int node)
1647 {
1648 	int err;
1649 	struct migration_target_control mtc = {
1650 		.nid = node,
1651 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1652 	};
1653 
1654 	err = migrate_pages(pagelist, alloc_migration_target, NULL,
1655 		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1656 	if (err)
1657 		putback_movable_pages(pagelist);
1658 	return err;
1659 }
1660 
1661 /*
1662  * Resolves the given address to a struct page, isolates it from the LRU and
1663  * puts it to the given pagelist.
1664  * Returns:
1665  *     errno - if the page cannot be found/isolated
1666  *     0 - when it doesn't have to be migrated because it is already on the
1667  *         target node
1668  *     1 - when it has been queued
1669  */
1670 static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
1671 		int node, struct list_head *pagelist, bool migrate_all)
1672 {
1673 	struct vm_area_struct *vma;
1674 	struct page *page;
1675 	unsigned int follflags;
1676 	int err;
1677 
1678 	mmap_read_lock(mm);
1679 	err = -EFAULT;
1680 	vma = find_vma(mm, addr);
1681 	if (!vma || addr < vma->vm_start || !vma_migratable(vma))
1682 		goto out;
1683 
1684 	/* FOLL_DUMP to ignore special (like zero) pages */
1685 	follflags = FOLL_GET | FOLL_DUMP;
1686 	page = follow_page(vma, addr, follflags);
1687 
1688 	err = PTR_ERR(page);
1689 	if (IS_ERR(page))
1690 		goto out;
1691 
1692 	err = -ENOENT;
1693 	if (!page)
1694 		goto out;
1695 
1696 	err = 0;
1697 	if (page_to_nid(page) == node)
1698 		goto out_putpage;
1699 
1700 	err = -EACCES;
1701 	if (page_mapcount(page) > 1 && !migrate_all)
1702 		goto out_putpage;
1703 
1704 	if (PageHuge(page)) {
1705 		if (PageHead(page)) {
1706 			isolate_huge_page(page, pagelist);
1707 			err = 1;
1708 		}
1709 	} else {
1710 		struct page *head;
1711 
1712 		head = compound_head(page);
1713 		err = isolate_lru_page(head);
1714 		if (err)
1715 			goto out_putpage;
1716 
1717 		err = 1;
1718 		list_add_tail(&head->lru, pagelist);
1719 		mod_node_page_state(page_pgdat(head),
1720 			NR_ISOLATED_ANON + page_is_file_lru(head),
1721 			thp_nr_pages(head));
1722 	}
1723 out_putpage:
1724 	/*
1725 	 * Either remove the duplicate refcount from
1726 	 * isolate_lru_page() or drop the page ref if it was
1727 	 * not isolated.
1728 	 */
1729 	put_page(page);
1730 out:
1731 	mmap_read_unlock(mm);
1732 	return err;
1733 }
1734 
1735 static int move_pages_and_store_status(struct mm_struct *mm, int node,
1736 		struct list_head *pagelist, int __user *status,
1737 		int start, int i, unsigned long nr_pages)
1738 {
1739 	int err;
1740 
1741 	if (list_empty(pagelist))
1742 		return 0;
1743 
1744 	err = do_move_pages_to_node(mm, pagelist, node);
1745 	if (err) {
1746 		/*
1747 		 * Positive err means the number of failed
1748 		 * pages to migrate.  Since we are going to
1749 		 * abort and return the number of non-migrated
1750 		 * pages, so need to include the rest of the
1751 		 * nr_pages that have not been attempted as
1752 		 * well.
1753 		 */
1754 		if (err > 0)
1755 			err += nr_pages - i - 1;
1756 		return err;
1757 	}
1758 	return store_status(status, start, node, i - start);
1759 }
1760 
1761 /*
1762  * Migrate an array of page address onto an array of nodes and fill
1763  * the corresponding array of status.
1764  */
1765 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1766 			 unsigned long nr_pages,
1767 			 const void __user * __user *pages,
1768 			 const int __user *nodes,
1769 			 int __user *status, int flags)
1770 {
1771 	int current_node = NUMA_NO_NODE;
1772 	LIST_HEAD(pagelist);
1773 	int start, i;
1774 	int err = 0, err1;
1775 
1776 	lru_cache_disable();
1777 
1778 	for (i = start = 0; i < nr_pages; i++) {
1779 		const void __user *p;
1780 		unsigned long addr;
1781 		int node;
1782 
1783 		err = -EFAULT;
1784 		if (get_user(p, pages + i))
1785 			goto out_flush;
1786 		if (get_user(node, nodes + i))
1787 			goto out_flush;
1788 		addr = (unsigned long)untagged_addr(p);
1789 
1790 		err = -ENODEV;
1791 		if (node < 0 || node >= MAX_NUMNODES)
1792 			goto out_flush;
1793 		if (!node_state(node, N_MEMORY))
1794 			goto out_flush;
1795 
1796 		err = -EACCES;
1797 		if (!node_isset(node, task_nodes))
1798 			goto out_flush;
1799 
1800 		if (current_node == NUMA_NO_NODE) {
1801 			current_node = node;
1802 			start = i;
1803 		} else if (node != current_node) {
1804 			err = move_pages_and_store_status(mm, current_node,
1805 					&pagelist, status, start, i, nr_pages);
1806 			if (err)
1807 				goto out;
1808 			start = i;
1809 			current_node = node;
1810 		}
1811 
1812 		/*
1813 		 * Errors in the page lookup or isolation are not fatal and we simply
1814 		 * report them via status
1815 		 */
1816 		err = add_page_for_migration(mm, addr, current_node,
1817 				&pagelist, flags & MPOL_MF_MOVE_ALL);
1818 
1819 		if (err > 0) {
1820 			/* The page is successfully queued for migration */
1821 			continue;
1822 		}
1823 
1824 		/*
1825 		 * If the page is already on the target node (!err), store the
1826 		 * node, otherwise, store the err.
1827 		 */
1828 		err = store_status(status, i, err ? : current_node, 1);
1829 		if (err)
1830 			goto out_flush;
1831 
1832 		err = move_pages_and_store_status(mm, current_node, &pagelist,
1833 				status, start, i, nr_pages);
1834 		if (err)
1835 			goto out;
1836 		current_node = NUMA_NO_NODE;
1837 	}
1838 out_flush:
1839 	/* Make sure we do not overwrite the existing error */
1840 	err1 = move_pages_and_store_status(mm, current_node, &pagelist,
1841 				status, start, i, nr_pages);
1842 	if (err >= 0)
1843 		err = err1;
1844 out:
1845 	lru_cache_enable();
1846 	return err;
1847 }
1848 
1849 /*
1850  * Determine the nodes of an array of pages and store it in an array of status.
1851  */
1852 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1853 				const void __user **pages, int *status)
1854 {
1855 	unsigned long i;
1856 
1857 	mmap_read_lock(mm);
1858 
1859 	for (i = 0; i < nr_pages; i++) {
1860 		unsigned long addr = (unsigned long)(*pages);
1861 		struct vm_area_struct *vma;
1862 		struct page *page;
1863 		int err = -EFAULT;
1864 
1865 		vma = vma_lookup(mm, addr);
1866 		if (!vma)
1867 			goto set_status;
1868 
1869 		/* FOLL_DUMP to ignore special (like zero) pages */
1870 		page = follow_page(vma, addr, FOLL_DUMP);
1871 
1872 		err = PTR_ERR(page);
1873 		if (IS_ERR(page))
1874 			goto set_status;
1875 
1876 		err = page ? page_to_nid(page) : -ENOENT;
1877 set_status:
1878 		*status = err;
1879 
1880 		pages++;
1881 		status++;
1882 	}
1883 
1884 	mmap_read_unlock(mm);
1885 }
1886 
1887 static int get_compat_pages_array(const void __user *chunk_pages[],
1888 				  const void __user * __user *pages,
1889 				  unsigned long chunk_nr)
1890 {
1891 	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
1892 	compat_uptr_t p;
1893 	int i;
1894 
1895 	for (i = 0; i < chunk_nr; i++) {
1896 		if (get_user(p, pages32 + i))
1897 			return -EFAULT;
1898 		chunk_pages[i] = compat_ptr(p);
1899 	}
1900 
1901 	return 0;
1902 }
1903 
1904 /*
1905  * Determine the nodes of a user array of pages and store it in
1906  * a user array of status.
1907  */
1908 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1909 			 const void __user * __user *pages,
1910 			 int __user *status)
1911 {
1912 #define DO_PAGES_STAT_CHUNK_NR 16
1913 	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1914 	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1915 
1916 	while (nr_pages) {
1917 		unsigned long chunk_nr;
1918 
1919 		chunk_nr = nr_pages;
1920 		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1921 			chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1922 
1923 		if (in_compat_syscall()) {
1924 			if (get_compat_pages_array(chunk_pages, pages,
1925 						   chunk_nr))
1926 				break;
1927 		} else {
1928 			if (copy_from_user(chunk_pages, pages,
1929 				      chunk_nr * sizeof(*chunk_pages)))
1930 				break;
1931 		}
1932 
1933 		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1934 
1935 		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1936 			break;
1937 
1938 		pages += chunk_nr;
1939 		status += chunk_nr;
1940 		nr_pages -= chunk_nr;
1941 	}
1942 	return nr_pages ? -EFAULT : 0;
1943 }
1944 
1945 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
1946 {
1947 	struct task_struct *task;
1948 	struct mm_struct *mm;
1949 
1950 	/*
1951 	 * There is no need to check if current process has the right to modify
1952 	 * the specified process when they are same.
1953 	 */
1954 	if (!pid) {
1955 		mmget(current->mm);
1956 		*mem_nodes = cpuset_mems_allowed(current);
1957 		return current->mm;
1958 	}
1959 
1960 	/* Find the mm_struct */
1961 	rcu_read_lock();
1962 	task = find_task_by_vpid(pid);
1963 	if (!task) {
1964 		rcu_read_unlock();
1965 		return ERR_PTR(-ESRCH);
1966 	}
1967 	get_task_struct(task);
1968 
1969 	/*
1970 	 * Check if this process has the right to modify the specified
1971 	 * process. Use the regular "ptrace_may_access()" checks.
1972 	 */
1973 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1974 		rcu_read_unlock();
1975 		mm = ERR_PTR(-EPERM);
1976 		goto out;
1977 	}
1978 	rcu_read_unlock();
1979 
1980 	mm = ERR_PTR(security_task_movememory(task));
1981 	if (IS_ERR(mm))
1982 		goto out;
1983 	*mem_nodes = cpuset_mems_allowed(task);
1984 	mm = get_task_mm(task);
1985 out:
1986 	put_task_struct(task);
1987 	if (!mm)
1988 		mm = ERR_PTR(-EINVAL);
1989 	return mm;
1990 }
1991 
1992 /*
1993  * Move a list of pages in the address space of the currently executing
1994  * process.
1995  */
1996 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
1997 			     const void __user * __user *pages,
1998 			     const int __user *nodes,
1999 			     int __user *status, int flags)
2000 {
2001 	struct mm_struct *mm;
2002 	int err;
2003 	nodemask_t task_nodes;
2004 
2005 	/* Check flags */
2006 	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2007 		return -EINVAL;
2008 
2009 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2010 		return -EPERM;
2011 
2012 	mm = find_mm_struct(pid, &task_nodes);
2013 	if (IS_ERR(mm))
2014 		return PTR_ERR(mm);
2015 
2016 	if (nodes)
2017 		err = do_pages_move(mm, task_nodes, nr_pages, pages,
2018 				    nodes, status, flags);
2019 	else
2020 		err = do_pages_stat(mm, nr_pages, pages, status);
2021 
2022 	mmput(mm);
2023 	return err;
2024 }
2025 
2026 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2027 		const void __user * __user *, pages,
2028 		const int __user *, nodes,
2029 		int __user *, status, int, flags)
2030 {
2031 	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2032 }
2033 
2034 #ifdef CONFIG_NUMA_BALANCING
2035 /*
2036  * Returns true if this is a safe migration target node for misplaced NUMA
2037  * pages. Currently it only checks the watermarks which crude
2038  */
2039 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2040 				   unsigned long nr_migrate_pages)
2041 {
2042 	int z;
2043 
2044 	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2045 		struct zone *zone = pgdat->node_zones + z;
2046 
2047 		if (!populated_zone(zone))
2048 			continue;
2049 
2050 		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
2051 		if (!zone_watermark_ok(zone, 0,
2052 				       high_wmark_pages(zone) +
2053 				       nr_migrate_pages,
2054 				       ZONE_MOVABLE, 0))
2055 			continue;
2056 		return true;
2057 	}
2058 	return false;
2059 }
2060 
2061 static struct page *alloc_misplaced_dst_page(struct page *page,
2062 					   unsigned long data)
2063 {
2064 	int nid = (int) data;
2065 	struct page *newpage;
2066 
2067 	newpage = __alloc_pages_node(nid,
2068 					 (GFP_HIGHUSER_MOVABLE |
2069 					  __GFP_THISNODE | __GFP_NOMEMALLOC |
2070 					  __GFP_NORETRY | __GFP_NOWARN) &
2071 					 ~__GFP_RECLAIM, 0);
2072 
2073 	return newpage;
2074 }
2075 
2076 static struct page *alloc_misplaced_dst_page_thp(struct page *page,
2077 						 unsigned long data)
2078 {
2079 	int nid = (int) data;
2080 	struct page *newpage;
2081 
2082 	newpage = alloc_pages_node(nid, (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
2083 				   HPAGE_PMD_ORDER);
2084 	if (!newpage)
2085 		goto out;
2086 
2087 	prep_transhuge_page(newpage);
2088 
2089 out:
2090 	return newpage;
2091 }
2092 
2093 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
2094 {
2095 	int page_lru;
2096 	int nr_pages = thp_nr_pages(page);
2097 
2098 	VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
2099 
2100 	/* Do not migrate THP mapped by multiple processes */
2101 	if (PageTransHuge(page) && total_mapcount(page) > 1)
2102 		return 0;
2103 
2104 	/* Avoid migrating to a node that is nearly full */
2105 	if (!migrate_balanced_pgdat(pgdat, nr_pages))
2106 		return 0;
2107 
2108 	if (isolate_lru_page(page))
2109 		return 0;
2110 
2111 	page_lru = page_is_file_lru(page);
2112 	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
2113 			    nr_pages);
2114 
2115 	/*
2116 	 * Isolating the page has taken another reference, so the
2117 	 * caller's reference can be safely dropped without the page
2118 	 * disappearing underneath us during migration.
2119 	 */
2120 	put_page(page);
2121 	return 1;
2122 }
2123 
2124 /*
2125  * Attempt to migrate a misplaced page to the specified destination
2126  * node. Caller is expected to have an elevated reference count on
2127  * the page that will be dropped by this function before returning.
2128  */
2129 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
2130 			   int node)
2131 {
2132 	pg_data_t *pgdat = NODE_DATA(node);
2133 	int isolated;
2134 	int nr_remaining;
2135 	LIST_HEAD(migratepages);
2136 	new_page_t *new;
2137 	bool compound;
2138 	int nr_pages = thp_nr_pages(page);
2139 
2140 	/*
2141 	 * PTE mapped THP or HugeTLB page can't reach here so the page could
2142 	 * be either base page or THP.  And it must be head page if it is
2143 	 * THP.
2144 	 */
2145 	compound = PageTransHuge(page);
2146 
2147 	if (compound)
2148 		new = alloc_misplaced_dst_page_thp;
2149 	else
2150 		new = alloc_misplaced_dst_page;
2151 
2152 	/*
2153 	 * Don't migrate file pages that are mapped in multiple processes
2154 	 * with execute permissions as they are probably shared libraries.
2155 	 */
2156 	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
2157 	    (vma->vm_flags & VM_EXEC))
2158 		goto out;
2159 
2160 	/*
2161 	 * Also do not migrate dirty pages as not all filesystems can move
2162 	 * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
2163 	 */
2164 	if (page_is_file_lru(page) && PageDirty(page))
2165 		goto out;
2166 
2167 	isolated = numamigrate_isolate_page(pgdat, page);
2168 	if (!isolated)
2169 		goto out;
2170 
2171 	list_add(&page->lru, &migratepages);
2172 	nr_remaining = migrate_pages(&migratepages, *new, NULL, node,
2173 				     MIGRATE_ASYNC, MR_NUMA_MISPLACED, NULL);
2174 	if (nr_remaining) {
2175 		if (!list_empty(&migratepages)) {
2176 			list_del(&page->lru);
2177 			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
2178 					page_is_file_lru(page), -nr_pages);
2179 			putback_lru_page(page);
2180 		}
2181 		isolated = 0;
2182 	} else
2183 		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_pages);
2184 	BUG_ON(!list_empty(&migratepages));
2185 	return isolated;
2186 
2187 out:
2188 	put_page(page);
2189 	return 0;
2190 }
2191 #endif /* CONFIG_NUMA_BALANCING */
2192 #endif /* CONFIG_NUMA */
2193 
2194 #ifdef CONFIG_DEVICE_PRIVATE
2195 static int migrate_vma_collect_skip(unsigned long start,
2196 				    unsigned long end,
2197 				    struct mm_walk *walk)
2198 {
2199 	struct migrate_vma *migrate = walk->private;
2200 	unsigned long addr;
2201 
2202 	for (addr = start; addr < end; addr += PAGE_SIZE) {
2203 		migrate->dst[migrate->npages] = 0;
2204 		migrate->src[migrate->npages++] = 0;
2205 	}
2206 
2207 	return 0;
2208 }
2209 
2210 static int migrate_vma_collect_hole(unsigned long start,
2211 				    unsigned long end,
2212 				    __always_unused int depth,
2213 				    struct mm_walk *walk)
2214 {
2215 	struct migrate_vma *migrate = walk->private;
2216 	unsigned long addr;
2217 
2218 	/* Only allow populating anonymous memory. */
2219 	if (!vma_is_anonymous(walk->vma))
2220 		return migrate_vma_collect_skip(start, end, walk);
2221 
2222 	for (addr = start; addr < end; addr += PAGE_SIZE) {
2223 		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
2224 		migrate->dst[migrate->npages] = 0;
2225 		migrate->npages++;
2226 		migrate->cpages++;
2227 	}
2228 
2229 	return 0;
2230 }
2231 
2232 static int migrate_vma_collect_pmd(pmd_t *pmdp,
2233 				   unsigned long start,
2234 				   unsigned long end,
2235 				   struct mm_walk *walk)
2236 {
2237 	struct migrate_vma *migrate = walk->private;
2238 	struct vm_area_struct *vma = walk->vma;
2239 	struct mm_struct *mm = vma->vm_mm;
2240 	unsigned long addr = start, unmapped = 0;
2241 	spinlock_t *ptl;
2242 	pte_t *ptep;
2243 
2244 again:
2245 	if (pmd_none(*pmdp))
2246 		return migrate_vma_collect_hole(start, end, -1, walk);
2247 
2248 	if (pmd_trans_huge(*pmdp)) {
2249 		struct page *page;
2250 
2251 		ptl = pmd_lock(mm, pmdp);
2252 		if (unlikely(!pmd_trans_huge(*pmdp))) {
2253 			spin_unlock(ptl);
2254 			goto again;
2255 		}
2256 
2257 		page = pmd_page(*pmdp);
2258 		if (is_huge_zero_page(page)) {
2259 			spin_unlock(ptl);
2260 			split_huge_pmd(vma, pmdp, addr);
2261 			if (pmd_trans_unstable(pmdp))
2262 				return migrate_vma_collect_skip(start, end,
2263 								walk);
2264 		} else {
2265 			int ret;
2266 
2267 			get_page(page);
2268 			spin_unlock(ptl);
2269 			if (unlikely(!trylock_page(page)))
2270 				return migrate_vma_collect_skip(start, end,
2271 								walk);
2272 			ret = split_huge_page(page);
2273 			unlock_page(page);
2274 			put_page(page);
2275 			if (ret)
2276 				return migrate_vma_collect_skip(start, end,
2277 								walk);
2278 			if (pmd_none(*pmdp))
2279 				return migrate_vma_collect_hole(start, end, -1,
2280 								walk);
2281 		}
2282 	}
2283 
2284 	if (unlikely(pmd_bad(*pmdp)))
2285 		return migrate_vma_collect_skip(start, end, walk);
2286 
2287 	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2288 	arch_enter_lazy_mmu_mode();
2289 
2290 	for (; addr < end; addr += PAGE_SIZE, ptep++) {
2291 		unsigned long mpfn = 0, pfn;
2292 		struct page *page;
2293 		swp_entry_t entry;
2294 		pte_t pte;
2295 
2296 		pte = *ptep;
2297 
2298 		if (pte_none(pte)) {
2299 			if (vma_is_anonymous(vma)) {
2300 				mpfn = MIGRATE_PFN_MIGRATE;
2301 				migrate->cpages++;
2302 			}
2303 			goto next;
2304 		}
2305 
2306 		if (!pte_present(pte)) {
2307 			/*
2308 			 * Only care about unaddressable device page special
2309 			 * page table entry. Other special swap entries are not
2310 			 * migratable, and we ignore regular swapped page.
2311 			 */
2312 			entry = pte_to_swp_entry(pte);
2313 			if (!is_device_private_entry(entry))
2314 				goto next;
2315 
2316 			page = pfn_swap_entry_to_page(entry);
2317 			if (!(migrate->flags &
2318 				MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
2319 			    page->pgmap->owner != migrate->pgmap_owner)
2320 				goto next;
2321 
2322 			mpfn = migrate_pfn(page_to_pfn(page)) |
2323 					MIGRATE_PFN_MIGRATE;
2324 			if (is_writable_device_private_entry(entry))
2325 				mpfn |= MIGRATE_PFN_WRITE;
2326 		} else {
2327 			if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
2328 				goto next;
2329 			pfn = pte_pfn(pte);
2330 			if (is_zero_pfn(pfn)) {
2331 				mpfn = MIGRATE_PFN_MIGRATE;
2332 				migrate->cpages++;
2333 				goto next;
2334 			}
2335 			page = vm_normal_page(migrate->vma, addr, pte);
2336 			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
2337 			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
2338 		}
2339 
2340 		/* FIXME support THP */
2341 		if (!page || !page->mapping || PageTransCompound(page)) {
2342 			mpfn = 0;
2343 			goto next;
2344 		}
2345 
2346 		/*
2347 		 * By getting a reference on the page we pin it and that blocks
2348 		 * any kind of migration. Side effect is that it "freezes" the
2349 		 * pte.
2350 		 *
2351 		 * We drop this reference after isolating the page from the lru
2352 		 * for non device page (device page are not on the lru and thus
2353 		 * can't be dropped from it).
2354 		 */
2355 		get_page(page);
2356 
2357 		/*
2358 		 * Optimize for the common case where page is only mapped once
2359 		 * in one process. If we can lock the page, then we can safely
2360 		 * set up a special migration page table entry now.
2361 		 */
2362 		if (trylock_page(page)) {
2363 			pte_t swp_pte;
2364 
2365 			migrate->cpages++;
2366 			ptep_get_and_clear(mm, addr, ptep);
2367 
2368 			/* Setup special migration page table entry */
2369 			if (mpfn & MIGRATE_PFN_WRITE)
2370 				entry = make_writable_migration_entry(
2371 							page_to_pfn(page));
2372 			else
2373 				entry = make_readable_migration_entry(
2374 							page_to_pfn(page));
2375 			swp_pte = swp_entry_to_pte(entry);
2376 			if (pte_present(pte)) {
2377 				if (pte_soft_dirty(pte))
2378 					swp_pte = pte_swp_mksoft_dirty(swp_pte);
2379 				if (pte_uffd_wp(pte))
2380 					swp_pte = pte_swp_mkuffd_wp(swp_pte);
2381 			} else {
2382 				if (pte_swp_soft_dirty(pte))
2383 					swp_pte = pte_swp_mksoft_dirty(swp_pte);
2384 				if (pte_swp_uffd_wp(pte))
2385 					swp_pte = pte_swp_mkuffd_wp(swp_pte);
2386 			}
2387 			set_pte_at(mm, addr, ptep, swp_pte);
2388 
2389 			/*
2390 			 * This is like regular unmap: we remove the rmap and
2391 			 * drop page refcount. Page won't be freed, as we took
2392 			 * a reference just above.
2393 			 */
2394 			page_remove_rmap(page, false);
2395 			put_page(page);
2396 
2397 			if (pte_present(pte))
2398 				unmapped++;
2399 		} else {
2400 			put_page(page);
2401 			mpfn = 0;
2402 		}
2403 
2404 next:
2405 		migrate->dst[migrate->npages] = 0;
2406 		migrate->src[migrate->npages++] = mpfn;
2407 	}
2408 	arch_leave_lazy_mmu_mode();
2409 	pte_unmap_unlock(ptep - 1, ptl);
2410 
2411 	/* Only flush the TLB if we actually modified any entries */
2412 	if (unmapped)
2413 		flush_tlb_range(walk->vma, start, end);
2414 
2415 	return 0;
2416 }
2417 
2418 static const struct mm_walk_ops migrate_vma_walk_ops = {
2419 	.pmd_entry		= migrate_vma_collect_pmd,
2420 	.pte_hole		= migrate_vma_collect_hole,
2421 };
2422 
2423 /*
2424  * migrate_vma_collect() - collect pages over a range of virtual addresses
2425  * @migrate: migrate struct containing all migration information
2426  *
2427  * This will walk the CPU page table. For each virtual address backed by a
2428  * valid page, it updates the src array and takes a reference on the page, in
2429  * order to pin the page until we lock it and unmap it.
2430  */
2431 static void migrate_vma_collect(struct migrate_vma *migrate)
2432 {
2433 	struct mmu_notifier_range range;
2434 
2435 	/*
2436 	 * Note that the pgmap_owner is passed to the mmu notifier callback so
2437 	 * that the registered device driver can skip invalidating device
2438 	 * private page mappings that won't be migrated.
2439 	 */
2440 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
2441 		migrate->vma, migrate->vma->vm_mm, migrate->start, migrate->end,
2442 		migrate->pgmap_owner);
2443 	mmu_notifier_invalidate_range_start(&range);
2444 
2445 	walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
2446 			&migrate_vma_walk_ops, migrate);
2447 
2448 	mmu_notifier_invalidate_range_end(&range);
2449 	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
2450 }
2451 
2452 /*
2453  * migrate_vma_check_page() - check if page is pinned or not
2454  * @page: struct page to check
2455  *
2456  * Pinned pages cannot be migrated. This is the same test as in
2457  * folio_migrate_mapping(), except that here we allow migration of a
2458  * ZONE_DEVICE page.
2459  */
2460 static bool migrate_vma_check_page(struct page *page)
2461 {
2462 	/*
2463 	 * One extra ref because caller holds an extra reference, either from
2464 	 * isolate_lru_page() for a regular page, or migrate_vma_collect() for
2465 	 * a device page.
2466 	 */
2467 	int extra = 1;
2468 
2469 	/*
2470 	 * FIXME support THP (transparent huge page), it is bit more complex to
2471 	 * check them than regular pages, because they can be mapped with a pmd
2472 	 * or with a pte (split pte mapping).
2473 	 */
2474 	if (PageCompound(page))
2475 		return false;
2476 
2477 	/* Page from ZONE_DEVICE have one extra reference */
2478 	if (is_zone_device_page(page)) {
2479 		/*
2480 		 * Private page can never be pin as they have no valid pte and
2481 		 * GUP will fail for those. Yet if there is a pending migration
2482 		 * a thread might try to wait on the pte migration entry and
2483 		 * will bump the page reference count. Sadly there is no way to
2484 		 * differentiate a regular pin from migration wait. Hence to
2485 		 * avoid 2 racing thread trying to migrate back to CPU to enter
2486 		 * infinite loop (one stopping migration because the other is
2487 		 * waiting on pte migration entry). We always return true here.
2488 		 *
2489 		 * FIXME proper solution is to rework migration_entry_wait() so
2490 		 * it does not need to take a reference on page.
2491 		 */
2492 		return is_device_private_page(page);
2493 	}
2494 
2495 	/* For file back page */
2496 	if (page_mapping(page))
2497 		extra += 1 + page_has_private(page);
2498 
2499 	if ((page_count(page) - extra) > page_mapcount(page))
2500 		return false;
2501 
2502 	return true;
2503 }
2504 
2505 /*
2506  * migrate_vma_unmap() - replace page mapping with special migration pte entry
2507  * @migrate: migrate struct containing all migration information
2508  *
2509  * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
2510  * special migration pte entry and check if it has been pinned. Pinned pages are
2511  * restored because we cannot migrate them.
2512  *
2513  * This is the last step before we call the device driver callback to allocate
2514  * destination memory and copy contents of original page over to new page.
2515  */
2516 static void migrate_vma_unmap(struct migrate_vma *migrate)
2517 {
2518 	const unsigned long npages = migrate->npages;
2519 	const unsigned long start = migrate->start;
2520 	unsigned long addr, i, restore = 0;
2521 	bool allow_drain = true;
2522 
2523 	lru_add_drain();
2524 
2525 	for (i = 0; i < npages; i++) {
2526 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2527 
2528 		if (!page)
2529 			continue;
2530 
2531 		/* ZONE_DEVICE pages are not on LRU */
2532 		if (!is_zone_device_page(page)) {
2533 			if (!PageLRU(page) && allow_drain) {
2534 				/* Drain CPU's pagevec */
2535 				lru_add_drain_all();
2536 				allow_drain = false;
2537 			}
2538 
2539 			if (isolate_lru_page(page)) {
2540 				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2541 				migrate->cpages--;
2542 				restore++;
2543 				continue;
2544 			}
2545 
2546 			/* Drop the reference we took in collect */
2547 			put_page(page);
2548 		}
2549 
2550 		if (page_mapped(page))
2551 			try_to_migrate(page, 0);
2552 
2553 		if (page_mapped(page) || !migrate_vma_check_page(page)) {
2554 			if (!is_zone_device_page(page)) {
2555 				get_page(page);
2556 				putback_lru_page(page);
2557 			}
2558 
2559 			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2560 			migrate->cpages--;
2561 			restore++;
2562 			continue;
2563 		}
2564 	}
2565 
2566 	for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
2567 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2568 
2569 		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2570 			continue;
2571 
2572 		remove_migration_ptes(page, page, false);
2573 
2574 		migrate->src[i] = 0;
2575 		unlock_page(page);
2576 		put_page(page);
2577 		restore--;
2578 	}
2579 }
2580 
2581 /**
2582  * migrate_vma_setup() - prepare to migrate a range of memory
2583  * @args: contains the vma, start, and pfns arrays for the migration
2584  *
2585  * Returns: negative errno on failures, 0 when 0 or more pages were migrated
2586  * without an error.
2587  *
2588  * Prepare to migrate a range of memory virtual address range by collecting all
2589  * the pages backing each virtual address in the range, saving them inside the
2590  * src array.  Then lock those pages and unmap them. Once the pages are locked
2591  * and unmapped, check whether each page is pinned or not.  Pages that aren't
2592  * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
2593  * corresponding src array entry.  Then restores any pages that are pinned, by
2594  * remapping and unlocking those pages.
2595  *
2596  * The caller should then allocate destination memory and copy source memory to
2597  * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
2598  * flag set).  Once these are allocated and copied, the caller must update each
2599  * corresponding entry in the dst array with the pfn value of the destination
2600  * page and with MIGRATE_PFN_VALID. Destination pages must be locked via
2601  * lock_page().
2602  *
2603  * Note that the caller does not have to migrate all the pages that are marked
2604  * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from
2605  * device memory to system memory.  If the caller cannot migrate a device page
2606  * back to system memory, then it must return VM_FAULT_SIGBUS, which has severe
2607  * consequences for the userspace process, so it must be avoided if at all
2608  * possible.
2609  *
2610  * For empty entries inside CPU page table (pte_none() or pmd_none() is true) we
2611  * do set MIGRATE_PFN_MIGRATE flag inside the corresponding source array thus
2612  * allowing the caller to allocate device memory for those unbacked virtual
2613  * addresses.  For this the caller simply has to allocate device memory and
2614  * properly set the destination entry like for regular migration.  Note that
2615  * this can still fail, and thus inside the device driver you must check if the
2616  * migration was successful for those entries after calling migrate_vma_pages(),
2617  * just like for regular migration.
2618  *
2619  * After that, the callers must call migrate_vma_pages() to go over each entry
2620  * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
2621  * set. If the corresponding entry in dst array has MIGRATE_PFN_VALID flag set,
2622  * then migrate_vma_pages() to migrate struct page information from the source
2623  * struct page to the destination struct page.  If it fails to migrate the
2624  * struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in the
2625  * src array.
2626  *
2627  * At this point all successfully migrated pages have an entry in the src
2628  * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
2629  * array entry with MIGRATE_PFN_VALID flag set.
2630  *
2631  * Once migrate_vma_pages() returns the caller may inspect which pages were
2632  * successfully migrated, and which were not.  Successfully migrated pages will
2633  * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
2634  *
2635  * It is safe to update device page table after migrate_vma_pages() because
2636  * both destination and source page are still locked, and the mmap_lock is held
2637  * in read mode (hence no one can unmap the range being migrated).
2638  *
2639  * Once the caller is done cleaning up things and updating its page table (if it
2640  * chose to do so, this is not an obligation) it finally calls
2641  * migrate_vma_finalize() to update the CPU page table to point to new pages
2642  * for successfully migrated pages or otherwise restore the CPU page table to
2643  * point to the original source pages.
2644  */
2645 int migrate_vma_setup(struct migrate_vma *args)
2646 {
2647 	long nr_pages = (args->end - args->start) >> PAGE_SHIFT;
2648 
2649 	args->start &= PAGE_MASK;
2650 	args->end &= PAGE_MASK;
2651 	if (!args->vma || is_vm_hugetlb_page(args->vma) ||
2652 	    (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
2653 		return -EINVAL;
2654 	if (nr_pages <= 0)
2655 		return -EINVAL;
2656 	if (args->start < args->vma->vm_start ||
2657 	    args->start >= args->vma->vm_end)
2658 		return -EINVAL;
2659 	if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
2660 		return -EINVAL;
2661 	if (!args->src || !args->dst)
2662 		return -EINVAL;
2663 
2664 	memset(args->src, 0, sizeof(*args->src) * nr_pages);
2665 	args->cpages = 0;
2666 	args->npages = 0;
2667 
2668 	migrate_vma_collect(args);
2669 
2670 	if (args->cpages)
2671 		migrate_vma_unmap(args);
2672 
2673 	/*
2674 	 * At this point pages are locked and unmapped, and thus they have
2675 	 * stable content and can safely be copied to destination memory that
2676 	 * is allocated by the drivers.
2677 	 */
2678 	return 0;
2679 
2680 }
2681 EXPORT_SYMBOL(migrate_vma_setup);
2682 
2683 /*
2684  * This code closely matches the code in:
2685  *   __handle_mm_fault()
2686  *     handle_pte_fault()
2687  *       do_anonymous_page()
2688  * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
2689  * private page.
2690  */
2691 static void migrate_vma_insert_page(struct migrate_vma *migrate,
2692 				    unsigned long addr,
2693 				    struct page *page,
2694 				    unsigned long *src)
2695 {
2696 	struct vm_area_struct *vma = migrate->vma;
2697 	struct mm_struct *mm = vma->vm_mm;
2698 	bool flush = false;
2699 	spinlock_t *ptl;
2700 	pte_t entry;
2701 	pgd_t *pgdp;
2702 	p4d_t *p4dp;
2703 	pud_t *pudp;
2704 	pmd_t *pmdp;
2705 	pte_t *ptep;
2706 
2707 	/* Only allow populating anonymous memory */
2708 	if (!vma_is_anonymous(vma))
2709 		goto abort;
2710 
2711 	pgdp = pgd_offset(mm, addr);
2712 	p4dp = p4d_alloc(mm, pgdp, addr);
2713 	if (!p4dp)
2714 		goto abort;
2715 	pudp = pud_alloc(mm, p4dp, addr);
2716 	if (!pudp)
2717 		goto abort;
2718 	pmdp = pmd_alloc(mm, pudp, addr);
2719 	if (!pmdp)
2720 		goto abort;
2721 
2722 	if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
2723 		goto abort;
2724 
2725 	/*
2726 	 * Use pte_alloc() instead of pte_alloc_map().  We can't run
2727 	 * pte_offset_map() on pmds where a huge pmd might be created
2728 	 * from a different thread.
2729 	 *
2730 	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
2731 	 * parallel threads are excluded by other means.
2732 	 *
2733 	 * Here we only have mmap_read_lock(mm).
2734 	 */
2735 	if (pte_alloc(mm, pmdp))
2736 		goto abort;
2737 
2738 	/* See the comment in pte_alloc_one_map() */
2739 	if (unlikely(pmd_trans_unstable(pmdp)))
2740 		goto abort;
2741 
2742 	if (unlikely(anon_vma_prepare(vma)))
2743 		goto abort;
2744 	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
2745 		goto abort;
2746 
2747 	/*
2748 	 * The memory barrier inside __SetPageUptodate makes sure that
2749 	 * preceding stores to the page contents become visible before
2750 	 * the set_pte_at() write.
2751 	 */
2752 	__SetPageUptodate(page);
2753 
2754 	if (is_zone_device_page(page)) {
2755 		if (is_device_private_page(page)) {
2756 			swp_entry_t swp_entry;
2757 
2758 			if (vma->vm_flags & VM_WRITE)
2759 				swp_entry = make_writable_device_private_entry(
2760 							page_to_pfn(page));
2761 			else
2762 				swp_entry = make_readable_device_private_entry(
2763 							page_to_pfn(page));
2764 			entry = swp_entry_to_pte(swp_entry);
2765 		} else {
2766 			/*
2767 			 * For now we only support migrating to un-addressable
2768 			 * device memory.
2769 			 */
2770 			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
2771 			goto abort;
2772 		}
2773 	} else {
2774 		entry = mk_pte(page, vma->vm_page_prot);
2775 		if (vma->vm_flags & VM_WRITE)
2776 			entry = pte_mkwrite(pte_mkdirty(entry));
2777 	}
2778 
2779 	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2780 
2781 	if (check_stable_address_space(mm))
2782 		goto unlock_abort;
2783 
2784 	if (pte_present(*ptep)) {
2785 		unsigned long pfn = pte_pfn(*ptep);
2786 
2787 		if (!is_zero_pfn(pfn))
2788 			goto unlock_abort;
2789 		flush = true;
2790 	} else if (!pte_none(*ptep))
2791 		goto unlock_abort;
2792 
2793 	/*
2794 	 * Check for userfaultfd but do not deliver the fault. Instead,
2795 	 * just back off.
2796 	 */
2797 	if (userfaultfd_missing(vma))
2798 		goto unlock_abort;
2799 
2800 	inc_mm_counter(mm, MM_ANONPAGES);
2801 	page_add_new_anon_rmap(page, vma, addr, false);
2802 	if (!is_zone_device_page(page))
2803 		lru_cache_add_inactive_or_unevictable(page, vma);
2804 	get_page(page);
2805 
2806 	if (flush) {
2807 		flush_cache_page(vma, addr, pte_pfn(*ptep));
2808 		ptep_clear_flush_notify(vma, addr, ptep);
2809 		set_pte_at_notify(mm, addr, ptep, entry);
2810 		update_mmu_cache(vma, addr, ptep);
2811 	} else {
2812 		/* No need to invalidate - it was non-present before */
2813 		set_pte_at(mm, addr, ptep, entry);
2814 		update_mmu_cache(vma, addr, ptep);
2815 	}
2816 
2817 	pte_unmap_unlock(ptep, ptl);
2818 	*src = MIGRATE_PFN_MIGRATE;
2819 	return;
2820 
2821 unlock_abort:
2822 	pte_unmap_unlock(ptep, ptl);
2823 abort:
2824 	*src &= ~MIGRATE_PFN_MIGRATE;
2825 }
2826 
2827 /**
2828  * migrate_vma_pages() - migrate meta-data from src page to dst page
2829  * @migrate: migrate struct containing all migration information
2830  *
2831  * This migrates struct page meta-data from source struct page to destination
2832  * struct page. This effectively finishes the migration from source page to the
2833  * destination page.
2834  */
2835 void migrate_vma_pages(struct migrate_vma *migrate)
2836 {
2837 	const unsigned long npages = migrate->npages;
2838 	const unsigned long start = migrate->start;
2839 	struct mmu_notifier_range range;
2840 	unsigned long addr, i;
2841 	bool notified = false;
2842 
2843 	for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
2844 		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2845 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2846 		struct address_space *mapping;
2847 		int r;
2848 
2849 		if (!newpage) {
2850 			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2851 			continue;
2852 		}
2853 
2854 		if (!page) {
2855 			if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
2856 				continue;
2857 			if (!notified) {
2858 				notified = true;
2859 
2860 				mmu_notifier_range_init_owner(&range,
2861 					MMU_NOTIFY_MIGRATE, 0, migrate->vma,
2862 					migrate->vma->vm_mm, addr, migrate->end,
2863 					migrate->pgmap_owner);
2864 				mmu_notifier_invalidate_range_start(&range);
2865 			}
2866 			migrate_vma_insert_page(migrate, addr, newpage,
2867 						&migrate->src[i]);
2868 			continue;
2869 		}
2870 
2871 		mapping = page_mapping(page);
2872 
2873 		if (is_zone_device_page(newpage)) {
2874 			if (is_device_private_page(newpage)) {
2875 				/*
2876 				 * For now only support private anonymous when
2877 				 * migrating to un-addressable device memory.
2878 				 */
2879 				if (mapping) {
2880 					migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2881 					continue;
2882 				}
2883 			} else {
2884 				/*
2885 				 * Other types of ZONE_DEVICE page are not
2886 				 * supported.
2887 				 */
2888 				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2889 				continue;
2890 			}
2891 		}
2892 
2893 		r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
2894 		if (r != MIGRATEPAGE_SUCCESS)
2895 			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2896 	}
2897 
2898 	/*
2899 	 * No need to double call mmu_notifier->invalidate_range() callback as
2900 	 * the above ptep_clear_flush_notify() inside migrate_vma_insert_page()
2901 	 * did already call it.
2902 	 */
2903 	if (notified)
2904 		mmu_notifier_invalidate_range_only_end(&range);
2905 }
2906 EXPORT_SYMBOL(migrate_vma_pages);
2907 
2908 /**
2909  * migrate_vma_finalize() - restore CPU page table entry
2910  * @migrate: migrate struct containing all migration information
2911  *
2912  * This replaces the special migration pte entry with either a mapping to the
2913  * new page if migration was successful for that page, or to the original page
2914  * otherwise.
2915  *
2916  * This also unlocks the pages and puts them back on the lru, or drops the extra
2917  * refcount, for device pages.
2918  */
2919 void migrate_vma_finalize(struct migrate_vma *migrate)
2920 {
2921 	const unsigned long npages = migrate->npages;
2922 	unsigned long i;
2923 
2924 	for (i = 0; i < npages; i++) {
2925 		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2926 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2927 
2928 		if (!page) {
2929 			if (newpage) {
2930 				unlock_page(newpage);
2931 				put_page(newpage);
2932 			}
2933 			continue;
2934 		}
2935 
2936 		if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
2937 			if (newpage) {
2938 				unlock_page(newpage);
2939 				put_page(newpage);
2940 			}
2941 			newpage = page;
2942 		}
2943 
2944 		remove_migration_ptes(page, newpage, false);
2945 		unlock_page(page);
2946 
2947 		if (is_zone_device_page(page))
2948 			put_page(page);
2949 		else
2950 			putback_lru_page(page);
2951 
2952 		if (newpage != page) {
2953 			unlock_page(newpage);
2954 			if (is_zone_device_page(newpage))
2955 				put_page(newpage);
2956 			else
2957 				putback_lru_page(newpage);
2958 		}
2959 	}
2960 }
2961 EXPORT_SYMBOL(migrate_vma_finalize);
2962 #endif /* CONFIG_DEVICE_PRIVATE */
2963 
2964 #if defined(CONFIG_HOTPLUG_CPU)
2965 /* Disable reclaim-based migration. */
2966 static void __disable_all_migrate_targets(void)
2967 {
2968 	int node;
2969 
2970 	for_each_online_node(node)
2971 		node_demotion[node] = NUMA_NO_NODE;
2972 }
2973 
2974 static void disable_all_migrate_targets(void)
2975 {
2976 	__disable_all_migrate_targets();
2977 
2978 	/*
2979 	 * Ensure that the "disable" is visible across the system.
2980 	 * Readers will see either a combination of before+disable
2981 	 * state or disable+after.  They will never see before and
2982 	 * after state together.
2983 	 *
2984 	 * The before+after state together might have cycles and
2985 	 * could cause readers to do things like loop until this
2986 	 * function finishes.  This ensures they can only see a
2987 	 * single "bad" read and would, for instance, only loop
2988 	 * once.
2989 	 */
2990 	synchronize_rcu();
2991 }
2992 
2993 /*
2994  * Find an automatic demotion target for 'node'.
2995  * Failing here is OK.  It might just indicate
2996  * being at the end of a chain.
2997  */
2998 static int establish_migrate_target(int node, nodemask_t *used)
2999 {
3000 	int migration_target;
3001 
3002 	/*
3003 	 * Can not set a migration target on a
3004 	 * node with it already set.
3005 	 *
3006 	 * No need for READ_ONCE() here since this
3007 	 * in the write path for node_demotion[].
3008 	 * This should be the only thread writing.
3009 	 */
3010 	if (node_demotion[node] != NUMA_NO_NODE)
3011 		return NUMA_NO_NODE;
3012 
3013 	migration_target = find_next_best_node(node, used);
3014 	if (migration_target == NUMA_NO_NODE)
3015 		return NUMA_NO_NODE;
3016 
3017 	node_demotion[node] = migration_target;
3018 
3019 	return migration_target;
3020 }
3021 
3022 /*
3023  * When memory fills up on a node, memory contents can be
3024  * automatically migrated to another node instead of
3025  * discarded at reclaim.
3026  *
3027  * Establish a "migration path" which will start at nodes
3028  * with CPUs and will follow the priorities used to build the
3029  * page allocator zonelists.
3030  *
3031  * The difference here is that cycles must be avoided.  If
3032  * node0 migrates to node1, then neither node1, nor anything
3033  * node1 migrates to can migrate to node0.
3034  *
3035  * This function can run simultaneously with readers of
3036  * node_demotion[].  However, it can not run simultaneously
3037  * with itself.  Exclusion is provided by memory hotplug events
3038  * being single-threaded.
3039  */
3040 static void __set_migration_target_nodes(void)
3041 {
3042 	nodemask_t next_pass	= NODE_MASK_NONE;
3043 	nodemask_t this_pass	= NODE_MASK_NONE;
3044 	nodemask_t used_targets = NODE_MASK_NONE;
3045 	int node;
3046 
3047 	/*
3048 	 * Avoid any oddities like cycles that could occur
3049 	 * from changes in the topology.  This will leave
3050 	 * a momentary gap when migration is disabled.
3051 	 */
3052 	disable_all_migrate_targets();
3053 
3054 	/*
3055 	 * Allocations go close to CPUs, first.  Assume that
3056 	 * the migration path starts at the nodes with CPUs.
3057 	 */
3058 	next_pass = node_states[N_CPU];
3059 again:
3060 	this_pass = next_pass;
3061 	next_pass = NODE_MASK_NONE;
3062 	/*
3063 	 * To avoid cycles in the migration "graph", ensure
3064 	 * that migration sources are not future targets by
3065 	 * setting them in 'used_targets'.  Do this only
3066 	 * once per pass so that multiple source nodes can
3067 	 * share a target node.
3068 	 *
3069 	 * 'used_targets' will become unavailable in future
3070 	 * passes.  This limits some opportunities for
3071 	 * multiple source nodes to share a destination.
3072 	 */
3073 	nodes_or(used_targets, used_targets, this_pass);
3074 	for_each_node_mask(node, this_pass) {
3075 		int target_node = establish_migrate_target(node, &used_targets);
3076 
3077 		if (target_node == NUMA_NO_NODE)
3078 			continue;
3079 
3080 		/*
3081 		 * Visit targets from this pass in the next pass.
3082 		 * Eventually, every node will have been part of
3083 		 * a pass, and will become set in 'used_targets'.
3084 		 */
3085 		node_set(target_node, next_pass);
3086 	}
3087 	/*
3088 	 * 'next_pass' contains nodes which became migration
3089 	 * targets in this pass.  Make additional passes until
3090 	 * no more migrations targets are available.
3091 	 */
3092 	if (!nodes_empty(next_pass))
3093 		goto again;
3094 }
3095 
3096 /*
3097  * For callers that do not hold get_online_mems() already.
3098  */
3099 static void set_migration_target_nodes(void)
3100 {
3101 	get_online_mems();
3102 	__set_migration_target_nodes();
3103 	put_online_mems();
3104 }
3105 
3106 /*
3107  * This leaves migrate-on-reclaim transiently disabled between
3108  * the MEM_GOING_OFFLINE and MEM_OFFLINE events.  This runs
3109  * whether reclaim-based migration is enabled or not, which
3110  * ensures that the user can turn reclaim-based migration at
3111  * any time without needing to recalculate migration targets.
3112  *
3113  * These callbacks already hold get_online_mems().  That is why
3114  * __set_migration_target_nodes() can be used as opposed to
3115  * set_migration_target_nodes().
3116  */
3117 static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
3118 						 unsigned long action, void *_arg)
3119 {
3120 	struct memory_notify *arg = _arg;
3121 
3122 	/*
3123 	 * Only update the node migration order when a node is
3124 	 * changing status, like online->offline.  This avoids
3125 	 * the overhead of synchronize_rcu() in most cases.
3126 	 */
3127 	if (arg->status_change_nid < 0)
3128 		return notifier_from_errno(0);
3129 
3130 	switch (action) {
3131 	case MEM_GOING_OFFLINE:
3132 		/*
3133 		 * Make sure there are not transient states where
3134 		 * an offline node is a migration target.  This
3135 		 * will leave migration disabled until the offline
3136 		 * completes and the MEM_OFFLINE case below runs.
3137 		 */
3138 		disable_all_migrate_targets();
3139 		break;
3140 	case MEM_OFFLINE:
3141 	case MEM_ONLINE:
3142 		/*
3143 		 * Recalculate the target nodes once the node
3144 		 * reaches its final state (online or offline).
3145 		 */
3146 		__set_migration_target_nodes();
3147 		break;
3148 	case MEM_CANCEL_OFFLINE:
3149 		/*
3150 		 * MEM_GOING_OFFLINE disabled all the migration
3151 		 * targets.  Reenable them.
3152 		 */
3153 		__set_migration_target_nodes();
3154 		break;
3155 	case MEM_GOING_ONLINE:
3156 	case MEM_CANCEL_ONLINE:
3157 		break;
3158 	}
3159 
3160 	return notifier_from_errno(0);
3161 }
3162 
3163 /*
3164  * React to hotplug events that might affect the migration targets
3165  * like events that online or offline NUMA nodes.
3166  *
3167  * The ordering is also currently dependent on which nodes have
3168  * CPUs.  That means we need CPU on/offline notification too.
3169  */
3170 static int migration_online_cpu(unsigned int cpu)
3171 {
3172 	set_migration_target_nodes();
3173 	return 0;
3174 }
3175 
3176 static int migration_offline_cpu(unsigned int cpu)
3177 {
3178 	set_migration_target_nodes();
3179 	return 0;
3180 }
3181 
3182 static int __init migrate_on_reclaim_init(void)
3183 {
3184 	int ret;
3185 
3186 	ret = cpuhp_setup_state_nocalls(CPUHP_MM_DEMOTION_DEAD, "mm/demotion:offline",
3187 					NULL, migration_offline_cpu);
3188 	/*
3189 	 * In the unlikely case that this fails, the automatic
3190 	 * migration targets may become suboptimal for nodes
3191 	 * where N_CPU changes.  With such a small impact in a
3192 	 * rare case, do not bother trying to do anything special.
3193 	 */
3194 	WARN_ON(ret < 0);
3195 	ret = cpuhp_setup_state(CPUHP_AP_MM_DEMOTION_ONLINE, "mm/demotion:online",
3196 				migration_online_cpu, NULL);
3197 	WARN_ON(ret < 0);
3198 
3199 	hotplug_memory_notifier(migrate_on_reclaim_callback, 100);
3200 	return 0;
3201 }
3202 late_initcall(migrate_on_reclaim_init);
3203 #endif /* CONFIG_HOTPLUG_CPU */
3204 
3205 bool numa_demotion_enabled = false;
3206 
3207 #ifdef CONFIG_SYSFS
3208 static ssize_t numa_demotion_enabled_show(struct kobject *kobj,
3209 					  struct kobj_attribute *attr, char *buf)
3210 {
3211 	return sysfs_emit(buf, "%s\n",
3212 			  numa_demotion_enabled ? "true" : "false");
3213 }
3214 
3215 static ssize_t numa_demotion_enabled_store(struct kobject *kobj,
3216 					   struct kobj_attribute *attr,
3217 					   const char *buf, size_t count)
3218 {
3219 	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
3220 		numa_demotion_enabled = true;
3221 	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
3222 		numa_demotion_enabled = false;
3223 	else
3224 		return -EINVAL;
3225 
3226 	return count;
3227 }
3228 
3229 static struct kobj_attribute numa_demotion_enabled_attr =
3230 	__ATTR(demotion_enabled, 0644, numa_demotion_enabled_show,
3231 	       numa_demotion_enabled_store);
3232 
3233 static struct attribute *numa_attrs[] = {
3234 	&numa_demotion_enabled_attr.attr,
3235 	NULL,
3236 };
3237 
3238 static const struct attribute_group numa_attr_group = {
3239 	.attrs = numa_attrs,
3240 };
3241 
3242 static int __init numa_init_sysfs(void)
3243 {
3244 	int err;
3245 	struct kobject *numa_kobj;
3246 
3247 	numa_kobj = kobject_create_and_add("numa", mm_kobj);
3248 	if (!numa_kobj) {
3249 		pr_err("failed to create numa kobject\n");
3250 		return -ENOMEM;
3251 	}
3252 	err = sysfs_create_group(numa_kobj, &numa_attr_group);
3253 	if (err) {
3254 		pr_err("failed to register numa group\n");
3255 		goto delete_obj;
3256 	}
3257 	return 0;
3258 
3259 delete_obj:
3260 	kobject_put(numa_kobj);
3261 	return err;
3262 }
3263 subsys_initcall(numa_init_sysfs);
3264 #endif
3265