xref: /linux/mm/migrate.c (revision c9fdc4d5487a16bd1f003fc8b66e91f88efb50e6)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Memory Migration functionality - linux/mm/migrate.c
4  *
5  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6  *
7  * Page migration was first developed in the context of the memory hotplug
8  * project. The main authors of the migration code are:
9  *
10  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11  * Hirokazu Takahashi <taka@valinux.co.jp>
12  * Dave Hansen <haveblue@us.ibm.com>
13  * Christoph Lameter
14  */
15 
16 #include <linux/migrate.h>
17 #include <linux/export.h>
18 #include <linux/swap.h>
19 #include <linux/swapops.h>
20 #include <linux/pagemap.h>
21 #include <linux/buffer_head.h>
22 #include <linux/mm_inline.h>
23 #include <linux/nsproxy.h>
24 #include <linux/pagevec.h>
25 #include <linux/ksm.h>
26 #include <linux/rmap.h>
27 #include <linux/topology.h>
28 #include <linux/cpu.h>
29 #include <linux/cpuset.h>
30 #include <linux/writeback.h>
31 #include <linux/mempolicy.h>
32 #include <linux/vmalloc.h>
33 #include <linux/security.h>
34 #include <linux/backing-dev.h>
35 #include <linux/compaction.h>
36 #include <linux/syscalls.h>
37 #include <linux/compat.h>
38 #include <linux/hugetlb.h>
39 #include <linux/hugetlb_cgroup.h>
40 #include <linux/gfp.h>
41 #include <linux/pagewalk.h>
42 #include <linux/pfn_t.h>
43 #include <linux/memremap.h>
44 #include <linux/userfaultfd_k.h>
45 #include <linux/balloon_compaction.h>
46 #include <linux/mmu_notifier.h>
47 #include <linux/page_idle.h>
48 #include <linux/page_owner.h>
49 #include <linux/sched/mm.h>
50 #include <linux/ptrace.h>
51 #include <linux/oom.h>
52 #include <linux/memory.h>
53 #include <linux/random.h>
54 
55 #include <asm/tlbflush.h>
56 
57 #define CREATE_TRACE_POINTS
58 #include <trace/events/migrate.h>
59 
60 #include "internal.h"
61 
62 int isolate_movable_page(struct page *page, isolate_mode_t mode)
63 {
64 	struct address_space *mapping;
65 
66 	/*
67 	 * Avoid burning cycles with pages that are yet under __free_pages(),
68 	 * or just got freed under us.
69 	 *
70 	 * In case we 'win' a race for a movable page being freed under us and
71 	 * raise its refcount, preventing __free_pages() from doing its job,
72 	 * the put_page() at the end of this block will take care of
73 	 * releasing this page, thus avoiding a nasty leak.
74 	 */
75 	if (unlikely(!get_page_unless_zero(page)))
76 		goto out;
77 
78 	/*
79 	 * Check PageMovable before holding a PG_lock because the page's owner
80 	 * assumes that nobody touches the PG_lock of a newly allocated page,
81 	 * so unconditionally grabbing the lock ruins the page owner's side.
82 	 */
83 	if (unlikely(!__PageMovable(page)))
84 		goto out_putpage;
85 	/*
86 	 * As movable pages are not isolated from LRU lists, concurrent
87 	 * compaction threads can race against page migration functions
88 	 * as well as race against the release of a page.
89 	 *
90 	 * In order to avoid having an already isolated movable page
91 	 * being (wrongly) re-isolated while it is under migration,
92 	 * or to avoid attempting to isolate pages being released,
93 	 * let's make sure we have the page lock
94 	 * before proceeding with the movable page isolation steps.
95 	 */
96 	if (unlikely(!trylock_page(page)))
97 		goto out_putpage;
98 
99 	if (!PageMovable(page) || PageIsolated(page))
100 		goto out_no_isolated;
101 
102 	mapping = page_mapping(page);
103 	VM_BUG_ON_PAGE(!mapping, page);
104 
105 	if (!mapping->a_ops->isolate_page(page, mode))
106 		goto out_no_isolated;
107 
108 	/* Driver shouldn't use PG_isolated bit of page->flags */
109 	WARN_ON_ONCE(PageIsolated(page));
110 	__SetPageIsolated(page);
111 	unlock_page(page);
112 
113 	return 0;
114 
115 out_no_isolated:
116 	unlock_page(page);
117 out_putpage:
118 	put_page(page);
119 out:
120 	return -EBUSY;
121 }
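
/*
 * Illustrative sketch (not part of the upstream file): a driver that owns
 * non-LRU movable pages makes the above work by publishing three callbacks
 * in its address_space_operations and tagging each page with
 * __SetPageMovable() from <linux/migrate.h>, so that the __PageMovable()/
 * PageMovable() checks in this file succeed.  The my_dev_* names below are
 * hypothetical.
 *
 *	static bool my_dev_isolate_page(struct page *page, isolate_mode_t mode)
 *	{
 *		// take driver-private locks, mark the page busy, ...
 *		return true;
 *	}
 *
 *	static int my_dev_migratepage(struct address_space *mapping,
 *			struct page *newpage, struct page *page,
 *			enum migrate_mode mode)
 *	{
 *		// copy contents and driver metadata to newpage
 *		return MIGRATEPAGE_SUCCESS;
 *	}
 *
 *	static void my_dev_putback_page(struct page *page)
 *	{
 *		// undo my_dev_isolate_page()
 *	}
 *
 *	static const struct address_space_operations my_dev_aops = {
 *		.isolate_page	= my_dev_isolate_page,
 *		.migratepage	= my_dev_migratepage,
 *		.putback_page	= my_dev_putback_page,
 *	};
 *
 *	// at allocation time, with page->mapping->a_ops == &my_dev_aops:
 *	__SetPageMovable(page, mapping);
 */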
122 
123 static void putback_movable_page(struct page *page)
124 {
125 	struct address_space *mapping;
126 
127 	mapping = page_mapping(page);
128 	mapping->a_ops->putback_page(page);
129 	__ClearPageIsolated(page);
130 }
131 
132 /*
133  * Put previously isolated pages back onto the appropriate lists
134  * from where they were once taken off for compaction/migration.
135  *
136  * This function shall be used whenever the isolated pageset has been
137  * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
138  * and isolate_huge_page().
139  */
140 void putback_movable_pages(struct list_head *l)
141 {
142 	struct page *page;
143 	struct page *page2;
144 
145 	list_for_each_entry_safe(page, page2, l, lru) {
146 		if (unlikely(PageHuge(page))) {
147 			putback_active_hugepage(page);
148 			continue;
149 		}
150 		list_del(&page->lru);
151 		/*
152 		 * We isolated a non-LRU movable page, so here we can use
153 		 * __PageMovable because LRU page's mapping cannot have
154 		 * PAGE_MAPPING_MOVABLE.
155 		 */
156 		if (unlikely(__PageMovable(page))) {
157 			VM_BUG_ON_PAGE(!PageIsolated(page), page);
158 			lock_page(page);
159 			if (PageMovable(page))
160 				putback_movable_page(page);
161 			else
162 				__ClearPageIsolated(page);
163 			unlock_page(page);
164 			put_page(page);
165 		} else {
166 			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
167 					page_is_file_lru(page), -thp_nr_pages(page));
168 			putback_lru_page(page);
169 		}
170 	}
171 }
172 
173 /*
174  * Restore a potential migration pte to a working pte entry
175  */
176 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
177 				 unsigned long addr, void *old)
178 {
179 	struct page_vma_mapped_walk pvmw = {
180 		.page = old,
181 		.vma = vma,
182 		.address = addr,
183 		.flags = PVMW_SYNC | PVMW_MIGRATION,
184 	};
185 	struct page *new;
186 	pte_t pte;
187 	swp_entry_t entry;
188 
189 	VM_BUG_ON_PAGE(PageTail(page), page);
190 	while (page_vma_mapped_walk(&pvmw)) {
191 		if (PageKsm(page))
192 			new = page;
193 		else
194 			new = page - pvmw.page->index +
195 				linear_page_index(vma, pvmw.address);
196 
197 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
198 		/* PMD-mapped THP migration entry */
199 		if (!pvmw.pte) {
200 			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
201 			remove_migration_pmd(&pvmw, new);
202 			continue;
203 		}
204 #endif
205 
206 		get_page(new);
207 		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
208 		if (pte_swp_soft_dirty(*pvmw.pte))
209 			pte = pte_mksoft_dirty(pte);
210 
211 		/*
212 		 * Recheck VMA as permissions can change since migration started
213 		 */
214 		entry = pte_to_swp_entry(*pvmw.pte);
215 		if (is_writable_migration_entry(entry))
216 			pte = maybe_mkwrite(pte, vma);
217 		else if (pte_swp_uffd_wp(*pvmw.pte))
218 			pte = pte_mkuffd_wp(pte);
219 
220 		if (unlikely(is_device_private_page(new))) {
221 			if (pte_write(pte))
222 				entry = make_writable_device_private_entry(
223 							page_to_pfn(new));
224 			else
225 				entry = make_readable_device_private_entry(
226 							page_to_pfn(new));
227 			pte = swp_entry_to_pte(entry);
228 			if (pte_swp_soft_dirty(*pvmw.pte))
229 				pte = pte_swp_mksoft_dirty(pte);
230 			if (pte_swp_uffd_wp(*pvmw.pte))
231 				pte = pte_swp_mkuffd_wp(pte);
232 		}
233 
234 #ifdef CONFIG_HUGETLB_PAGE
235 		if (PageHuge(new)) {
236 			unsigned int shift = huge_page_shift(hstate_vma(vma));
237 
238 			pte = pte_mkhuge(pte);
239 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
240 			if (PageAnon(new))
241 				hugepage_add_anon_rmap(new, vma, pvmw.address);
242 			else
243 				page_dup_rmap(new, true);
244 			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
245 		} else
246 #endif
247 		{
248 			if (PageAnon(new))
249 				page_add_anon_rmap(new, vma, pvmw.address, false);
250 			else
251 				page_add_file_rmap(new, false);
252 			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
253 		}
254 		if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
255 			mlock_vma_page(new);
256 
257 		if (PageTransHuge(page) && PageMlocked(page))
258 			clear_page_mlock(page);
259 
260 		/* No need to invalidate - it was non-present before */
261 		update_mmu_cache(vma, pvmw.address, pvmw.pte);
262 	}
263 
264 	return true;
265 }
266 
267 /*
268  * Get rid of all migration entries and replace them by
269  * references to the indicated page.
270  */
271 void remove_migration_ptes(struct page *old, struct page *new, bool locked)
272 {
273 	struct rmap_walk_control rwc = {
274 		.rmap_one = remove_migration_pte,
275 		.arg = old,
276 	};
277 
278 	if (locked)
279 		rmap_walk_locked(new, &rwc);
280 	else
281 		rmap_walk(new, &rwc);
282 }
283 
284 /*
285  * Something used the pte of a page under migration. We need to
286  * get to the page and wait until migration is finished.
287  * When we return from this function the fault will be retried.
288  */
289 void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
290 				spinlock_t *ptl)
291 {
292 	pte_t pte;
293 	swp_entry_t entry;
294 	struct page *page;
295 
296 	spin_lock(ptl);
297 	pte = *ptep;
298 	if (!is_swap_pte(pte))
299 		goto out;
300 
301 	entry = pte_to_swp_entry(pte);
302 	if (!is_migration_entry(entry))
303 		goto out;
304 
305 	page = pfn_swap_entry_to_page(entry);
306 	page = compound_head(page);
307 
308 	/*
309 	 * Once the page-cache replacement step of page migration has started,
310 	 * page_count is zero; but we must not call put_and_wait_on_page_locked()
311 	 * without a ref. Use get_page_unless_zero(), and just fault again if it fails.
312 	 */
313 	if (!get_page_unless_zero(page))
314 		goto out;
315 	pte_unmap_unlock(ptep, ptl);
316 	put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
317 	return;
318 out:
319 	pte_unmap_unlock(ptep, ptl);
320 }
321 
322 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
323 				unsigned long address)
324 {
325 	spinlock_t *ptl = pte_lockptr(mm, pmd);
326 	pte_t *ptep = pte_offset_map(pmd, address);
327 	__migration_entry_wait(mm, ptep, ptl);
328 }
329 
330 void migration_entry_wait_huge(struct vm_area_struct *vma,
331 		struct mm_struct *mm, pte_t *pte)
332 {
333 	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
334 	__migration_entry_wait(mm, pte, ptl);
335 }
336 
337 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
338 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
339 {
340 	spinlock_t *ptl;
341 	struct page *page;
342 
343 	ptl = pmd_lock(mm, pmd);
344 	if (!is_pmd_migration_entry(*pmd))
345 		goto unlock;
346 	page = pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd));
347 	if (!get_page_unless_zero(page))
348 		goto unlock;
349 	spin_unlock(ptl);
350 	put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
351 	return;
352 unlock:
353 	spin_unlock(ptl);
354 }
355 #endif
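
/*
 * Usage sketch of the wait helpers above, loosely modelled on the fault
 * path (mm/memory.c's do_swap_page()); this is an illustration, not a
 * verbatim copy: a fault that finds a migration entry simply waits for the
 * migration to finish and lets the fault be retried.
 *
 *	entry = pte_to_swp_entry(vmf->orig_pte);
 *	if (is_migration_entry(entry)) {
 *		migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
 *		return 0;	// the fault will be retried
 *	}
 */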
356 
357 static int expected_page_refs(struct address_space *mapping, struct page *page)
358 {
359 	int expected_count = 1;
360 
361 	/*
362 	 * Device private pages have an extra refcount as they are
363 	 * ZONE_DEVICE pages.
364 	 */
365 	expected_count += is_device_private_page(page);
366 	if (mapping)
367 		expected_count += compound_nr(page) + page_has_private(page);
368 
369 	return expected_count;
370 }
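
/*
 * Worked example of the rule above: a single page-cache page with buffer
 * heads attached (PagePrivate set) and not backed by ZONE_DEVICE gives
 * 1 (isolation ref) + 0 (not device private) + 1 (compound_nr() == 1) +
 * 1 (page_has_private()) = 3, matching the "3 for pages with a mapping and
 * PagePrivate/PagePrivate2 set" case documented below.  An anonymous page
 * with no mapping yields just 1.
 */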
371 
372 /*
373  * Replace the page in the mapping.
374  *
375  * The number of remaining references must be:
376  * 1 for anonymous pages without a mapping
377  * 2 for pages with a mapping
378  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
379  */
380 int folio_migrate_mapping(struct address_space *mapping,
381 		struct folio *newfolio, struct folio *folio, int extra_count)
382 {
383 	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
384 	struct zone *oldzone, *newzone;
385 	int dirty;
386 	int expected_count = expected_page_refs(mapping, &folio->page) + extra_count;
387 	long nr = folio_nr_pages(folio);
388 
389 	if (!mapping) {
390 		/* Anonymous page without mapping */
391 		if (folio_ref_count(folio) != expected_count)
392 			return -EAGAIN;
393 
394 		/* No turning back from here */
395 		newfolio->index = folio->index;
396 		newfolio->mapping = folio->mapping;
397 		if (folio_test_swapbacked(folio))
398 			__folio_set_swapbacked(newfolio);
399 
400 		return MIGRATEPAGE_SUCCESS;
401 	}
402 
403 	oldzone = folio_zone(folio);
404 	newzone = folio_zone(newfolio);
405 
406 	xas_lock_irq(&xas);
407 	if (!folio_ref_freeze(folio, expected_count)) {
408 		xas_unlock_irq(&xas);
409 		return -EAGAIN;
410 	}
411 
412 	/*
413 	 * Now we know that no one else is looking at the folio:
414 	 * no turning back from here.
415 	 */
416 	newfolio->index = folio->index;
417 	newfolio->mapping = folio->mapping;
418 	folio_ref_add(newfolio, nr); /* add cache reference */
419 	if (folio_test_swapbacked(folio)) {
420 		__folio_set_swapbacked(newfolio);
421 		if (folio_test_swapcache(folio)) {
422 			folio_set_swapcache(newfolio);
423 			newfolio->private = folio_get_private(folio);
424 		}
425 	} else {
426 		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
427 	}
428 
429 	/* Move dirty while page refs frozen and newpage not yet exposed */
430 	dirty = folio_test_dirty(folio);
431 	if (dirty) {
432 		folio_clear_dirty(folio);
433 		folio_set_dirty(newfolio);
434 	}
435 
436 	xas_store(&xas, newfolio);
437 	if (nr > 1) {
438 		int i;
439 
440 		for (i = 1; i < nr; i++) {
441 			xas_next(&xas);
442 			xas_store(&xas, newfolio);
443 		}
444 	}
445 
446 	/*
447 	 * Drop cache reference from old page by unfreezing
448 	 * to one less reference.
449 	 * We know this isn't the last reference.
450 	 */
451 	folio_ref_unfreeze(folio, expected_count - nr);
452 
453 	xas_unlock(&xas);
454 	/* Leave irq disabled to prevent preemption while updating stats */
455 
456 	/*
457 	 * If moved to a different zone then also account
458 	 * the page for that zone. Other VM counters will be
459 	 * taken care of when we establish references to the
460 	 * new page and drop references to the old page.
461 	 *
462 	 * Note that anonymous pages are accounted for
463 	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
464 	 * are mapped to swap space.
465 	 */
466 	if (newzone != oldzone) {
467 		struct lruvec *old_lruvec, *new_lruvec;
468 		struct mem_cgroup *memcg;
469 
470 		memcg = folio_memcg(folio);
471 		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
472 		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
473 
474 		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
475 		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
476 		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
477 			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
478 			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
479 		}
480 #ifdef CONFIG_SWAP
481 		if (folio_test_swapcache(folio)) {
482 			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
483 			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
484 		}
485 #endif
486 		if (dirty && mapping_can_writeback(mapping)) {
487 			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
488 			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
489 			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
490 			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
491 		}
492 	}
493 	local_irq_enable();
494 
495 	return MIGRATEPAGE_SUCCESS;
496 }
497 EXPORT_SYMBOL(folio_migrate_mapping);
498 
499 /*
500  * The expected number of remaining references is the same as that
501  * of folio_migrate_mapping().
502  */
503 int migrate_huge_page_move_mapping(struct address_space *mapping,
504 				   struct page *newpage, struct page *page)
505 {
506 	XA_STATE(xas, &mapping->i_pages, page_index(page));
507 	int expected_count;
508 
509 	xas_lock_irq(&xas);
510 	expected_count = 2 + page_has_private(page);
511 	if (page_count(page) != expected_count || xas_load(&xas) != page) {
512 		xas_unlock_irq(&xas);
513 		return -EAGAIN;
514 	}
515 
516 	if (!page_ref_freeze(page, expected_count)) {
517 		xas_unlock_irq(&xas);
518 		return -EAGAIN;
519 	}
520 
521 	newpage->index = page->index;
522 	newpage->mapping = page->mapping;
523 
524 	get_page(newpage);
525 
526 	xas_store(&xas, newpage);
527 
528 	page_ref_unfreeze(page, expected_count - 1);
529 
530 	xas_unlock_irq(&xas);
531 
532 	return MIGRATEPAGE_SUCCESS;
533 }
534 
535 /*
536  * Copy the flags and some other ancillary information
537  */
538 void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
539 {
540 	int cpupid;
541 
542 	if (folio_test_error(folio))
543 		folio_set_error(newfolio);
544 	if (folio_test_referenced(folio))
545 		folio_set_referenced(newfolio);
546 	if (folio_test_uptodate(folio))
547 		folio_mark_uptodate(newfolio);
548 	if (folio_test_clear_active(folio)) {
549 		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
550 		folio_set_active(newfolio);
551 	} else if (folio_test_clear_unevictable(folio))
552 		folio_set_unevictable(newfolio);
553 	if (folio_test_workingset(folio))
554 		folio_set_workingset(newfolio);
555 	if (folio_test_checked(folio))
556 		folio_set_checked(newfolio);
557 	if (folio_test_mappedtodisk(folio))
558 		folio_set_mappedtodisk(newfolio);
559 
560 	/* Move dirty on pages not done by folio_migrate_mapping() */
561 	if (folio_test_dirty(folio))
562 		folio_set_dirty(newfolio);
563 
564 	if (folio_test_young(folio))
565 		folio_set_young(newfolio);
566 	if (folio_test_idle(folio))
567 		folio_set_idle(newfolio);
568 
569 	/*
570 	 * Copy NUMA information to the new page, to prevent over-eager
571 	 * future migrations of this same page.
572 	 */
573 	cpupid = page_cpupid_xchg_last(&folio->page, -1);
574 	page_cpupid_xchg_last(&newfolio->page, cpupid);
575 
576 	folio_migrate_ksm(newfolio, folio);
577 	/*
578 	 * Please do not reorder this without considering how mm/ksm.c's
579 	 * get_ksm_page() depends upon folio_migrate_ksm() and PageSwapCache().
580 	 */
581 	if (folio_test_swapcache(folio))
582 		folio_clear_swapcache(folio);
583 	folio_clear_private(folio);
584 
585 	/* page->private contains hugetlb specific flags */
586 	if (!folio_test_hugetlb(folio))
587 		folio->private = NULL;
588 
589 	/*
590 	 * If any waiters have accumulated on the new page then
591 	 * wake them up.
592 	 */
593 	if (folio_test_writeback(newfolio))
594 		folio_end_writeback(newfolio);
595 
596 	/*
597 	 * PG_readahead shares the same bit with PG_reclaim.  The above
598 	 * folio_end_writeback() may clear PG_readahead mistakenly, so set the
599 	 * bit after that.
600 	 */
601 	if (folio_test_readahead(folio))
602 		folio_set_readahead(newfolio);
603 
604 	folio_copy_owner(newfolio, folio);
605 
606 	if (!folio_test_hugetlb(folio))
607 		mem_cgroup_migrate(folio, newfolio);
608 }
609 EXPORT_SYMBOL(folio_migrate_flags);
610 
611 void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
612 {
613 	folio_copy(newfolio, folio);
614 	folio_migrate_flags(newfolio, folio);
615 }
616 EXPORT_SYMBOL(folio_migrate_copy);
617 
618 /************************************************************
619  *                    Migration functions
620  ***********************************************************/
621 
622 /*
623  * Common logic to directly migrate a single LRU page suitable for
624  * pages that do not use PagePrivate/PagePrivate2.
625  *
626  * Pages are locked upon entry and exit.
627  */
628 int migrate_page(struct address_space *mapping,
629 		struct page *newpage, struct page *page,
630 		enum migrate_mode mode)
631 {
632 	struct folio *newfolio = page_folio(newpage);
633 	struct folio *folio = page_folio(page);
634 	int rc;
635 
636 	BUG_ON(folio_test_writeback(folio));	/* Writeback must be complete */
637 
638 	rc = folio_migrate_mapping(mapping, newfolio, folio, 0);
639 
640 	if (rc != MIGRATEPAGE_SUCCESS)
641 		return rc;
642 
643 	if (mode != MIGRATE_SYNC_NO_COPY)
644 		folio_migrate_copy(newfolio, folio);
645 	else
646 		folio_migrate_flags(newfolio, folio);
647 	return MIGRATEPAGE_SUCCESS;
648 }
649 EXPORT_SYMBOL(migrate_page);
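
/*
 * Usage sketch (illustrative; "myfs" is a hypothetical filesystem):
 * filesystems whose pagecache pages carry no fs-private state can wire
 * migrate_page() straight into their address_space_operations:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.readpage	= myfs_readpage,
 *		.writepage	= myfs_writepage,
 *		.migratepage	= migrate_page,
 *	};
 *
 * Filesystems using buffer heads typically use buffer_migrate_page() below
 * instead.
 */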
650 
651 #ifdef CONFIG_BLOCK
652 /* Returns true if all buffers are successfully locked */
653 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
654 							enum migrate_mode mode)
655 {
656 	struct buffer_head *bh = head;
657 
658 	/* Simple case, sync compaction */
659 	if (mode != MIGRATE_ASYNC) {
660 		do {
661 			lock_buffer(bh);
662 			bh = bh->b_this_page;
663 
664 		} while (bh != head);
665 
666 		return true;
667 	}
668 
669 	/* async case, we cannot block on lock_buffer so use trylock_buffer */
670 	do {
671 		if (!trylock_buffer(bh)) {
672 			/*
673 			 * We failed to lock the buffer and cannot stall in
674 			 * async migration. Release the taken locks
675 			 */
676 			struct buffer_head *failed_bh = bh;
677 			bh = head;
678 			while (bh != failed_bh) {
679 				unlock_buffer(bh);
680 				bh = bh->b_this_page;
681 			}
682 			return false;
683 		}
684 
685 		bh = bh->b_this_page;
686 	} while (bh != head);
687 	return true;
688 }
689 
690 static int __buffer_migrate_page(struct address_space *mapping,
691 		struct page *newpage, struct page *page, enum migrate_mode mode,
692 		bool check_refs)
693 {
694 	struct buffer_head *bh, *head;
695 	int rc;
696 	int expected_count;
697 
698 	if (!page_has_buffers(page))
699 		return migrate_page(mapping, newpage, page, mode);
700 
701 	/* Check that the page does not have extra refs before we do more work */
702 	expected_count = expected_page_refs(mapping, page);
703 	if (page_count(page) != expected_count)
704 		return -EAGAIN;
705 
706 	head = page_buffers(page);
707 	if (!buffer_migrate_lock_buffers(head, mode))
708 		return -EAGAIN;
709 
710 	if (check_refs) {
711 		bool busy;
712 		bool invalidated = false;
713 
714 recheck_buffers:
715 		busy = false;
716 		spin_lock(&mapping->private_lock);
717 		bh = head;
718 		do {
719 			if (atomic_read(&bh->b_count)) {
720 				busy = true;
721 				break;
722 			}
723 			bh = bh->b_this_page;
724 		} while (bh != head);
725 		if (busy) {
726 			if (invalidated) {
727 				rc = -EAGAIN;
728 				goto unlock_buffers;
729 			}
730 			spin_unlock(&mapping->private_lock);
731 			invalidate_bh_lrus();
732 			invalidated = true;
733 			goto recheck_buffers;
734 		}
735 	}
736 
737 	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
738 	if (rc != MIGRATEPAGE_SUCCESS)
739 		goto unlock_buffers;
740 
741 	attach_page_private(newpage, detach_page_private(page));
742 
743 	bh = head;
744 	do {
745 		set_bh_page(bh, newpage, bh_offset(bh));
746 		bh = bh->b_this_page;
747 
748 	} while (bh != head);
749 
750 	if (mode != MIGRATE_SYNC_NO_COPY)
751 		migrate_page_copy(newpage, page);
752 	else
753 		migrate_page_states(newpage, page);
754 
755 	rc = MIGRATEPAGE_SUCCESS;
756 unlock_buffers:
757 	if (check_refs)
758 		spin_unlock(&mapping->private_lock);
759 	bh = head;
760 	do {
761 		unlock_buffer(bh);
762 		bh = bh->b_this_page;
763 
764 	} while (bh != head);
765 
766 	return rc;
767 }
768 
769 /*
770  * Migration function for pages with buffers. This function can only be used
771  * if the underlying filesystem guarantees that no other references to "page"
772  * exist. For example, attached buffer heads are accessed only under the page lock.
773  */
774 int buffer_migrate_page(struct address_space *mapping,
775 		struct page *newpage, struct page *page, enum migrate_mode mode)
776 {
777 	return __buffer_migrate_page(mapping, newpage, page, mode, false);
778 }
779 EXPORT_SYMBOL(buffer_migrate_page);
780 
781 /*
782  * Same as above except that this variant is more careful and checks that there
783  * are also no buffer head references. This function is the right one for
784  * mappings where buffer heads are directly looked up and referenced (such as
785  * block device mappings).
786  */
787 int buffer_migrate_page_norefs(struct address_space *mapping,
788 		struct page *newpage, struct page *page, enum migrate_mode mode)
789 {
790 	return __buffer_migrate_page(mapping, newpage, page, mode, true);
791 }
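
/*
 * Usage sketch: mappings where buffer heads are looked up and referenced
 * directly, such as the block device pagecache (def_blk_aops), hook up the
 * careful variant:
 *
 *	.migratepage	= buffer_migrate_page_norefs,
 *
 * while ordinary buffer-head based filesystems can use buffer_migrate_page().
 */
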
792 #endif
793 
794 /*
795  * Writeback a page to clean the dirty state
796  */
797 static int writeout(struct address_space *mapping, struct page *page)
798 {
799 	struct writeback_control wbc = {
800 		.sync_mode = WB_SYNC_NONE,
801 		.nr_to_write = 1,
802 		.range_start = 0,
803 		.range_end = LLONG_MAX,
804 		.for_reclaim = 1
805 	};
806 	int rc;
807 
808 	if (!mapping->a_ops->writepage)
809 		/* No write method for the address space */
810 		return -EINVAL;
811 
812 	if (!clear_page_dirty_for_io(page))
813 		/* Someone else already triggered a write */
814 		return -EAGAIN;
815 
816 	/*
817 	 * A dirty page may imply that the underlying filesystem has
818 	 * the page on some queue. So the page must be clean for
819 	 * migration. Writeout may mean we lose the lock and the
820 	 * page state is no longer what we checked for earlier.
821 	 * At this point we know that the migration attempt cannot
822 	 * be successful.
823 	 */
824 	remove_migration_ptes(page, page, false);
825 
826 	rc = mapping->a_ops->writepage(page, &wbc);
827 
828 	if (rc != AOP_WRITEPAGE_ACTIVATE)
829 		/* unlocked. Relock */
830 		lock_page(page);
831 
832 	return (rc < 0) ? -EIO : -EAGAIN;
833 }
834 
835 /*
836  * Default handling if a filesystem does not provide a migration function.
837  */
838 static int fallback_migrate_page(struct address_space *mapping,
839 	struct page *newpage, struct page *page, enum migrate_mode mode)
840 {
841 	if (PageDirty(page)) {
842 		/* Only writeback pages in full synchronous migration */
843 		switch (mode) {
844 		case MIGRATE_SYNC:
845 		case MIGRATE_SYNC_NO_COPY:
846 			break;
847 		default:
848 			return -EBUSY;
849 		}
850 		return writeout(mapping, page);
851 	}
852 
853 	/*
854 	 * Buffers may be managed in a filesystem-specific way.
855 	 * We must have no buffers or drop them.
856 	 */
857 	if (page_has_private(page) &&
858 	    !try_to_release_page(page, GFP_KERNEL))
859 		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
860 
861 	return migrate_page(mapping, newpage, page, mode);
862 }
863 
864 /*
865  * Move a page to a newly allocated page
866  * The page is locked and all ptes have been successfully removed.
867  *
868  * The new page will have replaced the old page if this function
869  * is successful.
870  *
871  * Return value:
872  *   < 0 - error code
873  *  MIGRATEPAGE_SUCCESS - success
874  */
875 static int move_to_new_page(struct page *newpage, struct page *page,
876 				enum migrate_mode mode)
877 {
878 	struct address_space *mapping;
879 	int rc = -EAGAIN;
880 	bool is_lru = !__PageMovable(page);
881 
882 	VM_BUG_ON_PAGE(!PageLocked(page), page);
883 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
884 
885 	mapping = page_mapping(page);
886 
887 	if (likely(is_lru)) {
888 		if (!mapping)
889 			rc = migrate_page(mapping, newpage, page, mode);
890 		else if (mapping->a_ops->migratepage)
891 			/*
892 			 * Most pages have a mapping and most filesystems
893 			 * provide a migratepage callback. Anonymous pages
894 			 * are part of swap space which also has its own
895 			 * migratepage callback. This is the most common path
896 			 * for page migration.
897 			 */
898 			rc = mapping->a_ops->migratepage(mapping, newpage,
899 							page, mode);
900 		else
901 			rc = fallback_migrate_page(mapping, newpage,
902 							page, mode);
903 	} else {
904 		/*
905 		 * In the case of a non-LRU page, it could have been released after
906 		 * the isolation step. In that case, we shouldn't try migration.
907 		 */
908 		VM_BUG_ON_PAGE(!PageIsolated(page), page);
909 		if (!PageMovable(page)) {
910 			rc = MIGRATEPAGE_SUCCESS;
911 			__ClearPageIsolated(page);
912 			goto out;
913 		}
914 
915 		rc = mapping->a_ops->migratepage(mapping, newpage,
916 						page, mode);
917 		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
918 			!PageIsolated(page));
919 	}
920 
921 	/*
922 	 * When successful, old pagecache page->mapping must be cleared before
923 	 * page is freed; but stats require that PageAnon be left as PageAnon.
924 	 */
925 	if (rc == MIGRATEPAGE_SUCCESS) {
926 		if (__PageMovable(page)) {
927 			VM_BUG_ON_PAGE(!PageIsolated(page), page);
928 
929 			/*
930 			 * We clear PG_movable under the page lock so that no
931 			 * compactor can try to migrate this page.
932 			 */
933 			__ClearPageIsolated(page);
934 		}
935 
936 		/*
937 		 * Anonymous and movable page->mapping will be cleared by
938 		 * free_pages_prepare(), so don't reset it here; that keeps
939 		 * type checks such as PageAnon() working.
940 		 */
941 		if (!PageMappingFlags(page))
942 			page->mapping = NULL;
943 
944 		if (likely(!is_zone_device_page(newpage)))
945 			flush_dcache_page(newpage);
946 
947 	}
948 out:
949 	return rc;
950 }
951 
952 static int __unmap_and_move(struct page *page, struct page *newpage,
953 				int force, enum migrate_mode mode)
954 {
955 	int rc = -EAGAIN;
956 	bool page_was_mapped = false;
957 	struct anon_vma *anon_vma = NULL;
958 	bool is_lru = !__PageMovable(page);
959 
960 	if (!trylock_page(page)) {
961 		if (!force || mode == MIGRATE_ASYNC)
962 			goto out;
963 
964 		/*
965 		 * It's not safe for direct compaction to call lock_page.
966 		 * For example, during page readahead pages are added locked
967 		 * to the LRU. Later, when the IO completes the pages are
968 		 * marked uptodate and unlocked. However, the queueing
969 		 * could be merging multiple pages for one bio (e.g.
970 		 * mpage_readahead). If an allocation happens for the
971 		 * second or third page, the process can end up locking
972 		 * the same page twice and deadlocking. Rather than
973 		 * trying to be clever about what pages can be locked,
974 		 * avoid the use of lock_page for direct compaction
975 		 * altogether.
976 		 */
977 		if (current->flags & PF_MEMALLOC)
978 			goto out;
979 
980 		lock_page(page);
981 	}
982 
983 	if (PageWriteback(page)) {
984 		/*
985 		 * Only in the case of a full synchronous migration is it
986 		 * necessary to wait for PageWriteback. In the async case,
987 		 * the retry loop is too short and in the sync-light case,
988 		 * the overhead of stalling is too much
989 		 */
990 		switch (mode) {
991 		case MIGRATE_SYNC:
992 		case MIGRATE_SYNC_NO_COPY:
993 			break;
994 		default:
995 			rc = -EBUSY;
996 			goto out_unlock;
997 		}
998 		if (!force)
999 			goto out_unlock;
1000 		wait_on_page_writeback(page);
1001 	}
1002 
1003 	/*
1004 	 * By the time try_to_migrate() runs, page->mapcount has gone down to 0.
1005 	 * In that case we could fail to notice that the anon_vma is freed while
1006 	 * we migrate the page. This get_anon_vma() delays freeing the anon_vma
1007 	 * pointer until the end of migration. Page cache pages are not a problem
1008 	 * because they are protected by the page lock during migration, so only
1009 	 * anonymous pages need this care.
1010 	 *
1011 	 * Only page_get_anon_vma() understands the subtleties of
1012 	 * getting a hold on an anon_vma from outside one of its mms.
1013 	 * But if we cannot get anon_vma, then we won't need it anyway,
1014 	 * because that implies that the anon page is no longer mapped
1015 	 * (and cannot be remapped so long as we hold the page lock).
1016 	 */
1017 	if (PageAnon(page) && !PageKsm(page))
1018 		anon_vma = page_get_anon_vma(page);
1019 
1020 	/*
1021 	 * Block others from accessing the new page when we get around to
1022 	 * establishing additional references. We are usually the only one
1023 	 * holding a reference to newpage at this point. We used to have a BUG
1024 	 * here if trylock_page(newpage) fails, but would like to allow for
1025 	 * cases where there might be a race with the previous use of newpage.
1026 	 * This is much like races on refcount of oldpage: just don't BUG().
1027 	 */
1028 	if (unlikely(!trylock_page(newpage)))
1029 		goto out_unlock;
1030 
1031 	if (unlikely(!is_lru)) {
1032 		rc = move_to_new_page(newpage, page, mode);
1033 		goto out_unlock_both;
1034 	}
1035 
1036 	/*
1037 	 * Corner case handling:
1038 	 * 1. When a new swap-cache page is read in, it is added to the LRU
1039 	 * and treated as swapcache, but it has no rmap yet.
1040 	 * Calling try_to_unmap() against a page->mapping==NULL page will
1041 	 * trigger a BUG.  So handle it here.
1042 	 * 2. An orphaned page (see truncate_cleanup_page) might have
1043 	 * fs-private metadata. The page can be picked up due to memory
1044 	 * offlining.  Everywhere else except page reclaim, the page is
1045 	 * invisible to the VM, so the page cannot be migrated.  So try to
1046 	 * free the metadata, so that the page can be freed.
1047 	 */
1048 	if (!page->mapping) {
1049 		VM_BUG_ON_PAGE(PageAnon(page), page);
1050 		if (page_has_private(page)) {
1051 			try_to_free_buffers(page);
1052 			goto out_unlock_both;
1053 		}
1054 	} else if (page_mapped(page)) {
1055 		/* Establish migration ptes */
1056 		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
1057 				page);
1058 		try_to_migrate(page, 0);
1059 		page_was_mapped = true;
1060 	}
1061 
1062 	if (!page_mapped(page))
1063 		rc = move_to_new_page(newpage, page, mode);
1064 
1065 	if (page_was_mapped)
1066 		remove_migration_ptes(page,
1067 			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
1068 
1069 out_unlock_both:
1070 	unlock_page(newpage);
1071 out_unlock:
1072 	/* Drop an anon_vma reference if we took one */
1073 	if (anon_vma)
1074 		put_anon_vma(anon_vma);
1075 	unlock_page(page);
1076 out:
1077 	/*
1078 	 * If migration is successful, decrease the refcount of newpage.
1079 	 * This will not free the page because the new page owner has
1080 	 * raised the refcount. Also, if it is an LRU page, add it to the
1081 	 * LRU list here. Use the old state of the isolated source page to
1082 	 * determine if we migrated a LRU page. newpage was already unlocked
1083 	 * and possibly modified by its owner - don't rely on the page
1084 	 * state.
1085 	 */
1086 	if (rc == MIGRATEPAGE_SUCCESS) {
1087 		if (unlikely(!is_lru))
1088 			put_page(newpage);
1089 		else
1090 			putback_lru_page(newpage);
1091 	}
1092 
1093 	return rc;
1094 }
1095 
1096 /*
1097  * Obtain the lock on the page, remove all ptes and migrate the page
1098  * to the newly allocated page in newpage.
1099  */
1100 static int unmap_and_move(new_page_t get_new_page,
1101 				   free_page_t put_new_page,
1102 				   unsigned long private, struct page *page,
1103 				   int force, enum migrate_mode mode,
1104 				   enum migrate_reason reason,
1105 				   struct list_head *ret)
1106 {
1107 	int rc = MIGRATEPAGE_SUCCESS;
1108 	struct page *newpage = NULL;
1109 
1110 	if (!thp_migration_supported() && PageTransHuge(page))
1111 		return -ENOSYS;
1112 
1113 	if (page_count(page) == 1) {
1114 		/* page was freed from under us. So we are done. */
1115 		ClearPageActive(page);
1116 		ClearPageUnevictable(page);
1117 		if (unlikely(__PageMovable(page))) {
1118 			lock_page(page);
1119 			if (!PageMovable(page))
1120 				__ClearPageIsolated(page);
1121 			unlock_page(page);
1122 		}
1123 		goto out;
1124 	}
1125 
1126 	newpage = get_new_page(page, private);
1127 	if (!newpage)
1128 		return -ENOMEM;
1129 
1130 	rc = __unmap_and_move(page, newpage, force, mode);
1131 	if (rc == MIGRATEPAGE_SUCCESS)
1132 		set_page_owner_migrate_reason(newpage, reason);
1133 
1134 out:
1135 	if (rc != -EAGAIN) {
1136 		/*
1137 		 * A page that has been migrated has all references
1138 		 * removed and will be freed. A page that has not been
1139 		 * migrated will have kept its references and be restored.
1140 		 */
1141 		list_del(&page->lru);
1142 	}
1143 
1144 	/*
1145 	 * If migration is successful, release the reference grabbed during
1146 	 * isolation. Otherwise, restore the page to the right list unless
1147 	 * we want to retry.
1148 	 */
1149 	if (rc == MIGRATEPAGE_SUCCESS) {
1150 		/*
1151 		 * Compaction can also migrate non-LRU pages, which are
1152 		 * not accounted to NR_ISOLATED_*. They can be recognized
1153 		 * as __PageMovable
1154 		 */
1155 		if (likely(!__PageMovable(page)))
1156 			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
1157 					page_is_file_lru(page), -thp_nr_pages(page));
1158 
1159 		if (reason != MR_MEMORY_FAILURE)
1160 			/*
1161 			 * We release the page in page_handle_poison.
1162 			 */
1163 			put_page(page);
1164 	} else {
1165 		if (rc != -EAGAIN)
1166 			list_add_tail(&page->lru, ret);
1167 
1168 		if (put_new_page)
1169 			put_new_page(newpage, private);
1170 		else
1171 			put_page(newpage);
1172 	}
1173 
1174 	return rc;
1175 }
1176 
1177 /*
1178  * Counterpart of unmap_and_move() for hugepage migration.
1179  *
1180  * This function doesn't wait for the completion of hugepage I/O
1181  * because there is no race between I/O and migration for hugepage.
1182  * Note that currently hugepage I/O occurs only in direct I/O
1183  * where no lock is held and PG_writeback is irrelevant,
1184  * and the writeback status of all subpages is counted in the reference
1185  * count of the head page (i.e. if all subpages of a 2MB hugepage are
1186  * under direct I/O, the reference of the head page is 512 and a bit more.)
1187  * This means that when we try to migrate hugepage whose subpages are
1188  * doing direct I/O, some references remain after try_to_unmap() and
1189  * hugepage migration fails without data corruption.
1190  *
1191  * There is also no race when direct I/O is issued on the page under migration,
1192  * because then pte is replaced with migration swap entry and direct I/O code
1193  * will wait in the page fault for migration to complete.
1194  */
1195 static int unmap_and_move_huge_page(new_page_t get_new_page,
1196 				free_page_t put_new_page, unsigned long private,
1197 				struct page *hpage, int force,
1198 				enum migrate_mode mode, int reason,
1199 				struct list_head *ret)
1200 {
1201 	int rc = -EAGAIN;
1202 	int page_was_mapped = 0;
1203 	struct page *new_hpage;
1204 	struct anon_vma *anon_vma = NULL;
1205 	struct address_space *mapping = NULL;
1206 
1207 	/*
1208 	 * Migratability of hugepages depends on architectures and their size.
1209 	 * This check is necessary because some callers of hugepage migration
1210 	 * like soft offline and memory hotremove don't walk through page
1211 	 * tables or check whether the hugepage is pmd-based or not before
1212 	 * kicking migration.
1213 	 */
1214 	if (!hugepage_migration_supported(page_hstate(hpage))) {
1215 		list_move_tail(&hpage->lru, ret);
1216 		return -ENOSYS;
1217 	}
1218 
1219 	if (page_count(hpage) == 1) {
1220 		/* page was freed from under us. So we are done. */
1221 		putback_active_hugepage(hpage);
1222 		return MIGRATEPAGE_SUCCESS;
1223 	}
1224 
1225 	new_hpage = get_new_page(hpage, private);
1226 	if (!new_hpage)
1227 		return -ENOMEM;
1228 
1229 	if (!trylock_page(hpage)) {
1230 		if (!force)
1231 			goto out;
1232 		switch (mode) {
1233 		case MIGRATE_SYNC:
1234 		case MIGRATE_SYNC_NO_COPY:
1235 			break;
1236 		default:
1237 			goto out;
1238 		}
1239 		lock_page(hpage);
1240 	}
1241 
1242 	/*
1243 	 * Check for pages which are in the process of being freed.  Without
1244 	 * page_mapping() set, hugetlbfs specific move page routine will not
1245 	 * be called and we could leak usage counts for subpools.
1246 	 */
1247 	if (hugetlb_page_subpool(hpage) && !page_mapping(hpage)) {
1248 		rc = -EBUSY;
1249 		goto out_unlock;
1250 	}
1251 
1252 	if (PageAnon(hpage))
1253 		anon_vma = page_get_anon_vma(hpage);
1254 
1255 	if (unlikely(!trylock_page(new_hpage)))
1256 		goto put_anon;
1257 
1258 	if (page_mapped(hpage)) {
1259 		bool mapping_locked = false;
1260 		enum ttu_flags ttu = 0;
1261 
1262 		if (!PageAnon(hpage)) {
1263 			/*
1264 			 * In shared mappings, try_to_migrate() could potentially
1265 			 * call huge_pmd_unshare.  Because of this, take
1266 			 * semaphore in write mode here and set TTU_RMAP_LOCKED
1267 			 * to let lower levels know we have taken the lock.
1268 			 */
1269 			mapping = hugetlb_page_mapping_lock_write(hpage);
1270 			if (unlikely(!mapping))
1271 				goto unlock_put_anon;
1272 
1273 			mapping_locked = true;
1274 			ttu |= TTU_RMAP_LOCKED;
1275 		}
1276 
1277 		try_to_migrate(hpage, ttu);
1278 		page_was_mapped = 1;
1279 
1280 		if (mapping_locked)
1281 			i_mmap_unlock_write(mapping);
1282 	}
1283 
1284 	if (!page_mapped(hpage))
1285 		rc = move_to_new_page(new_hpage, hpage, mode);
1286 
1287 	if (page_was_mapped)
1288 		remove_migration_ptes(hpage,
1289 			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
1290 
1291 unlock_put_anon:
1292 	unlock_page(new_hpage);
1293 
1294 put_anon:
1295 	if (anon_vma)
1296 		put_anon_vma(anon_vma);
1297 
1298 	if (rc == MIGRATEPAGE_SUCCESS) {
1299 		move_hugetlb_state(hpage, new_hpage, reason);
1300 		put_new_page = NULL;
1301 	}
1302 
1303 out_unlock:
1304 	unlock_page(hpage);
1305 out:
1306 	if (rc == MIGRATEPAGE_SUCCESS)
1307 		putback_active_hugepage(hpage);
1308 	else if (rc != -EAGAIN)
1309 		list_move_tail(&hpage->lru, ret);
1310 
1311 	/*
1312 	 * If migration was not successful and there's a freeing callback, use
1313 	 * it.  Otherwise, put_page() will drop the reference grabbed during
1314 	 * isolation.
1315 	 */
1316 	if (put_new_page)
1317 		put_new_page(new_hpage, private);
1318 	else
1319 		putback_active_hugepage(new_hpage);
1320 
1321 	return rc;
1322 }
1323 
1324 static inline int try_split_thp(struct page *page, struct page **page2,
1325 				struct list_head *from)
1326 {
1327 	int rc = 0;
1328 
1329 	lock_page(page);
1330 	rc = split_huge_page_to_list(page, from);
1331 	unlock_page(page);
1332 	if (!rc)
1333 		list_safe_reset_next(page, *page2, lru);
1334 
1335 	return rc;
1336 }
1337 
1338 /*
1339  * migrate_pages - migrate the pages specified in a list, to the free pages
1340  *		   supplied as the target for the page migration
1341  *
1342  * @from:		The list of pages to be migrated.
1343  * @get_new_page:	The function used to allocate free pages to be used
1344  *			as the target of the page migration.
1345  * @put_new_page:	The function used to free target pages if migration
1346  *			fails, or NULL if no special handling is necessary.
1347  * @private:		Private data to be passed on to get_new_page()
1348  * @mode:		The migration mode that specifies the constraints for
1349  *			page migration, if any.
1350  * @reason:		The reason for page migration.
1351  * @ret_succeeded:	Set to the number of normal pages migrated successfully if
1352  *			the caller passes a non-NULL pointer.
1353  *
1354  * The function returns after 10 attempts or if no pages are movable anymore
1355  * because the list has become empty or no retryable pages exist anymore.
1356  * It is the caller's responsibility to call putback_movable_pages() to return
1357  * pages to the LRU or free list only if ret != 0.
1358  *
1359  * Returns the number of {normal pages, THPs, hugetlb pages} that were not migrated,
1360  * or an error code. The number of THP splits will be considered as the number of
1361  * non-migrated THPs, no matter how many subpages of the THP are migrated successfully.
1362  */
1363 int migrate_pages(struct list_head *from, new_page_t get_new_page,
1364 		free_page_t put_new_page, unsigned long private,
1365 		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1366 {
1367 	int retry = 1;
1368 	int thp_retry = 1;
1369 	int nr_failed = 0;
1370 	int nr_failed_pages = 0;
1371 	int nr_succeeded = 0;
1372 	int nr_thp_succeeded = 0;
1373 	int nr_thp_failed = 0;
1374 	int nr_thp_split = 0;
1375 	int pass = 0;
1376 	bool is_thp = false;
1377 	struct page *page;
1378 	struct page *page2;
1379 	int swapwrite = current->flags & PF_SWAPWRITE;
1380 	int rc, nr_subpages;
1381 	LIST_HEAD(ret_pages);
1382 	LIST_HEAD(thp_split_pages);
1383 	bool nosplit = (reason == MR_NUMA_MISPLACED);
1384 	bool no_subpage_counting = false;
1385 
1386 	trace_mm_migrate_pages_start(mode, reason);
1387 
1388 	if (!swapwrite)
1389 		current->flags |= PF_SWAPWRITE;
1390 
1391 thp_subpage_migration:
1392 	for (pass = 0; pass < 10 && (retry || thp_retry); pass++) {
1393 		retry = 0;
1394 		thp_retry = 0;
1395 
1396 		list_for_each_entry_safe(page, page2, from, lru) {
1397 retry:
1398 			/*
1399 			 * THP statistics are based on the source huge page.
1400 			 * Capture required information that might get lost
1401 			 * during migration.
1402 			 */
1403 			is_thp = PageTransHuge(page) && !PageHuge(page);
1404 			nr_subpages = compound_nr(page);
1405 			cond_resched();
1406 
1407 			if (PageHuge(page))
1408 				rc = unmap_and_move_huge_page(get_new_page,
1409 						put_new_page, private, page,
1410 						pass > 2, mode, reason,
1411 						&ret_pages);
1412 			else
1413 				rc = unmap_and_move(get_new_page, put_new_page,
1414 						private, page, pass > 2, mode,
1415 						reason, &ret_pages);
1416 			/*
1417 			 * The rules are:
1418 			 *	Success: non hugetlb page will be freed, hugetlb
1419 			 *		 page will be put back
1420 			 *	-EAGAIN: stay on the from list
1421 			 *	-ENOMEM: stay on the from list
1422 			 *	Other errno: put on ret_pages list then splice to
1423 			 *		     from list
1424 			 */
1425 			switch(rc) {
1426 			/*
1427 			 * THP migration might be unsupported or the
1428 			 * allocation could've failed so we should
1429 			 * retry on the same page with the THP split
1430 			 * to base pages.
1431 			 *
1432 			 * Head page is retried immediately and tail
1433 			 * pages are added to the tail of the list so
1434 			 * we encounter them after the rest of the list
1435 			 * is processed.
1436 			 */
1437 			case -ENOSYS:
1438 				/* THP migration is unsupported */
1439 				if (is_thp) {
1440 					nr_thp_failed++;
1441 					if (!try_split_thp(page, &page2, &thp_split_pages)) {
1442 						nr_thp_split++;
1443 						goto retry;
1444 					}
1445 
1446 					nr_failed_pages += nr_subpages;
1447 					break;
1448 				}
1449 
1450 				/* Hugetlb migration is unsupported */
1451 				if (!no_subpage_counting)
1452 					nr_failed++;
1453 				nr_failed_pages += nr_subpages;
1454 				break;
1455 			case -ENOMEM:
1456 				/*
1457 				 * When memory is low, don't bother to try to migrate
1458 				 * other pages, just exit.
1459 				 * THP NUMA faulting doesn't split THP to retry.
1460 				 */
1461 				if (is_thp && !nosplit) {
1462 					nr_thp_failed++;
1463 					if (!try_split_thp(page, &page2, &thp_split_pages)) {
1464 						nr_thp_split++;
1465 						goto retry;
1466 					}
1467 
1468 					nr_failed_pages += nr_subpages;
1469 					goto out;
1470 				}
1471 
1472 				if (!no_subpage_counting)
1473 					nr_failed++;
1474 				nr_failed_pages += nr_subpages;
1475 				goto out;
1476 			case -EAGAIN:
1477 				if (is_thp) {
1478 					thp_retry++;
1479 					break;
1480 				}
1481 				retry++;
1482 				break;
1483 			case MIGRATEPAGE_SUCCESS:
1484 				nr_succeeded += nr_subpages;
1485 				if (is_thp) {
1486 					nr_thp_succeeded++;
1487 					break;
1488 				}
1489 				break;
1490 			default:
1491 				/*
1492 				 * Permanent failure (-EBUSY, etc.):
1493 				 * unlike -EAGAIN case, the failed page is
1494 				 * removed from migration page list and not
1495 				 * retried in the next outer loop.
1496 				 */
1497 				if (is_thp) {
1498 					nr_thp_failed++;
1499 					nr_failed_pages += nr_subpages;
1500 					break;
1501 				}
1502 
1503 				if (!no_subpage_counting)
1504 					nr_failed++;
1505 				nr_failed_pages += nr_subpages;
1506 				break;
1507 			}
1508 		}
1509 	}
1510 	nr_failed += retry;
1511 	nr_thp_failed += thp_retry;
1512 	/*
1513 	 * Try to migrate the subpages of failed-to-migrate THPs; skip the
1514 	 * nr_failed counting in this round, since all subpages of a THP are
1515 	 * counted as 1 failure in the first round.
1516 	 */
1517 	if (!list_empty(&thp_split_pages)) {
1518 		/*
1519 		 * Move non-migrated pages (after 10 retries) to ret_pages
1520 		 * to avoid migrating them again.
1521 		 */
1522 		list_splice_init(from, &ret_pages);
1523 		list_splice_init(&thp_split_pages, from);
1524 		no_subpage_counting = true;
1525 		retry = 1;
1526 		goto thp_subpage_migration;
1527 	}
1528 
1529 	rc = nr_failed + nr_thp_failed;
1530 out:
1531 	/*
1532 	 * Put the permanently failed pages back on the migration list; they
1533 	 * will be put back on the right list by the caller.
1534 	 */
1535 	list_splice(&ret_pages, from);
1536 
1537 	count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1538 	count_vm_events(PGMIGRATE_FAIL, nr_failed_pages);
1539 	count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
1540 	count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed);
1541 	count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split);
1542 	trace_mm_migrate_pages(nr_succeeded, nr_failed_pages, nr_thp_succeeded,
1543 			       nr_thp_failed, nr_thp_split, mode, reason);
1544 
1545 	if (!swapwrite)
1546 		current->flags &= ~PF_SWAPWRITE;
1547 
1548 	if (ret_succeeded)
1549 		*ret_succeeded = nr_succeeded;
1550 
1551 	return rc;
1552 }
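
/*
 * Minimal usage sketch (compare do_move_pages_to_node() below); target_nid
 * is a placeholder for the caller's destination node: isolate pages onto a
 * private list, ask migrate_pages() to move them, and put back whatever
 * could not be migrated.
 *
 *	LIST_HEAD(pagelist);
 *	struct migration_target_control mtc = {
 *		.nid = target_nid,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
 *	};
 *	int err;
 *
 *	// ... isolate_lru_page()/isolate_huge_page() pages onto &pagelist ...
 *	err = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */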
1553 
1554 struct page *alloc_migration_target(struct page *page, unsigned long private)
1555 {
1556 	struct migration_target_control *mtc;
1557 	gfp_t gfp_mask;
1558 	unsigned int order = 0;
1559 	struct page *new_page = NULL;
1560 	int nid;
1561 	int zidx;
1562 
1563 	mtc = (struct migration_target_control *)private;
1564 	gfp_mask = mtc->gfp_mask;
1565 	nid = mtc->nid;
1566 	if (nid == NUMA_NO_NODE)
1567 		nid = page_to_nid(page);
1568 
1569 	if (PageHuge(page)) {
1570 		struct hstate *h = page_hstate(compound_head(page));
1571 
1572 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
1573 		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
1574 	}
1575 
1576 	if (PageTransHuge(page)) {
1577 		/*
1578 		 * clear __GFP_RECLAIM to make the migration callback
1579 		 * consistent with regular THP allocations.
1580 		 */
1581 		gfp_mask &= ~__GFP_RECLAIM;
1582 		gfp_mask |= GFP_TRANSHUGE;
1583 		order = HPAGE_PMD_ORDER;
1584 	}
1585 	zidx = zone_idx(page_zone(page));
1586 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
1587 		gfp_mask |= __GFP_HIGHMEM;
1588 
1589 	new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
1590 
1591 	if (new_page && PageTransHuge(new_page))
1592 		prep_transhuge_page(new_page);
1593 
1594 	return new_page;
1595 }
1596 
1597 #ifdef CONFIG_NUMA
1598 
1599 static int store_status(int __user *status, int start, int value, int nr)
1600 {
1601 	while (nr-- > 0) {
1602 		if (put_user(value, status + start))
1603 			return -EFAULT;
1604 		start++;
1605 	}
1606 
1607 	return 0;
1608 }
1609 
1610 static int do_move_pages_to_node(struct mm_struct *mm,
1611 		struct list_head *pagelist, int node)
1612 {
1613 	int err;
1614 	struct migration_target_control mtc = {
1615 		.nid = node,
1616 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1617 	};
1618 
1619 	err = migrate_pages(pagelist, alloc_migration_target, NULL,
1620 		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1621 	if (err)
1622 		putback_movable_pages(pagelist);
1623 	return err;
1624 }
1625 
1626 /*
1627  * Resolves the given address to a struct page, isolates it from the LRU and
1628  * puts it on the given pagelist.
1629  * Returns:
1630  *     errno - if the page cannot be found/isolated
1631  *     0 - when it doesn't have to be migrated because it is already on the
1632  *         target node
1633  *     1 - when it has been queued
1634  */
1635 static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
1636 		int node, struct list_head *pagelist, bool migrate_all)
1637 {
1638 	struct vm_area_struct *vma;
1639 	struct page *page;
1640 	unsigned int follflags;
1641 	int err;
1642 
1643 	mmap_read_lock(mm);
1644 	err = -EFAULT;
1645 	vma = find_vma(mm, addr);
1646 	if (!vma || addr < vma->vm_start || !vma_migratable(vma))
1647 		goto out;
1648 
1649 	/* FOLL_DUMP to ignore special (like zero) pages */
1650 	follflags = FOLL_GET | FOLL_DUMP;
1651 	page = follow_page(vma, addr, follflags);
1652 
1653 	err = PTR_ERR(page);
1654 	if (IS_ERR(page))
1655 		goto out;
1656 
1657 	err = -ENOENT;
1658 	if (!page)
1659 		goto out;
1660 
1661 	err = 0;
1662 	if (page_to_nid(page) == node)
1663 		goto out_putpage;
1664 
1665 	err = -EACCES;
1666 	if (page_mapcount(page) > 1 && !migrate_all)
1667 		goto out_putpage;
1668 
1669 	if (PageHuge(page)) {
1670 		if (PageHead(page)) {
1671 			isolate_huge_page(page, pagelist);
1672 			err = 1;
1673 		}
1674 	} else {
1675 		struct page *head;
1676 
1677 		head = compound_head(page);
1678 		err = isolate_lru_page(head);
1679 		if (err)
1680 			goto out_putpage;
1681 
1682 		err = 1;
1683 		list_add_tail(&head->lru, pagelist);
1684 		mod_node_page_state(page_pgdat(head),
1685 			NR_ISOLATED_ANON + page_is_file_lru(head),
1686 			thp_nr_pages(head));
1687 	}
1688 out_putpage:
1689 	/*
1690 	 * Either remove the duplicate refcount from
1691 	 * isolate_lru_page() or drop the page ref if it was
1692 	 * not isolated.
1693 	 */
1694 	put_page(page);
1695 out:
1696 	mmap_read_unlock(mm);
1697 	return err;
1698 }
1699 
1700 static int move_pages_and_store_status(struct mm_struct *mm, int node,
1701 		struct list_head *pagelist, int __user *status,
1702 		int start, int i, unsigned long nr_pages)
1703 {
1704 	int err;
1705 
1706 	if (list_empty(pagelist))
1707 		return 0;
1708 
1709 	err = do_move_pages_to_node(mm, pagelist, node);
1710 	if (err) {
1711 		/*
1712 		 * A positive err means the number of pages that
1713 		 * failed to migrate.  Since we are going to
1714 		 * abort and return the number of non-migrated
1715 		 * pages, we need to include the rest of the
1716 		 * nr_pages that have not been attempted as
1717 		 * well.
1718 		 */
1719 		if (err > 0)
1720 			err += nr_pages - i - 1;
1721 		return err;
1722 	}
1723 	return store_status(status, start, node, i - start);
1724 }
1725 
1726 /*
1727  * Migrate an array of page addresses to an array of nodes and fill in
1728  * the corresponding array of status values.
1729  */
1730 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1731 			 unsigned long nr_pages,
1732 			 const void __user * __user *pages,
1733 			 const int __user *nodes,
1734 			 int __user *status, int flags)
1735 {
1736 	int current_node = NUMA_NO_NODE;
1737 	LIST_HEAD(pagelist);
1738 	int start, i;
1739 	int err = 0, err1;
1740 
1741 	lru_cache_disable();
1742 
1743 	for (i = start = 0; i < nr_pages; i++) {
1744 		const void __user *p;
1745 		unsigned long addr;
1746 		int node;
1747 
1748 		err = -EFAULT;
1749 		if (get_user(p, pages + i))
1750 			goto out_flush;
1751 		if (get_user(node, nodes + i))
1752 			goto out_flush;
1753 		addr = (unsigned long)untagged_addr(p);
1754 
1755 		err = -ENODEV;
1756 		if (node < 0 || node >= MAX_NUMNODES)
1757 			goto out_flush;
1758 		if (!node_state(node, N_MEMORY))
1759 			goto out_flush;
1760 
1761 		err = -EACCES;
1762 		if (!node_isset(node, task_nodes))
1763 			goto out_flush;
1764 
1765 		if (current_node == NUMA_NO_NODE) {
1766 			current_node = node;
1767 			start = i;
1768 		} else if (node != current_node) {
1769 			err = move_pages_and_store_status(mm, current_node,
1770 					&pagelist, status, start, i, nr_pages);
1771 			if (err)
1772 				goto out;
1773 			start = i;
1774 			current_node = node;
1775 		}
1776 
1777 		/*
1778 		 * Errors in the page lookup or isolation are not fatal and we simply
1779 		 * report them via status
1780 		 */
1781 		err = add_page_for_migration(mm, addr, current_node,
1782 				&pagelist, flags & MPOL_MF_MOVE_ALL);
1783 
1784 		if (err > 0) {
1785 			/* The page is successfully queued for migration */
1786 			continue;
1787 		}
1788 
1789 		/*
1790 		 * If the page is already on the target node (!err), store the
1791 		 * node, otherwise, store the err.
1792 		 */
1793 		err = store_status(status, i, err ? : current_node, 1);
1794 		if (err)
1795 			goto out_flush;
1796 
1797 		err = move_pages_and_store_status(mm, current_node, &pagelist,
1798 				status, start, i, nr_pages);
1799 		if (err)
1800 			goto out;
1801 		current_node = NUMA_NO_NODE;
1802 	}
1803 out_flush:
1804 	/* Make sure we do not overwrite the existing error */
1805 	err1 = move_pages_and_store_status(mm, current_node, &pagelist,
1806 				status, start, i, nr_pages);
1807 	if (err >= 0)
1808 		err = err1;
1809 out:
1810 	lru_cache_enable();
1811 	return err;
1812 }
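
/*
 * Userspace view (illustrative, assuming the libnuma wrapper declared in
 * <numaif.h>; addr1/addr2 stand for addresses in the target process): the
 * loop above services move_pages(2) calls such as
 *
 *	void *pages[2] = { addr1, addr2 };
 *	int nodes[2]   = { 1, 1 };
 *	int status[2];
 *
 *	if (move_pages(0, 2, pages, nodes, status, MPOL_MF_MOVE) < 0)
 *		perror("move_pages");
 *	// on success, status[i] holds the node each page ended up on, or a
 *	// negative errno if the page could not be moved.
 */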
1813 
1814 /*
1815  * Determine the nodes of an array of pages and store them in an array of status values.
1816  */
1817 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1818 				const void __user **pages, int *status)
1819 {
1820 	unsigned long i;
1821 
1822 	mmap_read_lock(mm);
1823 
1824 	for (i = 0; i < nr_pages; i++) {
1825 		unsigned long addr = (unsigned long)(*pages);
1826 		struct vm_area_struct *vma;
1827 		struct page *page;
1828 		int err = -EFAULT;
1829 
1830 		vma = vma_lookup(mm, addr);
1831 		if (!vma)
1832 			goto set_status;
1833 
1834 		/* FOLL_DUMP to ignore special (like zero) pages */
1835 		page = follow_page(vma, addr, FOLL_DUMP);
1836 
1837 		err = PTR_ERR(page);
1838 		if (IS_ERR(page))
1839 			goto set_status;
1840 
1841 		err = page ? page_to_nid(page) : -ENOENT;
1842 set_status:
1843 		*status = err;
1844 
1845 		pages++;
1846 		status++;
1847 	}
1848 
1849 	mmap_read_unlock(mm);
1850 }
1851 
1852 static int get_compat_pages_array(const void __user *chunk_pages[],
1853 				  const void __user * __user *pages,
1854 				  unsigned long chunk_nr)
1855 {
1856 	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
1857 	compat_uptr_t p;
1858 	int i;
1859 
1860 	for (i = 0; i < chunk_nr; i++) {
1861 		if (get_user(p, pages32 + i))
1862 			return -EFAULT;
1863 		chunk_pages[i] = compat_ptr(p);
1864 	}
1865 
1866 	return 0;
1867 }
1868 
1869 /*
1870  * Determine the nodes of a user array of pages and store it in
1871  * a user array of status.
1872  */
1873 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1874 			 const void __user * __user *pages,
1875 			 int __user *status)
1876 {
1877 #define DO_PAGES_STAT_CHUNK_NR 16
1878 	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1879 	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1880 
1881 	while (nr_pages) {
1882 		unsigned long chunk_nr;
1883 
1884 		chunk_nr = nr_pages;
1885 		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1886 			chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1887 
1888 		if (in_compat_syscall()) {
1889 			if (get_compat_pages_array(chunk_pages, pages,
1890 						   chunk_nr))
1891 				break;
1892 		} else {
1893 			if (copy_from_user(chunk_pages, pages,
1894 				      chunk_nr * sizeof(*chunk_pages)))
1895 				break;
1896 		}
1897 
1898 		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1899 
1900 		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1901 			break;
1902 
1903 		pages += chunk_nr;
1904 		status += chunk_nr;
1905 		nr_pages -= chunk_nr;
1906 	}
1907 	return nr_pages ? -EFAULT : 0;
1908 }
1909 
1910 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
1911 {
1912 	struct task_struct *task;
1913 	struct mm_struct *mm;
1914 
1915 	/*
1916 	 * There is no need to check if the current process has the right to modify
1917 	 * the specified process when they are the same.
1918 	 */
1919 	if (!pid) {
1920 		mmget(current->mm);
1921 		*mem_nodes = cpuset_mems_allowed(current);
1922 		return current->mm;
1923 	}
1924 
1925 	/* Find the mm_struct */
1926 	rcu_read_lock();
1927 	task = find_task_by_vpid(pid);
1928 	if (!task) {
1929 		rcu_read_unlock();
1930 		return ERR_PTR(-ESRCH);
1931 	}
1932 	get_task_struct(task);
1933 
1934 	/*
1935 	 * Check if this process has the right to modify the specified
1936 	 * process. Use the regular "ptrace_may_access()" checks.
1937 	 */
1938 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1939 		rcu_read_unlock();
1940 		mm = ERR_PTR(-EPERM);
1941 		goto out;
1942 	}
1943 	rcu_read_unlock();
1944 
1945 	mm = ERR_PTR(security_task_movememory(task));
1946 	if (IS_ERR(mm))
1947 		goto out;
1948 	*mem_nodes = cpuset_mems_allowed(task);
1949 	mm = get_task_mm(task);
1950 out:
1951 	put_task_struct(task);
1952 	if (!mm)
1953 		mm = ERR_PTR(-EINVAL);
1954 	return mm;
1955 }
1956 
1957 /*
1958  * Move a list of pages in the address space of the currently executing
1959  * process.
1960  */
1961 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
1962 			     const void __user * __user *pages,
1963 			     const int __user *nodes,
1964 			     int __user *status, int flags)
1965 {
1966 	struct mm_struct *mm;
1967 	int err;
1968 	nodemask_t task_nodes;
1969 
1970 	/* Check flags */
1971 	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1972 		return -EINVAL;
1973 
1974 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1975 		return -EPERM;
1976 
1977 	mm = find_mm_struct(pid, &task_nodes);
1978 	if (IS_ERR(mm))
1979 		return PTR_ERR(mm);
1980 
1981 	if (nodes)
1982 		err = do_pages_move(mm, task_nodes, nr_pages, pages,
1983 				    nodes, status, flags);
1984 	else
1985 		err = do_pages_stat(mm, nr_pages, pages, status);
1986 
1987 	mmput(mm);
1988 	return err;
1989 }
1990 
1991 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1992 		const void __user * __user *, pages,
1993 		const int __user *, nodes,
1994 		int __user *, status, int, flags)
1995 {
1996 	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1997 }
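
/*
 * Illustrative usage sketch (not kernel code): a userspace caller might move
 * one of its own pages to node 1 roughly as below, using the move_pages(2)
 * wrapper declared in libnuma's <numaif.h>.  "buf" and the target node are
 * assumptions made only for the example:
 *
 *	void *pages[1] = { buf };	// page-aligned user address
 *	int nodes[1] = { 1 };		// requested target node
 *	int status[1];
 *
 *	long rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *
 * A negative rc is a hard error, a positive rc indicates how many pages could
 * not be migrated, and status[0] holds the node the page now resides on or a
 * negative errno for that page.  Passing nodes == NULL turns the call into a
 * pure query: no migration is attempted and status[] is filled with the
 * pages' current nodes (see do_pages_stat() above).
 */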
1998 
1999 #ifdef CONFIG_NUMA_BALANCING
2000 /*
2001  * Returns true if this is a safe migration target node for misplaced NUMA
2002  * pages. Currently it only checks the watermarks, which is crude.
2003  */
2004 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2005 				   unsigned long nr_migrate_pages)
2006 {
2007 	int z;
2008 
2009 	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2010 		struct zone *zone = pgdat->node_zones + z;
2011 
2012 		if (!populated_zone(zone))
2013 			continue;
2014 
2015 		/* Avoid waking kswapd by allocating nr_migrate_pages pages. */
2016 		if (!zone_watermark_ok(zone, 0,
2017 				       high_wmark_pages(zone) +
2018 				       nr_migrate_pages,
2019 				       ZONE_MOVABLE, 0))
2020 			continue;
2021 		return true;
2022 	}
2023 	return false;
2024 }
2025 
2026 static struct page *alloc_misplaced_dst_page(struct page *page,
2027 					   unsigned long data)
2028 {
2029 	int nid = (int) data;
2030 	struct page *newpage;
2031 
2032 	newpage = __alloc_pages_node(nid,
2033 					 (GFP_HIGHUSER_MOVABLE |
2034 					  __GFP_THISNODE | __GFP_NOMEMALLOC |
2035 					  __GFP_NORETRY | __GFP_NOWARN) &
2036 					 ~__GFP_RECLAIM, 0);
2037 
2038 	return newpage;
2039 }
2040 
2041 static struct page *alloc_misplaced_dst_page_thp(struct page *page,
2042 						 unsigned long data)
2043 {
2044 	int nid = (int) data;
2045 	struct page *newpage;
2046 
2047 	newpage = alloc_pages_node(nid, (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
2048 				   HPAGE_PMD_ORDER);
2049 	if (!newpage)
2050 		goto out;
2051 
2052 	prep_transhuge_page(newpage);
2053 
2054 out:
2055 	return newpage;
2056 }
2057 
2058 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
2059 {
2060 	int page_lru;
2061 	int nr_pages = thp_nr_pages(page);
2062 
2063 	VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
2064 
2065 	/* Do not migrate THP mapped by multiple processes */
2066 	if (PageTransHuge(page) && total_mapcount(page) > 1)
2067 		return 0;
2068 
2069 	/* Avoid migrating to a node that is nearly full */
2070 	if (!migrate_balanced_pgdat(pgdat, nr_pages))
2071 		return 0;
2072 
2073 	if (isolate_lru_page(page))
2074 		return 0;
2075 
2076 	page_lru = page_is_file_lru(page);
2077 	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
2078 			    nr_pages);
2079 
2080 	/*
2081 	 * Isolating the page has taken another reference, so the
2082 	 * caller's reference can be safely dropped without the page
2083 	 * disappearing underneath us during migration.
2084 	 */
2085 	put_page(page);
2086 	return 1;
2087 }
2088 
2089 /*
2090  * Attempt to migrate a misplaced page to the specified destination
2091  * node. Caller is expected to have an elevated reference count on
2092  * the page that will be dropped by this function before returning.
2093  */
2094 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
2095 			   int node)
2096 {
2097 	pg_data_t *pgdat = NODE_DATA(node);
2098 	int isolated;
2099 	int nr_remaining;
2100 	LIST_HEAD(migratepages);
2101 	new_page_t *new;
2102 	bool compound;
2103 	int nr_pages = thp_nr_pages(page);
2104 
2105 	/*
2106 	 * A PTE-mapped THP or HugeTLB page can't reach here, so the page is
2107 	 * either a base page or a PMD-mapped THP.  If it is a THP, it must be
2108 	 * the head page.
2109 	 */
2110 	compound = PageTransHuge(page);
2111 
2112 	if (compound)
2113 		new = alloc_misplaced_dst_page_thp;
2114 	else
2115 		new = alloc_misplaced_dst_page;
2116 
2117 	/*
2118 	 * Don't migrate file pages that are mapped in multiple processes
2119 	 * with execute permissions as they are probably shared libraries.
2120 	 */
2121 	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
2122 	    (vma->vm_flags & VM_EXEC))
2123 		goto out;
2124 
2125 	/*
2126 	 * Also do not migrate dirty pages as not all filesystems can move
2127 	 * dirty pages in MIGRATE_ASYNC mode, so trying would be a waste of cycles.
2128 	 */
2129 	if (page_is_file_lru(page) && PageDirty(page))
2130 		goto out;
2131 
2132 	isolated = numamigrate_isolate_page(pgdat, page);
2133 	if (!isolated)
2134 		goto out;
2135 
2136 	list_add(&page->lru, &migratepages);
2137 	nr_remaining = migrate_pages(&migratepages, *new, NULL, node,
2138 				     MIGRATE_ASYNC, MR_NUMA_MISPLACED, NULL);
2139 	if (nr_remaining) {
2140 		if (!list_empty(&migratepages)) {
2141 			list_del(&page->lru);
2142 			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
2143 					page_is_file_lru(page), -nr_pages);
2144 			putback_lru_page(page);
2145 		}
2146 		isolated = 0;
2147 	} else
2148 		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_pages);
2149 	BUG_ON(!list_empty(&migratepages));
2150 	return isolated;
2151 
2152 out:
2153 	put_page(page);
2154 	return 0;
2155 }
2156 #endif /* CONFIG_NUMA_BALANCING */
2157 #endif /* CONFIG_NUMA */
2158 
2159 #ifdef CONFIG_DEVICE_PRIVATE
2160 static int migrate_vma_collect_skip(unsigned long start,
2161 				    unsigned long end,
2162 				    struct mm_walk *walk)
2163 {
2164 	struct migrate_vma *migrate = walk->private;
2165 	unsigned long addr;
2166 
2167 	for (addr = start; addr < end; addr += PAGE_SIZE) {
2168 		migrate->dst[migrate->npages] = 0;
2169 		migrate->src[migrate->npages++] = 0;
2170 	}
2171 
2172 	return 0;
2173 }
2174 
2175 static int migrate_vma_collect_hole(unsigned long start,
2176 				    unsigned long end,
2177 				    __always_unused int depth,
2178 				    struct mm_walk *walk)
2179 {
2180 	struct migrate_vma *migrate = walk->private;
2181 	unsigned long addr;
2182 
2183 	/* Only allow populating anonymous memory. */
2184 	if (!vma_is_anonymous(walk->vma))
2185 		return migrate_vma_collect_skip(start, end, walk);
2186 
2187 	for (addr = start; addr < end; addr += PAGE_SIZE) {
2188 		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
2189 		migrate->dst[migrate->npages] = 0;
2190 		migrate->npages++;
2191 		migrate->cpages++;
2192 	}
2193 
2194 	return 0;
2195 }
2196 
2197 static int migrate_vma_collect_pmd(pmd_t *pmdp,
2198 				   unsigned long start,
2199 				   unsigned long end,
2200 				   struct mm_walk *walk)
2201 {
2202 	struct migrate_vma *migrate = walk->private;
2203 	struct vm_area_struct *vma = walk->vma;
2204 	struct mm_struct *mm = vma->vm_mm;
2205 	unsigned long addr = start, unmapped = 0;
2206 	spinlock_t *ptl;
2207 	pte_t *ptep;
2208 
2209 again:
2210 	if (pmd_none(*pmdp))
2211 		return migrate_vma_collect_hole(start, end, -1, walk);
2212 
2213 	if (pmd_trans_huge(*pmdp)) {
2214 		struct page *page;
2215 
2216 		ptl = pmd_lock(mm, pmdp);
2217 		if (unlikely(!pmd_trans_huge(*pmdp))) {
2218 			spin_unlock(ptl);
2219 			goto again;
2220 		}
2221 
2222 		page = pmd_page(*pmdp);
2223 		if (is_huge_zero_page(page)) {
2224 			spin_unlock(ptl);
2225 			split_huge_pmd(vma, pmdp, addr);
2226 			if (pmd_trans_unstable(pmdp))
2227 				return migrate_vma_collect_skip(start, end,
2228 								walk);
2229 		} else {
2230 			int ret;
2231 
2232 			get_page(page);
2233 			spin_unlock(ptl);
2234 			if (unlikely(!trylock_page(page)))
2235 				return migrate_vma_collect_skip(start, end,
2236 								walk);
2237 			ret = split_huge_page(page);
2238 			unlock_page(page);
2239 			put_page(page);
2240 			if (ret)
2241 				return migrate_vma_collect_skip(start, end,
2242 								walk);
2243 			if (pmd_none(*pmdp))
2244 				return migrate_vma_collect_hole(start, end, -1,
2245 								walk);
2246 		}
2247 	}
2248 
2249 	if (unlikely(pmd_bad(*pmdp)))
2250 		return migrate_vma_collect_skip(start, end, walk);
2251 
2252 	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2253 	arch_enter_lazy_mmu_mode();
2254 
2255 	for (; addr < end; addr += PAGE_SIZE, ptep++) {
2256 		unsigned long mpfn = 0, pfn;
2257 		struct page *page;
2258 		swp_entry_t entry;
2259 		pte_t pte;
2260 
2261 		pte = *ptep;
2262 
2263 		if (pte_none(pte)) {
2264 			if (vma_is_anonymous(vma)) {
2265 				mpfn = MIGRATE_PFN_MIGRATE;
2266 				migrate->cpages++;
2267 			}
2268 			goto next;
2269 		}
2270 
2271 		if (!pte_present(pte)) {
2272 			/*
2273 			 * Only care about the special page table entries for
2274 			 * unaddressable device pages. Other special swap entries
2275 			 * are not migratable, and regular swapped pages are ignored.
2276 			 */
2277 			entry = pte_to_swp_entry(pte);
2278 			if (!is_device_private_entry(entry))
2279 				goto next;
2280 
2281 			page = pfn_swap_entry_to_page(entry);
2282 			if (!(migrate->flags &
2283 				MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
2284 			    page->pgmap->owner != migrate->pgmap_owner)
2285 				goto next;
2286 
2287 			mpfn = migrate_pfn(page_to_pfn(page)) |
2288 					MIGRATE_PFN_MIGRATE;
2289 			if (is_writable_device_private_entry(entry))
2290 				mpfn |= MIGRATE_PFN_WRITE;
2291 		} else {
2292 			if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
2293 				goto next;
2294 			pfn = pte_pfn(pte);
2295 			if (is_zero_pfn(pfn)) {
2296 				mpfn = MIGRATE_PFN_MIGRATE;
2297 				migrate->cpages++;
2298 				goto next;
2299 			}
2300 			page = vm_normal_page(migrate->vma, addr, pte);
2301 			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
2302 			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
2303 		}
2304 
2305 		/* FIXME support THP */
2306 		if (!page || !page->mapping || PageTransCompound(page)) {
2307 			mpfn = 0;
2308 			goto next;
2309 		}
2310 
2311 		/*
2312 		 * By getting a reference on the page we pin it and that blocks
2313 		 * any kind of migration. A side effect is that it "freezes" the
2314 		 * pte.
2315 		 *
2316 		 * We drop this reference after isolating the page from the LRU
2317 		 * for non-device pages (device pages are not on the LRU and thus
2318 		 * cannot be isolated from it).
2319 		 */
2320 		get_page(page);
2321 
2322 		/*
2323 		 * Optimize for the common case where page is only mapped once
2324 		 * in one process. If we can lock the page, then we can safely
2325 		 * set up a special migration page table entry now.
2326 		 */
2327 		if (trylock_page(page)) {
2328 			pte_t swp_pte;
2329 
2330 			migrate->cpages++;
2331 			ptep_get_and_clear(mm, addr, ptep);
2332 
2333 			/* Setup special migration page table entry */
2334 			if (mpfn & MIGRATE_PFN_WRITE)
2335 				entry = make_writable_migration_entry(
2336 							page_to_pfn(page));
2337 			else
2338 				entry = make_readable_migration_entry(
2339 							page_to_pfn(page));
2340 			swp_pte = swp_entry_to_pte(entry);
2341 			if (pte_present(pte)) {
2342 				if (pte_soft_dirty(pte))
2343 					swp_pte = pte_swp_mksoft_dirty(swp_pte);
2344 				if (pte_uffd_wp(pte))
2345 					swp_pte = pte_swp_mkuffd_wp(swp_pte);
2346 			} else {
2347 				if (pte_swp_soft_dirty(pte))
2348 					swp_pte = pte_swp_mksoft_dirty(swp_pte);
2349 				if (pte_swp_uffd_wp(pte))
2350 					swp_pte = pte_swp_mkuffd_wp(swp_pte);
2351 			}
2352 			set_pte_at(mm, addr, ptep, swp_pte);
2353 
2354 			/*
2355 			 * This is like regular unmap: we remove the rmap and
2356 			 * drop page refcount. Page won't be freed, as we took
2357 			 * a reference just above.
2358 			 */
2359 			page_remove_rmap(page, false);
2360 			put_page(page);
2361 
2362 			if (pte_present(pte))
2363 				unmapped++;
2364 		} else {
2365 			put_page(page);
2366 			mpfn = 0;
2367 		}
2368 
2369 next:
2370 		migrate->dst[migrate->npages] = 0;
2371 		migrate->src[migrate->npages++] = mpfn;
2372 	}
2373 	arch_leave_lazy_mmu_mode();
2374 	pte_unmap_unlock(ptep - 1, ptl);
2375 
2376 	/* Only flush the TLB if we actually modified any entries */
2377 	if (unmapped)
2378 		flush_tlb_range(walk->vma, start, end);
2379 
2380 	return 0;
2381 }
2382 
2383 static const struct mm_walk_ops migrate_vma_walk_ops = {
2384 	.pmd_entry		= migrate_vma_collect_pmd,
2385 	.pte_hole		= migrate_vma_collect_hole,
2386 };
2387 
2388 /*
2389  * migrate_vma_collect() - collect pages over a range of virtual addresses
2390  * @migrate: migrate struct containing all migration information
2391  *
2392  * This will walk the CPU page table. For each virtual address backed by a
2393  * valid page, it updates the src array and takes a reference on the page, in
2394  * order to pin the page until we lock it and unmap it.
2395  */
2396 static void migrate_vma_collect(struct migrate_vma *migrate)
2397 {
2398 	struct mmu_notifier_range range;
2399 
2400 	/*
2401 	 * Note that the pgmap_owner is passed to the mmu notifier callback so
2402 	 * that the registered device driver can skip invalidating device
2403 	 * private page mappings that won't be migrated.
2404 	 */
2405 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
2406 		migrate->vma, migrate->vma->vm_mm, migrate->start, migrate->end,
2407 		migrate->pgmap_owner);
2408 	mmu_notifier_invalidate_range_start(&range);
2409 
2410 	walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
2411 			&migrate_vma_walk_ops, migrate);
2412 
2413 	mmu_notifier_invalidate_range_end(&range);
2414 	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
2415 }
2416 
2417 /*
2418  * migrate_vma_check_page() - check if page is pinned or not
2419  * @page: struct page to check
2420  *
2421  * Pinned pages cannot be migrated. This is the same test as in
2422  * folio_migrate_mapping(), except that here we allow migration of a
2423  * ZONE_DEVICE page.
2424  */
2425 static bool migrate_vma_check_page(struct page *page)
2426 {
2427 	/*
2428 	 * One extra ref because caller holds an extra reference, either from
2429 	 * isolate_lru_page() for a regular page, or migrate_vma_collect() for
2430 	 * a device page.
2431 	 */
2432 	int extra = 1;
2433 
2434 	/*
2435 	 * FIXME support THP (transparent huge page), it is a bit more complex to
2436 	 * check them than regular pages, because they can be mapped with a pmd
2437 	 * or with a pte (split pte mapping).
2438 	 */
2439 	if (PageCompound(page))
2440 		return false;
2441 
2442 	/* Pages from ZONE_DEVICE have one extra reference */
2443 	if (is_zone_device_page(page)) {
2444 		/*
2445 		 * Private pages can never be pinned as they have no valid pte and
2446 		 * GUP will fail for them. Yet if there is a pending migration, a
2447 		 * thread might try to wait on the pte migration entry and will
2448 		 * bump the page reference count. Sadly there is no way to
2449 		 * differentiate a regular pin from a migration wait. Hence, to
2450 		 * avoid two racing threads migrating back to the CPU entering an
2451 		 * infinite loop (each stopping migration because the other is
2452 		 * waiting on a pte migration entry), we always return true here.
2453 		 *
2454 		 * FIXME: the proper solution is to rework migration_entry_wait()
2455 		 * so it does not need to take a reference on the page.
2456 		 */
2457 		return is_device_private_page(page);
2458 	}
2459 
2460 	/* For file-backed pages */
2461 	if (page_mapping(page))
2462 		extra += 1 + page_has_private(page);
2463 
2464 	if ((page_count(page) - extra) > page_mapcount(page))
2465 		return false;
2466 
2467 	return true;
2468 }
2469 
2470 /*
2471  * migrate_vma_unmap() - replace page mapping with special migration pte entry
2472  * @migrate: migrate struct containing all migration information
2473  *
2474  * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
2475  * special migration pte entry and check if they have been pinned. Pinned pages
2476  * are restored because we cannot migrate them.
2477  *
2478  * This is the last step before we call the device driver callback to allocate
2479  * destination memory and copy contents of original page over to new page.
2480  */
2481 static void migrate_vma_unmap(struct migrate_vma *migrate)
2482 {
2483 	const unsigned long npages = migrate->npages;
2484 	unsigned long i, restore = 0;
2485 	bool allow_drain = true;
2486 
2487 	lru_add_drain();
2488 
2489 	for (i = 0; i < npages; i++) {
2490 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2491 
2492 		if (!page)
2493 			continue;
2494 
2495 		/* ZONE_DEVICE pages are not on LRU */
2496 		if (!is_zone_device_page(page)) {
2497 			if (!PageLRU(page) && allow_drain) {
2498 				/* Drain CPU's pagevec */
2499 				lru_add_drain_all();
2500 				allow_drain = false;
2501 			}
2502 
2503 			if (isolate_lru_page(page)) {
2504 				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2505 				migrate->cpages--;
2506 				restore++;
2507 				continue;
2508 			}
2509 
2510 			/* Drop the reference we took in collect */
2511 			put_page(page);
2512 		}
2513 
2514 		if (page_mapped(page))
2515 			try_to_migrate(page, 0);
2516 
2517 		if (page_mapped(page) || !migrate_vma_check_page(page)) {
2518 			if (!is_zone_device_page(page)) {
2519 				get_page(page);
2520 				putback_lru_page(page);
2521 			}
2522 
2523 			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2524 			migrate->cpages--;
2525 			restore++;
2526 			continue;
2527 		}
2528 	}
2529 
2530 	for (i = 0; i < npages && restore; i++) {
2531 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2532 
2533 		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2534 			continue;
2535 
2536 		remove_migration_ptes(page, page, false);
2537 
2538 		migrate->src[i] = 0;
2539 		unlock_page(page);
2540 		put_page(page);
2541 		restore--;
2542 	}
2543 }
2544 
2545 /**
2546  * migrate_vma_setup() - prepare to migrate a range of memory
2547  * @args: contains the vma, start, and pfns arrays for the migration
2548  *
2549  * Returns: negative errno on failure, 0 when 0 or more pages were migrated
2550  * without an error.
2551  *
2552  * Prepare to migrate a virtual address range of memory by collecting all
2553  * the pages backing each virtual address in the range, saving them inside the
2554  * src array.  Then lock those pages and unmap them. Once the pages are locked
2555  * and unmapped, check whether each page is pinned or not.  Pages that aren't
2556  * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
2557  * corresponding src array entry.  Then restores any pages that are pinned, by
2558  * corresponding src array entry.  Any pages that are pinned are then restored
2559  * by remapping and unlocking them.
2560  * The caller should then allocate destination memory and copy source memory to
2561  * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
2562  * flag set).  Once these are allocated and copied, the caller must update each
2563  * corresponding entry in the dst array with the pfn value of the destination
2564  * page and with MIGRATE_PFN_VALID. Destination pages must be locked via
2565  * lock_page().
2566  *
2567  * Note that the caller does not have to migrate all the pages that are marked
2568  * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from
2569  * device memory to system memory.  If the caller cannot migrate a device page
2570  * back to system memory, then it must return VM_FAULT_SIGBUS, which has severe
2571  * consequences for the userspace process, so it must be avoided if at all
2572  * possible.
2573  *
2574  * For empty entries inside the CPU page table (pte_none() or pmd_none() is true)
2575  * we set the MIGRATE_PFN_MIGRATE flag in the corresponding source array entry, thus
2576  * allowing the caller to allocate device memory for those unbacked virtual
2577  * addresses.  For this the caller simply has to allocate device memory and
2578  * properly set the destination entry like for regular migration.  Note that
2579  * this can still fail, and thus inside the device driver you must check if the
2580  * migration was successful for those entries after calling migrate_vma_pages(),
2581  * just like for regular migration.
2582  *
2583  * After that, the callers must call migrate_vma_pages() to go over each entry
2584  * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
2585  * set. If the corresponding entry in the dst array has the MIGRATE_PFN_VALID
2586  * flag set, migrate_vma_pages() migrates struct page information from the source
2587  * struct page to the destination struct page.  If it fails to migrate the
2588  * struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in the
2589  * src array.
2590  *
2591  * At this point all successfully migrated pages have an entry in the src
2592  * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
2593  * array entry with MIGRATE_PFN_VALID flag set.
2594  *
2595  * Once migrate_vma_pages() returns the caller may inspect which pages were
2596  * successfully migrated, and which were not.  Successfully migrated pages will
2597  * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
2598  *
2599  * It is safe to update device page table after migrate_vma_pages() because
2600  * both destination and source page are still locked, and the mmap_lock is held
2601  * in read mode (hence no one can unmap the range being migrated).
2602  *
2603  * Once the caller is done cleaning up things and updating its page table (if it
2604  * chose to do so, this is not an obligation) it finally calls
2605  * migrate_vma_finalize() to update the CPU page table to point to new pages
2606  * for successfully migrated pages or otherwise restore the CPU page table to
2607  * point to the original source pages.
2608  */
2609 int migrate_vma_setup(struct migrate_vma *args)
2610 {
2611 	long nr_pages = (args->end - args->start) >> PAGE_SHIFT;
2612 
2613 	args->start &= PAGE_MASK;
2614 	args->end &= PAGE_MASK;
2615 	if (!args->vma || is_vm_hugetlb_page(args->vma) ||
2616 	    (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
2617 		return -EINVAL;
2618 	if (nr_pages <= 0)
2619 		return -EINVAL;
2620 	if (args->start < args->vma->vm_start ||
2621 	    args->start >= args->vma->vm_end)
2622 		return -EINVAL;
2623 	if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
2624 		return -EINVAL;
2625 	if (!args->src || !args->dst)
2626 		return -EINVAL;
2627 
2628 	memset(args->src, 0, sizeof(*args->src) * nr_pages);
2629 	args->cpages = 0;
2630 	args->npages = 0;
2631 
2632 	migrate_vma_collect(args);
2633 
2634 	if (args->cpages)
2635 		migrate_vma_unmap(args);
2636 
2637 	/*
2638 	 * At this point pages are locked and unmapped, and thus they have
2639 	 * stable content and can safely be copied to destination memory that
2640 	 * is allocated by the drivers.
2641 	 */
2642 	return 0;
2644 }
2645 EXPORT_SYMBOL(migrate_vma_setup);
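
/*
 * Illustrative driver-side sketch of the flow described in the
 * migrate_vma_setup() documentation above.  drv_alloc_dst_page() and
 * drv_owner are hypothetical names and the copy step is elided; everything
 * else uses the migrate_vma API as documented:
 *
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.start		= start,
 *		.end		= end,
 *		.src		= src_pfns,
 *		.dst		= dst_pfns,
 *		.pgmap_owner	= drv_owner,
 *		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 *	};
 *	unsigned long i;
 *
 *	if (migrate_vma_setup(&args))
 *		return;
 *
 *	for (i = 0; i < args.npages; i++) {
 *		struct page *dpage;
 *
 *		if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
 *			continue;
 *		dpage = drv_alloc_dst_page();	// hypothetical allocation
 *		if (!dpage)
 *			continue;
 *		lock_page(dpage);
 *		// copy data from the source page into dpage here
 *		args.dst[i] = migrate_pfn(page_to_pfn(dpage)) |
 *			      MIGRATE_PFN_VALID;
 *	}
 *
 *	migrate_vma_pages(&args);
 *	// update device page tables for entries still marked
 *	// MIGRATE_PFN_MIGRATE, then:
 *	migrate_vma_finalize(&args);
 */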
2646 
2647 /*
2648  * This code closely matches the code in:
2649  *   __handle_mm_fault()
2650  *     handle_pte_fault()
2651  *       do_anonymous_page()
2652  * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
2653  * private page.
2654  */
2655 static void migrate_vma_insert_page(struct migrate_vma *migrate,
2656 				    unsigned long addr,
2657 				    struct page *page,
2658 				    unsigned long *src)
2659 {
2660 	struct vm_area_struct *vma = migrate->vma;
2661 	struct mm_struct *mm = vma->vm_mm;
2662 	bool flush = false;
2663 	spinlock_t *ptl;
2664 	pte_t entry;
2665 	pgd_t *pgdp;
2666 	p4d_t *p4dp;
2667 	pud_t *pudp;
2668 	pmd_t *pmdp;
2669 	pte_t *ptep;
2670 
2671 	/* Only allow populating anonymous memory */
2672 	if (!vma_is_anonymous(vma))
2673 		goto abort;
2674 
2675 	pgdp = pgd_offset(mm, addr);
2676 	p4dp = p4d_alloc(mm, pgdp, addr);
2677 	if (!p4dp)
2678 		goto abort;
2679 	pudp = pud_alloc(mm, p4dp, addr);
2680 	if (!pudp)
2681 		goto abort;
2682 	pmdp = pmd_alloc(mm, pudp, addr);
2683 	if (!pmdp)
2684 		goto abort;
2685 
2686 	if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
2687 		goto abort;
2688 
2689 	/*
2690 	 * Use pte_alloc() instead of pte_alloc_map().  We can't run
2691 	 * pte_offset_map() on pmds where a huge pmd might be created
2692 	 * from a different thread.
2693 	 *
2694 	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
2695 	 * parallel threads are excluded by other means.
2696 	 *
2697 	 * Here we only have mmap_read_lock(mm).
2698 	 */
2699 	if (pte_alloc(mm, pmdp))
2700 		goto abort;
2701 
2702 	/* See the comment in pte_alloc_one_map() */
2703 	if (unlikely(pmd_trans_unstable(pmdp)))
2704 		goto abort;
2705 
2706 	if (unlikely(anon_vma_prepare(vma)))
2707 		goto abort;
2708 	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
2709 		goto abort;
2710 
2711 	/*
2712 	 * The memory barrier inside __SetPageUptodate makes sure that
2713 	 * preceding stores to the page contents become visible before
2714 	 * the set_pte_at() write.
2715 	 */
2716 	__SetPageUptodate(page);
2717 
2718 	if (is_zone_device_page(page)) {
2719 		if (is_device_private_page(page)) {
2720 			swp_entry_t swp_entry;
2721 
2722 			if (vma->vm_flags & VM_WRITE)
2723 				swp_entry = make_writable_device_private_entry(
2724 							page_to_pfn(page));
2725 			else
2726 				swp_entry = make_readable_device_private_entry(
2727 							page_to_pfn(page));
2728 			entry = swp_entry_to_pte(swp_entry);
2729 		} else {
2730 			/*
2731 			 * For now we only support migrating to un-addressable
2732 			 * device memory.
2733 			 */
2734 			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
2735 			goto abort;
2736 		}
2737 	} else {
2738 		entry = mk_pte(page, vma->vm_page_prot);
2739 		if (vma->vm_flags & VM_WRITE)
2740 			entry = pte_mkwrite(pte_mkdirty(entry));
2741 	}
2742 
2743 	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2744 
2745 	if (check_stable_address_space(mm))
2746 		goto unlock_abort;
2747 
2748 	if (pte_present(*ptep)) {
2749 		unsigned long pfn = pte_pfn(*ptep);
2750 
2751 		if (!is_zero_pfn(pfn))
2752 			goto unlock_abort;
2753 		flush = true;
2754 	} else if (!pte_none(*ptep))
2755 		goto unlock_abort;
2756 
2757 	/*
2758 	 * Check for userfaultfd but do not deliver the fault. Instead,
2759 	 * just back off.
2760 	 */
2761 	if (userfaultfd_missing(vma))
2762 		goto unlock_abort;
2763 
2764 	inc_mm_counter(mm, MM_ANONPAGES);
2765 	page_add_new_anon_rmap(page, vma, addr, false);
2766 	if (!is_zone_device_page(page))
2767 		lru_cache_add_inactive_or_unevictable(page, vma);
2768 	get_page(page);
2769 
2770 	if (flush) {
2771 		flush_cache_page(vma, addr, pte_pfn(*ptep));
2772 		ptep_clear_flush_notify(vma, addr, ptep);
2773 		set_pte_at_notify(mm, addr, ptep, entry);
2774 		update_mmu_cache(vma, addr, ptep);
2775 	} else {
2776 		/* No need to invalidate - it was non-present before */
2777 		set_pte_at(mm, addr, ptep, entry);
2778 		update_mmu_cache(vma, addr, ptep);
2779 	}
2780 
2781 	pte_unmap_unlock(ptep, ptl);
2782 	*src = MIGRATE_PFN_MIGRATE;
2783 	return;
2784 
2785 unlock_abort:
2786 	pte_unmap_unlock(ptep, ptl);
2787 abort:
2788 	*src &= ~MIGRATE_PFN_MIGRATE;
2789 }
2790 
2791 /**
2792  * migrate_vma_pages() - migrate meta-data from src page to dst page
2793  * @migrate: migrate struct containing all migration information
2794  *
2795  * This migrates struct page meta-data from source struct page to destination
2796  * struct page. This effectively finishes the migration from source page to the
2797  * destination page.
2798  */
2799 void migrate_vma_pages(struct migrate_vma *migrate)
2800 {
2801 	const unsigned long npages = migrate->npages;
2802 	const unsigned long start = migrate->start;
2803 	struct mmu_notifier_range range;
2804 	unsigned long addr, i;
2805 	bool notified = false;
2806 
2807 	for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
2808 		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2809 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2810 		struct address_space *mapping;
2811 		int r;
2812 
2813 		if (!newpage) {
2814 			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2815 			continue;
2816 		}
2817 
2818 		if (!page) {
2819 			if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
2820 				continue;
2821 			if (!notified) {
2822 				notified = true;
2823 
2824 				mmu_notifier_range_init_owner(&range,
2825 					MMU_NOTIFY_MIGRATE, 0, migrate->vma,
2826 					migrate->vma->vm_mm, addr, migrate->end,
2827 					migrate->pgmap_owner);
2828 				mmu_notifier_invalidate_range_start(&range);
2829 			}
2830 			migrate_vma_insert_page(migrate, addr, newpage,
2831 						&migrate->src[i]);
2832 			continue;
2833 		}
2834 
2835 		mapping = page_mapping(page);
2836 
2837 		if (is_zone_device_page(newpage)) {
2838 			if (is_device_private_page(newpage)) {
2839 				/*
2840 				 * For now we only support private anonymous memory when
2841 				 * migrating to un-addressable device memory.
2842 				 */
2843 				if (mapping) {
2844 					migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2845 					continue;
2846 				}
2847 			} else {
2848 				/*
2849 				 * Other types of ZONE_DEVICE page are not
2850 				 * supported.
2851 				 */
2852 				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2853 				continue;
2854 			}
2855 		}
2856 
2857 		r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
2858 		if (r != MIGRATEPAGE_SUCCESS)
2859 			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2860 	}
2861 
2862 	/*
2863 	 * No need to double call mmu_notifier->invalidate_range() callback as
2864 	 * the above ptep_clear_flush_notify() inside migrate_vma_insert_page()
2865 	 * has already called it.
2866 	 */
2867 	if (notified)
2868 		mmu_notifier_invalidate_range_only_end(&range);
2869 }
2870 EXPORT_SYMBOL(migrate_vma_pages);
2871 
2872 /**
2873  * migrate_vma_finalize() - restore CPU page table entry
2874  * @migrate: migrate struct containing all migration information
2875  *
2876  * This replaces the special migration pte entry with either a mapping to the
2877  * new page if migration was successful for that page, or to the original page
2878  * otherwise.
2879  *
2880  * This also unlocks the pages and puts them back on the LRU, or, for device
2881  * pages, drops the extra refcount.
2882  */
2883 void migrate_vma_finalize(struct migrate_vma *migrate)
2884 {
2885 	const unsigned long npages = migrate->npages;
2886 	unsigned long i;
2887 
2888 	for (i = 0; i < npages; i++) {
2889 		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2890 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2891 
2892 		if (!page) {
2893 			if (newpage) {
2894 				unlock_page(newpage);
2895 				put_page(newpage);
2896 			}
2897 			continue;
2898 		}
2899 
2900 		if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
2901 			if (newpage) {
2902 				unlock_page(newpage);
2903 				put_page(newpage);
2904 			}
2905 			newpage = page;
2906 		}
2907 
2908 		remove_migration_ptes(page, newpage, false);
2909 		unlock_page(page);
2910 
2911 		if (is_zone_device_page(page))
2912 			put_page(page);
2913 		else
2914 			putback_lru_page(page);
2915 
2916 		if (newpage != page) {
2917 			unlock_page(newpage);
2918 			if (is_zone_device_page(newpage))
2919 				put_page(newpage);
2920 			else
2921 				putback_lru_page(newpage);
2922 		}
2923 	}
2924 }
2925 EXPORT_SYMBOL(migrate_vma_finalize);
2926 #endif /* CONFIG_DEVICE_PRIVATE */
2927 
2928 /*
2929  * node_demotion[] example:
2930  *
2931  * Consider a system with two sockets.  Each socket has
2932  * three classes of memory attached: fast, medium and slow.
2933  * Each memory class is placed in its own NUMA node.  The
2934  * CPUs are placed in the node with the "fast" memory.  The
2935  * 6 NUMA nodes (0-5) might be split among the sockets like
2936  * this:
2937  *
2938  *	Socket A: 0, 1, 2
2939  *	Socket B: 3, 4, 5
2940  *
2941  * When Node 0 fills up, its memory should be migrated to
2942  * Node 1.  When Node 1 fills up, it should be migrated to
2943  * Node 2.  The migration paths start on the nodes with the
2944  * processors (since allocations default to this node) and
2945  * fast memory, progress through medium and end with the
2946  * slow memory:
2947  *
2948  *	0 -> 1 -> 2 -> stop
2949  *	3 -> 4 -> 5 -> stop
2950  *
2951  * This is represented in the node_demotion[] like this:
2952  *
2953  *	{  nr=1, nodes[0]=1 }, // Node 0 migrates to 1
2954  *	{  nr=1, nodes[0]=2 }, // Node 1 migrates to 2
2955  *	{  nr=0, nodes[0]=-1 }, // Node 2 does not migrate
2956  *	{  nr=1, nodes[0]=4 }, // Node 3 migrates to 4
2957  *	{  nr=1, nodes[0]=5 }, // Node 4 migrates to 5
2958  *	{  nr=0, nodes[0]=-1 }, // Node 5 does not migrate
2959  *
2960  * Moreover, some systems may have multiple slow memory nodes.
2961  * Suppose a system has one socket with 3 memory nodes: node 0
2962  * is fast memory, nodes 1 and 2 are both slow memory, and the
2963  * distance from the fast memory node to each slow memory node
2964  * is the same. Then the migration path should be:
2965  *
2966  *	0 -> 1/2 -> stop
2967  *
2968  * This is represented in the node_demotion[] like this:
2969  *	{ nr=2, {nodes[0]=1, nodes[1]=2} }, // Node 0 migrates to node 1 and node 2
2970  *	{ nr=0, nodes[0]=-1, }, // Node 1 does not migrate
2971  *	{ nr=0, nodes[0]=-1, }, // Node 2 does not migrate
2972  */
2973 
2974 /*
2975  * Writes to this array occur without locking.  Cycles are
2976  * not allowed: Node X demotes to Y which demotes to X...
2977  *
2978  * If multiple reads are performed, a single rcu_read_lock()
2979  * must be held over all reads to ensure that no cycles are
2980  * observed.
2981  */
2982 #define DEFAULT_DEMOTION_TARGET_NODES 15
2983 
2984 #if MAX_NUMNODES < DEFAULT_DEMOTION_TARGET_NODES
2985 #define DEMOTION_TARGET_NODES	(MAX_NUMNODES - 1)
2986 #else
2987 #define DEMOTION_TARGET_NODES	DEFAULT_DEMOTION_TARGET_NODES
2988 #endif
2989 
2990 struct demotion_nodes {
2991 	unsigned short nr;
2992 	short nodes[DEMOTION_TARGET_NODES];
2993 };
2994 
2995 static struct demotion_nodes *node_demotion __read_mostly;
2996 
2997 /**
2998  * next_demotion_node() - Get the next node in the demotion path
2999  * @node: The starting node to lookup the next node
3000  *
3001  * Return: node id for next memory node in the demotion path hierarchy
3002  * from @node; NUMA_NO_NODE if @node is terminal.  This does not keep
3003  * @node online or guarantee that it *continues* to be the next demotion
3004  * target.
3005  */
3006 int next_demotion_node(int node)
3007 {
3008 	struct demotion_nodes *nd;
3009 	unsigned short target_nr, index;
3010 	int target;
3011 
3012 	if (!node_demotion)
3013 		return NUMA_NO_NODE;
3014 
3015 	nd = &node_demotion[node];
3016 
3017 	/*
3018 	 * node_demotion[] is updated without excluding this
3019 	 * function from running.  RCU doesn't provide any
3020 	 * compiler barriers, so the READ_ONCE() is required
3021 	 * to avoid compiler reordering or read merging.
3022 	 *
3023 	 * Make sure to use RCU over entire code blocks if
3024 	 * node_demotion[] reads need to be consistent.
3025 	 */
3026 	rcu_read_lock();
3027 	target_nr = READ_ONCE(nd->nr);
3028 
3029 	switch (target_nr) {
3030 	case 0:
3031 		target = NUMA_NO_NODE;
3032 		goto out;
3033 	case 1:
3034 		index = 0;
3035 		break;
3036 	default:
3037 		/*
3038 		 * If there are multiple target nodes, just select one
3039 		 * target node randomly.
3040 		 *
3041 		 * Round-robin selection could be used instead, but that
3042 		 * would need another field in node_demotion[] to record the
3043 		 * last selected target node, and updating that field could
3044 		 * cause cache ping-pong as it keeps changing. Introducing
3045 		 * per-CPU data would avoid the caching issue but seems more
3046 		 * complicated. So selecting a target node randomly seems
3047 		 * better for now.
3048 		 */
3049 		index = get_random_int() % target_nr;
3050 		break;
3051 	}
3052 
3053 	target = READ_ONCE(nd->nodes[index]);
3054 
3055 out:
3056 	rcu_read_unlock();
3057 	return target;
3058 }
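
/*
 * Illustrative usage sketch: a reclaim-side caller (e.g. demote_page_list()
 * in mm/vmscan.c) walks one step of the demotion chain roughly like this:
 *
 *	int target_nid = next_demotion_node(pgdat->node_id);
 *
 *	if (target_nid == NUMA_NO_NODE)
 *		return 0;	// terminal node: reclaim normally instead
 *	// otherwise migrate the pages towards target_nid rather than
 *	// discarding them
 */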
3059 
3060 #if defined(CONFIG_HOTPLUG_CPU)
3061 /* Disable reclaim-based migration. */
3062 static void __disable_all_migrate_targets(void)
3063 {
3064 	int node, i;
3065 
3066 	if (!node_demotion)
3067 		return;
3068 
3069 	for_each_online_node(node) {
3070 		node_demotion[node].nr = 0;
3071 		for (i = 0; i < DEMOTION_TARGET_NODES; i++)
3072 			node_demotion[node].nodes[i] = NUMA_NO_NODE;
3073 	}
3074 }
3075 
3076 static void disable_all_migrate_targets(void)
3077 {
3078 	__disable_all_migrate_targets();
3079 
3080 	/*
3081 	 * Ensure that the "disable" is visible across the system.
3082 	 * Readers will see either a combination of before+disable
3083 	 * state or disable+after.  They will never see before and
3084 	 * after state together.
3085 	 *
3086 	 * The before+after state together might have cycles and
3087 	 * could cause readers to do things like loop until this
3088 	 * function finishes.  This ensures they can only see a
3089 	 * single "bad" read and would, for instance, only loop
3090 	 * once.
3091 	 */
3092 	synchronize_rcu();
3093 }
3094 
3095 /*
3096  * Find an automatic demotion target for 'node'.
3097  * Failing here is OK.  It might just indicate
3098  * being at the end of a chain.
3099  */
3100 static int establish_migrate_target(int node, nodemask_t *used,
3101 				    int best_distance)
3102 {
3103 	int migration_target, index, val;
3104 	struct demotion_nodes *nd;
3105 
3106 	if (!node_demotion)
3107 		return NUMA_NO_NODE;
3108 
3109 	nd = &node_demotion[node];
3110 
3111 	migration_target = find_next_best_node(node, used);
3112 	if (migration_target == NUMA_NO_NODE)
3113 		return NUMA_NO_NODE;
3114 
3115 	/*
3116 	 * If the node already has a migration target, that target was
3117 	 * chosen at the best distance. Still check whether this node can
3118 	 * be demoted to additional target nodes that share that same
3119 	 * best distance.
3120 	 */
3121 	if (best_distance != -1) {
3122 		val = node_distance(node, migration_target);
3123 		if (val > best_distance)
3124 			return NUMA_NO_NODE;
3125 	}
3126 
3127 	index = nd->nr;
3128 	if (WARN_ONCE(index >= DEMOTION_TARGET_NODES,
3129 		      "Exceeds maximum demotion target nodes\n"))
3130 		return NUMA_NO_NODE;
3131 
3132 	nd->nodes[index] = migration_target;
3133 	nd->nr++;
3134 
3135 	return migration_target;
3136 }
3137 
3138 /*
3139  * When memory fills up on a node, memory contents can be
3140  * automatically migrated to another node instead of
3141  * discarded at reclaim.
3142  *
3143  * Establish a "migration path" which will start at nodes
3144  * with CPUs and will follow the priorities used to build the
3145  * page allocator zonelists.
3146  *
3147  * The difference here is that cycles must be avoided.  If
3148  * node0 migrates to node1, then neither node1, nor anything
3149  * node1 migrates to can migrate to node0. Also one node can
3150  * be migrated to multiple nodes if the target nodes all have
3151  * the same best distance to the source node.
3152  *
3153  * This function can run simultaneously with readers of
3154  * node_demotion[].  However, it can not run simultaneously
3155  * with itself.  Exclusion is provided by memory hotplug events
3156  * being single-threaded.
3157  */
3158 static void __set_migration_target_nodes(void)
3159 {
3160 	nodemask_t next_pass	= NODE_MASK_NONE;
3161 	nodemask_t this_pass	= NODE_MASK_NONE;
3162 	nodemask_t used_targets = NODE_MASK_NONE;
3163 	int node, best_distance;
3164 
3165 	/*
3166 	 * Avoid any oddities like cycles that could occur
3167 	 * from changes in the topology.  This will leave
3168 	 * a momentary gap when migration is disabled.
3169 	 */
3170 	disable_all_migrate_targets();
3171 
3172 	/*
3173 	 * Allocations go close to CPUs, first.  Assume that
3174 	 * the migration path starts at the nodes with CPUs.
3175 	 */
3176 	next_pass = node_states[N_CPU];
3177 again:
3178 	this_pass = next_pass;
3179 	next_pass = NODE_MASK_NONE;
3180 	/*
3181 	 * To avoid cycles in the migration "graph", ensure
3182 	 * that migration sources are not future targets by
3183 	 * setting them in 'used_targets'.  Do this only
3184 	 * once per pass so that multiple source nodes can
3185 	 * share a target node.
3186 	 *
3187 	 * 'used_targets' will become unavailable in future
3188 	 * passes.  This limits some opportunities for
3189 	 * multiple source nodes to share a destination.
3190 	 */
3191 	nodes_or(used_targets, used_targets, this_pass);
3192 
3193 	for_each_node_mask(node, this_pass) {
3194 		best_distance = -1;
3195 
3196 		/*
3197 		 * Try to set up the migration path for the node. There can be
3198 		 * multiple target nodes, so loop to find all of the targets
3199 		 * that share the same best distance.
3200 		 */
3201 		do {
3202 			int target_node =
3203 				establish_migrate_target(node, &used_targets,
3204 							 best_distance);
3205 
3206 			if (target_node == NUMA_NO_NODE)
3207 				break;
3208 
3209 			if (best_distance == -1)
3210 				best_distance = node_distance(node, target_node);
3211 
3212 			/*
3213 			 * Visit targets from this pass in the next pass.
3214 			 * Eventually, every node will have been part of
3215 			 * a pass, and will become set in 'used_targets'.
3216 			 */
3217 			node_set(target_node, next_pass);
3218 		} while (1);
3219 	}
3220 	/*
3221 	 * 'next_pass' contains nodes which became migration
3222 	 * targets in this pass.  Make additional passes until
3223 	 * no more migration targets are available.
3224 	 */
3225 	if (!nodes_empty(next_pass))
3226 		goto again;
3227 }
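
/*
 * Worked example for illustration, using the two-socket topology from the
 * node_demotion[] comment above (CPUs on nodes 0 and 3):
 *
 *	pass 1: this_pass = {0,3}, targets 1 and 4 -> next_pass = {1,4}
 *	pass 2: this_pass = {1,4}, targets 2 and 5 -> next_pass = {2,5}
 *	pass 3: this_pass = {2,5}, no targets left  -> done
 *
 * Nodes seen in earlier passes accumulate in 'used_targets', which is what
 * prevents cycles such as node 2 demoting back to node 0.
 */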
3228 
3229 /*
3230  * For callers that do not hold get_online_mems() already.
3231  */
3232 static void set_migration_target_nodes(void)
3233 {
3234 	get_online_mems();
3235 	__set_migration_target_nodes();
3236 	put_online_mems();
3237 }
3238 
3239 /*
3240  * This leaves migrate-on-reclaim transiently disabled between
3241  * the MEM_GOING_OFFLINE and MEM_OFFLINE events.  This runs
3242  * whether reclaim-based migration is enabled or not, which
3243  * ensures that the user can turn reclaim-based migration on or off
3244  * at any time without needing to recalculate migration targets.
3245  *
3246  * These callbacks already hold get_online_mems().  That is why
3247  * __set_migration_target_nodes() can be used as opposed to
3248  * set_migration_target_nodes().
3249  */
3250 static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
3251 						 unsigned long action, void *_arg)
3252 {
3253 	struct memory_notify *arg = _arg;
3254 
3255 	/*
3256 	 * Only update the node migration order when a node is
3257 	 * changing status, like online->offline.  This avoids
3258 	 * the overhead of synchronize_rcu() in most cases.
3259 	 */
3260 	if (arg->status_change_nid < 0)
3261 		return notifier_from_errno(0);
3262 
3263 	switch (action) {
3264 	case MEM_GOING_OFFLINE:
3265 		/*
3266 		 * Make sure there are no transient states where
3267 		 * an offline node is a migration target.  This
3268 		 * will leave migration disabled until the offline
3269 		 * completes and the MEM_OFFLINE case below runs.
3270 		 */
3271 		disable_all_migrate_targets();
3272 		break;
3273 	case MEM_OFFLINE:
3274 	case MEM_ONLINE:
3275 		/*
3276 		 * Recalculate the target nodes once the node
3277 		 * reaches its final state (online or offline).
3278 		 */
3279 		__set_migration_target_nodes();
3280 		break;
3281 	case MEM_CANCEL_OFFLINE:
3282 		/*
3283 		 * MEM_GOING_OFFLINE disabled all the migration
3284 		 * targets.  Reenable them.
3285 		 */
3286 		__set_migration_target_nodes();
3287 		break;
3288 	case MEM_GOING_ONLINE:
3289 	case MEM_CANCEL_ONLINE:
3290 		break;
3291 	}
3292 
3293 	return notifier_from_errno(0);
3294 }
3295 
3296 /*
3297  * React to hotplug events that might affect the migration targets
3298  * like events that online or offline NUMA nodes.
3299  *
3300  * The ordering is also currently dependent on which nodes have
3301  * CPUs.  That means we need CPU on/offline notification too.
3302  */
3303 static int migration_online_cpu(unsigned int cpu)
3304 {
3305 	set_migration_target_nodes();
3306 	return 0;
3307 }
3308 
3309 static int migration_offline_cpu(unsigned int cpu)
3310 {
3311 	set_migration_target_nodes();
3312 	return 0;
3313 }
3314 
3315 static int __init migrate_on_reclaim_init(void)
3316 {
3317 	int ret;
3318 
3319 	node_demotion = kmalloc_array(nr_node_ids,
3320 				      sizeof(struct demotion_nodes),
3321 				      GFP_KERNEL);
3322 	WARN_ON(!node_demotion);
3323 
3324 	ret = cpuhp_setup_state_nocalls(CPUHP_MM_DEMOTION_DEAD, "mm/demotion:offline",
3325 					NULL, migration_offline_cpu);
3326 	/*
3327 	 * In the unlikely case that this fails, the automatic
3328 	 * migration targets may become suboptimal for nodes
3329 	 * where N_CPU changes.  With such a small impact in a
3330 	 * rare case, do not bother trying to do anything special.
3331 	 */
3332 	WARN_ON(ret < 0);
3333 	ret = cpuhp_setup_state(CPUHP_AP_MM_DEMOTION_ONLINE, "mm/demotion:online",
3334 				migration_online_cpu, NULL);
3335 	WARN_ON(ret < 0);
3336 
3337 	hotplug_memory_notifier(migrate_on_reclaim_callback, 100);
3338 	return 0;
3339 }
3340 late_initcall(migrate_on_reclaim_init);
3341 #endif /* CONFIG_HOTPLUG_CPU */
3342 
3343 bool numa_demotion_enabled = false;
3344 
3345 #ifdef CONFIG_SYSFS
3346 static ssize_t numa_demotion_enabled_show(struct kobject *kobj,
3347 					  struct kobj_attribute *attr, char *buf)
3348 {
3349 	return sysfs_emit(buf, "%s\n",
3350 			  numa_demotion_enabled ? "true" : "false");
3351 }
3352 
3353 static ssize_t numa_demotion_enabled_store(struct kobject *kobj,
3354 					   struct kobj_attribute *attr,
3355 					   const char *buf, size_t count)
3356 {
3357 	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
3358 		numa_demotion_enabled = true;
3359 	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
3360 		numa_demotion_enabled = false;
3361 	else
3362 		return -EINVAL;
3363 
3364 	return count;
3365 }
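
/*
 * Usage note: since mm_kobj corresponds to /sys/kernel/mm and the "numa"
 * kobject is created below, this knob is exposed as
 * /sys/kernel/mm/numa/demotion_enabled and accepts "true"/"1" or "false"/"0",
 * e.g.:
 *
 *	echo true > /sys/kernel/mm/numa/demotion_enabled
 */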
3366 
3367 static struct kobj_attribute numa_demotion_enabled_attr =
3368 	__ATTR(demotion_enabled, 0644, numa_demotion_enabled_show,
3369 	       numa_demotion_enabled_store);
3370 
3371 static struct attribute *numa_attrs[] = {
3372 	&numa_demotion_enabled_attr.attr,
3373 	NULL,
3374 };
3375 
3376 static const struct attribute_group numa_attr_group = {
3377 	.attrs = numa_attrs,
3378 };
3379 
3380 static int __init numa_init_sysfs(void)
3381 {
3382 	int err;
3383 	struct kobject *numa_kobj;
3384 
3385 	numa_kobj = kobject_create_and_add("numa", mm_kobj);
3386 	if (!numa_kobj) {
3387 		pr_err("failed to create numa kobject\n");
3388 		return -ENOMEM;
3389 	}
3390 	err = sysfs_create_group(numa_kobj, &numa_attr_group);
3391 	if (err) {
3392 		pr_err("failed to register numa group\n");
3393 		goto delete_obj;
3394 	}
3395 	return 0;
3396 
3397 delete_obj:
3398 	kobject_put(numa_kobj);
3399 	return err;
3400 }
3401 subsys_initcall(numa_init_sysfs);
3402 #endif
3403