xref: /linux/mm/migrate.c (revision 3e4f9376659d8646302a317848ffb3a12101aa89)
1 /*
2  * Memory Migration functionality - linux/mm/migrate.c
3  *
4  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
5  *
6  * Page migration was first developed in the context of the memory hotplug
7  * project. The main authors of the migration code are:
8  *
9  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
10  * Hirokazu Takahashi <taka@valinux.co.jp>
11  * Dave Hansen <haveblue@us.ibm.com>
12  * Christoph Lameter
13  */
14 
15 #include <linux/migrate.h>
16 #include <linux/export.h>
17 #include <linux/swap.h>
18 #include <linux/swapops.h>
19 #include <linux/pagemap.h>
20 #include <linux/buffer_head.h>
21 #include <linux/mm_inline.h>
22 #include <linux/nsproxy.h>
23 #include <linux/pagevec.h>
24 #include <linux/ksm.h>
25 #include <linux/rmap.h>
26 #include <linux/topology.h>
27 #include <linux/cpu.h>
28 #include <linux/cpuset.h>
29 #include <linux/writeback.h>
30 #include <linux/mempolicy.h>
31 #include <linux/vmalloc.h>
32 #include <linux/security.h>
33 #include <linux/backing-dev.h>
34 #include <linux/compaction.h>
35 #include <linux/syscalls.h>
36 #include <linux/hugetlb.h>
37 #include <linux/hugetlb_cgroup.h>
38 #include <linux/gfp.h>
39 #include <linux/balloon_compaction.h>
40 #include <linux/mmu_notifier.h>
41 #include <linux/page_idle.h>
42 #include <linux/page_owner.h>
43 #include <linux/sched/mm.h>
44 
45 #include <asm/tlbflush.h>
46 
47 #define CREATE_TRACE_POINTS
48 #include <trace/events/migrate.h>
49 
50 #include "internal.h"
51 
52 /*
53  * migrate_prep() needs to be called before we start compiling a list of pages
54  * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
55  * undesirable, use migrate_prep_local()
56  */
57 int migrate_prep(void)
58 {
59 	/*
60 	 * Clear the LRU lists so pages can be isolated.
61 	 * Note that pages may be moved off the LRU after we have
62 	 * drained them. Those pages will fail to migrate like other
63 	 * pages that may be busy.
64 	 */
65 	lru_add_drain_all();
66 
67 	return 0;
68 }
69 
70 /* Do the necessary work of migrate_prep but not if it involves other CPUs */
71 int migrate_prep_local(void)
72 {
73 	lru_add_drain();
74 
75 	return 0;
76 }
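
As a hedged illustration of the calling convention described in the comment above (this block is editorial, not part of migrate.c), a caller that batches LRU pages for migration typically looks roughly like the do_move_page_to_node_array() loop later in this file; the list and loop below are a simplified, hypothetical sketch:

	LIST_HEAD(pagelist);			/* hypothetical caller-side list */

	migrate_prep();				/* drain per-CPU LRU caches on all CPUs */

	/* for each candidate page, with a reference already held: */
	if (!isolate_lru_page(page)) {
		list_add_tail(&page->lru, &pagelist);
		inc_node_page_state(page, NR_ISOLATED_ANON +
				    page_is_file_cache(page));
	}

	/* hand &pagelist to migrate_pages(); on failure, call putback_movable_pages() */
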
77 
78 int isolate_movable_page(struct page *page, isolate_mode_t mode)
79 {
80 	struct address_space *mapping;
81 
82 	/*
83 	 * Avoid burning cycles on pages that are still under __free_pages(),
84 	 * or that just got freed under us.
85 	 *
86 	 * In case we 'win' a race for a movable page being freed under us and
87 	 * raise its refcount, preventing __free_pages() from doing its job,
88 	 * the put_page() at the end of this block will take care of
89 	 * releasing this page, thus avoiding a nasty leakage.
90 	 */
91 	if (unlikely(!get_page_unless_zero(page)))
92 		goto out;
93 
94 	/*
95 	 * Check PageMovable before taking the PG_lock, because the page's owner
96 	 * assumes that nobody touches the PG_lock of a newly allocated page,
97 	 * so unconditionally grabbing the lock would break the owner's assumptions.
98 	 */
99 	if (unlikely(!__PageMovable(page)))
100 		goto out_putpage;
101 	/*
102 	 * As movable pages are not isolated from LRU lists, concurrent
103 	 * compaction threads can race against page migration functions
104 	 * as well as against the release of a page.
105 	 *
106 	 * In order to avoid having an already isolated movable page
107 	 * being (wrongly) re-isolated while it is under migration,
108 	 * or to avoid attempting to isolate pages being released,
109 	 * let's be sure we have the page lock
110 	 * before proceeding with the movable page isolation steps.
111 	 */
112 	if (unlikely(!trylock_page(page)))
113 		goto out_putpage;
114 
115 	if (!PageMovable(page) || PageIsolated(page))
116 		goto out_no_isolated;
117 
118 	mapping = page_mapping(page);
119 	VM_BUG_ON_PAGE(!mapping, page);
120 
121 	if (!mapping->a_ops->isolate_page(page, mode))
122 		goto out_no_isolated;
123 
124 	/* Driver shouldn't use PG_isolated bit of page->flags */
125 	WARN_ON_ONCE(PageIsolated(page));
126 	__SetPageIsolated(page);
127 	unlock_page(page);
128 
129 	return 0;
130 
131 out_no_isolated:
132 	unlock_page(page);
133 out_putpage:
134 	put_page(page);
135 out:
136 	return -EBUSY;
137 }
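
For context (editorial note, not part of migrate.c): isolate_movable_page() only succeeds on pages whose driver has opted into non-LRU migration by providing movable address_space_operations and calling __SetPageMovable(). A hedged, simplified outline, with hypothetical "demo_" callback names, might look like:

	/* hypothetical driver hooks; the fields exist in struct address_space_operations */
	static const struct address_space_operations demo_movable_aops = {
		.isolate_page	= demo_isolate_page,	/* called from isolate_movable_page() */
		.migratepage	= demo_migratepage,	/* called from move_to_new_page() */
		.putback_page	= demo_putback_page,	/* called from putback_movable_page() */
	};

	/* when the driver hands a page out, it tags it as movable: */
	__SetPageMovable(page, mapping);	/* mapping->a_ops == &demo_movable_aops */
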
138 
139 /* It should be called on a page that is PG_movable */
140 void putback_movable_page(struct page *page)
141 {
142 	struct address_space *mapping;
143 
144 	VM_BUG_ON_PAGE(!PageLocked(page), page);
145 	VM_BUG_ON_PAGE(!PageMovable(page), page);
146 	VM_BUG_ON_PAGE(!PageIsolated(page), page);
147 
148 	mapping = page_mapping(page);
149 	mapping->a_ops->putback_page(page);
150 	__ClearPageIsolated(page);
151 }
152 
153 /*
154  * Put previously isolated pages back onto the appropriate lists
155  * from where they were once taken off for compaction/migration.
156  *
157  * This function shall be used whenever the isolated pageset has been
158  * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
159  * and isolate_huge_page().
160  */
161 void putback_movable_pages(struct list_head *l)
162 {
163 	struct page *page;
164 	struct page *page2;
165 
166 	list_for_each_entry_safe(page, page2, l, lru) {
167 		if (unlikely(PageHuge(page))) {
168 			putback_active_hugepage(page);
169 			continue;
170 		}
171 		list_del(&page->lru);
172 		/*
173 		 * We isolated a non-LRU movable page, so here we can use
174 		 * __PageMovable because an LRU page's mapping cannot have
175 		 * PAGE_MAPPING_MOVABLE.
176 		 */
177 		if (unlikely(__PageMovable(page))) {
178 			VM_BUG_ON_PAGE(!PageIsolated(page), page);
179 			lock_page(page);
180 			if (PageMovable(page))
181 				putback_movable_page(page);
182 			else
183 				__ClearPageIsolated(page);
184 			unlock_page(page);
185 			put_page(page);
186 		} else {
187 			dec_node_page_state(page, NR_ISOLATED_ANON +
188 					page_is_file_cache(page));
189 			putback_lru_page(page);
190 		}
191 	}
192 }
193 
194 /*
195  * Restore a potential migration pte to a working pte entry
196  */
197 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
198 				 unsigned long addr, void *old)
199 {
200 	struct page_vma_mapped_walk pvmw = {
201 		.page = old,
202 		.vma = vma,
203 		.address = addr,
204 		.flags = PVMW_SYNC | PVMW_MIGRATION,
205 	};
206 	struct page *new;
207 	pte_t pte;
208 	swp_entry_t entry;
209 
210 	VM_BUG_ON_PAGE(PageTail(page), page);
211 	while (page_vma_mapped_walk(&pvmw)) {
212 		if (PageKsm(page))
213 			new = page;
214 		else
215 			new = page - pvmw.page->index +
216 				linear_page_index(vma, pvmw.address);
217 
218 		get_page(new);
219 		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
220 		if (pte_swp_soft_dirty(*pvmw.pte))
221 			pte = pte_mksoft_dirty(pte);
222 
223 		/*
224 		 * Recheck VMA as permissions can change since migration started
225 		 */
226 		entry = pte_to_swp_entry(*pvmw.pte);
227 		if (is_write_migration_entry(entry))
228 			pte = maybe_mkwrite(pte, vma);
229 
230 		flush_dcache_page(new);
231 #ifdef CONFIG_HUGETLB_PAGE
232 		if (PageHuge(new)) {
233 			pte = pte_mkhuge(pte);
234 			pte = arch_make_huge_pte(pte, vma, new, 0);
235 			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
236 			if (PageAnon(new))
237 				hugepage_add_anon_rmap(new, vma, pvmw.address);
238 			else
239 				page_dup_rmap(new, true);
240 		} else
241 #endif
242 		{
243 			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
244 
245 			if (PageAnon(new))
246 				page_add_anon_rmap(new, vma, pvmw.address, false);
247 			else
248 				page_add_file_rmap(new, false);
249 		}
250 		if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
251 			mlock_vma_page(new);
252 
253 		/* No need to invalidate - it was non-present before */
254 		update_mmu_cache(vma, pvmw.address, pvmw.pte);
255 	}
256 
257 	return true;
258 }
259 
260 /*
261  * Get rid of all migration entries and replace them by
262  * references to the indicated page.
263  */
264 void remove_migration_ptes(struct page *old, struct page *new, bool locked)
265 {
266 	struct rmap_walk_control rwc = {
267 		.rmap_one = remove_migration_pte,
268 		.arg = old,
269 	};
270 
271 	if (locked)
272 		rmap_walk_locked(new, &rwc);
273 	else
274 		rmap_walk(new, &rwc);
275 }
276 
277 /*
278  * Something used the pte of a page under migration. We need to
279  * get to the page and wait until migration is finished.
280  * When we return from this function the fault will be retried.
281  */
282 void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
283 				spinlock_t *ptl)
284 {
285 	pte_t pte;
286 	swp_entry_t entry;
287 	struct page *page;
288 
289 	spin_lock(ptl);
290 	pte = *ptep;
291 	if (!is_swap_pte(pte))
292 		goto out;
293 
294 	entry = pte_to_swp_entry(pte);
295 	if (!is_migration_entry(entry))
296 		goto out;
297 
298 	page = migration_entry_to_page(entry);
299 
300 	/*
301 	 * Once the radix-tree replacement step of page migration has started,
302 	 * page_count *must* be zero. And we don't want to call
303 	 * wait_on_page_locked() against a page without get_page().
304 	 * So we use get_page_unless_zero() here. Even if it fails, the page
305 	 * fault will simply occur again.
306 	 */
307 	if (!get_page_unless_zero(page))
308 		goto out;
309 	pte_unmap_unlock(ptep, ptl);
310 	wait_on_page_locked(page);
311 	put_page(page);
312 	return;
313 out:
314 	pte_unmap_unlock(ptep, ptl);
315 }
316 
317 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
318 				unsigned long address)
319 {
320 	spinlock_t *ptl = pte_lockptr(mm, pmd);
321 	pte_t *ptep = pte_offset_map(pmd, address);
322 	__migration_entry_wait(mm, ptep, ptl);
323 }
324 
325 void migration_entry_wait_huge(struct vm_area_struct *vma,
326 		struct mm_struct *mm, pte_t *pte)
327 {
328 	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
329 	__migration_entry_wait(mm, pte, ptl);
330 }
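
As a hedged illustration (editorial, paraphrased rather than verbatim), the page fault path in mm/memory.c uses these helpers roughly as follows when it encounters a migration entry:

	/* sketch of the do_swap_page() caller */
	entry = pte_to_swp_entry(orig_pte);
	if (unlikely(non_swap_entry(entry))) {
		if (is_migration_entry(entry))
			/* sleeps until the migrating page is unlocked; the fault is then retried */
			migration_entry_wait(vma->vm_mm, pmd, address);
		...
	}
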
331 
332 #ifdef CONFIG_BLOCK
333 /* Returns true if all buffers are successfully locked */
334 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
335 							enum migrate_mode mode)
336 {
337 	struct buffer_head *bh = head;
338 
339 	/* Simple case, sync compaction */
340 	if (mode != MIGRATE_ASYNC) {
341 		do {
342 			get_bh(bh);
343 			lock_buffer(bh);
344 			bh = bh->b_this_page;
345 
346 		} while (bh != head);
347 
348 		return true;
349 	}
350 
351 	/* async case, we cannot block on lock_buffer so use trylock_buffer */
352 	do {
353 		get_bh(bh);
354 		if (!trylock_buffer(bh)) {
355 			/*
356 			 * We failed to lock the buffer and cannot stall in
357 			 * async migration. Release the locks taken so far.
358 			 */
359 			struct buffer_head *failed_bh = bh;
360 			put_bh(failed_bh);
361 			bh = head;
362 			while (bh != failed_bh) {
363 				unlock_buffer(bh);
364 				put_bh(bh);
365 				bh = bh->b_this_page;
366 			}
367 			return false;
368 		}
369 
370 		bh = bh->b_this_page;
371 	} while (bh != head);
372 	return true;
373 }
374 #else
375 static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
376 							enum migrate_mode mode)
377 {
378 	return true;
379 }
380 #endif /* CONFIG_BLOCK */
381 
382 /*
383  * Replace the page in the mapping.
384  *
385  * The number of remaining references must be:
386  * 1 for anonymous pages without a mapping
387  * 2 for pages with a mapping
388  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
389  */
390 int migrate_page_move_mapping(struct address_space *mapping,
391 		struct page *newpage, struct page *page,
392 		struct buffer_head *head, enum migrate_mode mode,
393 		int extra_count)
394 {
395 	struct zone *oldzone, *newzone;
396 	int dirty;
397 	int expected_count = 1 + extra_count;
398 	void **pslot;
399 
400 	if (!mapping) {
401 		/* Anonymous page without mapping */
402 		if (page_count(page) != expected_count)
403 			return -EAGAIN;
404 
405 		/* No turning back from here */
406 		newpage->index = page->index;
407 		newpage->mapping = page->mapping;
408 		if (PageSwapBacked(page))
409 			__SetPageSwapBacked(newpage);
410 
411 		return MIGRATEPAGE_SUCCESS;
412 	}
413 
414 	oldzone = page_zone(page);
415 	newzone = page_zone(newpage);
416 
417 	spin_lock_irq(&mapping->tree_lock);
418 
419 	pslot = radix_tree_lookup_slot(&mapping->page_tree,
420  					page_index(page));
421 
422 	expected_count += 1 + page_has_private(page);
423 	if (page_count(page) != expected_count ||
424 		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
425 		spin_unlock_irq(&mapping->tree_lock);
426 		return -EAGAIN;
427 	}
428 
429 	if (!page_ref_freeze(page, expected_count)) {
430 		spin_unlock_irq(&mapping->tree_lock);
431 		return -EAGAIN;
432 	}
433 
434 	/*
435 	 * In the async migration case of moving a page with buffers, lock the
436 	 * buffers using trylock before the mapping is moved. If the mapping
437 	 * were moved first and we then failed to lock the buffers, we could not
438 	 * move the mapping back due to an elevated page count, and would have to
439 	 * block waiting on other references to be dropped.
440 	 */
441 	if (mode == MIGRATE_ASYNC && head &&
442 			!buffer_migrate_lock_buffers(head, mode)) {
443 		page_ref_unfreeze(page, expected_count);
444 		spin_unlock_irq(&mapping->tree_lock);
445 		return -EAGAIN;
446 	}
447 
448 	/*
449 	 * Now we know that no one else is looking at the page:
450 	 * no turning back from here.
451 	 */
452 	newpage->index = page->index;
453 	newpage->mapping = page->mapping;
454 	get_page(newpage);	/* add cache reference */
455 	if (PageSwapBacked(page)) {
456 		__SetPageSwapBacked(newpage);
457 		if (PageSwapCache(page)) {
458 			SetPageSwapCache(newpage);
459 			set_page_private(newpage, page_private(page));
460 		}
461 	} else {
462 		VM_BUG_ON_PAGE(PageSwapCache(page), page);
463 	}
464 
465 	/* Move dirty while page refs frozen and newpage not yet exposed */
466 	dirty = PageDirty(page);
467 	if (dirty) {
468 		ClearPageDirty(page);
469 		SetPageDirty(newpage);
470 	}
471 
472 	radix_tree_replace_slot(&mapping->page_tree, pslot, newpage);
473 
474 	/*
475 	 * Drop cache reference from old page by unfreezing
476 	 * to one less reference.
477 	 * We know this isn't the last reference.
478 	 */
479 	page_ref_unfreeze(page, expected_count - 1);
480 
481 	spin_unlock(&mapping->tree_lock);
482 	/* Leave irq disabled to prevent preemption while updating stats */
483 
484 	/*
485 	 * If moved to a different zone then also account
486 	 * the page for that zone. Other VM counters will be
487 	 * taken care of when we establish references to the
488 	 * new page and drop references to the old page.
489 	 *
490 	 * Note that anonymous pages are accounted for
491 	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
492 	 * are mapped to swap space.
493 	 */
494 	if (newzone != oldzone) {
495 		__dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
496 		__inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
497 		if (PageSwapBacked(page) && !PageSwapCache(page)) {
498 			__dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
499 			__inc_node_state(newzone->zone_pgdat, NR_SHMEM);
500 		}
501 		if (dirty && mapping_cap_account_dirty(mapping)) {
502 			__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
503 			__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
504 			__inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
505 			__inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
506 		}
507 	}
508 	local_irq_enable();
509 
510 	return MIGRATEPAGE_SUCCESS;
511 }
512 EXPORT_SYMBOL(migrate_page_move_mapping);
513 
514 /*
515  * The expected number of remaining references is the same as that
516  * of migrate_page_move_mapping().
517  */
518 int migrate_huge_page_move_mapping(struct address_space *mapping,
519 				   struct page *newpage, struct page *page)
520 {
521 	int expected_count;
522 	void **pslot;
523 
524 	spin_lock_irq(&mapping->tree_lock);
525 
526 	pslot = radix_tree_lookup_slot(&mapping->page_tree,
527 					page_index(page));
528 
529 	expected_count = 2 + page_has_private(page);
530 	if (page_count(page) != expected_count ||
531 		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
532 		spin_unlock_irq(&mapping->tree_lock);
533 		return -EAGAIN;
534 	}
535 
536 	if (!page_ref_freeze(page, expected_count)) {
537 		spin_unlock_irq(&mapping->tree_lock);
538 		return -EAGAIN;
539 	}
540 
541 	newpage->index = page->index;
542 	newpage->mapping = page->mapping;
543 
544 	get_page(newpage);
545 
546 	radix_tree_replace_slot(&mapping->page_tree, pslot, newpage);
547 
548 	page_ref_unfreeze(page, expected_count - 1);
549 
550 	spin_unlock_irq(&mapping->tree_lock);
551 
552 	return MIGRATEPAGE_SUCCESS;
553 }
554 
555 /*
556  * Gigantic pages are so large that we do not guarantee that page++ pointer
557  * arithmetic will work across the entire page.  We need something more
558  * specialized.
559  */
560 static void __copy_gigantic_page(struct page *dst, struct page *src,
561 				int nr_pages)
562 {
563 	int i;
564 	struct page *dst_base = dst;
565 	struct page *src_base = src;
566 
567 	for (i = 0; i < nr_pages; ) {
568 		cond_resched();
569 		copy_highpage(dst, src);
570 
571 		i++;
572 		dst = mem_map_next(dst, dst_base, i);
573 		src = mem_map_next(src, src_base, i);
574 	}
575 }
576 
577 static void copy_huge_page(struct page *dst, struct page *src)
578 {
579 	int i;
580 	int nr_pages;
581 
582 	if (PageHuge(src)) {
583 		/* hugetlbfs page */
584 		struct hstate *h = page_hstate(src);
585 		nr_pages = pages_per_huge_page(h);
586 
587 		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
588 			__copy_gigantic_page(dst, src, nr_pages);
589 			return;
590 		}
591 	} else {
592 		/* thp page */
593 		BUG_ON(!PageTransHuge(src));
594 		nr_pages = hpage_nr_pages(src);
595 	}
596 
597 	for (i = 0; i < nr_pages; i++) {
598 		cond_resched();
599 		copy_highpage(dst + i, src + i);
600 	}
601 }
602 
603 /*
604  * Copy the page to its new location
605  */
606 void migrate_page_copy(struct page *newpage, struct page *page)
607 {
608 	int cpupid;
609 
610 	if (PageHuge(page) || PageTransHuge(page))
611 		copy_huge_page(newpage, page);
612 	else
613 		copy_highpage(newpage, page);
614 
615 	if (PageError(page))
616 		SetPageError(newpage);
617 	if (PageReferenced(page))
618 		SetPageReferenced(newpage);
619 	if (PageUptodate(page))
620 		SetPageUptodate(newpage);
621 	if (TestClearPageActive(page)) {
622 		VM_BUG_ON_PAGE(PageUnevictable(page), page);
623 		SetPageActive(newpage);
624 	} else if (TestClearPageUnevictable(page))
625 		SetPageUnevictable(newpage);
626 	if (PageChecked(page))
627 		SetPageChecked(newpage);
628 	if (PageMappedToDisk(page))
629 		SetPageMappedToDisk(newpage);
630 
631 	/* Move dirty on pages not done by migrate_page_move_mapping() */
632 	if (PageDirty(page))
633 		SetPageDirty(newpage);
634 
635 	if (page_is_young(page))
636 		set_page_young(newpage);
637 	if (page_is_idle(page))
638 		set_page_idle(newpage);
639 
640 	/*
641 	 * Copy NUMA information to the new page, to prevent over-eager
642 	 * future migrations of this same page.
643 	 */
644 	cpupid = page_cpupid_xchg_last(page, -1);
645 	page_cpupid_xchg_last(newpage, cpupid);
646 
647 	ksm_migrate_page(newpage, page);
648 	/*
649 	 * Please do not reorder this without considering how mm/ksm.c's
650 	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
651 	 */
652 	if (PageSwapCache(page))
653 		ClearPageSwapCache(page);
654 	ClearPagePrivate(page);
655 	set_page_private(page, 0);
656 
657 	/*
658 	 * If any waiters have accumulated on the new page then
659 	 * wake them up.
660 	 */
661 	if (PageWriteback(newpage))
662 		end_page_writeback(newpage);
663 
664 	copy_page_owner(page, newpage);
665 
666 	mem_cgroup_migrate(page, newpage);
667 }
668 EXPORT_SYMBOL(migrate_page_copy);
669 
670 /************************************************************
671  *                    Migration functions
672  ***********************************************************/
673 
674 /*
675  * Common logic to directly migrate a single LRU page suitable for
676  * pages that do not use PagePrivate/PagePrivate2.
677  *
678  * Pages are locked upon entry and exit.
679  */
680 int migrate_page(struct address_space *mapping,
681 		struct page *newpage, struct page *page,
682 		enum migrate_mode mode)
683 {
684 	int rc;
685 
686 	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
687 
688 	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
689 
690 	if (rc != MIGRATEPAGE_SUCCESS)
691 		return rc;
692 
693 	migrate_page_copy(newpage, page);
694 	return MIGRATEPAGE_SUCCESS;
695 }
696 EXPORT_SYMBOL(migrate_page);
697 
698 #ifdef CONFIG_BLOCK
699 /*
700  * Migration function for pages with buffers. This function can only be used
701  * if the underlying filesystem guarantees that no other references to "page"
702  * exist.
703  */
704 int buffer_migrate_page(struct address_space *mapping,
705 		struct page *newpage, struct page *page, enum migrate_mode mode)
706 {
707 	struct buffer_head *bh, *head;
708 	int rc;
709 
710 	if (!page_has_buffers(page))
711 		return migrate_page(mapping, newpage, page, mode);
712 
713 	head = page_buffers(page);
714 
715 	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
716 
717 	if (rc != MIGRATEPAGE_SUCCESS)
718 		return rc;
719 
720 	/*
721 	 * In the async case, migrate_page_move_mapping locked the buffers
722 	 * with an IRQ-safe spinlock held. In the sync case, the buffers
723 	 * need to be locked now
724 	 */
725 	if (mode != MIGRATE_ASYNC)
726 		BUG_ON(!buffer_migrate_lock_buffers(head, mode));
727 
728 	ClearPagePrivate(page);
729 	set_page_private(newpage, page_private(page));
730 	set_page_private(page, 0);
731 	put_page(page);
732 	get_page(newpage);
733 
734 	bh = head;
735 	do {
736 		set_bh_page(bh, newpage, bh_offset(bh));
737 		bh = bh->b_this_page;
738 
739 	} while (bh != head);
740 
741 	SetPagePrivate(newpage);
742 
743 	migrate_page_copy(newpage, page);
744 
745 	bh = head;
746 	do {
747 		unlock_buffer(bh);
748  		put_bh(bh);
749 		bh = bh->b_this_page;
750 
751 	} while (bh != head);
752 
753 	return MIGRATEPAGE_SUCCESS;
754 }
755 EXPORT_SYMBOL(buffer_migrate_page);
756 #endif
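
As a hedged note (editorial, not part of migrate.c): filesystems normally do not call these helpers directly but wire them into their address_space_operations so that move_to_new_page() can find them; the table below is a hypothetical example:

	static const struct address_space_operations demo_fs_aops = {
		/* read/write methods elided */
		.migratepage	= migrate_page,		/* pages with no fs-private state */
	};

	/* filesystems that keep buffer_heads attached to their pages typically use
	 * .migratepage = buffer_migrate_page instead, so the buffers move with the page. */
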
757 
758 /*
759  * Writeback a page to clean the dirty state
760  */
761 static int writeout(struct address_space *mapping, struct page *page)
762 {
763 	struct writeback_control wbc = {
764 		.sync_mode = WB_SYNC_NONE,
765 		.nr_to_write = 1,
766 		.range_start = 0,
767 		.range_end = LLONG_MAX,
768 		.for_reclaim = 1
769 	};
770 	int rc;
771 
772 	if (!mapping->a_ops->writepage)
773 		/* No write method for the address space */
774 		return -EINVAL;
775 
776 	if (!clear_page_dirty_for_io(page))
777 		/* Someone else already triggered a write */
778 		return -EAGAIN;
779 
780 	/*
781 	 * A dirty page may imply that the underlying filesystem has
782 	 * the page on some queue. So the page must be clean for
783 	 * migration. Writeout may mean we lose the lock and the
784 	 * page state is no longer what we checked for earlier.
785 	 * At this point we know that the migration attempt cannot
786 	 * be successful.
787 	 */
788 	remove_migration_ptes(page, page, false);
789 
790 	rc = mapping->a_ops->writepage(page, &wbc);
791 
792 	if (rc != AOP_WRITEPAGE_ACTIVATE)
793 		/* unlocked. Relock */
794 		lock_page(page);
795 
796 	return (rc < 0) ? -EIO : -EAGAIN;
797 }
798 
799 /*
800  * Default handling if a filesystem does not provide a migration function.
801  */
802 static int fallback_migrate_page(struct address_space *mapping,
803 	struct page *newpage, struct page *page, enum migrate_mode mode)
804 {
805 	if (PageDirty(page)) {
806 		/* Only writeback pages in full synchronous migration */
807 		if (mode != MIGRATE_SYNC)
808 			return -EBUSY;
809 		return writeout(mapping, page);
810 	}
811 
812 	/*
813 	 * Buffers may be managed in a filesystem specific way.
814 	 * We must have no buffers or drop them.
815 	 */
816 	if (page_has_private(page) &&
817 	    !try_to_release_page(page, GFP_KERNEL))
818 		return -EAGAIN;
819 
820 	return migrate_page(mapping, newpage, page, mode);
821 }
822 
823 /*
824  * Move a page to a newly allocated page
825  * The page is locked and all ptes have been successfully removed.
826  *
827  * The new page will have replaced the old page if this function
828  * is successful.
829  *
830  * Return value:
831  *   < 0 - error code
832  *  MIGRATEPAGE_SUCCESS - success
833  */
834 static int move_to_new_page(struct page *newpage, struct page *page,
835 				enum migrate_mode mode)
836 {
837 	struct address_space *mapping;
838 	int rc = -EAGAIN;
839 	bool is_lru = !__PageMovable(page);
840 
841 	VM_BUG_ON_PAGE(!PageLocked(page), page);
842 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
843 
844 	mapping = page_mapping(page);
845 
846 	if (likely(is_lru)) {
847 		if (!mapping)
848 			rc = migrate_page(mapping, newpage, page, mode);
849 		else if (mapping->a_ops->migratepage)
850 			/*
851 			 * Most pages have a mapping and most filesystems
852 			 * provide a migratepage callback. Anonymous pages
853 			 * are part of swap space which also has its own
854 			 * migratepage callback. This is the most common path
855 			 * for page migration.
856 			 */
857 			rc = mapping->a_ops->migratepage(mapping, newpage,
858 							page, mode);
859 		else
860 			rc = fallback_migrate_page(mapping, newpage,
861 							page, mode);
862 	} else {
863 		/*
864 		 * In case of non-lru page, it could be released after
865 		 * isolation step. In that case, we shouldn't try migration.
866 		 */
867 		VM_BUG_ON_PAGE(!PageIsolated(page), page);
868 		if (!PageMovable(page)) {
869 			rc = MIGRATEPAGE_SUCCESS;
870 			__ClearPageIsolated(page);
871 			goto out;
872 		}
873 
874 		rc = mapping->a_ops->migratepage(mapping, newpage,
875 						page, mode);
876 		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
877 			!PageIsolated(page));
878 	}
879 
880 	/*
881 	 * When successful, old pagecache page->mapping must be cleared before
882 	 * page is freed; but stats require that PageAnon be left as PageAnon.
883 	 */
884 	if (rc == MIGRATEPAGE_SUCCESS) {
885 		if (__PageMovable(page)) {
886 			VM_BUG_ON_PAGE(!PageIsolated(page), page);
887 
888 			/*
889 			 * We clear PG_movable under page_lock so any compactor
890 			 * cannot try to migrate this page.
891 			 */
892 			__ClearPageIsolated(page);
893 		}
894 
895 		/*
896 		 * Anonymous and movable page->mapping will be cleared by
897 		 * free_pages_prepare(), so don't reset it here; keeping it lets
898 		 * checks such as PageAnon still work.
899 		 */
900 		if (!PageMappingFlags(page))
901 			page->mapping = NULL;
902 	}
903 out:
904 	return rc;
905 }
906 
907 static int __unmap_and_move(struct page *page, struct page *newpage,
908 				int force, enum migrate_mode mode)
909 {
910 	int rc = -EAGAIN;
911 	int page_was_mapped = 0;
912 	struct anon_vma *anon_vma = NULL;
913 	bool is_lru = !__PageMovable(page);
914 
915 	if (!trylock_page(page)) {
916 		if (!force || mode == MIGRATE_ASYNC)
917 			goto out;
918 
919 		/*
920 		 * It's not safe for direct compaction to call lock_page.
921 		 * For example, during page readahead pages are added locked
922 		 * to the LRU. Later, when the IO completes the pages are
923 		 * marked uptodate and unlocked. However, the queueing
924 		 * could be merging multiple pages for one bio (e.g.
925 		 * mpage_readpages). If an allocation happens for the
926 		 * second or third page, the process can end up locking
927 		 * the same page twice and deadlocking. Rather than
928 		 * trying to be clever about what pages can be locked,
929 		 * avoid the use of lock_page for direct compaction
930 		 * altogether.
931 		 */
932 		if (current->flags & PF_MEMALLOC)
933 			goto out;
934 
935 		lock_page(page);
936 	}
937 
938 	if (PageWriteback(page)) {
939 		/*
940 		 * Only in the case of a full synchronous migration is it
941 		 * necessary to wait for PageWriteback. In the async case,
942 		 * the retry loop is too short and in the sync-light case,
943 		 * the overhead of stalling is too much
944 		 */
945 		if (mode != MIGRATE_SYNC) {
946 			rc = -EBUSY;
947 			goto out_unlock;
948 		}
949 		if (!force)
950 			goto out_unlock;
951 		wait_on_page_writeback(page);
952 	}
953 
954 	/*
955 	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
956 	 * we cannot notice that the anon_vma is freed while we migrate a page.
957 	 * This get_anon_vma() delays freeing the anon_vma pointer until the end
958 	 * of migration. File cache pages are no problem because of page_lock():
959 	 * file caches may use writepage() or lock_page() during migration, so
960 	 * we only need to care about anon pages here.
961 	 *
962 	 * Only page_get_anon_vma() understands the subtleties of
963 	 * getting a hold on an anon_vma from outside one of its mms.
964 	 * But if we cannot get anon_vma, then we won't need it anyway,
965 	 * because that implies that the anon page is no longer mapped
966 	 * (and cannot be remapped so long as we hold the page lock).
967 	 */
968 	if (PageAnon(page) && !PageKsm(page))
969 		anon_vma = page_get_anon_vma(page);
970 
971 	/*
972 	 * Block others from accessing the new page when we get around to
973 	 * establishing additional references. We are usually the only one
974 	 * holding a reference to newpage at this point. We used to have a BUG
975 	 * here if trylock_page(newpage) fails, but would like to allow for
976 	 * cases where there might be a race with the previous use of newpage.
977 	 * This is much like races on refcount of oldpage: just don't BUG().
978 	 */
979 	if (unlikely(!trylock_page(newpage)))
980 		goto out_unlock;
981 
982 	if (unlikely(!is_lru)) {
983 		rc = move_to_new_page(newpage, page, mode);
984 		goto out_unlock_both;
985 	}
986 
987 	/*
988 	 * Corner case handling:
989 	 * 1. When a new swap-cache page is read in, it is added to the LRU
990 	 * and treated as swapcache but it has no rmap yet.
991 	 * Calling try_to_unmap() against a page->mapping==NULL page will
992 	 * trigger a BUG.  So handle it here.
993 	 * 2. An orphaned page (see truncate_complete_page) might have
994 	 * fs-private metadata. The page can be picked up due to memory
995 	 * offlining.  Everywhere else except page reclaim, the page is
996 	 * invisible to the vm, so the page can not be migrated.  So try to
997 	 * free the metadata, so the page can be freed.
998 	 */
999 	if (!page->mapping) {
1000 		VM_BUG_ON_PAGE(PageAnon(page), page);
1001 		if (page_has_private(page)) {
1002 			try_to_free_buffers(page);
1003 			goto out_unlock_both;
1004 		}
1005 	} else if (page_mapped(page)) {
1006 		/* Establish migration ptes */
1007 		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
1008 				page);
1009 		try_to_unmap(page,
1010 			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1011 		page_was_mapped = 1;
1012 	}
1013 
1014 	if (!page_mapped(page))
1015 		rc = move_to_new_page(newpage, page, mode);
1016 
1017 	if (page_was_mapped)
1018 		remove_migration_ptes(page,
1019 			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
1020 
1021 out_unlock_both:
1022 	unlock_page(newpage);
1023 out_unlock:
1024 	/* Drop an anon_vma reference if we took one */
1025 	if (anon_vma)
1026 		put_anon_vma(anon_vma);
1027 	unlock_page(page);
1028 out:
1029 	/*
1030 	 * If migration is successful, drop the refcount on the newpage. This
1031 	 * will not free the page because the new page owner has already taken
1032 	 * its own reference. Also, if it is an LRU page, add the page to the
1033 	 * LRU list here.
1034 	 */
1035 	if (rc == MIGRATEPAGE_SUCCESS) {
1036 		if (unlikely(__PageMovable(newpage)))
1037 			put_page(newpage);
1038 		else
1039 			putback_lru_page(newpage);
1040 	}
1041 
1042 	return rc;
1043 }
1044 
1045 /*
1046  * gcc 4.7 and 4.8 on arm get ICEs when inlining unmap_and_move().  Work
1047  * around it.
1048  */
1049 #if (GCC_VERSION >= 40700 && GCC_VERSION < 40900) && defined(CONFIG_ARM)
1050 #define ICE_noinline noinline
1051 #else
1052 #define ICE_noinline
1053 #endif
1054 
1055 /*
1056  * Obtain the lock on page, remove all ptes and migrate the page
1057  * to the newly allocated page in newpage.
1058  */
1059 static ICE_noinline int unmap_and_move(new_page_t get_new_page,
1060 				   free_page_t put_new_page,
1061 				   unsigned long private, struct page *page,
1062 				   int force, enum migrate_mode mode,
1063 				   enum migrate_reason reason)
1064 {
1065 	int rc = MIGRATEPAGE_SUCCESS;
1066 	int *result = NULL;
1067 	struct page *newpage;
1068 
1069 	newpage = get_new_page(page, private, &result);
1070 	if (!newpage)
1071 		return -ENOMEM;
1072 
1073 	if (page_count(page) == 1) {
1074 		/* page was freed from under us. So we are done. */
1075 		ClearPageActive(page);
1076 		ClearPageUnevictable(page);
1077 		if (unlikely(__PageMovable(page))) {
1078 			lock_page(page);
1079 			if (!PageMovable(page))
1080 				__ClearPageIsolated(page);
1081 			unlock_page(page);
1082 		}
1083 		if (put_new_page)
1084 			put_new_page(newpage, private);
1085 		else
1086 			put_page(newpage);
1087 		goto out;
1088 	}
1089 
1090 	if (unlikely(PageTransHuge(page))) {
1091 		lock_page(page);
1092 		rc = split_huge_page(page);
1093 		unlock_page(page);
1094 		if (rc)
1095 			goto out;
1096 	}
1097 
1098 	rc = __unmap_and_move(page, newpage, force, mode);
1099 	if (rc == MIGRATEPAGE_SUCCESS)
1100 		set_page_owner_migrate_reason(newpage, reason);
1101 
1102 out:
1103 	if (rc != -EAGAIN) {
1104 		/*
1105 		 * A page that has been migrated has all references
1106 		 * removed and will be freed. A page that has not been
1107 		 * migrated will have kept its references and be
1108 		 * restored.
1109 		 */
1110 		list_del(&page->lru);
1111 
1112 		/*
1113 		 * Compaction can also migrate non-LRU pages, which are
1114 		 * not accounted in NR_ISOLATED_*. They can be recognized
1115 		 * via __PageMovable().
1116 		 */
1117 		if (likely(!__PageMovable(page)))
1118 			dec_node_page_state(page, NR_ISOLATED_ANON +
1119 					page_is_file_cache(page));
1120 	}
1121 
1122 	/*
1123 	 * If migration is successful, release the reference grabbed during
1124 	 * isolation. Otherwise, restore the page to the right list unless
1125 	 * we want to retry.
1126 	 */
1127 	if (rc == MIGRATEPAGE_SUCCESS) {
1128 		put_page(page);
1129 		if (reason == MR_MEMORY_FAILURE) {
1130 			/*
1131 			 * Set PG_HWPoison on just freed page
1132 			 * intentionally. Although it's rather weird,
1133 			 * it's how HWPoison flag works at the moment.
1134 			 */
1135 			if (!test_set_page_hwpoison(page))
1136 				num_poisoned_pages_inc();
1137 		}
1138 	} else {
1139 		if (rc != -EAGAIN) {
1140 			if (likely(!__PageMovable(page))) {
1141 				putback_lru_page(page);
1142 				goto put_new;
1143 			}
1144 
1145 			lock_page(page);
1146 			if (PageMovable(page))
1147 				putback_movable_page(page);
1148 			else
1149 				__ClearPageIsolated(page);
1150 			unlock_page(page);
1151 			put_page(page);
1152 		}
1153 put_new:
1154 		if (put_new_page)
1155 			put_new_page(newpage, private);
1156 		else
1157 			put_page(newpage);
1158 	}
1159 
1160 	if (result) {
1161 		if (rc)
1162 			*result = rc;
1163 		else
1164 			*result = page_to_nid(newpage);
1165 	}
1166 	return rc;
1167 }
1168 
1169 /*
1170  * Counterpart of unmap_and_move() for hugepage migration.
1171  *
1172  * This function doesn't wait for the completion of hugepage I/O
1173  * because there is no race between I/O and migration for hugepage.
1174  * Note that currently hugepage I/O occurs only in direct I/O
1175  * where no lock is held and PG_writeback is irrelevant,
1176  * and the writeback status of all subpages is counted in the reference
1177  * count of the head page (i.e. if all subpages of a 2MB hugepage are
1178  * under direct I/O, the reference count of the head page is 512 and a bit more).
1179  * This means that when we try to migrate a hugepage whose subpages are
1180  * doing direct I/O, some references remain after try_to_unmap() and
1181  * hugepage migration fails without data corruption.
1182  *
1183  * There is also no race when direct I/O is issued on the page under migration,
1184  * because then pte is replaced with migration swap entry and direct I/O code
1185  * will wait in the page fault for migration to complete.
1186  */
1187 static int unmap_and_move_huge_page(new_page_t get_new_page,
1188 				free_page_t put_new_page, unsigned long private,
1189 				struct page *hpage, int force,
1190 				enum migrate_mode mode, int reason)
1191 {
1192 	int rc = -EAGAIN;
1193 	int *result = NULL;
1194 	int page_was_mapped = 0;
1195 	struct page *new_hpage;
1196 	struct anon_vma *anon_vma = NULL;
1197 
1198 	/*
1199 	 * Movability of hugepages depends on architectures and hugepage size.
1200 	 * This check is necessary because some callers of hugepage migration
1201 	 * like soft offline and memory hotremove don't walk through page
1202 	 * tables or check whether the hugepage is pmd-based or not before
1203 	 * kicking migration.
1204 	 */
1205 	if (!hugepage_migration_supported(page_hstate(hpage))) {
1206 		putback_active_hugepage(hpage);
1207 		return -ENOSYS;
1208 	}
1209 
1210 	new_hpage = get_new_page(hpage, private, &result);
1211 	if (!new_hpage)
1212 		return -ENOMEM;
1213 
1214 	if (!trylock_page(hpage)) {
1215 		if (!force || mode != MIGRATE_SYNC)
1216 			goto out;
1217 		lock_page(hpage);
1218 	}
1219 
1220 	if (PageAnon(hpage))
1221 		anon_vma = page_get_anon_vma(hpage);
1222 
1223 	if (unlikely(!trylock_page(new_hpage)))
1224 		goto put_anon;
1225 
1226 	if (page_mapped(hpage)) {
1227 		try_to_unmap(hpage,
1228 			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1229 		page_was_mapped = 1;
1230 	}
1231 
1232 	if (!page_mapped(hpage))
1233 		rc = move_to_new_page(new_hpage, hpage, mode);
1234 
1235 	if (page_was_mapped)
1236 		remove_migration_ptes(hpage,
1237 			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
1238 
1239 	unlock_page(new_hpage);
1240 
1241 put_anon:
1242 	if (anon_vma)
1243 		put_anon_vma(anon_vma);
1244 
1245 	if (rc == MIGRATEPAGE_SUCCESS) {
1246 		hugetlb_cgroup_migrate(hpage, new_hpage);
1247 		put_new_page = NULL;
1248 		set_page_owner_migrate_reason(new_hpage, reason);
1249 	}
1250 
1251 	unlock_page(hpage);
1252 out:
1253 	if (rc != -EAGAIN)
1254 		putback_active_hugepage(hpage);
1255 
1256 	/*
1257 	 * If migration was not successful and there's a freeing callback, use
1258 	 * it.  Otherwise, put_page() will drop the reference grabbed during
1259 	 * isolation.
1260 	 */
1261 	if (put_new_page)
1262 		put_new_page(new_hpage, private);
1263 	else
1264 		putback_active_hugepage(new_hpage);
1265 
1266 	if (result) {
1267 		if (rc)
1268 			*result = rc;
1269 		else
1270 			*result = page_to_nid(new_hpage);
1271 	}
1272 	return rc;
1273 }
1274 
1275 /*
1276  * migrate_pages - migrate the pages specified in a list, to the free pages
1277  *		   supplied as the target for the page migration
1278  *
1279  * @from:		The list of pages to be migrated.
1280  * @get_new_page:	The function used to allocate free pages to be used
1281  *			as the target of the page migration.
1282  * @put_new_page:	The function used to free target pages if migration
1283  *			fails, or NULL if no special handling is necessary.
1284  * @private:		Private data to be passed on to get_new_page()
1285  * @mode:		The migration mode that specifies the constraints for
1286  *			page migration, if any.
1287  * @reason:		The reason for page migration.
1288  *
1289  * The function returns after 10 attempts or if no pages are movable any more
1290  * because the list has become empty or no retryable pages remain.
1291  * The caller should call putback_movable_pages() to return pages to the LRU
1292  * or free list only if ret != 0.
1293  *
1294  * Returns the number of pages that were not migrated, or an error code.
1295  */
1296 int migrate_pages(struct list_head *from, new_page_t get_new_page,
1297 		free_page_t put_new_page, unsigned long private,
1298 		enum migrate_mode mode, int reason)
1299 {
1300 	int retry = 1;
1301 	int nr_failed = 0;
1302 	int nr_succeeded = 0;
1303 	int pass = 0;
1304 	struct page *page;
1305 	struct page *page2;
1306 	int swapwrite = current->flags & PF_SWAPWRITE;
1307 	int rc;
1308 
1309 	if (!swapwrite)
1310 		current->flags |= PF_SWAPWRITE;
1311 
1312 	for(pass = 0; pass < 10 && retry; pass++) {
1313 		retry = 0;
1314 
1315 		list_for_each_entry_safe(page, page2, from, lru) {
1316 			cond_resched();
1317 
1318 			if (PageHuge(page))
1319 				rc = unmap_and_move_huge_page(get_new_page,
1320 						put_new_page, private, page,
1321 						pass > 2, mode, reason);
1322 			else
1323 				rc = unmap_and_move(get_new_page, put_new_page,
1324 						private, page, pass > 2, mode,
1325 						reason);
1326 
1327 			switch(rc) {
1328 			case -ENOMEM:
1329 				nr_failed++;
1330 				goto out;
1331 			case -EAGAIN:
1332 				retry++;
1333 				break;
1334 			case MIGRATEPAGE_SUCCESS:
1335 				nr_succeeded++;
1336 				break;
1337 			default:
1338 				/*
1339 				 * Permanent failure (-EBUSY, -ENOSYS, etc.):
1340 				 * unlike the -EAGAIN case, the failed page is
1341 				 * removed from migration page list and not
1342 				 * retried in the next outer loop.
1343 				 */
1344 				nr_failed++;
1345 				break;
1346 			}
1347 		}
1348 	}
1349 	nr_failed += retry;
1350 	rc = nr_failed;
1351 out:
1352 	if (nr_succeeded)
1353 		count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1354 	if (nr_failed)
1355 		count_vm_events(PGMIGRATE_FAIL, nr_failed);
1356 	trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
1357 
1358 	if (!swapwrite)
1359 		current->flags &= ~PF_SWAPWRITE;
1360 
1361 	return rc;
1362 }
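
A hedged sketch of a typical migrate_pages() caller (editorial; the "demo_" names are hypothetical, but the callback signature matches new_page_t in this tree):

	/* allocation callback, modeled on new_page_node()/alloc_misplaced_dst_page() below */
	static struct page *demo_new_page(struct page *page, unsigned long private,
					  int **result)
	{
		int nid = (int)private;		/* target node passed via @private */

		return __alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE, 0);
	}

	/* call site: */
	err = migrate_pages(&pagelist, demo_new_page, NULL, (unsigned long)nid,
			    MIGRATE_SYNC, MR_SYSCALL);
	if (err)
		putback_movable_pages(&pagelist);	/* put back whatever was not migrated */
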
1363 
1364 #ifdef CONFIG_NUMA
1365 /*
1366  * Move a list of individual pages
1367  */
1368 struct page_to_node {
1369 	unsigned long addr;
1370 	struct page *page;
1371 	int node;
1372 	int status;
1373 };
1374 
1375 static struct page *new_page_node(struct page *p, unsigned long private,
1376 		int **result)
1377 {
1378 	struct page_to_node *pm = (struct page_to_node *)private;
1379 
1380 	while (pm->node != MAX_NUMNODES && pm->page != p)
1381 		pm++;
1382 
1383 	if (pm->node == MAX_NUMNODES)
1384 		return NULL;
1385 
1386 	*result = &pm->status;
1387 
1388 	if (PageHuge(p))
1389 		return alloc_huge_page_node(page_hstate(compound_head(p)),
1390 					pm->node);
1391 	else
1392 		return __alloc_pages_node(pm->node,
1393 				GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
1394 }
1395 
1396 /*
1397  * Move a set of pages as indicated in the pm array. The addr
1398  * field must be set to the virtual address of the page to be moved
1399  * and the node number must contain a valid target node.
1400  * The pm array ends with node = MAX_NUMNODES.
1401  */
1402 static int do_move_page_to_node_array(struct mm_struct *mm,
1403 				      struct page_to_node *pm,
1404 				      int migrate_all)
1405 {
1406 	int err;
1407 	struct page_to_node *pp;
1408 	LIST_HEAD(pagelist);
1409 
1410 	down_read(&mm->mmap_sem);
1411 
1412 	/*
1413 	 * Build a list of pages to migrate
1414 	 */
1415 	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
1416 		struct vm_area_struct *vma;
1417 		struct page *page;
1418 
1419 		err = -EFAULT;
1420 		vma = find_vma(mm, pp->addr);
1421 		if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
1422 			goto set_status;
1423 
1424 		/* FOLL_DUMP to ignore special (like zero) pages */
1425 		page = follow_page(vma, pp->addr,
1426 				FOLL_GET | FOLL_SPLIT | FOLL_DUMP);
1427 
1428 		err = PTR_ERR(page);
1429 		if (IS_ERR(page))
1430 			goto set_status;
1431 
1432 		err = -ENOENT;
1433 		if (!page)
1434 			goto set_status;
1435 
1436 		pp->page = page;
1437 		err = page_to_nid(page);
1438 
1439 		if (err == pp->node)
1440 			/*
1441 			 * Node already in the right place
1442 			 */
1443 			goto put_and_set;
1444 
1445 		err = -EACCES;
1446 		if (page_mapcount(page) > 1 &&
1447 				!migrate_all)
1448 			goto put_and_set;
1449 
1450 		if (PageHuge(page)) {
1451 			if (PageHead(page))
1452 				isolate_huge_page(page, &pagelist);
1453 			goto put_and_set;
1454 		}
1455 
1456 		err = isolate_lru_page(page);
1457 		if (!err) {
1458 			list_add_tail(&page->lru, &pagelist);
1459 			inc_node_page_state(page, NR_ISOLATED_ANON +
1460 					    page_is_file_cache(page));
1461 		}
1462 put_and_set:
1463 		/*
1464 		 * Either remove the duplicate refcount from
1465 		 * isolate_lru_page() or drop the page ref if it was
1466 		 * not isolated.
1467 		 */
1468 		put_page(page);
1469 set_status:
1470 		pp->status = err;
1471 	}
1472 
1473 	err = 0;
1474 	if (!list_empty(&pagelist)) {
1475 		err = migrate_pages(&pagelist, new_page_node, NULL,
1476 				(unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
1477 		if (err)
1478 			putback_movable_pages(&pagelist);
1479 	}
1480 
1481 	up_read(&mm->mmap_sem);
1482 	return err;
1483 }
1484 
1485 /*
1486  * Migrate an array of page addresses onto an array of nodes and fill
1487  * the corresponding array of status.
1488  */
1489 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1490 			 unsigned long nr_pages,
1491 			 const void __user * __user *pages,
1492 			 const int __user *nodes,
1493 			 int __user *status, int flags)
1494 {
1495 	struct page_to_node *pm;
1496 	unsigned long chunk_nr_pages;
1497 	unsigned long chunk_start;
1498 	int err;
1499 
1500 	err = -ENOMEM;
1501 	pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
1502 	if (!pm)
1503 		goto out;
1504 
1505 	migrate_prep();
1506 
1507 	/*
1508 	 * Store a chunk of page_to_node array in a page,
1509 	 * but keep the last one as a marker
1510 	 */
1511 	chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;
1512 
1513 	for (chunk_start = 0;
1514 	     chunk_start < nr_pages;
1515 	     chunk_start += chunk_nr_pages) {
1516 		int j;
1517 
1518 		if (chunk_start + chunk_nr_pages > nr_pages)
1519 			chunk_nr_pages = nr_pages - chunk_start;
1520 
1521 		/* fill the chunk pm with addrs and nodes from user-space */
1522 		for (j = 0; j < chunk_nr_pages; j++) {
1523 			const void __user *p;
1524 			int node;
1525 
1526 			err = -EFAULT;
1527 			if (get_user(p, pages + j + chunk_start))
1528 				goto out_pm;
1529 			pm[j].addr = (unsigned long) p;
1530 
1531 			if (get_user(node, nodes + j + chunk_start))
1532 				goto out_pm;
1533 
1534 			err = -ENODEV;
1535 			if (node < 0 || node >= MAX_NUMNODES)
1536 				goto out_pm;
1537 
1538 			if (!node_state(node, N_MEMORY))
1539 				goto out_pm;
1540 
1541 			err = -EACCES;
1542 			if (!node_isset(node, task_nodes))
1543 				goto out_pm;
1544 
1545 			pm[j].node = node;
1546 		}
1547 
1548 		/* End marker for this chunk */
1549 		pm[chunk_nr_pages].node = MAX_NUMNODES;
1550 
1551 		/* Migrate this chunk */
1552 		err = do_move_page_to_node_array(mm, pm,
1553 						 flags & MPOL_MF_MOVE_ALL);
1554 		if (err < 0)
1555 			goto out_pm;
1556 
1557 		/* Return status information */
1558 		for (j = 0; j < chunk_nr_pages; j++)
1559 			if (put_user(pm[j].status, status + j + chunk_start)) {
1560 				err = -EFAULT;
1561 				goto out_pm;
1562 			}
1563 	}
1564 	err = 0;
1565 
1566 out_pm:
1567 	free_page((unsigned long)pm);
1568 out:
1569 	return err;
1570 }
1571 
1572 /*
1573  * Determine the nodes of an array of pages and store it in an array of status.
1574  */
1575 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1576 				const void __user **pages, int *status)
1577 {
1578 	unsigned long i;
1579 
1580 	down_read(&mm->mmap_sem);
1581 
1582 	for (i = 0; i < nr_pages; i++) {
1583 		unsigned long addr = (unsigned long)(*pages);
1584 		struct vm_area_struct *vma;
1585 		struct page *page;
1586 		int err = -EFAULT;
1587 
1588 		vma = find_vma(mm, addr);
1589 		if (!vma || addr < vma->vm_start)
1590 			goto set_status;
1591 
1592 		/* FOLL_DUMP to ignore special (like zero) pages */
1593 		page = follow_page(vma, addr, FOLL_DUMP);
1594 
1595 		err = PTR_ERR(page);
1596 		if (IS_ERR(page))
1597 			goto set_status;
1598 
1599 		err = page ? page_to_nid(page) : -ENOENT;
1600 set_status:
1601 		*status = err;
1602 
1603 		pages++;
1604 		status++;
1605 	}
1606 
1607 	up_read(&mm->mmap_sem);
1608 }
1609 
1610 /*
1611  * Determine the nodes of a user array of pages and store it in
1612  * a user array of status.
1613  */
1614 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1615 			 const void __user * __user *pages,
1616 			 int __user *status)
1617 {
1618 #define DO_PAGES_STAT_CHUNK_NR 16
1619 	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1620 	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1621 
1622 	while (nr_pages) {
1623 		unsigned long chunk_nr;
1624 
1625 		chunk_nr = nr_pages;
1626 		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1627 			chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1628 
1629 		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1630 			break;
1631 
1632 		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1633 
1634 		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1635 			break;
1636 
1637 		pages += chunk_nr;
1638 		status += chunk_nr;
1639 		nr_pages -= chunk_nr;
1640 	}
1641 	return nr_pages ? -EFAULT : 0;
1642 }
1643 
1644 /*
1645  * Move a list of pages in the address space of the currently executing
1646  * process.
1647  */
1648 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1649 		const void __user * __user *, pages,
1650 		const int __user *, nodes,
1651 		int __user *, status, int, flags)
1652 {
1653 	const struct cred *cred = current_cred(), *tcred;
1654 	struct task_struct *task;
1655 	struct mm_struct *mm;
1656 	int err;
1657 	nodemask_t task_nodes;
1658 
1659 	/* Check flags */
1660 	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1661 		return -EINVAL;
1662 
1663 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1664 		return -EPERM;
1665 
1666 	/* Find the mm_struct */
1667 	rcu_read_lock();
1668 	task = pid ? find_task_by_vpid(pid) : current;
1669 	if (!task) {
1670 		rcu_read_unlock();
1671 		return -ESRCH;
1672 	}
1673 	get_task_struct(task);
1674 
1675 	/*
1676 	 * Check if this process has the right to modify the specified
1677 	 * process. The right exists if the process has administrative
1678 	 * capabilities, superuser privileges or the same
1679 	 * userid as the target process.
1680 	 */
1681 	tcred = __task_cred(task);
1682 	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1683 	    !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
1684 	    !capable(CAP_SYS_NICE)) {
1685 		rcu_read_unlock();
1686 		err = -EPERM;
1687 		goto out;
1688 	}
1689 	rcu_read_unlock();
1690 
1691  	err = security_task_movememory(task);
1692  	if (err)
1693 		goto out;
1694 
1695 	task_nodes = cpuset_mems_allowed(task);
1696 	mm = get_task_mm(task);
1697 	put_task_struct(task);
1698 
1699 	if (!mm)
1700 		return -EINVAL;
1701 
1702 	if (nodes)
1703 		err = do_pages_move(mm, task_nodes, nr_pages, pages,
1704 				    nodes, status, flags);
1705 	else
1706 		err = do_pages_stat(mm, nr_pages, pages, status);
1707 
1708 	mmput(mm);
1709 	return err;
1710 
1711 out:
1712 	put_task_struct(task);
1713 	return err;
1714 }
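
A hedged userspace sketch of the syscall above (editorial; uses the move_pages(2) wrapper from libnuma's <numaif.h>, and the address and node number are hypothetical):

	#include <stdio.h>
	#include <numaif.h>

	void *pages[1]  = { addr };		/* addr: page-aligned address in this process */
	int   nodes[1]  = { 1 };		/* desired target node */
	int   status[1];

	if (move_pages(0 /* current process */, 1, pages, nodes, status,
		       MPOL_MF_MOVE) == 0)
		/* status[0] is now the node the page ended up on, or a negative errno */
		printf("page is on node %d\n", status[0]);
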
1715 
1716 #ifdef CONFIG_NUMA_BALANCING
1717 /*
1718  * Returns true if this is a safe migration target node for misplaced NUMA
1719  * pages. Currently it only checks the watermarks, which is crude.
1720  */
1721 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
1722 				   unsigned long nr_migrate_pages)
1723 {
1724 	int z;
1725 
1726 	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1727 		struct zone *zone = pgdat->node_zones + z;
1728 
1729 		if (!populated_zone(zone))
1730 			continue;
1731 
1732 		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
1733 		if (!zone_watermark_ok(zone, 0,
1734 				       high_wmark_pages(zone) +
1735 				       nr_migrate_pages,
1736 				       0, 0))
1737 			continue;
1738 		return true;
1739 	}
1740 	return false;
1741 }
1742 
1743 static struct page *alloc_misplaced_dst_page(struct page *page,
1744 					   unsigned long data,
1745 					   int **result)
1746 {
1747 	int nid = (int) data;
1748 	struct page *newpage;
1749 
1750 	newpage = __alloc_pages_node(nid,
1751 					 (GFP_HIGHUSER_MOVABLE |
1752 					  __GFP_THISNODE | __GFP_NOMEMALLOC |
1753 					  __GFP_NORETRY | __GFP_NOWARN) &
1754 					 ~__GFP_RECLAIM, 0);
1755 
1756 	return newpage;
1757 }
1758 
1759 /*
1760  * page migration rate limiting control.
1761  * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs
1762  * window of time. Default here says do not migrate more than 1280M per second.
1763  */
1764 static unsigned int migrate_interval_millisecs __read_mostly = 100;
1765 static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
1766 
1767 /* Returns true if the node is migrate rate-limited after the update */
1768 static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
1769 					unsigned long nr_pages)
1770 {
1771 	/*
1772 	 * Rate-limit the amount of data that is being migrated to a node.
1773 	 * Optimal placement is no good if the memory bus is saturated and
1774 	 * all the time is being spent migrating!
1775 	 */
1776 	if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
1777 		spin_lock(&pgdat->numabalancing_migrate_lock);
1778 		pgdat->numabalancing_migrate_nr_pages = 0;
1779 		pgdat->numabalancing_migrate_next_window = jiffies +
1780 			msecs_to_jiffies(migrate_interval_millisecs);
1781 		spin_unlock(&pgdat->numabalancing_migrate_lock);
1782 	}
1783 	if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
1784 		trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
1785 								nr_pages);
1786 		return true;
1787 	}
1788 
1789 	/*
1790 	 * This is an unlocked non-atomic update so errors are possible.
1791 	 * The consequence is failing to migrate when we potentially should
1792 	 * have, which is not severe enough to warrant locking. If it is ever
1793 	 * a problem, it can be converted to a per-cpu counter.
1794 	 */
1795 	pgdat->numabalancing_migrate_nr_pages += nr_pages;
1796 	return false;
1797 }
1798 
1799 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
1800 {
1801 	int page_lru;
1802 
1803 	VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
1804 
1805 	/* Avoid migrating to a node that is nearly full */
1806 	if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
1807 		return 0;
1808 
1809 	if (isolate_lru_page(page))
1810 		return 0;
1811 
1812 	/*
1813 	 * migrate_misplaced_transhuge_page() skips page migration's usual
1814 	 * check on page_count(), so we must do it here, now that the page
1815 	 * has been isolated: a GUP pin, or any other pin, prevents migration.
1816 	 * The expected page count is 3: 1 for page's mapcount and 1 for the
1817 	 * caller's pin and 1 for the reference taken by isolate_lru_page().
1818 	 */
1819 	if (PageTransHuge(page) && page_count(page) != 3) {
1820 		putback_lru_page(page);
1821 		return 0;
1822 	}
1823 
1824 	page_lru = page_is_file_cache(page);
1825 	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
1826 				hpage_nr_pages(page));
1827 
1828 	/*
1829 	 * Isolating the page has taken another reference, so the
1830 	 * caller's reference can be safely dropped without the page
1831 	 * disappearing underneath us during migration.
1832 	 */
1833 	put_page(page);
1834 	return 1;
1835 }
1836 
1837 bool pmd_trans_migrating(pmd_t pmd)
1838 {
1839 	struct page *page = pmd_page(pmd);
1840 	return PageLocked(page);
1841 }
1842 
1843 /*
1844  * Attempt to migrate a misplaced page to the specified destination
1845  * node. Caller is expected to have an elevated reference count on
1846  * the page that will be dropped by this function before returning.
1847  */
1848 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
1849 			   int node)
1850 {
1851 	pg_data_t *pgdat = NODE_DATA(node);
1852 	int isolated;
1853 	int nr_remaining;
1854 	LIST_HEAD(migratepages);
1855 
1856 	/*
1857 	 * Don't migrate file pages that are mapped in multiple processes
1858 	 * with execute permissions as they are probably shared libraries.
1859 	 */
1860 	if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
1861 	    (vma->vm_flags & VM_EXEC))
1862 		goto out;
1863 
1864 	/*
1865 	 * Rate-limit the amount of data that is being migrated to a node.
1866 	 * Optimal placement is no good if the memory bus is saturated and
1867 	 * all the time is being spent migrating!
1868 	 */
1869 	if (numamigrate_update_ratelimit(pgdat, 1))
1870 		goto out;
1871 
1872 	isolated = numamigrate_isolate_page(pgdat, page);
1873 	if (!isolated)
1874 		goto out;
1875 
1876 	list_add(&page->lru, &migratepages);
1877 	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
1878 				     NULL, node, MIGRATE_ASYNC,
1879 				     MR_NUMA_MISPLACED);
1880 	if (nr_remaining) {
1881 		if (!list_empty(&migratepages)) {
1882 			list_del(&page->lru);
1883 			dec_node_page_state(page, NR_ISOLATED_ANON +
1884 					page_is_file_cache(page));
1885 			putback_lru_page(page);
1886 		}
1887 		isolated = 0;
1888 	} else
1889 		count_vm_numa_event(NUMA_PAGE_MIGRATE);
1890 	BUG_ON(!list_empty(&migratepages));
1891 	return isolated;
1892 
1893 out:
1894 	put_page(page);
1895 	return 0;
1896 }
1897 #endif /* CONFIG_NUMA_BALANCING */
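
For context (editorial, paraphrased rather than verbatim): the NUMA hinting fault handler in mm/memory.c is the main caller of migrate_misplaced_page(), roughly:

	/* sketch of do_numa_page() in mm/memory.c */
	migrated = migrate_misplaced_page(page, vma, target_nid);
	if (migrated) {
		page_nid = target_nid;
		flags |= TNF_MIGRATED;
	} else
		flags |= TNF_MIGRATE_FAIL;
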
1898 
1899 #if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
1900 /*
1901  * Migrates a THP to a given target node. page must be locked and is unlocked
1902  * before returning.
1903  */
1904 int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1905 				struct vm_area_struct *vma,
1906 				pmd_t *pmd, pmd_t entry,
1907 				unsigned long address,
1908 				struct page *page, int node)
1909 {
1910 	spinlock_t *ptl;
1911 	pg_data_t *pgdat = NODE_DATA(node);
1912 	int isolated = 0;
1913 	struct page *new_page = NULL;
1914 	int page_lru = page_is_file_cache(page);
1915 	unsigned long mmun_start = address & HPAGE_PMD_MASK;
1916 	unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
1917 	pmd_t orig_entry;
1918 
1919 	/*
1920 	 * Rate-limit the amount of data that is being migrated to a node.
1921 	 * Optimal placement is no good if the memory bus is saturated and
1922 	 * all the time is being spent migrating!
1923 	 */
1924 	if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
1925 		goto out_dropref;
1926 
1927 	new_page = alloc_pages_node(node,
1928 		(GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
1929 		HPAGE_PMD_ORDER);
1930 	if (!new_page)
1931 		goto out_fail;
1932 	prep_transhuge_page(new_page);
1933 
1934 	isolated = numamigrate_isolate_page(pgdat, page);
1935 	if (!isolated) {
1936 		put_page(new_page);
1937 		goto out_fail;
1938 	}
1939 	/*
1940 	 * We are not sure a pending tlb flush here is for a huge page
1941 	 * mapping or not. Hence use the tlb range variant
1942 	 */
1943 	if (mm_tlb_flush_pending(mm))
1944 		flush_tlb_range(vma, mmun_start, mmun_end);
1945 
1946 	/* Prepare a page as a migration target */
1947 	__SetPageLocked(new_page);
1948 	if (PageSwapBacked(page))
1949 		__SetPageSwapBacked(new_page);
1950 
1951 	/* anon mapping, we can simply copy page->mapping to the new page: */
1952 	new_page->mapping = page->mapping;
1953 	new_page->index = page->index;
1954 	migrate_page_copy(new_page, page);
1955 	WARN_ON(PageLRU(new_page));
1956 
1957 	/* Recheck the target PMD */
1958 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1959 	ptl = pmd_lock(mm, pmd);
1960 	if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
1961 fail_putback:
1962 		spin_unlock(ptl);
1963 		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1964 
1965 		/* Reverse changes made by migrate_page_copy() */
1966 		if (TestClearPageActive(new_page))
1967 			SetPageActive(page);
1968 		if (TestClearPageUnevictable(new_page))
1969 			SetPageUnevictable(page);
1970 
1971 		unlock_page(new_page);
1972 		put_page(new_page);		/* Free it */
1973 
1974 		/* Retake the caller's reference and put the page back on the LRU */
1975 		get_page(page);
1976 		putback_lru_page(page);
1977 		mod_node_page_state(page_pgdat(page),
1978 			 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
1979 
1980 		goto out_unlock;
1981 	}
1982 
1983 	orig_entry = *pmd;
1984 	entry = mk_huge_pmd(new_page, vma->vm_page_prot);
1985 	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1986 
1987 	/*
1988 	 * Clear the old entry under pagetable lock and establish the new PTE.
1989 	 * Any parallel GUP will either observe the old page blocking on the
1990 	 * page lock, block on the page table lock or observe the new page.
1991 	 * The SetPageUptodate on the new page and page_add_new_anon_rmap
1992 	 * guarantee the copy is visible before the pagetable update.
1993 	 */
1994 	flush_cache_range(vma, mmun_start, mmun_end);
1995 	page_add_anon_rmap(new_page, vma, mmun_start, true);
1996 	pmdp_huge_clear_flush_notify(vma, mmun_start, pmd);
1997 	set_pmd_at(mm, mmun_start, pmd, entry);
1998 	update_mmu_cache_pmd(vma, address, &entry);
1999 
2000 	if (page_count(page) != 2) {
2001 		set_pmd_at(mm, mmun_start, pmd, orig_entry);
2002 		flush_pmd_tlb_range(vma, mmun_start, mmun_end);
2003 		mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
2004 		update_mmu_cache_pmd(vma, address, &entry);
2005 		page_remove_rmap(new_page, true);
2006 		goto fail_putback;
2007 	}
2008 
2009 	mlock_migrate_page(new_page, page);
2010 	page_remove_rmap(page, true);
2011 	set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
2012 
2013 	spin_unlock(ptl);
2014 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2015 
2016 	/* Take an "isolate" reference and put new page on the LRU. */
2017 	get_page(new_page);
2018 	putback_lru_page(new_page);
2019 
2020 	unlock_page(new_page);
2021 	unlock_page(page);
2022 	put_page(page);			/* Drop the rmap reference */
2023 	put_page(page);			/* Drop the LRU isolation reference */
2024 
2025 	count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
2026 	count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
2027 
2028 	mod_node_page_state(page_pgdat(page),
2029 			NR_ISOLATED_ANON + page_lru,
2030 			-HPAGE_PMD_NR);
2031 	return isolated;
2032 
2033 out_fail:
2034 	count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
2035 out_dropref:
2036 	ptl = pmd_lock(mm, pmd);
2037 	if (pmd_same(*pmd, entry)) {
2038 		entry = pmd_modify(entry, vma->vm_page_prot);
2039 		set_pmd_at(mm, mmun_start, pmd, entry);
2040 		update_mmu_cache_pmd(vma, address, &entry);
2041 	}
2042 	spin_unlock(ptl);
2043 
2044 out_unlock:
2045 	unlock_page(page);
2046 	put_page(page);
2047 	return 0;
2048 }
2049 #endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */
2050 
2051 #endif /* CONFIG_NUMA */
2052