xref: /linux/mm/huge_memory.c (revision ac46d4f3c43241ffa23d5bf36153a0830c0e02cc)
171e3aac0SAndrea Arcangeli /*
271e3aac0SAndrea Arcangeli  *  Copyright (C) 2009  Red Hat, Inc.
371e3aac0SAndrea Arcangeli  *
471e3aac0SAndrea Arcangeli  *  This work is licensed under the terms of the GNU GPL, version 2. See
571e3aac0SAndrea Arcangeli  *  the COPYING file in the top-level directory.
671e3aac0SAndrea Arcangeli  */
771e3aac0SAndrea Arcangeli 
8ae3a8c1cSAndrew Morton #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9ae3a8c1cSAndrew Morton 
1071e3aac0SAndrea Arcangeli #include <linux/mm.h>
1171e3aac0SAndrea Arcangeli #include <linux/sched.h>
12f7ccbae4SIngo Molnar #include <linux/sched/coredump.h>
136a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h>
1471e3aac0SAndrea Arcangeli #include <linux/highmem.h>
1571e3aac0SAndrea Arcangeli #include <linux/hugetlb.h>
1671e3aac0SAndrea Arcangeli #include <linux/mmu_notifier.h>
1771e3aac0SAndrea Arcangeli #include <linux/rmap.h>
1871e3aac0SAndrea Arcangeli #include <linux/swap.h>
1997ae1749SKirill A. Shutemov #include <linux/shrinker.h>
20ba76149fSAndrea Arcangeli #include <linux/mm_inline.h>
21e9b61f19SKirill A. Shutemov #include <linux/swapops.h>
224897c765SMatthew Wilcox #include <linux/dax.h>
23ba76149fSAndrea Arcangeli #include <linux/khugepaged.h>
24878aee7dSAndrea Arcangeli #include <linux/freezer.h>
25f25748e3SDan Williams #include <linux/pfn_t.h>
26a664b2d8SAndrea Arcangeli #include <linux/mman.h>
273565fce3SDan Williams #include <linux/memremap.h>
28325adeb5SRalf Baechle #include <linux/pagemap.h>
2949071d43SKirill A. Shutemov #include <linux/debugfs.h>
304daae3b4SMel Gorman #include <linux/migrate.h>
3143b5fbbdSSasha Levin #include <linux/hashtable.h>
326b251fc9SAndrea Arcangeli #include <linux/userfaultfd_k.h>
3333c3fc71SVladimir Davydov #include <linux/page_idle.h>
34baa355fdSKirill A. Shutemov #include <linux/shmem_fs.h>
356b31d595SMichal Hocko #include <linux/oom.h>
3697ae1749SKirill A. Shutemov 
3771e3aac0SAndrea Arcangeli #include <asm/tlb.h>
3871e3aac0SAndrea Arcangeli #include <asm/pgalloc.h>
3971e3aac0SAndrea Arcangeli #include "internal.h"
4071e3aac0SAndrea Arcangeli 
41ba76149fSAndrea Arcangeli /*
42b14d595aSMichael DeGuzis  * By default, transparent hugepage support is disabled in order to avoid
43b14d595aSMichael DeGuzis  * risking an increased memory footprint for applications that are not
44b14d595aSMichael DeGuzis  * guaranteed to benefit from it. When transparent hugepage support is
45b14d595aSMichael DeGuzis  * enabled, it is for all mappings, and khugepaged scans all mappings.
468bfa3f9aSJianguo Wu  * Defrag is invoked both for khugepaged hugepage allocations and for
478bfa3f9aSJianguo Wu  * all hugepage allocations made at page fault time.
48ba76149fSAndrea Arcangeli  */
4971e3aac0SAndrea Arcangeli unsigned long transparent_hugepage_flags __read_mostly =
5013ece886SAndrea Arcangeli #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
51ba76149fSAndrea Arcangeli 	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
5213ece886SAndrea Arcangeli #endif
5313ece886SAndrea Arcangeli #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
5413ece886SAndrea Arcangeli 	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
5513ece886SAndrea Arcangeli #endif
56444eb2a4SMel Gorman 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
5779da5407SKirill A. Shutemov 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
5879da5407SKirill A. Shutemov 	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
59ba76149fSAndrea Arcangeli 
609a982250SKirill A. Shutemov static struct shrinker deferred_split_shrinker;
61f000565aSAndrea Arcangeli 
6297ae1749SKirill A. Shutemov static atomic_t huge_zero_refcount;
6356873f43SWang, Yalin struct page *huge_zero_page __read_mostly;
644a6c1297SKirill A. Shutemov 
656fcb52a5SAaron Lu static struct page *get_huge_zero_page(void)
6697ae1749SKirill A. Shutemov {
6797ae1749SKirill A. Shutemov 	struct page *zero_page;
6897ae1749SKirill A. Shutemov retry:
6997ae1749SKirill A. Shutemov 	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
704db0c3c2SJason Low 		return READ_ONCE(huge_zero_page);
7197ae1749SKirill A. Shutemov 
7297ae1749SKirill A. Shutemov 	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
7397ae1749SKirill A. Shutemov 			HPAGE_PMD_ORDER);
74d8a8e1f0SKirill A. Shutemov 	if (!zero_page) {
75d8a8e1f0SKirill A. Shutemov 		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
765918d10aSKirill A. Shutemov 		return NULL;
77d8a8e1f0SKirill A. Shutemov 	}
78d8a8e1f0SKirill A. Shutemov 	count_vm_event(THP_ZERO_PAGE_ALLOC);
7997ae1749SKirill A. Shutemov 	preempt_disable();
805918d10aSKirill A. Shutemov 	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
8197ae1749SKirill A. Shutemov 		preempt_enable();
825ddacbe9SYu Zhao 		__free_pages(zero_page, compound_order(zero_page));
8397ae1749SKirill A. Shutemov 		goto retry;
8497ae1749SKirill A. Shutemov 	}
8597ae1749SKirill A. Shutemov 
8697ae1749SKirill A. Shutemov 	/* We take an additional reference here; it will be put by the shrinker */
8797ae1749SKirill A. Shutemov 	atomic_set(&huge_zero_refcount, 2);
8897ae1749SKirill A. Shutemov 	preempt_enable();
894db0c3c2SJason Low 	return READ_ONCE(huge_zero_page);
9097ae1749SKirill A. Shutemov }
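/*
 * A note on the scheme above: a successful first allocation sets
 * huge_zero_refcount to 2, one reference for the caller and one that is
 * only ever dropped by the huge_zero_page shrinker further down.  A caller
 * that loses the cmpxchg() race frees its freshly allocated page and
 * retries, so at most one global huge zero page exists at any time.
 */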
9197ae1749SKirill A. Shutemov 
926fcb52a5SAaron Lu static void put_huge_zero_page(void)
9397ae1749SKirill A. Shutemov {
9497ae1749SKirill A. Shutemov 	/*
9597ae1749SKirill A. Shutemov 	 * The counter should never go to zero here. Only the shrinker can
9697ae1749SKirill A. Shutemov 	 * put the last reference.
9797ae1749SKirill A. Shutemov 	 */
9897ae1749SKirill A. Shutemov 	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
9997ae1749SKirill A. Shutemov }
10097ae1749SKirill A. Shutemov 
1016fcb52a5SAaron Lu struct page *mm_get_huge_zero_page(struct mm_struct *mm)
1026fcb52a5SAaron Lu {
1036fcb52a5SAaron Lu 	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
1046fcb52a5SAaron Lu 		return READ_ONCE(huge_zero_page);
1056fcb52a5SAaron Lu 
1066fcb52a5SAaron Lu 	if (!get_huge_zero_page())
1076fcb52a5SAaron Lu 		return NULL;
1086fcb52a5SAaron Lu 
1096fcb52a5SAaron Lu 	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
1106fcb52a5SAaron Lu 		put_huge_zero_page();
1116fcb52a5SAaron Lu 
1126fcb52a5SAaron Lu 	return READ_ONCE(huge_zero_page);
1136fcb52a5SAaron Lu }
1146fcb52a5SAaron Lu 
1156fcb52a5SAaron Lu void mm_put_huge_zero_page(struct mm_struct *mm)
1166fcb52a5SAaron Lu {
1176fcb52a5SAaron Lu 	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
1186fcb52a5SAaron Lu 		put_huge_zero_page();
1196fcb52a5SAaron Lu }
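/*
 * MMF_HUGE_ZERO_PAGE ensures each mm_struct pins the global huge zero page
 * at most once: the first mm_get_huge_zero_page() call in an mm takes one
 * global reference, later calls reuse it, and mm_put_huge_zero_page()
 * (called when the mm is torn down) drops it again so the shrinker below
 * can reclaim an otherwise unused zero page.
 */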
1206fcb52a5SAaron Lu 
12148896466SGlauber Costa static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
12297ae1749SKirill A. Shutemov 					struct shrink_control *sc)
12397ae1749SKirill A. Shutemov {
12497ae1749SKirill A. Shutemov 	/* we can free the zero page only if the last reference remains */
12597ae1749SKirill A. Shutemov 	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
12648896466SGlauber Costa }
12797ae1749SKirill A. Shutemov 
12848896466SGlauber Costa static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
12948896466SGlauber Costa 				       struct shrink_control *sc)
13048896466SGlauber Costa {
13197ae1749SKirill A. Shutemov 	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
1325918d10aSKirill A. Shutemov 		struct page *zero_page = xchg(&huge_zero_page, NULL);
1335918d10aSKirill A. Shutemov 		BUG_ON(zero_page == NULL);
1345ddacbe9SYu Zhao 		__free_pages(zero_page, compound_order(zero_page));
13548896466SGlauber Costa 		return HPAGE_PMD_NR;
13697ae1749SKirill A. Shutemov 	}
13797ae1749SKirill A. Shutemov 
13897ae1749SKirill A. Shutemov 	return 0;
13997ae1749SKirill A. Shutemov }
14097ae1749SKirill A. Shutemov 
14197ae1749SKirill A. Shutemov static struct shrinker huge_zero_page_shrinker = {
14248896466SGlauber Costa 	.count_objects = shrink_huge_zero_page_count,
14348896466SGlauber Costa 	.scan_objects = shrink_huge_zero_page_scan,
14497ae1749SKirill A. Shutemov 	.seeks = DEFAULT_SEEKS,
14597ae1749SKirill A. Shutemov };
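/*
 * The count callback above reports HPAGE_PMD_NR freeable pages only while
 * the refcount is exactly 1, i.e. only the shrinker's own reference is
 * left.  The scan callback then races the refcount from 1 to 0 with
 * cmpxchg() and frees the page; a later get_huge_zero_page() simply
 * allocates a fresh one on demand.
 */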
14697ae1749SKirill A. Shutemov 
14771e3aac0SAndrea Arcangeli #ifdef CONFIG_SYSFS
14871e3aac0SAndrea Arcangeli static ssize_t enabled_show(struct kobject *kobj,
14971e3aac0SAndrea Arcangeli 			    struct kobj_attribute *attr, char *buf)
15071e3aac0SAndrea Arcangeli {
151444eb2a4SMel Gorman 	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
152444eb2a4SMel Gorman 		return sprintf(buf, "[always] madvise never\n");
153444eb2a4SMel Gorman 	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags))
154444eb2a4SMel Gorman 		return sprintf(buf, "always [madvise] never\n");
155444eb2a4SMel Gorman 	else
156444eb2a4SMel Gorman 		return sprintf(buf, "always madvise [never]\n");
15771e3aac0SAndrea Arcangeli }
158444eb2a4SMel Gorman 
15971e3aac0SAndrea Arcangeli static ssize_t enabled_store(struct kobject *kobj,
16071e3aac0SAndrea Arcangeli 			     struct kobj_attribute *attr,
16171e3aac0SAndrea Arcangeli 			     const char *buf, size_t count)
16271e3aac0SAndrea Arcangeli {
16321440d7eSDavid Rientjes 	ssize_t ret = count;
164ba76149fSAndrea Arcangeli 
16521440d7eSDavid Rientjes 	if (!memcmp("always", buf,
16621440d7eSDavid Rientjes 		    min(sizeof("always")-1, count))) {
16721440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
16821440d7eSDavid Rientjes 		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
16921440d7eSDavid Rientjes 	} else if (!memcmp("madvise", buf,
17021440d7eSDavid Rientjes 			   min(sizeof("madvise")-1, count))) {
17121440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
17221440d7eSDavid Rientjes 		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
17321440d7eSDavid Rientjes 	} else if (!memcmp("never", buf,
17421440d7eSDavid Rientjes 			   min(sizeof("never")-1, count))) {
17521440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
17621440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
17721440d7eSDavid Rientjes 	} else
17821440d7eSDavid Rientjes 		ret = -EINVAL;
179ba76149fSAndrea Arcangeli 
180ba76149fSAndrea Arcangeli 	if (ret > 0) {
181b46e756fSKirill A. Shutemov 		int err = start_stop_khugepaged();
182ba76149fSAndrea Arcangeli 		if (err)
183ba76149fSAndrea Arcangeli 			ret = err;
184ba76149fSAndrea Arcangeli 	}
185ba76149fSAndrea Arcangeli 	return ret;
18671e3aac0SAndrea Arcangeli }
18771e3aac0SAndrea Arcangeli static struct kobj_attribute enabled_attr =
18871e3aac0SAndrea Arcangeli 	__ATTR(enabled, 0644, enabled_show, enabled_store);
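/*
 * Illustrative usage of the attribute defined above, assuming the usual
 * sysfs mount point (a sketch, not output captured from this code):
 *
 *	# cat /sys/kernel/mm/transparent_hugepage/enabled
 *	always [madvise] never
 *	# echo always > /sys/kernel/mm/transparent_hugepage/enabled
 *
 * Writing "always", "madvise" or "never" flips the corresponding bits in
 * transparent_hugepage_flags and then calls start_stop_khugepaged().
 */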
18971e3aac0SAndrea Arcangeli 
190b46e756fSKirill A. Shutemov ssize_t single_hugepage_flag_show(struct kobject *kobj,
19171e3aac0SAndrea Arcangeli 				struct kobj_attribute *attr, char *buf,
19271e3aac0SAndrea Arcangeli 				enum transparent_hugepage_flag flag)
19371e3aac0SAndrea Arcangeli {
194e27e6151SBen Hutchings 	return sprintf(buf, "%d\n",
195e27e6151SBen Hutchings 		       !!test_bit(flag, &transparent_hugepage_flags));
19671e3aac0SAndrea Arcangeli }
197e27e6151SBen Hutchings 
198b46e756fSKirill A. Shutemov ssize_t single_hugepage_flag_store(struct kobject *kobj,
19971e3aac0SAndrea Arcangeli 				 struct kobj_attribute *attr,
20071e3aac0SAndrea Arcangeli 				 const char *buf, size_t count,
20171e3aac0SAndrea Arcangeli 				 enum transparent_hugepage_flag flag)
20271e3aac0SAndrea Arcangeli {
203e27e6151SBen Hutchings 	unsigned long value;
204e27e6151SBen Hutchings 	int ret;
205e27e6151SBen Hutchings 
206e27e6151SBen Hutchings 	ret = kstrtoul(buf, 10, &value);
207e27e6151SBen Hutchings 	if (ret < 0)
208e27e6151SBen Hutchings 		return ret;
209e27e6151SBen Hutchings 	if (value > 1)
21071e3aac0SAndrea Arcangeli 		return -EINVAL;
21171e3aac0SAndrea Arcangeli 
212e27e6151SBen Hutchings 	if (value)
213e27e6151SBen Hutchings 		set_bit(flag, &transparent_hugepage_flags);
214e27e6151SBen Hutchings 	else
215e27e6151SBen Hutchings 		clear_bit(flag, &transparent_hugepage_flags);
216e27e6151SBen Hutchings 
21771e3aac0SAndrea Arcangeli 	return count;
21871e3aac0SAndrea Arcangeli }
21971e3aac0SAndrea Arcangeli 
22071e3aac0SAndrea Arcangeli static ssize_t defrag_show(struct kobject *kobj,
22171e3aac0SAndrea Arcangeli 			   struct kobj_attribute *attr, char *buf)
22271e3aac0SAndrea Arcangeli {
223444eb2a4SMel Gorman 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
22421440d7eSDavid Rientjes 		return sprintf(buf, "[always] defer defer+madvise madvise never\n");
225444eb2a4SMel Gorman 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
22621440d7eSDavid Rientjes 		return sprintf(buf, "always [defer] defer+madvise madvise never\n");
22721440d7eSDavid Rientjes 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
22821440d7eSDavid Rientjes 		return sprintf(buf, "always defer [defer+madvise] madvise never\n");
22921440d7eSDavid Rientjes 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
23021440d7eSDavid Rientjes 		return sprintf(buf, "always defer defer+madvise [madvise] never\n");
23121440d7eSDavid Rientjes 	return sprintf(buf, "always defer defer+madvise madvise [never]\n");
23271e3aac0SAndrea Arcangeli }
23321440d7eSDavid Rientjes 
23471e3aac0SAndrea Arcangeli static ssize_t defrag_store(struct kobject *kobj,
23571e3aac0SAndrea Arcangeli 			    struct kobj_attribute *attr,
23671e3aac0SAndrea Arcangeli 			    const char *buf, size_t count)
23771e3aac0SAndrea Arcangeli {
23821440d7eSDavid Rientjes 	if (!memcmp("always", buf,
23921440d7eSDavid Rientjes 		    min(sizeof("always")-1, count))) {
24021440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
24121440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
24221440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
24321440d7eSDavid Rientjes 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
24421440d7eSDavid Rientjes 	} else if (!memcmp("defer+madvise", buf,
24521440d7eSDavid Rientjes 		    min(sizeof("defer+madvise")-1, count))) {
24621440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
24721440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
24821440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
24921440d7eSDavid Rientjes 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
2504fad7fb6SDavid Rientjes 	} else if (!memcmp("defer", buf,
2514fad7fb6SDavid Rientjes 		    min(sizeof("defer")-1, count))) {
2524fad7fb6SDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
2534fad7fb6SDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
2544fad7fb6SDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
2554fad7fb6SDavid Rientjes 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
25621440d7eSDavid Rientjes 	} else if (!memcmp("madvise", buf,
25721440d7eSDavid Rientjes 			   min(sizeof("madvise")-1, count))) {
25821440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
25921440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
26021440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
26121440d7eSDavid Rientjes 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
26221440d7eSDavid Rientjes 	} else if (!memcmp("never", buf,
26321440d7eSDavid Rientjes 			   min(sizeof("never")-1, count))) {
26421440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
26521440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
26621440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
26721440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
26821440d7eSDavid Rientjes 	} else
26921440d7eSDavid Rientjes 		return -EINVAL;
27021440d7eSDavid Rientjes 
27121440d7eSDavid Rientjes 	return count;
27271e3aac0SAndrea Arcangeli }
27371e3aac0SAndrea Arcangeli static struct kobj_attribute defrag_attr =
27471e3aac0SAndrea Arcangeli 	__ATTR(defrag, 0644, defrag_show, defrag_store);
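/*
 * Illustrative usage of the defrag knob, assuming the usual sysfs mount
 * point (a sketch):
 *
 *	# cat /sys/kernel/mm/transparent_hugepage/defrag
 *	always defer defer+madvise [madvise] never
 *	# echo defer+madvise > /sys/kernel/mm/transparent_hugepage/defrag
 *
 * See alloc_hugepage_direct_gfpmask() below for how each mode is turned
 * into a GFP mask at fault time.
 */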
27571e3aac0SAndrea Arcangeli 
27679da5407SKirill A. Shutemov static ssize_t use_zero_page_show(struct kobject *kobj,
27779da5407SKirill A. Shutemov 		struct kobj_attribute *attr, char *buf)
27879da5407SKirill A. Shutemov {
279b46e756fSKirill A. Shutemov 	return single_hugepage_flag_show(kobj, attr, buf,
28079da5407SKirill A. Shutemov 				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
28179da5407SKirill A. Shutemov }
28279da5407SKirill A. Shutemov static ssize_t use_zero_page_store(struct kobject *kobj,
28379da5407SKirill A. Shutemov 		struct kobj_attribute *attr, const char *buf, size_t count)
28479da5407SKirill A. Shutemov {
285b46e756fSKirill A. Shutemov 	return single_hugepage_flag_store(kobj, attr, buf, count,
28679da5407SKirill A. Shutemov 				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
28779da5407SKirill A. Shutemov }
28879da5407SKirill A. Shutemov static struct kobj_attribute use_zero_page_attr =
28979da5407SKirill A. Shutemov 	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
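/*
 * use_zero_page is a plain boolean; single_hugepage_flag_store() accepts
 * only "0" or "1".  For example (a sketch):
 *
 *	# echo 0 > /sys/kernel/mm/transparent_hugepage/use_zero_page
 *
 * stops read-only anonymous faults from being backed by the huge zero page.
 */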
29049920d28SHugh Dickins 
29149920d28SHugh Dickins static ssize_t hpage_pmd_size_show(struct kobject *kobj,
29249920d28SHugh Dickins 		struct kobj_attribute *attr, char *buf)
29349920d28SHugh Dickins {
29449920d28SHugh Dickins 	return sprintf(buf, "%lu\n", HPAGE_PMD_SIZE);
29549920d28SHugh Dickins }
29649920d28SHugh Dickins static struct kobj_attribute hpage_pmd_size_attr =
29749920d28SHugh Dickins 	__ATTR_RO(hpage_pmd_size);
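/*
 * hpage_pmd_size is read-only and reports HPAGE_PMD_SIZE in bytes, e.g.
 * (assuming x86-64 with 4KiB base pages):
 *
 *	# cat /sys/kernel/mm/transparent_hugepage/hpage_pmd_size
 *	2097152
 */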
29849920d28SHugh Dickins 
29971e3aac0SAndrea Arcangeli #ifdef CONFIG_DEBUG_VM
30071e3aac0SAndrea Arcangeli static ssize_t debug_cow_show(struct kobject *kobj,
30171e3aac0SAndrea Arcangeli 				struct kobj_attribute *attr, char *buf)
30271e3aac0SAndrea Arcangeli {
303b46e756fSKirill A. Shutemov 	return single_hugepage_flag_show(kobj, attr, buf,
30471e3aac0SAndrea Arcangeli 				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
30571e3aac0SAndrea Arcangeli }
30671e3aac0SAndrea Arcangeli static ssize_t debug_cow_store(struct kobject *kobj,
30771e3aac0SAndrea Arcangeli 			       struct kobj_attribute *attr,
30871e3aac0SAndrea Arcangeli 			       const char *buf, size_t count)
30971e3aac0SAndrea Arcangeli {
310b46e756fSKirill A. Shutemov 	return single_hugepage_flag_store(kobj, attr, buf, count,
31171e3aac0SAndrea Arcangeli 				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
31271e3aac0SAndrea Arcangeli }
31371e3aac0SAndrea Arcangeli static struct kobj_attribute debug_cow_attr =
31471e3aac0SAndrea Arcangeli 	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
31571e3aac0SAndrea Arcangeli #endif /* CONFIG_DEBUG_VM */
31671e3aac0SAndrea Arcangeli 
31771e3aac0SAndrea Arcangeli static struct attribute *hugepage_attr[] = {
31871e3aac0SAndrea Arcangeli 	&enabled_attr.attr,
31971e3aac0SAndrea Arcangeli 	&defrag_attr.attr,
32079da5407SKirill A. Shutemov 	&use_zero_page_attr.attr,
32149920d28SHugh Dickins 	&hpage_pmd_size_attr.attr,
322e496cf3dSKirill A. Shutemov #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
3235a6e75f8SKirill A. Shutemov 	&shmem_enabled_attr.attr,
3245a6e75f8SKirill A. Shutemov #endif
32571e3aac0SAndrea Arcangeli #ifdef CONFIG_DEBUG_VM
32671e3aac0SAndrea Arcangeli 	&debug_cow_attr.attr,
32771e3aac0SAndrea Arcangeli #endif
32871e3aac0SAndrea Arcangeli 	NULL,
32971e3aac0SAndrea Arcangeli };
33071e3aac0SAndrea Arcangeli 
3318aa95a21SArvind Yadav static const struct attribute_group hugepage_attr_group = {
33271e3aac0SAndrea Arcangeli 	.attrs = hugepage_attr,
333ba76149fSAndrea Arcangeli };
334ba76149fSAndrea Arcangeli 
335569e5590SShaohua Li static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
336569e5590SShaohua Li {
337569e5590SShaohua Li 	int err;
338569e5590SShaohua Li 
339569e5590SShaohua Li 	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
340569e5590SShaohua Li 	if (unlikely(!*hugepage_kobj)) {
341ae3a8c1cSAndrew Morton 		pr_err("failed to create transparent hugepage kobject\n");
342569e5590SShaohua Li 		return -ENOMEM;
343569e5590SShaohua Li 	}
344569e5590SShaohua Li 
345569e5590SShaohua Li 	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
346569e5590SShaohua Li 	if (err) {
347ae3a8c1cSAndrew Morton 		pr_err("failed to register transparent hugepage group\n");
348569e5590SShaohua Li 		goto delete_obj;
349569e5590SShaohua Li 	}
350569e5590SShaohua Li 
351569e5590SShaohua Li 	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
352569e5590SShaohua Li 	if (err) {
353ae3a8c1cSAndrew Morton 		pr_err("failed to register transparent hugepage group\n");
354569e5590SShaohua Li 		goto remove_hp_group;
355569e5590SShaohua Li 	}
356569e5590SShaohua Li 
357569e5590SShaohua Li 	return 0;
358569e5590SShaohua Li 
359569e5590SShaohua Li remove_hp_group:
360569e5590SShaohua Li 	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
361569e5590SShaohua Li delete_obj:
362569e5590SShaohua Li 	kobject_put(*hugepage_kobj);
363569e5590SShaohua Li 	return err;
364569e5590SShaohua Li }
365569e5590SShaohua Li 
366569e5590SShaohua Li static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
367569e5590SShaohua Li {
368569e5590SShaohua Li 	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
369569e5590SShaohua Li 	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
370569e5590SShaohua Li 	kobject_put(hugepage_kobj);
371569e5590SShaohua Li }
372569e5590SShaohua Li #else
373569e5590SShaohua Li static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
374569e5590SShaohua Li {
375569e5590SShaohua Li 	return 0;
376569e5590SShaohua Li }
377569e5590SShaohua Li 
378569e5590SShaohua Li static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
379569e5590SShaohua Li {
380569e5590SShaohua Li }
38171e3aac0SAndrea Arcangeli #endif /* CONFIG_SYSFS */
38271e3aac0SAndrea Arcangeli 
38371e3aac0SAndrea Arcangeli static int __init hugepage_init(void)
38471e3aac0SAndrea Arcangeli {
38571e3aac0SAndrea Arcangeli 	int err;
386569e5590SShaohua Li 	struct kobject *hugepage_kobj;
38771e3aac0SAndrea Arcangeli 
3884b7167b9SAndrea Arcangeli 	if (!has_transparent_hugepage()) {
3894b7167b9SAndrea Arcangeli 		transparent_hugepage_flags = 0;
390569e5590SShaohua Li 		return -EINVAL;
3914b7167b9SAndrea Arcangeli 	}
3924b7167b9SAndrea Arcangeli 
393ff20c2e0SKirill A. Shutemov 	/*
394ff20c2e0SKirill A. Shutemov 	 * hugepages can't be allocated by the buddy allocator once their order reaches MAX_ORDER
395ff20c2e0SKirill A. Shutemov 	 */
396ff20c2e0SKirill A. Shutemov 	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
397ff20c2e0SKirill A. Shutemov 	/*
398ff20c2e0SKirill A. Shutemov 	 * we use page->mapping and page->index in second tail page
399ff20c2e0SKirill A. Shutemov 	 * as list_head: assuming THP order >= 2
400ff20c2e0SKirill A. Shutemov 	 */
401ff20c2e0SKirill A. Shutemov 	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);
402ff20c2e0SKirill A. Shutemov 
403569e5590SShaohua Li 	err = hugepage_init_sysfs(&hugepage_kobj);
404569e5590SShaohua Li 	if (err)
40565ebb64fSKirill A. Shutemov 		goto err_sysfs;
406ba76149fSAndrea Arcangeli 
407b46e756fSKirill A. Shutemov 	err = khugepaged_init();
408ba76149fSAndrea Arcangeli 	if (err)
40965ebb64fSKirill A. Shutemov 		goto err_slab;
410ba76149fSAndrea Arcangeli 
41165ebb64fSKirill A. Shutemov 	err = register_shrinker(&huge_zero_page_shrinker);
41265ebb64fSKirill A. Shutemov 	if (err)
41365ebb64fSKirill A. Shutemov 		goto err_hzp_shrinker;
4149a982250SKirill A. Shutemov 	err = register_shrinker(&deferred_split_shrinker);
4159a982250SKirill A. Shutemov 	if (err)
4169a982250SKirill A. Shutemov 		goto err_split_shrinker;
41797ae1749SKirill A. Shutemov 
41897562cd2SRik van Riel 	/*
41997562cd2SRik van Riel 	 * By default disable transparent hugepages on smaller systems,
42097562cd2SRik van Riel 	 * where the extra memory used could hurt more than TLB overhead
42197562cd2SRik van Riel 	 * is likely to save.  The admin can still enable it through /sys.
42297562cd2SRik van Riel 	 */
423ca79b0c2SArun KS 	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
42497562cd2SRik van Riel 		transparent_hugepage_flags = 0;
42579553da2SKirill A. Shutemov 		return 0;
42679553da2SKirill A. Shutemov 	}
42797562cd2SRik van Riel 
42879553da2SKirill A. Shutemov 	err = start_stop_khugepaged();
42965ebb64fSKirill A. Shutemov 	if (err)
43065ebb64fSKirill A. Shutemov 		goto err_khugepaged;
431ba76149fSAndrea Arcangeli 
432569e5590SShaohua Li 	return 0;
43365ebb64fSKirill A. Shutemov err_khugepaged:
4349a982250SKirill A. Shutemov 	unregister_shrinker(&deferred_split_shrinker);
4359a982250SKirill A. Shutemov err_split_shrinker:
43665ebb64fSKirill A. Shutemov 	unregister_shrinker(&huge_zero_page_shrinker);
43765ebb64fSKirill A. Shutemov err_hzp_shrinker:
438b46e756fSKirill A. Shutemov 	khugepaged_destroy();
43965ebb64fSKirill A. Shutemov err_slab:
440569e5590SShaohua Li 	hugepage_exit_sysfs(hugepage_kobj);
44165ebb64fSKirill A. Shutemov err_sysfs:
442ba76149fSAndrea Arcangeli 	return err;
44371e3aac0SAndrea Arcangeli }
444a64fb3cdSPaul Gortmaker subsys_initcall(hugepage_init);
44571e3aac0SAndrea Arcangeli 
44671e3aac0SAndrea Arcangeli static int __init setup_transparent_hugepage(char *str)
44771e3aac0SAndrea Arcangeli {
44871e3aac0SAndrea Arcangeli 	int ret = 0;
44971e3aac0SAndrea Arcangeli 	if (!str)
45071e3aac0SAndrea Arcangeli 		goto out;
45171e3aac0SAndrea Arcangeli 	if (!strcmp(str, "always")) {
45271e3aac0SAndrea Arcangeli 		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
45371e3aac0SAndrea Arcangeli 			&transparent_hugepage_flags);
45471e3aac0SAndrea Arcangeli 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
45571e3aac0SAndrea Arcangeli 			  &transparent_hugepage_flags);
45671e3aac0SAndrea Arcangeli 		ret = 1;
45771e3aac0SAndrea Arcangeli 	} else if (!strcmp(str, "madvise")) {
45871e3aac0SAndrea Arcangeli 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
45971e3aac0SAndrea Arcangeli 			  &transparent_hugepage_flags);
46071e3aac0SAndrea Arcangeli 		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
46171e3aac0SAndrea Arcangeli 			&transparent_hugepage_flags);
46271e3aac0SAndrea Arcangeli 		ret = 1;
46371e3aac0SAndrea Arcangeli 	} else if (!strcmp(str, "never")) {
46471e3aac0SAndrea Arcangeli 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
46571e3aac0SAndrea Arcangeli 			  &transparent_hugepage_flags);
46671e3aac0SAndrea Arcangeli 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
46771e3aac0SAndrea Arcangeli 			  &transparent_hugepage_flags);
46871e3aac0SAndrea Arcangeli 		ret = 1;
46971e3aac0SAndrea Arcangeli 	}
47071e3aac0SAndrea Arcangeli out:
47171e3aac0SAndrea Arcangeli 	if (!ret)
472ae3a8c1cSAndrew Morton 		pr_warn("transparent_hugepage= cannot parse, ignored\n");
47371e3aac0SAndrea Arcangeli 	return ret;
47471e3aac0SAndrea Arcangeli }
47571e3aac0SAndrea Arcangeli __setup("transparent_hugepage=", setup_transparent_hugepage);
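/*
 * The same three policies can also be chosen at boot, before sysfs exists,
 * via the kernel command line, e.g. (illustrative):
 *
 *	transparent_hugepage=madvise
 *
 * Unrecognised values are ignored with a warning and the compile-time
 * default is kept.
 */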
47671e3aac0SAndrea Arcangeli 
477f55e1014SLinus Torvalds pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
47871e3aac0SAndrea Arcangeli {
479f55e1014SLinus Torvalds 	if (likely(vma->vm_flags & VM_WRITE))
48071e3aac0SAndrea Arcangeli 		pmd = pmd_mkwrite(pmd);
48171e3aac0SAndrea Arcangeli 	return pmd;
48271e3aac0SAndrea Arcangeli }
48371e3aac0SAndrea Arcangeli 
4849a982250SKirill A. Shutemov static inline struct list_head *page_deferred_list(struct page *page)
4859a982250SKirill A. Shutemov {
486fa3015b7SMatthew Wilcox 	/* ->lru in the tail pages is occupied by compound_head. */
487fa3015b7SMatthew Wilcox 	return &page[2].deferred_list;
4889a982250SKirill A. Shutemov }
4899a982250SKirill A. Shutemov 
4909a982250SKirill A. Shutemov void prep_transhuge_page(struct page *page)
4919a982250SKirill A. Shutemov {
4929a982250SKirill A. Shutemov 	/*
4939a982250SKirill A. Shutemov 	 * we use page->mapping and page->index in second tail page
4949a982250SKirill A. Shutemov 	 * as list_head: assuming THP order >= 2
4959a982250SKirill A. Shutemov 	 */
4969a982250SKirill A. Shutemov 
4979a982250SKirill A. Shutemov 	INIT_LIST_HEAD(page_deferred_list(page));
4989a982250SKirill A. Shutemov 	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
4999a982250SKirill A. Shutemov }
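/*
 * Callers are expected to run prep_transhuge_page() on a freshly allocated
 * compound page before mapping it as a THP, so the deferred-split list head
 * is initialised and the TRANSHUGE_PAGE_DTOR destructor is set; see
 * do_huge_pmd_anonymous_page() below for an example.
 */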
5009a982250SKirill A. Shutemov 
50174d2fad1SToshi Kani unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len,
50274d2fad1SToshi Kani 		loff_t off, unsigned long flags, unsigned long size)
50374d2fad1SToshi Kani {
50474d2fad1SToshi Kani 	unsigned long addr;
50574d2fad1SToshi Kani 	loff_t off_end = off + len;
50674d2fad1SToshi Kani 	loff_t off_align = round_up(off, size);
50774d2fad1SToshi Kani 	unsigned long len_pad;
50874d2fad1SToshi Kani 
50974d2fad1SToshi Kani 	if (off_end <= off_align || (off_end - off_align) < size)
51074d2fad1SToshi Kani 		return 0;
51174d2fad1SToshi Kani 
51274d2fad1SToshi Kani 	len_pad = len + size;
51374d2fad1SToshi Kani 	if (len_pad < len || (off + len_pad) < off)
51474d2fad1SToshi Kani 		return 0;
51574d2fad1SToshi Kani 
51674d2fad1SToshi Kani 	addr = current->mm->get_unmapped_area(filp, 0, len_pad,
51774d2fad1SToshi Kani 					      off >> PAGE_SHIFT, flags);
51874d2fad1SToshi Kani 	if (IS_ERR_VALUE(addr))
51974d2fad1SToshi Kani 		return 0;
52074d2fad1SToshi Kani 
52174d2fad1SToshi Kani 	addr += (off - addr) & (size - 1);
52274d2fad1SToshi Kani 	return addr;
52374d2fad1SToshi Kani }
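/*
 * Worked example for the alignment step above (numbers are illustrative):
 * with size == PMD_SIZE == 0x200000, suppose get_unmapped_area() returned
 * addr == 0x7f0000100000 while off & (PMD_SIZE - 1) == 0x180000.  Then
 *
 *	(off - addr) & (size - 1) == 0x80000
 *
 * and addr becomes 0x7f0000180000, which shares its offset within a 2MiB
 * block with the file offset.  PMD-aligned file offsets therefore land on
 * PMD-aligned virtual addresses, which is what makes huge page mappings of
 * the file possible; the earlier len + size padding guarantees the shifted
 * range still fits.
 */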
52474d2fad1SToshi Kani 
52574d2fad1SToshi Kani unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
52674d2fad1SToshi Kani 		unsigned long len, unsigned long pgoff, unsigned long flags)
52774d2fad1SToshi Kani {
52874d2fad1SToshi Kani 	loff_t off = (loff_t)pgoff << PAGE_SHIFT;
52974d2fad1SToshi Kani 
53074d2fad1SToshi Kani 	if (addr)
53174d2fad1SToshi Kani 		goto out;
53274d2fad1SToshi Kani 	if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
53374d2fad1SToshi Kani 		goto out;
53474d2fad1SToshi Kani 
53574d2fad1SToshi Kani 	addr = __thp_get_unmapped_area(filp, len, off, flags, PMD_SIZE);
53674d2fad1SToshi Kani 	if (addr)
53774d2fad1SToshi Kani 		return addr;
53874d2fad1SToshi Kani 
53974d2fad1SToshi Kani  out:
54074d2fad1SToshi Kani 	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
54174d2fad1SToshi Kani }
54274d2fad1SToshi Kani EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
54374d2fad1SToshi Kani 
5442b740303SSouptick Joarder static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
5452b740303SSouptick Joarder 			struct page *page, gfp_t gfp)
54671e3aac0SAndrea Arcangeli {
54782b0f8c3SJan Kara 	struct vm_area_struct *vma = vmf->vma;
54800501b53SJohannes Weiner 	struct mem_cgroup *memcg;
54971e3aac0SAndrea Arcangeli 	pgtable_t pgtable;
55082b0f8c3SJan Kara 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
5512b740303SSouptick Joarder 	vm_fault_t ret = 0;
55271e3aac0SAndrea Arcangeli 
553309381feSSasha Levin 	VM_BUG_ON_PAGE(!PageCompound(page), page);
55400501b53SJohannes Weiner 
5552cf85583STejun Heo 	if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) {
5566b251fc9SAndrea Arcangeli 		put_page(page);
5576b251fc9SAndrea Arcangeli 		count_vm_event(THP_FAULT_FALLBACK);
5586b251fc9SAndrea Arcangeli 		return VM_FAULT_FALLBACK;
5596b251fc9SAndrea Arcangeli 	}
56071e3aac0SAndrea Arcangeli 
561bae473a4SKirill A. Shutemov 	pgtable = pte_alloc_one(vma->vm_mm, haddr);
56200501b53SJohannes Weiner 	if (unlikely(!pgtable)) {
5636b31d595SMichal Hocko 		ret = VM_FAULT_OOM;
5646b31d595SMichal Hocko 		goto release;
56500501b53SJohannes Weiner 	}
56600501b53SJohannes Weiner 
567c79b57e4SHuang Ying 	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
56852f37629SMinchan Kim 	/*
56952f37629SMinchan Kim 	 * The memory barrier inside __SetPageUptodate makes sure that
57052f37629SMinchan Kim 	 * clear_huge_page writes become visible before the set_pmd_at()
57152f37629SMinchan Kim 	 * write.
57252f37629SMinchan Kim 	 */
57371e3aac0SAndrea Arcangeli 	__SetPageUptodate(page);
57471e3aac0SAndrea Arcangeli 
57582b0f8c3SJan Kara 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
57682b0f8c3SJan Kara 	if (unlikely(!pmd_none(*vmf->pmd))) {
5776b31d595SMichal Hocko 		goto unlock_release;
57871e3aac0SAndrea Arcangeli 	} else {
57971e3aac0SAndrea Arcangeli 		pmd_t entry;
5806b251fc9SAndrea Arcangeli 
5816b31d595SMichal Hocko 		ret = check_stable_address_space(vma->vm_mm);
5826b31d595SMichal Hocko 		if (ret)
5836b31d595SMichal Hocko 			goto unlock_release;
5846b31d595SMichal Hocko 
5856b251fc9SAndrea Arcangeli 		/* Deliver the page fault to userland */
5866b251fc9SAndrea Arcangeli 		if (userfaultfd_missing(vma)) {
5872b740303SSouptick Joarder 			vm_fault_t ret2;
5886b251fc9SAndrea Arcangeli 
58982b0f8c3SJan Kara 			spin_unlock(vmf->ptl);
590f627c2f5SKirill A. Shutemov 			mem_cgroup_cancel_charge(page, memcg, true);
5916b251fc9SAndrea Arcangeli 			put_page(page);
592bae473a4SKirill A. Shutemov 			pte_free(vma->vm_mm, pgtable);
5932b740303SSouptick Joarder 			ret2 = handle_userfault(vmf, VM_UFFD_MISSING);
5942b740303SSouptick Joarder 			VM_BUG_ON(ret2 & VM_FAULT_FALLBACK);
5952b740303SSouptick Joarder 			return ret2;
5966b251fc9SAndrea Arcangeli 		}
5976b251fc9SAndrea Arcangeli 
5983122359aSKirill A. Shutemov 		entry = mk_huge_pmd(page, vma->vm_page_prot);
599f55e1014SLinus Torvalds 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
600d281ee61SKirill A. Shutemov 		page_add_new_anon_rmap(page, vma, haddr, true);
601f627c2f5SKirill A. Shutemov 		mem_cgroup_commit_charge(page, memcg, false, true);
60200501b53SJohannes Weiner 		lru_cache_add_active_or_unevictable(page, vma);
60382b0f8c3SJan Kara 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
60482b0f8c3SJan Kara 		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
605bae473a4SKirill A. Shutemov 		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
606c4812909SKirill A. Shutemov 		mm_inc_nr_ptes(vma->vm_mm);
60782b0f8c3SJan Kara 		spin_unlock(vmf->ptl);
6086b251fc9SAndrea Arcangeli 		count_vm_event(THP_FAULT_ALLOC);
60971e3aac0SAndrea Arcangeli 	}
61071e3aac0SAndrea Arcangeli 
611aa2e878eSDavid Rientjes 	return 0;
6126b31d595SMichal Hocko unlock_release:
6136b31d595SMichal Hocko 	spin_unlock(vmf->ptl);
6146b31d595SMichal Hocko release:
6156b31d595SMichal Hocko 	if (pgtable)
6166b31d595SMichal Hocko 		pte_free(vma->vm_mm, pgtable);
6176b31d595SMichal Hocko 	mem_cgroup_cancel_charge(page, memcg, true);
6186b31d595SMichal Hocko 	put_page(page);
6196b31d595SMichal Hocko 	return ret;
6206b31d595SMichal Hocko 
62171e3aac0SAndrea Arcangeli }
62271e3aac0SAndrea Arcangeli 
623444eb2a4SMel Gorman /*
62421440d7eSDavid Rientjes  * always: directly stall for all thp allocations
62521440d7eSDavid Rientjes  * defer: wake kswapd and fail if not immediately available
62621440d7eSDavid Rientjes  * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
62721440d7eSDavid Rientjes  *		  fail if not immediately available
62821440d7eSDavid Rientjes  * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
62921440d7eSDavid Rientjes  *	    available
63021440d7eSDavid Rientjes  * never: never stall for any thp allocation
631444eb2a4SMel Gorman  */
632356ff8a9SDavid Rientjes static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
6330bbbc0b3SAndrea Arcangeli {
63421440d7eSDavid Rientjes 	const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
63589c83fb5SMichal Hocko 
6362f0799a0SDavid Rientjes 	/* Always do synchronous compaction */
63721440d7eSDavid Rientjes 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
638356ff8a9SDavid Rientjes 		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
6392f0799a0SDavid Rientjes 
6402f0799a0SDavid Rientjes 	/* Kick kcompactd and fail quickly */
64121440d7eSDavid Rientjes 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
642356ff8a9SDavid Rientjes 		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
6432f0799a0SDavid Rientjes 
6442f0799a0SDavid Rientjes 	/* Synchronous compaction if madvised, otherwise kick kcompactd */
64521440d7eSDavid Rientjes 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
646356ff8a9SDavid Rientjes 		return GFP_TRANSHUGE_LIGHT |
647356ff8a9SDavid Rientjes 			(vma_madvised ? __GFP_DIRECT_RECLAIM :
6482f0799a0SDavid Rientjes 					__GFP_KSWAPD_RECLAIM);
6492f0799a0SDavid Rientjes 
6502f0799a0SDavid Rientjes 	/* Only do synchronous compaction if madvised */
65121440d7eSDavid Rientjes 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
652356ff8a9SDavid Rientjes 		return GFP_TRANSHUGE_LIGHT |
653356ff8a9SDavid Rientjes 		       (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
6542f0799a0SDavid Rientjes 
655356ff8a9SDavid Rientjes 	return GFP_TRANSHUGE_LIGHT;
656444eb2a4SMel Gorman }
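/*
 * Summary of the mapping implemented above ("madvised" means the vma has
 * VM_HUGEPAGE set):
 *
 *	always:		GFP_TRANSHUGE, plus __GFP_NORETRY unless madvised
 *	defer:		GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM
 *	defer+madvise:	GFP_TRANSHUGE_LIGHT, direct reclaim if madvised,
 *			kswapd reclaim otherwise
 *	madvise:	GFP_TRANSHUGE_LIGHT, direct reclaim only if madvised
 *	never:		GFP_TRANSHUGE_LIGHT
 */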
657444eb2a4SMel Gorman 
658c4088ebdSKirill A. Shutemov /* Caller must hold page table lock. */
659d295e341SKirill A. Shutemov static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
66097ae1749SKirill A. Shutemov 		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
6615918d10aSKirill A. Shutemov 		struct page *zero_page)
662fc9fe822SKirill A. Shutemov {
663fc9fe822SKirill A. Shutemov 	pmd_t entry;
6647c414164SAndrew Morton 	if (!pmd_none(*pmd))
6657c414164SAndrew Morton 		return false;
6665918d10aSKirill A. Shutemov 	entry = mk_pmd(zero_page, vma->vm_page_prot);
667fc9fe822SKirill A. Shutemov 	entry = pmd_mkhuge(entry);
66812c9d70bSMatthew Wilcox 	if (pgtable)
6696b0b50b0SAneesh Kumar K.V 		pgtable_trans_huge_deposit(mm, pmd, pgtable);
670fc9fe822SKirill A. Shutemov 	set_pmd_at(mm, haddr, pmd, entry);
671c4812909SKirill A. Shutemov 	mm_inc_nr_ptes(mm);
6727c414164SAndrew Morton 	return true;
673fc9fe822SKirill A. Shutemov }
674fc9fe822SKirill A. Shutemov 
6752b740303SSouptick Joarder vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
67671e3aac0SAndrea Arcangeli {
67782b0f8c3SJan Kara 	struct vm_area_struct *vma = vmf->vma;
678077fcf11SAneesh Kumar K.V 	gfp_t gfp;
67971e3aac0SAndrea Arcangeli 	struct page *page;
68082b0f8c3SJan Kara 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
68171e3aac0SAndrea Arcangeli 
682128ec037SKirill A. Shutemov 	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
683c0292554SKirill A. Shutemov 		return VM_FAULT_FALLBACK;
68471e3aac0SAndrea Arcangeli 	if (unlikely(anon_vma_prepare(vma)))
68571e3aac0SAndrea Arcangeli 		return VM_FAULT_OOM;
6866d50e60cSDavid Rientjes 	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
687ba76149fSAndrea Arcangeli 		return VM_FAULT_OOM;
68882b0f8c3SJan Kara 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
689bae473a4SKirill A. Shutemov 			!mm_forbids_zeropage(vma->vm_mm) &&
69079da5407SKirill A. Shutemov 			transparent_hugepage_use_zero_page()) {
69180371957SKirill A. Shutemov 		pgtable_t pgtable;
6925918d10aSKirill A. Shutemov 		struct page *zero_page;
6933ea41e62SKirill A. Shutemov 		bool set;
6942b740303SSouptick Joarder 		vm_fault_t ret;
695bae473a4SKirill A. Shutemov 		pgtable = pte_alloc_one(vma->vm_mm, haddr);
69680371957SKirill A. Shutemov 		if (unlikely(!pgtable))
69780371957SKirill A. Shutemov 			return VM_FAULT_OOM;
6986fcb52a5SAaron Lu 		zero_page = mm_get_huge_zero_page(vma->vm_mm);
6995918d10aSKirill A. Shutemov 		if (unlikely(!zero_page)) {
700bae473a4SKirill A. Shutemov 			pte_free(vma->vm_mm, pgtable);
70197ae1749SKirill A. Shutemov 			count_vm_event(THP_FAULT_FALLBACK);
702c0292554SKirill A. Shutemov 			return VM_FAULT_FALLBACK;
70397ae1749SKirill A. Shutemov 		}
70482b0f8c3SJan Kara 		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
7056b251fc9SAndrea Arcangeli 		ret = 0;
7066b251fc9SAndrea Arcangeli 		set = false;
70782b0f8c3SJan Kara 		if (pmd_none(*vmf->pmd)) {
7086b31d595SMichal Hocko 			ret = check_stable_address_space(vma->vm_mm);
7096b31d595SMichal Hocko 			if (ret) {
7106b31d595SMichal Hocko 				spin_unlock(vmf->ptl);
7116b31d595SMichal Hocko 			} else if (userfaultfd_missing(vma)) {
71282b0f8c3SJan Kara 				spin_unlock(vmf->ptl);
71382b0f8c3SJan Kara 				ret = handle_userfault(vmf, VM_UFFD_MISSING);
7146b251fc9SAndrea Arcangeli 				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
7156b251fc9SAndrea Arcangeli 			} else {
716bae473a4SKirill A. Shutemov 				set_huge_zero_page(pgtable, vma->vm_mm, vma,
71782b0f8c3SJan Kara 						   haddr, vmf->pmd, zero_page);
71882b0f8c3SJan Kara 				spin_unlock(vmf->ptl);
7196b251fc9SAndrea Arcangeli 				set = true;
7206b251fc9SAndrea Arcangeli 			}
7216b251fc9SAndrea Arcangeli 		} else
72282b0f8c3SJan Kara 			spin_unlock(vmf->ptl);
7236fcb52a5SAaron Lu 		if (!set)
724bae473a4SKirill A. Shutemov 			pte_free(vma->vm_mm, pgtable);
7256b251fc9SAndrea Arcangeli 		return ret;
72680371957SKirill A. Shutemov 	}
727356ff8a9SDavid Rientjes 	gfp = alloc_hugepage_direct_gfpmask(vma);
728356ff8a9SDavid Rientjes 	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
72981ab4201SAndi Kleen 	if (unlikely(!page)) {
73081ab4201SAndi Kleen 		count_vm_event(THP_FAULT_FALLBACK);
731c0292554SKirill A. Shutemov 		return VM_FAULT_FALLBACK;
73281ab4201SAndi Kleen 	}
7339a982250SKirill A. Shutemov 	prep_transhuge_page(page);
73482b0f8c3SJan Kara 	return __do_huge_pmd_anonymous_page(vmf, page, gfp);
73571e3aac0SAndrea Arcangeli }
73671e3aac0SAndrea Arcangeli 
737ae18d6dcSMatthew Wilcox static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
7383b6521f5SOliver O'Halloran 		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
7393b6521f5SOliver O'Halloran 		pgtable_t pgtable)
7405cad465dSMatthew Wilcox {
7415cad465dSMatthew Wilcox 	struct mm_struct *mm = vma->vm_mm;
7425cad465dSMatthew Wilcox 	pmd_t entry;
7435cad465dSMatthew Wilcox 	spinlock_t *ptl;
7445cad465dSMatthew Wilcox 
7455cad465dSMatthew Wilcox 	ptl = pmd_lock(mm, pmd);
746f25748e3SDan Williams 	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
747f25748e3SDan Williams 	if (pfn_t_devmap(pfn))
748f25748e3SDan Williams 		entry = pmd_mkdevmap(entry);
7495cad465dSMatthew Wilcox 	if (write) {
750f55e1014SLinus Torvalds 		entry = pmd_mkyoung(pmd_mkdirty(entry));
751f55e1014SLinus Torvalds 		entry = maybe_pmd_mkwrite(entry, vma);
7525cad465dSMatthew Wilcox 	}
7533b6521f5SOliver O'Halloran 
7543b6521f5SOliver O'Halloran 	if (pgtable) {
7553b6521f5SOliver O'Halloran 		pgtable_trans_huge_deposit(mm, pmd, pgtable);
756c4812909SKirill A. Shutemov 		mm_inc_nr_ptes(mm);
7573b6521f5SOliver O'Halloran 	}
7583b6521f5SOliver O'Halloran 
7595cad465dSMatthew Wilcox 	set_pmd_at(mm, addr, pmd, entry);
7605cad465dSMatthew Wilcox 	update_mmu_cache_pmd(vma, addr, pmd);
7615cad465dSMatthew Wilcox 	spin_unlock(ptl);
7625cad465dSMatthew Wilcox }
7635cad465dSMatthew Wilcox 
764226ab561SDan Williams vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
765f25748e3SDan Williams 			pmd_t *pmd, pfn_t pfn, bool write)
7665cad465dSMatthew Wilcox {
7675cad465dSMatthew Wilcox 	pgprot_t pgprot = vma->vm_page_prot;
7683b6521f5SOliver O'Halloran 	pgtable_t pgtable = NULL;
7695cad465dSMatthew Wilcox 	/*
7705cad465dSMatthew Wilcox 	 * If we had pmd_special, we could avoid all these restrictions,
7715cad465dSMatthew Wilcox 	 * but we need to be consistent with PTEs and architectures that
7725cad465dSMatthew Wilcox 	 * can't support a 'special' bit.
7735cad465dSMatthew Wilcox 	 */
774e1fb4a08SDave Jiang 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
775e1fb4a08SDave Jiang 			!pfn_t_devmap(pfn));
7765cad465dSMatthew Wilcox 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
7775cad465dSMatthew Wilcox 						(VM_PFNMAP|VM_MIXEDMAP));
7785cad465dSMatthew Wilcox 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
7795cad465dSMatthew Wilcox 
7805cad465dSMatthew Wilcox 	if (addr < vma->vm_start || addr >= vma->vm_end)
7815cad465dSMatthew Wilcox 		return VM_FAULT_SIGBUS;
782308a047cSBorislav Petkov 
7833b6521f5SOliver O'Halloran 	if (arch_needs_pgtable_deposit()) {
7843b6521f5SOliver O'Halloran 		pgtable = pte_alloc_one(vma->vm_mm, addr);
7853b6521f5SOliver O'Halloran 		if (!pgtable)
7863b6521f5SOliver O'Halloran 			return VM_FAULT_OOM;
7873b6521f5SOliver O'Halloran 	}
7883b6521f5SOliver O'Halloran 
789308a047cSBorislav Petkov 	track_pfn_insert(vma, &pgprot, pfn);
790308a047cSBorislav Petkov 
7913b6521f5SOliver O'Halloran 	insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write, pgtable);
792ae18d6dcSMatthew Wilcox 	return VM_FAULT_NOPAGE;
7935cad465dSMatthew Wilcox }
794dee41079SDan Williams EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
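/*
 * A minimal sketch of how a driver's huge_fault handler might use the
 * export above.  my_dev_huge_fault() and my_dev_phys() are made-up names
 * standing in for driver code; the rest is existing API:
 *
 *	static vm_fault_t my_dev_huge_fault(struct vm_fault *vmf)
 *	{
 *		phys_addr_t phys = my_dev_phys(vmf->vma, vmf->address);
 *		pfn_t pfn = phys_to_pfn_t(phys, PFN_DEV | PFN_MAP);
 *
 *		return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
 *					  pfn, vmf->flags & FAULT_FLAG_WRITE);
 *	}
 *
 * The driver must ensure the mapping really is PMD sized and aligned;
 * vmf_insert_pfn_pmd() itself only range-checks the address against the
 * vma.
 */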
7955cad465dSMatthew Wilcox 
796a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
797f55e1014SLinus Torvalds static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
798a00cc7d9SMatthew Wilcox {
799f55e1014SLinus Torvalds 	if (likely(vma->vm_flags & VM_WRITE))
800a00cc7d9SMatthew Wilcox 		pud = pud_mkwrite(pud);
801a00cc7d9SMatthew Wilcox 	return pud;
802a00cc7d9SMatthew Wilcox }
803a00cc7d9SMatthew Wilcox 
804a00cc7d9SMatthew Wilcox static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
805a00cc7d9SMatthew Wilcox 		pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
806a00cc7d9SMatthew Wilcox {
807a00cc7d9SMatthew Wilcox 	struct mm_struct *mm = vma->vm_mm;
808a00cc7d9SMatthew Wilcox 	pud_t entry;
809a00cc7d9SMatthew Wilcox 	spinlock_t *ptl;
810a00cc7d9SMatthew Wilcox 
811a00cc7d9SMatthew Wilcox 	ptl = pud_lock(mm, pud);
812a00cc7d9SMatthew Wilcox 	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
813a00cc7d9SMatthew Wilcox 	if (pfn_t_devmap(pfn))
814a00cc7d9SMatthew Wilcox 		entry = pud_mkdevmap(entry);
815a00cc7d9SMatthew Wilcox 	if (write) {
816f55e1014SLinus Torvalds 		entry = pud_mkyoung(pud_mkdirty(entry));
817f55e1014SLinus Torvalds 		entry = maybe_pud_mkwrite(entry, vma);
818a00cc7d9SMatthew Wilcox 	}
819a00cc7d9SMatthew Wilcox 	set_pud_at(mm, addr, pud, entry);
820a00cc7d9SMatthew Wilcox 	update_mmu_cache_pud(vma, addr, pud);
821a00cc7d9SMatthew Wilcox 	spin_unlock(ptl);
822a00cc7d9SMatthew Wilcox }
823a00cc7d9SMatthew Wilcox 
824226ab561SDan Williams vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
825a00cc7d9SMatthew Wilcox 			pud_t *pud, pfn_t pfn, bool write)
826a00cc7d9SMatthew Wilcox {
827a00cc7d9SMatthew Wilcox 	pgprot_t pgprot = vma->vm_page_prot;
828a00cc7d9SMatthew Wilcox 	/*
829a00cc7d9SMatthew Wilcox 	 * If we had pud_special, we could avoid all these restrictions,
830a00cc7d9SMatthew Wilcox 	 * but we need to be consistent with PTEs and architectures that
831a00cc7d9SMatthew Wilcox 	 * can't support a 'special' bit.
832a00cc7d9SMatthew Wilcox 	 */
83362ec0d8cSDave Jiang 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
83462ec0d8cSDave Jiang 			!pfn_t_devmap(pfn));
835a00cc7d9SMatthew Wilcox 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
836a00cc7d9SMatthew Wilcox 						(VM_PFNMAP|VM_MIXEDMAP));
837a00cc7d9SMatthew Wilcox 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
838a00cc7d9SMatthew Wilcox 
839a00cc7d9SMatthew Wilcox 	if (addr < vma->vm_start || addr >= vma->vm_end)
840a00cc7d9SMatthew Wilcox 		return VM_FAULT_SIGBUS;
841a00cc7d9SMatthew Wilcox 
842a00cc7d9SMatthew Wilcox 	track_pfn_insert(vma, &pgprot, pfn);
843a00cc7d9SMatthew Wilcox 
844a00cc7d9SMatthew Wilcox 	insert_pfn_pud(vma, addr, pud, pfn, pgprot, write);
845a00cc7d9SMatthew Wilcox 	return VM_FAULT_NOPAGE;
846a00cc7d9SMatthew Wilcox }
847a00cc7d9SMatthew Wilcox EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
848a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
849a00cc7d9SMatthew Wilcox 
8503565fce3SDan Williams static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
851a8f97366SKirill A. Shutemov 		pmd_t *pmd, int flags)
8523565fce3SDan Williams {
8533565fce3SDan Williams 	pmd_t _pmd;
8543565fce3SDan Williams 
855a8f97366SKirill A. Shutemov 	_pmd = pmd_mkyoung(*pmd);
856a8f97366SKirill A. Shutemov 	if (flags & FOLL_WRITE)
857a8f97366SKirill A. Shutemov 		_pmd = pmd_mkdirty(_pmd);
8583565fce3SDan Williams 	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
859a8f97366SKirill A. Shutemov 				pmd, _pmd, flags & FOLL_WRITE))
8603565fce3SDan Williams 		update_mmu_cache_pmd(vma, addr, pmd);
8613565fce3SDan Williams }
8623565fce3SDan Williams 
8633565fce3SDan Williams struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
864df06b37fSKeith Busch 		pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
8653565fce3SDan Williams {
8663565fce3SDan Williams 	unsigned long pfn = pmd_pfn(*pmd);
8673565fce3SDan Williams 	struct mm_struct *mm = vma->vm_mm;
8683565fce3SDan Williams 	struct page *page;
8693565fce3SDan Williams 
8703565fce3SDan Williams 	assert_spin_locked(pmd_lockptr(mm, pmd));
8713565fce3SDan Williams 
8728310d48bSKeno Fischer 	/*
8738310d48bSKeno Fischer 	 * When we COW a devmap PMD entry, we split it into PTEs, so we should
8748310d48bSKeno Fischer 	 * not be in this function with `flags & FOLL_COW` set.
8758310d48bSKeno Fischer 	 */
8768310d48bSKeno Fischer 	WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
8778310d48bSKeno Fischer 
878f6f37321SLinus Torvalds 	if (flags & FOLL_WRITE && !pmd_write(*pmd))
8793565fce3SDan Williams 		return NULL;
8803565fce3SDan Williams 
8813565fce3SDan Williams 	if (pmd_present(*pmd) && pmd_devmap(*pmd))
8823565fce3SDan Williams 		/* pass */;
8833565fce3SDan Williams 	else
8843565fce3SDan Williams 		return NULL;
8853565fce3SDan Williams 
8863565fce3SDan Williams 	if (flags & FOLL_TOUCH)
887a8f97366SKirill A. Shutemov 		touch_pmd(vma, addr, pmd, flags);
8883565fce3SDan Williams 
8893565fce3SDan Williams 	/*
8903565fce3SDan Williams 	 * device mapped pages can only be returned if the
8913565fce3SDan Williams 	 * caller will manage the page reference count.
8923565fce3SDan Williams 	 */
8933565fce3SDan Williams 	if (!(flags & FOLL_GET))
8943565fce3SDan Williams 		return ERR_PTR(-EEXIST);
8953565fce3SDan Williams 
8963565fce3SDan Williams 	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
897df06b37fSKeith Busch 	*pgmap = get_dev_pagemap(pfn, *pgmap);
898df06b37fSKeith Busch 	if (!*pgmap)
8993565fce3SDan Williams 		return ERR_PTR(-EFAULT);
9003565fce3SDan Williams 	page = pfn_to_page(pfn);
9013565fce3SDan Williams 	get_page(page);
9023565fce3SDan Williams 
9033565fce3SDan Williams 	return page;
9043565fce3SDan Williams }
9053565fce3SDan Williams 
90671e3aac0SAndrea Arcangeli int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
90771e3aac0SAndrea Arcangeli 		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
90871e3aac0SAndrea Arcangeli 		  struct vm_area_struct *vma)
90971e3aac0SAndrea Arcangeli {
910c4088ebdSKirill A. Shutemov 	spinlock_t *dst_ptl, *src_ptl;
91171e3aac0SAndrea Arcangeli 	struct page *src_page;
91271e3aac0SAndrea Arcangeli 	pmd_t pmd;
91312c9d70bSMatthew Wilcox 	pgtable_t pgtable = NULL;
914628d47ceSKirill A. Shutemov 	int ret = -ENOMEM;
91571e3aac0SAndrea Arcangeli 
916628d47ceSKirill A. Shutemov 	/* Skip if can be re-fill on fault */
917628d47ceSKirill A. Shutemov 	if (!vma_is_anonymous(vma))
918628d47ceSKirill A. Shutemov 		return 0;
919628d47ceSKirill A. Shutemov 
92071e3aac0SAndrea Arcangeli 	pgtable = pte_alloc_one(dst_mm, addr);
92171e3aac0SAndrea Arcangeli 	if (unlikely(!pgtable))
92271e3aac0SAndrea Arcangeli 		goto out;
92371e3aac0SAndrea Arcangeli 
924c4088ebdSKirill A. Shutemov 	dst_ptl = pmd_lock(dst_mm, dst_pmd);
925c4088ebdSKirill A. Shutemov 	src_ptl = pmd_lockptr(src_mm, src_pmd);
926c4088ebdSKirill A. Shutemov 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
92771e3aac0SAndrea Arcangeli 
92871e3aac0SAndrea Arcangeli 	ret = -EAGAIN;
92971e3aac0SAndrea Arcangeli 	pmd = *src_pmd;
93084c3fc4eSZi Yan 
93184c3fc4eSZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
93284c3fc4eSZi Yan 	if (unlikely(is_swap_pmd(pmd))) {
93384c3fc4eSZi Yan 		swp_entry_t entry = pmd_to_swp_entry(pmd);
93484c3fc4eSZi Yan 
93584c3fc4eSZi Yan 		VM_BUG_ON(!is_pmd_migration_entry(pmd));
93684c3fc4eSZi Yan 		if (is_write_migration_entry(entry)) {
93784c3fc4eSZi Yan 			make_migration_entry_read(&entry);
93884c3fc4eSZi Yan 			pmd = swp_entry_to_pmd(entry);
939ab6e3d09SNaoya Horiguchi 			if (pmd_swp_soft_dirty(*src_pmd))
940ab6e3d09SNaoya Horiguchi 				pmd = pmd_swp_mksoft_dirty(pmd);
94184c3fc4eSZi Yan 			set_pmd_at(src_mm, addr, src_pmd, pmd);
94284c3fc4eSZi Yan 		}
943dd8a67f9SZi Yan 		add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
944af5b0f6aSKirill A. Shutemov 		mm_inc_nr_ptes(dst_mm);
945dd8a67f9SZi Yan 		pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
94684c3fc4eSZi Yan 		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
94784c3fc4eSZi Yan 		ret = 0;
94884c3fc4eSZi Yan 		goto out_unlock;
94984c3fc4eSZi Yan 	}
95084c3fc4eSZi Yan #endif
95184c3fc4eSZi Yan 
952628d47ceSKirill A. Shutemov 	if (unlikely(!pmd_trans_huge(pmd))) {
95371e3aac0SAndrea Arcangeli 		pte_free(dst_mm, pgtable);
95471e3aac0SAndrea Arcangeli 		goto out_unlock;
95571e3aac0SAndrea Arcangeli 	}
956fc9fe822SKirill A. Shutemov 	/*
957c4088ebdSKirill A. Shutemov 	 * While the page table lock is held, the huge zero pmd cannot be
958fc9fe822SKirill A. Shutemov 	 * under splitting, since we never split the zero page itself, only
959fc9fe822SKirill A. Shutemov 	 * the pmd into a normal page table.
960fc9fe822SKirill A. Shutemov 	 */
961fc9fe822SKirill A. Shutemov 	if (is_huge_zero_pmd(pmd)) {
9625918d10aSKirill A. Shutemov 		struct page *zero_page;
96397ae1749SKirill A. Shutemov 		/*
96497ae1749SKirill A. Shutemov 		 * mm_get_huge_zero_page() will never allocate a new page
96597ae1749SKirill A. Shutemov 		 * here, since we already have a zero page to copy. It just
96697ae1749SKirill A. Shutemov 		 * takes a reference.
96797ae1749SKirill A. Shutemov 		 */
9686fcb52a5SAaron Lu 		zero_page = mm_get_huge_zero_page(dst_mm);
9696b251fc9SAndrea Arcangeli 		set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
9705918d10aSKirill A. Shutemov 				zero_page);
971fc9fe822SKirill A. Shutemov 		ret = 0;
972fc9fe822SKirill A. Shutemov 		goto out_unlock;
973fc9fe822SKirill A. Shutemov 	}
974de466bd6SMel Gorman 
97571e3aac0SAndrea Arcangeli 	src_page = pmd_page(pmd);
976309381feSSasha Levin 	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
97771e3aac0SAndrea Arcangeli 	get_page(src_page);
97853f9263bSKirill A. Shutemov 	page_dup_rmap(src_page, true);
97971e3aac0SAndrea Arcangeli 	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
980c4812909SKirill A. Shutemov 	mm_inc_nr_ptes(dst_mm);
9815c7fb56eSDan Williams 	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
98271e3aac0SAndrea Arcangeli 
98371e3aac0SAndrea Arcangeli 	pmdp_set_wrprotect(src_mm, addr, src_pmd);
98471e3aac0SAndrea Arcangeli 	pmd = pmd_mkold(pmd_wrprotect(pmd));
98571e3aac0SAndrea Arcangeli 	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
98671e3aac0SAndrea Arcangeli 
98771e3aac0SAndrea Arcangeli 	ret = 0;
98871e3aac0SAndrea Arcangeli out_unlock:
989c4088ebdSKirill A. Shutemov 	spin_unlock(src_ptl);
990c4088ebdSKirill A. Shutemov 	spin_unlock(dst_ptl);
99171e3aac0SAndrea Arcangeli out:
99271e3aac0SAndrea Arcangeli 	return ret;
99371e3aac0SAndrea Arcangeli }
99471e3aac0SAndrea Arcangeli 
995a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
996a00cc7d9SMatthew Wilcox static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
997a8f97366SKirill A. Shutemov 		pud_t *pud, int flags)
998a00cc7d9SMatthew Wilcox {
999a00cc7d9SMatthew Wilcox 	pud_t _pud;
1000a00cc7d9SMatthew Wilcox 
1001a8f97366SKirill A. Shutemov 	_pud = pud_mkyoung(*pud);
1002a8f97366SKirill A. Shutemov 	if (flags & FOLL_WRITE)
1003a8f97366SKirill A. Shutemov 		_pud = pud_mkdirty(_pud);
1004a00cc7d9SMatthew Wilcox 	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
1005a8f97366SKirill A. Shutemov 				pud, _pud, flags & FOLL_WRITE))
1006a00cc7d9SMatthew Wilcox 		update_mmu_cache_pud(vma, addr, pud);
1007a00cc7d9SMatthew Wilcox }
1008a00cc7d9SMatthew Wilcox 
1009a00cc7d9SMatthew Wilcox struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
1010df06b37fSKeith Busch 		pud_t *pud, int flags, struct dev_pagemap **pgmap)
1011a00cc7d9SMatthew Wilcox {
1012a00cc7d9SMatthew Wilcox 	unsigned long pfn = pud_pfn(*pud);
1013a00cc7d9SMatthew Wilcox 	struct mm_struct *mm = vma->vm_mm;
1014a00cc7d9SMatthew Wilcox 	struct page *page;
1015a00cc7d9SMatthew Wilcox 
1016a00cc7d9SMatthew Wilcox 	assert_spin_locked(pud_lockptr(mm, pud));
1017a00cc7d9SMatthew Wilcox 
1018f6f37321SLinus Torvalds 	if (flags & FOLL_WRITE && !pud_write(*pud))
1019a00cc7d9SMatthew Wilcox 		return NULL;
1020a00cc7d9SMatthew Wilcox 
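	/* Only a present, device-mapped (devmap) pud can be followed here. */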
1021a00cc7d9SMatthew Wilcox 	if (pud_present(*pud) && pud_devmap(*pud))
1022a00cc7d9SMatthew Wilcox 		/* pass */;
1023a00cc7d9SMatthew Wilcox 	else
1024a00cc7d9SMatthew Wilcox 		return NULL;
1025a00cc7d9SMatthew Wilcox 
1026a00cc7d9SMatthew Wilcox 	if (flags & FOLL_TOUCH)
1027a8f97366SKirill A. Shutemov 		touch_pud(vma, addr, pud, flags);
1028a00cc7d9SMatthew Wilcox 
1029a00cc7d9SMatthew Wilcox 	/*
1030a00cc7d9SMatthew Wilcox 	 * device mapped pages can only be returned if the
1031a00cc7d9SMatthew Wilcox 	 * caller will manage the page reference count.
1032a00cc7d9SMatthew Wilcox 	 */
1033a00cc7d9SMatthew Wilcox 	if (!(flags & FOLL_GET))
1034a00cc7d9SMatthew Wilcox 		return ERR_PTR(-EEXIST);
1035a00cc7d9SMatthew Wilcox 
1036a00cc7d9SMatthew Wilcox 	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
1037df06b37fSKeith Busch 	*pgmap = get_dev_pagemap(pfn, *pgmap);
1038df06b37fSKeith Busch 	if (!*pgmap)
1039a00cc7d9SMatthew Wilcox 		return ERR_PTR(-EFAULT);
1040a00cc7d9SMatthew Wilcox 	page = pfn_to_page(pfn);
1041a00cc7d9SMatthew Wilcox 	get_page(page);
1042a00cc7d9SMatthew Wilcox 
1043a00cc7d9SMatthew Wilcox 	return page;
1044a00cc7d9SMatthew Wilcox }
1045a00cc7d9SMatthew Wilcox 
1046a00cc7d9SMatthew Wilcox int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1047a00cc7d9SMatthew Wilcox 		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1048a00cc7d9SMatthew Wilcox 		  struct vm_area_struct *vma)
1049a00cc7d9SMatthew Wilcox {
1050a00cc7d9SMatthew Wilcox 	spinlock_t *dst_ptl, *src_ptl;
1051a00cc7d9SMatthew Wilcox 	pud_t pud;
1052a00cc7d9SMatthew Wilcox 	int ret;
1053a00cc7d9SMatthew Wilcox 
1054a00cc7d9SMatthew Wilcox 	dst_ptl = pud_lock(dst_mm, dst_pud);
1055a00cc7d9SMatthew Wilcox 	src_ptl = pud_lockptr(src_mm, src_pud);
1056a00cc7d9SMatthew Wilcox 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1057a00cc7d9SMatthew Wilcox 
1058a00cc7d9SMatthew Wilcox 	ret = -EAGAIN;
1059a00cc7d9SMatthew Wilcox 	pud = *src_pud;
1060a00cc7d9SMatthew Wilcox 	if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
1061a00cc7d9SMatthew Wilcox 		goto out_unlock;
1062a00cc7d9SMatthew Wilcox 
1063a00cc7d9SMatthew Wilcox 	/*
1064a00cc7d9SMatthew Wilcox 	 * When page table lock is held, the huge zero pud should not be
1065a00cc7d9SMatthew Wilcox 	 * under splitting since we don't split the page itself, only the pud
1066a00cc7d9SMatthew Wilcox 	 * into a page table.
1067a00cc7d9SMatthew Wilcox 	 */
1068a00cc7d9SMatthew Wilcox 	if (is_huge_zero_pud(pud)) {
1069a00cc7d9SMatthew Wilcox 		/* No huge zero pud yet */
1070a00cc7d9SMatthew Wilcox 	}
1071a00cc7d9SMatthew Wilcox 
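	/*
	 * Mirror the pmd case: write-protect the source pud and install an
	 * old, read-only copy in the destination mm.
	 */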
1072a00cc7d9SMatthew Wilcox 	pudp_set_wrprotect(src_mm, addr, src_pud);
1073a00cc7d9SMatthew Wilcox 	pud = pud_mkold(pud_wrprotect(pud));
1074a00cc7d9SMatthew Wilcox 	set_pud_at(dst_mm, addr, dst_pud, pud);
1075a00cc7d9SMatthew Wilcox 
1076a00cc7d9SMatthew Wilcox 	ret = 0;
1077a00cc7d9SMatthew Wilcox out_unlock:
1078a00cc7d9SMatthew Wilcox 	spin_unlock(src_ptl);
1079a00cc7d9SMatthew Wilcox 	spin_unlock(dst_ptl);
1080a00cc7d9SMatthew Wilcox 	return ret;
1081a00cc7d9SMatthew Wilcox }
1082a00cc7d9SMatthew Wilcox 
1083a00cc7d9SMatthew Wilcox void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
1084a00cc7d9SMatthew Wilcox {
1085a00cc7d9SMatthew Wilcox 	pud_t entry;
1086a00cc7d9SMatthew Wilcox 	unsigned long haddr;
1087a00cc7d9SMatthew Wilcox 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1088a00cc7d9SMatthew Wilcox 
1089a00cc7d9SMatthew Wilcox 	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
1090a00cc7d9SMatthew Wilcox 	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
1091a00cc7d9SMatthew Wilcox 		goto unlock;
1092a00cc7d9SMatthew Wilcox 
1093a00cc7d9SMatthew Wilcox 	entry = pud_mkyoung(orig_pud);
1094a00cc7d9SMatthew Wilcox 	if (write)
1095a00cc7d9SMatthew Wilcox 		entry = pud_mkdirty(entry);
1096a00cc7d9SMatthew Wilcox 	haddr = vmf->address & HPAGE_PUD_MASK;
1097a00cc7d9SMatthew Wilcox 	if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write))
1098a00cc7d9SMatthew Wilcox 		update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud);
1099a00cc7d9SMatthew Wilcox 
1100a00cc7d9SMatthew Wilcox unlock:
1101a00cc7d9SMatthew Wilcox 	spin_unlock(vmf->ptl);
1102a00cc7d9SMatthew Wilcox }
1103a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1104a00cc7d9SMatthew Wilcox 
110582b0f8c3SJan Kara void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
1106a1dd450bSWill Deacon {
1107a1dd450bSWill Deacon 	pmd_t entry;
1108a1dd450bSWill Deacon 	unsigned long haddr;
110920f664aaSMinchan Kim 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1110a1dd450bSWill Deacon 
111182b0f8c3SJan Kara 	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
111282b0f8c3SJan Kara 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
1113a1dd450bSWill Deacon 		goto unlock;
1114a1dd450bSWill Deacon 
1115a1dd450bSWill Deacon 	entry = pmd_mkyoung(orig_pmd);
111620f664aaSMinchan Kim 	if (write)
111720f664aaSMinchan Kim 		entry = pmd_mkdirty(entry);
111882b0f8c3SJan Kara 	haddr = vmf->address & HPAGE_PMD_MASK;
111920f664aaSMinchan Kim 	if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
112082b0f8c3SJan Kara 		update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
1121a1dd450bSWill Deacon 
1122a1dd450bSWill Deacon unlock:
112382b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
1124a1dd450bSWill Deacon }
1125a1dd450bSWill Deacon 
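/*
 * Copy-on-write fallback used when a new huge page cannot be allocated: copy
 * the faulting THP into HPAGE_PMD_NR individually charged base pages and
 * remap the range with a regular page table.
 */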
11262b740303SSouptick Joarder static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,
11272b740303SSouptick Joarder 			pmd_t orig_pmd, struct page *page)
112871e3aac0SAndrea Arcangeli {
112982b0f8c3SJan Kara 	struct vm_area_struct *vma = vmf->vma;
113082b0f8c3SJan Kara 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
113100501b53SJohannes Weiner 	struct mem_cgroup *memcg;
113271e3aac0SAndrea Arcangeli 	pgtable_t pgtable;
113371e3aac0SAndrea Arcangeli 	pmd_t _pmd;
11342b740303SSouptick Joarder 	int i;
11352b740303SSouptick Joarder 	vm_fault_t ret = 0;
113671e3aac0SAndrea Arcangeli 	struct page **pages;
1137*ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
113871e3aac0SAndrea Arcangeli 
11396da2ec56SKees Cook 	pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *),
114071e3aac0SAndrea Arcangeli 			      GFP_KERNEL);
114171e3aac0SAndrea Arcangeli 	if (unlikely(!pages)) {
114271e3aac0SAndrea Arcangeli 		ret |= VM_FAULT_OOM;
114371e3aac0SAndrea Arcangeli 		goto out;
114471e3aac0SAndrea Arcangeli 	}
114571e3aac0SAndrea Arcangeli 
114671e3aac0SAndrea Arcangeli 	for (i = 0; i < HPAGE_PMD_NR; i++) {
114741b6167eSMichal Hocko 		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma,
114882b0f8c3SJan Kara 					       vmf->address, page_to_nid(page));
1149b9bbfbe3SAndrea Arcangeli 		if (unlikely(!pages[i] ||
11502cf85583STejun Heo 			     mem_cgroup_try_charge_delay(pages[i], vma->vm_mm,
1151bae473a4SKirill A. Shutemov 				     GFP_KERNEL, &memcg, false))) {
1152b9bbfbe3SAndrea Arcangeli 			if (pages[i])
115371e3aac0SAndrea Arcangeli 				put_page(pages[i]);
1154b9bbfbe3SAndrea Arcangeli 			while (--i >= 0) {
115500501b53SJohannes Weiner 				memcg = (void *)page_private(pages[i]);
115600501b53SJohannes Weiner 				set_page_private(pages[i], 0);
1157f627c2f5SKirill A. Shutemov 				mem_cgroup_cancel_charge(pages[i], memcg,
1158f627c2f5SKirill A. Shutemov 						false);
1159b9bbfbe3SAndrea Arcangeli 				put_page(pages[i]);
1160b9bbfbe3SAndrea Arcangeli 			}
116171e3aac0SAndrea Arcangeli 			kfree(pages);
116271e3aac0SAndrea Arcangeli 			ret |= VM_FAULT_OOM;
116371e3aac0SAndrea Arcangeli 			goto out;
116471e3aac0SAndrea Arcangeli 		}
116500501b53SJohannes Weiner 		set_page_private(pages[i], (unsigned long)memcg);
116671e3aac0SAndrea Arcangeli 	}
116771e3aac0SAndrea Arcangeli 
116871e3aac0SAndrea Arcangeli 	for (i = 0; i < HPAGE_PMD_NR; i++) {
116971e3aac0SAndrea Arcangeli 		copy_user_highpage(pages[i], page + i,
11700089e485SHillf Danton 				   haddr + PAGE_SIZE * i, vma);
117171e3aac0SAndrea Arcangeli 		__SetPageUptodate(pages[i]);
117271e3aac0SAndrea Arcangeli 		cond_resched();
117371e3aac0SAndrea Arcangeli 	}
117471e3aac0SAndrea Arcangeli 
1175*ac46d4f3SJérôme Glisse 	mmu_notifier_range_init(&range, vma->vm_mm, haddr,
1176*ac46d4f3SJérôme Glisse 				haddr + HPAGE_PMD_SIZE);
1177*ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
11782ec74c3eSSagi Grimberg 
117982b0f8c3SJan Kara 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
118082b0f8c3SJan Kara 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
118171e3aac0SAndrea Arcangeli 		goto out_free_pages;
1182309381feSSasha Levin 	VM_BUG_ON_PAGE(!PageHead(page), page);
118371e3aac0SAndrea Arcangeli 
11840f10851eSJérôme Glisse 	/*
11850f10851eSJérôme Glisse 	 * Leave the pmd empty until the ptes are filled. Note that we must
11860f10851eSJérôme Glisse 	 * notify here because a concurrent CPU thread might write to the new
11870f10851eSJérôme Glisse 	 * pages before mmu_notifier_invalidate_range_end() runs, which can
11880f10851eSJérôme Glisse 	 * lead to a device seeing memory writes in a different order than the CPU.
11890f10851eSJérôme Glisse 	 *
1190ad56b738SMike Rapoport 	 * See Documentation/vm/mmu_notifier.rst
11910f10851eSJérôme Glisse 	 */
119282b0f8c3SJan Kara 	pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
119371e3aac0SAndrea Arcangeli 
119482b0f8c3SJan Kara 	pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd);
1195bae473a4SKirill A. Shutemov 	pmd_populate(vma->vm_mm, &_pmd, pgtable);
119671e3aac0SAndrea Arcangeli 
119771e3aac0SAndrea Arcangeli 	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
1198bae473a4SKirill A. Shutemov 		pte_t entry;
119971e3aac0SAndrea Arcangeli 		entry = mk_pte(pages[i], vma->vm_page_prot);
120071e3aac0SAndrea Arcangeli 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
120100501b53SJohannes Weiner 		memcg = (void *)page_private(pages[i]);
120200501b53SJohannes Weiner 		set_page_private(pages[i], 0);
120382b0f8c3SJan Kara 		page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false);
1204f627c2f5SKirill A. Shutemov 		mem_cgroup_commit_charge(pages[i], memcg, false, false);
120500501b53SJohannes Weiner 		lru_cache_add_active_or_unevictable(pages[i], vma);
120682b0f8c3SJan Kara 		vmf->pte = pte_offset_map(&_pmd, haddr);
120782b0f8c3SJan Kara 		VM_BUG_ON(!pte_none(*vmf->pte));
120882b0f8c3SJan Kara 		set_pte_at(vma->vm_mm, haddr, vmf->pte, entry);
120982b0f8c3SJan Kara 		pte_unmap(vmf->pte);
121071e3aac0SAndrea Arcangeli 	}
121171e3aac0SAndrea Arcangeli 	kfree(pages);
121271e3aac0SAndrea Arcangeli 
121371e3aac0SAndrea Arcangeli 	smp_wmb(); /* make pte visible before pmd */
121482b0f8c3SJan Kara 	pmd_populate(vma->vm_mm, vmf->pmd, pgtable);
1215d281ee61SKirill A. Shutemov 	page_remove_rmap(page, true);
121682b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
121771e3aac0SAndrea Arcangeli 
12184645b9feSJérôme Glisse 	/*
12194645b9feSJérôme Glisse 	 * No need to double call mmu_notifier->invalidate_range() callback as
12204645b9feSJérôme Glisse 	 * the above pmdp_huge_clear_flush_notify() did already call it.
12214645b9feSJérôme Glisse 	 */
1222*ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_only_end(&range);
12232ec74c3eSSagi Grimberg 
122471e3aac0SAndrea Arcangeli 	ret |= VM_FAULT_WRITE;
122571e3aac0SAndrea Arcangeli 	put_page(page);
122671e3aac0SAndrea Arcangeli 
122771e3aac0SAndrea Arcangeli out:
122871e3aac0SAndrea Arcangeli 	return ret;
122971e3aac0SAndrea Arcangeli 
123071e3aac0SAndrea Arcangeli out_free_pages:
123182b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
1232*ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_end(&range);
1233b9bbfbe3SAndrea Arcangeli 	for (i = 0; i < HPAGE_PMD_NR; i++) {
123400501b53SJohannes Weiner 		memcg = (void *)page_private(pages[i]);
123500501b53SJohannes Weiner 		set_page_private(pages[i], 0);
1236f627c2f5SKirill A. Shutemov 		mem_cgroup_cancel_charge(pages[i], memcg, false);
123771e3aac0SAndrea Arcangeli 		put_page(pages[i]);
1238b9bbfbe3SAndrea Arcangeli 	}
123971e3aac0SAndrea Arcangeli 	kfree(pages);
124071e3aac0SAndrea Arcangeli 	goto out;
124171e3aac0SAndrea Arcangeli }
124271e3aac0SAndrea Arcangeli 
12432b740303SSouptick Joarder vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
124471e3aac0SAndrea Arcangeli {
124582b0f8c3SJan Kara 	struct vm_area_struct *vma = vmf->vma;
124693b4796dSKirill A. Shutemov 	struct page *page = NULL, *new_page;
124700501b53SJohannes Weiner 	struct mem_cgroup *memcg;
124882b0f8c3SJan Kara 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1249*ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
12503b363692SMichal Hocko 	gfp_t huge_gfp;			/* for allocation and charge */
12512b740303SSouptick Joarder 	vm_fault_t ret = 0;
125271e3aac0SAndrea Arcangeli 
125382b0f8c3SJan Kara 	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
125481d1b09cSSasha Levin 	VM_BUG_ON_VMA(!vma->anon_vma, vma);
125593b4796dSKirill A. Shutemov 	if (is_huge_zero_pmd(orig_pmd))
125693b4796dSKirill A. Shutemov 		goto alloc;
125782b0f8c3SJan Kara 	spin_lock(vmf->ptl);
125882b0f8c3SJan Kara 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
125971e3aac0SAndrea Arcangeli 		goto out_unlock;
126071e3aac0SAndrea Arcangeli 
126171e3aac0SAndrea Arcangeli 	page = pmd_page(orig_pmd);
1262309381feSSasha Levin 	VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
12631f25fe20SKirill A. Shutemov 	/*
12641f25fe20SKirill A. Shutemov 	 * We can only reuse the page if nobody else maps the huge page or
12656d0a07edSAndrea Arcangeli 	 * any part of it.
12661f25fe20SKirill A. Shutemov 	 */
1267ba3c4ce6SHuang Ying 	if (!trylock_page(page)) {
1268ba3c4ce6SHuang Ying 		get_page(page);
1269ba3c4ce6SHuang Ying 		spin_unlock(vmf->ptl);
1270ba3c4ce6SHuang Ying 		lock_page(page);
1271ba3c4ce6SHuang Ying 		spin_lock(vmf->ptl);
1272ba3c4ce6SHuang Ying 		if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1273ba3c4ce6SHuang Ying 			unlock_page(page);
1274ba3c4ce6SHuang Ying 			put_page(page);
1275ba3c4ce6SHuang Ying 			goto out_unlock;
1276ba3c4ce6SHuang Ying 		}
1277ba3c4ce6SHuang Ying 		put_page(page);
1278ba3c4ce6SHuang Ying 	}
1279ba3c4ce6SHuang Ying 	if (reuse_swap_page(page, NULL)) {
128071e3aac0SAndrea Arcangeli 		pmd_t entry;
128171e3aac0SAndrea Arcangeli 		entry = pmd_mkyoung(orig_pmd);
1282f55e1014SLinus Torvalds 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
128382b0f8c3SJan Kara 		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry,  1))
128482b0f8c3SJan Kara 			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
128571e3aac0SAndrea Arcangeli 		ret |= VM_FAULT_WRITE;
1286ba3c4ce6SHuang Ying 		unlock_page(page);
128771e3aac0SAndrea Arcangeli 		goto out_unlock;
128871e3aac0SAndrea Arcangeli 	}
1289ba3c4ce6SHuang Ying 	unlock_page(page);
1290ddc58f27SKirill A. Shutemov 	get_page(page);
129182b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
129293b4796dSKirill A. Shutemov alloc:
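	/*
	 * The old page cannot be reused in place (it is shared, or it is the
	 * huge zero page): try to allocate a fresh huge page to copy into,
	 * falling back to splitting the pmd or to the small-page copy path
	 * if the allocation fails.
	 */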
129371e3aac0SAndrea Arcangeli 	if (transparent_hugepage_enabled(vma) &&
1294077fcf11SAneesh Kumar K.V 	    !transparent_hugepage_debug_cow()) {
1295356ff8a9SDavid Rientjes 		huge_gfp = alloc_hugepage_direct_gfpmask(vma);
1296356ff8a9SDavid Rientjes 		new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
1297077fcf11SAneesh Kumar K.V 	} else
129871e3aac0SAndrea Arcangeli 		new_page = NULL;
129971e3aac0SAndrea Arcangeli 
13009a982250SKirill A. Shutemov 	if (likely(new_page)) {
13019a982250SKirill A. Shutemov 		prep_transhuge_page(new_page);
13029a982250SKirill A. Shutemov 	} else {
1303eecc1e42SHugh Dickins 		if (!page) {
130482b0f8c3SJan Kara 			split_huge_pmd(vma, vmf->pmd, vmf->address);
1305e9b71ca9SKirill A. Shutemov 			ret |= VM_FAULT_FALLBACK;
130693b4796dSKirill A. Shutemov 		} else {
130782b0f8c3SJan Kara 			ret = do_huge_pmd_wp_page_fallback(vmf, orig_pmd, page);
13089845cbbdSKirill A. Shutemov 			if (ret & VM_FAULT_OOM) {
130982b0f8c3SJan Kara 				split_huge_pmd(vma, vmf->pmd, vmf->address);
13109845cbbdSKirill A. Shutemov 				ret |= VM_FAULT_FALLBACK;
13119845cbbdSKirill A. Shutemov 			}
1312ddc58f27SKirill A. Shutemov 			put_page(page);
131393b4796dSKirill A. Shutemov 		}
131417766ddeSDavid Rientjes 		count_vm_event(THP_FAULT_FALLBACK);
131571e3aac0SAndrea Arcangeli 		goto out;
131671e3aac0SAndrea Arcangeli 	}
131771e3aac0SAndrea Arcangeli 
13182cf85583STejun Heo 	if (unlikely(mem_cgroup_try_charge_delay(new_page, vma->vm_mm,
13192a70f6a7SMichal Hocko 					huge_gfp, &memcg, true))) {
1320b9bbfbe3SAndrea Arcangeli 		put_page(new_page);
132182b0f8c3SJan Kara 		split_huge_pmd(vma, vmf->pmd, vmf->address);
1322bae473a4SKirill A. Shutemov 		if (page)
1323ddc58f27SKirill A. Shutemov 			put_page(page);
13249845cbbdSKirill A. Shutemov 		ret |= VM_FAULT_FALLBACK;
132517766ddeSDavid Rientjes 		count_vm_event(THP_FAULT_FALLBACK);
1326b9bbfbe3SAndrea Arcangeli 		goto out;
1327b9bbfbe3SAndrea Arcangeli 	}
1328b9bbfbe3SAndrea Arcangeli 
132917766ddeSDavid Rientjes 	count_vm_event(THP_FAULT_ALLOC);
133017766ddeSDavid Rientjes 
1331eecc1e42SHugh Dickins 	if (!page)
1332c79b57e4SHuang Ying 		clear_huge_page(new_page, vmf->address, HPAGE_PMD_NR);
133393b4796dSKirill A. Shutemov 	else
1334c9f4cd71SHuang Ying 		copy_user_huge_page(new_page, page, vmf->address,
1335c9f4cd71SHuang Ying 				    vma, HPAGE_PMD_NR);
133671e3aac0SAndrea Arcangeli 	__SetPageUptodate(new_page);
133771e3aac0SAndrea Arcangeli 
1338*ac46d4f3SJérôme Glisse 	mmu_notifier_range_init(&range, vma->vm_mm, haddr,
1339*ac46d4f3SJérôme Glisse 				haddr + HPAGE_PMD_SIZE);
1340*ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
13412ec74c3eSSagi Grimberg 
134282b0f8c3SJan Kara 	spin_lock(vmf->ptl);
134393b4796dSKirill A. Shutemov 	if (page)
1344ddc58f27SKirill A. Shutemov 		put_page(page);
134582b0f8c3SJan Kara 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
134682b0f8c3SJan Kara 		spin_unlock(vmf->ptl);
1347f627c2f5SKirill A. Shutemov 		mem_cgroup_cancel_charge(new_page, memcg, true);
134871e3aac0SAndrea Arcangeli 		put_page(new_page);
13492ec74c3eSSagi Grimberg 		goto out_mn;
1350b9bbfbe3SAndrea Arcangeli 	} else {
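		/*
		 * The pmd did not change under us: install the newly copied
		 * huge page and retire the old mapping, updating the rmap and
		 * the anonymous page counters.
		 */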
135171e3aac0SAndrea Arcangeli 		pmd_t entry;
13523122359aSKirill A. Shutemov 		entry = mk_huge_pmd(new_page, vma->vm_page_prot);
1353f55e1014SLinus Torvalds 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
135482b0f8c3SJan Kara 		pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
1355d281ee61SKirill A. Shutemov 		page_add_new_anon_rmap(new_page, vma, haddr, true);
1356f627c2f5SKirill A. Shutemov 		mem_cgroup_commit_charge(new_page, memcg, false, true);
135700501b53SJohannes Weiner 		lru_cache_add_active_or_unevictable(new_page, vma);
135882b0f8c3SJan Kara 		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
135982b0f8c3SJan Kara 		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1360eecc1e42SHugh Dickins 		if (!page) {
1361bae473a4SKirill A. Shutemov 			add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
136297ae1749SKirill A. Shutemov 		} else {
1363309381feSSasha Levin 			VM_BUG_ON_PAGE(!PageHead(page), page);
1364d281ee61SKirill A. Shutemov 			page_remove_rmap(page, true);
136571e3aac0SAndrea Arcangeli 			put_page(page);
136693b4796dSKirill A. Shutemov 		}
136771e3aac0SAndrea Arcangeli 		ret |= VM_FAULT_WRITE;
136871e3aac0SAndrea Arcangeli 	}
136982b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
13702ec74c3eSSagi Grimberg out_mn:
13714645b9feSJérôme Glisse 	/*
13724645b9feSJérôme Glisse 	 * No need to double call mmu_notifier->invalidate_range() callback as
13734645b9feSJérôme Glisse 	 * the above pmdp_huge_clear_flush_notify() did already call it.
13744645b9feSJérôme Glisse 	 */
1375*ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_only_end(&range);
13762ec74c3eSSagi Grimberg out:
13772ec74c3eSSagi Grimberg 	return ret;
137871e3aac0SAndrea Arcangeli out_unlock:
137982b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
138071e3aac0SAndrea Arcangeli 	return ret;
138171e3aac0SAndrea Arcangeli }
138271e3aac0SAndrea Arcangeli 
13838310d48bSKeno Fischer /*
13848310d48bSKeno Fischer  * FOLL_FORCE can write to even unwritable pmd's, but only
13858310d48bSKeno Fischer  * after we've gone through a COW cycle and they are dirty.
13868310d48bSKeno Fischer  */
13878310d48bSKeno Fischer static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
13888310d48bSKeno Fischer {
1389f6f37321SLinus Torvalds 	return pmd_write(pmd) ||
13908310d48bSKeno Fischer 	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
13918310d48bSKeno Fischer }
13928310d48bSKeno Fischer 
1393b676b293SDavid Rientjes struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
139471e3aac0SAndrea Arcangeli 				   unsigned long addr,
139571e3aac0SAndrea Arcangeli 				   pmd_t *pmd,
139671e3aac0SAndrea Arcangeli 				   unsigned int flags)
139771e3aac0SAndrea Arcangeli {
1398b676b293SDavid Rientjes 	struct mm_struct *mm = vma->vm_mm;
139971e3aac0SAndrea Arcangeli 	struct page *page = NULL;
140071e3aac0SAndrea Arcangeli 
1401c4088ebdSKirill A. Shutemov 	assert_spin_locked(pmd_lockptr(mm, pmd));
140271e3aac0SAndrea Arcangeli 
14038310d48bSKeno Fischer 	if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
140471e3aac0SAndrea Arcangeli 		goto out;
140571e3aac0SAndrea Arcangeli 
140685facf25SKirill A. Shutemov 	/* Avoid dumping huge zero page */
140785facf25SKirill A. Shutemov 	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
140885facf25SKirill A. Shutemov 		return ERR_PTR(-EFAULT);
140985facf25SKirill A. Shutemov 
14102b4847e7SMel Gorman 	/* Full NUMA hinting faults to serialise migration in fault paths */
14118a0516edSMel Gorman 	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
14122b4847e7SMel Gorman 		goto out;
14132b4847e7SMel Gorman 
141471e3aac0SAndrea Arcangeli 	page = pmd_page(*pmd);
1415ca120cf6SDan Williams 	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
14163565fce3SDan Williams 	if (flags & FOLL_TOUCH)
1417a8f97366SKirill A. Shutemov 		touch_pmd(vma, addr, pmd, flags);
1418de60f5f1SEric B Munson 	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
1419e90309c9SKirill A. Shutemov 		/*
1420e90309c9SKirill A. Shutemov 		 * We don't mlock() pte-mapped THPs. This way we can avoid
1421e90309c9SKirill A. Shutemov 		 * leaking mlocked pages into non-VM_LOCKED VMAs.
1422e90309c9SKirill A. Shutemov 		 *
14239a73f61bSKirill A. Shutemov 		 * For anon THP:
14249a73f61bSKirill A. Shutemov 		 *
1425e90309c9SKirill A. Shutemov 		 * In most cases the pmd is the only mapping of the page as we
1426e90309c9SKirill A. Shutemov 		 * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for
1427e90309c9SKirill A. Shutemov 		 * writable private mappings in populate_vma_page_range().
1428e90309c9SKirill A. Shutemov 		 *
1429e90309c9SKirill A. Shutemov 		 * The only scenario when we have the page shared here is if we
1430e90309c9SKirill A. Shutemov 		 * mlocking read-only mapping shared over fork(). We skip
1431e90309c9SKirill A. Shutemov 		 * are mlocking a read-only mapping shared over fork(). We skip
14329a73f61bSKirill A. Shutemov 		 *
14339a73f61bSKirill A. Shutemov 		 * For file THP:
14349a73f61bSKirill A. Shutemov 		 *
14359a73f61bSKirill A. Shutemov 		 * We can expect PageDoubleMap() to be stable under page lock:
14369a73f61bSKirill A. Shutemov 		 * for file pages we set it in page_add_file_rmap(), which
14379a73f61bSKirill A. Shutemov 		 * requires page to be locked.
1438e90309c9SKirill A. Shutemov 		 */
14399a73f61bSKirill A. Shutemov 
14409a73f61bSKirill A. Shutemov 		if (PageAnon(page) && compound_mapcount(page) != 1)
14419a73f61bSKirill A. Shutemov 			goto skip_mlock;
14429a73f61bSKirill A. Shutemov 		if (PageDoubleMap(page) || !page->mapping)
14439a73f61bSKirill A. Shutemov 			goto skip_mlock;
14449a73f61bSKirill A. Shutemov 		if (!trylock_page(page))
14459a73f61bSKirill A. Shutemov 			goto skip_mlock;
1446b676b293SDavid Rientjes 		lru_add_drain();
14479a73f61bSKirill A. Shutemov 		if (page->mapping && !PageDoubleMap(page))
1448b676b293SDavid Rientjes 			mlock_vma_page(page);
1449b676b293SDavid Rientjes 		unlock_page(page);
1450b676b293SDavid Rientjes 	}
14519a73f61bSKirill A. Shutemov skip_mlock:
145271e3aac0SAndrea Arcangeli 	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
1453ca120cf6SDan Williams 	VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
145471e3aac0SAndrea Arcangeli 	if (flags & FOLL_GET)
1455ddc58f27SKirill A. Shutemov 		get_page(page);
145671e3aac0SAndrea Arcangeli 
145771e3aac0SAndrea Arcangeli out:
145871e3aac0SAndrea Arcangeli 	return page;
145971e3aac0SAndrea Arcangeli }
146071e3aac0SAndrea Arcangeli 
1461d10e63f2SMel Gorman /* NUMA hinting page fault entry point for trans huge pmds */
14622b740303SSouptick Joarder vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
1463d10e63f2SMel Gorman {
146482b0f8c3SJan Kara 	struct vm_area_struct *vma = vmf->vma;
1465b8916634SMel Gorman 	struct anon_vma *anon_vma = NULL;
1466b32967ffSMel Gorman 	struct page *page;
146782b0f8c3SJan Kara 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
14688191acbdSMel Gorman 	int page_nid = -1, this_nid = numa_node_id();
146990572890SPeter Zijlstra 	int target_nid, last_cpupid = -1;
14708191acbdSMel Gorman 	bool page_locked;
14718191acbdSMel Gorman 	bool migrated = false;
1472b191f9b1SMel Gorman 	bool was_writable;
14736688cc05SPeter Zijlstra 	int flags = 0;
1474d10e63f2SMel Gorman 
147582b0f8c3SJan Kara 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
147682b0f8c3SJan Kara 	if (unlikely(!pmd_same(pmd, *vmf->pmd)))
1477d10e63f2SMel Gorman 		goto out_unlock;
1478d10e63f2SMel Gorman 
1479de466bd6SMel Gorman 	/*
1480de466bd6SMel Gorman 	 * If there are potential migrations, wait for completion and retry
1481de466bd6SMel Gorman 	 * without disrupting NUMA hinting information. Do not relock and
1482de466bd6SMel Gorman 	 * check_same as the page may no longer be mapped.
1483de466bd6SMel Gorman 	 */
148482b0f8c3SJan Kara 	if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
148582b0f8c3SJan Kara 		page = pmd_page(*vmf->pmd);
14863c226c63SMark Rutland 		if (!get_page_unless_zero(page))
14873c226c63SMark Rutland 			goto out_unlock;
148882b0f8c3SJan Kara 		spin_unlock(vmf->ptl);
14899a1ea439SHugh Dickins 		put_and_wait_on_page_locked(page);
1490de466bd6SMel Gorman 		goto out;
1491de466bd6SMel Gorman 	}
1492de466bd6SMel Gorman 
1493d10e63f2SMel Gorman 	page = pmd_page(pmd);
1494a1a46184SMel Gorman 	BUG_ON(is_huge_zero_page(page));
14958191acbdSMel Gorman 	page_nid = page_to_nid(page);
149690572890SPeter Zijlstra 	last_cpupid = page_cpupid_last(page);
149703c5a6e1SMel Gorman 	count_vm_numa_event(NUMA_HINT_FAULTS);
149804bb2f94SRik van Riel 	if (page_nid == this_nid) {
149903c5a6e1SMel Gorman 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
150004bb2f94SRik van Riel 		flags |= TNF_FAULT_LOCAL;
150104bb2f94SRik van Riel 	}
15024daae3b4SMel Gorman 
1503bea66fbdSMel Gorman 	/* See similar comment in do_numa_page for explanation */
1504288bc549SAneesh Kumar K.V 	if (!pmd_savedwrite(pmd))
15056688cc05SPeter Zijlstra 		flags |= TNF_NO_GROUP;
15066688cc05SPeter Zijlstra 
15076688cc05SPeter Zijlstra 	/*
1508ff9042b1SMel Gorman 	 * Acquire the page lock to serialise THP migrations but avoid dropping
1509ff9042b1SMel Gorman 	 * page_table_lock if at all possible
1510ff9042b1SMel Gorman 	 */
1511b8916634SMel Gorman 	page_locked = trylock_page(page);
1512b8916634SMel Gorman 	target_nid = mpol_misplaced(page, vma, haddr);
1513b8916634SMel Gorman 	if (target_nid == -1) {
1514b8916634SMel Gorman 		/* If the page was locked, there are no parallel migrations */
1515a54a407fSMel Gorman 		if (page_locked)
1516b8916634SMel Gorman 			goto clear_pmdnuma;
15172b4847e7SMel Gorman 	}
1518cbee9f88SPeter Zijlstra 
1519de466bd6SMel Gorman 	/* Migration could have started since the pmd_trans_migrating check */
15202b4847e7SMel Gorman 	if (!page_locked) {
15213c226c63SMark Rutland 		page_nid = -1;
15223c226c63SMark Rutland 		if (!get_page_unless_zero(page))
15233c226c63SMark Rutland 			goto out_unlock;
152482b0f8c3SJan Kara 		spin_unlock(vmf->ptl);
15259a1ea439SHugh Dickins 		put_and_wait_on_page_locked(page);
1526b8916634SMel Gorman 		goto out;
1527b8916634SMel Gorman 	}
1528b8916634SMel Gorman 
15292b4847e7SMel Gorman 	/*
15302b4847e7SMel Gorman 	 * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
15312b4847e7SMel Gorman 	 * to serialises splits
15322b4847e7SMel Gorman 	 * to serialise splits.
1533b8916634SMel Gorman 	get_page(page);
153482b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
1535b8916634SMel Gorman 	anon_vma = page_lock_anon_vma_read(page);
1536b32967ffSMel Gorman 
1537c69307d5SPeter Zijlstra 	/* Confirm the PMD did not change while page_table_lock was released */
153882b0f8c3SJan Kara 	spin_lock(vmf->ptl);
153982b0f8c3SJan Kara 	if (unlikely(!pmd_same(pmd, *vmf->pmd))) {
1540b32967ffSMel Gorman 		unlock_page(page);
1541b32967ffSMel Gorman 		put_page(page);
1542a54a407fSMel Gorman 		page_nid = -1;
1543b32967ffSMel Gorman 		goto out_unlock;
1544b32967ffSMel Gorman 	}
1545ff9042b1SMel Gorman 
1546c3a489caSMel Gorman 	/* Bail if we fail to protect against THP splits for any reason */
1547c3a489caSMel Gorman 	if (unlikely(!anon_vma)) {
1548c3a489caSMel Gorman 		put_page(page);
1549c3a489caSMel Gorman 		page_nid = -1;
1550c3a489caSMel Gorman 		goto clear_pmdnuma;
1551c3a489caSMel Gorman 	}
1552c3a489caSMel Gorman 
1553a54a407fSMel Gorman 	/*
15548b1b436dSPeter Zijlstra 	 * Since we took the NUMA fault, we must have observed the !accessible
15558b1b436dSPeter Zijlstra 	 * bit. Make sure all other CPUs agree with that, to avoid them
15568b1b436dSPeter Zijlstra 	 * modifying the page we're about to migrate.
15578b1b436dSPeter Zijlstra 	 *
15588b1b436dSPeter Zijlstra 	 * Must be done under PTL such that we'll observe the relevant
1559ccde85baSPeter Zijlstra 	 * inc_tlb_flush_pending().
1560ccde85baSPeter Zijlstra 	 *
1561ccde85baSPeter Zijlstra 	 * We are not sure a pending tlb flush here is for a huge page
1562ccde85baSPeter Zijlstra 	 * mapping or not. Hence use the tlb range variant
15638b1b436dSPeter Zijlstra 	 */
15647066f0f9SAndrea Arcangeli 	if (mm_tlb_flush_pending(vma->vm_mm)) {
1565ccde85baSPeter Zijlstra 		flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
15667066f0f9SAndrea Arcangeli 		/*
15677066f0f9SAndrea Arcangeli 		 * change_huge_pmd() released the pmd lock before
15687066f0f9SAndrea Arcangeli 		 * invalidating the secondary MMUs sharing the primary
15697066f0f9SAndrea Arcangeli 		 * MMU pagetables (with ->invalidate_range()). The
15707066f0f9SAndrea Arcangeli 		 * mmu_notifier_invalidate_range_end() (which
15717066f0f9SAndrea Arcangeli 		 * internally calls ->invalidate_range()) in
15727066f0f9SAndrea Arcangeli 		 * change_pmd_range() will run after us, so we can't
15737066f0f9SAndrea Arcangeli 		 * rely on it here and we need an explicit invalidate.
15747066f0f9SAndrea Arcangeli 		 */
15757066f0f9SAndrea Arcangeli 		mmu_notifier_invalidate_range(vma->vm_mm, haddr,
15767066f0f9SAndrea Arcangeli 					      haddr + HPAGE_PMD_SIZE);
15777066f0f9SAndrea Arcangeli 	}
15788b1b436dSPeter Zijlstra 
15798b1b436dSPeter Zijlstra 	/*
1580a54a407fSMel Gorman 	 * Migrate the THP to the requested node, returns with page unlocked
15818a0516edSMel Gorman 	 * and access rights restored.
1582a54a407fSMel Gorman 	 */
158382b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
15848b1b436dSPeter Zijlstra 
1585bae473a4SKirill A. Shutemov 	migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
158682b0f8c3SJan Kara 				vmf->pmd, pmd, vmf->address, page, target_nid);
15876688cc05SPeter Zijlstra 	if (migrated) {
15886688cc05SPeter Zijlstra 		flags |= TNF_MIGRATED;
15898191acbdSMel Gorman 		page_nid = target_nid;
1590074c2381SMel Gorman 	} else
1591074c2381SMel Gorman 		flags |= TNF_MIGRATE_FAIL;
1592b32967ffSMel Gorman 
15938191acbdSMel Gorman 	goto out;
15944daae3b4SMel Gorman clear_pmdnuma:
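	/*
	 * Not migrating: restore the pmd's protections, including write
	 * permission if it had been saved, so the task can make progress.
	 */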
1595a54a407fSMel Gorman 	BUG_ON(!PageLocked(page));
1596288bc549SAneesh Kumar K.V 	was_writable = pmd_savedwrite(pmd);
15974d942466SMel Gorman 	pmd = pmd_modify(pmd, vma->vm_page_prot);
1598b7b04004SMel Gorman 	pmd = pmd_mkyoung(pmd);
1599b191f9b1SMel Gorman 	if (was_writable)
1600b191f9b1SMel Gorman 		pmd = pmd_mkwrite(pmd);
160182b0f8c3SJan Kara 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
160282b0f8c3SJan Kara 	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1603a54a407fSMel Gorman 	unlock_page(page);
1604d10e63f2SMel Gorman out_unlock:
160582b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
1606b8916634SMel Gorman 
1607b8916634SMel Gorman out:
1608b8916634SMel Gorman 	if (anon_vma)
1609b8916634SMel Gorman 		page_unlock_anon_vma_read(anon_vma);
1610b8916634SMel Gorman 
16118191acbdSMel Gorman 	if (page_nid != -1)
161282b0f8c3SJan Kara 		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
16139a8b300fSAneesh Kumar K.V 				flags);
16148191acbdSMel Gorman 
1615d10e63f2SMel Gorman 	return 0;
1616d10e63f2SMel Gorman }
1617d10e63f2SMel Gorman 
1618319904adSHuang Ying /*
1619319904adSHuang Ying  * Return true if we do MADV_FREE successfully on entire pmd page.
1620319904adSHuang Ying  * Otherwise, return false.
1621319904adSHuang Ying  */
1622319904adSHuang Ying bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1623b8d3c4c3SMinchan Kim 		pmd_t *pmd, unsigned long addr, unsigned long next)
1624b8d3c4c3SMinchan Kim {
1625b8d3c4c3SMinchan Kim 	spinlock_t *ptl;
1626b8d3c4c3SMinchan Kim 	pmd_t orig_pmd;
1627b8d3c4c3SMinchan Kim 	struct page *page;
1628b8d3c4c3SMinchan Kim 	struct mm_struct *mm = tlb->mm;
1629319904adSHuang Ying 	bool ret = false;
1630b8d3c4c3SMinchan Kim 
163107e32661SAneesh Kumar K.V 	tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE);
163207e32661SAneesh Kumar K.V 
1633b6ec57f4SKirill A. Shutemov 	ptl = pmd_trans_huge_lock(pmd, vma);
1634b6ec57f4SKirill A. Shutemov 	if (!ptl)
163525eedabeSLinus Torvalds 		goto out_unlocked;
1636b8d3c4c3SMinchan Kim 
1637b8d3c4c3SMinchan Kim 	orig_pmd = *pmd;
1638319904adSHuang Ying 	if (is_huge_zero_pmd(orig_pmd))
1639b8d3c4c3SMinchan Kim 		goto out;
1640b8d3c4c3SMinchan Kim 
164184c3fc4eSZi Yan 	if (unlikely(!pmd_present(orig_pmd))) {
164284c3fc4eSZi Yan 		VM_BUG_ON(thp_migration_supported() &&
164384c3fc4eSZi Yan 				  !is_pmd_migration_entry(orig_pmd));
164484c3fc4eSZi Yan 		goto out;
164584c3fc4eSZi Yan 	}
164684c3fc4eSZi Yan 
1647b8d3c4c3SMinchan Kim 	page = pmd_page(orig_pmd);
1648b8d3c4c3SMinchan Kim 	/*
1649b8d3c4c3SMinchan Kim 	 * If other processes are mapping this page, we can't discard
1650b8d3c4c3SMinchan Kim 	 * the page unless they all do MADV_FREE so let's skip the page.
1651b8d3c4c3SMinchan Kim 	 */
1652b8d3c4c3SMinchan Kim 	if (page_mapcount(page) != 1)
1653b8d3c4c3SMinchan Kim 		goto out;
1654b8d3c4c3SMinchan Kim 
1655b8d3c4c3SMinchan Kim 	if (!trylock_page(page))
1656b8d3c4c3SMinchan Kim 		goto out;
1657b8d3c4c3SMinchan Kim 
1658b8d3c4c3SMinchan Kim 	/*
1659b8d3c4c3SMinchan Kim 	 * If the user wants to discard only part of the THP, split it so that
1660b8d3c4c3SMinchan Kim 	 * MADV_FREE will deactivate only those pages.
1661b8d3c4c3SMinchan Kim 	 */
1662b8d3c4c3SMinchan Kim 	if (next - addr != HPAGE_PMD_SIZE) {
1663b8d3c4c3SMinchan Kim 		get_page(page);
1664b8d3c4c3SMinchan Kim 		spin_unlock(ptl);
16659818b8cdSHuang Ying 		split_huge_page(page);
1666b8d3c4c3SMinchan Kim 		unlock_page(page);
1667bbf29ffcSKirill A. Shutemov 		put_page(page);
1668b8d3c4c3SMinchan Kim 		goto out_unlocked;
1669b8d3c4c3SMinchan Kim 	}
1670b8d3c4c3SMinchan Kim 
1671b8d3c4c3SMinchan Kim 	if (PageDirty(page))
1672b8d3c4c3SMinchan Kim 		ClearPageDirty(page);
1673b8d3c4c3SMinchan Kim 	unlock_page(page);
1674b8d3c4c3SMinchan Kim 
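	/*
	 * Clear the young and dirty bits so reclaim treats the THP as clean
	 * and cold; MADV_FREE lets it be discarded unless it is written to
	 * again before reclaim gets to it.
	 */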
1675b8d3c4c3SMinchan Kim 	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
167658ceeb6bSKirill A. Shutemov 		pmdp_invalidate(vma, addr, pmd);
1677b8d3c4c3SMinchan Kim 		orig_pmd = pmd_mkold(orig_pmd);
1678b8d3c4c3SMinchan Kim 		orig_pmd = pmd_mkclean(orig_pmd);
1679b8d3c4c3SMinchan Kim 
1680b8d3c4c3SMinchan Kim 		set_pmd_at(mm, addr, pmd, orig_pmd);
1681b8d3c4c3SMinchan Kim 		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1682b8d3c4c3SMinchan Kim 	}
1683802a3a92SShaohua Li 
1684802a3a92SShaohua Li 	mark_page_lazyfree(page);
1685319904adSHuang Ying 	ret = true;
1686b8d3c4c3SMinchan Kim out:
1687b8d3c4c3SMinchan Kim 	spin_unlock(ptl);
1688b8d3c4c3SMinchan Kim out_unlocked:
1689b8d3c4c3SMinchan Kim 	return ret;
1690b8d3c4c3SMinchan Kim }
1691b8d3c4c3SMinchan Kim 
1692953c66c2SAneesh Kumar K.V static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
1693953c66c2SAneesh Kumar K.V {
1694953c66c2SAneesh Kumar K.V 	pgtable_t pgtable;
1695953c66c2SAneesh Kumar K.V 
1696953c66c2SAneesh Kumar K.V 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1697953c66c2SAneesh Kumar K.V 	pte_free(mm, pgtable);
1698c4812909SKirill A. Shutemov 	mm_dec_nr_ptes(mm);
1699953c66c2SAneesh Kumar K.V }
1700953c66c2SAneesh Kumar K.V 
170171e3aac0SAndrea Arcangeli int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1702f21760b1SShaohua Li 		 pmd_t *pmd, unsigned long addr)
170371e3aac0SAndrea Arcangeli {
1704f5c8ad47SDavid Miller 	pmd_t orig_pmd;
1705da146769SKirill A. Shutemov 	spinlock_t *ptl;
1706da146769SKirill A. Shutemov 
170707e32661SAneesh Kumar K.V 	tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE);
170807e32661SAneesh Kumar K.V 
1709b6ec57f4SKirill A. Shutemov 	ptl = __pmd_trans_huge_lock(pmd, vma);
1710b6ec57f4SKirill A. Shutemov 	if (!ptl)
1711da146769SKirill A. Shutemov 		return 0;
1712a6bf2bb0SAneesh Kumar K.V 	/*
1713a6bf2bb0SAneesh Kumar K.V 	 * For architectures like ppc64 we look at deposited pgtable
17148809aa2dSAneesh Kumar K.V 	 * when calling pmdp_huge_get_and_clear. So do the
1715a6bf2bb0SAneesh Kumar K.V 	 * pgtable_trans_huge_withdraw after finishing pmdp related
1716a6bf2bb0SAneesh Kumar K.V 	 * operations.
1717a6bf2bb0SAneesh Kumar K.V 	 */
17188809aa2dSAneesh Kumar K.V 	orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
1719fcbe08d6SMartin Schwidefsky 			tlb->fullmm);
1720f21760b1SShaohua Li 	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
17214897c765SMatthew Wilcox 	if (vma_is_dax(vma)) {
17223b6521f5SOliver O'Halloran 		if (arch_needs_pgtable_deposit())
17233b6521f5SOliver O'Halloran 			zap_deposited_table(tlb->mm, pmd);
17244897c765SMatthew Wilcox 		spin_unlock(ptl);
1725da146769SKirill A. Shutemov 		if (is_huge_zero_pmd(orig_pmd))
1726c0f2e176SAneesh Kumar K.V 			tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
1727da146769SKirill A. Shutemov 	} else if (is_huge_zero_pmd(orig_pmd)) {
1728c14a6eb4SOliver O'Halloran 		zap_deposited_table(tlb->mm, pmd);
1729bf929152SKirill A. Shutemov 		spin_unlock(ptl);
1730c0f2e176SAneesh Kumar K.V 		tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
1731479f0abbSKirill A. Shutemov 	} else {
1732616b8371SZi Yan 		struct page *page = NULL;
1733616b8371SZi Yan 		int flush_needed = 1;
1734616b8371SZi Yan 
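		/*
		 * A present pmd maps a real THP; a non-present one must be a
		 * pmd migration entry, which needs its counters updated but no
		 * TLB flush because no hardware translation was installed.
		 */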
1735616b8371SZi Yan 		if (pmd_present(orig_pmd)) {
1736616b8371SZi Yan 			page = pmd_page(orig_pmd);
1737d281ee61SKirill A. Shutemov 			page_remove_rmap(page, true);
1738309381feSSasha Levin 			VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1739309381feSSasha Levin 			VM_BUG_ON_PAGE(!PageHead(page), page);
1740616b8371SZi Yan 		} else if (thp_migration_supported()) {
1741616b8371SZi Yan 			swp_entry_t entry;
1742616b8371SZi Yan 
1743616b8371SZi Yan 			VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
1744616b8371SZi Yan 			entry = pmd_to_swp_entry(orig_pmd);
1745616b8371SZi Yan 			page = pfn_to_page(swp_offset(entry));
1746616b8371SZi Yan 			flush_needed = 0;
1747616b8371SZi Yan 		} else
1748616b8371SZi Yan 			WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
1749616b8371SZi Yan 
1750b5072380SKirill A. Shutemov 		if (PageAnon(page)) {
1751c14a6eb4SOliver O'Halloran 			zap_deposited_table(tlb->mm, pmd);
1752b5072380SKirill A. Shutemov 			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1753b5072380SKirill A. Shutemov 		} else {
1754953c66c2SAneesh Kumar K.V 			if (arch_needs_pgtable_deposit())
1755953c66c2SAneesh Kumar K.V 				zap_deposited_table(tlb->mm, pmd);
1756fadae295SYang Shi 			add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
1757b5072380SKirill A. Shutemov 		}
1758616b8371SZi Yan 
1759bf929152SKirill A. Shutemov 		spin_unlock(ptl);
1760616b8371SZi Yan 		if (flush_needed)
1761e77b0852SAneesh Kumar K.V 			tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
1762479f0abbSKirill A. Shutemov 	}
1763da146769SKirill A. Shutemov 	return 1;
176471e3aac0SAndrea Arcangeli }
176571e3aac0SAndrea Arcangeli 
17661dd38b6cSAneesh Kumar K.V #ifndef pmd_move_must_withdraw
17671dd38b6cSAneesh Kumar K.V static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
17681dd38b6cSAneesh Kumar K.V 					 spinlock_t *old_pmd_ptl,
17691dd38b6cSAneesh Kumar K.V 					 struct vm_area_struct *vma)
17701dd38b6cSAneesh Kumar K.V {
17711dd38b6cSAneesh Kumar K.V 	/*
17721dd38b6cSAneesh Kumar K.V 	 * With split pmd lock we also need to move preallocated
17731dd38b6cSAneesh Kumar K.V 	 * PTE page table if new_pmd is on a different PMD page table.
17741dd38b6cSAneesh Kumar K.V 	 *
17751dd38b6cSAneesh Kumar K.V 	 * We also don't deposit and withdraw tables for file pages.
17761dd38b6cSAneesh Kumar K.V 	 */
17771dd38b6cSAneesh Kumar K.V 	return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
17781dd38b6cSAneesh Kumar K.V }
17791dd38b6cSAneesh Kumar K.V #endif
17801dd38b6cSAneesh Kumar K.V 
1781ab6e3d09SNaoya Horiguchi static pmd_t move_soft_dirty_pmd(pmd_t pmd)
1782ab6e3d09SNaoya Horiguchi {
1783ab6e3d09SNaoya Horiguchi #ifdef CONFIG_MEM_SOFT_DIRTY
1784ab6e3d09SNaoya Horiguchi 	if (unlikely(is_pmd_migration_entry(pmd)))
1785ab6e3d09SNaoya Horiguchi 		pmd = pmd_swp_mksoft_dirty(pmd);
1786ab6e3d09SNaoya Horiguchi 	else if (pmd_present(pmd))
1787ab6e3d09SNaoya Horiguchi 		pmd = pmd_mksoft_dirty(pmd);
1788ab6e3d09SNaoya Horiguchi #endif
1789ab6e3d09SNaoya Horiguchi 	return pmd;
1790ab6e3d09SNaoya Horiguchi }
1791ab6e3d09SNaoya Horiguchi 
1792bf8616d5SHugh Dickins bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
179337a1c49aSAndrea Arcangeli 		  unsigned long new_addr, unsigned long old_end,
1794eb66ae03SLinus Torvalds 		  pmd_t *old_pmd, pmd_t *new_pmd)
179537a1c49aSAndrea Arcangeli {
1796bf929152SKirill A. Shutemov 	spinlock_t *old_ptl, *new_ptl;
179737a1c49aSAndrea Arcangeli 	pmd_t pmd;
179837a1c49aSAndrea Arcangeli 	struct mm_struct *mm = vma->vm_mm;
17995d190420SAaron Lu 	bool force_flush = false;
180037a1c49aSAndrea Arcangeli 
180137a1c49aSAndrea Arcangeli 	if ((old_addr & ~HPAGE_PMD_MASK) ||
180237a1c49aSAndrea Arcangeli 	    (new_addr & ~HPAGE_PMD_MASK) ||
1803bf8616d5SHugh Dickins 	    old_end - old_addr < HPAGE_PMD_SIZE)
18044b471e88SKirill A. Shutemov 		return false;
180537a1c49aSAndrea Arcangeli 
180637a1c49aSAndrea Arcangeli 	/*
180737a1c49aSAndrea Arcangeli 	 * The destination pmd shouldn't be established; free_pgtables()
180837a1c49aSAndrea Arcangeli 	 * should have released it.
180937a1c49aSAndrea Arcangeli 	 */
181037a1c49aSAndrea Arcangeli 	if (WARN_ON(!pmd_none(*new_pmd))) {
181137a1c49aSAndrea Arcangeli 		VM_BUG_ON(pmd_trans_huge(*new_pmd));
18124b471e88SKirill A. Shutemov 		return false;
181337a1c49aSAndrea Arcangeli 	}
181437a1c49aSAndrea Arcangeli 
1815bf929152SKirill A. Shutemov 	/*
1816bf929152SKirill A. Shutemov 	 * We don't have to worry about the ordering of src and dst
1817bf929152SKirill A. Shutemov 	 * ptlocks because exclusive mmap_sem prevents deadlock.
1818bf929152SKirill A. Shutemov 	 */
1819b6ec57f4SKirill A. Shutemov 	old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
1820b6ec57f4SKirill A. Shutemov 	if (old_ptl) {
1821bf929152SKirill A. Shutemov 		new_ptl = pmd_lockptr(mm, new_pmd);
1822bf929152SKirill A. Shutemov 		if (new_ptl != old_ptl)
1823bf929152SKirill A. Shutemov 			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
18248809aa2dSAneesh Kumar K.V 		pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
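		/*
		 * If the pmd was present, the old address may still be cached
		 * in the TLB; flush it below before the page table locks are
		 * released.
		 */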
1825eb66ae03SLinus Torvalds 		if (pmd_present(pmd))
1826a2ce2666SAaron Lu 			force_flush = true;
182737a1c49aSAndrea Arcangeli 		VM_BUG_ON(!pmd_none(*new_pmd));
18283592806cSKirill A. Shutemov 
18291dd38b6cSAneesh Kumar K.V 		if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
1830b3084f4dSAneesh Kumar K.V 			pgtable_t pgtable;
18313592806cSKirill A. Shutemov 			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
18323592806cSKirill A. Shutemov 			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
18333592806cSKirill A. Shutemov 		}
1834ab6e3d09SNaoya Horiguchi 		pmd = move_soft_dirty_pmd(pmd);
1835ab6e3d09SNaoya Horiguchi 		set_pmd_at(mm, new_addr, new_pmd, pmd);
18365d190420SAaron Lu 		if (force_flush)
18375d190420SAaron Lu 			flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
1838eb66ae03SLinus Torvalds 		if (new_ptl != old_ptl)
1839eb66ae03SLinus Torvalds 			spin_unlock(new_ptl);
1840bf929152SKirill A. Shutemov 		spin_unlock(old_ptl);
18414b471e88SKirill A. Shutemov 		return true;
184237a1c49aSAndrea Arcangeli 	}
18434b471e88SKirill A. Shutemov 	return false;
184437a1c49aSAndrea Arcangeli }
184537a1c49aSAndrea Arcangeli 
1846f123d74aSMel Gorman /*
1847f123d74aSMel Gorman  * Returns
1848f123d74aSMel Gorman  *  - 0 if PMD could not be locked
1849f123d74aSMel Gorman  *  - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
1850f123d74aSMel Gorman  *  - HPAGE_PMD_NR if protections changed and TLB flush necessary
1851f123d74aSMel Gorman  */
1852cd7548abSJohannes Weiner int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1853e944fd67SMel Gorman 		unsigned long addr, pgprot_t newprot, int prot_numa)
1854cd7548abSJohannes Weiner {
1855cd7548abSJohannes Weiner 	struct mm_struct *mm = vma->vm_mm;
1856bf929152SKirill A. Shutemov 	spinlock_t *ptl;
18570a85e51dSKirill A. Shutemov 	pmd_t entry;
18580a85e51dSKirill A. Shutemov 	bool preserve_write;
18590a85e51dSKirill A. Shutemov 	int ret;
1860cd7548abSJohannes Weiner 
1861b6ec57f4SKirill A. Shutemov 	ptl = __pmd_trans_huge_lock(pmd, vma);
18620a85e51dSKirill A. Shutemov 	if (!ptl)
18630a85e51dSKirill A. Shutemov 		return 0;
18640a85e51dSKirill A. Shutemov 
18650a85e51dSKirill A. Shutemov 	preserve_write = prot_numa && pmd_write(*pmd);
1866ba68bc01SMel Gorman 	ret = 1;
1867e944fd67SMel Gorman 
186884c3fc4eSZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
186984c3fc4eSZi Yan 	if (is_swap_pmd(*pmd)) {
187084c3fc4eSZi Yan 		swp_entry_t entry = pmd_to_swp_entry(*pmd);
187184c3fc4eSZi Yan 
187284c3fc4eSZi Yan 		VM_BUG_ON(!is_pmd_migration_entry(*pmd));
187384c3fc4eSZi Yan 		if (is_write_migration_entry(entry)) {
187484c3fc4eSZi Yan 			pmd_t newpmd;
187584c3fc4eSZi Yan 			/*
187684c3fc4eSZi Yan 			 * A protection check is difficult so
187784c3fc4eSZi Yan 			 * just be safe and disable write
187884c3fc4eSZi Yan 			 */
187984c3fc4eSZi Yan 			make_migration_entry_read(&entry);
188084c3fc4eSZi Yan 			newpmd = swp_entry_to_pmd(entry);
1881ab6e3d09SNaoya Horiguchi 			if (pmd_swp_soft_dirty(*pmd))
1882ab6e3d09SNaoya Horiguchi 				newpmd = pmd_swp_mksoft_dirty(newpmd);
188384c3fc4eSZi Yan 			set_pmd_at(mm, addr, pmd, newpmd);
188484c3fc4eSZi Yan 		}
188584c3fc4eSZi Yan 		goto unlock;
188684c3fc4eSZi Yan 	}
188784c3fc4eSZi Yan #endif
188884c3fc4eSZi Yan 
1889e944fd67SMel Gorman 	/*
1890e944fd67SMel Gorman 	 * Avoid trapping faults against the zero page. The read-only
1891e944fd67SMel Gorman 	 * data is likely to be read-cached on the local CPU and
1892e944fd67SMel Gorman 	 * local/remote hits to the zero page are not interesting.
1893e944fd67SMel Gorman 	 */
18940a85e51dSKirill A. Shutemov 	if (prot_numa && is_huge_zero_pmd(*pmd))
18950a85e51dSKirill A. Shutemov 		goto unlock;
1896e944fd67SMel Gorman 
18970a85e51dSKirill A. Shutemov 	if (prot_numa && pmd_protnone(*pmd))
18980a85e51dSKirill A. Shutemov 		goto unlock;
18990a85e51dSKirill A. Shutemov 
1900ced10803SKirill A. Shutemov 	/*
1901ced10803SKirill A. Shutemov 	 * In case prot_numa, we are under down_read(mmap_sem). It's critical
1902ced10803SKirill A. Shutemov 	 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
1903ced10803SKirill A. Shutemov 	 * which is also under down_read(mmap_sem):
1904ced10803SKirill A. Shutemov 	 *
1905ced10803SKirill A. Shutemov 	 *	CPU0:				CPU1:
1906ced10803SKirill A. Shutemov 	 *				change_huge_pmd(prot_numa=1)
1907ced10803SKirill A. Shutemov 	 *				 pmdp_huge_get_and_clear_notify()
1908ced10803SKirill A. Shutemov 	 * madvise_dontneed()
1909ced10803SKirill A. Shutemov 	 *  zap_pmd_range()
1910ced10803SKirill A. Shutemov 	 *   pmd_trans_huge(*pmd) == 0 (without ptl)
1911ced10803SKirill A. Shutemov 	 *   // skip the pmd
1912ced10803SKirill A. Shutemov 	 *				 set_pmd_at();
1913ced10803SKirill A. Shutemov 	 *				 // pmd is re-established
1914ced10803SKirill A. Shutemov 	 *
1915ced10803SKirill A. Shutemov 	 * The race makes MADV_DONTNEED miss the huge pmd and not clear it,
1916ced10803SKirill A. Shutemov 	 * which may break userspace.
1917ced10803SKirill A. Shutemov 	 *
1918ced10803SKirill A. Shutemov 	 * pmdp_invalidate() is required to make sure we don't miss
1919ced10803SKirill A. Shutemov 	 * dirty/young flags set by hardware.
1920ced10803SKirill A. Shutemov 	 */
1921a3cf988fSKirill A. Shutemov 	entry = pmdp_invalidate(vma, addr, pmd);
1922ced10803SKirill A. Shutemov 
1923cd7548abSJohannes Weiner 	entry = pmd_modify(entry, newprot);
1924b191f9b1SMel Gorman 	if (preserve_write)
1925288bc549SAneesh Kumar K.V 		entry = pmd_mk_savedwrite(entry);
1926f123d74aSMel Gorman 	ret = HPAGE_PMD_NR;
192756eecdb9SAneesh Kumar K.V 	set_pmd_at(mm, addr, pmd, entry);
19280a85e51dSKirill A. Shutemov 	BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
19290a85e51dSKirill A. Shutemov unlock:
1930bf929152SKirill A. Shutemov 	spin_unlock(ptl);
1931cd7548abSJohannes Weiner 	return ret;
1932cd7548abSJohannes Weiner }
1933cd7548abSJohannes Weiner 
1934025c5b24SNaoya Horiguchi /*
19358f19b0c0SHuang Ying  * Returns the page table lock pointer if a given pmd maps a thp, NULL otherwise.
1936025c5b24SNaoya Horiguchi  *
19378f19b0c0SHuang Ying  * Note that if it returns the page table lock pointer, this routine returns
19388f19b0c0SHuang Ying  * without unlocking the page table lock. So callers must unlock it.
1939025c5b24SNaoya Horiguchi  */
1940b6ec57f4SKirill A. Shutemov spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
1941025c5b24SNaoya Horiguchi {
1942b6ec57f4SKirill A. Shutemov 	spinlock_t *ptl;
1943b6ec57f4SKirill A. Shutemov 	ptl = pmd_lock(vma->vm_mm, pmd);
194484c3fc4eSZi Yan 	if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
194584c3fc4eSZi Yan 			pmd_devmap(*pmd)))
1946b6ec57f4SKirill A. Shutemov 		return ptl;
1947b6ec57f4SKirill A. Shutemov 	spin_unlock(ptl);
1948b6ec57f4SKirill A. Shutemov 	return NULL;
1949025c5b24SNaoya Horiguchi }
1950025c5b24SNaoya Horiguchi 
1951a00cc7d9SMatthew Wilcox /*
1952a00cc7d9SMatthew Wilcox  * Returns true if a given pud maps a thp, false otherwise.
1953a00cc7d9SMatthew Wilcox  *
1954a00cc7d9SMatthew Wilcox  * Note that if it returns true, this routine returns without unlocking page
1955a00cc7d9SMatthew Wilcox  * table lock. So callers must unlock it.
1956a00cc7d9SMatthew Wilcox  */
1957a00cc7d9SMatthew Wilcox spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
1958a00cc7d9SMatthew Wilcox {
1959a00cc7d9SMatthew Wilcox 	spinlock_t *ptl;
1960a00cc7d9SMatthew Wilcox 
1961a00cc7d9SMatthew Wilcox 	ptl = pud_lock(vma->vm_mm, pud);
1962a00cc7d9SMatthew Wilcox 	if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
1963a00cc7d9SMatthew Wilcox 		return ptl;
1964a00cc7d9SMatthew Wilcox 	spin_unlock(ptl);
1965a00cc7d9SMatthew Wilcox 	return NULL;
1966a00cc7d9SMatthew Wilcox }
1967a00cc7d9SMatthew Wilcox 
1968a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1969a00cc7d9SMatthew Wilcox int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
1970a00cc7d9SMatthew Wilcox 		 pud_t *pud, unsigned long addr)
1971a00cc7d9SMatthew Wilcox {
1972a00cc7d9SMatthew Wilcox 	pud_t orig_pud;
1973a00cc7d9SMatthew Wilcox 	spinlock_t *ptl;
1974a00cc7d9SMatthew Wilcox 
1975a00cc7d9SMatthew Wilcox 	ptl = __pud_trans_huge_lock(pud, vma);
1976a00cc7d9SMatthew Wilcox 	if (!ptl)
1977a00cc7d9SMatthew Wilcox 		return 0;
1978a00cc7d9SMatthew Wilcox 	/*
1979a00cc7d9SMatthew Wilcox 	 * For architectures like ppc64 we look at deposited pgtable
1980a00cc7d9SMatthew Wilcox 	 * when calling pudp_huge_get_and_clear. So do the
1981a00cc7d9SMatthew Wilcox 	 * pgtable_trans_huge_withdraw after finishing pudp related
1982a00cc7d9SMatthew Wilcox 	 * operations.
1983a00cc7d9SMatthew Wilcox 	 */
1984a00cc7d9SMatthew Wilcox 	orig_pud = pudp_huge_get_and_clear_full(tlb->mm, addr, pud,
1985a00cc7d9SMatthew Wilcox 			tlb->fullmm);
1986a00cc7d9SMatthew Wilcox 	tlb_remove_pud_tlb_entry(tlb, pud, addr);
1987a00cc7d9SMatthew Wilcox 	if (vma_is_dax(vma)) {
1988a00cc7d9SMatthew Wilcox 		spin_unlock(ptl);
1989a00cc7d9SMatthew Wilcox 		/* No zero page support yet */
1990a00cc7d9SMatthew Wilcox 	} else {
1991a00cc7d9SMatthew Wilcox 		/* No support for anonymous PUD pages yet */
1992a00cc7d9SMatthew Wilcox 		BUG();
1993a00cc7d9SMatthew Wilcox 	}
1994a00cc7d9SMatthew Wilcox 	return 1;
1995a00cc7d9SMatthew Wilcox }
1996a00cc7d9SMatthew Wilcox 
1997a00cc7d9SMatthew Wilcox static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
1998a00cc7d9SMatthew Wilcox 		unsigned long haddr)
1999a00cc7d9SMatthew Wilcox {
2000a00cc7d9SMatthew Wilcox 	VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
2001a00cc7d9SMatthew Wilcox 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2002a00cc7d9SMatthew Wilcox 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
2003a00cc7d9SMatthew Wilcox 	VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
2004a00cc7d9SMatthew Wilcox 
2005ce9311cfSYisheng Xie 	count_vm_event(THP_SPLIT_PUD);
2006a00cc7d9SMatthew Wilcox 
2007a00cc7d9SMatthew Wilcox 	pudp_huge_clear_flush_notify(vma, haddr, pud);
2008a00cc7d9SMatthew Wilcox }
2009a00cc7d9SMatthew Wilcox 
2010a00cc7d9SMatthew Wilcox void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2011a00cc7d9SMatthew Wilcox 		unsigned long address)
2012a00cc7d9SMatthew Wilcox {
2013a00cc7d9SMatthew Wilcox 	spinlock_t *ptl;
2014*ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
2015a00cc7d9SMatthew Wilcox 
2016*ac46d4f3SJérôme Glisse 	mmu_notifier_range_init(&range, vma->vm_mm, address & HPAGE_PUD_MASK,
2017*ac46d4f3SJérôme Glisse 				(address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
2018*ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
2019*ac46d4f3SJérôme Glisse 	ptl = pud_lock(vma->vm_mm, pud);
2020a00cc7d9SMatthew Wilcox 	if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
2021a00cc7d9SMatthew Wilcox 		goto out;
2022*ac46d4f3SJérôme Glisse 	__split_huge_pud_locked(vma, pud, range.start);
2023a00cc7d9SMatthew Wilcox 
2024a00cc7d9SMatthew Wilcox out:
2025a00cc7d9SMatthew Wilcox 	spin_unlock(ptl);
20264645b9feSJérôme Glisse 	/*
20274645b9feSJérôme Glisse 	 * No need to double call mmu_notifier->invalidate_range() callback as
20284645b9feSJérôme Glisse 	 * the above pudp_huge_clear_flush_notify() did already call it.
20294645b9feSJérôme Glisse 	 */
2030*ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_only_end(&range);
2031a00cc7d9SMatthew Wilcox }
2032a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2033a00cc7d9SMatthew Wilcox 
2034eef1b3baSKirill A. Shutemov static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2035eef1b3baSKirill A. Shutemov 		unsigned long haddr, pmd_t *pmd)
2036eef1b3baSKirill A. Shutemov {
2037eef1b3baSKirill A. Shutemov 	struct mm_struct *mm = vma->vm_mm;
2038eef1b3baSKirill A. Shutemov 	pgtable_t pgtable;
2039eef1b3baSKirill A. Shutemov 	pmd_t _pmd;
2040eef1b3baSKirill A. Shutemov 	int i;
2041eef1b3baSKirill A. Shutemov 
20420f10851eSJérôme Glisse 	/*
20430f10851eSJérôme Glisse 	 * Leave the pmd empty until the ptes are filled. Note that it is fine
20440f10851eSJérôme Glisse 	 * to delay the notification until mmu_notifier_invalidate_range_end()
20450f10851eSJérôme Glisse 	 * as we are replacing a write-protected zero pmd mapping with
20460f10851eSJérôme Glisse 	 * write-protected zero pte mappings.
20470f10851eSJérôme Glisse 	 *
2048ad56b738SMike Rapoport 	 * See Documentation/vm/mmu_notifier.rst
20490f10851eSJérôme Glisse 	 */
20500f10851eSJérôme Glisse 	pmdp_huge_clear_flush(vma, haddr, pmd);
2051eef1b3baSKirill A. Shutemov 
2052eef1b3baSKirill A. Shutemov 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2053eef1b3baSKirill A. Shutemov 	pmd_populate(mm, &_pmd, pgtable);
2054eef1b3baSKirill A. Shutemov 
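	/*
	 * Install one pte per subpage, each a special mapping of the small
	 * zero page, so the range keeps reading as zeroes after the huge
	 * zero pmd is split.
	 */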
2055eef1b3baSKirill A. Shutemov 	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2056eef1b3baSKirill A. Shutemov 		pte_t *pte, entry;
2057eef1b3baSKirill A. Shutemov 		entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
2058eef1b3baSKirill A. Shutemov 		entry = pte_mkspecial(entry);
2059eef1b3baSKirill A. Shutemov 		pte = pte_offset_map(&_pmd, haddr);
2060eef1b3baSKirill A. Shutemov 		VM_BUG_ON(!pte_none(*pte));
2061eef1b3baSKirill A. Shutemov 		set_pte_at(mm, haddr, pte, entry);
2062eef1b3baSKirill A. Shutemov 		pte_unmap(pte);
2063eef1b3baSKirill A. Shutemov 	}
2064eef1b3baSKirill A. Shutemov 	smp_wmb(); /* make pte visible before pmd */
2065eef1b3baSKirill A. Shutemov 	pmd_populate(mm, pmd, pgtable);
2066eef1b3baSKirill A. Shutemov }
2067eef1b3baSKirill A. Shutemov 
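/*
 * Rewrite a huge pmd, in place and with the pmd lock held, as a page table
 * of HPAGE_PMD_NR normal ptes. File-backed pmds are simply zapped, the huge
 * zero page is handled by the helper above, and anonymous (or migration
 * entry) pmds are invalidated first and then repopulated pte by pte, as
 * migration entries when @freeze is set.
 */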
2068eef1b3baSKirill A. Shutemov static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2069ba988280SKirill A. Shutemov 		unsigned long haddr, bool freeze)
2070eef1b3baSKirill A. Shutemov {
2071eef1b3baSKirill A. Shutemov 	struct mm_struct *mm = vma->vm_mm;
2072eef1b3baSKirill A. Shutemov 	struct page *page;
2073eef1b3baSKirill A. Shutemov 	pgtable_t pgtable;
2074423ac9afSAneesh Kumar K.V 	pmd_t old_pmd, _pmd;
2075a3cf988fSKirill A. Shutemov 	bool young, write, soft_dirty, pmd_migration = false;
20762ac015e2SKirill A. Shutemov 	unsigned long addr;
2077eef1b3baSKirill A. Shutemov 	int i;
2078eef1b3baSKirill A. Shutemov 
2079eef1b3baSKirill A. Shutemov 	VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2080eef1b3baSKirill A. Shutemov 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2081eef1b3baSKirill A. Shutemov 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
208284c3fc4eSZi Yan 	VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
208384c3fc4eSZi Yan 				&& !pmd_devmap(*pmd));
2084eef1b3baSKirill A. Shutemov 
2085eef1b3baSKirill A. Shutemov 	count_vm_event(THP_SPLIT_PMD);
2086eef1b3baSKirill A. Shutemov 
2087d21b9e57SKirill A. Shutemov 	if (!vma_is_anonymous(vma)) {
2088d21b9e57SKirill A. Shutemov 		_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
2089953c66c2SAneesh Kumar K.V 		/*
2090953c66c2SAneesh Kumar K.V 		 * We are going to unmap this huge page. So
2091953c66c2SAneesh Kumar K.V 		 * just go ahead and zap it
2092953c66c2SAneesh Kumar K.V 		 */
2093953c66c2SAneesh Kumar K.V 		if (arch_needs_pgtable_deposit())
2094953c66c2SAneesh Kumar K.V 			zap_deposited_table(mm, pmd);
2095d21b9e57SKirill A. Shutemov 		if (vma_is_dax(vma))
2096d21b9e57SKirill A. Shutemov 			return;
2097d21b9e57SKirill A. Shutemov 		page = pmd_page(_pmd);
2098e1f1b157SHugh Dickins 		if (!PageDirty(page) && pmd_dirty(_pmd))
2099e1f1b157SHugh Dickins 			set_page_dirty(page);
2100d21b9e57SKirill A. Shutemov 		if (!PageReferenced(page) && pmd_young(_pmd))
2101d21b9e57SKirill A. Shutemov 			SetPageReferenced(page);
2102d21b9e57SKirill A. Shutemov 		page_remove_rmap(page, true);
2103d21b9e57SKirill A. Shutemov 		put_page(page);
2104fadae295SYang Shi 		add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
2105eef1b3baSKirill A. Shutemov 		return;
2106eef1b3baSKirill A. Shutemov 	} else if (is_huge_zero_pmd(*pmd)) {
21074645b9feSJérôme Glisse 		/*
21084645b9feSJérôme Glisse 		 * FIXME: Do we want to invalidate secondary mmu by calling
21094645b9feSJérôme Glisse 		 * mmu_notifier_invalidate_range()? See the comments below inside
21104645b9feSJérôme Glisse 		 * __split_huge_pmd().
21114645b9feSJérôme Glisse 		 *
21124645b9feSJérôme Glisse 		 * We are going from a write protected huge zero page to write
21134645b9feSJérôme Glisse 		 * protected small zero pages, so it does not seem useful to
21144645b9feSJérôme Glisse 		 * invalidate the secondary mmu at this time.
21154645b9feSJérôme Glisse 		 */
2116eef1b3baSKirill A. Shutemov 		return __split_huge_zero_page_pmd(vma, haddr, pmd);
2117eef1b3baSKirill A. Shutemov 	}
2118eef1b3baSKirill A. Shutemov 
2119423ac9afSAneesh Kumar K.V 	/*
2120423ac9afSAneesh Kumar K.V 	 * Up to this point the pmd is present and huge and userland has the
2121423ac9afSAneesh Kumar K.V 	 * full access to the hugepage during the split (which happens in
2122423ac9afSAneesh Kumar K.V 	 * place). If we overwrite the pmd with the not-huge version pointing
2123423ac9afSAneesh Kumar K.V 	 * to the pte here (which of course we could if all CPUs were bug
2124423ac9afSAneesh Kumar K.V 	 * free), userland could trigger a small page size TLB miss on the
2125423ac9afSAneesh Kumar K.V 	 * small sized TLB while the hugepage TLB entry is still established in
2126423ac9afSAneesh Kumar K.V 	 * the huge TLB. Some CPUs don't like that.
2127423ac9afSAneesh Kumar K.V 	 * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
2128423ac9afSAneesh Kumar K.V 	 * 383 on page 93. Intel should be safe but also warns that it's
2129423ac9afSAneesh Kumar K.V 	 * only safe if the permission and cache attributes of the two entries
2130423ac9afSAneesh Kumar K.V 	 * loaded in the two TLBs are identical (which should be the case here).
2131423ac9afSAneesh Kumar K.V 	 * But it is generally safer to never allow small and huge TLB entries
2132423ac9afSAneesh Kumar K.V 	 * for the same virtual address to be loaded simultaneously. So instead
2133423ac9afSAneesh Kumar K.V 	 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
2134423ac9afSAneesh Kumar K.V 	 * current pmd notpresent (atomically because here the pmd_trans_huge
2135423ac9afSAneesh Kumar K.V 	 * must remain set at all times on the pmd until the split is complete
2136423ac9afSAneesh Kumar K.V 	 * for this pmd), then we flush the SMP TLB and finally we write the
2137423ac9afSAneesh Kumar K.V 	 * non-huge version of the pmd entry with pmd_populate.
2138423ac9afSAneesh Kumar K.V 	 */
2139423ac9afSAneesh Kumar K.V 	old_pmd = pmdp_invalidate(vma, haddr, pmd);
2140423ac9afSAneesh Kumar K.V 
2141423ac9afSAneesh Kumar K.V 	pmd_migration = is_pmd_migration_entry(old_pmd);
21422e83ee1dSPeter Xu 	if (unlikely(pmd_migration)) {
214384c3fc4eSZi Yan 		swp_entry_t entry;
214484c3fc4eSZi Yan 
2145423ac9afSAneesh Kumar K.V 		entry = pmd_to_swp_entry(old_pmd);
214684c3fc4eSZi Yan 		page = pfn_to_page(swp_offset(entry));
21472e83ee1dSPeter Xu 		write = is_write_migration_entry(entry);
21482e83ee1dSPeter Xu 		young = false;
21492e83ee1dSPeter Xu 		soft_dirty = pmd_swp_soft_dirty(old_pmd);
21502e83ee1dSPeter Xu 	} else {
2151423ac9afSAneesh Kumar K.V 		page = pmd_page(old_pmd);
2152423ac9afSAneesh Kumar K.V 		if (pmd_dirty(old_pmd))
2153423ac9afSAneesh Kumar K.V 			SetPageDirty(page);
2154423ac9afSAneesh Kumar K.V 		write = pmd_write(old_pmd);
2155423ac9afSAneesh Kumar K.V 		young = pmd_young(old_pmd);
2156423ac9afSAneesh Kumar K.V 		soft_dirty = pmd_soft_dirty(old_pmd);
21572e83ee1dSPeter Xu 	}
21582e83ee1dSPeter Xu 	VM_BUG_ON_PAGE(!page_count(page), page);
21592e83ee1dSPeter Xu 	page_ref_add(page, HPAGE_PMD_NR - 1);
2160eef1b3baSKirill A. Shutemov 
2161423ac9afSAneesh Kumar K.V 	/*
2162423ac9afSAneesh Kumar K.V 	 * Withdraw the table only after we mark the pmd entry invalid.
2163423ac9afSAneesh Kumar K.V 	 * This is critical for some architectures (Power).
2164423ac9afSAneesh Kumar K.V 	 */
2165eef1b3baSKirill A. Shutemov 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2166eef1b3baSKirill A. Shutemov 	pmd_populate(mm, &_pmd, pgtable);
2167eef1b3baSKirill A. Shutemov 
21682ac015e2SKirill A. Shutemov 	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2169eef1b3baSKirill A. Shutemov 		pte_t entry, *pte;
2170eef1b3baSKirill A. Shutemov 		/*
2171eef1b3baSKirill A. Shutemov 		 * Note that NUMA hinting access restrictions are not
2172eef1b3baSKirill A. Shutemov 		 * transferred to avoid any possibility of altering
2173eef1b3baSKirill A. Shutemov 		 * permissions across VMAs.
2174eef1b3baSKirill A. Shutemov 		 */
217584c3fc4eSZi Yan 		if (freeze || pmd_migration) {
2176ba988280SKirill A. Shutemov 			swp_entry_t swp_entry;
2177ba988280SKirill A. Shutemov 			swp_entry = make_migration_entry(page + i, write);
2178ba988280SKirill A. Shutemov 			entry = swp_entry_to_pte(swp_entry);
2179804dd150SAndrea Arcangeli 			if (soft_dirty)
2180804dd150SAndrea Arcangeli 				entry = pte_swp_mksoft_dirty(entry);
2181ba988280SKirill A. Shutemov 		} else {
21826d2329f8SAndrea Arcangeli 			entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
2183b8d3c4c3SMinchan Kim 			entry = maybe_mkwrite(entry, vma);
2184eef1b3baSKirill A. Shutemov 			if (!write)
2185eef1b3baSKirill A. Shutemov 				entry = pte_wrprotect(entry);
2186eef1b3baSKirill A. Shutemov 			if (!young)
2187eef1b3baSKirill A. Shutemov 				entry = pte_mkold(entry);
2188804dd150SAndrea Arcangeli 			if (soft_dirty)
2189804dd150SAndrea Arcangeli 				entry = pte_mksoft_dirty(entry);
2190ba988280SKirill A. Shutemov 		}
21912ac015e2SKirill A. Shutemov 		pte = pte_offset_map(&_pmd, addr);
2192eef1b3baSKirill A. Shutemov 		BUG_ON(!pte_none(*pte));
21932ac015e2SKirill A. Shutemov 		set_pte_at(mm, addr, pte, entry);
2194eef1b3baSKirill A. Shutemov 		atomic_inc(&page[i]._mapcount);
2195eef1b3baSKirill A. Shutemov 		pte_unmap(pte);
2196eef1b3baSKirill A. Shutemov 	}
2197eef1b3baSKirill A. Shutemov 
2198eef1b3baSKirill A. Shutemov 	/*
2199eef1b3baSKirill A. Shutemov 	 * Set PG_double_map before dropping compound_mapcount to avoid
2200eef1b3baSKirill A. Shutemov 	 * false-negative page_mapped().
2201eef1b3baSKirill A. Shutemov 	 */
2202eef1b3baSKirill A. Shutemov 	if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
2203eef1b3baSKirill A. Shutemov 		for (i = 0; i < HPAGE_PMD_NR; i++)
2204eef1b3baSKirill A. Shutemov 			atomic_inc(&page[i]._mapcount);
2205eef1b3baSKirill A. Shutemov 	}
2206eef1b3baSKirill A. Shutemov 
2207eef1b3baSKirill A. Shutemov 	if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
2208eef1b3baSKirill A. Shutemov 		/* Last compound_mapcount is gone. */
220911fb9989SMel Gorman 		__dec_node_page_state(page, NR_ANON_THPS);
2210eef1b3baSKirill A. Shutemov 		if (TestClearPageDoubleMap(page)) {
2211eef1b3baSKirill A. Shutemov 			/* No need in mapcount reference anymore */
2212eef1b3baSKirill A. Shutemov 			for (i = 0; i < HPAGE_PMD_NR; i++)
2213eef1b3baSKirill A. Shutemov 				atomic_dec(&page[i]._mapcount);
2214eef1b3baSKirill A. Shutemov 		}
2215eef1b3baSKirill A. Shutemov 	}
2216eef1b3baSKirill A. Shutemov 
2217eef1b3baSKirill A. Shutemov 	smp_wmb(); /* make pte visible before pmd */
2218eef1b3baSKirill A. Shutemov 	pmd_populate(mm, pmd, pgtable);
2219e9b61f19SKirill A. Shutemov 
2220e9b61f19SKirill A. Shutemov 	if (freeze) {
22212ac015e2SKirill A. Shutemov 		for (i = 0; i < HPAGE_PMD_NR; i++) {
2222e9b61f19SKirill A. Shutemov 			page_remove_rmap(page + i, false);
2223e9b61f19SKirill A. Shutemov 			put_page(page + i);
2224e9b61f19SKirill A. Shutemov 		}
2225e9b61f19SKirill A. Shutemov 	}
2226eef1b3baSKirill A. Shutemov }
2227eef1b3baSKirill A. Shutemov 
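/*
 * Entry point for splitting one huge pmd: set up the mmu_notifier range,
 * take the pmd lock, check that the pmd still maps the expected page (when
 * @page is passed) and is still a huge, devmap or migration entry, then
 * hand off to __split_huge_pmd_locked().
 */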
2228eef1b3baSKirill A. Shutemov void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
222933f4751eSNaoya Horiguchi 		unsigned long address, bool freeze, struct page *page)
2230eef1b3baSKirill A. Shutemov {
2231eef1b3baSKirill A. Shutemov 	spinlock_t *ptl;
2232*ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
2233eef1b3baSKirill A. Shutemov 
2234*ac46d4f3SJérôme Glisse 	mmu_notifier_range_init(&range, vma->vm_mm, address & HPAGE_PMD_MASK,
2235*ac46d4f3SJérôme Glisse 				(address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
2236*ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
2237*ac46d4f3SJérôme Glisse 	ptl = pmd_lock(vma->vm_mm, pmd);
223833f4751eSNaoya Horiguchi 
223933f4751eSNaoya Horiguchi 	/*
224033f4751eSNaoya Horiguchi 	 * If the caller asks to set up migration entries, we need a page to
224133f4751eSNaoya Horiguchi 	 * check the pmd against. Otherwise we can end up replacing the wrong page.
224233f4751eSNaoya Horiguchi 	 */
224333f4751eSNaoya Horiguchi 	VM_BUG_ON(freeze && !page);
224433f4751eSNaoya Horiguchi 	if (page && page != pmd_page(*pmd))
224533f4751eSNaoya Horiguchi 	        goto out;
224633f4751eSNaoya Horiguchi 
22475c7fb56eSDan Williams 	if (pmd_trans_huge(*pmd)) {
224833f4751eSNaoya Horiguchi 		page = pmd_page(*pmd);
2249e90309c9SKirill A. Shutemov 		if (PageMlocked(page))
22505f737714SKirill A. Shutemov 			clear_page_mlock(page);
225184c3fc4eSZi Yan 	} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
22525c7fb56eSDan Williams 		goto out;
2253*ac46d4f3SJérôme Glisse 	__split_huge_pmd_locked(vma, pmd, range.start, freeze);
2254e90309c9SKirill A. Shutemov out:
2255eef1b3baSKirill A. Shutemov 	spin_unlock(ptl);
22564645b9feSJérôme Glisse 	/*
22574645b9feSJérôme Glisse 	 * No need to call the mmu_notifier->invalidate_range() callback again.
22584645b9feSJérôme Glisse 	 * There are 3 cases to consider inside __split_huge_pmd_locked():
22594645b9feSJérôme Glisse 	 *  1) pmdp_huge_clear_flush_notify() obviously calls invalidate_range()
22604645b9feSJérôme Glisse 	 *  2) __split_huge_zero_page_pmd() leaves the read only zero page in
22614645b9feSJérôme Glisse 	 *     place and any write fault will trigger a flush_notify before
22624645b9feSJérôme Glisse 	 *     pointing to a new page (it is fine if the secondary mmu keeps
22634645b9feSJérôme Glisse 	 *     pointing to the old zero page in the meantime)
22644645b9feSJérôme Glisse 	 *  3) Splitting a huge pmd into ptes that point to the same page. No
22654645b9feSJérôme Glisse 	 *     need to invalidate the secondary tlb entries, they are all still
22664645b9feSJérôme Glisse 	 *     valid. Any further change to an individual pte will notify, so
22674645b9feSJérôme Glisse 	 *     there is no need to call mmu_notifier->invalidate_range() here.
22684645b9feSJérôme Glisse 	 */
2269*ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_only_end(&range);
2270eef1b3baSKirill A. Shutemov }
2271eef1b3baSKirill A. Shutemov 
2272fec89c10SKirill A. Shutemov void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
2273fec89c10SKirill A. Shutemov 		bool freeze, struct page *page)
227494fcc585SAndrea Arcangeli {
2275f72e7dcdSHugh Dickins 	pgd_t *pgd;
2276c2febafcSKirill A. Shutemov 	p4d_t *p4d;
2277f72e7dcdSHugh Dickins 	pud_t *pud;
227894fcc585SAndrea Arcangeli 	pmd_t *pmd;
227994fcc585SAndrea Arcangeli 
228078ddc534SKirill A. Shutemov 	pgd = pgd_offset(vma->vm_mm, address);
2281f72e7dcdSHugh Dickins 	if (!pgd_present(*pgd))
2282f72e7dcdSHugh Dickins 		return;
2283f72e7dcdSHugh Dickins 
2284c2febafcSKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
2285c2febafcSKirill A. Shutemov 	if (!p4d_present(*p4d))
2286c2febafcSKirill A. Shutemov 		return;
2287c2febafcSKirill A. Shutemov 
2288c2febafcSKirill A. Shutemov 	pud = pud_offset(p4d, address);
2289f72e7dcdSHugh Dickins 	if (!pud_present(*pud))
2290f72e7dcdSHugh Dickins 		return;
2291f72e7dcdSHugh Dickins 
2292f72e7dcdSHugh Dickins 	pmd = pmd_offset(pud, address);
2293fec89c10SKirill A. Shutemov 
229433f4751eSNaoya Horiguchi 	__split_huge_pmd(vma, pmd, address, freeze, page);
229594fcc585SAndrea Arcangeli }
229694fcc585SAndrea Arcangeli 
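/*
 * Called when a vma is about to be resized or its neighbour adjusted: split
 * any huge pmd that would otherwise straddle the new start, end or
 * next->vm_start boundary, so that no pmd ends up mapping memory outside
 * the vma it belongs to.
 */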
2297e1b9996bSKirill A. Shutemov void vma_adjust_trans_huge(struct vm_area_struct *vma,
229894fcc585SAndrea Arcangeli 			     unsigned long start,
229994fcc585SAndrea Arcangeli 			     unsigned long end,
230094fcc585SAndrea Arcangeli 			     long adjust_next)
230194fcc585SAndrea Arcangeli {
230294fcc585SAndrea Arcangeli 	/*
230394fcc585SAndrea Arcangeli 	 * If the new start address isn't hpage aligned and it could
230494fcc585SAndrea Arcangeli 	 * previously contain a hugepage: check if we need to split
230594fcc585SAndrea Arcangeli 	 * a huge pmd.
230694fcc585SAndrea Arcangeli 	 */
230794fcc585SAndrea Arcangeli 	if (start & ~HPAGE_PMD_MASK &&
230894fcc585SAndrea Arcangeli 	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
230994fcc585SAndrea Arcangeli 	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2310fec89c10SKirill A. Shutemov 		split_huge_pmd_address(vma, start, false, NULL);
231194fcc585SAndrea Arcangeli 
231294fcc585SAndrea Arcangeli 	/*
231394fcc585SAndrea Arcangeli 	 * If the new end address isn't hpage aligned and it could
231494fcc585SAndrea Arcangeli 	 * previously contain a hugepage: check if we need to split
231594fcc585SAndrea Arcangeli 	 * a huge pmd.
231694fcc585SAndrea Arcangeli 	 */
231794fcc585SAndrea Arcangeli 	if (end & ~HPAGE_PMD_MASK &&
231894fcc585SAndrea Arcangeli 	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
231994fcc585SAndrea Arcangeli 	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2320fec89c10SKirill A. Shutemov 		split_huge_pmd_address(vma, end, false, NULL);
232194fcc585SAndrea Arcangeli 
232294fcc585SAndrea Arcangeli 	/*
232394fcc585SAndrea Arcangeli 	 * If we're also updating vma->vm_next->vm_start, and the new
232494fcc585SAndrea Arcangeli 	 * vm_next->vm_start isn't page aligned and could previously
232594fcc585SAndrea Arcangeli 	 * contain a hugepage: check if we need to split a huge pmd.
232694fcc585SAndrea Arcangeli 	 */
232794fcc585SAndrea Arcangeli 	if (adjust_next > 0) {
232894fcc585SAndrea Arcangeli 		struct vm_area_struct *next = vma->vm_next;
232994fcc585SAndrea Arcangeli 		unsigned long nstart = next->vm_start;
233094fcc585SAndrea Arcangeli 		nstart += adjust_next << PAGE_SHIFT;
233194fcc585SAndrea Arcangeli 		if (nstart & ~HPAGE_PMD_MASK &&
233294fcc585SAndrea Arcangeli 		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
233394fcc585SAndrea Arcangeli 		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
2334fec89c10SKirill A. Shutemov 			split_huge_pmd_address(next, nstart, false, NULL);
233594fcc585SAndrea Arcangeli 	}
233694fcc585SAndrea Arcangeli }
2337e9b61f19SKirill A. Shutemov 
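/*
 * unmap_page() and remap_page() bracket the actual split of a THP: the
 * former unmaps every mapping of the compound page (anonymous mappings are
 * frozen behind migration entries via TTU_SPLIT_FREEZE), the latter
 * re-establishes the mappings, either per tail page after a successful
 * split or for the still-huge page when the split failed.
 */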
2338906f9cdfSHugh Dickins static void unmap_page(struct page *page)
2339e9b61f19SKirill A. Shutemov {
2340baa355fdSKirill A. Shutemov 	enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
2341c7ab0d2fSKirill A. Shutemov 		TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
2342666e5a40SMinchan Kim 	bool unmap_success;
2343e9b61f19SKirill A. Shutemov 
2344e9b61f19SKirill A. Shutemov 	VM_BUG_ON_PAGE(!PageHead(page), page);
2345e9b61f19SKirill A. Shutemov 
2346baa355fdSKirill A. Shutemov 	if (PageAnon(page))
2347b5ff8161SNaoya Horiguchi 		ttu_flags |= TTU_SPLIT_FREEZE;
2348baa355fdSKirill A. Shutemov 
2349666e5a40SMinchan Kim 	unmap_success = try_to_unmap(page, ttu_flags);
2350666e5a40SMinchan Kim 	VM_BUG_ON_PAGE(!unmap_success, page);
2351bd56086fSKirill A. Shutemov }
2352bd56086fSKirill A. Shutemov 
2353906f9cdfSHugh Dickins static void remap_page(struct page *page)
2354e9b61f19SKirill A. Shutemov {
2355fec89c10SKirill A. Shutemov 	int i;
2356ace71a19SKirill A. Shutemov 	if (PageTransHuge(page)) {
2357ace71a19SKirill A. Shutemov 		remove_migration_ptes(page, page, true);
2358ace71a19SKirill A. Shutemov 	} else {
2359fec89c10SKirill A. Shutemov 		for (i = 0; i < HPAGE_PMD_NR; i++)
2360fec89c10SKirill A. Shutemov 			remove_migration_ptes(page + i, page + i, true);
2361e9b61f19SKirill A. Shutemov 	}
2362ace71a19SKirill A. Shutemov }
2363e9b61f19SKirill A. Shutemov 
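/*
 * Turn one tail page of a frozen compound page into an independent page:
 * copy the relevant flags from the head, inherit ->mapping and ->index,
 * clear the tail/compound state and only then unfreeze the tail's refcount,
 * finally adding it to the LRU (or to @list).
 */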
23648df651c7SKirill A. Shutemov static void __split_huge_page_tail(struct page *head, int tail,
2365e9b61f19SKirill A. Shutemov 		struct lruvec *lruvec, struct list_head *list)
2366e9b61f19SKirill A. Shutemov {
2367e9b61f19SKirill A. Shutemov 	struct page *page_tail = head + tail;
2368e9b61f19SKirill A. Shutemov 
23698df651c7SKirill A. Shutemov 	VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
2370e9b61f19SKirill A. Shutemov 
2371e9b61f19SKirill A. Shutemov 	/*
2372605ca5edSKonstantin Khlebnikov 	 * Clone page flags before unfreezing refcount.
2373605ca5edSKonstantin Khlebnikov 	 *
2374605ca5edSKonstantin Khlebnikov 	 * A flags change might follow a successful get_page_unless_zero(),
2375605ca5edSKonstantin Khlebnikov 	 * for example lock_page() which sets PG_waiters.
2376e9b61f19SKirill A. Shutemov 	 */
2377e9b61f19SKirill A. Shutemov 	page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
2378e9b61f19SKirill A. Shutemov 	page_tail->flags |= (head->flags &
2379e9b61f19SKirill A. Shutemov 			((1L << PG_referenced) |
2380e9b61f19SKirill A. Shutemov 			 (1L << PG_swapbacked) |
238138d8b4e6SHuang Ying 			 (1L << PG_swapcache) |
2382e9b61f19SKirill A. Shutemov 			 (1L << PG_mlocked) |
2383e9b61f19SKirill A. Shutemov 			 (1L << PG_uptodate) |
2384e9b61f19SKirill A. Shutemov 			 (1L << PG_active) |
23851899ad18SJohannes Weiner 			 (1L << PG_workingset) |
2386e9b61f19SKirill A. Shutemov 			 (1L << PG_locked) |
2387b8d3c4c3SMinchan Kim 			 (1L << PG_unevictable) |
2388b8d3c4c3SMinchan Kim 			 (1L << PG_dirty)));
2389e9b61f19SKirill A. Shutemov 
2390173d9d9fSHugh Dickins 	/* ->mapping in first tail page is compound_mapcount */
2391173d9d9fSHugh Dickins 	VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
2392173d9d9fSHugh Dickins 			page_tail);
2393173d9d9fSHugh Dickins 	page_tail->mapping = head->mapping;
2394173d9d9fSHugh Dickins 	page_tail->index = head->index + tail;
2395173d9d9fSHugh Dickins 
2396605ca5edSKonstantin Khlebnikov 	/* Page flags must be visible before we make the page non-compound. */
2397e9b61f19SKirill A. Shutemov 	smp_wmb();
2398e9b61f19SKirill A. Shutemov 
2399605ca5edSKonstantin Khlebnikov 	/*
2400605ca5edSKonstantin Khlebnikov 	 * Clear PageTail before unfreezing page refcount.
2401605ca5edSKonstantin Khlebnikov 	 *
2402605ca5edSKonstantin Khlebnikov 	 * A put_page() might follow a successful get_page_unless_zero(),
2403605ca5edSKonstantin Khlebnikov 	 * and it needs a correct compound_head().
2404605ca5edSKonstantin Khlebnikov 	 */
2405e9b61f19SKirill A. Shutemov 	clear_compound_head(page_tail);
2406e9b61f19SKirill A. Shutemov 
2407605ca5edSKonstantin Khlebnikov 	/* Finally unfreeze refcount. Additional reference from page cache. */
2408605ca5edSKonstantin Khlebnikov 	page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
2409605ca5edSKonstantin Khlebnikov 					  PageSwapCache(head)));
2410605ca5edSKonstantin Khlebnikov 
2411e9b61f19SKirill A. Shutemov 	if (page_is_young(head))
2412e9b61f19SKirill A. Shutemov 		set_page_young(page_tail);
2413e9b61f19SKirill A. Shutemov 	if (page_is_idle(head))
2414e9b61f19SKirill A. Shutemov 		set_page_idle(page_tail);
2415e9b61f19SKirill A. Shutemov 
2416e9b61f19SKirill A. Shutemov 	page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
241794723aafSMichal Hocko 
241894723aafSMichal Hocko 	/*
241994723aafSMichal Hocko 	 * Always add to the tail because some iterators expect new
242094723aafSMichal Hocko 	 * pages to show up after the currently processed elements - e.g.
242194723aafSMichal Hocko 	 * migrate_pages().
242294723aafSMichal Hocko 	 */
2423e9b61f19SKirill A. Shutemov 	lru_add_page_tail(head, page_tail, lruvec, list);
2424e9b61f19SKirill A. Shutemov }
2425e9b61f19SKirill A. Shutemov 
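/*
 * Perform the split with the lru lock held and the head's refcount frozen:
 * distribute state to the tail pages, drop any tail beyond i_size from the
 * page cache, restore the head's refcount, put the mappings back via
 * remap_page(), then unlock and release every subpage except @page.
 */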
2426baa355fdSKirill A. Shutemov static void __split_huge_page(struct page *page, struct list_head *list,
2427006d3ff2SHugh Dickins 		pgoff_t end, unsigned long flags)
2428e9b61f19SKirill A. Shutemov {
2429e9b61f19SKirill A. Shutemov 	struct page *head = compound_head(page);
2430e9b61f19SKirill A. Shutemov 	struct zone *zone = page_zone(head);
2431e9b61f19SKirill A. Shutemov 	struct lruvec *lruvec;
24328df651c7SKirill A. Shutemov 	int i;
2433e9b61f19SKirill A. Shutemov 
2434599d0c95SMel Gorman 	lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);
2435e9b61f19SKirill A. Shutemov 
2436e9b61f19SKirill A. Shutemov 	/* complete memcg works before add pages to LRU */
2437e9b61f19SKirill A. Shutemov 	mem_cgroup_split_huge_fixup(head);
2438e9b61f19SKirill A. Shutemov 
2439baa355fdSKirill A. Shutemov 	for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
24408df651c7SKirill A. Shutemov 		__split_huge_page_tail(head, i, lruvec, list);
2441baa355fdSKirill A. Shutemov 		/* Some pages can be beyond i_size: drop them from page cache */
2442baa355fdSKirill A. Shutemov 		if (head[i].index >= end) {
24432d077d4bSHugh Dickins 			ClearPageDirty(head + i);
2444baa355fdSKirill A. Shutemov 			__delete_from_page_cache(head + i, NULL);
2445800d8c63SKirill A. Shutemov 			if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
2446800d8c63SKirill A. Shutemov 				shmem_uncharge(head->mapping->host, 1);
2447baa355fdSKirill A. Shutemov 			put_page(head + i);
2448baa355fdSKirill A. Shutemov 		}
2449baa355fdSKirill A. Shutemov 	}
2450e9b61f19SKirill A. Shutemov 
2451e9b61f19SKirill A. Shutemov 	ClearPageCompound(head);
2452baa355fdSKirill A. Shutemov 	/* See comment in __split_huge_page_tail() */
2453baa355fdSKirill A. Shutemov 	if (PageAnon(head)) {
2454aa5dc07fSMatthew Wilcox 		/* Additional pin to swap cache */
245538d8b4e6SHuang Ying 		if (PageSwapCache(head))
245638d8b4e6SHuang Ying 			page_ref_add(head, 2);
245738d8b4e6SHuang Ying 		else
2458baa355fdSKirill A. Shutemov 			page_ref_inc(head);
2459baa355fdSKirill A. Shutemov 	} else {
2460aa5dc07fSMatthew Wilcox 		/* Additional pin to page cache */
2461baa355fdSKirill A. Shutemov 		page_ref_add(head, 2);
2462b93b0163SMatthew Wilcox 		xa_unlock(&head->mapping->i_pages);
2463baa355fdSKirill A. Shutemov 	}
2464baa355fdSKirill A. Shutemov 
2465a52633d8SMel Gorman 	spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
2466e9b61f19SKirill A. Shutemov 
2467906f9cdfSHugh Dickins 	remap_page(head);
2468e9b61f19SKirill A. Shutemov 
2469e9b61f19SKirill A. Shutemov 	for (i = 0; i < HPAGE_PMD_NR; i++) {
2470e9b61f19SKirill A. Shutemov 		struct page *subpage = head + i;
2471e9b61f19SKirill A. Shutemov 		if (subpage == page)
2472e9b61f19SKirill A. Shutemov 			continue;
2473e9b61f19SKirill A. Shutemov 		unlock_page(subpage);
2474e9b61f19SKirill A. Shutemov 
2475e9b61f19SKirill A. Shutemov 		/*
2476e9b61f19SKirill A. Shutemov 		 * Subpages may be freed if there wasn't any mapping
2477e9b61f19SKirill A. Shutemov 		 * e.g. if add_to_swap() is running on an lru page that
2478e9b61f19SKirill A. Shutemov 		 * had its mapping zapped. And freeing these pages
2479e9b61f19SKirill A. Shutemov 		 * requires taking the lru_lock so we do the put_page
2480e9b61f19SKirill A. Shutemov 		 * of the tail pages after the split is complete.
2481e9b61f19SKirill A. Shutemov 		 */
2482e9b61f19SKirill A. Shutemov 		put_page(subpage);
2483e9b61f19SKirill A. Shutemov 	}
2484e9b61f19SKirill A. Shutemov }
2485e9b61f19SKirill A. Shutemov 
2486b20ce5e0SKirill A. Shutemov int total_mapcount(struct page *page)
2487b20ce5e0SKirill A. Shutemov {
2488dd78feddSKirill A. Shutemov 	int i, compound, ret;
2489b20ce5e0SKirill A. Shutemov 
2490b20ce5e0SKirill A. Shutemov 	VM_BUG_ON_PAGE(PageTail(page), page);
2491b20ce5e0SKirill A. Shutemov 
2492b20ce5e0SKirill A. Shutemov 	if (likely(!PageCompound(page)))
2493b20ce5e0SKirill A. Shutemov 		return atomic_read(&page->_mapcount) + 1;
2494b20ce5e0SKirill A. Shutemov 
2495dd78feddSKirill A. Shutemov 	compound = compound_mapcount(page);
2496b20ce5e0SKirill A. Shutemov 	if (PageHuge(page))
2497dd78feddSKirill A. Shutemov 		return compound;
2498dd78feddSKirill A. Shutemov 	ret = compound;
2499b20ce5e0SKirill A. Shutemov 	for (i = 0; i < HPAGE_PMD_NR; i++)
2500b20ce5e0SKirill A. Shutemov 		ret += atomic_read(&page[i]._mapcount) + 1;
2501dd78feddSKirill A. Shutemov 	/* File pages have compound_mapcount included in _mapcount */
2502dd78feddSKirill A. Shutemov 	if (!PageAnon(page))
2503dd78feddSKirill A. Shutemov 		return ret - compound * HPAGE_PMD_NR;
2504b20ce5e0SKirill A. Shutemov 	if (PageDoubleMap(page))
2505b20ce5e0SKirill A. Shutemov 		ret -= HPAGE_PMD_NR;
2506b20ce5e0SKirill A. Shutemov 	return ret;
2507b20ce5e0SKirill A. Shutemov }
2508b20ce5e0SKirill A. Shutemov 
2509e9b61f19SKirill A. Shutemov /*
25106d0a07edSAndrea Arcangeli  * This calculates accurately how many mappings a transparent hugepage
25116d0a07edSAndrea Arcangeli  * has (unlike page_mapcount() which isn't fully accurate). This full
25126d0a07edSAndrea Arcangeli  * accuracy is primarily needed to know if copy-on-write faults can
25136d0a07edSAndrea Arcangeli  * reuse the page and change the mapping to read-write instead of
25146d0a07edSAndrea Arcangeli  * copying them. At the same time this returns the total_mapcount too.
25156d0a07edSAndrea Arcangeli  *
25166d0a07edSAndrea Arcangeli  * The function returns the highest mapcount any one of the subpages
25176d0a07edSAndrea Arcangeli  * has. If the return value is one, even if different processes are
25186d0a07edSAndrea Arcangeli  * mapping different subpages of the transparent hugepage, they can
25196d0a07edSAndrea Arcangeli  * all reuse it, because each process is reusing a different subpage.
25206d0a07edSAndrea Arcangeli  *
25216d0a07edSAndrea Arcangeli  * The total_mapcount is instead counting all virtual mappings of the
25226d0a07edSAndrea Arcangeli  * subpages. If the total_mapcount is equal to "one", it tells the
25236d0a07edSAndrea Arcangeli  * caller all mappings belong to the same "mm" and in turn the
25246d0a07edSAndrea Arcangeli  * anon_vma of the transparent hugepage can become the vma->anon_vma
25256d0a07edSAndrea Arcangeli  * local one as no other process may be mapping any of the subpages.
25266d0a07edSAndrea Arcangeli  *
25276d0a07edSAndrea Arcangeli  * It would be more accurate to replace page_mapcount() with
25286d0a07edSAndrea Arcangeli  * page_trans_huge_mapcount(), however we only use
25296d0a07edSAndrea Arcangeli  * page_trans_huge_mapcount() in the copy-on-write faults where we
25306d0a07edSAndrea Arcangeli  * need full accuracy to avoid breaking page pinning, because
25316d0a07edSAndrea Arcangeli  * page_trans_huge_mapcount() is slower than page_mapcount().
25326d0a07edSAndrea Arcangeli  */
25336d0a07edSAndrea Arcangeli int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
25346d0a07edSAndrea Arcangeli {
25356d0a07edSAndrea Arcangeli 	int i, ret, _total_mapcount, mapcount;
25366d0a07edSAndrea Arcangeli 
25376d0a07edSAndrea Arcangeli 	/* hugetlbfs shouldn't call it */
25386d0a07edSAndrea Arcangeli 	VM_BUG_ON_PAGE(PageHuge(page), page);
25396d0a07edSAndrea Arcangeli 
25406d0a07edSAndrea Arcangeli 	if (likely(!PageTransCompound(page))) {
25416d0a07edSAndrea Arcangeli 		mapcount = atomic_read(&page->_mapcount) + 1;
25426d0a07edSAndrea Arcangeli 		if (total_mapcount)
25436d0a07edSAndrea Arcangeli 			*total_mapcount = mapcount;
25446d0a07edSAndrea Arcangeli 		return mapcount;
25456d0a07edSAndrea Arcangeli 	}
25466d0a07edSAndrea Arcangeli 
25476d0a07edSAndrea Arcangeli 	page = compound_head(page);
25486d0a07edSAndrea Arcangeli 
25496d0a07edSAndrea Arcangeli 	_total_mapcount = ret = 0;
25506d0a07edSAndrea Arcangeli 	for (i = 0; i < HPAGE_PMD_NR; i++) {
25516d0a07edSAndrea Arcangeli 		mapcount = atomic_read(&page[i]._mapcount) + 1;
25526d0a07edSAndrea Arcangeli 		ret = max(ret, mapcount);
25536d0a07edSAndrea Arcangeli 		_total_mapcount += mapcount;
25546d0a07edSAndrea Arcangeli 	}
25556d0a07edSAndrea Arcangeli 	if (PageDoubleMap(page)) {
25566d0a07edSAndrea Arcangeli 		ret -= 1;
25576d0a07edSAndrea Arcangeli 		_total_mapcount -= HPAGE_PMD_NR;
25586d0a07edSAndrea Arcangeli 	}
25596d0a07edSAndrea Arcangeli 	mapcount = compound_mapcount(page);
25606d0a07edSAndrea Arcangeli 	ret += mapcount;
25616d0a07edSAndrea Arcangeli 	_total_mapcount += mapcount;
25626d0a07edSAndrea Arcangeli 	if (total_mapcount)
25636d0a07edSAndrea Arcangeli 		*total_mapcount = _total_mapcount;
25646d0a07edSAndrea Arcangeli 	return ret;
25656d0a07edSAndrea Arcangeli }
25666d0a07edSAndrea Arcangeli 
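/*
 * Pin accounting used below: a split may proceed only if page_count()
 * equals total_mapcount() plus the page cache or swap cache references
 * (extra_pins) plus the single pin held by the caller.
 */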
2567b8f593cdSHuang Ying /* Racy check whether the huge page can be split */
2568b8f593cdSHuang Ying bool can_split_huge_page(struct page *page, int *pextra_pins)
2569b8f593cdSHuang Ying {
2570b8f593cdSHuang Ying 	int extra_pins;
2571b8f593cdSHuang Ying 
2572aa5dc07fSMatthew Wilcox 	/* Additional pins from page cache */
2573b8f593cdSHuang Ying 	if (PageAnon(page))
2574b8f593cdSHuang Ying 		extra_pins = PageSwapCache(page) ? HPAGE_PMD_NR : 0;
2575b8f593cdSHuang Ying 	else
2576b8f593cdSHuang Ying 		extra_pins = HPAGE_PMD_NR;
2577b8f593cdSHuang Ying 	if (pextra_pins)
2578b8f593cdSHuang Ying 		*pextra_pins = extra_pins;
2579b8f593cdSHuang Ying 	return total_mapcount(page) == page_count(page) - extra_pins - 1;
2580b8f593cdSHuang Ying }
2581b8f593cdSHuang Ying 
25826d0a07edSAndrea Arcangeli /*
2583e9b61f19SKirill A. Shutemov  * This function splits huge page into normal pages. @page can point to any
2584e9b61f19SKirill A. Shutemov  * subpage of huge page to split. Split doesn't change the position of @page.
2585e9b61f19SKirill A. Shutemov  *
2586e9b61f19SKirill A. Shutemov  * The caller must hold a pin on the @page, otherwise the split fails with -EBUSY.
2587e9b61f19SKirill A. Shutemov  * The huge page must be locked.
2588e9b61f19SKirill A. Shutemov  *
2589e9b61f19SKirill A. Shutemov  * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
2590e9b61f19SKirill A. Shutemov  *
2591e9b61f19SKirill A. Shutemov  * Both head page and tail pages will inherit mapping, flags, and so on from
2592e9b61f19SKirill A. Shutemov  * the hugepage.
2593e9b61f19SKirill A. Shutemov  *
2594e9b61f19SKirill A. Shutemov  * The GUP pin and PG_locked are transferred to @page. The remaining subpages
2595e9b61f19SKirill A. Shutemov  * can be freed if they are not mapped.
2596e9b61f19SKirill A. Shutemov  *
2597e9b61f19SKirill A. Shutemov  * Returns 0 if the hugepage is split successfully.
2598e9b61f19SKirill A. Shutemov  * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
2599e9b61f19SKirill A. Shutemov  * us.
2600e9b61f19SKirill A. Shutemov  */
2601e9b61f19SKirill A. Shutemov int split_huge_page_to_list(struct page *page, struct list_head *list)
2602e9b61f19SKirill A. Shutemov {
2603e9b61f19SKirill A. Shutemov 	struct page *head = compound_head(page);
2604a3d0a918SKirill A. Shutemov 	struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
2605baa355fdSKirill A. Shutemov 	struct anon_vma *anon_vma = NULL;
2606baa355fdSKirill A. Shutemov 	struct address_space *mapping = NULL;
2607baa355fdSKirill A. Shutemov 	int count, mapcount, extra_pins, ret;
2608d9654322SKirill A. Shutemov 	bool mlocked;
26090b9b6fffSKirill A. Shutemov 	unsigned long flags;
2610006d3ff2SHugh Dickins 	pgoff_t end;
2611e9b61f19SKirill A. Shutemov 
2612e9b61f19SKirill A. Shutemov 	VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
2613e9b61f19SKirill A. Shutemov 	VM_BUG_ON_PAGE(!PageLocked(page), page);
2614e9b61f19SKirill A. Shutemov 	VM_BUG_ON_PAGE(!PageCompound(page), page);
2615e9b61f19SKirill A. Shutemov 
261659807685SHuang Ying 	if (PageWriteback(page))
261759807685SHuang Ying 		return -EBUSY;
261859807685SHuang Ying 
2619baa355fdSKirill A. Shutemov 	if (PageAnon(head)) {
2620e9b61f19SKirill A. Shutemov 		/*
2621baa355fdSKirill A. Shutemov 		 * The caller does not necessarily hold an mmap_sem that would
2622baa355fdSKirill A. Shutemov 		 * prevent the anon_vma disappearing so first we take a
2623baa355fdSKirill A. Shutemov 		 * reference to it and then lock the anon_vma for write. This
2624baa355fdSKirill A. Shutemov 		 * is similar to page_lock_anon_vma_read except the write lock
2625baa355fdSKirill A. Shutemov 		 * is taken to serialise against parallel split or collapse
2626baa355fdSKirill A. Shutemov 		 * operations.
2627e9b61f19SKirill A. Shutemov 		 */
2628e9b61f19SKirill A. Shutemov 		anon_vma = page_get_anon_vma(head);
2629e9b61f19SKirill A. Shutemov 		if (!anon_vma) {
2630e9b61f19SKirill A. Shutemov 			ret = -EBUSY;
2631e9b61f19SKirill A. Shutemov 			goto out;
2632e9b61f19SKirill A. Shutemov 		}
2633006d3ff2SHugh Dickins 		end = -1;
2634baa355fdSKirill A. Shutemov 		mapping = NULL;
2635e9b61f19SKirill A. Shutemov 		anon_vma_lock_write(anon_vma);
2636baa355fdSKirill A. Shutemov 	} else {
2637baa355fdSKirill A. Shutemov 		mapping = head->mapping;
2638baa355fdSKirill A. Shutemov 
2639baa355fdSKirill A. Shutemov 		/* Truncated ? */
2640baa355fdSKirill A. Shutemov 		if (!mapping) {
2641baa355fdSKirill A. Shutemov 			ret = -EBUSY;
2642baa355fdSKirill A. Shutemov 			goto out;
2643baa355fdSKirill A. Shutemov 		}
2644baa355fdSKirill A. Shutemov 
2645baa355fdSKirill A. Shutemov 		anon_vma = NULL;
2646baa355fdSKirill A. Shutemov 		i_mmap_lock_read(mapping);
2647006d3ff2SHugh Dickins 
2648006d3ff2SHugh Dickins 		/*
2649006d3ff2SHugh Dickins 		 * __split_huge_page() may need to trim off pages beyond EOF:
2650006d3ff2SHugh Dickins 		 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
2651006d3ff2SHugh Dickins 		 * which cannot be nested inside the page tree lock. So note
2652006d3ff2SHugh Dickins 		 * end now: i_size itself may be changed at any moment, but
2653006d3ff2SHugh Dickins 		 * head page lock is good enough to serialize the trimming.
2654006d3ff2SHugh Dickins 		 */
2655006d3ff2SHugh Dickins 		end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
2656baa355fdSKirill A. Shutemov 	}
2657e9b61f19SKirill A. Shutemov 
2658e9b61f19SKirill A. Shutemov 	/*
2659906f9cdfSHugh Dickins 	 * Racy check whether we can split the page, before unmap_page()
2660e9b61f19SKirill A. Shutemov 	 * splits the PMDs
2661e9b61f19SKirill A. Shutemov 	 */
2662b8f593cdSHuang Ying 	if (!can_split_huge_page(head, &extra_pins)) {
2663e9b61f19SKirill A. Shutemov 		ret = -EBUSY;
2664e9b61f19SKirill A. Shutemov 		goto out_unlock;
2665e9b61f19SKirill A. Shutemov 	}
2666e9b61f19SKirill A. Shutemov 
2667d9654322SKirill A. Shutemov 	mlocked = PageMlocked(page);
2668906f9cdfSHugh Dickins 	unmap_page(head);
2669e9b61f19SKirill A. Shutemov 	VM_BUG_ON_PAGE(compound_mapcount(head), head);
2670e9b61f19SKirill A. Shutemov 
2671d9654322SKirill A. Shutemov 	/* Make sure the page is not on per-CPU pagevec as it takes pin */
2672d9654322SKirill A. Shutemov 	if (mlocked)
2673d9654322SKirill A. Shutemov 		lru_add_drain();
2674d9654322SKirill A. Shutemov 
2675baa355fdSKirill A. Shutemov 	/* prevent PageLRU to go away from under us, and freeze lru stats */
2676a52633d8SMel Gorman 	spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags);
2677baa355fdSKirill A. Shutemov 
2678baa355fdSKirill A. Shutemov 	if (mapping) {
2679aa5dc07fSMatthew Wilcox 		XA_STATE(xas, &mapping->i_pages, page_index(head));
2680baa355fdSKirill A. Shutemov 
2681baa355fdSKirill A. Shutemov 		/*
2682aa5dc07fSMatthew Wilcox 		 * Check if the head page is present in page cache.
2683baa355fdSKirill A. Shutemov 		 * We assume all tail are present too, if head is there.
2684baa355fdSKirill A. Shutemov 		 */
2685aa5dc07fSMatthew Wilcox 		xa_lock(&mapping->i_pages);
2686aa5dc07fSMatthew Wilcox 		if (xas_load(&xas) != head)
2687baa355fdSKirill A. Shutemov 			goto fail;
2688baa355fdSKirill A. Shutemov 	}
2689baa355fdSKirill A. Shutemov 
26900139aa7bSJoonsoo Kim 	/* Prevent deferred_split_scan() touching ->_refcount */
2691baa355fdSKirill A. Shutemov 	spin_lock(&pgdata->split_queue_lock);
2692e9b61f19SKirill A. Shutemov 	count = page_count(head);
2693e9b61f19SKirill A. Shutemov 	mapcount = total_mapcount(head);
2694baa355fdSKirill A. Shutemov 	if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
26959a982250SKirill A. Shutemov 		if (!list_empty(page_deferred_list(head))) {
2696a3d0a918SKirill A. Shutemov 			pgdata->split_queue_len--;
26979a982250SKirill A. Shutemov 			list_del(page_deferred_list(head));
26989a982250SKirill A. Shutemov 		}
269965c45377SKirill A. Shutemov 		if (mapping)
270011fb9989SMel Gorman 			__dec_node_page_state(page, NR_SHMEM_THPS);
2701baa355fdSKirill A. Shutemov 		spin_unlock(&pgdata->split_queue_lock);
2702006d3ff2SHugh Dickins 		__split_huge_page(page, list, end, flags);
270359807685SHuang Ying 		if (PageSwapCache(head)) {
270459807685SHuang Ying 			swp_entry_t entry = { .val = page_private(head) };
270559807685SHuang Ying 
270659807685SHuang Ying 			ret = split_swap_cluster(entry);
270759807685SHuang Ying 		} else
2708e9b61f19SKirill A. Shutemov 			ret = 0;
2709baa355fdSKirill A. Shutemov 	} else {
2710baa355fdSKirill A. Shutemov 		if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
2711e9b61f19SKirill A. Shutemov 			pr_alert("total_mapcount: %u, page_count(): %u\n",
2712e9b61f19SKirill A. Shutemov 					mapcount, count);
2713e9b61f19SKirill A. Shutemov 			if (PageTail(page))
2714e9b61f19SKirill A. Shutemov 				dump_page(head, NULL);
2715bd56086fSKirill A. Shutemov 			dump_page(page, "total_mapcount(head) > 0");
2716e9b61f19SKirill A. Shutemov 			BUG();
2717baa355fdSKirill A. Shutemov 		}
2718baa355fdSKirill A. Shutemov 		spin_unlock(&pgdata->split_queue_lock);
2719baa355fdSKirill A. Shutemov fail:		if (mapping)
2720b93b0163SMatthew Wilcox 			xa_unlock(&mapping->i_pages);
2721a52633d8SMel Gorman 		spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
2722906f9cdfSHugh Dickins 		remap_page(head);
2723e9b61f19SKirill A. Shutemov 		ret = -EBUSY;
2724e9b61f19SKirill A. Shutemov 	}
2725e9b61f19SKirill A. Shutemov 
2726e9b61f19SKirill A. Shutemov out_unlock:
2727baa355fdSKirill A. Shutemov 	if (anon_vma) {
2728e9b61f19SKirill A. Shutemov 		anon_vma_unlock_write(anon_vma);
2729e9b61f19SKirill A. Shutemov 		put_anon_vma(anon_vma);
2730baa355fdSKirill A. Shutemov 	}
2731baa355fdSKirill A. Shutemov 	if (mapping)
2732baa355fdSKirill A. Shutemov 		i_mmap_unlock_read(mapping);
2733e9b61f19SKirill A. Shutemov out:
2734e9b61f19SKirill A. Shutemov 	count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
2735e9b61f19SKirill A. Shutemov 	return ret;
2736e9b61f19SKirill A. Shutemov }
27379a982250SKirill A. Shutemov 
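/*
 * Deferred splitting: a THP (typically one left partially mapped) can be
 * queued on its node's split_queue via deferred_split_huge_page() and is
 * then split lazily by the deferred_split shrinker under memory pressure;
 * free_transhuge_page() takes a page back off the queue if it is freed
 * before that happens.
 */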
27389a982250SKirill A. Shutemov void free_transhuge_page(struct page *page)
27399a982250SKirill A. Shutemov {
2740a3d0a918SKirill A. Shutemov 	struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
27419a982250SKirill A. Shutemov 	unsigned long flags;
27429a982250SKirill A. Shutemov 
2743a3d0a918SKirill A. Shutemov 	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
27449a982250SKirill A. Shutemov 	if (!list_empty(page_deferred_list(page))) {
2745a3d0a918SKirill A. Shutemov 		pgdata->split_queue_len--;
27469a982250SKirill A. Shutemov 		list_del(page_deferred_list(page));
27479a982250SKirill A. Shutemov 	}
2748a3d0a918SKirill A. Shutemov 	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
27499a982250SKirill A. Shutemov 	free_compound_page(page);
27509a982250SKirill A. Shutemov }
27519a982250SKirill A. Shutemov 
27529a982250SKirill A. Shutemov void deferred_split_huge_page(struct page *page)
27539a982250SKirill A. Shutemov {
2754a3d0a918SKirill A. Shutemov 	struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
27559a982250SKirill A. Shutemov 	unsigned long flags;
27569a982250SKirill A. Shutemov 
27579a982250SKirill A. Shutemov 	VM_BUG_ON_PAGE(!PageTransHuge(page), page);
27589a982250SKirill A. Shutemov 
2759a3d0a918SKirill A. Shutemov 	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
27609a982250SKirill A. Shutemov 	if (list_empty(page_deferred_list(page))) {
2761f9719a03SKirill A. Shutemov 		count_vm_event(THP_DEFERRED_SPLIT_PAGE);
2762a3d0a918SKirill A. Shutemov 		list_add_tail(page_deferred_list(page), &pgdata->split_queue);
2763a3d0a918SKirill A. Shutemov 		pgdata->split_queue_len++;
27649a982250SKirill A. Shutemov 	}
2765a3d0a918SKirill A. Shutemov 	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
27669a982250SKirill A. Shutemov }
27679a982250SKirill A. Shutemov 
27689a982250SKirill A. Shutemov static unsigned long deferred_split_count(struct shrinker *shrink,
27699a982250SKirill A. Shutemov 		struct shrink_control *sc)
27709a982250SKirill A. Shutemov {
2771a3d0a918SKirill A. Shutemov 	struct pglist_data *pgdata = NODE_DATA(sc->nid);
27726aa7de05SMark Rutland 	return READ_ONCE(pgdata->split_queue_len);
27739a982250SKirill A. Shutemov }
27749a982250SKirill A. Shutemov 
27759a982250SKirill A. Shutemov static unsigned long deferred_split_scan(struct shrinker *shrink,
27769a982250SKirill A. Shutemov 		struct shrink_control *sc)
27779a982250SKirill A. Shutemov {
2778a3d0a918SKirill A. Shutemov 	struct pglist_data *pgdata = NODE_DATA(sc->nid);
27799a982250SKirill A. Shutemov 	unsigned long flags;
27809a982250SKirill A. Shutemov 	LIST_HEAD(list), *pos, *next;
27819a982250SKirill A. Shutemov 	struct page *page;
27829a982250SKirill A. Shutemov 	int split = 0;
27839a982250SKirill A. Shutemov 
2784a3d0a918SKirill A. Shutemov 	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
27859a982250SKirill A. Shutemov 	/* Take pin on all head pages to avoid freeing them under us */
2786ae026204SKirill A. Shutemov 	list_for_each_safe(pos, next, &pgdata->split_queue) {
27879a982250SKirill A. Shutemov 		page = list_entry((void *)pos, struct page, mapping);
27889a982250SKirill A. Shutemov 		page = compound_head(page);
2789e3ae1953SKirill A. Shutemov 		if (get_page_unless_zero(page)) {
2790e3ae1953SKirill A. Shutemov 			list_move(page_deferred_list(page), &list);
2791e3ae1953SKirill A. Shutemov 		} else {
2792e3ae1953SKirill A. Shutemov 			/* We lost race with put_compound_page() */
27939a982250SKirill A. Shutemov 			list_del_init(page_deferred_list(page));
2794a3d0a918SKirill A. Shutemov 			pgdata->split_queue_len--;
27959a982250SKirill A. Shutemov 		}
2796e3ae1953SKirill A. Shutemov 		if (!--sc->nr_to_scan)
2797e3ae1953SKirill A. Shutemov 			break;
27989a982250SKirill A. Shutemov 	}
2799a3d0a918SKirill A. Shutemov 	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
28009a982250SKirill A. Shutemov 
28019a982250SKirill A. Shutemov 	list_for_each_safe(pos, next, &list) {
28029a982250SKirill A. Shutemov 		page = list_entry((void *)pos, struct page, mapping);
2803fa41b900SKirill A. Shutemov 		if (!trylock_page(page))
2804fa41b900SKirill A. Shutemov 			goto next;
28059a982250SKirill A. Shutemov 		/* split_huge_page() removes page from list on success */
28069a982250SKirill A. Shutemov 		if (!split_huge_page(page))
28079a982250SKirill A. Shutemov 			split++;
28089a982250SKirill A. Shutemov 		unlock_page(page);
2809fa41b900SKirill A. Shutemov next:
28109a982250SKirill A. Shutemov 		put_page(page);
28119a982250SKirill A. Shutemov 	}
28129a982250SKirill A. Shutemov 
2813a3d0a918SKirill A. Shutemov 	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
2814a3d0a918SKirill A. Shutemov 	list_splice_tail(&list, &pgdata->split_queue);
2815a3d0a918SKirill A. Shutemov 	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
28169a982250SKirill A. Shutemov 
2817cb8d68ecSKirill A. Shutemov 	/*
2818cb8d68ecSKirill A. Shutemov 	 * Stop the shrinker if we didn't split any page and the queue is empty.
2819cb8d68ecSKirill A. Shutemov 	 * This can happen if pages were freed under us.
2820cb8d68ecSKirill A. Shutemov 	 */
2821cb8d68ecSKirill A. Shutemov 	if (!split && list_empty(&pgdata->split_queue))
2822cb8d68ecSKirill A. Shutemov 		return SHRINK_STOP;
2823cb8d68ecSKirill A. Shutemov 	return split;
28249a982250SKirill A. Shutemov }
28259a982250SKirill A. Shutemov 
28269a982250SKirill A. Shutemov static struct shrinker deferred_split_shrinker = {
28279a982250SKirill A. Shutemov 	.count_objects = deferred_split_count,
28289a982250SKirill A. Shutemov 	.scan_objects = deferred_split_scan,
28299a982250SKirill A. Shutemov 	.seeks = DEFAULT_SEEKS,
2830a3d0a918SKirill A. Shutemov 	.flags = SHRINKER_NUMA_AWARE,
28319a982250SKirill A. Shutemov };
283249071d43SKirill A. Shutemov 
283349071d43SKirill A. Shutemov #ifdef CONFIG_DEBUG_FS
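/*
 * Debugfs test knob: writing "1" to the (root-only, 0200) file created
 * below walks every populated zone and tries to split each THP found on
 * the LRU. With debugfs mounted at its usual location:
 *
 *	echo 1 > /sys/kernel/debug/split_huge_pages
 */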
283449071d43SKirill A. Shutemov static int split_huge_pages_set(void *data, u64 val)
283549071d43SKirill A. Shutemov {
283649071d43SKirill A. Shutemov 	struct zone *zone;
283749071d43SKirill A. Shutemov 	struct page *page;
283849071d43SKirill A. Shutemov 	unsigned long pfn, max_zone_pfn;
283949071d43SKirill A. Shutemov 	unsigned long total = 0, split = 0;
284049071d43SKirill A. Shutemov 
284149071d43SKirill A. Shutemov 	if (val != 1)
284249071d43SKirill A. Shutemov 		return -EINVAL;
284349071d43SKirill A. Shutemov 
284449071d43SKirill A. Shutemov 	for_each_populated_zone(zone) {
284549071d43SKirill A. Shutemov 		max_zone_pfn = zone_end_pfn(zone);
284649071d43SKirill A. Shutemov 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
284749071d43SKirill A. Shutemov 			if (!pfn_valid(pfn))
284849071d43SKirill A. Shutemov 				continue;
284949071d43SKirill A. Shutemov 
285049071d43SKirill A. Shutemov 			page = pfn_to_page(pfn);
285149071d43SKirill A. Shutemov 			if (!get_page_unless_zero(page))
285249071d43SKirill A. Shutemov 				continue;
285349071d43SKirill A. Shutemov 
285449071d43SKirill A. Shutemov 			if (zone != page_zone(page))
285549071d43SKirill A. Shutemov 				goto next;
285649071d43SKirill A. Shutemov 
2857baa355fdSKirill A. Shutemov 			if (!PageHead(page) || PageHuge(page) || !PageLRU(page))
285849071d43SKirill A. Shutemov 				goto next;
285949071d43SKirill A. Shutemov 
286049071d43SKirill A. Shutemov 			total++;
286149071d43SKirill A. Shutemov 			lock_page(page);
286249071d43SKirill A. Shutemov 			if (!split_huge_page(page))
286349071d43SKirill A. Shutemov 				split++;
286449071d43SKirill A. Shutemov 			unlock_page(page);
286549071d43SKirill A. Shutemov next:
286649071d43SKirill A. Shutemov 			put_page(page);
286749071d43SKirill A. Shutemov 		}
286849071d43SKirill A. Shutemov 	}
286949071d43SKirill A. Shutemov 
2870145bdaa1SYang Shi 	pr_info("%lu of %lu THP split\n", split, total);
287149071d43SKirill A. Shutemov 
287249071d43SKirill A. Shutemov 	return 0;
287349071d43SKirill A. Shutemov }
287449071d43SKirill A. Shutemov DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set,
287549071d43SKirill A. Shutemov 		"%llu\n");
287649071d43SKirill A. Shutemov 
287749071d43SKirill A. Shutemov static int __init split_huge_pages_debugfs(void)
287849071d43SKirill A. Shutemov {
287949071d43SKirill A. Shutemov 	void *ret;
288049071d43SKirill A. Shutemov 
2881145bdaa1SYang Shi 	ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
288249071d43SKirill A. Shutemov 			&split_huge_pages_fops);
288349071d43SKirill A. Shutemov 	if (!ret)
288449071d43SKirill A. Shutemov 		pr_warn("Failed to create split_huge_pages in debugfs");
288549071d43SKirill A. Shutemov 	return 0;
288649071d43SKirill A. Shutemov }
288749071d43SKirill A. Shutemov late_initcall(split_huge_pages_debugfs);
288849071d43SKirill A. Shutemov #endif
2889616b8371SZi Yan 
2890616b8371SZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
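/*
 * Unmap side of THP migration: replace the present huge pmd found by the
 * page_vma_mapped_walk with a migration swap entry, transferring the dirty
 * bit to the page and preserving soft-dirty, then drop the rmap and the
 * reference that went with it.
 */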
2891616b8371SZi Yan void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
2892616b8371SZi Yan 		struct page *page)
2893616b8371SZi Yan {
2894616b8371SZi Yan 	struct vm_area_struct *vma = pvmw->vma;
2895616b8371SZi Yan 	struct mm_struct *mm = vma->vm_mm;
2896616b8371SZi Yan 	unsigned long address = pvmw->address;
2897616b8371SZi Yan 	pmd_t pmdval;
2898616b8371SZi Yan 	swp_entry_t entry;
2899ab6e3d09SNaoya Horiguchi 	pmd_t pmdswp;
2900616b8371SZi Yan 
2901616b8371SZi Yan 	if (!(pvmw->pmd && !pvmw->pte))
2902616b8371SZi Yan 		return;
2903616b8371SZi Yan 
2904616b8371SZi Yan 	flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
2905616b8371SZi Yan 	pmdval = *pvmw->pmd;
2906616b8371SZi Yan 	pmdp_invalidate(vma, address, pvmw->pmd);
2907616b8371SZi Yan 	if (pmd_dirty(pmdval))
2908616b8371SZi Yan 		set_page_dirty(page);
2909616b8371SZi Yan 	entry = make_migration_entry(page, pmd_write(pmdval));
2910ab6e3d09SNaoya Horiguchi 	pmdswp = swp_entry_to_pmd(entry);
2911ab6e3d09SNaoya Horiguchi 	if (pmd_soft_dirty(pmdval))
2912ab6e3d09SNaoya Horiguchi 		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
2913ab6e3d09SNaoya Horiguchi 	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
2914616b8371SZi Yan 	page_remove_rmap(page, true);
2915616b8371SZi Yan 	put_page(page);
2916616b8371SZi Yan }
2917616b8371SZi Yan 
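/*
 * Restore side of THP migration: rebuild a present huge pmd for @new from
 * the migration entry, reapplying the write and soft-dirty bits, re-add the
 * rmap (anon or file) and mlock the page again when the vma is VM_LOCKED.
 */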
2918616b8371SZi Yan void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
2919616b8371SZi Yan {
2920616b8371SZi Yan 	struct vm_area_struct *vma = pvmw->vma;
2921616b8371SZi Yan 	struct mm_struct *mm = vma->vm_mm;
2922616b8371SZi Yan 	unsigned long address = pvmw->address;
2923616b8371SZi Yan 	unsigned long mmun_start = address & HPAGE_PMD_MASK;
2924616b8371SZi Yan 	pmd_t pmde;
2925616b8371SZi Yan 	swp_entry_t entry;
2926616b8371SZi Yan 
2927616b8371SZi Yan 	if (!(pvmw->pmd && !pvmw->pte))
2928616b8371SZi Yan 		return;
2929616b8371SZi Yan 
2930616b8371SZi Yan 	entry = pmd_to_swp_entry(*pvmw->pmd);
2931616b8371SZi Yan 	get_page(new);
2932616b8371SZi Yan 	pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
2933ab6e3d09SNaoya Horiguchi 	if (pmd_swp_soft_dirty(*pvmw->pmd))
2934ab6e3d09SNaoya Horiguchi 		pmde = pmd_mksoft_dirty(pmde);
2935616b8371SZi Yan 	if (is_write_migration_entry(entry))
2936f55e1014SLinus Torvalds 		pmde = maybe_pmd_mkwrite(pmde, vma);
2937616b8371SZi Yan 
2938616b8371SZi Yan 	flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
2939e71769aeSNaoya Horiguchi 	if (PageAnon(new))
2940616b8371SZi Yan 		page_add_anon_rmap(new, vma, mmun_start, true);
2941e71769aeSNaoya Horiguchi 	else
2942e71769aeSNaoya Horiguchi 		page_add_file_rmap(new, true);
2943616b8371SZi Yan 	set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
2944e125fe40SKirill A. Shutemov 	if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
2945616b8371SZi Yan 		mlock_vma_page(new);
2946616b8371SZi Yan 	update_mmu_cache_pmd(vma, address, pvmw->pmd);
2947616b8371SZi Yan }
2948616b8371SZi Yan #endif
2949