xref: /linux/mm/huge_memory.c (revision 8a8683ad9ba48b4b52a57f013513d1635c1ca5c4)
120c8ccb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
271e3aac0SAndrea Arcangeli /*
371e3aac0SAndrea Arcangeli  *  Copyright (C) 2009  Red Hat, Inc.
471e3aac0SAndrea Arcangeli  */
571e3aac0SAndrea Arcangeli 
6ae3a8c1cSAndrew Morton #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7ae3a8c1cSAndrew Morton 
871e3aac0SAndrea Arcangeli #include <linux/mm.h>
971e3aac0SAndrea Arcangeli #include <linux/sched.h>
10f7ccbae4SIngo Molnar #include <linux/sched/coredump.h>
116a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h>
1271e3aac0SAndrea Arcangeli #include <linux/highmem.h>
1371e3aac0SAndrea Arcangeli #include <linux/hugetlb.h>
1471e3aac0SAndrea Arcangeli #include <linux/mmu_notifier.h>
1571e3aac0SAndrea Arcangeli #include <linux/rmap.h>
1671e3aac0SAndrea Arcangeli #include <linux/swap.h>
1797ae1749SKirill A. Shutemov #include <linux/shrinker.h>
18ba76149fSAndrea Arcangeli #include <linux/mm_inline.h>
19e9b61f19SKirill A. Shutemov #include <linux/swapops.h>
204897c765SMatthew Wilcox #include <linux/dax.h>
21ba76149fSAndrea Arcangeli #include <linux/khugepaged.h>
22878aee7dSAndrea Arcangeli #include <linux/freezer.h>
23f25748e3SDan Williams #include <linux/pfn_t.h>
24a664b2d8SAndrea Arcangeli #include <linux/mman.h>
253565fce3SDan Williams #include <linux/memremap.h>
26325adeb5SRalf Baechle #include <linux/pagemap.h>
2749071d43SKirill A. Shutemov #include <linux/debugfs.h>
284daae3b4SMel Gorman #include <linux/migrate.h>
2943b5fbbdSSasha Levin #include <linux/hashtable.h>
306b251fc9SAndrea Arcangeli #include <linux/userfaultfd_k.h>
3133c3fc71SVladimir Davydov #include <linux/page_idle.h>
32baa355fdSKirill A. Shutemov #include <linux/shmem_fs.h>
336b31d595SMichal Hocko #include <linux/oom.h>
3498fa15f3SAnshuman Khandual #include <linux/numa.h>
35f7da677bSVlastimil Babka #include <linux/page_owner.h>
3697ae1749SKirill A. Shutemov 
3771e3aac0SAndrea Arcangeli #include <asm/tlb.h>
3871e3aac0SAndrea Arcangeli #include <asm/pgalloc.h>
3971e3aac0SAndrea Arcangeli #include "internal.h"
4071e3aac0SAndrea Arcangeli 
41ba76149fSAndrea Arcangeli /*
42b14d595aSMichael DeGuzis  * By default, transparent hugepage support is disabled in order to avoid
43b14d595aSMichael DeGuzis  * risking an increased memory footprint for applications that are not
44b14d595aSMichael DeGuzis  * guaranteed to benefit from it. When transparent hugepage support is
45b14d595aSMichael DeGuzis  * enabled, it is for all mappings, and khugepaged scans all mappings.
468bfa3f9aSJianguo Wu  * Defrag is invoked by khugepaged hugepage allocations and by page faults
478bfa3f9aSJianguo Wu  * for all hugepage allocations.
48ba76149fSAndrea Arcangeli  */
4971e3aac0SAndrea Arcangeli unsigned long transparent_hugepage_flags __read_mostly =
5013ece886SAndrea Arcangeli #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
51ba76149fSAndrea Arcangeli 	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
5213ece886SAndrea Arcangeli #endif
5313ece886SAndrea Arcangeli #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
5413ece886SAndrea Arcangeli 	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
5513ece886SAndrea Arcangeli #endif
56444eb2a4SMel Gorman 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
5779da5407SKirill A. Shutemov 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
5879da5407SKirill A. Shutemov 	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
59ba76149fSAndrea Arcangeli 
609a982250SKirill A. Shutemov static struct shrinker deferred_split_shrinker;
61f000565aSAndrea Arcangeli 
6297ae1749SKirill A. Shutemov static atomic_t huge_zero_refcount;
6356873f43SWang, Yalin struct page *huge_zero_page __read_mostly;
644a6c1297SKirill A. Shutemov 
657635d9cbSMichal Hocko bool transparent_hugepage_enabled(struct vm_area_struct *vma)
667635d9cbSMichal Hocko {
67c0630669SYang Shi 	/* The addr is used to check if the vma size fits */
68c0630669SYang Shi 	unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE;
69c0630669SYang Shi 
70c0630669SYang Shi 	if (!transhuge_vma_suitable(vma, addr))
71c0630669SYang Shi 		return false;
727635d9cbSMichal Hocko 	if (vma_is_anonymous(vma))
737635d9cbSMichal Hocko 		return __transparent_hugepage_enabled(vma);
74c0630669SYang Shi 	if (vma_is_shmem(vma))
75c0630669SYang Shi 		return shmem_huge_enabled(vma);
767635d9cbSMichal Hocko 
777635d9cbSMichal Hocko 	return false;
787635d9cbSMichal Hocko }
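/*
 * A short illustrative note: this helper answers "could this vma be backed
 * by THP at all?" and is what backs, for instance, the THPeligible field in
 * /proc/<pid>/smaps. For example, a 4MiB anonymous mapping under the
 * "madvise" policy with MADV_HUGEPAGE set reports THPeligible: 1, while a
 * 64KiB mapping is too small to hold a PMD-sized page and reports 0.
 */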
797635d9cbSMichal Hocko 
806fcb52a5SAaron Lu static struct page *get_huge_zero_page(void)
8197ae1749SKirill A. Shutemov {
8297ae1749SKirill A. Shutemov 	struct page *zero_page;
8397ae1749SKirill A. Shutemov retry:
8497ae1749SKirill A. Shutemov 	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
854db0c3c2SJason Low 		return READ_ONCE(huge_zero_page);
8697ae1749SKirill A. Shutemov 
8797ae1749SKirill A. Shutemov 	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
8897ae1749SKirill A. Shutemov 			HPAGE_PMD_ORDER);
89d8a8e1f0SKirill A. Shutemov 	if (!zero_page) {
90d8a8e1f0SKirill A. Shutemov 		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
915918d10aSKirill A. Shutemov 		return NULL;
92d8a8e1f0SKirill A. Shutemov 	}
93d8a8e1f0SKirill A. Shutemov 	count_vm_event(THP_ZERO_PAGE_ALLOC);
9497ae1749SKirill A. Shutemov 	preempt_disable();
955918d10aSKirill A. Shutemov 	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
9697ae1749SKirill A. Shutemov 		preempt_enable();
975ddacbe9SYu Zhao 		__free_pages(zero_page, compound_order(zero_page));
9897ae1749SKirill A. Shutemov 		goto retry;
9997ae1749SKirill A. Shutemov 	}
10097ae1749SKirill A. Shutemov 
10197ae1749SKirill A. Shutemov 	/* Take an additional reference; it will be put back by the shrinker */
10297ae1749SKirill A. Shutemov 	atomic_set(&huge_zero_refcount, 2);
10397ae1749SKirill A. Shutemov 	preempt_enable();
1044db0c3c2SJason Low 	return READ_ONCE(huge_zero_page);
10597ae1749SKirill A. Shutemov }
10697ae1749SKirill A. Shutemov 
1076fcb52a5SAaron Lu static void put_huge_zero_page(void)
10897ae1749SKirill A. Shutemov {
10997ae1749SKirill A. Shutemov 	/*
11097ae1749SKirill A. Shutemov 	 * The counter should never reach zero here; only the shrinker can
11197ae1749SKirill A. Shutemov 	 * put the last reference.
11297ae1749SKirill A. Shutemov 	 */
11397ae1749SKirill A. Shutemov 	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
11497ae1749SKirill A. Shutemov }
11597ae1749SKirill A. Shutemov 
1166fcb52a5SAaron Lu struct page *mm_get_huge_zero_page(struct mm_struct *mm)
1176fcb52a5SAaron Lu {
1186fcb52a5SAaron Lu 	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
1196fcb52a5SAaron Lu 		return READ_ONCE(huge_zero_page);
1206fcb52a5SAaron Lu 
1216fcb52a5SAaron Lu 	if (!get_huge_zero_page())
1226fcb52a5SAaron Lu 		return NULL;
1236fcb52a5SAaron Lu 
1246fcb52a5SAaron Lu 	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
1256fcb52a5SAaron Lu 		put_huge_zero_page();
1266fcb52a5SAaron Lu 
1276fcb52a5SAaron Lu 	return READ_ONCE(huge_zero_page);
1286fcb52a5SAaron Lu }
1296fcb52a5SAaron Lu 
1306fcb52a5SAaron Lu void mm_put_huge_zero_page(struct mm_struct *mm)
1316fcb52a5SAaron Lu {
1326fcb52a5SAaron Lu 	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
1336fcb52a5SAaron Lu 		put_huge_zero_page();
1346fcb52a5SAaron Lu }
1356fcb52a5SAaron Lu 
13648896466SGlauber Costa static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
13797ae1749SKirill A. Shutemov 					struct shrink_control *sc)
13897ae1749SKirill A. Shutemov {
13997ae1749SKirill A. Shutemov 	/* we can free zero page only if last reference remains */
14097ae1749SKirill A. Shutemov 	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
14148896466SGlauber Costa }
14297ae1749SKirill A. Shutemov 
14348896466SGlauber Costa static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
14448896466SGlauber Costa 				       struct shrink_control *sc)
14548896466SGlauber Costa {
14697ae1749SKirill A. Shutemov 	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
1475918d10aSKirill A. Shutemov 		struct page *zero_page = xchg(&huge_zero_page, NULL);
1485918d10aSKirill A. Shutemov 		BUG_ON(zero_page == NULL);
1495ddacbe9SYu Zhao 		__free_pages(zero_page, compound_order(zero_page));
15048896466SGlauber Costa 		return HPAGE_PMD_NR;
15197ae1749SKirill A. Shutemov 	}
15297ae1749SKirill A. Shutemov 
15397ae1749SKirill A. Shutemov 	return 0;
15497ae1749SKirill A. Shutemov }
15597ae1749SKirill A. Shutemov 
15697ae1749SKirill A. Shutemov static struct shrinker huge_zero_page_shrinker = {
15748896466SGlauber Costa 	.count_objects = shrink_huge_zero_page_count,
15848896466SGlauber Costa 	.scan_objects = shrink_huge_zero_page_scan,
15997ae1749SKirill A. Shutemov 	.seeks = DEFAULT_SEEKS,
16097ae1749SKirill A. Shutemov };
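/*
 * Sketch of the huge zero page lifecycle implemented above (derived from
 * the code, shown only as an illustration):
 *
 *   first user:  get_huge_zero_page() allocates the page and sets the
 *                refcount to 2 - one for the caller plus the extra
 *                reference that only the shrinker may drop;
 *   more users:  atomic_inc_not_zero()/put_huge_zero_page() move the
 *                count up and down but never below 1;
 *   reclaim:     once all users are gone the count is exactly 1,
 *                shrink_huge_zero_page_count() reports HPAGE_PMD_NR
 *                reclaimable pages and the scan callback cmpxchg()es the
 *                count from 1 to 0 and frees the page.
 */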
16197ae1749SKirill A. Shutemov 
16271e3aac0SAndrea Arcangeli #ifdef CONFIG_SYSFS
16371e3aac0SAndrea Arcangeli static ssize_t enabled_show(struct kobject *kobj,
16471e3aac0SAndrea Arcangeli 			    struct kobj_attribute *attr, char *buf)
16571e3aac0SAndrea Arcangeli {
166444eb2a4SMel Gorman 	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
167444eb2a4SMel Gorman 		return sprintf(buf, "[always] madvise never\n");
168444eb2a4SMel Gorman 	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags))
169444eb2a4SMel Gorman 		return sprintf(buf, "always [madvise] never\n");
170444eb2a4SMel Gorman 	else
171444eb2a4SMel Gorman 		return sprintf(buf, "always madvise [never]\n");
17271e3aac0SAndrea Arcangeli }
173444eb2a4SMel Gorman 
17471e3aac0SAndrea Arcangeli static ssize_t enabled_store(struct kobject *kobj,
17571e3aac0SAndrea Arcangeli 			     struct kobj_attribute *attr,
17671e3aac0SAndrea Arcangeli 			     const char *buf, size_t count)
17771e3aac0SAndrea Arcangeli {
17821440d7eSDavid Rientjes 	ssize_t ret = count;
179ba76149fSAndrea Arcangeli 
180f42f2552SDavid Rientjes 	if (sysfs_streq(buf, "always")) {
18121440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
18221440d7eSDavid Rientjes 		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
183f42f2552SDavid Rientjes 	} else if (sysfs_streq(buf, "madvise")) {
18421440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
18521440d7eSDavid Rientjes 		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
186f42f2552SDavid Rientjes 	} else if (sysfs_streq(buf, "never")) {
18721440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
18821440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
18921440d7eSDavid Rientjes 	} else
19021440d7eSDavid Rientjes 		ret = -EINVAL;
191ba76149fSAndrea Arcangeli 
192ba76149fSAndrea Arcangeli 	if (ret > 0) {
193b46e756fSKirill A. Shutemov 		int err = start_stop_khugepaged();
194ba76149fSAndrea Arcangeli 		if (err)
195ba76149fSAndrea Arcangeli 			ret = err;
196ba76149fSAndrea Arcangeli 	}
197ba76149fSAndrea Arcangeli 	return ret;
19871e3aac0SAndrea Arcangeli }
19971e3aac0SAndrea Arcangeli static struct kobj_attribute enabled_attr =
20071e3aac0SAndrea Arcangeli 	__ATTR(enabled, 0644, enabled_show, enabled_store);
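/*
 * Example of the resulting interface (a sketch, assuming the usual sysfs
 * mount): the attribute above appears as
 * /sys/kernel/mm/transparent_hugepage/enabled.
 *
 *   $ cat /sys/kernel/mm/transparent_hugepage/enabled
 *   always [madvise] never
 *   # echo never > /sys/kernel/mm/transparent_hugepage/enabled
 *
 * A write flips the TRANSPARENT_HUGEPAGE_*_FLAG bits as in enabled_store()
 * and then calls start_stop_khugepaged().
 */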
20171e3aac0SAndrea Arcangeli 
202b46e756fSKirill A. Shutemov ssize_t single_hugepage_flag_show(struct kobject *kobj,
20371e3aac0SAndrea Arcangeli 				struct kobj_attribute *attr, char *buf,
20471e3aac0SAndrea Arcangeli 				enum transparent_hugepage_flag flag)
20571e3aac0SAndrea Arcangeli {
206e27e6151SBen Hutchings 	return sprintf(buf, "%d\n",
207e27e6151SBen Hutchings 		       !!test_bit(flag, &transparent_hugepage_flags));
20871e3aac0SAndrea Arcangeli }
209e27e6151SBen Hutchings 
210b46e756fSKirill A. Shutemov ssize_t single_hugepage_flag_store(struct kobject *kobj,
21171e3aac0SAndrea Arcangeli 				 struct kobj_attribute *attr,
21271e3aac0SAndrea Arcangeli 				 const char *buf, size_t count,
21371e3aac0SAndrea Arcangeli 				 enum transparent_hugepage_flag flag)
21471e3aac0SAndrea Arcangeli {
215e27e6151SBen Hutchings 	unsigned long value;
216e27e6151SBen Hutchings 	int ret;
217e27e6151SBen Hutchings 
218e27e6151SBen Hutchings 	ret = kstrtoul(buf, 10, &value);
219e27e6151SBen Hutchings 	if (ret < 0)
220e27e6151SBen Hutchings 		return ret;
221e27e6151SBen Hutchings 	if (value > 1)
22271e3aac0SAndrea Arcangeli 		return -EINVAL;
22371e3aac0SAndrea Arcangeli 
224e27e6151SBen Hutchings 	if (value)
225e27e6151SBen Hutchings 		set_bit(flag, &transparent_hugepage_flags);
226e27e6151SBen Hutchings 	else
227e27e6151SBen Hutchings 		clear_bit(flag, &transparent_hugepage_flags);
228e27e6151SBen Hutchings 
22971e3aac0SAndrea Arcangeli 	return count;
23071e3aac0SAndrea Arcangeli }
23171e3aac0SAndrea Arcangeli 
23271e3aac0SAndrea Arcangeli static ssize_t defrag_show(struct kobject *kobj,
23371e3aac0SAndrea Arcangeli 			   struct kobj_attribute *attr, char *buf)
23471e3aac0SAndrea Arcangeli {
235444eb2a4SMel Gorman 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
23621440d7eSDavid Rientjes 		return sprintf(buf, "[always] defer defer+madvise madvise never\n");
237444eb2a4SMel Gorman 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
23821440d7eSDavid Rientjes 		return sprintf(buf, "always [defer] defer+madvise madvise never\n");
23921440d7eSDavid Rientjes 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
24021440d7eSDavid Rientjes 		return sprintf(buf, "always defer [defer+madvise] madvise never\n");
24121440d7eSDavid Rientjes 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
24221440d7eSDavid Rientjes 		return sprintf(buf, "always defer defer+madvise [madvise] never\n");
24321440d7eSDavid Rientjes 	return sprintf(buf, "always defer defer+madvise madvise [never]\n");
24471e3aac0SAndrea Arcangeli }
24521440d7eSDavid Rientjes 
24671e3aac0SAndrea Arcangeli static ssize_t defrag_store(struct kobject *kobj,
24771e3aac0SAndrea Arcangeli 			    struct kobj_attribute *attr,
24871e3aac0SAndrea Arcangeli 			    const char *buf, size_t count)
24971e3aac0SAndrea Arcangeli {
250f42f2552SDavid Rientjes 	if (sysfs_streq(buf, "always")) {
25121440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
25221440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
25321440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
25421440d7eSDavid Rientjes 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
255f42f2552SDavid Rientjes 	} else if (sysfs_streq(buf, "defer+madvise")) {
25621440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
25721440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
25821440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
25921440d7eSDavid Rientjes 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
260f42f2552SDavid Rientjes 	} else if (sysfs_streq(buf, "defer")) {
2614fad7fb6SDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
2624fad7fb6SDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
2634fad7fb6SDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
2644fad7fb6SDavid Rientjes 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
265f42f2552SDavid Rientjes 	} else if (sysfs_streq(buf, "madvise")) {
26621440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
26721440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
26821440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
26921440d7eSDavid Rientjes 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
270f42f2552SDavid Rientjes 	} else if (sysfs_streq(buf, "never")) {
27121440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
27221440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
27321440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
27421440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
27521440d7eSDavid Rientjes 	} else
27621440d7eSDavid Rientjes 		return -EINVAL;
27721440d7eSDavid Rientjes 
27821440d7eSDavid Rientjes 	return count;
27971e3aac0SAndrea Arcangeli }
28071e3aac0SAndrea Arcangeli static struct kobj_attribute defrag_attr =
28171e3aac0SAndrea Arcangeli 	__ATTR(defrag, 0644, defrag_show, defrag_store);
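/*
 * Example (a sketch, same sysfs location as "enabled"): the "defrag" file
 * accepts the five values enumerated in defrag_show(), e.g.
 *
 *   # echo defer+madvise > /sys/kernel/mm/transparent_hugepage/defrag
 *
 * selects TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, i.e. stall for
 * compaction only in MADV_HUGEPAGE regions and merely wake kswapd
 * elsewhere (see alloc_hugepage_direct_gfpmask() below).
 */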
28271e3aac0SAndrea Arcangeli 
28379da5407SKirill A. Shutemov static ssize_t use_zero_page_show(struct kobject *kobj,
28479da5407SKirill A. Shutemov 		struct kobj_attribute *attr, char *buf)
28579da5407SKirill A. Shutemov {
286b46e756fSKirill A. Shutemov 	return single_hugepage_flag_show(kobj, attr, buf,
28779da5407SKirill A. Shutemov 				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
28879da5407SKirill A. Shutemov }
28979da5407SKirill A. Shutemov static ssize_t use_zero_page_store(struct kobject *kobj,
29079da5407SKirill A. Shutemov 		struct kobj_attribute *attr, const char *buf, size_t count)
29179da5407SKirill A. Shutemov {
292b46e756fSKirill A. Shutemov 	return single_hugepage_flag_store(kobj, attr, buf, count,
29379da5407SKirill A. Shutemov 				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
29479da5407SKirill A. Shutemov }
29579da5407SKirill A. Shutemov static struct kobj_attribute use_zero_page_attr =
29679da5407SKirill A. Shutemov 	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
29749920d28SHugh Dickins 
29849920d28SHugh Dickins static ssize_t hpage_pmd_size_show(struct kobject *kobj,
29949920d28SHugh Dickins 		struct kobj_attribute *attr, char *buf)
30049920d28SHugh Dickins {
30149920d28SHugh Dickins 	return sprintf(buf, "%lu\n", HPAGE_PMD_SIZE);
30249920d28SHugh Dickins }
30349920d28SHugh Dickins static struct kobj_attribute hpage_pmd_size_attr =
30449920d28SHugh Dickins 	__ATTR_RO(hpage_pmd_size);
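/*
 * Example: hpage_pmd_size is read-only and reports HPAGE_PMD_SIZE in
 * bytes, e.g. on x86_64 with 4KiB base pages:
 *
 *   $ cat /sys/kernel/mm/transparent_hugepage/hpage_pmd_size
 *   2097152
 */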
30549920d28SHugh Dickins 
30671e3aac0SAndrea Arcangeli #ifdef CONFIG_DEBUG_VM
30771e3aac0SAndrea Arcangeli static ssize_t debug_cow_show(struct kobject *kobj,
30871e3aac0SAndrea Arcangeli 				struct kobj_attribute *attr, char *buf)
30971e3aac0SAndrea Arcangeli {
310b46e756fSKirill A. Shutemov 	return single_hugepage_flag_show(kobj, attr, buf,
31171e3aac0SAndrea Arcangeli 				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
31271e3aac0SAndrea Arcangeli }
31371e3aac0SAndrea Arcangeli static ssize_t debug_cow_store(struct kobject *kobj,
31471e3aac0SAndrea Arcangeli 			       struct kobj_attribute *attr,
31571e3aac0SAndrea Arcangeli 			       const char *buf, size_t count)
31671e3aac0SAndrea Arcangeli {
317b46e756fSKirill A. Shutemov 	return single_hugepage_flag_store(kobj, attr, buf, count,
31871e3aac0SAndrea Arcangeli 				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
31971e3aac0SAndrea Arcangeli }
32071e3aac0SAndrea Arcangeli static struct kobj_attribute debug_cow_attr =
32171e3aac0SAndrea Arcangeli 	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
32271e3aac0SAndrea Arcangeli #endif /* CONFIG_DEBUG_VM */
32371e3aac0SAndrea Arcangeli 
32471e3aac0SAndrea Arcangeli static struct attribute *hugepage_attr[] = {
32571e3aac0SAndrea Arcangeli 	&enabled_attr.attr,
32671e3aac0SAndrea Arcangeli 	&defrag_attr.attr,
32779da5407SKirill A. Shutemov 	&use_zero_page_attr.attr,
32849920d28SHugh Dickins 	&hpage_pmd_size_attr.attr,
329e496cf3dSKirill A. Shutemov #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
3305a6e75f8SKirill A. Shutemov 	&shmem_enabled_attr.attr,
3315a6e75f8SKirill A. Shutemov #endif
33271e3aac0SAndrea Arcangeli #ifdef CONFIG_DEBUG_VM
33371e3aac0SAndrea Arcangeli 	&debug_cow_attr.attr,
33471e3aac0SAndrea Arcangeli #endif
33571e3aac0SAndrea Arcangeli 	NULL,
33671e3aac0SAndrea Arcangeli };
33771e3aac0SAndrea Arcangeli 
3388aa95a21SArvind Yadav static const struct attribute_group hugepage_attr_group = {
33971e3aac0SAndrea Arcangeli 	.attrs = hugepage_attr,
340ba76149fSAndrea Arcangeli };
341ba76149fSAndrea Arcangeli 
342569e5590SShaohua Li static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
343569e5590SShaohua Li {
344569e5590SShaohua Li 	int err;
345569e5590SShaohua Li 
346569e5590SShaohua Li 	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
347569e5590SShaohua Li 	if (unlikely(!*hugepage_kobj)) {
348ae3a8c1cSAndrew Morton 		pr_err("failed to create transparent hugepage kobject\n");
349569e5590SShaohua Li 		return -ENOMEM;
350569e5590SShaohua Li 	}
351569e5590SShaohua Li 
352569e5590SShaohua Li 	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
353569e5590SShaohua Li 	if (err) {
354ae3a8c1cSAndrew Morton 		pr_err("failed to register transparent hugepage group\n");
355569e5590SShaohua Li 		goto delete_obj;
356569e5590SShaohua Li 	}
357569e5590SShaohua Li 
358569e5590SShaohua Li 	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
359569e5590SShaohua Li 	if (err) {
360ae3a8c1cSAndrew Morton 		pr_err("failed to register transparent hugepage group\n");
361569e5590SShaohua Li 		goto remove_hp_group;
362569e5590SShaohua Li 	}
363569e5590SShaohua Li 
364569e5590SShaohua Li 	return 0;
365569e5590SShaohua Li 
366569e5590SShaohua Li remove_hp_group:
367569e5590SShaohua Li 	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
368569e5590SShaohua Li delete_obj:
369569e5590SShaohua Li 	kobject_put(*hugepage_kobj);
370569e5590SShaohua Li 	return err;
371569e5590SShaohua Li }
372569e5590SShaohua Li 
373569e5590SShaohua Li static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
374569e5590SShaohua Li {
375569e5590SShaohua Li 	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
376569e5590SShaohua Li 	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
377569e5590SShaohua Li 	kobject_put(hugepage_kobj);
378569e5590SShaohua Li }
379569e5590SShaohua Li #else
380569e5590SShaohua Li static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
381569e5590SShaohua Li {
382569e5590SShaohua Li 	return 0;
383569e5590SShaohua Li }
384569e5590SShaohua Li 
385569e5590SShaohua Li static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
386569e5590SShaohua Li {
387569e5590SShaohua Li }
38871e3aac0SAndrea Arcangeli #endif /* CONFIG_SYSFS */
38971e3aac0SAndrea Arcangeli 
39071e3aac0SAndrea Arcangeli static int __init hugepage_init(void)
39171e3aac0SAndrea Arcangeli {
39271e3aac0SAndrea Arcangeli 	int err;
393569e5590SShaohua Li 	struct kobject *hugepage_kobj;
39471e3aac0SAndrea Arcangeli 
3954b7167b9SAndrea Arcangeli 	if (!has_transparent_hugepage()) {
3964b7167b9SAndrea Arcangeli 		transparent_hugepage_flags = 0;
397569e5590SShaohua Li 		return -EINVAL;
3984b7167b9SAndrea Arcangeli 	}
3994b7167b9SAndrea Arcangeli 
400ff20c2e0SKirill A. Shutemov 	/*
401ff20c2e0SKirill A. Shutemov 	 * hugepages can't be allocated by the buddy allocator
402ff20c2e0SKirill A. Shutemov 	 */
403ff20c2e0SKirill A. Shutemov 	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
404ff20c2e0SKirill A. Shutemov 	/*
405ff20c2e0SKirill A. Shutemov 	 * we use page->mapping and page->index in second tail page
406ff20c2e0SKirill A. Shutemov 	 * as list_head: assuming THP order >= 2
407ff20c2e0SKirill A. Shutemov 	 */
408ff20c2e0SKirill A. Shutemov 	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);
409ff20c2e0SKirill A. Shutemov 
410569e5590SShaohua Li 	err = hugepage_init_sysfs(&hugepage_kobj);
411569e5590SShaohua Li 	if (err)
41265ebb64fSKirill A. Shutemov 		goto err_sysfs;
413ba76149fSAndrea Arcangeli 
414b46e756fSKirill A. Shutemov 	err = khugepaged_init();
415ba76149fSAndrea Arcangeli 	if (err)
41665ebb64fSKirill A. Shutemov 		goto err_slab;
417ba76149fSAndrea Arcangeli 
41865ebb64fSKirill A. Shutemov 	err = register_shrinker(&huge_zero_page_shrinker);
41965ebb64fSKirill A. Shutemov 	if (err)
42065ebb64fSKirill A. Shutemov 		goto err_hzp_shrinker;
4219a982250SKirill A. Shutemov 	err = register_shrinker(&deferred_split_shrinker);
4229a982250SKirill A. Shutemov 	if (err)
4239a982250SKirill A. Shutemov 		goto err_split_shrinker;
42497ae1749SKirill A. Shutemov 
42597562cd2SRik van Riel 	/*
42697562cd2SRik van Riel 	 * By default disable transparent hugepages on smaller systems,
42797562cd2SRik van Riel 	 * where the extra memory used could hurt more than TLB overhead
42897562cd2SRik van Riel 	 * is likely to save.  The admin can still enable it through /sys.
42997562cd2SRik van Riel 	 */
430ca79b0c2SArun KS 	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
43197562cd2SRik van Riel 		transparent_hugepage_flags = 0;
43279553da2SKirill A. Shutemov 		return 0;
43379553da2SKirill A. Shutemov 	}
43497562cd2SRik van Riel 
43579553da2SKirill A. Shutemov 	err = start_stop_khugepaged();
43665ebb64fSKirill A. Shutemov 	if (err)
43765ebb64fSKirill A. Shutemov 		goto err_khugepaged;
438ba76149fSAndrea Arcangeli 
439569e5590SShaohua Li 	return 0;
44065ebb64fSKirill A. Shutemov err_khugepaged:
4419a982250SKirill A. Shutemov 	unregister_shrinker(&deferred_split_shrinker);
4429a982250SKirill A. Shutemov err_split_shrinker:
44365ebb64fSKirill A. Shutemov 	unregister_shrinker(&huge_zero_page_shrinker);
44465ebb64fSKirill A. Shutemov err_hzp_shrinker:
445b46e756fSKirill A. Shutemov 	khugepaged_destroy();
44665ebb64fSKirill A. Shutemov err_slab:
447569e5590SShaohua Li 	hugepage_exit_sysfs(hugepage_kobj);
44865ebb64fSKirill A. Shutemov err_sysfs:
449ba76149fSAndrea Arcangeli 	return err;
45071e3aac0SAndrea Arcangeli }
451a64fb3cdSPaul Gortmaker subsys_initcall(hugepage_init);
45271e3aac0SAndrea Arcangeli 
45371e3aac0SAndrea Arcangeli static int __init setup_transparent_hugepage(char *str)
45471e3aac0SAndrea Arcangeli {
45571e3aac0SAndrea Arcangeli 	int ret = 0;
45671e3aac0SAndrea Arcangeli 	if (!str)
45771e3aac0SAndrea Arcangeli 		goto out;
45871e3aac0SAndrea Arcangeli 	if (!strcmp(str, "always")) {
45971e3aac0SAndrea Arcangeli 		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
46071e3aac0SAndrea Arcangeli 			&transparent_hugepage_flags);
46171e3aac0SAndrea Arcangeli 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
46271e3aac0SAndrea Arcangeli 			  &transparent_hugepage_flags);
46371e3aac0SAndrea Arcangeli 		ret = 1;
46471e3aac0SAndrea Arcangeli 	} else if (!strcmp(str, "madvise")) {
46571e3aac0SAndrea Arcangeli 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
46671e3aac0SAndrea Arcangeli 			  &transparent_hugepage_flags);
46771e3aac0SAndrea Arcangeli 		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
46871e3aac0SAndrea Arcangeli 			&transparent_hugepage_flags);
46971e3aac0SAndrea Arcangeli 		ret = 1;
47071e3aac0SAndrea Arcangeli 	} else if (!strcmp(str, "never")) {
47171e3aac0SAndrea Arcangeli 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
47271e3aac0SAndrea Arcangeli 			  &transparent_hugepage_flags);
47371e3aac0SAndrea Arcangeli 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
47471e3aac0SAndrea Arcangeli 			  &transparent_hugepage_flags);
47571e3aac0SAndrea Arcangeli 		ret = 1;
47671e3aac0SAndrea Arcangeli 	}
47771e3aac0SAndrea Arcangeli out:
47871e3aac0SAndrea Arcangeli 	if (!ret)
479ae3a8c1cSAndrew Morton 		pr_warn("transparent_hugepage= cannot parse, ignored\n");
48071e3aac0SAndrea Arcangeli 	return ret;
48171e3aac0SAndrea Arcangeli }
48271e3aac0SAndrea Arcangeli __setup("transparent_hugepage=", setup_transparent_hugepage);
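/*
 * Example of the boot-time interface registered above: THP policy can be
 * chosen before init runs by booting with
 *
 *   transparent_hugepage=madvise
 *
 * which clears TRANSPARENT_HUGEPAGE_FLAG and sets
 * TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, equivalent to later writing
 * "madvise" to the sysfs "enabled" file.
 */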
48371e3aac0SAndrea Arcangeli 
484f55e1014SLinus Torvalds pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
48571e3aac0SAndrea Arcangeli {
486f55e1014SLinus Torvalds 	if (likely(vma->vm_flags & VM_WRITE))
48771e3aac0SAndrea Arcangeli 		pmd = pmd_mkwrite(pmd);
48871e3aac0SAndrea Arcangeli 	return pmd;
48971e3aac0SAndrea Arcangeli }
49071e3aac0SAndrea Arcangeli 
49187eaceb3SYang Shi #ifdef CONFIG_MEMCG
49287eaceb3SYang Shi static inline struct deferred_split *get_deferred_split_queue(struct page *page)
4939a982250SKirill A. Shutemov {
49487eaceb3SYang Shi 	struct mem_cgroup *memcg = compound_head(page)->mem_cgroup;
49587eaceb3SYang Shi 	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
49687eaceb3SYang Shi 
49787eaceb3SYang Shi 	if (memcg)
49887eaceb3SYang Shi 		return &memcg->deferred_split_queue;
49987eaceb3SYang Shi 	else
50087eaceb3SYang Shi 		return &pgdat->deferred_split_queue;
5019a982250SKirill A. Shutemov }
50287eaceb3SYang Shi #else
50387eaceb3SYang Shi static inline struct deferred_split *get_deferred_split_queue(struct page *page)
50487eaceb3SYang Shi {
50587eaceb3SYang Shi 	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
50687eaceb3SYang Shi 
50787eaceb3SYang Shi 	return &pgdat->deferred_split_queue;
50887eaceb3SYang Shi }
50987eaceb3SYang Shi #endif
5109a982250SKirill A. Shutemov 
5119a982250SKirill A. Shutemov void prep_transhuge_page(struct page *page)
5129a982250SKirill A. Shutemov {
5139a982250SKirill A. Shutemov 	/*
5149a982250SKirill A. Shutemov 	 * we use page->mapping and page->index in second tail page
5159a982250SKirill A. Shutemov 	 * as list_head: assuming THP order >= 2
5169a982250SKirill A. Shutemov 	 */
5179a982250SKirill A. Shutemov 
5189a982250SKirill A. Shutemov 	INIT_LIST_HEAD(page_deferred_list(page));
5199a982250SKirill A. Shutemov 	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
5209a982250SKirill A. Shutemov }
5219a982250SKirill A. Shutemov 
522005ba37cSSean Christopherson bool is_transparent_hugepage(struct page *page)
523005ba37cSSean Christopherson {
524005ba37cSSean Christopherson 	if (!PageCompound(page))
525005ba37cSSean Christopherson 		return false;
526005ba37cSSean Christopherson 
527005ba37cSSean Christopherson 	page = compound_head(page);
528005ba37cSSean Christopherson 	return is_huge_zero_page(page) ||
529005ba37cSSean Christopherson 	       page[1].compound_dtor == TRANSHUGE_PAGE_DTOR;
530005ba37cSSean Christopherson }
531005ba37cSSean Christopherson EXPORT_SYMBOL_GPL(is_transparent_hugepage);
532005ba37cSSean Christopherson 
53397d3d0f9SKirill A. Shutemov static unsigned long __thp_get_unmapped_area(struct file *filp,
53497d3d0f9SKirill A. Shutemov 		unsigned long addr, unsigned long len,
53574d2fad1SToshi Kani 		loff_t off, unsigned long flags, unsigned long size)
53674d2fad1SToshi Kani {
53774d2fad1SToshi Kani 	loff_t off_end = off + len;
53874d2fad1SToshi Kani 	loff_t off_align = round_up(off, size);
53997d3d0f9SKirill A. Shutemov 	unsigned long len_pad, ret;
54074d2fad1SToshi Kani 
54174d2fad1SToshi Kani 	if (off_end <= off_align || (off_end - off_align) < size)
54274d2fad1SToshi Kani 		return 0;
54374d2fad1SToshi Kani 
54474d2fad1SToshi Kani 	len_pad = len + size;
54574d2fad1SToshi Kani 	if (len_pad < len || (off + len_pad) < off)
54674d2fad1SToshi Kani 		return 0;
54774d2fad1SToshi Kani 
54897d3d0f9SKirill A. Shutemov 	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
54974d2fad1SToshi Kani 					      off >> PAGE_SHIFT, flags);
55097d3d0f9SKirill A. Shutemov 
55197d3d0f9SKirill A. Shutemov 	/*
55297d3d0f9SKirill A. Shutemov 	 * The failure might be due to length padding. The caller will retry
55397d3d0f9SKirill A. Shutemov 	 * without the padding.
55497d3d0f9SKirill A. Shutemov 	 */
55597d3d0f9SKirill A. Shutemov 	if (IS_ERR_VALUE(ret))
55674d2fad1SToshi Kani 		return 0;
55774d2fad1SToshi Kani 
55897d3d0f9SKirill A. Shutemov 	/*
55997d3d0f9SKirill A. Shutemov 	 * Do not try to align to THP boundary if allocation at the address
56097d3d0f9SKirill A. Shutemov 	 * hint succeeds.
56197d3d0f9SKirill A. Shutemov 	 */
56297d3d0f9SKirill A. Shutemov 	if (ret == addr)
56374d2fad1SToshi Kani 		return addr;
56497d3d0f9SKirill A. Shutemov 
56597d3d0f9SKirill A. Shutemov 	ret += (off - ret) & (size - 1);
56697d3d0f9SKirill A. Shutemov 	return ret;
56774d2fad1SToshi Kani }
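/*
 * Worked example of the padding arithmetic above, with illustrative values
 * and size == PMD_SIZE == 2MiB == 0x200000:
 *
 *   off       = 0x3ff000
 *   len       = 0x400000
 *   off_align = round_up(off, size)  = 0x400000
 *   off_end   = off + len            = 0x7ff000  (>= off_align + size)
 *   len_pad   = len + size           = 0x600000
 *
 * If get_unmapped_area() then returns, say, ret = 0x7f0000000000
 * (PMD aligned), (off - ret) & (size - 1) == 0x1ff000 and the final
 * ret = 0x7f00001ff000. ret is now congruent to off modulo PMD_SIZE, so
 * file offset off_align maps to a PMD-aligned virtual address and the
 * padded range still contains [ret, ret + len).
 */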
56874d2fad1SToshi Kani 
56974d2fad1SToshi Kani unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
57074d2fad1SToshi Kani 		unsigned long len, unsigned long pgoff, unsigned long flags)
57174d2fad1SToshi Kani {
57297d3d0f9SKirill A. Shutemov 	unsigned long ret;
57374d2fad1SToshi Kani 	loff_t off = (loff_t)pgoff << PAGE_SHIFT;
57474d2fad1SToshi Kani 
57574d2fad1SToshi Kani 	if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
57674d2fad1SToshi Kani 		goto out;
57774d2fad1SToshi Kani 
57897d3d0f9SKirill A. Shutemov 	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
57997d3d0f9SKirill A. Shutemov 	if (ret)
58097d3d0f9SKirill A. Shutemov 		return ret;
58174d2fad1SToshi Kani out:
58274d2fad1SToshi Kani 	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
58374d2fad1SToshi Kani }
58474d2fad1SToshi Kani EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
58574d2fad1SToshi Kani 
5862b740303SSouptick Joarder static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
5872b740303SSouptick Joarder 			struct page *page, gfp_t gfp)
58871e3aac0SAndrea Arcangeli {
58982b0f8c3SJan Kara 	struct vm_area_struct *vma = vmf->vma;
59000501b53SJohannes Weiner 	struct mem_cgroup *memcg;
59171e3aac0SAndrea Arcangeli 	pgtable_t pgtable;
59282b0f8c3SJan Kara 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
5932b740303SSouptick Joarder 	vm_fault_t ret = 0;
59471e3aac0SAndrea Arcangeli 
595309381feSSasha Levin 	VM_BUG_ON_PAGE(!PageCompound(page), page);
59600501b53SJohannes Weiner 
5972cf85583STejun Heo 	if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) {
5986b251fc9SAndrea Arcangeli 		put_page(page);
5996b251fc9SAndrea Arcangeli 		count_vm_event(THP_FAULT_FALLBACK);
6006b251fc9SAndrea Arcangeli 		return VM_FAULT_FALLBACK;
6016b251fc9SAndrea Arcangeli 	}
60271e3aac0SAndrea Arcangeli 
6034cf58924SJoel Fernandes (Google) 	pgtable = pte_alloc_one(vma->vm_mm);
60400501b53SJohannes Weiner 	if (unlikely(!pgtable)) {
6056b31d595SMichal Hocko 		ret = VM_FAULT_OOM;
6066b31d595SMichal Hocko 		goto release;
60700501b53SJohannes Weiner 	}
60800501b53SJohannes Weiner 
609c79b57e4SHuang Ying 	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
61052f37629SMinchan Kim 	/*
61152f37629SMinchan Kim 	 * The memory barrier inside __SetPageUptodate makes sure that
61252f37629SMinchan Kim 	 * clear_huge_page writes become visible before the set_pmd_at()
61352f37629SMinchan Kim 	 * write.
61452f37629SMinchan Kim 	 */
61571e3aac0SAndrea Arcangeli 	__SetPageUptodate(page);
61671e3aac0SAndrea Arcangeli 
61782b0f8c3SJan Kara 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
61882b0f8c3SJan Kara 	if (unlikely(!pmd_none(*vmf->pmd))) {
6196b31d595SMichal Hocko 		goto unlock_release;
62071e3aac0SAndrea Arcangeli 	} else {
62171e3aac0SAndrea Arcangeli 		pmd_t entry;
6226b251fc9SAndrea Arcangeli 
6236b31d595SMichal Hocko 		ret = check_stable_address_space(vma->vm_mm);
6246b31d595SMichal Hocko 		if (ret)
6256b31d595SMichal Hocko 			goto unlock_release;
6266b31d595SMichal Hocko 
6276b251fc9SAndrea Arcangeli 		/* Deliver the page fault to userland */
6286b251fc9SAndrea Arcangeli 		if (userfaultfd_missing(vma)) {
6292b740303SSouptick Joarder 			vm_fault_t ret2;
6306b251fc9SAndrea Arcangeli 
63182b0f8c3SJan Kara 			spin_unlock(vmf->ptl);
632f627c2f5SKirill A. Shutemov 			mem_cgroup_cancel_charge(page, memcg, true);
6336b251fc9SAndrea Arcangeli 			put_page(page);
634bae473a4SKirill A. Shutemov 			pte_free(vma->vm_mm, pgtable);
6352b740303SSouptick Joarder 			ret2 = handle_userfault(vmf, VM_UFFD_MISSING);
6362b740303SSouptick Joarder 			VM_BUG_ON(ret2 & VM_FAULT_FALLBACK);
6372b740303SSouptick Joarder 			return ret2;
6386b251fc9SAndrea Arcangeli 		}
6396b251fc9SAndrea Arcangeli 
6403122359aSKirill A. Shutemov 		entry = mk_huge_pmd(page, vma->vm_page_prot);
641f55e1014SLinus Torvalds 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
642d281ee61SKirill A. Shutemov 		page_add_new_anon_rmap(page, vma, haddr, true);
643f627c2f5SKirill A. Shutemov 		mem_cgroup_commit_charge(page, memcg, false, true);
64400501b53SJohannes Weiner 		lru_cache_add_active_or_unevictable(page, vma);
64582b0f8c3SJan Kara 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
64682b0f8c3SJan Kara 		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
647bae473a4SKirill A. Shutemov 		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
648c4812909SKirill A. Shutemov 		mm_inc_nr_ptes(vma->vm_mm);
64982b0f8c3SJan Kara 		spin_unlock(vmf->ptl);
6506b251fc9SAndrea Arcangeli 		count_vm_event(THP_FAULT_ALLOC);
6511ff9e6e1SChris Down 		count_memcg_events(memcg, THP_FAULT_ALLOC, 1);
65271e3aac0SAndrea Arcangeli 	}
65371e3aac0SAndrea Arcangeli 
654aa2e878eSDavid Rientjes 	return 0;
6556b31d595SMichal Hocko unlock_release:
6566b31d595SMichal Hocko 	spin_unlock(vmf->ptl);
6576b31d595SMichal Hocko release:
6586b31d595SMichal Hocko 	if (pgtable)
6596b31d595SMichal Hocko 		pte_free(vma->vm_mm, pgtable);
6606b31d595SMichal Hocko 	mem_cgroup_cancel_charge(page, memcg, true);
6616b31d595SMichal Hocko 	put_page(page);
6626b31d595SMichal Hocko 	return ret;
6636b31d595SMichal Hocko 
66471e3aac0SAndrea Arcangeli }
66571e3aac0SAndrea Arcangeli 
666444eb2a4SMel Gorman /*
66721440d7eSDavid Rientjes  * always: directly stall for all thp allocations
66821440d7eSDavid Rientjes  * defer: wake kswapd and fail if not immediately available
66921440d7eSDavid Rientjes  * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
67021440d7eSDavid Rientjes  *		  fail if not immediately available
67121440d7eSDavid Rientjes  * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
67221440d7eSDavid Rientjes  *	    available
67321440d7eSDavid Rientjes  * never: never stall for any thp allocation
674444eb2a4SMel Gorman  */
67519deb769SDavid Rientjes static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
6760bbbc0b3SAndrea Arcangeli {
67721440d7eSDavid Rientjes 	const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
67889c83fb5SMichal Hocko 
679ac79f78dSDavid Rientjes 	/* Always do synchronous compaction */
68021440d7eSDavid Rientjes 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
681a8282608SAndrea Arcangeli 		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
682ac79f78dSDavid Rientjes 
683ac79f78dSDavid Rientjes 	/* Kick kcompactd and fail quickly */
68421440d7eSDavid Rientjes 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
68519deb769SDavid Rientjes 		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
686ac79f78dSDavid Rientjes 
687ac79f78dSDavid Rientjes 	/* Synchronous compaction if madvised, otherwise kick kcompactd */
68821440d7eSDavid Rientjes 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
68919deb769SDavid Rientjes 		return GFP_TRANSHUGE_LIGHT |
69019deb769SDavid Rientjes 			(vma_madvised ? __GFP_DIRECT_RECLAIM :
691ac79f78dSDavid Rientjes 					__GFP_KSWAPD_RECLAIM);
692ac79f78dSDavid Rientjes 
693ac79f78dSDavid Rientjes 	/* Only do synchronous compaction if madvised */
69421440d7eSDavid Rientjes 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
69519deb769SDavid Rientjes 		return GFP_TRANSHUGE_LIGHT |
69619deb769SDavid Rientjes 		       (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
697ac79f78dSDavid Rientjes 
69819deb769SDavid Rientjes 	return GFP_TRANSHUGE_LIGHT;
699444eb2a4SMel Gorman }
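/*
 * The mapping implemented above, summarised (derived directly from the
 * code, shown only for illustration):
 *
 *   always        -> GFP_TRANSHUGE (| __GFP_NORETRY unless madvised)
 *   defer         -> GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM
 *   defer+madvise -> GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM if
 *                    madvised, else | __GFP_KSWAPD_RECLAIM
 *   madvise       -> GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM if
 *                    madvised, else nothing extra
 *   never         -> GFP_TRANSHUGE_LIGHT
 */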
700444eb2a4SMel Gorman 
701c4088ebdSKirill A. Shutemov /* Caller must hold page table lock. */
702d295e341SKirill A. Shutemov static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
70397ae1749SKirill A. Shutemov 		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
7045918d10aSKirill A. Shutemov 		struct page *zero_page)
705fc9fe822SKirill A. Shutemov {
706fc9fe822SKirill A. Shutemov 	pmd_t entry;
7077c414164SAndrew Morton 	if (!pmd_none(*pmd))
7087c414164SAndrew Morton 		return false;
7095918d10aSKirill A. Shutemov 	entry = mk_pmd(zero_page, vma->vm_page_prot);
710fc9fe822SKirill A. Shutemov 	entry = pmd_mkhuge(entry);
71112c9d70bSMatthew Wilcox 	if (pgtable)
7126b0b50b0SAneesh Kumar K.V 		pgtable_trans_huge_deposit(mm, pmd, pgtable);
713fc9fe822SKirill A. Shutemov 	set_pmd_at(mm, haddr, pmd, entry);
714c4812909SKirill A. Shutemov 	mm_inc_nr_ptes(mm);
7157c414164SAndrew Morton 	return true;
716fc9fe822SKirill A. Shutemov }
717fc9fe822SKirill A. Shutemov 
7182b740303SSouptick Joarder vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
71971e3aac0SAndrea Arcangeli {
72082b0f8c3SJan Kara 	struct vm_area_struct *vma = vmf->vma;
721077fcf11SAneesh Kumar K.V 	gfp_t gfp;
72271e3aac0SAndrea Arcangeli 	struct page *page;
72382b0f8c3SJan Kara 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
72471e3aac0SAndrea Arcangeli 
72543675e6fSYang Shi 	if (!transhuge_vma_suitable(vma, haddr))
726c0292554SKirill A. Shutemov 		return VM_FAULT_FALLBACK;
72771e3aac0SAndrea Arcangeli 	if (unlikely(anon_vma_prepare(vma)))
72871e3aac0SAndrea Arcangeli 		return VM_FAULT_OOM;
7296d50e60cSDavid Rientjes 	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
730ba76149fSAndrea Arcangeli 		return VM_FAULT_OOM;
73182b0f8c3SJan Kara 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
732bae473a4SKirill A. Shutemov 			!mm_forbids_zeropage(vma->vm_mm) &&
73379da5407SKirill A. Shutemov 			transparent_hugepage_use_zero_page()) {
73480371957SKirill A. Shutemov 		pgtable_t pgtable;
7355918d10aSKirill A. Shutemov 		struct page *zero_page;
7363ea41e62SKirill A. Shutemov 		bool set;
7372b740303SSouptick Joarder 		vm_fault_t ret;
7384cf58924SJoel Fernandes (Google) 		pgtable = pte_alloc_one(vma->vm_mm);
73980371957SKirill A. Shutemov 		if (unlikely(!pgtable))
74080371957SKirill A. Shutemov 			return VM_FAULT_OOM;
7416fcb52a5SAaron Lu 		zero_page = mm_get_huge_zero_page(vma->vm_mm);
7425918d10aSKirill A. Shutemov 		if (unlikely(!zero_page)) {
743bae473a4SKirill A. Shutemov 			pte_free(vma->vm_mm, pgtable);
74497ae1749SKirill A. Shutemov 			count_vm_event(THP_FAULT_FALLBACK);
745c0292554SKirill A. Shutemov 			return VM_FAULT_FALLBACK;
74697ae1749SKirill A. Shutemov 		}
74782b0f8c3SJan Kara 		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
7486b251fc9SAndrea Arcangeli 		ret = 0;
7496b251fc9SAndrea Arcangeli 		set = false;
75082b0f8c3SJan Kara 		if (pmd_none(*vmf->pmd)) {
7516b31d595SMichal Hocko 			ret = check_stable_address_space(vma->vm_mm);
7526b31d595SMichal Hocko 			if (ret) {
7536b31d595SMichal Hocko 				spin_unlock(vmf->ptl);
7546b31d595SMichal Hocko 			} else if (userfaultfd_missing(vma)) {
75582b0f8c3SJan Kara 				spin_unlock(vmf->ptl);
75682b0f8c3SJan Kara 				ret = handle_userfault(vmf, VM_UFFD_MISSING);
7576b251fc9SAndrea Arcangeli 				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
7586b251fc9SAndrea Arcangeli 			} else {
759bae473a4SKirill A. Shutemov 				set_huge_zero_page(pgtable, vma->vm_mm, vma,
76082b0f8c3SJan Kara 						   haddr, vmf->pmd, zero_page);
76182b0f8c3SJan Kara 				spin_unlock(vmf->ptl);
7626b251fc9SAndrea Arcangeli 				set = true;
7636b251fc9SAndrea Arcangeli 			}
7646b251fc9SAndrea Arcangeli 		} else
76582b0f8c3SJan Kara 			spin_unlock(vmf->ptl);
7666fcb52a5SAaron Lu 		if (!set)
767bae473a4SKirill A. Shutemov 			pte_free(vma->vm_mm, pgtable);
7686b251fc9SAndrea Arcangeli 		return ret;
76980371957SKirill A. Shutemov 	}
77019deb769SDavid Rientjes 	gfp = alloc_hugepage_direct_gfpmask(vma);
77119deb769SDavid Rientjes 	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
77281ab4201SAndi Kleen 	if (unlikely(!page)) {
77381ab4201SAndi Kleen 		count_vm_event(THP_FAULT_FALLBACK);
774c0292554SKirill A. Shutemov 		return VM_FAULT_FALLBACK;
77581ab4201SAndi Kleen 	}
7769a982250SKirill A. Shutemov 	prep_transhuge_page(page);
77782b0f8c3SJan Kara 	return __do_huge_pmd_anonymous_page(vmf, page, gfp);
77871e3aac0SAndrea Arcangeli }
77971e3aac0SAndrea Arcangeli 
780ae18d6dcSMatthew Wilcox static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
7813b6521f5SOliver O'Halloran 		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
7823b6521f5SOliver O'Halloran 		pgtable_t pgtable)
7835cad465dSMatthew Wilcox {
7845cad465dSMatthew Wilcox 	struct mm_struct *mm = vma->vm_mm;
7855cad465dSMatthew Wilcox 	pmd_t entry;
7865cad465dSMatthew Wilcox 	spinlock_t *ptl;
7875cad465dSMatthew Wilcox 
7885cad465dSMatthew Wilcox 	ptl = pmd_lock(mm, pmd);
789c6f3c5eeSAneesh Kumar K.V 	if (!pmd_none(*pmd)) {
790c6f3c5eeSAneesh Kumar K.V 		if (write) {
791c6f3c5eeSAneesh Kumar K.V 			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
792c6f3c5eeSAneesh Kumar K.V 				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
793c6f3c5eeSAneesh Kumar K.V 				goto out_unlock;
794c6f3c5eeSAneesh Kumar K.V 			}
795c6f3c5eeSAneesh Kumar K.V 			entry = pmd_mkyoung(*pmd);
796c6f3c5eeSAneesh Kumar K.V 			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
797c6f3c5eeSAneesh Kumar K.V 			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
798c6f3c5eeSAneesh Kumar K.V 				update_mmu_cache_pmd(vma, addr, pmd);
799c6f3c5eeSAneesh Kumar K.V 		}
800c6f3c5eeSAneesh Kumar K.V 
801c6f3c5eeSAneesh Kumar K.V 		goto out_unlock;
802c6f3c5eeSAneesh Kumar K.V 	}
803c6f3c5eeSAneesh Kumar K.V 
804f25748e3SDan Williams 	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
805f25748e3SDan Williams 	if (pfn_t_devmap(pfn))
806f25748e3SDan Williams 		entry = pmd_mkdevmap(entry);
8075cad465dSMatthew Wilcox 	if (write) {
808f55e1014SLinus Torvalds 		entry = pmd_mkyoung(pmd_mkdirty(entry));
809f55e1014SLinus Torvalds 		entry = maybe_pmd_mkwrite(entry, vma);
8105cad465dSMatthew Wilcox 	}
8113b6521f5SOliver O'Halloran 
8123b6521f5SOliver O'Halloran 	if (pgtable) {
8133b6521f5SOliver O'Halloran 		pgtable_trans_huge_deposit(mm, pmd, pgtable);
814c4812909SKirill A. Shutemov 		mm_inc_nr_ptes(mm);
815c6f3c5eeSAneesh Kumar K.V 		pgtable = NULL;
8163b6521f5SOliver O'Halloran 	}
8173b6521f5SOliver O'Halloran 
8185cad465dSMatthew Wilcox 	set_pmd_at(mm, addr, pmd, entry);
8195cad465dSMatthew Wilcox 	update_mmu_cache_pmd(vma, addr, pmd);
820c6f3c5eeSAneesh Kumar K.V 
821c6f3c5eeSAneesh Kumar K.V out_unlock:
8225cad465dSMatthew Wilcox 	spin_unlock(ptl);
823c6f3c5eeSAneesh Kumar K.V 	if (pgtable)
824c6f3c5eeSAneesh Kumar K.V 		pte_free(mm, pgtable);
8255cad465dSMatthew Wilcox }
8265cad465dSMatthew Wilcox 
827fce86ff5SDan Williams vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
8285cad465dSMatthew Wilcox {
829fce86ff5SDan Williams 	unsigned long addr = vmf->address & PMD_MASK;
830fce86ff5SDan Williams 	struct vm_area_struct *vma = vmf->vma;
8315cad465dSMatthew Wilcox 	pgprot_t pgprot = vma->vm_page_prot;
8323b6521f5SOliver O'Halloran 	pgtable_t pgtable = NULL;
833fce86ff5SDan Williams 
8345cad465dSMatthew Wilcox 	/*
8355cad465dSMatthew Wilcox 	 * If we had pmd_special, we could avoid all these restrictions,
8365cad465dSMatthew Wilcox 	 * but we need to be consistent with PTEs and architectures that
8375cad465dSMatthew Wilcox 	 * can't support a 'special' bit.
8385cad465dSMatthew Wilcox 	 */
839e1fb4a08SDave Jiang 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
840e1fb4a08SDave Jiang 			!pfn_t_devmap(pfn));
8415cad465dSMatthew Wilcox 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
8425cad465dSMatthew Wilcox 						(VM_PFNMAP|VM_MIXEDMAP));
8435cad465dSMatthew Wilcox 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
8445cad465dSMatthew Wilcox 
8455cad465dSMatthew Wilcox 	if (addr < vma->vm_start || addr >= vma->vm_end)
8465cad465dSMatthew Wilcox 		return VM_FAULT_SIGBUS;
847308a047cSBorislav Petkov 
8483b6521f5SOliver O'Halloran 	if (arch_needs_pgtable_deposit()) {
8494cf58924SJoel Fernandes (Google) 		pgtable = pte_alloc_one(vma->vm_mm);
8503b6521f5SOliver O'Halloran 		if (!pgtable)
8513b6521f5SOliver O'Halloran 			return VM_FAULT_OOM;
8523b6521f5SOliver O'Halloran 	}
8533b6521f5SOliver O'Halloran 
854308a047cSBorislav Petkov 	track_pfn_insert(vma, &pgprot, pfn);
855308a047cSBorislav Petkov 
856fce86ff5SDan Williams 	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
857ae18d6dcSMatthew Wilcox 	return VM_FAULT_NOPAGE;
8585cad465dSMatthew Wilcox }
859dee41079SDan Williams EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
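/*
 * Hedged caller sketch (the real users live in fs/dax.c and drivers/dax/,
 * not in this file): a PMD fault handler that already holds a pfn for a
 * suitably aligned 2MiB device range would do roughly
 *
 *	if (pmd_addr < vma->vm_start || pmd_addr + PMD_SIZE > vma->vm_end)
 *		return VM_FAULT_FALLBACK;
 *	return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
 *
 * vmf_insert_pfn_pmd() then checks the VMA flags, deposits a page table
 * where the architecture requires one and installs the huge entry.
 */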
8605cad465dSMatthew Wilcox 
861a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
862f55e1014SLinus Torvalds static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
863a00cc7d9SMatthew Wilcox {
864f55e1014SLinus Torvalds 	if (likely(vma->vm_flags & VM_WRITE))
865a00cc7d9SMatthew Wilcox 		pud = pud_mkwrite(pud);
866a00cc7d9SMatthew Wilcox 	return pud;
867a00cc7d9SMatthew Wilcox }
868a00cc7d9SMatthew Wilcox 
869a00cc7d9SMatthew Wilcox static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
870a00cc7d9SMatthew Wilcox 		pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
871a00cc7d9SMatthew Wilcox {
872a00cc7d9SMatthew Wilcox 	struct mm_struct *mm = vma->vm_mm;
873a00cc7d9SMatthew Wilcox 	pud_t entry;
874a00cc7d9SMatthew Wilcox 	spinlock_t *ptl;
875a00cc7d9SMatthew Wilcox 
876a00cc7d9SMatthew Wilcox 	ptl = pud_lock(mm, pud);
877c6f3c5eeSAneesh Kumar K.V 	if (!pud_none(*pud)) {
878c6f3c5eeSAneesh Kumar K.V 		if (write) {
879c6f3c5eeSAneesh Kumar K.V 			if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
880c6f3c5eeSAneesh Kumar K.V 				WARN_ON_ONCE(!is_huge_zero_pud(*pud));
881c6f3c5eeSAneesh Kumar K.V 				goto out_unlock;
882c6f3c5eeSAneesh Kumar K.V 			}
883c6f3c5eeSAneesh Kumar K.V 			entry = pud_mkyoung(*pud);
884c6f3c5eeSAneesh Kumar K.V 			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
885c6f3c5eeSAneesh Kumar K.V 			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
886c6f3c5eeSAneesh Kumar K.V 				update_mmu_cache_pud(vma, addr, pud);
887c6f3c5eeSAneesh Kumar K.V 		}
888c6f3c5eeSAneesh Kumar K.V 		goto out_unlock;
889c6f3c5eeSAneesh Kumar K.V 	}
890c6f3c5eeSAneesh Kumar K.V 
891a00cc7d9SMatthew Wilcox 	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
892a00cc7d9SMatthew Wilcox 	if (pfn_t_devmap(pfn))
893a00cc7d9SMatthew Wilcox 		entry = pud_mkdevmap(entry);
894a00cc7d9SMatthew Wilcox 	if (write) {
895f55e1014SLinus Torvalds 		entry = pud_mkyoung(pud_mkdirty(entry));
896f55e1014SLinus Torvalds 		entry = maybe_pud_mkwrite(entry, vma);
897a00cc7d9SMatthew Wilcox 	}
898a00cc7d9SMatthew Wilcox 	set_pud_at(mm, addr, pud, entry);
899a00cc7d9SMatthew Wilcox 	update_mmu_cache_pud(vma, addr, pud);
900c6f3c5eeSAneesh Kumar K.V 
901c6f3c5eeSAneesh Kumar K.V out_unlock:
902a00cc7d9SMatthew Wilcox 	spin_unlock(ptl);
903a00cc7d9SMatthew Wilcox }
904a00cc7d9SMatthew Wilcox 
905fce86ff5SDan Williams vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
906a00cc7d9SMatthew Wilcox {
907fce86ff5SDan Williams 	unsigned long addr = vmf->address & PUD_MASK;
908fce86ff5SDan Williams 	struct vm_area_struct *vma = vmf->vma;
909a00cc7d9SMatthew Wilcox 	pgprot_t pgprot = vma->vm_page_prot;
910fce86ff5SDan Williams 
911a00cc7d9SMatthew Wilcox 	/*
912a00cc7d9SMatthew Wilcox 	 * If we had pud_special, we could avoid all these restrictions,
913a00cc7d9SMatthew Wilcox 	 * but we need to be consistent with PTEs and architectures that
914a00cc7d9SMatthew Wilcox 	 * can't support a 'special' bit.
915a00cc7d9SMatthew Wilcox 	 */
91662ec0d8cSDave Jiang 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
91762ec0d8cSDave Jiang 			!pfn_t_devmap(pfn));
918a00cc7d9SMatthew Wilcox 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
919a00cc7d9SMatthew Wilcox 						(VM_PFNMAP|VM_MIXEDMAP));
920a00cc7d9SMatthew Wilcox 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
921a00cc7d9SMatthew Wilcox 
922a00cc7d9SMatthew Wilcox 	if (addr < vma->vm_start || addr >= vma->vm_end)
923a00cc7d9SMatthew Wilcox 		return VM_FAULT_SIGBUS;
924a00cc7d9SMatthew Wilcox 
925a00cc7d9SMatthew Wilcox 	track_pfn_insert(vma, &pgprot, pfn);
926a00cc7d9SMatthew Wilcox 
927fce86ff5SDan Williams 	insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
928a00cc7d9SMatthew Wilcox 	return VM_FAULT_NOPAGE;
929a00cc7d9SMatthew Wilcox }
930a00cc7d9SMatthew Wilcox EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
931a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
932a00cc7d9SMatthew Wilcox 
9333565fce3SDan Williams static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
934a8f97366SKirill A. Shutemov 		pmd_t *pmd, int flags)
9353565fce3SDan Williams {
9363565fce3SDan Williams 	pmd_t _pmd;
9373565fce3SDan Williams 
938a8f97366SKirill A. Shutemov 	_pmd = pmd_mkyoung(*pmd);
939a8f97366SKirill A. Shutemov 	if (flags & FOLL_WRITE)
940a8f97366SKirill A. Shutemov 		_pmd = pmd_mkdirty(_pmd);
9413565fce3SDan Williams 	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
942a8f97366SKirill A. Shutemov 				pmd, _pmd, flags & FOLL_WRITE))
9433565fce3SDan Williams 		update_mmu_cache_pmd(vma, addr, pmd);
9443565fce3SDan Williams }
9453565fce3SDan Williams 
9463565fce3SDan Williams struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
947df06b37fSKeith Busch 		pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
9483565fce3SDan Williams {
9493565fce3SDan Williams 	unsigned long pfn = pmd_pfn(*pmd);
9503565fce3SDan Williams 	struct mm_struct *mm = vma->vm_mm;
9513565fce3SDan Williams 	struct page *page;
9523565fce3SDan Williams 
9533565fce3SDan Williams 	assert_spin_locked(pmd_lockptr(mm, pmd));
9543565fce3SDan Williams 
9558310d48bSKeno Fischer 	/*
9568310d48bSKeno Fischer 	 * When we COW a devmap PMD entry, we split it into PTEs, so we should
9578310d48bSKeno Fischer 	 * not be in this function with `flags & FOLL_COW` set.
9588310d48bSKeno Fischer 	 */
9598310d48bSKeno Fischer 	WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
9608310d48bSKeno Fischer 
961f6f37321SLinus Torvalds 	if (flags & FOLL_WRITE && !pmd_write(*pmd))
9623565fce3SDan Williams 		return NULL;
9633565fce3SDan Williams 
9643565fce3SDan Williams 	if (pmd_present(*pmd) && pmd_devmap(*pmd))
9653565fce3SDan Williams 		/* pass */;
9663565fce3SDan Williams 	else
9673565fce3SDan Williams 		return NULL;
9683565fce3SDan Williams 
9693565fce3SDan Williams 	if (flags & FOLL_TOUCH)
970a8f97366SKirill A. Shutemov 		touch_pmd(vma, addr, pmd, flags);
9713565fce3SDan Williams 
9723565fce3SDan Williams 	/*
9733565fce3SDan Williams 	 * device mapped pages can only be returned if the
9743565fce3SDan Williams 	 * caller will manage the page reference count.
9753565fce3SDan Williams 	 */
9763565fce3SDan Williams 	if (!(flags & FOLL_GET))
9773565fce3SDan Williams 		return ERR_PTR(-EEXIST);
9783565fce3SDan Williams 
9793565fce3SDan Williams 	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
980df06b37fSKeith Busch 	*pgmap = get_dev_pagemap(pfn, *pgmap);
981df06b37fSKeith Busch 	if (!*pgmap)
9823565fce3SDan Williams 		return ERR_PTR(-EFAULT);
9833565fce3SDan Williams 	page = pfn_to_page(pfn);
9843565fce3SDan Williams 	get_page(page);
9853565fce3SDan Williams 
9863565fce3SDan Williams 	return page;
9873565fce3SDan Williams }
9883565fce3SDan Williams 
98971e3aac0SAndrea Arcangeli int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
99071e3aac0SAndrea Arcangeli 		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
99171e3aac0SAndrea Arcangeli 		  struct vm_area_struct *vma)
99271e3aac0SAndrea Arcangeli {
993c4088ebdSKirill A. Shutemov 	spinlock_t *dst_ptl, *src_ptl;
99471e3aac0SAndrea Arcangeli 	struct page *src_page;
99571e3aac0SAndrea Arcangeli 	pmd_t pmd;
99612c9d70bSMatthew Wilcox 	pgtable_t pgtable = NULL;
997628d47ceSKirill A. Shutemov 	int ret = -ENOMEM;
99871e3aac0SAndrea Arcangeli 
999628d47ceSKirill A. Shutemov 	/* Skip if can be re-fill on fault */
1000628d47ceSKirill A. Shutemov 	if (!vma_is_anonymous(vma))
1001628d47ceSKirill A. Shutemov 		return 0;
1002628d47ceSKirill A. Shutemov 
10034cf58924SJoel Fernandes (Google) 	pgtable = pte_alloc_one(dst_mm);
100471e3aac0SAndrea Arcangeli 	if (unlikely(!pgtable))
100571e3aac0SAndrea Arcangeli 		goto out;
100671e3aac0SAndrea Arcangeli 
1007c4088ebdSKirill A. Shutemov 	dst_ptl = pmd_lock(dst_mm, dst_pmd);
1008c4088ebdSKirill A. Shutemov 	src_ptl = pmd_lockptr(src_mm, src_pmd);
1009c4088ebdSKirill A. Shutemov 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
101071e3aac0SAndrea Arcangeli 
101171e3aac0SAndrea Arcangeli 	ret = -EAGAIN;
101271e3aac0SAndrea Arcangeli 	pmd = *src_pmd;
101384c3fc4eSZi Yan 
101484c3fc4eSZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
101584c3fc4eSZi Yan 	if (unlikely(is_swap_pmd(pmd))) {
101684c3fc4eSZi Yan 		swp_entry_t entry = pmd_to_swp_entry(pmd);
101784c3fc4eSZi Yan 
101884c3fc4eSZi Yan 		VM_BUG_ON(!is_pmd_migration_entry(pmd));
101984c3fc4eSZi Yan 		if (is_write_migration_entry(entry)) {
102084c3fc4eSZi Yan 			make_migration_entry_read(&entry);
102184c3fc4eSZi Yan 			pmd = swp_entry_to_pmd(entry);
1022ab6e3d09SNaoya Horiguchi 			if (pmd_swp_soft_dirty(*src_pmd))
1023ab6e3d09SNaoya Horiguchi 				pmd = pmd_swp_mksoft_dirty(pmd);
102484c3fc4eSZi Yan 			set_pmd_at(src_mm, addr, src_pmd, pmd);
102584c3fc4eSZi Yan 		}
1026dd8a67f9SZi Yan 		add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1027af5b0f6aSKirill A. Shutemov 		mm_inc_nr_ptes(dst_mm);
1028dd8a67f9SZi Yan 		pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
102984c3fc4eSZi Yan 		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
103084c3fc4eSZi Yan 		ret = 0;
103184c3fc4eSZi Yan 		goto out_unlock;
103284c3fc4eSZi Yan 	}
103384c3fc4eSZi Yan #endif
103484c3fc4eSZi Yan 
1035628d47ceSKirill A. Shutemov 	if (unlikely(!pmd_trans_huge(pmd))) {
103671e3aac0SAndrea Arcangeli 		pte_free(dst_mm, pgtable);
103771e3aac0SAndrea Arcangeli 		goto out_unlock;
103871e3aac0SAndrea Arcangeli 	}
1039fc9fe822SKirill A. Shutemov 	/*
1040c4088ebdSKirill A. Shutemov 	 * When the page table lock is held, the huge zero pmd should not be
1041fc9fe822SKirill A. Shutemov 	 * under splitting, since we don't split the page itself, only the pmd
1042fc9fe822SKirill A. Shutemov 	 * into a page table.
1043fc9fe822SKirill A. Shutemov 	 */
1044fc9fe822SKirill A. Shutemov 	if (is_huge_zero_pmd(pmd)) {
10455918d10aSKirill A. Shutemov 		struct page *zero_page;
104697ae1749SKirill A. Shutemov 		/*
104797ae1749SKirill A. Shutemov 		 * get_huge_zero_page() will never allocate a new page here,
104897ae1749SKirill A. Shutemov 		 * since we already have a zero page to copy. It just takes a
104997ae1749SKirill A. Shutemov 		 * reference.
105097ae1749SKirill A. Shutemov 		 */
10516fcb52a5SAaron Lu 		zero_page = mm_get_huge_zero_page(dst_mm);
10526b251fc9SAndrea Arcangeli 		set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
10535918d10aSKirill A. Shutemov 				zero_page);
1054fc9fe822SKirill A. Shutemov 		ret = 0;
1055fc9fe822SKirill A. Shutemov 		goto out_unlock;
1056fc9fe822SKirill A. Shutemov 	}
1057de466bd6SMel Gorman 
105871e3aac0SAndrea Arcangeli 	src_page = pmd_page(pmd);
1059309381feSSasha Levin 	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
106071e3aac0SAndrea Arcangeli 	get_page(src_page);
106153f9263bSKirill A. Shutemov 	page_dup_rmap(src_page, true);
106271e3aac0SAndrea Arcangeli 	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1063c4812909SKirill A. Shutemov 	mm_inc_nr_ptes(dst_mm);
10645c7fb56eSDan Williams 	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
106571e3aac0SAndrea Arcangeli 
106671e3aac0SAndrea Arcangeli 	pmdp_set_wrprotect(src_mm, addr, src_pmd);
106771e3aac0SAndrea Arcangeli 	pmd = pmd_mkold(pmd_wrprotect(pmd));
106871e3aac0SAndrea Arcangeli 	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
106971e3aac0SAndrea Arcangeli 
107071e3aac0SAndrea Arcangeli 	ret = 0;
107171e3aac0SAndrea Arcangeli out_unlock:
1072c4088ebdSKirill A. Shutemov 	spin_unlock(src_ptl);
1073c4088ebdSKirill A. Shutemov 	spin_unlock(dst_ptl);
107471e3aac0SAndrea Arcangeli out:
107571e3aac0SAndrea Arcangeli 	return ret;
107671e3aac0SAndrea Arcangeli }
107771e3aac0SAndrea Arcangeli 
1078a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1079a00cc7d9SMatthew Wilcox static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
1080a8f97366SKirill A. Shutemov 		pud_t *pud, int flags)
1081a00cc7d9SMatthew Wilcox {
1082a00cc7d9SMatthew Wilcox 	pud_t _pud;
1083a00cc7d9SMatthew Wilcox 
1084a8f97366SKirill A. Shutemov 	_pud = pud_mkyoung(*pud);
1085a8f97366SKirill A. Shutemov 	if (flags & FOLL_WRITE)
1086a8f97366SKirill A. Shutemov 		_pud = pud_mkdirty(_pud);
1087a00cc7d9SMatthew Wilcox 	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
1088a8f97366SKirill A. Shutemov 				pud, _pud, flags & FOLL_WRITE))
1089a00cc7d9SMatthew Wilcox 		update_mmu_cache_pud(vma, addr, pud);
1090a00cc7d9SMatthew Wilcox }
1091a00cc7d9SMatthew Wilcox 
1092a00cc7d9SMatthew Wilcox struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
1093df06b37fSKeith Busch 		pud_t *pud, int flags, struct dev_pagemap **pgmap)
1094a00cc7d9SMatthew Wilcox {
1095a00cc7d9SMatthew Wilcox 	unsigned long pfn = pud_pfn(*pud);
1096a00cc7d9SMatthew Wilcox 	struct mm_struct *mm = vma->vm_mm;
1097a00cc7d9SMatthew Wilcox 	struct page *page;
1098a00cc7d9SMatthew Wilcox 
1099a00cc7d9SMatthew Wilcox 	assert_spin_locked(pud_lockptr(mm, pud));
1100a00cc7d9SMatthew Wilcox 
1101f6f37321SLinus Torvalds 	if (flags & FOLL_WRITE && !pud_write(*pud))
1102a00cc7d9SMatthew Wilcox 		return NULL;
1103a00cc7d9SMatthew Wilcox 
1104a00cc7d9SMatthew Wilcox 	if (pud_present(*pud) && pud_devmap(*pud))
1105a00cc7d9SMatthew Wilcox 		/* pass */;
1106a00cc7d9SMatthew Wilcox 	else
1107a00cc7d9SMatthew Wilcox 		return NULL;
1108a00cc7d9SMatthew Wilcox 
1109a00cc7d9SMatthew Wilcox 	if (flags & FOLL_TOUCH)
1110a8f97366SKirill A. Shutemov 		touch_pud(vma, addr, pud, flags);
1111a00cc7d9SMatthew Wilcox 
1112a00cc7d9SMatthew Wilcox 	/*
1113a00cc7d9SMatthew Wilcox 	 * device mapped pages can only be returned if the
1114a00cc7d9SMatthew Wilcox 	 * caller will manage the page reference count.
1115a00cc7d9SMatthew Wilcox 	 */
1116a00cc7d9SMatthew Wilcox 	if (!(flags & FOLL_GET))
1117a00cc7d9SMatthew Wilcox 		return ERR_PTR(-EEXIST);
1118a00cc7d9SMatthew Wilcox 
1119a00cc7d9SMatthew Wilcox 	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
1120df06b37fSKeith Busch 	*pgmap = get_dev_pagemap(pfn, *pgmap);
1121df06b37fSKeith Busch 	if (!*pgmap)
1122a00cc7d9SMatthew Wilcox 		return ERR_PTR(-EFAULT);
1123a00cc7d9SMatthew Wilcox 	page = pfn_to_page(pfn);
1124a00cc7d9SMatthew Wilcox 	get_page(page);
1125a00cc7d9SMatthew Wilcox 
1126a00cc7d9SMatthew Wilcox 	return page;
1127a00cc7d9SMatthew Wilcox }
1128a00cc7d9SMatthew Wilcox 
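/*
 * Copy a huge pud at fork time. Only trans-huge and devmap puds are
 * handled; the source entry is write-protected and marked old before
 * being copied into the destination.
 */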
1129a00cc7d9SMatthew Wilcox int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1130a00cc7d9SMatthew Wilcox 		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1131a00cc7d9SMatthew Wilcox 		  struct vm_area_struct *vma)
1132a00cc7d9SMatthew Wilcox {
1133a00cc7d9SMatthew Wilcox 	spinlock_t *dst_ptl, *src_ptl;
1134a00cc7d9SMatthew Wilcox 	pud_t pud;
1135a00cc7d9SMatthew Wilcox 	int ret;
1136a00cc7d9SMatthew Wilcox 
1137a00cc7d9SMatthew Wilcox 	dst_ptl = pud_lock(dst_mm, dst_pud);
1138a00cc7d9SMatthew Wilcox 	src_ptl = pud_lockptr(src_mm, src_pud);
1139a00cc7d9SMatthew Wilcox 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1140a00cc7d9SMatthew Wilcox 
1141a00cc7d9SMatthew Wilcox 	ret = -EAGAIN;
1142a00cc7d9SMatthew Wilcox 	pud = *src_pud;
1143a00cc7d9SMatthew Wilcox 	if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
1144a00cc7d9SMatthew Wilcox 		goto out_unlock;
1145a00cc7d9SMatthew Wilcox 
1146a00cc7d9SMatthew Wilcox 	/*
1147a00cc7d9SMatthew Wilcox 	 * When the page table lock is held, the huge zero pud should not be
1148a00cc7d9SMatthew Wilcox 	 * under splitting, since we don't split the page itself, only the pud
1149a00cc7d9SMatthew Wilcox 	 * into a page table.
1150a00cc7d9SMatthew Wilcox 	 */
1151a00cc7d9SMatthew Wilcox 	if (is_huge_zero_pud(pud)) {
1152a00cc7d9SMatthew Wilcox 		/* No huge zero pud yet */
1153a00cc7d9SMatthew Wilcox 	}
1154a00cc7d9SMatthew Wilcox 
1155a00cc7d9SMatthew Wilcox 	pudp_set_wrprotect(src_mm, addr, src_pud);
1156a00cc7d9SMatthew Wilcox 	pud = pud_mkold(pud_wrprotect(pud));
1157a00cc7d9SMatthew Wilcox 	set_pud_at(dst_mm, addr, dst_pud, pud);
1158a00cc7d9SMatthew Wilcox 
1159a00cc7d9SMatthew Wilcox 	ret = 0;
1160a00cc7d9SMatthew Wilcox out_unlock:
1161a00cc7d9SMatthew Wilcox 	spin_unlock(src_ptl);
1162a00cc7d9SMatthew Wilcox 	spin_unlock(dst_ptl);
1163a00cc7d9SMatthew Wilcox 	return ret;
1164a00cc7d9SMatthew Wilcox }
1165a00cc7d9SMatthew Wilcox 
1166a00cc7d9SMatthew Wilcox void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
1167a00cc7d9SMatthew Wilcox {
1168a00cc7d9SMatthew Wilcox 	pud_t entry;
1169a00cc7d9SMatthew Wilcox 	unsigned long haddr;
1170a00cc7d9SMatthew Wilcox 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1171a00cc7d9SMatthew Wilcox 
1172a00cc7d9SMatthew Wilcox 	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
1173a00cc7d9SMatthew Wilcox 	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
1174a00cc7d9SMatthew Wilcox 		goto unlock;
1175a00cc7d9SMatthew Wilcox 
1176a00cc7d9SMatthew Wilcox 	entry = pud_mkyoung(orig_pud);
1177a00cc7d9SMatthew Wilcox 	if (write)
1178a00cc7d9SMatthew Wilcox 		entry = pud_mkdirty(entry);
1179a00cc7d9SMatthew Wilcox 	haddr = vmf->address & HPAGE_PUD_MASK;
1180a00cc7d9SMatthew Wilcox 	if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write))
1181a00cc7d9SMatthew Wilcox 		update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud);
1182a00cc7d9SMatthew Wilcox 
1183a00cc7d9SMatthew Wilcox unlock:
1184a00cc7d9SMatthew Wilcox 	spin_unlock(vmf->ptl);
1185a00cc7d9SMatthew Wilcox }
1186a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1187a00cc7d9SMatthew Wilcox 
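/*
 * Mark a huge pmd young (and dirty on a write fault) after an access
 * fault, re-checking under the pmd lock that the entry did not change.
 */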
118882b0f8c3SJan Kara void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
1189a1dd450bSWill Deacon {
1190a1dd450bSWill Deacon 	pmd_t entry;
1191a1dd450bSWill Deacon 	unsigned long haddr;
119220f664aaSMinchan Kim 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1193a1dd450bSWill Deacon 
119482b0f8c3SJan Kara 	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
119582b0f8c3SJan Kara 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
1196a1dd450bSWill Deacon 		goto unlock;
1197a1dd450bSWill Deacon 
1198a1dd450bSWill Deacon 	entry = pmd_mkyoung(orig_pmd);
119920f664aaSMinchan Kim 	if (write)
120020f664aaSMinchan Kim 		entry = pmd_mkdirty(entry);
120182b0f8c3SJan Kara 	haddr = vmf->address & HPAGE_PMD_MASK;
120220f664aaSMinchan Kim 	if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
120382b0f8c3SJan Kara 		update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
1204a1dd450bSWill Deacon 
1205a1dd450bSWill Deacon unlock:
120682b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
1207a1dd450bSWill Deacon }
1208a1dd450bSWill Deacon 
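/*
 * Fallback for a write fault on an anonymous THP when no new huge page is
 * available: allocate HPAGE_PMD_NR small pages, copy the old contents into
 * them, and remap the range with regular ptes backed by the new pages.
 */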
12092b740303SSouptick Joarder static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,
12102b740303SSouptick Joarder 			pmd_t orig_pmd, struct page *page)
121171e3aac0SAndrea Arcangeli {
121282b0f8c3SJan Kara 	struct vm_area_struct *vma = vmf->vma;
121382b0f8c3SJan Kara 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
121400501b53SJohannes Weiner 	struct mem_cgroup *memcg;
121571e3aac0SAndrea Arcangeli 	pgtable_t pgtable;
121671e3aac0SAndrea Arcangeli 	pmd_t _pmd;
12172b740303SSouptick Joarder 	int i;
12182b740303SSouptick Joarder 	vm_fault_t ret = 0;
121971e3aac0SAndrea Arcangeli 	struct page **pages;
1220ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
122171e3aac0SAndrea Arcangeli 
12226da2ec56SKees Cook 	pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *),
122371e3aac0SAndrea Arcangeli 			      GFP_KERNEL);
122471e3aac0SAndrea Arcangeli 	if (unlikely(!pages)) {
122571e3aac0SAndrea Arcangeli 		ret |= VM_FAULT_OOM;
122671e3aac0SAndrea Arcangeli 		goto out;
122771e3aac0SAndrea Arcangeli 	}
122871e3aac0SAndrea Arcangeli 
122971e3aac0SAndrea Arcangeli 	for (i = 0; i < HPAGE_PMD_NR; i++) {
123041b6167eSMichal Hocko 		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma,
123182b0f8c3SJan Kara 					       vmf->address, page_to_nid(page));
1232b9bbfbe3SAndrea Arcangeli 		if (unlikely(!pages[i] ||
12332cf85583STejun Heo 			     mem_cgroup_try_charge_delay(pages[i], vma->vm_mm,
1234bae473a4SKirill A. Shutemov 				     GFP_KERNEL, &memcg, false))) {
1235b9bbfbe3SAndrea Arcangeli 			if (pages[i])
123671e3aac0SAndrea Arcangeli 				put_page(pages[i]);
1237b9bbfbe3SAndrea Arcangeli 			while (--i >= 0) {
123800501b53SJohannes Weiner 				memcg = (void *)page_private(pages[i]);
123900501b53SJohannes Weiner 				set_page_private(pages[i], 0);
1240f627c2f5SKirill A. Shutemov 				mem_cgroup_cancel_charge(pages[i], memcg,
1241f627c2f5SKirill A. Shutemov 						false);
1242b9bbfbe3SAndrea Arcangeli 				put_page(pages[i]);
1243b9bbfbe3SAndrea Arcangeli 			}
124471e3aac0SAndrea Arcangeli 			kfree(pages);
124571e3aac0SAndrea Arcangeli 			ret |= VM_FAULT_OOM;
124671e3aac0SAndrea Arcangeli 			goto out;
124771e3aac0SAndrea Arcangeli 		}
124800501b53SJohannes Weiner 		set_page_private(pages[i], (unsigned long)memcg);
124971e3aac0SAndrea Arcangeli 	}
125071e3aac0SAndrea Arcangeli 
125171e3aac0SAndrea Arcangeli 	for (i = 0; i < HPAGE_PMD_NR; i++) {
125271e3aac0SAndrea Arcangeli 		copy_user_highpage(pages[i], page + i,
12530089e485SHillf Danton 				   haddr + PAGE_SIZE * i, vma);
125471e3aac0SAndrea Arcangeli 		__SetPageUptodate(pages[i]);
125571e3aac0SAndrea Arcangeli 		cond_resched();
125671e3aac0SAndrea Arcangeli 	}
125771e3aac0SAndrea Arcangeli 
12587269f999SJérôme Glisse 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
12597269f999SJérôme Glisse 				haddr, haddr + HPAGE_PMD_SIZE);
1260ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
12612ec74c3eSSagi Grimberg 
126282b0f8c3SJan Kara 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
126382b0f8c3SJan Kara 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
126471e3aac0SAndrea Arcangeli 		goto out_free_pages;
1265309381feSSasha Levin 	VM_BUG_ON_PAGE(!PageHead(page), page);
126671e3aac0SAndrea Arcangeli 
12670f10851eSJérôme Glisse 	/*
12680f10851eSJérôme Glisse 	 * Leave the pmd empty until the ptes are filled. Note that we must
12690f10851eSJérôme Glisse 	 * notify here, as a concurrent CPU thread might write to a new page
12700f10851eSJérôme Glisse 	 * before mmu_notifier_invalidate_range_end() is called, which can lead
12710f10851eSJérôme Glisse 	 * to a device seeing the memory writes in a different order than the CPU.
12720f10851eSJérôme Glisse 	 *
1273ad56b738SMike Rapoport 	 * See Documentation/vm/mmu_notifier.rst
12740f10851eSJérôme Glisse 	 */
127582b0f8c3SJan Kara 	pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
127671e3aac0SAndrea Arcangeli 
127782b0f8c3SJan Kara 	pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd);
1278bae473a4SKirill A. Shutemov 	pmd_populate(vma->vm_mm, &_pmd, pgtable);
127971e3aac0SAndrea Arcangeli 
128071e3aac0SAndrea Arcangeli 	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
1281bae473a4SKirill A. Shutemov 		pte_t entry;
128271e3aac0SAndrea Arcangeli 		entry = mk_pte(pages[i], vma->vm_page_prot);
128371e3aac0SAndrea Arcangeli 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
128400501b53SJohannes Weiner 		memcg = (void *)page_private(pages[i]);
128500501b53SJohannes Weiner 		set_page_private(pages[i], 0);
128682b0f8c3SJan Kara 		page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false);
1287f627c2f5SKirill A. Shutemov 		mem_cgroup_commit_charge(pages[i], memcg, false, false);
128800501b53SJohannes Weiner 		lru_cache_add_active_or_unevictable(pages[i], vma);
128982b0f8c3SJan Kara 		vmf->pte = pte_offset_map(&_pmd, haddr);
129082b0f8c3SJan Kara 		VM_BUG_ON(!pte_none(*vmf->pte));
129182b0f8c3SJan Kara 		set_pte_at(vma->vm_mm, haddr, vmf->pte, entry);
129282b0f8c3SJan Kara 		pte_unmap(vmf->pte);
129371e3aac0SAndrea Arcangeli 	}
129471e3aac0SAndrea Arcangeli 	kfree(pages);
129571e3aac0SAndrea Arcangeli 
129671e3aac0SAndrea Arcangeli 	smp_wmb(); /* make pte visible before pmd */
129782b0f8c3SJan Kara 	pmd_populate(vma->vm_mm, vmf->pmd, pgtable);
1298d281ee61SKirill A. Shutemov 	page_remove_rmap(page, true);
129982b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
130071e3aac0SAndrea Arcangeli 
13014645b9feSJérôme Glisse 	/*
13024645b9feSJérôme Glisse 	 * No need to double call mmu_notifier->invalidate_range() callback as
13034645b9feSJérôme Glisse 	 * the above pmdp_huge_clear_flush_notify() did already call it.
13044645b9feSJérôme Glisse 	 */
1305ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_only_end(&range);
13062ec74c3eSSagi Grimberg 
130771e3aac0SAndrea Arcangeli 	ret |= VM_FAULT_WRITE;
130871e3aac0SAndrea Arcangeli 	put_page(page);
130971e3aac0SAndrea Arcangeli 
131071e3aac0SAndrea Arcangeli out:
131171e3aac0SAndrea Arcangeli 	return ret;
131271e3aac0SAndrea Arcangeli 
131371e3aac0SAndrea Arcangeli out_free_pages:
131482b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
1315ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_end(&range);
1316b9bbfbe3SAndrea Arcangeli 	for (i = 0; i < HPAGE_PMD_NR; i++) {
131700501b53SJohannes Weiner 		memcg = (void *)page_private(pages[i]);
131800501b53SJohannes Weiner 		set_page_private(pages[i], 0);
1319f627c2f5SKirill A. Shutemov 		mem_cgroup_cancel_charge(pages[i], memcg, false);
132071e3aac0SAndrea Arcangeli 		put_page(pages[i]);
1321b9bbfbe3SAndrea Arcangeli 	}
132271e3aac0SAndrea Arcangeli 	kfree(pages);
132371e3aac0SAndrea Arcangeli 	goto out;
132471e3aac0SAndrea Arcangeli }
132571e3aac0SAndrea Arcangeli 
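/*
 * Handle a write fault against a huge pmd. If the page is mapped only by
 * this process it is reused in place; otherwise a fresh huge page is
 * allocated and the contents copied, falling back to splitting the pmd or
 * to the small-page copy path above when that allocation fails.
 */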
13262b740303SSouptick Joarder vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
132771e3aac0SAndrea Arcangeli {
132882b0f8c3SJan Kara 	struct vm_area_struct *vma = vmf->vma;
132993b4796dSKirill A. Shutemov 	struct page *page = NULL, *new_page;
133000501b53SJohannes Weiner 	struct mem_cgroup *memcg;
133182b0f8c3SJan Kara 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1332ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
13333b363692SMichal Hocko 	gfp_t huge_gfp;			/* for allocation and charge */
13342b740303SSouptick Joarder 	vm_fault_t ret = 0;
133571e3aac0SAndrea Arcangeli 
133682b0f8c3SJan Kara 	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
133781d1b09cSSasha Levin 	VM_BUG_ON_VMA(!vma->anon_vma, vma);
133893b4796dSKirill A. Shutemov 	if (is_huge_zero_pmd(orig_pmd))
133993b4796dSKirill A. Shutemov 		goto alloc;
134082b0f8c3SJan Kara 	spin_lock(vmf->ptl);
134182b0f8c3SJan Kara 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
134271e3aac0SAndrea Arcangeli 		goto out_unlock;
134371e3aac0SAndrea Arcangeli 
134471e3aac0SAndrea Arcangeli 	page = pmd_page(orig_pmd);
1345309381feSSasha Levin 	VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
13461f25fe20SKirill A. Shutemov 	/*
13471f25fe20SKirill A. Shutemov 	 * We can only reuse the page if nobody else maps the huge page or
13486d0a07edSAndrea Arcangeli 	 * any part of it.
13491f25fe20SKirill A. Shutemov 	 */
1350ba3c4ce6SHuang Ying 	if (!trylock_page(page)) {
1351ba3c4ce6SHuang Ying 		get_page(page);
1352ba3c4ce6SHuang Ying 		spin_unlock(vmf->ptl);
1353ba3c4ce6SHuang Ying 		lock_page(page);
1354ba3c4ce6SHuang Ying 		spin_lock(vmf->ptl);
1355ba3c4ce6SHuang Ying 		if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1356ba3c4ce6SHuang Ying 			unlock_page(page);
1357ba3c4ce6SHuang Ying 			put_page(page);
1358ba3c4ce6SHuang Ying 			goto out_unlock;
1359ba3c4ce6SHuang Ying 		}
1360ba3c4ce6SHuang Ying 		put_page(page);
1361ba3c4ce6SHuang Ying 	}
1362ba3c4ce6SHuang Ying 	if (reuse_swap_page(page, NULL)) {
136371e3aac0SAndrea Arcangeli 		pmd_t entry;
136471e3aac0SAndrea Arcangeli 		entry = pmd_mkyoung(orig_pmd);
1365f55e1014SLinus Torvalds 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
136682b0f8c3SJan Kara 		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry,  1))
136782b0f8c3SJan Kara 			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
136871e3aac0SAndrea Arcangeli 		ret |= VM_FAULT_WRITE;
1369ba3c4ce6SHuang Ying 		unlock_page(page);
137071e3aac0SAndrea Arcangeli 		goto out_unlock;
137171e3aac0SAndrea Arcangeli 	}
1372ba3c4ce6SHuang Ying 	unlock_page(page);
1373ddc58f27SKirill A. Shutemov 	get_page(page);
137482b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
137593b4796dSKirill A. Shutemov alloc:
13767635d9cbSMichal Hocko 	if (__transparent_hugepage_enabled(vma) &&
1377077fcf11SAneesh Kumar K.V 	    !transparent_hugepage_debug_cow()) {
137819deb769SDavid Rientjes 		huge_gfp = alloc_hugepage_direct_gfpmask(vma);
137919deb769SDavid Rientjes 		new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
1380077fcf11SAneesh Kumar K.V 	} else
138171e3aac0SAndrea Arcangeli 		new_page = NULL;
138271e3aac0SAndrea Arcangeli 
13839a982250SKirill A. Shutemov 	if (likely(new_page)) {
13849a982250SKirill A. Shutemov 		prep_transhuge_page(new_page);
13859a982250SKirill A. Shutemov 	} else {
1386eecc1e42SHugh Dickins 		if (!page) {
138782b0f8c3SJan Kara 			split_huge_pmd(vma, vmf->pmd, vmf->address);
1388e9b71ca9SKirill A. Shutemov 			ret |= VM_FAULT_FALLBACK;
138993b4796dSKirill A. Shutemov 		} else {
139082b0f8c3SJan Kara 			ret = do_huge_pmd_wp_page_fallback(vmf, orig_pmd, page);
13919845cbbdSKirill A. Shutemov 			if (ret & VM_FAULT_OOM) {
139282b0f8c3SJan Kara 				split_huge_pmd(vma, vmf->pmd, vmf->address);
13939845cbbdSKirill A. Shutemov 				ret |= VM_FAULT_FALLBACK;
13949845cbbdSKirill A. Shutemov 			}
1395ddc58f27SKirill A. Shutemov 			put_page(page);
139693b4796dSKirill A. Shutemov 		}
139717766ddeSDavid Rientjes 		count_vm_event(THP_FAULT_FALLBACK);
139871e3aac0SAndrea Arcangeli 		goto out;
139971e3aac0SAndrea Arcangeli 	}
140071e3aac0SAndrea Arcangeli 
14012cf85583STejun Heo 	if (unlikely(mem_cgroup_try_charge_delay(new_page, vma->vm_mm,
14022a70f6a7SMichal Hocko 					huge_gfp, &memcg, true))) {
1403b9bbfbe3SAndrea Arcangeli 		put_page(new_page);
140482b0f8c3SJan Kara 		split_huge_pmd(vma, vmf->pmd, vmf->address);
1405bae473a4SKirill A. Shutemov 		if (page)
1406ddc58f27SKirill A. Shutemov 			put_page(page);
14079845cbbdSKirill A. Shutemov 		ret |= VM_FAULT_FALLBACK;
140817766ddeSDavid Rientjes 		count_vm_event(THP_FAULT_FALLBACK);
1409b9bbfbe3SAndrea Arcangeli 		goto out;
1410b9bbfbe3SAndrea Arcangeli 	}
1411b9bbfbe3SAndrea Arcangeli 
141217766ddeSDavid Rientjes 	count_vm_event(THP_FAULT_ALLOC);
14131ff9e6e1SChris Down 	count_memcg_events(memcg, THP_FAULT_ALLOC, 1);
141417766ddeSDavid Rientjes 
1415eecc1e42SHugh Dickins 	if (!page)
1416c79b57e4SHuang Ying 		clear_huge_page(new_page, vmf->address, HPAGE_PMD_NR);
141793b4796dSKirill A. Shutemov 	else
1418c9f4cd71SHuang Ying 		copy_user_huge_page(new_page, page, vmf->address,
1419c9f4cd71SHuang Ying 				    vma, HPAGE_PMD_NR);
142071e3aac0SAndrea Arcangeli 	__SetPageUptodate(new_page);
142171e3aac0SAndrea Arcangeli 
14227269f999SJérôme Glisse 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
14237269f999SJérôme Glisse 				haddr, haddr + HPAGE_PMD_SIZE);
1424ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
14252ec74c3eSSagi Grimberg 
142682b0f8c3SJan Kara 	spin_lock(vmf->ptl);
142793b4796dSKirill A. Shutemov 	if (page)
1428ddc58f27SKirill A. Shutemov 		put_page(page);
142982b0f8c3SJan Kara 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
143082b0f8c3SJan Kara 		spin_unlock(vmf->ptl);
1431f627c2f5SKirill A. Shutemov 		mem_cgroup_cancel_charge(new_page, memcg, true);
143271e3aac0SAndrea Arcangeli 		put_page(new_page);
14332ec74c3eSSagi Grimberg 		goto out_mn;
1434b9bbfbe3SAndrea Arcangeli 	} else {
143571e3aac0SAndrea Arcangeli 		pmd_t entry;
14363122359aSKirill A. Shutemov 		entry = mk_huge_pmd(new_page, vma->vm_page_prot);
1437f55e1014SLinus Torvalds 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
143882b0f8c3SJan Kara 		pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
1439d281ee61SKirill A. Shutemov 		page_add_new_anon_rmap(new_page, vma, haddr, true);
1440f627c2f5SKirill A. Shutemov 		mem_cgroup_commit_charge(new_page, memcg, false, true);
144100501b53SJohannes Weiner 		lru_cache_add_active_or_unevictable(new_page, vma);
144282b0f8c3SJan Kara 		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
144382b0f8c3SJan Kara 		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1444eecc1e42SHugh Dickins 		if (!page) {
1445bae473a4SKirill A. Shutemov 			add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
144697ae1749SKirill A. Shutemov 		} else {
1447309381feSSasha Levin 			VM_BUG_ON_PAGE(!PageHead(page), page);
1448d281ee61SKirill A. Shutemov 			page_remove_rmap(page, true);
144971e3aac0SAndrea Arcangeli 			put_page(page);
145093b4796dSKirill A. Shutemov 		}
145171e3aac0SAndrea Arcangeli 		ret |= VM_FAULT_WRITE;
145271e3aac0SAndrea Arcangeli 	}
145382b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
14542ec74c3eSSagi Grimberg out_mn:
14554645b9feSJérôme Glisse 	/*
14564645b9feSJérôme Glisse 	 * No need to double call mmu_notifier->invalidate_range() callback as
14574645b9feSJérôme Glisse 	 * the above pmdp_huge_clear_flush_notify() did already call it.
14584645b9feSJérôme Glisse 	 */
1459ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_only_end(&range);
14602ec74c3eSSagi Grimberg out:
14612ec74c3eSSagi Grimberg 	return ret;
146271e3aac0SAndrea Arcangeli out_unlock:
146382b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
146471e3aac0SAndrea Arcangeli 	return ret;
146571e3aac0SAndrea Arcangeli }
146671e3aac0SAndrea Arcangeli 
14678310d48bSKeno Fischer /*
14688310d48bSKeno Fischer  * FOLL_FORCE can write to even unwritable pmds, but only
14698310d48bSKeno Fischer  * after we've gone through a COW cycle and they are dirty.
14708310d48bSKeno Fischer  */
14718310d48bSKeno Fischer static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
14728310d48bSKeno Fischer {
1473f6f37321SLinus Torvalds 	return pmd_write(pmd) ||
14748310d48bSKeno Fischer 	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
14758310d48bSKeno Fischer }
14768310d48bSKeno Fischer 
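/*
 * follow_page() helper for a transparent huge pmd: returns the page for
 * the given address, honouring FOLL_WRITE, FOLL_DUMP, FOLL_NUMA,
 * FOLL_TOUCH, FOLL_MLOCK and FOLL_GET. Must be called with the pmd lock
 * held.
 */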
1477b676b293SDavid Rientjes struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
147871e3aac0SAndrea Arcangeli 				   unsigned long addr,
147971e3aac0SAndrea Arcangeli 				   pmd_t *pmd,
148071e3aac0SAndrea Arcangeli 				   unsigned int flags)
148171e3aac0SAndrea Arcangeli {
1482b676b293SDavid Rientjes 	struct mm_struct *mm = vma->vm_mm;
148371e3aac0SAndrea Arcangeli 	struct page *page = NULL;
148471e3aac0SAndrea Arcangeli 
1485c4088ebdSKirill A. Shutemov 	assert_spin_locked(pmd_lockptr(mm, pmd));
148671e3aac0SAndrea Arcangeli 
14878310d48bSKeno Fischer 	if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
148871e3aac0SAndrea Arcangeli 		goto out;
148971e3aac0SAndrea Arcangeli 
149085facf25SKirill A. Shutemov 	/* Avoid dumping huge zero page */
149185facf25SKirill A. Shutemov 	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
149285facf25SKirill A. Shutemov 		return ERR_PTR(-EFAULT);
149385facf25SKirill A. Shutemov 
14942b4847e7SMel Gorman 	/* Full NUMA hinting faults to serialise migration in fault paths */
14958a0516edSMel Gorman 	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
14962b4847e7SMel Gorman 		goto out;
14972b4847e7SMel Gorman 
149871e3aac0SAndrea Arcangeli 	page = pmd_page(*pmd);
1499ca120cf6SDan Williams 	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
15003565fce3SDan Williams 	if (flags & FOLL_TOUCH)
1501a8f97366SKirill A. Shutemov 		touch_pmd(vma, addr, pmd, flags);
1502de60f5f1SEric B Munson 	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
1503e90309c9SKirill A. Shutemov 		/*
1504e90309c9SKirill A. Shutemov 		 * We don't mlock() pte-mapped THPs. This way we can avoid
1505e90309c9SKirill A. Shutemov 		 * leaking mlocked pages into non-VM_LOCKED VMAs.
1506e90309c9SKirill A. Shutemov 		 *
15079a73f61bSKirill A. Shutemov 		 * For anon THP:
15089a73f61bSKirill A. Shutemov 		 *
1509e90309c9SKirill A. Shutemov 		 * In most cases the pmd is the only mapping of the page as we
1510e90309c9SKirill A. Shutemov 		 * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for
1511e90309c9SKirill A. Shutemov 		 * writable private mappings in populate_vma_page_range().
1512e90309c9SKirill A. Shutemov 		 *
1513e90309c9SKirill A. Shutemov 		 * The only scenario in which the page is shared here is if we are
1514e90309c9SKirill A. Shutemov 		 * mlocking a read-only mapping shared over fork(). We skip
1515e90309c9SKirill A. Shutemov 		 * mlocking such pages.
15169a73f61bSKirill A. Shutemov 		 *
15179a73f61bSKirill A. Shutemov 		 * For file THP:
15189a73f61bSKirill A. Shutemov 		 *
15199a73f61bSKirill A. Shutemov 		 * We can expect PageDoubleMap() to be stable under page lock:
15209a73f61bSKirill A. Shutemov 		 * for file pages we set it in page_add_file_rmap(), which
15219a73f61bSKirill A. Shutemov 		 * requires page to be locked.
1522e90309c9SKirill A. Shutemov 		 */
15239a73f61bSKirill A. Shutemov 
15249a73f61bSKirill A. Shutemov 		if (PageAnon(page) && compound_mapcount(page) != 1)
15259a73f61bSKirill A. Shutemov 			goto skip_mlock;
15269a73f61bSKirill A. Shutemov 		if (PageDoubleMap(page) || !page->mapping)
15279a73f61bSKirill A. Shutemov 			goto skip_mlock;
15289a73f61bSKirill A. Shutemov 		if (!trylock_page(page))
15299a73f61bSKirill A. Shutemov 			goto skip_mlock;
1530b676b293SDavid Rientjes 		lru_add_drain();
15319a73f61bSKirill A. Shutemov 		if (page->mapping && !PageDoubleMap(page))
1532b676b293SDavid Rientjes 			mlock_vma_page(page);
1533b676b293SDavid Rientjes 		unlock_page(page);
1534b676b293SDavid Rientjes 	}
15359a73f61bSKirill A. Shutemov skip_mlock:
153671e3aac0SAndrea Arcangeli 	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
1537ca120cf6SDan Williams 	VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
153871e3aac0SAndrea Arcangeli 	if (flags & FOLL_GET)
1539ddc58f27SKirill A. Shutemov 		get_page(page);
154071e3aac0SAndrea Arcangeli 
154171e3aac0SAndrea Arcangeli out:
154271e3aac0SAndrea Arcangeli 	return page;
154371e3aac0SAndrea Arcangeli }
154471e3aac0SAndrea Arcangeli 
1545d10e63f2SMel Gorman /* NUMA hinting page fault entry point for trans huge pmds */
15462b740303SSouptick Joarder vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
1547d10e63f2SMel Gorman {
154882b0f8c3SJan Kara 	struct vm_area_struct *vma = vmf->vma;
1549b8916634SMel Gorman 	struct anon_vma *anon_vma = NULL;
1550b32967ffSMel Gorman 	struct page *page;
155182b0f8c3SJan Kara 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
155298fa15f3SAnshuman Khandual 	int page_nid = NUMA_NO_NODE, this_nid = numa_node_id();
155390572890SPeter Zijlstra 	int target_nid, last_cpupid = -1;
15548191acbdSMel Gorman 	bool page_locked;
15558191acbdSMel Gorman 	bool migrated = false;
1556b191f9b1SMel Gorman 	bool was_writable;
15576688cc05SPeter Zijlstra 	int flags = 0;
1558d10e63f2SMel Gorman 
155982b0f8c3SJan Kara 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
156082b0f8c3SJan Kara 	if (unlikely(!pmd_same(pmd, *vmf->pmd)))
1561d10e63f2SMel Gorman 		goto out_unlock;
1562d10e63f2SMel Gorman 
1563de466bd6SMel Gorman 	/*
1564de466bd6SMel Gorman 	 * If there are potential migrations, wait for completion and retry
1565de466bd6SMel Gorman 	 * without disrupting NUMA hinting information. Do not relock and
1566de466bd6SMel Gorman 	 * check_same as the page may no longer be mapped.
1567de466bd6SMel Gorman 	 */
156882b0f8c3SJan Kara 	if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
156982b0f8c3SJan Kara 		page = pmd_page(*vmf->pmd);
15703c226c63SMark Rutland 		if (!get_page_unless_zero(page))
15713c226c63SMark Rutland 			goto out_unlock;
157282b0f8c3SJan Kara 		spin_unlock(vmf->ptl);
15739a1ea439SHugh Dickins 		put_and_wait_on_page_locked(page);
1574de466bd6SMel Gorman 		goto out;
1575de466bd6SMel Gorman 	}
1576de466bd6SMel Gorman 
1577d10e63f2SMel Gorman 	page = pmd_page(pmd);
1578a1a46184SMel Gorman 	BUG_ON(is_huge_zero_page(page));
15798191acbdSMel Gorman 	page_nid = page_to_nid(page);
158090572890SPeter Zijlstra 	last_cpupid = page_cpupid_last(page);
158103c5a6e1SMel Gorman 	count_vm_numa_event(NUMA_HINT_FAULTS);
158204bb2f94SRik van Riel 	if (page_nid == this_nid) {
158303c5a6e1SMel Gorman 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
158404bb2f94SRik van Riel 		flags |= TNF_FAULT_LOCAL;
158504bb2f94SRik van Riel 	}
15864daae3b4SMel Gorman 
1587bea66fbdSMel Gorman 	/* See similar comment in do_numa_page for explanation */
1588288bc549SAneesh Kumar K.V 	if (!pmd_savedwrite(pmd))
15896688cc05SPeter Zijlstra 		flags |= TNF_NO_GROUP;
15906688cc05SPeter Zijlstra 
15916688cc05SPeter Zijlstra 	/*
1592ff9042b1SMel Gorman 	 * Acquire the page lock to serialise THP migrations but avoid dropping
1593ff9042b1SMel Gorman 	 * page_table_lock if at all possible
1594ff9042b1SMel Gorman 	 */
1595b8916634SMel Gorman 	page_locked = trylock_page(page);
1596b8916634SMel Gorman 	target_nid = mpol_misplaced(page, vma, haddr);
159798fa15f3SAnshuman Khandual 	if (target_nid == NUMA_NO_NODE) {
1598b8916634SMel Gorman 		/* If the page was locked, there are no parallel migrations */
1599a54a407fSMel Gorman 		if (page_locked)
1600b8916634SMel Gorman 			goto clear_pmdnuma;
16012b4847e7SMel Gorman 	}
1602cbee9f88SPeter Zijlstra 
1603de466bd6SMel Gorman 	/* Migration could have started since the pmd_trans_migrating check */
16042b4847e7SMel Gorman 	if (!page_locked) {
160598fa15f3SAnshuman Khandual 		page_nid = NUMA_NO_NODE;
16063c226c63SMark Rutland 		if (!get_page_unless_zero(page))
16073c226c63SMark Rutland 			goto out_unlock;
160882b0f8c3SJan Kara 		spin_unlock(vmf->ptl);
16099a1ea439SHugh Dickins 		put_and_wait_on_page_locked(page);
1610b8916634SMel Gorman 		goto out;
1611b8916634SMel Gorman 	}
1612b8916634SMel Gorman 
16132b4847e7SMel Gorman 	/*
16142b4847e7SMel Gorman 	 * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
16152b4847e7SMel Gorman 	 * to serialise splits.
16162b4847e7SMel Gorman 	 */
1617b8916634SMel Gorman 	get_page(page);
161882b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
1619b8916634SMel Gorman 	anon_vma = page_lock_anon_vma_read(page);
1620b32967ffSMel Gorman 
1621c69307d5SPeter Zijlstra 	/* Confirm the PMD did not change while page_table_lock was released */
162282b0f8c3SJan Kara 	spin_lock(vmf->ptl);
162382b0f8c3SJan Kara 	if (unlikely(!pmd_same(pmd, *vmf->pmd))) {
1624b32967ffSMel Gorman 		unlock_page(page);
1625b32967ffSMel Gorman 		put_page(page);
162698fa15f3SAnshuman Khandual 		page_nid = NUMA_NO_NODE;
1627b32967ffSMel Gorman 		goto out_unlock;
1628b32967ffSMel Gorman 	}
1629ff9042b1SMel Gorman 
1630c3a489caSMel Gorman 	/* Bail if we fail to protect against THP splits for any reason */
1631c3a489caSMel Gorman 	if (unlikely(!anon_vma)) {
1632c3a489caSMel Gorman 		put_page(page);
163398fa15f3SAnshuman Khandual 		page_nid = NUMA_NO_NODE;
1634c3a489caSMel Gorman 		goto clear_pmdnuma;
1635c3a489caSMel Gorman 	}
1636c3a489caSMel Gorman 
1637a54a407fSMel Gorman 	/*
16388b1b436dSPeter Zijlstra 	 * Since we took the NUMA fault, we must have observed the !accessible
16398b1b436dSPeter Zijlstra 	 * bit. Make sure all other CPUs agree with that, to avoid them
16408b1b436dSPeter Zijlstra 	 * modifying the page we're about to migrate.
16418b1b436dSPeter Zijlstra 	 *
16428b1b436dSPeter Zijlstra 	 * Must be done under PTL such that we'll observe the relevant
1643ccde85baSPeter Zijlstra 	 * inc_tlb_flush_pending().
1644ccde85baSPeter Zijlstra 	 *
1645ccde85baSPeter Zijlstra 	 * We are not sure a pending tlb flush here is for a huge page
1646ccde85baSPeter Zijlstra 	 * mapping or not. Hence use the tlb range variant
16478b1b436dSPeter Zijlstra 	 */
16487066f0f9SAndrea Arcangeli 	if (mm_tlb_flush_pending(vma->vm_mm)) {
1649ccde85baSPeter Zijlstra 		flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
16507066f0f9SAndrea Arcangeli 		/*
16517066f0f9SAndrea Arcangeli 		 * change_huge_pmd() released the pmd lock before
16527066f0f9SAndrea Arcangeli 		 * invalidating the secondary MMUs sharing the primary
16537066f0f9SAndrea Arcangeli 		 * MMU pagetables (with ->invalidate_range()). The
16547066f0f9SAndrea Arcangeli 		 * mmu_notifier_invalidate_range_end() (which
16557066f0f9SAndrea Arcangeli 		 * internally calls ->invalidate_range()) in
16567066f0f9SAndrea Arcangeli 		 * change_pmd_range() will run after us, so we can't
16577066f0f9SAndrea Arcangeli 		 * rely on it here and we need an explicit invalidate.
16587066f0f9SAndrea Arcangeli 		 */
16597066f0f9SAndrea Arcangeli 		mmu_notifier_invalidate_range(vma->vm_mm, haddr,
16607066f0f9SAndrea Arcangeli 					      haddr + HPAGE_PMD_SIZE);
16617066f0f9SAndrea Arcangeli 	}
16628b1b436dSPeter Zijlstra 
16638b1b436dSPeter Zijlstra 	/*
1664a54a407fSMel Gorman 	 * Migrate the THP to the requested node, returns with page unlocked
16658a0516edSMel Gorman 	 * and access rights restored.
1666a54a407fSMel Gorman 	 */
166782b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
16688b1b436dSPeter Zijlstra 
1669bae473a4SKirill A. Shutemov 	migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
167082b0f8c3SJan Kara 				vmf->pmd, pmd, vmf->address, page, target_nid);
16716688cc05SPeter Zijlstra 	if (migrated) {
16726688cc05SPeter Zijlstra 		flags |= TNF_MIGRATED;
16738191acbdSMel Gorman 		page_nid = target_nid;
1674074c2381SMel Gorman 	} else
1675074c2381SMel Gorman 		flags |= TNF_MIGRATE_FAIL;
1676b32967ffSMel Gorman 
16778191acbdSMel Gorman 	goto out;
16784daae3b4SMel Gorman clear_pmdnuma:
1679a54a407fSMel Gorman 	BUG_ON(!PageLocked(page));
1680288bc549SAneesh Kumar K.V 	was_writable = pmd_savedwrite(pmd);
16814d942466SMel Gorman 	pmd = pmd_modify(pmd, vma->vm_page_prot);
1682b7b04004SMel Gorman 	pmd = pmd_mkyoung(pmd);
1683b191f9b1SMel Gorman 	if (was_writable)
1684b191f9b1SMel Gorman 		pmd = pmd_mkwrite(pmd);
168582b0f8c3SJan Kara 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
168682b0f8c3SJan Kara 	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1687a54a407fSMel Gorman 	unlock_page(page);
1688d10e63f2SMel Gorman out_unlock:
168982b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
1690b8916634SMel Gorman 
1691b8916634SMel Gorman out:
1692b8916634SMel Gorman 	if (anon_vma)
1693b8916634SMel Gorman 		page_unlock_anon_vma_read(anon_vma);
1694b8916634SMel Gorman 
169598fa15f3SAnshuman Khandual 	if (page_nid != NUMA_NO_NODE)
169682b0f8c3SJan Kara 		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
16979a8b300fSAneesh Kumar K.V 				flags);
16988191acbdSMel Gorman 
1699d10e63f2SMel Gorman 	return 0;
1700d10e63f2SMel Gorman }
1701d10e63f2SMel Gorman 
1702319904adSHuang Ying /*
1703319904adSHuang Ying  * Return true if we do MADV_FREE successfully on entire pmd page.
1704319904adSHuang Ying  * Otherwise, return false.
1705319904adSHuang Ying  */
1706319904adSHuang Ying bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1707b8d3c4c3SMinchan Kim 		pmd_t *pmd, unsigned long addr, unsigned long next)
1708b8d3c4c3SMinchan Kim {
1709b8d3c4c3SMinchan Kim 	spinlock_t *ptl;
1710b8d3c4c3SMinchan Kim 	pmd_t orig_pmd;
1711b8d3c4c3SMinchan Kim 	struct page *page;
1712b8d3c4c3SMinchan Kim 	struct mm_struct *mm = tlb->mm;
1713319904adSHuang Ying 	bool ret = false;
1714b8d3c4c3SMinchan Kim 
1715ed6a7935SPeter Zijlstra 	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
171607e32661SAneesh Kumar K.V 
1717b6ec57f4SKirill A. Shutemov 	ptl = pmd_trans_huge_lock(pmd, vma);
1718b6ec57f4SKirill A. Shutemov 	if (!ptl)
171925eedabeSLinus Torvalds 		goto out_unlocked;
1720b8d3c4c3SMinchan Kim 
1721b8d3c4c3SMinchan Kim 	orig_pmd = *pmd;
1722319904adSHuang Ying 	if (is_huge_zero_pmd(orig_pmd))
1723b8d3c4c3SMinchan Kim 		goto out;
1724b8d3c4c3SMinchan Kim 
172584c3fc4eSZi Yan 	if (unlikely(!pmd_present(orig_pmd))) {
172684c3fc4eSZi Yan 		VM_BUG_ON(thp_migration_supported() &&
172784c3fc4eSZi Yan 				  !is_pmd_migration_entry(orig_pmd));
172884c3fc4eSZi Yan 		goto out;
172984c3fc4eSZi Yan 	}
173084c3fc4eSZi Yan 
1731b8d3c4c3SMinchan Kim 	page = pmd_page(orig_pmd);
1732b8d3c4c3SMinchan Kim 	/*
1733b8d3c4c3SMinchan Kim 	 * If other processes are mapping this page, we can't discard
1734b8d3c4c3SMinchan Kim 	 * the page unless they all do MADV_FREE, so let's skip the page.
1735b8d3c4c3SMinchan Kim 	 */
1736b8d3c4c3SMinchan Kim 	if (page_mapcount(page) != 1)
1737b8d3c4c3SMinchan Kim 		goto out;
1738b8d3c4c3SMinchan Kim 
1739b8d3c4c3SMinchan Kim 	if (!trylock_page(page))
1740b8d3c4c3SMinchan Kim 		goto out;
1741b8d3c4c3SMinchan Kim 
1742b8d3c4c3SMinchan Kim 	/*
1743b8d3c4c3SMinchan Kim 	 * If the user wants to discard only part of the THP's pages, split it
1744b8d3c4c3SMinchan Kim 	 * so MADV_FREE will deactivate only those pages.
1745b8d3c4c3SMinchan Kim 	 */
1746b8d3c4c3SMinchan Kim 	if (next - addr != HPAGE_PMD_SIZE) {
1747b8d3c4c3SMinchan Kim 		get_page(page);
1748b8d3c4c3SMinchan Kim 		spin_unlock(ptl);
17499818b8cdSHuang Ying 		split_huge_page(page);
1750b8d3c4c3SMinchan Kim 		unlock_page(page);
1751bbf29ffcSKirill A. Shutemov 		put_page(page);
1752b8d3c4c3SMinchan Kim 		goto out_unlocked;
1753b8d3c4c3SMinchan Kim 	}
1754b8d3c4c3SMinchan Kim 
1755b8d3c4c3SMinchan Kim 	if (PageDirty(page))
1756b8d3c4c3SMinchan Kim 		ClearPageDirty(page);
1757b8d3c4c3SMinchan Kim 	unlock_page(page);
1758b8d3c4c3SMinchan Kim 
1759b8d3c4c3SMinchan Kim 	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
176058ceeb6bSKirill A. Shutemov 		pmdp_invalidate(vma, addr, pmd);
1761b8d3c4c3SMinchan Kim 		orig_pmd = pmd_mkold(orig_pmd);
1762b8d3c4c3SMinchan Kim 		orig_pmd = pmd_mkclean(orig_pmd);
1763b8d3c4c3SMinchan Kim 
1764b8d3c4c3SMinchan Kim 		set_pmd_at(mm, addr, pmd, orig_pmd);
1765b8d3c4c3SMinchan Kim 		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1766b8d3c4c3SMinchan Kim 	}
1767802a3a92SShaohua Li 
1768802a3a92SShaohua Li 	mark_page_lazyfree(page);
1769319904adSHuang Ying 	ret = true;
1770b8d3c4c3SMinchan Kim out:
1771b8d3c4c3SMinchan Kim 	spin_unlock(ptl);
1772b8d3c4c3SMinchan Kim out_unlocked:
1773b8d3c4c3SMinchan Kim 	return ret;
1774b8d3c4c3SMinchan Kim }
1775b8d3c4c3SMinchan Kim 
1776953c66c2SAneesh Kumar K.V static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
1777953c66c2SAneesh Kumar K.V {
1778953c66c2SAneesh Kumar K.V 	pgtable_t pgtable;
1779953c66c2SAneesh Kumar K.V 
1780953c66c2SAneesh Kumar K.V 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1781953c66c2SAneesh Kumar K.V 	pte_free(mm, pgtable);
1782c4812909SKirill A. Shutemov 	mm_dec_nr_ptes(mm);
1783953c66c2SAneesh Kumar K.V }
1784953c66c2SAneesh Kumar K.V 
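/*
 * Unmap a huge pmd during zap: clear the entry, free the deposited page
 * table where one exists, adjust rmap and RSS accounting for mapped
 * pages, and queue the required TLB flush. Returns 1 if a huge pmd was
 * zapped, 0 if the pmd was not huge.
 */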
178571e3aac0SAndrea Arcangeli int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1786f21760b1SShaohua Li 		 pmd_t *pmd, unsigned long addr)
178771e3aac0SAndrea Arcangeli {
1788f5c8ad47SDavid Miller 	pmd_t orig_pmd;
1789da146769SKirill A. Shutemov 	spinlock_t *ptl;
1790da146769SKirill A. Shutemov 
1791ed6a7935SPeter Zijlstra 	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
179207e32661SAneesh Kumar K.V 
1793b6ec57f4SKirill A. Shutemov 	ptl = __pmd_trans_huge_lock(pmd, vma);
1794b6ec57f4SKirill A. Shutemov 	if (!ptl)
1795da146769SKirill A. Shutemov 		return 0;
1796a6bf2bb0SAneesh Kumar K.V 	/*
1797a6bf2bb0SAneesh Kumar K.V 	 * For architectures like ppc64 we look at the deposited pgtable
17988809aa2dSAneesh Kumar K.V 	 * when calling pmdp_huge_get_and_clear, so do the
1799a6bf2bb0SAneesh Kumar K.V 	 * pgtable_trans_huge_withdraw after finishing the pmdp-related
1800a6bf2bb0SAneesh Kumar K.V 	 * operations.
1801a6bf2bb0SAneesh Kumar K.V 	 */
18028809aa2dSAneesh Kumar K.V 	orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
1803fcbe08d6SMartin Schwidefsky 			tlb->fullmm);
1804f21760b1SShaohua Li 	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
18054897c765SMatthew Wilcox 	if (vma_is_dax(vma)) {
18063b6521f5SOliver O'Halloran 		if (arch_needs_pgtable_deposit())
18073b6521f5SOliver O'Halloran 			zap_deposited_table(tlb->mm, pmd);
18084897c765SMatthew Wilcox 		spin_unlock(ptl);
1809da146769SKirill A. Shutemov 		if (is_huge_zero_pmd(orig_pmd))
1810c0f2e176SAneesh Kumar K.V 			tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
1811da146769SKirill A. Shutemov 	} else if (is_huge_zero_pmd(orig_pmd)) {
1812c14a6eb4SOliver O'Halloran 		zap_deposited_table(tlb->mm, pmd);
1813bf929152SKirill A. Shutemov 		spin_unlock(ptl);
1814c0f2e176SAneesh Kumar K.V 		tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
1815479f0abbSKirill A. Shutemov 	} else {
1816616b8371SZi Yan 		struct page *page = NULL;
1817616b8371SZi Yan 		int flush_needed = 1;
1818616b8371SZi Yan 
1819616b8371SZi Yan 		if (pmd_present(orig_pmd)) {
1820616b8371SZi Yan 			page = pmd_page(orig_pmd);
1821d281ee61SKirill A. Shutemov 			page_remove_rmap(page, true);
1822309381feSSasha Levin 			VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1823309381feSSasha Levin 			VM_BUG_ON_PAGE(!PageHead(page), page);
1824616b8371SZi Yan 		} else if (thp_migration_supported()) {
1825616b8371SZi Yan 			swp_entry_t entry;
1826616b8371SZi Yan 
1827616b8371SZi Yan 			VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
1828616b8371SZi Yan 			entry = pmd_to_swp_entry(orig_pmd);
1829616b8371SZi Yan 			page = pfn_to_page(swp_offset(entry));
1830616b8371SZi Yan 			flush_needed = 0;
1831616b8371SZi Yan 		} else
1832616b8371SZi Yan 			WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
1833616b8371SZi Yan 
1834b5072380SKirill A. Shutemov 		if (PageAnon(page)) {
1835c14a6eb4SOliver O'Halloran 			zap_deposited_table(tlb->mm, pmd);
1836b5072380SKirill A. Shutemov 			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1837b5072380SKirill A. Shutemov 		} else {
1838953c66c2SAneesh Kumar K.V 			if (arch_needs_pgtable_deposit())
1839953c66c2SAneesh Kumar K.V 				zap_deposited_table(tlb->mm, pmd);
1840fadae295SYang Shi 			add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
1841b5072380SKirill A. Shutemov 		}
1842616b8371SZi Yan 
1843bf929152SKirill A. Shutemov 		spin_unlock(ptl);
1844616b8371SZi Yan 		if (flush_needed)
1845e77b0852SAneesh Kumar K.V 			tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
1846479f0abbSKirill A. Shutemov 	}
1847da146769SKirill A. Shutemov 	return 1;
184871e3aac0SAndrea Arcangeli }
184971e3aac0SAndrea Arcangeli 
18501dd38b6cSAneesh Kumar K.V #ifndef pmd_move_must_withdraw
18511dd38b6cSAneesh Kumar K.V static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
18521dd38b6cSAneesh Kumar K.V 					 spinlock_t *old_pmd_ptl,
18531dd38b6cSAneesh Kumar K.V 					 struct vm_area_struct *vma)
18541dd38b6cSAneesh Kumar K.V {
18551dd38b6cSAneesh Kumar K.V 	/*
18561dd38b6cSAneesh Kumar K.V 	 * With the split pmd lock we also need to move the preallocated
18571dd38b6cSAneesh Kumar K.V 	 * PTE page table if new_pmd is on a different PMD page table.
18581dd38b6cSAneesh Kumar K.V 	 *
18591dd38b6cSAneesh Kumar K.V 	 * We also don't deposit and withdraw tables for file pages.
18601dd38b6cSAneesh Kumar K.V 	 */
18611dd38b6cSAneesh Kumar K.V 	return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
18621dd38b6cSAneesh Kumar K.V }
18631dd38b6cSAneesh Kumar K.V #endif
18641dd38b6cSAneesh Kumar K.V 
1865ab6e3d09SNaoya Horiguchi static pmd_t move_soft_dirty_pmd(pmd_t pmd)
1866ab6e3d09SNaoya Horiguchi {
1867ab6e3d09SNaoya Horiguchi #ifdef CONFIG_MEM_SOFT_DIRTY
1868ab6e3d09SNaoya Horiguchi 	if (unlikely(is_pmd_migration_entry(pmd)))
1869ab6e3d09SNaoya Horiguchi 		pmd = pmd_swp_mksoft_dirty(pmd);
1870ab6e3d09SNaoya Horiguchi 	else if (pmd_present(pmd))
1871ab6e3d09SNaoya Horiguchi 		pmd = pmd_mksoft_dirty(pmd);
1872ab6e3d09SNaoya Horiguchi #endif
1873ab6e3d09SNaoya Horiguchi 	return pmd;
1874ab6e3d09SNaoya Horiguchi }
1875ab6e3d09SNaoya Horiguchi 
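/*
 * Move a huge pmd for mremap(): clear the entry at the old address and
 * re-install it at the new one, moving the deposited page table along
 * when required. Returns true if a huge pmd was moved.
 */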
1876bf8616d5SHugh Dickins bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
187737a1c49aSAndrea Arcangeli 		  unsigned long new_addr, unsigned long old_end,
1878eb66ae03SLinus Torvalds 		  pmd_t *old_pmd, pmd_t *new_pmd)
187937a1c49aSAndrea Arcangeli {
1880bf929152SKirill A. Shutemov 	spinlock_t *old_ptl, *new_ptl;
188137a1c49aSAndrea Arcangeli 	pmd_t pmd;
188237a1c49aSAndrea Arcangeli 	struct mm_struct *mm = vma->vm_mm;
18835d190420SAaron Lu 	bool force_flush = false;
188437a1c49aSAndrea Arcangeli 
188537a1c49aSAndrea Arcangeli 	if ((old_addr & ~HPAGE_PMD_MASK) ||
188637a1c49aSAndrea Arcangeli 	    (new_addr & ~HPAGE_PMD_MASK) ||
1887bf8616d5SHugh Dickins 	    old_end - old_addr < HPAGE_PMD_SIZE)
18884b471e88SKirill A. Shutemov 		return false;
188937a1c49aSAndrea Arcangeli 
189037a1c49aSAndrea Arcangeli 	/*
189137a1c49aSAndrea Arcangeli 	 * The destination pmd shouldn't be established, free_pgtables()
189237a1c49aSAndrea Arcangeli 	 * should have released it.
189337a1c49aSAndrea Arcangeli 	 */
189437a1c49aSAndrea Arcangeli 	if (WARN_ON(!pmd_none(*new_pmd))) {
189537a1c49aSAndrea Arcangeli 		VM_BUG_ON(pmd_trans_huge(*new_pmd));
18964b471e88SKirill A. Shutemov 		return false;
189737a1c49aSAndrea Arcangeli 	}
189837a1c49aSAndrea Arcangeli 
1899bf929152SKirill A. Shutemov 	/*
1900bf929152SKirill A. Shutemov 	 * We don't have to worry about the ordering of src and dst
1901bf929152SKirill A. Shutemov 	 * ptlocks because exclusive mmap_sem prevents deadlock.
1902bf929152SKirill A. Shutemov 	 */
1903b6ec57f4SKirill A. Shutemov 	old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
1904b6ec57f4SKirill A. Shutemov 	if (old_ptl) {
1905bf929152SKirill A. Shutemov 		new_ptl = pmd_lockptr(mm, new_pmd);
1906bf929152SKirill A. Shutemov 		if (new_ptl != old_ptl)
1907bf929152SKirill A. Shutemov 			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
19088809aa2dSAneesh Kumar K.V 		pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1909eb66ae03SLinus Torvalds 		if (pmd_present(pmd))
1910a2ce2666SAaron Lu 			force_flush = true;
191137a1c49aSAndrea Arcangeli 		VM_BUG_ON(!pmd_none(*new_pmd));
19123592806cSKirill A. Shutemov 
19131dd38b6cSAneesh Kumar K.V 		if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
1914b3084f4dSAneesh Kumar K.V 			pgtable_t pgtable;
19153592806cSKirill A. Shutemov 			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
19163592806cSKirill A. Shutemov 			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
19173592806cSKirill A. Shutemov 		}
1918ab6e3d09SNaoya Horiguchi 		pmd = move_soft_dirty_pmd(pmd);
1919ab6e3d09SNaoya Horiguchi 		set_pmd_at(mm, new_addr, new_pmd, pmd);
19205d190420SAaron Lu 		if (force_flush)
19215d190420SAaron Lu 			flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
1922eb66ae03SLinus Torvalds 		if (new_ptl != old_ptl)
1923eb66ae03SLinus Torvalds 			spin_unlock(new_ptl);
1924bf929152SKirill A. Shutemov 		spin_unlock(old_ptl);
19254b471e88SKirill A. Shutemov 		return true;
192637a1c49aSAndrea Arcangeli 	}
19274b471e88SKirill A. Shutemov 	return false;
192837a1c49aSAndrea Arcangeli }
192937a1c49aSAndrea Arcangeli 
1930f123d74aSMel Gorman /*
1931f123d74aSMel Gorman  * Returns
1932f123d74aSMel Gorman  *  - 0 if PMD could not be locked
1933f123d74aSMel Gorman  *  - 1 if PMD was locked but protections were unchanged and no TLB flush was necessary
1934f123d74aSMel Gorman  *  - HPAGE_PMD_NR if protections changed and a TLB flush is necessary
1935f123d74aSMel Gorman  */
1936cd7548abSJohannes Weiner int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1937e944fd67SMel Gorman 		unsigned long addr, pgprot_t newprot, int prot_numa)
1938cd7548abSJohannes Weiner {
1939cd7548abSJohannes Weiner 	struct mm_struct *mm = vma->vm_mm;
1940bf929152SKirill A. Shutemov 	spinlock_t *ptl;
19410a85e51dSKirill A. Shutemov 	pmd_t entry;
19420a85e51dSKirill A. Shutemov 	bool preserve_write;
19430a85e51dSKirill A. Shutemov 	int ret;
1944cd7548abSJohannes Weiner 
1945b6ec57f4SKirill A. Shutemov 	ptl = __pmd_trans_huge_lock(pmd, vma);
19460a85e51dSKirill A. Shutemov 	if (!ptl)
19470a85e51dSKirill A. Shutemov 		return 0;
19480a85e51dSKirill A. Shutemov 
19490a85e51dSKirill A. Shutemov 	preserve_write = prot_numa && pmd_write(*pmd);
1950ba68bc01SMel Gorman 	ret = 1;
1951e944fd67SMel Gorman 
195284c3fc4eSZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
195384c3fc4eSZi Yan 	if (is_swap_pmd(*pmd)) {
195484c3fc4eSZi Yan 		swp_entry_t entry = pmd_to_swp_entry(*pmd);
195584c3fc4eSZi Yan 
195684c3fc4eSZi Yan 		VM_BUG_ON(!is_pmd_migration_entry(*pmd));
195784c3fc4eSZi Yan 		if (is_write_migration_entry(entry)) {
195884c3fc4eSZi Yan 			pmd_t newpmd;
195984c3fc4eSZi Yan 			/*
196084c3fc4eSZi Yan 			 * A protection check is difficult so
196184c3fc4eSZi Yan 			 * just be safe and disable write
196284c3fc4eSZi Yan 			 */
196384c3fc4eSZi Yan 			make_migration_entry_read(&entry);
196484c3fc4eSZi Yan 			newpmd = swp_entry_to_pmd(entry);
1965ab6e3d09SNaoya Horiguchi 			if (pmd_swp_soft_dirty(*pmd))
1966ab6e3d09SNaoya Horiguchi 				newpmd = pmd_swp_mksoft_dirty(newpmd);
196784c3fc4eSZi Yan 			set_pmd_at(mm, addr, pmd, newpmd);
196884c3fc4eSZi Yan 		}
196984c3fc4eSZi Yan 		goto unlock;
197084c3fc4eSZi Yan 	}
197184c3fc4eSZi Yan #endif
197284c3fc4eSZi Yan 
1973e944fd67SMel Gorman 	/*
1974e944fd67SMel Gorman 	 * Avoid trapping faults against the zero page. The read-only
1975e944fd67SMel Gorman 	 * data is likely to be read-cached on the local CPU and
1976e944fd67SMel Gorman 	 * local/remote hits to the zero page are not interesting.
1977e944fd67SMel Gorman 	 */
19780a85e51dSKirill A. Shutemov 	if (prot_numa && is_huge_zero_pmd(*pmd))
19790a85e51dSKirill A. Shutemov 		goto unlock;
1980e944fd67SMel Gorman 
19810a85e51dSKirill A. Shutemov 	if (prot_numa && pmd_protnone(*pmd))
19820a85e51dSKirill A. Shutemov 		goto unlock;
19830a85e51dSKirill A. Shutemov 
1984ced10803SKirill A. Shutemov 	/*
1985ced10803SKirill A. Shutemov 	 * In the prot_numa case we are under down_read(mmap_sem). It's critical
1986ced10803SKirill A. Shutemov 	 * not to clear the pmd intermittently, to avoid racing with MADV_DONTNEED,
1987ced10803SKirill A. Shutemov 	 * which is also under down_read(mmap_sem):
1988ced10803SKirill A. Shutemov 	 *
1989ced10803SKirill A. Shutemov 	 *	CPU0:				CPU1:
1990ced10803SKirill A. Shutemov 	 *				change_huge_pmd(prot_numa=1)
1991ced10803SKirill A. Shutemov 	 *				 pmdp_huge_get_and_clear_notify()
1992ced10803SKirill A. Shutemov 	 * madvise_dontneed()
1993ced10803SKirill A. Shutemov 	 *  zap_pmd_range()
1994ced10803SKirill A. Shutemov 	 *   pmd_trans_huge(*pmd) == 0 (without ptl)
1995ced10803SKirill A. Shutemov 	 *   // skip the pmd
1996ced10803SKirill A. Shutemov 	 *				 set_pmd_at();
1997ced10803SKirill A. Shutemov 	 *				 // pmd is re-established
1998ced10803SKirill A. Shutemov 	 *
1999ced10803SKirill A. Shutemov 	 * The race makes MADV_DONTNEED miss the huge pmd and not clear it,
2000ced10803SKirill A. Shutemov 	 * which may break userspace.
2001ced10803SKirill A. Shutemov 	 *
2002ced10803SKirill A. Shutemov 	 * pmdp_invalidate() is required to make sure we don't miss
2003ced10803SKirill A. Shutemov 	 * dirty/young flags set by hardware.
2004ced10803SKirill A. Shutemov 	 */
2005a3cf988fSKirill A. Shutemov 	entry = pmdp_invalidate(vma, addr, pmd);
2006ced10803SKirill A. Shutemov 
2007cd7548abSJohannes Weiner 	entry = pmd_modify(entry, newprot);
2008b191f9b1SMel Gorman 	if (preserve_write)
2009288bc549SAneesh Kumar K.V 		entry = pmd_mk_savedwrite(entry);
2010f123d74aSMel Gorman 	ret = HPAGE_PMD_NR;
201156eecdb9SAneesh Kumar K.V 	set_pmd_at(mm, addr, pmd, entry);
20120a85e51dSKirill A. Shutemov 	BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
20130a85e51dSKirill A. Shutemov unlock:
2014bf929152SKirill A. Shutemov 	spin_unlock(ptl);
2015cd7548abSJohannes Weiner 	return ret;
2016cd7548abSJohannes Weiner }
2017cd7548abSJohannes Weiner 
2018025c5b24SNaoya Horiguchi /*
20198f19b0c0SHuang Ying  * Returns the page table lock pointer if a given pmd maps a thp, NULL otherwise.
2020025c5b24SNaoya Horiguchi  *
20218f19b0c0SHuang Ying  * Note that if it returns a page table lock pointer, this routine returns
20228f19b0c0SHuang Ying  * without unlocking the page table lock, so callers must unlock it.
2023025c5b24SNaoya Horiguchi  */
2024b6ec57f4SKirill A. Shutemov spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
2025025c5b24SNaoya Horiguchi {
2026b6ec57f4SKirill A. Shutemov 	spinlock_t *ptl;
2027b6ec57f4SKirill A. Shutemov 	ptl = pmd_lock(vma->vm_mm, pmd);
202884c3fc4eSZi Yan 	if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
202984c3fc4eSZi Yan 			pmd_devmap(*pmd)))
2030b6ec57f4SKirill A. Shutemov 		return ptl;
2031b6ec57f4SKirill A. Shutemov 	spin_unlock(ptl);
2032b6ec57f4SKirill A. Shutemov 	return NULL;
2033025c5b24SNaoya Horiguchi }
2034025c5b24SNaoya Horiguchi 
2035a00cc7d9SMatthew Wilcox /*
2036a00cc7d9SMatthew Wilcox  * Returns true if a given pud maps a thp, false otherwise.
2037a00cc7d9SMatthew Wilcox  *
2038a00cc7d9SMatthew Wilcox  * Note that if it returns true, this routine returns without unlocking the
2039a00cc7d9SMatthew Wilcox  * page table lock, so callers must unlock it.
2040a00cc7d9SMatthew Wilcox  */
2041a00cc7d9SMatthew Wilcox spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
2042a00cc7d9SMatthew Wilcox {
2043a00cc7d9SMatthew Wilcox 	spinlock_t *ptl;
2044a00cc7d9SMatthew Wilcox 
2045a00cc7d9SMatthew Wilcox 	ptl = pud_lock(vma->vm_mm, pud);
2046a00cc7d9SMatthew Wilcox 	if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
2047a00cc7d9SMatthew Wilcox 		return ptl;
2048a00cc7d9SMatthew Wilcox 	spin_unlock(ptl);
2049a00cc7d9SMatthew Wilcox 	return NULL;
2050a00cc7d9SMatthew Wilcox }
2051a00cc7d9SMatthew Wilcox 
2052a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2053a00cc7d9SMatthew Wilcox int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
2054a00cc7d9SMatthew Wilcox 		 pud_t *pud, unsigned long addr)
2055a00cc7d9SMatthew Wilcox {
2056a00cc7d9SMatthew Wilcox 	spinlock_t *ptl;
2057a00cc7d9SMatthew Wilcox 
2058a00cc7d9SMatthew Wilcox 	ptl = __pud_trans_huge_lock(pud, vma);
2059a00cc7d9SMatthew Wilcox 	if (!ptl)
2060a00cc7d9SMatthew Wilcox 		return 0;
2061a00cc7d9SMatthew Wilcox 	/*
2062a00cc7d9SMatthew Wilcox 	 * For architectures like ppc64 we look at the deposited pgtable
2063a00cc7d9SMatthew Wilcox 	 * when calling pudp_huge_get_and_clear, so do the
2064a00cc7d9SMatthew Wilcox 	 * pgtable_trans_huge_withdraw after finishing the pudp-related
2065a00cc7d9SMatthew Wilcox 	 * operations.
2066a00cc7d9SMatthew Wilcox 	 */
206770516b93SQian Cai 	pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm);
2068a00cc7d9SMatthew Wilcox 	tlb_remove_pud_tlb_entry(tlb, pud, addr);
2069a00cc7d9SMatthew Wilcox 	if (vma_is_dax(vma)) {
2070a00cc7d9SMatthew Wilcox 		spin_unlock(ptl);
2071a00cc7d9SMatthew Wilcox 		/* No zero page support yet */
2072a00cc7d9SMatthew Wilcox 	} else {
2073a00cc7d9SMatthew Wilcox 		/* No support for anonymous PUD pages yet */
2074a00cc7d9SMatthew Wilcox 		BUG();
2075a00cc7d9SMatthew Wilcox 	}
2076a00cc7d9SMatthew Wilcox 	return 1;
2077a00cc7d9SMatthew Wilcox }
2078a00cc7d9SMatthew Wilcox 
2079a00cc7d9SMatthew Wilcox static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
2080a00cc7d9SMatthew Wilcox 		unsigned long haddr)
2081a00cc7d9SMatthew Wilcox {
2082a00cc7d9SMatthew Wilcox 	VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
2083a00cc7d9SMatthew Wilcox 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2084a00cc7d9SMatthew Wilcox 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
2085a00cc7d9SMatthew Wilcox 	VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
2086a00cc7d9SMatthew Wilcox 
2087ce9311cfSYisheng Xie 	count_vm_event(THP_SPLIT_PUD);
2088a00cc7d9SMatthew Wilcox 
2089a00cc7d9SMatthew Wilcox 	pudp_huge_clear_flush_notify(vma, haddr, pud);
2090a00cc7d9SMatthew Wilcox }
2091a00cc7d9SMatthew Wilcox 
2092a00cc7d9SMatthew Wilcox void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2093a00cc7d9SMatthew Wilcox 		unsigned long address)
2094a00cc7d9SMatthew Wilcox {
2095a00cc7d9SMatthew Wilcox 	spinlock_t *ptl;
2096ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
2097a00cc7d9SMatthew Wilcox 
20987269f999SJérôme Glisse 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
20996f4f13e8SJérôme Glisse 				address & HPAGE_PUD_MASK,
2100ac46d4f3SJérôme Glisse 				(address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
2101ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
2102ac46d4f3SJérôme Glisse 	ptl = pud_lock(vma->vm_mm, pud);
2103a00cc7d9SMatthew Wilcox 	if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
2104a00cc7d9SMatthew Wilcox 		goto out;
2105ac46d4f3SJérôme Glisse 	__split_huge_pud_locked(vma, pud, range.start);
2106a00cc7d9SMatthew Wilcox 
2107a00cc7d9SMatthew Wilcox out:
2108a00cc7d9SMatthew Wilcox 	spin_unlock(ptl);
21094645b9feSJérôme Glisse 	/*
21104645b9feSJérôme Glisse 	 * No need to double call mmu_notifier->invalidate_range() callback as
21114645b9feSJérôme Glisse 	 * the above pudp_huge_clear_flush_notify() did already call it.
21124645b9feSJérôme Glisse 	 */
2113ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_only_end(&range);
2114a00cc7d9SMatthew Wilcox }
2115a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2116a00cc7d9SMatthew Wilcox 
2117eef1b3baSKirill A. Shutemov static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2118eef1b3baSKirill A. Shutemov 		unsigned long haddr, pmd_t *pmd)
2119eef1b3baSKirill A. Shutemov {
2120eef1b3baSKirill A. Shutemov 	struct mm_struct *mm = vma->vm_mm;
2121eef1b3baSKirill A. Shutemov 	pgtable_t pgtable;
2122eef1b3baSKirill A. Shutemov 	pmd_t _pmd;
2123eef1b3baSKirill A. Shutemov 	int i;
2124eef1b3baSKirill A. Shutemov 
21250f10851eSJérôme Glisse 	/*
21260f10851eSJérôme Glisse 	 * Leave pmd empty until pte is filled. Note that it is fine to delay
21270f10851eSJérôme Glisse 	 * notification until mmu_notifier_invalidate_range_end() as we are
21280f10851eSJérôme Glisse 	 * replacing a write-protected zero page mapped by a pmd with
21290f10851eSJérôme Glisse 	 * write-protected zero pages mapped by ptes.
21300f10851eSJérôme Glisse 	 *
2131ad56b738SMike Rapoport 	 * See Documentation/vm/mmu_notifier.rst
21320f10851eSJérôme Glisse 	 */
21330f10851eSJérôme Glisse 	pmdp_huge_clear_flush(vma, haddr, pmd);
2134eef1b3baSKirill A. Shutemov 
2135eef1b3baSKirill A. Shutemov 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2136eef1b3baSKirill A. Shutemov 	pmd_populate(mm, &_pmd, pgtable);
2137eef1b3baSKirill A. Shutemov 
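	/*
	 * Map each of the HPAGE_PMD_NR subpages to the small zero page,
	 * write protected and marked special, mirroring the huge zero
	 * pmd that was just cleared above.
	 */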
2138eef1b3baSKirill A. Shutemov 	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2139eef1b3baSKirill A. Shutemov 		pte_t *pte, entry;
2140eef1b3baSKirill A. Shutemov 		entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
2141eef1b3baSKirill A. Shutemov 		entry = pte_mkspecial(entry);
2142eef1b3baSKirill A. Shutemov 		pte = pte_offset_map(&_pmd, haddr);
2143eef1b3baSKirill A. Shutemov 		VM_BUG_ON(!pte_none(*pte));
2144eef1b3baSKirill A. Shutemov 		set_pte_at(mm, haddr, pte, entry);
2145eef1b3baSKirill A. Shutemov 		pte_unmap(pte);
2146eef1b3baSKirill A. Shutemov 	}
2147eef1b3baSKirill A. Shutemov 	smp_wmb(); /* make pte visible before pmd */
2148eef1b3baSKirill A. Shutemov 	pmd_populate(mm, pmd, pgtable);
2149eef1b3baSKirill A. Shutemov }
2150eef1b3baSKirill A. Shutemov 
2151eef1b3baSKirill A. Shutemov static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2152ba988280SKirill A. Shutemov 		unsigned long haddr, bool freeze)
2153eef1b3baSKirill A. Shutemov {
2154eef1b3baSKirill A. Shutemov 	struct mm_struct *mm = vma->vm_mm;
2155eef1b3baSKirill A. Shutemov 	struct page *page;
2156eef1b3baSKirill A. Shutemov 	pgtable_t pgtable;
2157423ac9afSAneesh Kumar K.V 	pmd_t old_pmd, _pmd;
2158a3cf988fSKirill A. Shutemov 	bool young, write, soft_dirty, pmd_migration = false;
21592ac015e2SKirill A. Shutemov 	unsigned long addr;
2160eef1b3baSKirill A. Shutemov 	int i;
2161eef1b3baSKirill A. Shutemov 
2162eef1b3baSKirill A. Shutemov 	VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2163eef1b3baSKirill A. Shutemov 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2164eef1b3baSKirill A. Shutemov 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
216584c3fc4eSZi Yan 	VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
216684c3fc4eSZi Yan 				&& !pmd_devmap(*pmd));
2167eef1b3baSKirill A. Shutemov 
2168eef1b3baSKirill A. Shutemov 	count_vm_event(THP_SPLIT_PMD);
2169eef1b3baSKirill A. Shutemov 
2170d21b9e57SKirill A. Shutemov 	if (!vma_is_anonymous(vma)) {
2171d21b9e57SKirill A. Shutemov 		_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
2172953c66c2SAneesh Kumar K.V 		/*
2173953c66c2SAneesh Kumar K.V 		 * We are going to unmap this huge page. So
2174953c66c2SAneesh Kumar K.V 		 * just go ahead and zap it
2175953c66c2SAneesh Kumar K.V 		 */
2176953c66c2SAneesh Kumar K.V 		if (arch_needs_pgtable_deposit())
2177953c66c2SAneesh Kumar K.V 			zap_deposited_table(mm, pmd);
2178d21b9e57SKirill A. Shutemov 		if (vma_is_dax(vma))
2179d21b9e57SKirill A. Shutemov 			return;
2180d21b9e57SKirill A. Shutemov 		page = pmd_page(_pmd);
2181e1f1b157SHugh Dickins 		if (!PageDirty(page) && pmd_dirty(_pmd))
2182e1f1b157SHugh Dickins 			set_page_dirty(page);
2183d21b9e57SKirill A. Shutemov 		if (!PageReferenced(page) && pmd_young(_pmd))
2184d21b9e57SKirill A. Shutemov 			SetPageReferenced(page);
2185d21b9e57SKirill A. Shutemov 		page_remove_rmap(page, true);
2186d21b9e57SKirill A. Shutemov 		put_page(page);
2187fadae295SYang Shi 		add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
2188eef1b3baSKirill A. Shutemov 		return;
2189eef1b3baSKirill A. Shutemov 	} else if (is_huge_zero_pmd(*pmd)) {
21904645b9feSJérôme Glisse 		/*
21914645b9feSJérôme Glisse 		 * FIXME: Do we want to invalidate secondary mmu by calling
21924645b9feSJérôme Glisse 		 * mmu_notifier_invalidate_range() see comments below inside
21934645b9feSJérôme Glisse 		 * __split_huge_pmd() ?
21944645b9feSJérôme Glisse 		 *
21954645b9feSJérôme Glisse 		 * We are going from a write-protected huge zero page to
21964645b9feSJérôme Glisse 		 * write-protected small zero pages, so it does not seem useful
21974645b9feSJérôme Glisse 		 * to invalidate the secondary mmu at this time.
21984645b9feSJérôme Glisse 		 */
2199eef1b3baSKirill A. Shutemov 		return __split_huge_zero_page_pmd(vma, haddr, pmd);
2200eef1b3baSKirill A. Shutemov 	}
2201eef1b3baSKirill A. Shutemov 
2202423ac9afSAneesh Kumar K.V 	/*
2203423ac9afSAneesh Kumar K.V 	 * Up to this point the pmd is present and huge and userland has the
2204423ac9afSAneesh Kumar K.V 	 * whole access to the hugepage during the split (which happens in
2205423ac9afSAneesh Kumar K.V 	 * place). If we overwrite the pmd with the not-huge version pointing
2206423ac9afSAneesh Kumar K.V 	 * to the pte here (which of course we could if all CPUs were bug
2207423ac9afSAneesh Kumar K.V 	 * free), userland could trigger a small page size TLB miss on the
2208423ac9afSAneesh Kumar K.V 	 * small sized TLB while the hugepage TLB entry is still established in
2209423ac9afSAneesh Kumar K.V 	 * the huge TLB. Some CPU doesn't like that.
2210423ac9afSAneesh Kumar K.V 	 * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
2211423ac9afSAneesh Kumar K.V 	 * 383 on page 93. Intel should be safe but also warns that it's
2212423ac9afSAneesh Kumar K.V 	 * only safe if the permission and cache attributes of the two entries
2213423ac9afSAneesh Kumar K.V 	 * loaded in the two TLBs are identical (which should be the case here).
2214423ac9afSAneesh Kumar K.V 	 * But it is generally safer to never allow small and huge TLB entries
2215423ac9afSAneesh Kumar K.V 	 * for the same virtual address to be loaded simultaneously. So instead
2216423ac9afSAneesh Kumar K.V 	 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
2217423ac9afSAneesh Kumar K.V 	 * current pmd notpresent (atomically because here the pmd_trans_huge
2218423ac9afSAneesh Kumar K.V 	 * must remain set at all times on the pmd until the split is complete
2219423ac9afSAneesh Kumar K.V 	 * for this pmd), then we flush the SMP TLB and finally we write the
2220423ac9afSAneesh Kumar K.V 	 * non-huge version of the pmd entry with pmd_populate.
2221423ac9afSAneesh Kumar K.V 	 */
2222423ac9afSAneesh Kumar K.V 	old_pmd = pmdp_invalidate(vma, haddr, pmd);
2223423ac9afSAneesh Kumar K.V 
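	/*
	 * If the pmd was already a migration entry, recover the page and the
	 * write/soft-dirty state from the swap entry rather than from the
	 * (now invalidated) pmd itself.
	 */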
2224423ac9afSAneesh Kumar K.V 	pmd_migration = is_pmd_migration_entry(old_pmd);
22252e83ee1dSPeter Xu 	if (unlikely(pmd_migration)) {
222684c3fc4eSZi Yan 		swp_entry_t entry;
222784c3fc4eSZi Yan 
2228423ac9afSAneesh Kumar K.V 		entry = pmd_to_swp_entry(old_pmd);
222984c3fc4eSZi Yan 		page = pfn_to_page(swp_offset(entry));
22302e83ee1dSPeter Xu 		write = is_write_migration_entry(entry);
22312e83ee1dSPeter Xu 		young = false;
22322e83ee1dSPeter Xu 		soft_dirty = pmd_swp_soft_dirty(old_pmd);
22332e83ee1dSPeter Xu 	} else {
2234423ac9afSAneesh Kumar K.V 		page = pmd_page(old_pmd);
2235423ac9afSAneesh Kumar K.V 		if (pmd_dirty(old_pmd))
2236423ac9afSAneesh Kumar K.V 			SetPageDirty(page);
2237423ac9afSAneesh Kumar K.V 		write = pmd_write(old_pmd);
2238423ac9afSAneesh Kumar K.V 		young = pmd_young(old_pmd);
2239423ac9afSAneesh Kumar K.V 		soft_dirty = pmd_soft_dirty(old_pmd);
22402e83ee1dSPeter Xu 	}
22412e83ee1dSPeter Xu 	VM_BUG_ON_PAGE(!page_count(page), page);
22422e83ee1dSPeter Xu 	page_ref_add(page, HPAGE_PMD_NR - 1);
2243eef1b3baSKirill A. Shutemov 
2244423ac9afSAneesh Kumar K.V 	/*
2245423ac9afSAneesh Kumar K.V 	 * Withdraw the table only after we mark the pmd entry invalid.
2246423ac9afSAneesh Kumar K.V 	 * This is critical for some architectures (Power).
2247423ac9afSAneesh Kumar K.V 	 */
2248eef1b3baSKirill A. Shutemov 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2249eef1b3baSKirill A. Shutemov 	pmd_populate(mm, &_pmd, pgtable);
2250eef1b3baSKirill A. Shutemov 
22512ac015e2SKirill A. Shutemov 	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2252eef1b3baSKirill A. Shutemov 		pte_t entry, *pte;
2253eef1b3baSKirill A. Shutemov 		/*
2254eef1b3baSKirill A. Shutemov 		 * Note that NUMA hinting access restrictions are not
2255eef1b3baSKirill A. Shutemov 		 * transferred to avoid any possibility of altering
2256eef1b3baSKirill A. Shutemov 		 * permissions across VMAs.
2257eef1b3baSKirill A. Shutemov 		 */
225884c3fc4eSZi Yan 		if (freeze || pmd_migration) {
2259ba988280SKirill A. Shutemov 			swp_entry_t swp_entry;
2260ba988280SKirill A. Shutemov 			swp_entry = make_migration_entry(page + i, write);
2261ba988280SKirill A. Shutemov 			entry = swp_entry_to_pte(swp_entry);
2262804dd150SAndrea Arcangeli 			if (soft_dirty)
2263804dd150SAndrea Arcangeli 				entry = pte_swp_mksoft_dirty(entry);
2264ba988280SKirill A. Shutemov 		} else {
22656d2329f8SAndrea Arcangeli 			entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
2266b8d3c4c3SMinchan Kim 			entry = maybe_mkwrite(entry, vma);
2267eef1b3baSKirill A. Shutemov 			if (!write)
2268eef1b3baSKirill A. Shutemov 				entry = pte_wrprotect(entry);
2269eef1b3baSKirill A. Shutemov 			if (!young)
2270eef1b3baSKirill A. Shutemov 				entry = pte_mkold(entry);
2271804dd150SAndrea Arcangeli 			if (soft_dirty)
2272804dd150SAndrea Arcangeli 				entry = pte_mksoft_dirty(entry);
2273ba988280SKirill A. Shutemov 		}
22742ac015e2SKirill A. Shutemov 		pte = pte_offset_map(&_pmd, addr);
2275eef1b3baSKirill A. Shutemov 		BUG_ON(!pte_none(*pte));
22762ac015e2SKirill A. Shutemov 		set_pte_at(mm, addr, pte, entry);
2277eef1b3baSKirill A. Shutemov 		atomic_inc(&page[i]._mapcount);
2278eef1b3baSKirill A. Shutemov 		pte_unmap(pte);
2279eef1b3baSKirill A. Shutemov 	}
2280eef1b3baSKirill A. Shutemov 
2281eef1b3baSKirill A. Shutemov 	/*
2282eef1b3baSKirill A. Shutemov 	 * Set PG_double_map before dropping compound_mapcount to avoid
2283eef1b3baSKirill A. Shutemov 	 * false-negative page_mapped().
2284eef1b3baSKirill A. Shutemov 	 */
2285eef1b3baSKirill A. Shutemov 	if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
2286eef1b3baSKirill A. Shutemov 		for (i = 0; i < HPAGE_PMD_NR; i++)
2287eef1b3baSKirill A. Shutemov 			atomic_inc(&page[i]._mapcount);
2288eef1b3baSKirill A. Shutemov 	}
2289eef1b3baSKirill A. Shutemov 
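	/*
	 * Drop the pmd-level (compound) mapcount now that the per-pte
	 * mapcounts have been set up above; if this was the last compound
	 * mapping, NR_ANON_THPS accounting is dropped as well.
	 */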
2290eef1b3baSKirill A. Shutemov 	if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
2291eef1b3baSKirill A. Shutemov 		/* Last compound_mapcount is gone. */
229211fb9989SMel Gorman 		__dec_node_page_state(page, NR_ANON_THPS);
2293eef1b3baSKirill A. Shutemov 		if (TestClearPageDoubleMap(page)) {
2294eef1b3baSKirill A. Shutemov 			/* No need for the mapcount reference anymore */
2295eef1b3baSKirill A. Shutemov 			for (i = 0; i < HPAGE_PMD_NR; i++)
2296eef1b3baSKirill A. Shutemov 				atomic_dec(&page[i]._mapcount);
2297eef1b3baSKirill A. Shutemov 		}
2298eef1b3baSKirill A. Shutemov 	}
2299eef1b3baSKirill A. Shutemov 
2300eef1b3baSKirill A. Shutemov 	smp_wmb(); /* make pte visible before pmd */
2301eef1b3baSKirill A. Shutemov 	pmd_populate(mm, pmd, pgtable);
2302e9b61f19SKirill A. Shutemov 
2303e9b61f19SKirill A. Shutemov 	if (freeze) {
23042ac015e2SKirill A. Shutemov 		for (i = 0; i < HPAGE_PMD_NR; i++) {
2305e9b61f19SKirill A. Shutemov 			page_remove_rmap(page + i, false);
2306e9b61f19SKirill A. Shutemov 			put_page(page + i);
2307e9b61f19SKirill A. Shutemov 		}
2308e9b61f19SKirill A. Shutemov 	}
2309eef1b3baSKirill A. Shutemov }
2310eef1b3baSKirill A. Shutemov 
2311eef1b3baSKirill A. Shutemov void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
231233f4751eSNaoya Horiguchi 		unsigned long address, bool freeze, struct page *page)
2313eef1b3baSKirill A. Shutemov {
2314eef1b3baSKirill A. Shutemov 	spinlock_t *ptl;
2315ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
2316eef1b3baSKirill A. Shutemov 
23177269f999SJérôme Glisse 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
23186f4f13e8SJérôme Glisse 				address & HPAGE_PMD_MASK,
2319ac46d4f3SJérôme Glisse 				(address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
2320ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
2321ac46d4f3SJérôme Glisse 	ptl = pmd_lock(vma->vm_mm, pmd);
232233f4751eSNaoya Horiguchi 
232333f4751eSNaoya Horiguchi 	/*
232433f4751eSNaoya Horiguchi 	 * If the caller asks us to set up migration entries, we need a page to
232533f4751eSNaoya Horiguchi 	 * check the pmd against. Otherwise we can end up replacing the wrong page.
232633f4751eSNaoya Horiguchi 	 */
232733f4751eSNaoya Horiguchi 	VM_BUG_ON(freeze && !page);
232833f4751eSNaoya Horiguchi 	if (page && page != pmd_page(*pmd))
232933f4751eSNaoya Horiguchi 		goto out;
233033f4751eSNaoya Horiguchi 
23315c7fb56eSDan Williams 	if (pmd_trans_huge(*pmd)) {
233233f4751eSNaoya Horiguchi 		page = pmd_page(*pmd);
2333e90309c9SKirill A. Shutemov 		if (PageMlocked(page))
23345f737714SKirill A. Shutemov 			clear_page_mlock(page);
233584c3fc4eSZi Yan 	} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
23365c7fb56eSDan Williams 		goto out;
2337ac46d4f3SJérôme Glisse 	__split_huge_pmd_locked(vma, pmd, range.start, freeze);
2338e90309c9SKirill A. Shutemov out:
2339eef1b3baSKirill A. Shutemov 	spin_unlock(ptl);
23404645b9feSJérôme Glisse 	/*
23414645b9feSJérôme Glisse 	 * No need to double call mmu_notifier->invalidate_range() callback.
23424645b9feSJérôme Glisse 	 * There are 3 cases to consider inside __split_huge_pmd_locked():
23434645b9feSJérôme Glisse 	 *  1) pmdp_huge_clear_flush_notify() calls invalidate_range(): obvious
23444645b9feSJérôme Glisse 	 *  2) __split_huge_zero_page_pmd() installs read-only zero page ptes; any
23454645b9feSJérôme Glisse 	 *    write fault will trigger a flush_notify before pointing to a new page
23464645b9feSJérôme Glisse 	 *    (it is fine if the secondary mmu keeps pointing to the old zero
23474645b9feSJérôme Glisse 	 *    page in the meantime)
23484645b9feSJérôme Glisse 	 *  3) Splitting a huge pmd into ptes pointing to the same page: no need
23494645b9feSJérôme Glisse 	 *     to invalidate secondary tlb entries, they are all still valid;
23504645b9feSJérôme Glisse 	 *     any further changes to individual ptes will notify. So no need
23514645b9feSJérôme Glisse 	 *     to call mmu_notifier->invalidate_range().
23524645b9feSJérôme Glisse 	 */
2353ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_only_end(&range);
2354eef1b3baSKirill A. Shutemov }
2355eef1b3baSKirill A. Shutemov 
2356fec89c10SKirill A. Shutemov void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
2357fec89c10SKirill A. Shutemov 		bool freeze, struct page *page)
235894fcc585SAndrea Arcangeli {
2359f72e7dcdSHugh Dickins 	pgd_t *pgd;
2360c2febafcSKirill A. Shutemov 	p4d_t *p4d;
2361f72e7dcdSHugh Dickins 	pud_t *pud;
236294fcc585SAndrea Arcangeli 	pmd_t *pmd;
236394fcc585SAndrea Arcangeli 
236478ddc534SKirill A. Shutemov 	pgd = pgd_offset(vma->vm_mm, address);
2365f72e7dcdSHugh Dickins 	if (!pgd_present(*pgd))
2366f72e7dcdSHugh Dickins 		return;
2367f72e7dcdSHugh Dickins 
2368c2febafcSKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
2369c2febafcSKirill A. Shutemov 	if (!p4d_present(*p4d))
2370c2febafcSKirill A. Shutemov 		return;
2371c2febafcSKirill A. Shutemov 
2372c2febafcSKirill A. Shutemov 	pud = pud_offset(p4d, address);
2373f72e7dcdSHugh Dickins 	if (!pud_present(*pud))
2374f72e7dcdSHugh Dickins 		return;
2375f72e7dcdSHugh Dickins 
2376f72e7dcdSHugh Dickins 	pmd = pmd_offset(pud, address);
2377fec89c10SKirill A. Shutemov 
237833f4751eSNaoya Horiguchi 	__split_huge_pmd(vma, pmd, address, freeze, page);
237994fcc585SAndrea Arcangeli }
238094fcc585SAndrea Arcangeli 
2381e1b9996bSKirill A. Shutemov void vma_adjust_trans_huge(struct vm_area_struct *vma,
238294fcc585SAndrea Arcangeli 			     unsigned long start,
238394fcc585SAndrea Arcangeli 			     unsigned long end,
238494fcc585SAndrea Arcangeli 			     long adjust_next)
238594fcc585SAndrea Arcangeli {
238694fcc585SAndrea Arcangeli 	/*
238794fcc585SAndrea Arcangeli 	 * If the new start address isn't hpage aligned and it could
238894fcc585SAndrea Arcangeli 	 * previously contain a hugepage: check if we need to split
238994fcc585SAndrea Arcangeli 	 * a huge pmd.
239094fcc585SAndrea Arcangeli 	 */
239194fcc585SAndrea Arcangeli 	if (start & ~HPAGE_PMD_MASK &&
239294fcc585SAndrea Arcangeli 	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
239394fcc585SAndrea Arcangeli 	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2394fec89c10SKirill A. Shutemov 		split_huge_pmd_address(vma, start, false, NULL);
239594fcc585SAndrea Arcangeli 
239694fcc585SAndrea Arcangeli 	/*
239794fcc585SAndrea Arcangeli 	 * If the new end address isn't hpage aligned and it could
239894fcc585SAndrea Arcangeli 	 * previously contain a hugepage: check if we need to split
239994fcc585SAndrea Arcangeli 	 * a huge pmd.
240094fcc585SAndrea Arcangeli 	 */
240194fcc585SAndrea Arcangeli 	if (end & ~HPAGE_PMD_MASK &&
240294fcc585SAndrea Arcangeli 	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
240394fcc585SAndrea Arcangeli 	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2404fec89c10SKirill A. Shutemov 		split_huge_pmd_address(vma, end, false, NULL);
240594fcc585SAndrea Arcangeli 
240694fcc585SAndrea Arcangeli 	/*
240794fcc585SAndrea Arcangeli 	 * If we're also updating the vma->vm_next->vm_start, if the new
240894fcc585SAndrea Arcangeli 	 * vm_next->vm_start isn't page aligned and it could previously
240994fcc585SAndrea Arcangeli 	 * contain a hugepage: check if we need to split a huge pmd.
241094fcc585SAndrea Arcangeli 	 */
241194fcc585SAndrea Arcangeli 	if (adjust_next > 0) {
241294fcc585SAndrea Arcangeli 		struct vm_area_struct *next = vma->vm_next;
241394fcc585SAndrea Arcangeli 		unsigned long nstart = next->vm_start;
241494fcc585SAndrea Arcangeli 		nstart += adjust_next << PAGE_SHIFT;
241594fcc585SAndrea Arcangeli 		if (nstart & ~HPAGE_PMD_MASK &&
241694fcc585SAndrea Arcangeli 		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
241794fcc585SAndrea Arcangeli 		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
2418fec89c10SKirill A. Shutemov 			split_huge_pmd_address(next, nstart, false, NULL);
241994fcc585SAndrea Arcangeli 	}
242094fcc585SAndrea Arcangeli }
2421e9b61f19SKirill A. Shutemov 
2422906f9cdfSHugh Dickins static void unmap_page(struct page *page)
2423e9b61f19SKirill A. Shutemov {
2424baa355fdSKirill A. Shutemov 	enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
2425c7ab0d2fSKirill A. Shutemov 		TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
2426666e5a40SMinchan Kim 	bool unmap_success;
2427e9b61f19SKirill A. Shutemov 
2428e9b61f19SKirill A. Shutemov 	VM_BUG_ON_PAGE(!PageHead(page), page);
2429e9b61f19SKirill A. Shutemov 
2430baa355fdSKirill A. Shutemov 	if (PageAnon(page))
2431b5ff8161SNaoya Horiguchi 		ttu_flags |= TTU_SPLIT_FREEZE;
2432baa355fdSKirill A. Shutemov 
2433666e5a40SMinchan Kim 	unmap_success = try_to_unmap(page, ttu_flags);
2434666e5a40SMinchan Kim 	VM_BUG_ON_PAGE(!unmap_success, page);
2435bd56086fSKirill A. Shutemov }
2436bd56086fSKirill A. Shutemov 
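/*
 * remap_page() undoes unmap_page(): the migration entries installed while
 * freezing the page are replaced with regular ptes pointing back at the
 * (possibly now split) page(s).
 */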
2437906f9cdfSHugh Dickins static void remap_page(struct page *page)
2438e9b61f19SKirill A. Shutemov {
2439fec89c10SKirill A. Shutemov 	int i;
2440ace71a19SKirill A. Shutemov 	if (PageTransHuge(page)) {
2441ace71a19SKirill A. Shutemov 		remove_migration_ptes(page, page, true);
2442ace71a19SKirill A. Shutemov 	} else {
2443fec89c10SKirill A. Shutemov 		for (i = 0; i < HPAGE_PMD_NR; i++)
2444fec89c10SKirill A. Shutemov 			remove_migration_ptes(page + i, page + i, true);
2445e9b61f19SKirill A. Shutemov 	}
2446ace71a19SKirill A. Shutemov }
2447e9b61f19SKirill A. Shutemov 
24488df651c7SKirill A. Shutemov static void __split_huge_page_tail(struct page *head, int tail,
2449e9b61f19SKirill A. Shutemov 		struct lruvec *lruvec, struct list_head *list)
2450e9b61f19SKirill A. Shutemov {
2451e9b61f19SKirill A. Shutemov 	struct page *page_tail = head + tail;
2452e9b61f19SKirill A. Shutemov 
24538df651c7SKirill A. Shutemov 	VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
2454e9b61f19SKirill A. Shutemov 
2455e9b61f19SKirill A. Shutemov 	/*
2456605ca5edSKonstantin Khlebnikov 	 * Clone page flags before unfreezing refcount.
2457605ca5edSKonstantin Khlebnikov 	 *
2458605ca5edSKonstantin Khlebnikov 	 * After a successful get_page_unless_zero() flag changes might follow,
2459605ca5edSKonstantin Khlebnikov 	 * for example lock_page() which sets PG_waiters.
2460e9b61f19SKirill A. Shutemov 	 */
2461e9b61f19SKirill A. Shutemov 	page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
2462e9b61f19SKirill A. Shutemov 	page_tail->flags |= (head->flags &
2463e9b61f19SKirill A. Shutemov 			((1L << PG_referenced) |
2464e9b61f19SKirill A. Shutemov 			 (1L << PG_swapbacked) |
246538d8b4e6SHuang Ying 			 (1L << PG_swapcache) |
2466e9b61f19SKirill A. Shutemov 			 (1L << PG_mlocked) |
2467e9b61f19SKirill A. Shutemov 			 (1L << PG_uptodate) |
2468e9b61f19SKirill A. Shutemov 			 (1L << PG_active) |
24691899ad18SJohannes Weiner 			 (1L << PG_workingset) |
2470e9b61f19SKirill A. Shutemov 			 (1L << PG_locked) |
2471b8d3c4c3SMinchan Kim 			 (1L << PG_unevictable) |
2472b8d3c4c3SMinchan Kim 			 (1L << PG_dirty)));
2473e9b61f19SKirill A. Shutemov 
2474173d9d9fSHugh Dickins 	/* ->mapping in first tail page is compound_mapcount */
2475173d9d9fSHugh Dickins 	VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
2476173d9d9fSHugh Dickins 			page_tail);
2477173d9d9fSHugh Dickins 	page_tail->mapping = head->mapping;
2478173d9d9fSHugh Dickins 	page_tail->index = head->index + tail;
2479173d9d9fSHugh Dickins 
2480605ca5edSKonstantin Khlebnikov 	/* Page flags must be visible before we make the page non-compound. */
2481e9b61f19SKirill A. Shutemov 	smp_wmb();
2482e9b61f19SKirill A. Shutemov 
2483605ca5edSKonstantin Khlebnikov 	/*
2484605ca5edSKonstantin Khlebnikov 	 * Clear PageTail before unfreezing page refcount.
2485605ca5edSKonstantin Khlebnikov 	 *
2486605ca5edSKonstantin Khlebnikov 	 * After a successful get_page_unless_zero() a put_page() might follow,
2487605ca5edSKonstantin Khlebnikov 	 * which needs a correct compound_head().
2488605ca5edSKonstantin Khlebnikov 	 */
2489e9b61f19SKirill A. Shutemov 	clear_compound_head(page_tail);
2490e9b61f19SKirill A. Shutemov 
2491605ca5edSKonstantin Khlebnikov 	/* Finally unfreeze refcount. Additional reference from page cache. */
2492605ca5edSKonstantin Khlebnikov 	page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
2493605ca5edSKonstantin Khlebnikov 					  PageSwapCache(head)));
2494605ca5edSKonstantin Khlebnikov 
2495e9b61f19SKirill A. Shutemov 	if (page_is_young(head))
2496e9b61f19SKirill A. Shutemov 		set_page_young(page_tail);
2497e9b61f19SKirill A. Shutemov 	if (page_is_idle(head))
2498e9b61f19SKirill A. Shutemov 		set_page_idle(page_tail);
2499e9b61f19SKirill A. Shutemov 
2500e9b61f19SKirill A. Shutemov 	page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
250194723aafSMichal Hocko 
250294723aafSMichal Hocko 	/*
250394723aafSMichal Hocko 	 * Always add to the tail because some iterators expect new
250494723aafSMichal Hocko 	 * pages to show up after the currently processed elements - e.g.
250594723aafSMichal Hocko 	 * migrate_pages.
250694723aafSMichal Hocko 	 */
2507e9b61f19SKirill A. Shutemov 	lru_add_page_tail(head, page_tail, lruvec, list);
2508e9b61f19SKirill A. Shutemov }
2509e9b61f19SKirill A. Shutemov 
2510baa355fdSKirill A. Shutemov static void __split_huge_page(struct page *page, struct list_head *list,
2511006d3ff2SHugh Dickins 		pgoff_t end, unsigned long flags)
2512e9b61f19SKirill A. Shutemov {
2513e9b61f19SKirill A. Shutemov 	struct page *head = compound_head(page);
2514f4b7e272SAndrey Ryabinin 	pg_data_t *pgdat = page_pgdat(head);
2515e9b61f19SKirill A. Shutemov 	struct lruvec *lruvec;
25164101196bSMatthew Wilcox (Oracle) 	struct address_space *swap_cache = NULL;
25174101196bSMatthew Wilcox (Oracle) 	unsigned long offset = 0;
25188df651c7SKirill A. Shutemov 	int i;
2519e9b61f19SKirill A. Shutemov 
2520f4b7e272SAndrey Ryabinin 	lruvec = mem_cgroup_page_lruvec(head, pgdat);
2521e9b61f19SKirill A. Shutemov 
2522e9b61f19SKirill A. Shutemov 	/* complete memcg works before add pages to LRU */
2523e9b61f19SKirill A. Shutemov 	mem_cgroup_split_huge_fixup(head);
2524e9b61f19SKirill A. Shutemov 
25254101196bSMatthew Wilcox (Oracle) 	if (PageAnon(head) && PageSwapCache(head)) {
25264101196bSMatthew Wilcox (Oracle) 		swp_entry_t entry = { .val = page_private(head) };
25274101196bSMatthew Wilcox (Oracle) 
25284101196bSMatthew Wilcox (Oracle) 		offset = swp_offset(entry);
25294101196bSMatthew Wilcox (Oracle) 		swap_cache = swap_address_space(entry);
25304101196bSMatthew Wilcox (Oracle) 		xa_lock(&swap_cache->i_pages);
25314101196bSMatthew Wilcox (Oracle) 	}
25324101196bSMatthew Wilcox (Oracle) 
2533baa355fdSKirill A. Shutemov 	for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
25348df651c7SKirill A. Shutemov 		__split_huge_page_tail(head, i, lruvec, list);
2535baa355fdSKirill A. Shutemov 		/* Some pages can be beyond i_size: drop them from page cache */
2536baa355fdSKirill A. Shutemov 		if (head[i].index >= end) {
25372d077d4bSHugh Dickins 			ClearPageDirty(head + i);
2538baa355fdSKirill A. Shutemov 			__delete_from_page_cache(head + i, NULL);
2539800d8c63SKirill A. Shutemov 			if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
2540800d8c63SKirill A. Shutemov 				shmem_uncharge(head->mapping->host, 1);
2541baa355fdSKirill A. Shutemov 			put_page(head + i);
25424101196bSMatthew Wilcox (Oracle) 		} else if (!PageAnon(page)) {
25434101196bSMatthew Wilcox (Oracle) 			__xa_store(&head->mapping->i_pages, head[i].index,
25444101196bSMatthew Wilcox (Oracle) 					head + i, 0);
25454101196bSMatthew Wilcox (Oracle) 		} else if (swap_cache) {
25464101196bSMatthew Wilcox (Oracle) 			__xa_store(&swap_cache->i_pages, offset + i,
25474101196bSMatthew Wilcox (Oracle) 					head + i, 0);
2548baa355fdSKirill A. Shutemov 		}
2549baa355fdSKirill A. Shutemov 	}
2550e9b61f19SKirill A. Shutemov 
2551e9b61f19SKirill A. Shutemov 	ClearPageCompound(head);
2552f7da677bSVlastimil Babka 
2553f7da677bSVlastimil Babka 	split_page_owner(head, HPAGE_PMD_ORDER);
2554f7da677bSVlastimil Babka 
2555baa355fdSKirill A. Shutemov 	/* See comment in __split_huge_page_tail() */
2556baa355fdSKirill A. Shutemov 	if (PageAnon(head)) {
2557aa5dc07fSMatthew Wilcox 		/* Additional pin to swap cache */
25584101196bSMatthew Wilcox (Oracle) 		if (PageSwapCache(head)) {
255938d8b4e6SHuang Ying 			page_ref_add(head, 2);
25604101196bSMatthew Wilcox (Oracle) 			xa_unlock(&swap_cache->i_pages);
25614101196bSMatthew Wilcox (Oracle) 		} else {
2562baa355fdSKirill A. Shutemov 			page_ref_inc(head);
25634101196bSMatthew Wilcox (Oracle) 		}
2564baa355fdSKirill A. Shutemov 	} else {
2565aa5dc07fSMatthew Wilcox 		/* Additional pin to page cache */
2566baa355fdSKirill A. Shutemov 		page_ref_add(head, 2);
2567b93b0163SMatthew Wilcox 		xa_unlock(&head->mapping->i_pages);
2568baa355fdSKirill A. Shutemov 	}
2569baa355fdSKirill A. Shutemov 
2570f4b7e272SAndrey Ryabinin 	spin_unlock_irqrestore(&pgdat->lru_lock, flags);
2571e9b61f19SKirill A. Shutemov 
2572906f9cdfSHugh Dickins 	remap_page(head);
2573e9b61f19SKirill A. Shutemov 
2574e9b61f19SKirill A. Shutemov 	for (i = 0; i < HPAGE_PMD_NR; i++) {
2575e9b61f19SKirill A. Shutemov 		struct page *subpage = head + i;
2576e9b61f19SKirill A. Shutemov 		if (subpage == page)
2577e9b61f19SKirill A. Shutemov 			continue;
2578e9b61f19SKirill A. Shutemov 		unlock_page(subpage);
2579e9b61f19SKirill A. Shutemov 
2580e9b61f19SKirill A. Shutemov 		/*
2581e9b61f19SKirill A. Shutemov 		 * Subpages may be freed if there wasn't any mapping left,
2582e9b61f19SKirill A. Shutemov 		 * e.g. if add_to_swap() is running on a lru page that
2583e9b61f19SKirill A. Shutemov 		 * had its mapping zapped. And freeing these pages
2584e9b61f19SKirill A. Shutemov 		 * requires taking the lru_lock so we do the put_page
2585e9b61f19SKirill A. Shutemov 		 * of the tail pages after the split is complete.
2586e9b61f19SKirill A. Shutemov 		 */
2587e9b61f19SKirill A. Shutemov 		put_page(subpage);
2588e9b61f19SKirill A. Shutemov 	}
2589e9b61f19SKirill A. Shutemov }
2590e9b61f19SKirill A. Shutemov 
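/*
 * Count all virtual mappings of a page: the pmd-level (compound) mapcount
 * plus the pte-level mapcounts of every subpage, taking care not to double
 * count file pages and PageDoubleMap anonymous pages.
 */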
2591b20ce5e0SKirill A. Shutemov int total_mapcount(struct page *page)
2592b20ce5e0SKirill A. Shutemov {
2593dd78feddSKirill A. Shutemov 	int i, compound, ret;
2594b20ce5e0SKirill A. Shutemov 
2595b20ce5e0SKirill A. Shutemov 	VM_BUG_ON_PAGE(PageTail(page), page);
2596b20ce5e0SKirill A. Shutemov 
2597b20ce5e0SKirill A. Shutemov 	if (likely(!PageCompound(page)))
2598b20ce5e0SKirill A. Shutemov 		return atomic_read(&page->_mapcount) + 1;
2599b20ce5e0SKirill A. Shutemov 
2600dd78feddSKirill A. Shutemov 	compound = compound_mapcount(page);
2601b20ce5e0SKirill A. Shutemov 	if (PageHuge(page))
2602dd78feddSKirill A. Shutemov 		return compound;
2603dd78feddSKirill A. Shutemov 	ret = compound;
2604b20ce5e0SKirill A. Shutemov 	for (i = 0; i < HPAGE_PMD_NR; i++)
2605b20ce5e0SKirill A. Shutemov 		ret += atomic_read(&page[i]._mapcount) + 1;
2606dd78feddSKirill A. Shutemov 	/* File pages have compound_mapcount included in _mapcount */
2607dd78feddSKirill A. Shutemov 	if (!PageAnon(page))
2608dd78feddSKirill A. Shutemov 		return ret - compound * HPAGE_PMD_NR;
2609b20ce5e0SKirill A. Shutemov 	if (PageDoubleMap(page))
2610b20ce5e0SKirill A. Shutemov 		ret -= HPAGE_PMD_NR;
2611b20ce5e0SKirill A. Shutemov 	return ret;
2612b20ce5e0SKirill A. Shutemov }
2613b20ce5e0SKirill A. Shutemov 
2614e9b61f19SKirill A. Shutemov /*
26156d0a07edSAndrea Arcangeli  * This calculates accurately how many mappings a transparent hugepage
26166d0a07edSAndrea Arcangeli  * has (unlike page_mapcount() which isn't fully accurate). This full
26176d0a07edSAndrea Arcangeli  * accuracy is primarily needed to know if copy-on-write faults can
26186d0a07edSAndrea Arcangeli  * reuse the page and change the mapping to read-write instead of
26196d0a07edSAndrea Arcangeli  * copying them. At the same time this returns the total_mapcount too.
26206d0a07edSAndrea Arcangeli  *
26216d0a07edSAndrea Arcangeli  * The function returns the highest mapcount any one of the subpages
26226d0a07edSAndrea Arcangeli  * has. If the return value is one, even if different processes are
26236d0a07edSAndrea Arcangeli  * mapping different subpages of the transparent hugepage, they can
26246d0a07edSAndrea Arcangeli  * all reuse it, because each process is reusing a different subpage.
26256d0a07edSAndrea Arcangeli  *
26266d0a07edSAndrea Arcangeli  * The total_mapcount is instead counting all virtual mappings of the
26276d0a07edSAndrea Arcangeli  * subpages. If the total_mapcount is equal to "one", it tells the
26286d0a07edSAndrea Arcangeli  * caller all mappings belong to the same "mm" and in turn the
26296d0a07edSAndrea Arcangeli  * anon_vma of the transparent hugepage can become the vma->anon_vma
26306d0a07edSAndrea Arcangeli  * local one as no other process may be mapping any of the subpages.
26316d0a07edSAndrea Arcangeli  *
26326d0a07edSAndrea Arcangeli  * It would be more accurate to replace page_mapcount() with
26336d0a07edSAndrea Arcangeli  * page_trans_huge_mapcount(), however we only use
26346d0a07edSAndrea Arcangeli  * page_trans_huge_mapcount() in the copy-on-write faults where we
26356d0a07edSAndrea Arcangeli  * need full accuracy to avoid breaking page pinning, because
26366d0a07edSAndrea Arcangeli  * page_trans_huge_mapcount() is slower than page_mapcount().
26376d0a07edSAndrea Arcangeli  */
26386d0a07edSAndrea Arcangeli int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
26396d0a07edSAndrea Arcangeli {
26406d0a07edSAndrea Arcangeli 	int i, ret, _total_mapcount, mapcount;
26416d0a07edSAndrea Arcangeli 
26426d0a07edSAndrea Arcangeli 	/* hugetlbfs shouldn't call it */
26436d0a07edSAndrea Arcangeli 	VM_BUG_ON_PAGE(PageHuge(page), page);
26446d0a07edSAndrea Arcangeli 
26456d0a07edSAndrea Arcangeli 	if (likely(!PageTransCompound(page))) {
26466d0a07edSAndrea Arcangeli 		mapcount = atomic_read(&page->_mapcount) + 1;
26476d0a07edSAndrea Arcangeli 		if (total_mapcount)
26486d0a07edSAndrea Arcangeli 			*total_mapcount = mapcount;
26496d0a07edSAndrea Arcangeli 		return mapcount;
26506d0a07edSAndrea Arcangeli 	}
26516d0a07edSAndrea Arcangeli 
26526d0a07edSAndrea Arcangeli 	page = compound_head(page);
26536d0a07edSAndrea Arcangeli 
26546d0a07edSAndrea Arcangeli 	_total_mapcount = ret = 0;
26556d0a07edSAndrea Arcangeli 	for (i = 0; i < HPAGE_PMD_NR; i++) {
26566d0a07edSAndrea Arcangeli 		mapcount = atomic_read(&page[i]._mapcount) + 1;
26576d0a07edSAndrea Arcangeli 		ret = max(ret, mapcount);
26586d0a07edSAndrea Arcangeli 		_total_mapcount += mapcount;
26596d0a07edSAndrea Arcangeli 	}
26606d0a07edSAndrea Arcangeli 	if (PageDoubleMap(page)) {
26616d0a07edSAndrea Arcangeli 		ret -= 1;
26626d0a07edSAndrea Arcangeli 		_total_mapcount -= HPAGE_PMD_NR;
26636d0a07edSAndrea Arcangeli 	}
26646d0a07edSAndrea Arcangeli 	mapcount = compound_mapcount(page);
26656d0a07edSAndrea Arcangeli 	ret += mapcount;
26666d0a07edSAndrea Arcangeli 	_total_mapcount += mapcount;
26676d0a07edSAndrea Arcangeli 	if (total_mapcount)
26686d0a07edSAndrea Arcangeli 		*total_mapcount = _total_mapcount;
26696d0a07edSAndrea Arcangeli 	return ret;
26706d0a07edSAndrea Arcangeli }
26716d0a07edSAndrea Arcangeli 
2672b8f593cdSHuang Ying /* Racy check whether the huge page can be split */
2673b8f593cdSHuang Ying bool can_split_huge_page(struct page *page, int *pextra_pins)
2674b8f593cdSHuang Ying {
2675b8f593cdSHuang Ying 	int extra_pins;
2676b8f593cdSHuang Ying 
2677aa5dc07fSMatthew Wilcox 	/* Additional pins from page cache */
2678b8f593cdSHuang Ying 	if (PageAnon(page))
2679b8f593cdSHuang Ying 		extra_pins = PageSwapCache(page) ? HPAGE_PMD_NR : 0;
2680b8f593cdSHuang Ying 	else
2681b8f593cdSHuang Ying 		extra_pins = HPAGE_PMD_NR;
2682b8f593cdSHuang Ying 	if (pextra_pins)
2683b8f593cdSHuang Ying 		*pextra_pins = extra_pins;
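	/*
	 * Every mapping holds a reference, the page/swap cache holds
	 * extra_pins, and the caller is expected to hold one pin; any
	 * reference beyond that means someone else still has the page
	 * pinned and the split will fail.
	 */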
2684b8f593cdSHuang Ying 	return total_mapcount(page) == page_count(page) - extra_pins - 1;
2685b8f593cdSHuang Ying }
2686b8f593cdSHuang Ying 
26876d0a07edSAndrea Arcangeli /*
2688e9b61f19SKirill A. Shutemov  * This function splits a huge page into normal pages. @page can point to any
2689e9b61f19SKirill A. Shutemov  * subpage of huge page to split. Split doesn't change the position of @page.
2690e9b61f19SKirill A. Shutemov  *
2691e9b61f19SKirill A. Shutemov  * Only the caller may hold a pin on the @page, otherwise split fails with -EBUSY.
2692e9b61f19SKirill A. Shutemov  * The huge page must be locked.
2693e9b61f19SKirill A. Shutemov  *
2694e9b61f19SKirill A. Shutemov  * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
2695e9b61f19SKirill A. Shutemov  *
2696e9b61f19SKirill A. Shutemov  * Both head page and tail pages will inherit mapping, flags, and so on from
2697e9b61f19SKirill A. Shutemov  * the hugepage.
2698e9b61f19SKirill A. Shutemov  *
2699e9b61f19SKirill A. Shutemov  * The GUP pin and PG_locked are transferred to @page. The rest of the subpages
2700e9b61f19SKirill A. Shutemov  * can be freed if they are not mapped.
2701e9b61f19SKirill A. Shutemov  *
2702e9b61f19SKirill A. Shutemov  * Returns 0 if the hugepage is split successfully.
2703e9b61f19SKirill A. Shutemov  * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
2704e9b61f19SKirill A. Shutemov  * us.
2705e9b61f19SKirill A. Shutemov  */
2706e9b61f19SKirill A. Shutemov int split_huge_page_to_list(struct page *page, struct list_head *list)
2707e9b61f19SKirill A. Shutemov {
2708e9b61f19SKirill A. Shutemov 	struct page *head = compound_head(page);
2709a3d0a918SKirill A. Shutemov 	struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
2710a8803e6cSWei Yang 	struct deferred_split *ds_queue = get_deferred_split_queue(head);
2711baa355fdSKirill A. Shutemov 	struct anon_vma *anon_vma = NULL;
2712baa355fdSKirill A. Shutemov 	struct address_space *mapping = NULL;
2713baa355fdSKirill A. Shutemov 	int count, mapcount, extra_pins, ret;
2714d9654322SKirill A. Shutemov 	bool mlocked;
27150b9b6fffSKirill A. Shutemov 	unsigned long flags;
2716006d3ff2SHugh Dickins 	pgoff_t end;
2717e9b61f19SKirill A. Shutemov 
2718cb829624SWei Yang 	VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
2719a8803e6cSWei Yang 	VM_BUG_ON_PAGE(!PageLocked(head), head);
2720a8803e6cSWei Yang 	VM_BUG_ON_PAGE(!PageCompound(head), head);
2721e9b61f19SKirill A. Shutemov 
2722a8803e6cSWei Yang 	if (PageWriteback(head))
272359807685SHuang Ying 		return -EBUSY;
272459807685SHuang Ying 
2725baa355fdSKirill A. Shutemov 	if (PageAnon(head)) {
2726e9b61f19SKirill A. Shutemov 		/*
2727baa355fdSKirill A. Shutemov 		 * The caller does not necessarily hold an mmap_sem that would
2728baa355fdSKirill A. Shutemov 		 * prevent the anon_vma from disappearing, so we first take a
2729baa355fdSKirill A. Shutemov 		 * reference to it and then lock the anon_vma for write. This
2730baa355fdSKirill A. Shutemov 		 * is similar to page_lock_anon_vma_read except the write lock
2731baa355fdSKirill A. Shutemov 		 * is taken to serialise against parallel split or collapse
2732baa355fdSKirill A. Shutemov 		 * operations.
2733e9b61f19SKirill A. Shutemov 		 */
2734e9b61f19SKirill A. Shutemov 		anon_vma = page_get_anon_vma(head);
2735e9b61f19SKirill A. Shutemov 		if (!anon_vma) {
2736e9b61f19SKirill A. Shutemov 			ret = -EBUSY;
2737e9b61f19SKirill A. Shutemov 			goto out;
2738e9b61f19SKirill A. Shutemov 		}
2739006d3ff2SHugh Dickins 		end = -1;
2740baa355fdSKirill A. Shutemov 		mapping = NULL;
2741e9b61f19SKirill A. Shutemov 		anon_vma_lock_write(anon_vma);
2742baa355fdSKirill A. Shutemov 	} else {
2743baa355fdSKirill A. Shutemov 		mapping = head->mapping;
2744baa355fdSKirill A. Shutemov 
2745baa355fdSKirill A. Shutemov 		/* Truncated ? */
2746baa355fdSKirill A. Shutemov 		if (!mapping) {
2747baa355fdSKirill A. Shutemov 			ret = -EBUSY;
2748baa355fdSKirill A. Shutemov 			goto out;
2749baa355fdSKirill A. Shutemov 		}
2750baa355fdSKirill A. Shutemov 
2751baa355fdSKirill A. Shutemov 		anon_vma = NULL;
2752baa355fdSKirill A. Shutemov 		i_mmap_lock_read(mapping);
2753006d3ff2SHugh Dickins 
2754006d3ff2SHugh Dickins 		/*
2755006d3ff2SHugh Dickins 		 * __split_huge_page() may need to trim off pages beyond EOF:
2756006d3ff2SHugh Dickins 		 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
2757006d3ff2SHugh Dickins 		 * which cannot be nested inside the page tree lock. So note
2758006d3ff2SHugh Dickins 		 * end now: i_size itself may be changed at any moment, but
2759006d3ff2SHugh Dickins 		 * head page lock is good enough to serialize the trimming.
2760006d3ff2SHugh Dickins 		 */
2761006d3ff2SHugh Dickins 		end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
2762baa355fdSKirill A. Shutemov 	}
2763e9b61f19SKirill A. Shutemov 
2764e9b61f19SKirill A. Shutemov 	/*
2765906f9cdfSHugh Dickins 	 * Racy check whether we can split the page, before unmap_page() splits
2766e9b61f19SKirill A. Shutemov 	 * the PMDs.
2767e9b61f19SKirill A. Shutemov 	 */
2768b8f593cdSHuang Ying 	if (!can_split_huge_page(head, &extra_pins)) {
2769e9b61f19SKirill A. Shutemov 		ret = -EBUSY;
2770e9b61f19SKirill A. Shutemov 		goto out_unlock;
2771e9b61f19SKirill A. Shutemov 	}
2772e9b61f19SKirill A. Shutemov 
2773a8803e6cSWei Yang 	mlocked = PageMlocked(head);
2774906f9cdfSHugh Dickins 	unmap_page(head);
2775e9b61f19SKirill A. Shutemov 	VM_BUG_ON_PAGE(compound_mapcount(head), head);
2776e9b61f19SKirill A. Shutemov 
2777d9654322SKirill A. Shutemov 	/* Make sure the page is not on per-CPU pagevec as it takes pin */
2778d9654322SKirill A. Shutemov 	if (mlocked)
2779d9654322SKirill A. Shutemov 		lru_add_drain();
2780d9654322SKirill A. Shutemov 
2781baa355fdSKirill A. Shutemov 	/* prevent PageLRU from going away from under us, and freeze lru stats */
2782f4b7e272SAndrey Ryabinin 	spin_lock_irqsave(&pgdata->lru_lock, flags);
2783baa355fdSKirill A. Shutemov 
2784baa355fdSKirill A. Shutemov 	if (mapping) {
2785aa5dc07fSMatthew Wilcox 		XA_STATE(xas, &mapping->i_pages, page_index(head));
2786baa355fdSKirill A. Shutemov 
2787baa355fdSKirill A. Shutemov 		/*
2788aa5dc07fSMatthew Wilcox 		 * Check if the head page is present in page cache.
2789baa355fdSKirill A. Shutemov 		 * We assume all tail pages are present too, if the head is there.
2790baa355fdSKirill A. Shutemov 		 */
2791aa5dc07fSMatthew Wilcox 		xa_lock(&mapping->i_pages);
2792aa5dc07fSMatthew Wilcox 		if (xas_load(&xas) != head)
2793baa355fdSKirill A. Shutemov 			goto fail;
2794baa355fdSKirill A. Shutemov 	}
2795baa355fdSKirill A. Shutemov 
27960139aa7bSJoonsoo Kim 	/* Prevent deferred_split_scan() from touching ->_refcount */
2797364c1eebSYang Shi 	spin_lock(&ds_queue->split_queue_lock);
2798e9b61f19SKirill A. Shutemov 	count = page_count(head);
2799e9b61f19SKirill A. Shutemov 	mapcount = total_mapcount(head);
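	/*
	 * The split can only proceed if unmap_page() removed every mapping
	 * (mapcount == 0) and freezing the refcount to the expected number
	 * of pins succeeds, which rules out concurrent GUP references.
	 */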
2800baa355fdSKirill A. Shutemov 	if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
28019a982250SKirill A. Shutemov 		if (!list_empty(page_deferred_list(head))) {
2802364c1eebSYang Shi 			ds_queue->split_queue_len--;
28039a982250SKirill A. Shutemov 			list_del(page_deferred_list(head));
28049a982250SKirill A. Shutemov 		}
2805afb97172SWei Yang 		spin_unlock(&ds_queue->split_queue_lock);
280606d3eff6SKirill A. Shutemov 		if (mapping) {
2807a8803e6cSWei Yang 			if (PageSwapBacked(head))
2808a8803e6cSWei Yang 				__dec_node_page_state(head, NR_SHMEM_THPS);
280906d3eff6SKirill A. Shutemov 			else
2810a8803e6cSWei Yang 				__dec_node_page_state(head, NR_FILE_THPS);
281106d3eff6SKirill A. Shutemov 		}
281206d3eff6SKirill A. Shutemov 
2813006d3ff2SHugh Dickins 		__split_huge_page(page, list, end, flags);
281459807685SHuang Ying 		if (PageSwapCache(head)) {
281559807685SHuang Ying 			swp_entry_t entry = { .val = page_private(head) };
281659807685SHuang Ying 
281759807685SHuang Ying 			ret = split_swap_cluster(entry);
281859807685SHuang Ying 		} else
2819e9b61f19SKirill A. Shutemov 			ret = 0;
2820baa355fdSKirill A. Shutemov 	} else {
2821baa355fdSKirill A. Shutemov 		if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
2822e9b61f19SKirill A. Shutemov 			pr_alert("total_mapcount: %u, page_count(): %u\n",
2823e9b61f19SKirill A. Shutemov 					mapcount, count);
2824e9b61f19SKirill A. Shutemov 			if (PageTail(page))
2825e9b61f19SKirill A. Shutemov 				dump_page(head, NULL);
2826bd56086fSKirill A. Shutemov 			dump_page(page, "total_mapcount(head) > 0");
2827e9b61f19SKirill A. Shutemov 			BUG();
2828baa355fdSKirill A. Shutemov 		}
2829364c1eebSYang Shi 		spin_unlock(&ds_queue->split_queue_lock);
2830baa355fdSKirill A. Shutemov fail:		if (mapping)
2831b93b0163SMatthew Wilcox 			xa_unlock(&mapping->i_pages);
2832f4b7e272SAndrey Ryabinin 		spin_unlock_irqrestore(&pgdata->lru_lock, flags);
2833906f9cdfSHugh Dickins 		remap_page(head);
2834e9b61f19SKirill A. Shutemov 		ret = -EBUSY;
2835e9b61f19SKirill A. Shutemov 	}
2836e9b61f19SKirill A. Shutemov 
2837e9b61f19SKirill A. Shutemov out_unlock:
2838baa355fdSKirill A. Shutemov 	if (anon_vma) {
2839e9b61f19SKirill A. Shutemov 		anon_vma_unlock_write(anon_vma);
2840e9b61f19SKirill A. Shutemov 		put_anon_vma(anon_vma);
2841baa355fdSKirill A. Shutemov 	}
2842baa355fdSKirill A. Shutemov 	if (mapping)
2843baa355fdSKirill A. Shutemov 		i_mmap_unlock_read(mapping);
2844e9b61f19SKirill A. Shutemov out:
2845e9b61f19SKirill A. Shutemov 	count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
2846e9b61f19SKirill A. Shutemov 	return ret;
2847e9b61f19SKirill A. Shutemov }
28489a982250SKirill A. Shutemov 
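/*
 * free_transhuge_page() is the compound page destructor for THPs: take the
 * page off the deferred split queue, if it is queued, before the compound
 * page is returned to the allocator.
 */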
28499a982250SKirill A. Shutemov void free_transhuge_page(struct page *page)
28509a982250SKirill A. Shutemov {
285187eaceb3SYang Shi 	struct deferred_split *ds_queue = get_deferred_split_queue(page);
28529a982250SKirill A. Shutemov 	unsigned long flags;
28539a982250SKirill A. Shutemov 
2854364c1eebSYang Shi 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
28559a982250SKirill A. Shutemov 	if (!list_empty(page_deferred_list(page))) {
2856364c1eebSYang Shi 		ds_queue->split_queue_len--;
28579a982250SKirill A. Shutemov 		list_del(page_deferred_list(page));
28589a982250SKirill A. Shutemov 	}
2859364c1eebSYang Shi 	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
28609a982250SKirill A. Shutemov 	free_compound_page(page);
28619a982250SKirill A. Shutemov }
28629a982250SKirill A. Shutemov 
28639a982250SKirill A. Shutemov void deferred_split_huge_page(struct page *page)
28649a982250SKirill A. Shutemov {
286587eaceb3SYang Shi 	struct deferred_split *ds_queue = get_deferred_split_queue(page);
286687eaceb3SYang Shi #ifdef CONFIG_MEMCG
286787eaceb3SYang Shi 	struct mem_cgroup *memcg = compound_head(page)->mem_cgroup;
286887eaceb3SYang Shi #endif
28699a982250SKirill A. Shutemov 	unsigned long flags;
28709a982250SKirill A. Shutemov 
28719a982250SKirill A. Shutemov 	VM_BUG_ON_PAGE(!PageTransHuge(page), page);
28729a982250SKirill A. Shutemov 
287387eaceb3SYang Shi 	/*
287487eaceb3SYang Shi 	 * The try_to_unmap() in the page reclaim path might reach here too;
287587eaceb3SYang Shi 	 * this may cause a race condition that corrupts the deferred split queue.
287687eaceb3SYang Shi 	 * Also, if page reclaim is already handling the same page, it is
287787eaceb3SYang Shi 	 * unnecessary to handle it again in the shrinker.
287887eaceb3SYang Shi 	 *
287987eaceb3SYang Shi 	 * Check PageSwapCache to determine if the page is being
288087eaceb3SYang Shi 	 * handled by page reclaim since THP swap would add the page into
288187eaceb3SYang Shi 	 * swap cache before calling try_to_unmap().
288287eaceb3SYang Shi 	 */
288387eaceb3SYang Shi 	if (PageSwapCache(page))
288487eaceb3SYang Shi 		return;
288587eaceb3SYang Shi 
2886364c1eebSYang Shi 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
28879a982250SKirill A. Shutemov 	if (list_empty(page_deferred_list(page))) {
2888f9719a03SKirill A. Shutemov 		count_vm_event(THP_DEFERRED_SPLIT_PAGE);
2889364c1eebSYang Shi 		list_add_tail(page_deferred_list(page), &ds_queue->split_queue);
2890364c1eebSYang Shi 		ds_queue->split_queue_len++;
289187eaceb3SYang Shi #ifdef CONFIG_MEMCG
289287eaceb3SYang Shi 		if (memcg)
289387eaceb3SYang Shi 			memcg_set_shrinker_bit(memcg, page_to_nid(page),
289487eaceb3SYang Shi 					       deferred_split_shrinker.id);
289587eaceb3SYang Shi #endif
28969a982250SKirill A. Shutemov 	}
2897364c1eebSYang Shi 	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
28989a982250SKirill A. Shutemov }
28999a982250SKirill A. Shutemov 
29009a982250SKirill A. Shutemov static unsigned long deferred_split_count(struct shrinker *shrink,
29019a982250SKirill A. Shutemov 		struct shrink_control *sc)
29029a982250SKirill A. Shutemov {
2903a3d0a918SKirill A. Shutemov 	struct pglist_data *pgdata = NODE_DATA(sc->nid);
2904364c1eebSYang Shi 	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
290587eaceb3SYang Shi 
290687eaceb3SYang Shi #ifdef CONFIG_MEMCG
290787eaceb3SYang Shi 	if (sc->memcg)
290887eaceb3SYang Shi 		ds_queue = &sc->memcg->deferred_split_queue;
290987eaceb3SYang Shi #endif
2910364c1eebSYang Shi 	return READ_ONCE(ds_queue->split_queue_len);
29119a982250SKirill A. Shutemov }
29129a982250SKirill A. Shutemov 
29139a982250SKirill A. Shutemov static unsigned long deferred_split_scan(struct shrinker *shrink,
29149a982250SKirill A. Shutemov 		struct shrink_control *sc)
29159a982250SKirill A. Shutemov {
2916a3d0a918SKirill A. Shutemov 	struct pglist_data *pgdata = NODE_DATA(sc->nid);
2917364c1eebSYang Shi 	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
29189a982250SKirill A. Shutemov 	unsigned long flags;
29199a982250SKirill A. Shutemov 	LIST_HEAD(list), *pos, *next;
29209a982250SKirill A. Shutemov 	struct page *page;
29219a982250SKirill A. Shutemov 	int split = 0;
29229a982250SKirill A. Shutemov 
292387eaceb3SYang Shi #ifdef CONFIG_MEMCG
292487eaceb3SYang Shi 	if (sc->memcg)
292587eaceb3SYang Shi 		ds_queue = &sc->memcg->deferred_split_queue;
292687eaceb3SYang Shi #endif
292787eaceb3SYang Shi 
2928364c1eebSYang Shi 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
29299a982250SKirill A. Shutemov 	/* Take pin on all head pages to avoid freeing them under us */
2930364c1eebSYang Shi 	list_for_each_safe(pos, next, &ds_queue->split_queue) {
29319a982250SKirill A. Shutemov 		page = list_entry((void *)pos, struct page, mapping);
29329a982250SKirill A. Shutemov 		page = compound_head(page);
2933e3ae1953SKirill A. Shutemov 		if (get_page_unless_zero(page)) {
2934e3ae1953SKirill A. Shutemov 			list_move(page_deferred_list(page), &list);
2935e3ae1953SKirill A. Shutemov 		} else {
2936e3ae1953SKirill A. Shutemov 			/* We lost race with put_compound_page() */
29379a982250SKirill A. Shutemov 			list_del_init(page_deferred_list(page));
2938364c1eebSYang Shi 			ds_queue->split_queue_len--;
29399a982250SKirill A. Shutemov 		}
2940e3ae1953SKirill A. Shutemov 		if (!--sc->nr_to_scan)
2941e3ae1953SKirill A. Shutemov 			break;
29429a982250SKirill A. Shutemov 	}
2943364c1eebSYang Shi 	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
29449a982250SKirill A. Shutemov 
29459a982250SKirill A. Shutemov 	list_for_each_safe(pos, next, &list) {
29469a982250SKirill A. Shutemov 		page = list_entry((void *)pos, struct page, mapping);
2947fa41b900SKirill A. Shutemov 		if (!trylock_page(page))
2948fa41b900SKirill A. Shutemov 			goto next;
29499a982250SKirill A. Shutemov 		/* split_huge_page() removes page from list on success */
29509a982250SKirill A. Shutemov 		if (!split_huge_page(page))
29519a982250SKirill A. Shutemov 			split++;
29529a982250SKirill A. Shutemov 		unlock_page(page);
2953fa41b900SKirill A. Shutemov next:
29549a982250SKirill A. Shutemov 		put_page(page);
29559a982250SKirill A. Shutemov 	}
29569a982250SKirill A. Shutemov 
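	/*
	 * Requeue the pages we failed to lock or split so that a later
	 * scan can retry them.
	 */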
2957364c1eebSYang Shi 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
2958364c1eebSYang Shi 	list_splice_tail(&list, &ds_queue->split_queue);
2959364c1eebSYang Shi 	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
29609a982250SKirill A. Shutemov 
2961cb8d68ecSKirill A. Shutemov 	/*
2962cb8d68ecSKirill A. Shutemov 	 * Stop the shrinker if we didn't split any page and the queue is empty.
2963cb8d68ecSKirill A. Shutemov 	 * This can happen if pages were freed under us.
2964cb8d68ecSKirill A. Shutemov 	 */
2965364c1eebSYang Shi 	if (!split && list_empty(&ds_queue->split_queue))
2966cb8d68ecSKirill A. Shutemov 		return SHRINK_STOP;
2967cb8d68ecSKirill A. Shutemov 	return split;
29689a982250SKirill A. Shutemov }
29699a982250SKirill A. Shutemov 
29709a982250SKirill A. Shutemov static struct shrinker deferred_split_shrinker = {
29719a982250SKirill A. Shutemov 	.count_objects = deferred_split_count,
29729a982250SKirill A. Shutemov 	.scan_objects = deferred_split_scan,
29739a982250SKirill A. Shutemov 	.seeks = DEFAULT_SEEKS,
297487eaceb3SYang Shi 	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE |
297587eaceb3SYang Shi 		 SHRINKER_NONSLAB,
29769a982250SKirill A. Shutemov };
297749071d43SKirill A. Shutemov 
297849071d43SKirill A. Shutemov #ifdef CONFIG_DEBUG_FS
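/*
 * Debugfs knob: writing 1 to <debugfs>/split_huge_pages walks every populated
 * zone and tries to split each THP head page found on the LRU, then reports
 * how many were split.
 */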
297949071d43SKirill A. Shutemov static int split_huge_pages_set(void *data, u64 val)
298049071d43SKirill A. Shutemov {
298149071d43SKirill A. Shutemov 	struct zone *zone;
298249071d43SKirill A. Shutemov 	struct page *page;
298349071d43SKirill A. Shutemov 	unsigned long pfn, max_zone_pfn;
298449071d43SKirill A. Shutemov 	unsigned long total = 0, split = 0;
298549071d43SKirill A. Shutemov 
298649071d43SKirill A. Shutemov 	if (val != 1)
298749071d43SKirill A. Shutemov 		return -EINVAL;
298849071d43SKirill A. Shutemov 
298949071d43SKirill A. Shutemov 	for_each_populated_zone(zone) {
299049071d43SKirill A. Shutemov 		max_zone_pfn = zone_end_pfn(zone);
299149071d43SKirill A. Shutemov 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
299249071d43SKirill A. Shutemov 			if (!pfn_valid(pfn))
299349071d43SKirill A. Shutemov 				continue;
299449071d43SKirill A. Shutemov 
299549071d43SKirill A. Shutemov 			page = pfn_to_page(pfn);
299649071d43SKirill A. Shutemov 			if (!get_page_unless_zero(page))
299749071d43SKirill A. Shutemov 				continue;
299849071d43SKirill A. Shutemov 
299949071d43SKirill A. Shutemov 			if (zone != page_zone(page))
300049071d43SKirill A. Shutemov 				goto next;
300149071d43SKirill A. Shutemov 
3002baa355fdSKirill A. Shutemov 			if (!PageHead(page) || PageHuge(page) || !PageLRU(page))
300349071d43SKirill A. Shutemov 				goto next;
300449071d43SKirill A. Shutemov 
300549071d43SKirill A. Shutemov 			total++;
300649071d43SKirill A. Shutemov 			lock_page(page);
300749071d43SKirill A. Shutemov 			if (!split_huge_page(page))
300849071d43SKirill A. Shutemov 				split++;
300949071d43SKirill A. Shutemov 			unlock_page(page);
301049071d43SKirill A. Shutemov next:
301149071d43SKirill A. Shutemov 			put_page(page);
301249071d43SKirill A. Shutemov 		}
301349071d43SKirill A. Shutemov 	}
301449071d43SKirill A. Shutemov 
3015145bdaa1SYang Shi 	pr_info("%lu of %lu THP split\n", split, total);
301649071d43SKirill A. Shutemov 
301749071d43SKirill A. Shutemov 	return 0;
301849071d43SKirill A. Shutemov }
3019f1287869Szhong jiang DEFINE_DEBUGFS_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set,
302049071d43SKirill A. Shutemov 		"%llu\n");
302149071d43SKirill A. Shutemov 
302249071d43SKirill A. Shutemov static int __init split_huge_pages_debugfs(void)
302349071d43SKirill A. Shutemov {
3024d9f7979cSGreg Kroah-Hartman 	debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
302549071d43SKirill A. Shutemov 			    &split_huge_pages_fops);
302649071d43SKirill A. Shutemov 	return 0;
302749071d43SKirill A. Shutemov }
302849071d43SKirill A. Shutemov late_initcall(split_huge_pages_debugfs);
302949071d43SKirill A. Shutemov #endif
3030616b8371SZi Yan 
3031616b8371SZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
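/*
 * Replace a mapped huge pmd with a pmd migration entry, preserving the
 * write and soft-dirty bits, and drop the rmap and page reference that
 * the mapping held.
 */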
3032616b8371SZi Yan void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
3033616b8371SZi Yan 		struct page *page)
3034616b8371SZi Yan {
3035616b8371SZi Yan 	struct vm_area_struct *vma = pvmw->vma;
3036616b8371SZi Yan 	struct mm_struct *mm = vma->vm_mm;
3037616b8371SZi Yan 	unsigned long address = pvmw->address;
3038616b8371SZi Yan 	pmd_t pmdval;
3039616b8371SZi Yan 	swp_entry_t entry;
3040ab6e3d09SNaoya Horiguchi 	pmd_t pmdswp;
3041616b8371SZi Yan 
3042616b8371SZi Yan 	if (!(pvmw->pmd && !pvmw->pte))
3043616b8371SZi Yan 		return;
3044616b8371SZi Yan 
3045616b8371SZi Yan 	flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
3046*8a8683adSHuang Ying 	pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
3047616b8371SZi Yan 	if (pmd_dirty(pmdval))
3048616b8371SZi Yan 		set_page_dirty(page);
3049616b8371SZi Yan 	entry = make_migration_entry(page, pmd_write(pmdval));
3050ab6e3d09SNaoya Horiguchi 	pmdswp = swp_entry_to_pmd(entry);
3051ab6e3d09SNaoya Horiguchi 	if (pmd_soft_dirty(pmdval))
3052ab6e3d09SNaoya Horiguchi 		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
3053ab6e3d09SNaoya Horiguchi 	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
3054616b8371SZi Yan 	page_remove_rmap(page, true);
3055616b8371SZi Yan 	put_page(page);
3056616b8371SZi Yan }
3057616b8371SZi Yan 
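/*
 * Restore a present huge pmd for @new from the pmd migration entry found by
 * the page_vma_mapped_walk, re-establishing the rmap and, for mlocked VMAs,
 * the page's mlock state.
 */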
3058616b8371SZi Yan void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
3059616b8371SZi Yan {
3060616b8371SZi Yan 	struct vm_area_struct *vma = pvmw->vma;
3061616b8371SZi Yan 	struct mm_struct *mm = vma->vm_mm;
3062616b8371SZi Yan 	unsigned long address = pvmw->address;
3063616b8371SZi Yan 	unsigned long mmun_start = address & HPAGE_PMD_MASK;
3064616b8371SZi Yan 	pmd_t pmde;
3065616b8371SZi Yan 	swp_entry_t entry;
3066616b8371SZi Yan 
3067616b8371SZi Yan 	if (!(pvmw->pmd && !pvmw->pte))
3068616b8371SZi Yan 		return;
3069616b8371SZi Yan 
3070616b8371SZi Yan 	entry = pmd_to_swp_entry(*pvmw->pmd);
3071616b8371SZi Yan 	get_page(new);
3072616b8371SZi Yan 	pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
3073ab6e3d09SNaoya Horiguchi 	if (pmd_swp_soft_dirty(*pvmw->pmd))
3074ab6e3d09SNaoya Horiguchi 		pmde = pmd_mksoft_dirty(pmde);
3075616b8371SZi Yan 	if (is_write_migration_entry(entry))
3076f55e1014SLinus Torvalds 		pmde = maybe_pmd_mkwrite(pmde, vma);
3077616b8371SZi Yan 
3078616b8371SZi Yan 	flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
3079e71769aeSNaoya Horiguchi 	if (PageAnon(new))
3080616b8371SZi Yan 		page_add_anon_rmap(new, vma, mmun_start, true);
3081e71769aeSNaoya Horiguchi 	else
3082e71769aeSNaoya Horiguchi 		page_add_file_rmap(new, true);
3083616b8371SZi Yan 	set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
3084e125fe40SKirill A. Shutemov 	if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
3085616b8371SZi Yan 		mlock_vma_page(new);
3086616b8371SZi Yan 	update_mmu_cache_pmd(vma, address, pvmw->pmd);
3087616b8371SZi Yan }
3088616b8371SZi Yan #endif
3089