xref: /linux/mm/huge_memory.c (revision 6a3827d7509cbf96b7e961f8957c1f01d1bcf894)
171e3aac0SAndrea Arcangeli /*
271e3aac0SAndrea Arcangeli  *  Copyright (C) 2009  Red Hat, Inc.
371e3aac0SAndrea Arcangeli  *
471e3aac0SAndrea Arcangeli  *  This work is licensed under the terms of the GNU GPL, version 2. See
571e3aac0SAndrea Arcangeli  *  the COPYING file in the top-level directory.
671e3aac0SAndrea Arcangeli  */
771e3aac0SAndrea Arcangeli 
8ae3a8c1cSAndrew Morton #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9ae3a8c1cSAndrew Morton 
1071e3aac0SAndrea Arcangeli #include <linux/mm.h>
1171e3aac0SAndrea Arcangeli #include <linux/sched.h>
12f7ccbae4SIngo Molnar #include <linux/sched/coredump.h>
13*6a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h>
1471e3aac0SAndrea Arcangeli #include <linux/highmem.h>
1571e3aac0SAndrea Arcangeli #include <linux/hugetlb.h>
1671e3aac0SAndrea Arcangeli #include <linux/mmu_notifier.h>
1771e3aac0SAndrea Arcangeli #include <linux/rmap.h>
1871e3aac0SAndrea Arcangeli #include <linux/swap.h>
1997ae1749SKirill A. Shutemov #include <linux/shrinker.h>
20ba76149fSAndrea Arcangeli #include <linux/mm_inline.h>
21e9b61f19SKirill A. Shutemov #include <linux/swapops.h>
224897c765SMatthew Wilcox #include <linux/dax.h>
23ba76149fSAndrea Arcangeli #include <linux/khugepaged.h>
24878aee7dSAndrea Arcangeli #include <linux/freezer.h>
25f25748e3SDan Williams #include <linux/pfn_t.h>
26a664b2d8SAndrea Arcangeli #include <linux/mman.h>
273565fce3SDan Williams #include <linux/memremap.h>
28325adeb5SRalf Baechle #include <linux/pagemap.h>
2949071d43SKirill A. Shutemov #include <linux/debugfs.h>
304daae3b4SMel Gorman #include <linux/migrate.h>
3143b5fbbdSSasha Levin #include <linux/hashtable.h>
326b251fc9SAndrea Arcangeli #include <linux/userfaultfd_k.h>
3333c3fc71SVladimir Davydov #include <linux/page_idle.h>
34baa355fdSKirill A. Shutemov #include <linux/shmem_fs.h>
3597ae1749SKirill A. Shutemov 
3671e3aac0SAndrea Arcangeli #include <asm/tlb.h>
3771e3aac0SAndrea Arcangeli #include <asm/pgalloc.h>
3871e3aac0SAndrea Arcangeli #include "internal.h"
3971e3aac0SAndrea Arcangeli 
40ba76149fSAndrea Arcangeli /*
418bfa3f9aSJianguo Wu  * By default, transparent hugepage support is disabled in order to avoid
428bfa3f9aSJianguo Wu  * risking an increased memory footprint for applications without a
438bfa3f9aSJianguo Wu  * guaranteed benefit. When transparent hugepage support is enabled, it is
448bfa3f9aSJianguo Wu  * used for all mappings, and khugepaged scans all mappings.
458bfa3f9aSJianguo Wu  * Defrag is invoked by khugepaged hugepage allocations and by page faults
468bfa3f9aSJianguo Wu  * for all hugepage allocations.
47ba76149fSAndrea Arcangeli  */
4871e3aac0SAndrea Arcangeli unsigned long transparent_hugepage_flags __read_mostly =
4913ece886SAndrea Arcangeli #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
50ba76149fSAndrea Arcangeli 	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
5113ece886SAndrea Arcangeli #endif
5213ece886SAndrea Arcangeli #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
5313ece886SAndrea Arcangeli 	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
5413ece886SAndrea Arcangeli #endif
55444eb2a4SMel Gorman 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
5679da5407SKirill A. Shutemov 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
5779da5407SKirill A. Shutemov 	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
58ba76149fSAndrea Arcangeli 
599a982250SKirill A. Shutemov static struct shrinker deferred_split_shrinker;
60f000565aSAndrea Arcangeli 
6197ae1749SKirill A. Shutemov static atomic_t huge_zero_refcount;
6256873f43SWang, Yalin struct page *huge_zero_page __read_mostly;
634a6c1297SKirill A. Shutemov 
646fcb52a5SAaron Lu static struct page *get_huge_zero_page(void)
6597ae1749SKirill A. Shutemov {
6697ae1749SKirill A. Shutemov 	struct page *zero_page;
6797ae1749SKirill A. Shutemov retry:
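	/*
	 * Fast path: a non-zero refcount means the huge zero page already
	 * exists, so just take another reference.  A zero refcount means
	 * the shrinker may be freeing it, so allocate a fresh page below.
	 */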
6897ae1749SKirill A. Shutemov 	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
694db0c3c2SJason Low 		return READ_ONCE(huge_zero_page);
7097ae1749SKirill A. Shutemov 
7197ae1749SKirill A. Shutemov 	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
7297ae1749SKirill A. Shutemov 			HPAGE_PMD_ORDER);
73d8a8e1f0SKirill A. Shutemov 	if (!zero_page) {
74d8a8e1f0SKirill A. Shutemov 		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
755918d10aSKirill A. Shutemov 		return NULL;
76d8a8e1f0SKirill A. Shutemov 	}
77d8a8e1f0SKirill A. Shutemov 	count_vm_event(THP_ZERO_PAGE_ALLOC);
7897ae1749SKirill A. Shutemov 	preempt_disable();
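	/*
	 * Publish the new page with cmpxchg; if another thread installed a
	 * page first, free ours and retry so that we take a reference on
	 * the winner's page instead.
	 */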
795918d10aSKirill A. Shutemov 	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
8097ae1749SKirill A. Shutemov 		preempt_enable();
815ddacbe9SYu Zhao 		__free_pages(zero_page, compound_order(zero_page));
8297ae1749SKirill A. Shutemov 		goto retry;
8397ae1749SKirill A. Shutemov 	}
8497ae1749SKirill A. Shutemov 
8597ae1749SKirill A. Shutemov 	/* We take an additional reference here. It will be put back by the shrinker */
8697ae1749SKirill A. Shutemov 	atomic_set(&huge_zero_refcount, 2);
8797ae1749SKirill A. Shutemov 	preempt_enable();
884db0c3c2SJason Low 	return READ_ONCE(huge_zero_page);
8997ae1749SKirill A. Shutemov }
9097ae1749SKirill A. Shutemov 
916fcb52a5SAaron Lu static void put_huge_zero_page(void)
9297ae1749SKirill A. Shutemov {
9397ae1749SKirill A. Shutemov 	/*
9497ae1749SKirill A. Shutemov 	 * Counter should never go to zero here. Only shrinker can put
9597ae1749SKirill A. Shutemov 	 * last reference.
9697ae1749SKirill A. Shutemov 	 */
9797ae1749SKirill A. Shutemov 	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
9897ae1749SKirill A. Shutemov }
9997ae1749SKirill A. Shutemov 
1006fcb52a5SAaron Lu struct page *mm_get_huge_zero_page(struct mm_struct *mm)
1016fcb52a5SAaron Lu {
1026fcb52a5SAaron Lu 	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
1036fcb52a5SAaron Lu 		return READ_ONCE(huge_zero_page);
1046fcb52a5SAaron Lu 
1056fcb52a5SAaron Lu 	if (!get_huge_zero_page())
1066fcb52a5SAaron Lu 		return NULL;
1076fcb52a5SAaron Lu 
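	/*
	 * MMF_HUGE_ZERO_PAGE records that this mm already holds a reference
	 * on the huge zero page.  If another thread set the bit first, its
	 * reference is the one that counts, so drop the one we just took.
	 */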
1086fcb52a5SAaron Lu 	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
1096fcb52a5SAaron Lu 		put_huge_zero_page();
1106fcb52a5SAaron Lu 
1116fcb52a5SAaron Lu 	return READ_ONCE(huge_zero_page);
1126fcb52a5SAaron Lu }
1136fcb52a5SAaron Lu 
1146fcb52a5SAaron Lu void mm_put_huge_zero_page(struct mm_struct *mm)
1156fcb52a5SAaron Lu {
1166fcb52a5SAaron Lu 	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
1176fcb52a5SAaron Lu 		put_huge_zero_page();
1186fcb52a5SAaron Lu }
1196fcb52a5SAaron Lu 
12048896466SGlauber Costa static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
12197ae1749SKirill A. Shutemov 					struct shrink_control *sc)
12297ae1749SKirill A. Shutemov {
12397ae1749SKirill A. Shutemov 	/* we can free zero page only if last reference remains */
12497ae1749SKirill A. Shutemov 	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
12548896466SGlauber Costa }
12697ae1749SKirill A. Shutemov 
12748896466SGlauber Costa static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
12848896466SGlauber Costa 				       struct shrink_control *sc)
12948896466SGlauber Costa {
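	/*
	 * A refcount of exactly 1 means only the extra reference taken at
	 * allocation time is left, i.e. no mm is using the page.  Drop the
	 * count to 0 and clear the pointer, so a racing get_huge_zero_page()
	 * allocates a fresh page instead of reusing the one being freed.
	 */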
13097ae1749SKirill A. Shutemov 	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
1315918d10aSKirill A. Shutemov 		struct page *zero_page = xchg(&huge_zero_page, NULL);
1325918d10aSKirill A. Shutemov 		BUG_ON(zero_page == NULL);
1335ddacbe9SYu Zhao 		__free_pages(zero_page, compound_order(zero_page));
13448896466SGlauber Costa 		return HPAGE_PMD_NR;
13597ae1749SKirill A. Shutemov 	}
13697ae1749SKirill A. Shutemov 
13797ae1749SKirill A. Shutemov 	return 0;
13897ae1749SKirill A. Shutemov }
13997ae1749SKirill A. Shutemov 
14097ae1749SKirill A. Shutemov static struct shrinker huge_zero_page_shrinker = {
14148896466SGlauber Costa 	.count_objects = shrink_huge_zero_page_count,
14248896466SGlauber Costa 	.scan_objects = shrink_huge_zero_page_scan,
14397ae1749SKirill A. Shutemov 	.seeks = DEFAULT_SEEKS,
14497ae1749SKirill A. Shutemov };
14597ae1749SKirill A. Shutemov 
14671e3aac0SAndrea Arcangeli #ifdef CONFIG_SYSFS
14771e3aac0SAndrea Arcangeli static ssize_t enabled_show(struct kobject *kobj,
14871e3aac0SAndrea Arcangeli 			    struct kobj_attribute *attr, char *buf)
14971e3aac0SAndrea Arcangeli {
150444eb2a4SMel Gorman 	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
151444eb2a4SMel Gorman 		return sprintf(buf, "[always] madvise never\n");
152444eb2a4SMel Gorman 	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags))
153444eb2a4SMel Gorman 		return sprintf(buf, "always [madvise] never\n");
154444eb2a4SMel Gorman 	else
155444eb2a4SMel Gorman 		return sprintf(buf, "always madvise [never]\n");
15671e3aac0SAndrea Arcangeli }
157444eb2a4SMel Gorman 
15871e3aac0SAndrea Arcangeli static ssize_t enabled_store(struct kobject *kobj,
15971e3aac0SAndrea Arcangeli 			     struct kobj_attribute *attr,
16071e3aac0SAndrea Arcangeli 			     const char *buf, size_t count)
16171e3aac0SAndrea Arcangeli {
16221440d7eSDavid Rientjes 	ssize_t ret = count;
163ba76149fSAndrea Arcangeli 
16421440d7eSDavid Rientjes 	if (!memcmp("always", buf,
16521440d7eSDavid Rientjes 		    min(sizeof("always")-1, count))) {
16621440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
16721440d7eSDavid Rientjes 		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
16821440d7eSDavid Rientjes 	} else if (!memcmp("madvise", buf,
16921440d7eSDavid Rientjes 			   min(sizeof("madvise")-1, count))) {
17021440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
17121440d7eSDavid Rientjes 		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
17221440d7eSDavid Rientjes 	} else if (!memcmp("never", buf,
17321440d7eSDavid Rientjes 			   min(sizeof("never")-1, count))) {
17421440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
17521440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
17621440d7eSDavid Rientjes 	} else
17721440d7eSDavid Rientjes 		ret = -EINVAL;
178ba76149fSAndrea Arcangeli 
179ba76149fSAndrea Arcangeli 	if (ret > 0) {
180b46e756fSKirill A. Shutemov 		int err = start_stop_khugepaged();
181ba76149fSAndrea Arcangeli 		if (err)
182ba76149fSAndrea Arcangeli 			ret = err;
183ba76149fSAndrea Arcangeli 	}
184ba76149fSAndrea Arcangeli 	return ret;
18571e3aac0SAndrea Arcangeli }
18671e3aac0SAndrea Arcangeli static struct kobj_attribute enabled_attr =
18771e3aac0SAndrea Arcangeli 	__ATTR(enabled, 0644, enabled_show, enabled_store);
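/*
 * The "enabled" attribute is exposed as
 * /sys/kernel/mm/transparent_hugepage/enabled and accepts "always",
 * "madvise" or "never", e.g.:
 *
 *	echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 */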
18871e3aac0SAndrea Arcangeli 
189b46e756fSKirill A. Shutemov ssize_t single_hugepage_flag_show(struct kobject *kobj,
19071e3aac0SAndrea Arcangeli 				struct kobj_attribute *attr, char *buf,
19171e3aac0SAndrea Arcangeli 				enum transparent_hugepage_flag flag)
19271e3aac0SAndrea Arcangeli {
193e27e6151SBen Hutchings 	return sprintf(buf, "%d\n",
194e27e6151SBen Hutchings 		       !!test_bit(flag, &transparent_hugepage_flags));
19571e3aac0SAndrea Arcangeli }
196e27e6151SBen Hutchings 
197b46e756fSKirill A. Shutemov ssize_t single_hugepage_flag_store(struct kobject *kobj,
19871e3aac0SAndrea Arcangeli 				 struct kobj_attribute *attr,
19971e3aac0SAndrea Arcangeli 				 const char *buf, size_t count,
20071e3aac0SAndrea Arcangeli 				 enum transparent_hugepage_flag flag)
20171e3aac0SAndrea Arcangeli {
202e27e6151SBen Hutchings 	unsigned long value;
203e27e6151SBen Hutchings 	int ret;
204e27e6151SBen Hutchings 
205e27e6151SBen Hutchings 	ret = kstrtoul(buf, 10, &value);
206e27e6151SBen Hutchings 	if (ret < 0)
207e27e6151SBen Hutchings 		return ret;
208e27e6151SBen Hutchings 	if (value > 1)
20971e3aac0SAndrea Arcangeli 		return -EINVAL;
21071e3aac0SAndrea Arcangeli 
211e27e6151SBen Hutchings 	if (value)
212e27e6151SBen Hutchings 		set_bit(flag, &transparent_hugepage_flags);
213e27e6151SBen Hutchings 	else
214e27e6151SBen Hutchings 		clear_bit(flag, &transparent_hugepage_flags);
215e27e6151SBen Hutchings 
21671e3aac0SAndrea Arcangeli 	return count;
21771e3aac0SAndrea Arcangeli }
21871e3aac0SAndrea Arcangeli 
21971e3aac0SAndrea Arcangeli static ssize_t defrag_show(struct kobject *kobj,
22071e3aac0SAndrea Arcangeli 			   struct kobj_attribute *attr, char *buf)
22171e3aac0SAndrea Arcangeli {
222444eb2a4SMel Gorman 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
22321440d7eSDavid Rientjes 		return sprintf(buf, "[always] defer defer+madvise madvise never\n");
224444eb2a4SMel Gorman 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
22521440d7eSDavid Rientjes 		return sprintf(buf, "always [defer] defer+madvise madvise never\n");
22621440d7eSDavid Rientjes 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
22721440d7eSDavid Rientjes 		return sprintf(buf, "always defer [defer+madvise] madvise never\n");
22821440d7eSDavid Rientjes 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
22921440d7eSDavid Rientjes 		return sprintf(buf, "always defer defer+madvise [madvise] never\n");
23021440d7eSDavid Rientjes 	return sprintf(buf, "always defer defer+madvise madvise [never]\n");
23171e3aac0SAndrea Arcangeli }
23221440d7eSDavid Rientjes 
23371e3aac0SAndrea Arcangeli static ssize_t defrag_store(struct kobject *kobj,
23471e3aac0SAndrea Arcangeli 			    struct kobj_attribute *attr,
23571e3aac0SAndrea Arcangeli 			    const char *buf, size_t count)
23671e3aac0SAndrea Arcangeli {
23721440d7eSDavid Rientjes 	if (!memcmp("always", buf,
23821440d7eSDavid Rientjes 		    min(sizeof("always")-1, count))) {
23921440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
24021440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
24121440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
24221440d7eSDavid Rientjes 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
24321440d7eSDavid Rientjes 	} else if (!memcmp("defer", buf,
24421440d7eSDavid Rientjes 		    min(sizeof("defer")-1, count))) {
24521440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
24621440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
24721440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
24821440d7eSDavid Rientjes 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
24921440d7eSDavid Rientjes 	} else if (!memcmp("defer+madvise", buf,
25021440d7eSDavid Rientjes 		    min(sizeof("defer+madvise")-1, count))) {
25121440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
25221440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
25321440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
25421440d7eSDavid Rientjes 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
25521440d7eSDavid Rientjes 	} else if (!memcmp("madvise", buf,
25621440d7eSDavid Rientjes 			   min(sizeof("madvise")-1, count))) {
25721440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
25821440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
25921440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
26021440d7eSDavid Rientjes 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
26121440d7eSDavid Rientjes 	} else if (!memcmp("never", buf,
26221440d7eSDavid Rientjes 			   min(sizeof("never")-1, count))) {
26321440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
26421440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
26521440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
26621440d7eSDavid Rientjes 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
26721440d7eSDavid Rientjes 	} else
26821440d7eSDavid Rientjes 		return -EINVAL;
26921440d7eSDavid Rientjes 
27021440d7eSDavid Rientjes 	return count;
27171e3aac0SAndrea Arcangeli }
27271e3aac0SAndrea Arcangeli static struct kobj_attribute defrag_attr =
27371e3aac0SAndrea Arcangeli 	__ATTR(defrag, 0644, defrag_show, defrag_store);
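/*
 * The "defrag" attribute is exposed as
 * /sys/kernel/mm/transparent_hugepage/defrag and accepts "always",
 * "defer", "defer+madvise", "madvise" or "never".
 */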
27471e3aac0SAndrea Arcangeli 
27579da5407SKirill A. Shutemov static ssize_t use_zero_page_show(struct kobject *kobj,
27679da5407SKirill A. Shutemov 		struct kobj_attribute *attr, char *buf)
27779da5407SKirill A. Shutemov {
278b46e756fSKirill A. Shutemov 	return single_hugepage_flag_show(kobj, attr, buf,
27979da5407SKirill A. Shutemov 				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
28079da5407SKirill A. Shutemov }
28179da5407SKirill A. Shutemov static ssize_t use_zero_page_store(struct kobject *kobj,
28279da5407SKirill A. Shutemov 		struct kobj_attribute *attr, const char *buf, size_t count)
28379da5407SKirill A. Shutemov {
284b46e756fSKirill A. Shutemov 	return single_hugepage_flag_store(kobj, attr, buf, count,
28579da5407SKirill A. Shutemov 				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
28679da5407SKirill A. Shutemov }
28779da5407SKirill A. Shutemov static struct kobj_attribute use_zero_page_attr =
28879da5407SKirill A. Shutemov 	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
28949920d28SHugh Dickins 
29049920d28SHugh Dickins static ssize_t hpage_pmd_size_show(struct kobject *kobj,
29149920d28SHugh Dickins 		struct kobj_attribute *attr, char *buf)
29249920d28SHugh Dickins {
29349920d28SHugh Dickins 	return sprintf(buf, "%lu\n", HPAGE_PMD_SIZE);
29449920d28SHugh Dickins }
29549920d28SHugh Dickins static struct kobj_attribute hpage_pmd_size_attr =
29649920d28SHugh Dickins 	__ATTR_RO(hpage_pmd_size);
29749920d28SHugh Dickins 
29871e3aac0SAndrea Arcangeli #ifdef CONFIG_DEBUG_VM
29971e3aac0SAndrea Arcangeli static ssize_t debug_cow_show(struct kobject *kobj,
30071e3aac0SAndrea Arcangeli 				struct kobj_attribute *attr, char *buf)
30171e3aac0SAndrea Arcangeli {
302b46e756fSKirill A. Shutemov 	return single_hugepage_flag_show(kobj, attr, buf,
30371e3aac0SAndrea Arcangeli 				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
30471e3aac0SAndrea Arcangeli }
30571e3aac0SAndrea Arcangeli static ssize_t debug_cow_store(struct kobject *kobj,
30671e3aac0SAndrea Arcangeli 			       struct kobj_attribute *attr,
30771e3aac0SAndrea Arcangeli 			       const char *buf, size_t count)
30871e3aac0SAndrea Arcangeli {
309b46e756fSKirill A. Shutemov 	return single_hugepage_flag_store(kobj, attr, buf, count,
31071e3aac0SAndrea Arcangeli 				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
31171e3aac0SAndrea Arcangeli }
31271e3aac0SAndrea Arcangeli static struct kobj_attribute debug_cow_attr =
31371e3aac0SAndrea Arcangeli 	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
31471e3aac0SAndrea Arcangeli #endif /* CONFIG_DEBUG_VM */
31571e3aac0SAndrea Arcangeli 
31671e3aac0SAndrea Arcangeli static struct attribute *hugepage_attr[] = {
31771e3aac0SAndrea Arcangeli 	&enabled_attr.attr,
31871e3aac0SAndrea Arcangeli 	&defrag_attr.attr,
31979da5407SKirill A. Shutemov 	&use_zero_page_attr.attr,
32049920d28SHugh Dickins 	&hpage_pmd_size_attr.attr,
321e496cf3dSKirill A. Shutemov #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
3225a6e75f8SKirill A. Shutemov 	&shmem_enabled_attr.attr,
3235a6e75f8SKirill A. Shutemov #endif
32471e3aac0SAndrea Arcangeli #ifdef CONFIG_DEBUG_VM
32571e3aac0SAndrea Arcangeli 	&debug_cow_attr.attr,
32671e3aac0SAndrea Arcangeli #endif
32771e3aac0SAndrea Arcangeli 	NULL,
32871e3aac0SAndrea Arcangeli };
32971e3aac0SAndrea Arcangeli 
33071e3aac0SAndrea Arcangeli static struct attribute_group hugepage_attr_group = {
33171e3aac0SAndrea Arcangeli 	.attrs = hugepage_attr,
332ba76149fSAndrea Arcangeli };
333ba76149fSAndrea Arcangeli 
334569e5590SShaohua Li static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
335569e5590SShaohua Li {
336569e5590SShaohua Li 	int err;
337569e5590SShaohua Li 
338569e5590SShaohua Li 	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
339569e5590SShaohua Li 	if (unlikely(!*hugepage_kobj)) {
340ae3a8c1cSAndrew Morton 		pr_err("failed to create transparent hugepage kobject\n");
341569e5590SShaohua Li 		return -ENOMEM;
342569e5590SShaohua Li 	}
343569e5590SShaohua Li 
344569e5590SShaohua Li 	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
345569e5590SShaohua Li 	if (err) {
346ae3a8c1cSAndrew Morton 		pr_err("failed to register transparent hugepage group\n");
347569e5590SShaohua Li 		goto delete_obj;
348569e5590SShaohua Li 	}
349569e5590SShaohua Li 
350569e5590SShaohua Li 	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
351569e5590SShaohua Li 	if (err) {
352ae3a8c1cSAndrew Morton 		pr_err("failed to register transparent hugepage group\n");
353569e5590SShaohua Li 		goto remove_hp_group;
354569e5590SShaohua Li 	}
355569e5590SShaohua Li 
356569e5590SShaohua Li 	return 0;
357569e5590SShaohua Li 
358569e5590SShaohua Li remove_hp_group:
359569e5590SShaohua Li 	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
360569e5590SShaohua Li delete_obj:
361569e5590SShaohua Li 	kobject_put(*hugepage_kobj);
362569e5590SShaohua Li 	return err;
363569e5590SShaohua Li }
364569e5590SShaohua Li 
365569e5590SShaohua Li static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
366569e5590SShaohua Li {
367569e5590SShaohua Li 	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
368569e5590SShaohua Li 	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
369569e5590SShaohua Li 	kobject_put(hugepage_kobj);
370569e5590SShaohua Li }
371569e5590SShaohua Li #else
372569e5590SShaohua Li static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
373569e5590SShaohua Li {
374569e5590SShaohua Li 	return 0;
375569e5590SShaohua Li }
376569e5590SShaohua Li 
377569e5590SShaohua Li static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
378569e5590SShaohua Li {
379569e5590SShaohua Li }
38071e3aac0SAndrea Arcangeli #endif /* CONFIG_SYSFS */
38171e3aac0SAndrea Arcangeli 
38271e3aac0SAndrea Arcangeli static int __init hugepage_init(void)
38371e3aac0SAndrea Arcangeli {
38471e3aac0SAndrea Arcangeli 	int err;
385569e5590SShaohua Li 	struct kobject *hugepage_kobj;
38671e3aac0SAndrea Arcangeli 
3874b7167b9SAndrea Arcangeli 	if (!has_transparent_hugepage()) {
3884b7167b9SAndrea Arcangeli 		transparent_hugepage_flags = 0;
389569e5590SShaohua Li 		return -EINVAL;
3904b7167b9SAndrea Arcangeli 	}
3914b7167b9SAndrea Arcangeli 
392ff20c2e0SKirill A. Shutemov 	/*
393ff20c2e0SKirill A. Shutemov 	 * hugepages can't be allocated by the buddy allocator if their order is >= MAX_ORDER
394ff20c2e0SKirill A. Shutemov 	 */
395ff20c2e0SKirill A. Shutemov 	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
396ff20c2e0SKirill A. Shutemov 	/*
397ff20c2e0SKirill A. Shutemov 	 * we use page->mapping and page->index in second tail page
398ff20c2e0SKirill A. Shutemov 	 * as list_head: assuming THP order >= 2
399ff20c2e0SKirill A. Shutemov 	 */
400ff20c2e0SKirill A. Shutemov 	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);
401ff20c2e0SKirill A. Shutemov 
402569e5590SShaohua Li 	err = hugepage_init_sysfs(&hugepage_kobj);
403569e5590SShaohua Li 	if (err)
40465ebb64fSKirill A. Shutemov 		goto err_sysfs;
405ba76149fSAndrea Arcangeli 
406b46e756fSKirill A. Shutemov 	err = khugepaged_init();
407ba76149fSAndrea Arcangeli 	if (err)
40865ebb64fSKirill A. Shutemov 		goto err_slab;
409ba76149fSAndrea Arcangeli 
41065ebb64fSKirill A. Shutemov 	err = register_shrinker(&huge_zero_page_shrinker);
41165ebb64fSKirill A. Shutemov 	if (err)
41265ebb64fSKirill A. Shutemov 		goto err_hzp_shrinker;
4139a982250SKirill A. Shutemov 	err = register_shrinker(&deferred_split_shrinker);
4149a982250SKirill A. Shutemov 	if (err)
4159a982250SKirill A. Shutemov 		goto err_split_shrinker;
41697ae1749SKirill A. Shutemov 
41797562cd2SRik van Riel 	/*
41897562cd2SRik van Riel 	 * By default disable transparent hugepages on smaller systems,
41997562cd2SRik van Riel 	 * where the extra memory used could hurt more than TLB overhead
42097562cd2SRik van Riel 	 * is likely to save.  The admin can still enable it through /sys.
42197562cd2SRik van Riel 	 */
42279553da2SKirill A. Shutemov 	if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
42397562cd2SRik van Riel 		transparent_hugepage_flags = 0;
42479553da2SKirill A. Shutemov 		return 0;
42579553da2SKirill A. Shutemov 	}
42697562cd2SRik van Riel 
42779553da2SKirill A. Shutemov 	err = start_stop_khugepaged();
42865ebb64fSKirill A. Shutemov 	if (err)
42965ebb64fSKirill A. Shutemov 		goto err_khugepaged;
430ba76149fSAndrea Arcangeli 
431569e5590SShaohua Li 	return 0;
43265ebb64fSKirill A. Shutemov err_khugepaged:
4339a982250SKirill A. Shutemov 	unregister_shrinker(&deferred_split_shrinker);
4349a982250SKirill A. Shutemov err_split_shrinker:
43565ebb64fSKirill A. Shutemov 	unregister_shrinker(&huge_zero_page_shrinker);
43665ebb64fSKirill A. Shutemov err_hzp_shrinker:
437b46e756fSKirill A. Shutemov 	khugepaged_destroy();
43865ebb64fSKirill A. Shutemov err_slab:
439569e5590SShaohua Li 	hugepage_exit_sysfs(hugepage_kobj);
44065ebb64fSKirill A. Shutemov err_sysfs:
441ba76149fSAndrea Arcangeli 	return err;
44271e3aac0SAndrea Arcangeli }
443a64fb3cdSPaul Gortmaker subsys_initcall(hugepage_init);
44471e3aac0SAndrea Arcangeli 
44571e3aac0SAndrea Arcangeli static int __init setup_transparent_hugepage(char *str)
44671e3aac0SAndrea Arcangeli {
44771e3aac0SAndrea Arcangeli 	int ret = 0;
44871e3aac0SAndrea Arcangeli 	if (!str)
44971e3aac0SAndrea Arcangeli 		goto out;
45071e3aac0SAndrea Arcangeli 	if (!strcmp(str, "always")) {
45171e3aac0SAndrea Arcangeli 		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
45271e3aac0SAndrea Arcangeli 			&transparent_hugepage_flags);
45371e3aac0SAndrea Arcangeli 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
45471e3aac0SAndrea Arcangeli 			  &transparent_hugepage_flags);
45571e3aac0SAndrea Arcangeli 		ret = 1;
45671e3aac0SAndrea Arcangeli 	} else if (!strcmp(str, "madvise")) {
45771e3aac0SAndrea Arcangeli 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
45871e3aac0SAndrea Arcangeli 			  &transparent_hugepage_flags);
45971e3aac0SAndrea Arcangeli 		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
46071e3aac0SAndrea Arcangeli 			&transparent_hugepage_flags);
46171e3aac0SAndrea Arcangeli 		ret = 1;
46271e3aac0SAndrea Arcangeli 	} else if (!strcmp(str, "never")) {
46371e3aac0SAndrea Arcangeli 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
46471e3aac0SAndrea Arcangeli 			  &transparent_hugepage_flags);
46571e3aac0SAndrea Arcangeli 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
46671e3aac0SAndrea Arcangeli 			  &transparent_hugepage_flags);
46771e3aac0SAndrea Arcangeli 		ret = 1;
46871e3aac0SAndrea Arcangeli 	}
46971e3aac0SAndrea Arcangeli out:
47071e3aac0SAndrea Arcangeli 	if (!ret)
471ae3a8c1cSAndrew Morton 		pr_warn("transparent_hugepage= cannot parse, ignored\n");
47271e3aac0SAndrea Arcangeli 	return ret;
47371e3aac0SAndrea Arcangeli }
47471e3aac0SAndrea Arcangeli __setup("transparent_hugepage=", setup_transparent_hugepage);
47571e3aac0SAndrea Arcangeli 
476b32967ffSMel Gorman pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
47771e3aac0SAndrea Arcangeli {
47871e3aac0SAndrea Arcangeli 	if (likely(vma->vm_flags & VM_WRITE))
47971e3aac0SAndrea Arcangeli 		pmd = pmd_mkwrite(pmd);
48071e3aac0SAndrea Arcangeli 	return pmd;
48171e3aac0SAndrea Arcangeli }
48271e3aac0SAndrea Arcangeli 
4839a982250SKirill A. Shutemov static inline struct list_head *page_deferred_list(struct page *page)
4849a982250SKirill A. Shutemov {
4859a982250SKirill A. Shutemov 	/*
4869a982250SKirill A. Shutemov 	 * ->lru in the tail pages is occupied by compound_head.
4879a982250SKirill A. Shutemov 	 * Let's use ->mapping + ->index in the second tail page as list_head.
4889a982250SKirill A. Shutemov 	 */
4899a982250SKirill A. Shutemov 	return (struct list_head *)&page[2].mapping;
4909a982250SKirill A. Shutemov }
4919a982250SKirill A. Shutemov 
4929a982250SKirill A. Shutemov void prep_transhuge_page(struct page *page)
4939a982250SKirill A. Shutemov {
4949a982250SKirill A. Shutemov 	/*
4959a982250SKirill A. Shutemov 	 * we use page->mapping and page->index in second tail page
4969a982250SKirill A. Shutemov 	 * as list_head: assuming THP order >= 2
4979a982250SKirill A. Shutemov 	 */
4989a982250SKirill A. Shutemov 
4999a982250SKirill A. Shutemov 	INIT_LIST_HEAD(page_deferred_list(page));
5009a982250SKirill A. Shutemov 	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
5019a982250SKirill A. Shutemov }
5029a982250SKirill A. Shutemov 
50374d2fad1SToshi Kani unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len,
50474d2fad1SToshi Kani 		loff_t off, unsigned long flags, unsigned long size)
50574d2fad1SToshi Kani {
50674d2fad1SToshi Kani 	unsigned long addr;
50774d2fad1SToshi Kani 	loff_t off_end = off + len;
50874d2fad1SToshi Kani 	loff_t off_align = round_up(off, size);
50974d2fad1SToshi Kani 	unsigned long len_pad;
51074d2fad1SToshi Kani 
51174d2fad1SToshi Kani 	if (off_end <= off_align || (off_end - off_align) < size)
51274d2fad1SToshi Kani 		return 0;
51374d2fad1SToshi Kani 
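	/*
	 * Pad the request by one extra "size" so that a suitably aligned
	 * start address can always be carved out of the area we get back;
	 * bail out if the padded length or end offset would overflow.
	 */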
51474d2fad1SToshi Kani 	len_pad = len + size;
51574d2fad1SToshi Kani 	if (len_pad < len || (off + len_pad) < off)
51674d2fad1SToshi Kani 		return 0;
51774d2fad1SToshi Kani 
51874d2fad1SToshi Kani 	addr = current->mm->get_unmapped_area(filp, 0, len_pad,
51974d2fad1SToshi Kani 					      off >> PAGE_SHIFT, flags);
52074d2fad1SToshi Kani 	if (IS_ERR_VALUE(addr))
52174d2fad1SToshi Kani 		return 0;
52274d2fad1SToshi Kani 
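	/*
	 * Advance addr within the padded area so that it is congruent to
	 * off modulo "size": file offset and virtual address then share the
	 * same alignment within a huge page, which is what later allows the
	 * range to be mapped with PMD-sized entries.
	 */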
52374d2fad1SToshi Kani 	addr += (off - addr) & (size - 1);
52474d2fad1SToshi Kani 	return addr;
52574d2fad1SToshi Kani }
52674d2fad1SToshi Kani 
52774d2fad1SToshi Kani unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
52874d2fad1SToshi Kani 		unsigned long len, unsigned long pgoff, unsigned long flags)
52974d2fad1SToshi Kani {
53074d2fad1SToshi Kani 	loff_t off = (loff_t)pgoff << PAGE_SHIFT;
53174d2fad1SToshi Kani 
53274d2fad1SToshi Kani 	if (addr)
53374d2fad1SToshi Kani 		goto out;
53474d2fad1SToshi Kani 	if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
53574d2fad1SToshi Kani 		goto out;
53674d2fad1SToshi Kani 
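	/*
	 * For DAX files with PMD support, try to find a PMD-aligned address
	 * first; fall back to the normal search if that fails.
	 */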
53774d2fad1SToshi Kani 	addr = __thp_get_unmapped_area(filp, len, off, flags, PMD_SIZE);
53874d2fad1SToshi Kani 	if (addr)
53974d2fad1SToshi Kani 		return addr;
54074d2fad1SToshi Kani 
54174d2fad1SToshi Kani  out:
54274d2fad1SToshi Kani 	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
54374d2fad1SToshi Kani }
54474d2fad1SToshi Kani EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
54574d2fad1SToshi Kani 
54682b0f8c3SJan Kara static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
547bae473a4SKirill A. Shutemov 		gfp_t gfp)
54871e3aac0SAndrea Arcangeli {
54982b0f8c3SJan Kara 	struct vm_area_struct *vma = vmf->vma;
55000501b53SJohannes Weiner 	struct mem_cgroup *memcg;
55171e3aac0SAndrea Arcangeli 	pgtable_t pgtable;
55282b0f8c3SJan Kara 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
55371e3aac0SAndrea Arcangeli 
554309381feSSasha Levin 	VM_BUG_ON_PAGE(!PageCompound(page), page);
55500501b53SJohannes Weiner 
556bae473a4SKirill A. Shutemov 	if (mem_cgroup_try_charge(page, vma->vm_mm, gfp, &memcg, true)) {
5576b251fc9SAndrea Arcangeli 		put_page(page);
5586b251fc9SAndrea Arcangeli 		count_vm_event(THP_FAULT_FALLBACK);
5596b251fc9SAndrea Arcangeli 		return VM_FAULT_FALLBACK;
5606b251fc9SAndrea Arcangeli 	}
56171e3aac0SAndrea Arcangeli 
562bae473a4SKirill A. Shutemov 	pgtable = pte_alloc_one(vma->vm_mm, haddr);
56300501b53SJohannes Weiner 	if (unlikely(!pgtable)) {
564f627c2f5SKirill A. Shutemov 		mem_cgroup_cancel_charge(page, memcg, true);
5656b251fc9SAndrea Arcangeli 		put_page(page);
56600501b53SJohannes Weiner 		return VM_FAULT_OOM;
56700501b53SJohannes Weiner 	}
56800501b53SJohannes Weiner 
56971e3aac0SAndrea Arcangeli 	clear_huge_page(page, haddr, HPAGE_PMD_NR);
57052f37629SMinchan Kim 	/*
57152f37629SMinchan Kim 	 * The memory barrier inside __SetPageUptodate makes sure that
57252f37629SMinchan Kim 	 * clear_huge_page writes become visible before the set_pmd_at()
57352f37629SMinchan Kim 	 * write.
57452f37629SMinchan Kim 	 */
57571e3aac0SAndrea Arcangeli 	__SetPageUptodate(page);
57671e3aac0SAndrea Arcangeli 
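	/*
	 * Re-check the pmd under the page table lock: another thread may
	 * have installed an entry meanwhile, in which case the freshly
	 * charged page and the preallocated page table are released again.
	 */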
57782b0f8c3SJan Kara 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
57882b0f8c3SJan Kara 	if (unlikely(!pmd_none(*vmf->pmd))) {
57982b0f8c3SJan Kara 		spin_unlock(vmf->ptl);
580f627c2f5SKirill A. Shutemov 		mem_cgroup_cancel_charge(page, memcg, true);
58171e3aac0SAndrea Arcangeli 		put_page(page);
582bae473a4SKirill A. Shutemov 		pte_free(vma->vm_mm, pgtable);
58371e3aac0SAndrea Arcangeli 	} else {
58471e3aac0SAndrea Arcangeli 		pmd_t entry;
5856b251fc9SAndrea Arcangeli 
5866b251fc9SAndrea Arcangeli 		/* Deliver the page fault to userland */
5876b251fc9SAndrea Arcangeli 		if (userfaultfd_missing(vma)) {
5886b251fc9SAndrea Arcangeli 			int ret;
5896b251fc9SAndrea Arcangeli 
59082b0f8c3SJan Kara 			spin_unlock(vmf->ptl);
591f627c2f5SKirill A. Shutemov 			mem_cgroup_cancel_charge(page, memcg, true);
5926b251fc9SAndrea Arcangeli 			put_page(page);
593bae473a4SKirill A. Shutemov 			pte_free(vma->vm_mm, pgtable);
59482b0f8c3SJan Kara 			ret = handle_userfault(vmf, VM_UFFD_MISSING);
5956b251fc9SAndrea Arcangeli 			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
5966b251fc9SAndrea Arcangeli 			return ret;
5976b251fc9SAndrea Arcangeli 		}
5986b251fc9SAndrea Arcangeli 
5993122359aSKirill A. Shutemov 		entry = mk_huge_pmd(page, vma->vm_page_prot);
6003122359aSKirill A. Shutemov 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
601d281ee61SKirill A. Shutemov 		page_add_new_anon_rmap(page, vma, haddr, true);
602f627c2f5SKirill A. Shutemov 		mem_cgroup_commit_charge(page, memcg, false, true);
60300501b53SJohannes Weiner 		lru_cache_add_active_or_unevictable(page, vma);
60482b0f8c3SJan Kara 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
60582b0f8c3SJan Kara 		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
606bae473a4SKirill A. Shutemov 		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
607bae473a4SKirill A. Shutemov 		atomic_long_inc(&vma->vm_mm->nr_ptes);
60882b0f8c3SJan Kara 		spin_unlock(vmf->ptl);
6096b251fc9SAndrea Arcangeli 		count_vm_event(THP_FAULT_ALLOC);
61071e3aac0SAndrea Arcangeli 	}
61171e3aac0SAndrea Arcangeli 
612aa2e878eSDavid Rientjes 	return 0;
61371e3aac0SAndrea Arcangeli }
61471e3aac0SAndrea Arcangeli 
615444eb2a4SMel Gorman /*
61621440d7eSDavid Rientjes  * always: directly stall for all thp allocations
61721440d7eSDavid Rientjes  * defer: wake kswapd and fail if not immediately available
61821440d7eSDavid Rientjes  * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
61921440d7eSDavid Rientjes  *		  fail if not immediately available
62021440d7eSDavid Rientjes  * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
62121440d7eSDavid Rientjes  *	    available
62221440d7eSDavid Rientjes  * never: never stall for any thp allocation
623444eb2a4SMel Gorman  */
624444eb2a4SMel Gorman static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
6250bbbc0b3SAndrea Arcangeli {
62621440d7eSDavid Rientjes 	const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
627444eb2a4SMel Gorman 
62821440d7eSDavid Rientjes 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
62925160354SVlastimil Babka 		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
63021440d7eSDavid Rientjes 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
63121440d7eSDavid Rientjes 		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
63221440d7eSDavid Rientjes 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
63321440d7eSDavid Rientjes 		return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
63421440d7eSDavid Rientjes 							     __GFP_KSWAPD_RECLAIM);
63521440d7eSDavid Rientjes 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
63621440d7eSDavid Rientjes 		return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
63721440d7eSDavid Rientjes 							     0);
63825160354SVlastimil Babka 	return GFP_TRANSHUGE_LIGHT;
639444eb2a4SMel Gorman }
640444eb2a4SMel Gorman 
641c4088ebdSKirill A. Shutemov /* Caller must hold page table lock. */
642d295e341SKirill A. Shutemov static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
64397ae1749SKirill A. Shutemov 		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
6445918d10aSKirill A. Shutemov 		struct page *zero_page)
645fc9fe822SKirill A. Shutemov {
646fc9fe822SKirill A. Shutemov 	pmd_t entry;
6477c414164SAndrew Morton 	if (!pmd_none(*pmd))
6487c414164SAndrew Morton 		return false;
6495918d10aSKirill A. Shutemov 	entry = mk_pmd(zero_page, vma->vm_page_prot);
650fc9fe822SKirill A. Shutemov 	entry = pmd_mkhuge(entry);
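	/*
	 * Deposit the preallocated page table (when the caller supplied
	 * one) so that a later split of the huge zero pmd has a pte page
	 * ready and does not need to allocate memory.
	 */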
65112c9d70bSMatthew Wilcox 	if (pgtable)
6526b0b50b0SAneesh Kumar K.V 		pgtable_trans_huge_deposit(mm, pmd, pgtable);
653fc9fe822SKirill A. Shutemov 	set_pmd_at(mm, haddr, pmd, entry);
654e1f56c89SKirill A. Shutemov 	atomic_long_inc(&mm->nr_ptes);
6557c414164SAndrew Morton 	return true;
656fc9fe822SKirill A. Shutemov }
657fc9fe822SKirill A. Shutemov 
65882b0f8c3SJan Kara int do_huge_pmd_anonymous_page(struct vm_fault *vmf)
65971e3aac0SAndrea Arcangeli {
66082b0f8c3SJan Kara 	struct vm_area_struct *vma = vmf->vma;
661077fcf11SAneesh Kumar K.V 	gfp_t gfp;
66271e3aac0SAndrea Arcangeli 	struct page *page;
66382b0f8c3SJan Kara 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
66471e3aac0SAndrea Arcangeli 
665128ec037SKirill A. Shutemov 	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
666c0292554SKirill A. Shutemov 		return VM_FAULT_FALLBACK;
66771e3aac0SAndrea Arcangeli 	if (unlikely(anon_vma_prepare(vma)))
66871e3aac0SAndrea Arcangeli 		return VM_FAULT_OOM;
6696d50e60cSDavid Rientjes 	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
670ba76149fSAndrea Arcangeli 		return VM_FAULT_OOM;
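	/*
	 * A read fault on an anonymous range can be serviced with the
	 * shared huge zero page instead of allocating real memory; a
	 * later write fault replaces that mapping with writable memory.
	 */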
67182b0f8c3SJan Kara 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
672bae473a4SKirill A. Shutemov 			!mm_forbids_zeropage(vma->vm_mm) &&
67379da5407SKirill A. Shutemov 			transparent_hugepage_use_zero_page()) {
67480371957SKirill A. Shutemov 		pgtable_t pgtable;
6755918d10aSKirill A. Shutemov 		struct page *zero_page;
6763ea41e62SKirill A. Shutemov 		bool set;
6776b251fc9SAndrea Arcangeli 		int ret;
678bae473a4SKirill A. Shutemov 		pgtable = pte_alloc_one(vma->vm_mm, haddr);
67980371957SKirill A. Shutemov 		if (unlikely(!pgtable))
68080371957SKirill A. Shutemov 			return VM_FAULT_OOM;
6816fcb52a5SAaron Lu 		zero_page = mm_get_huge_zero_page(vma->vm_mm);
6825918d10aSKirill A. Shutemov 		if (unlikely(!zero_page)) {
683bae473a4SKirill A. Shutemov 			pte_free(vma->vm_mm, pgtable);
68497ae1749SKirill A. Shutemov 			count_vm_event(THP_FAULT_FALLBACK);
685c0292554SKirill A. Shutemov 			return VM_FAULT_FALLBACK;
68697ae1749SKirill A. Shutemov 		}
68782b0f8c3SJan Kara 		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
6886b251fc9SAndrea Arcangeli 		ret = 0;
6896b251fc9SAndrea Arcangeli 		set = false;
69082b0f8c3SJan Kara 		if (pmd_none(*vmf->pmd)) {
6916b251fc9SAndrea Arcangeli 			if (userfaultfd_missing(vma)) {
69282b0f8c3SJan Kara 				spin_unlock(vmf->ptl);
69382b0f8c3SJan Kara 				ret = handle_userfault(vmf, VM_UFFD_MISSING);
6946b251fc9SAndrea Arcangeli 				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
6956b251fc9SAndrea Arcangeli 			} else {
696bae473a4SKirill A. Shutemov 				set_huge_zero_page(pgtable, vma->vm_mm, vma,
69782b0f8c3SJan Kara 						   haddr, vmf->pmd, zero_page);
69882b0f8c3SJan Kara 				spin_unlock(vmf->ptl);
6996b251fc9SAndrea Arcangeli 				set = true;
7006b251fc9SAndrea Arcangeli 			}
7016b251fc9SAndrea Arcangeli 		} else
70282b0f8c3SJan Kara 			spin_unlock(vmf->ptl);
7036fcb52a5SAaron Lu 		if (!set)
704bae473a4SKirill A. Shutemov 			pte_free(vma->vm_mm, pgtable);
7056b251fc9SAndrea Arcangeli 		return ret;
70680371957SKirill A. Shutemov 	}
707444eb2a4SMel Gorman 	gfp = alloc_hugepage_direct_gfpmask(vma);
708077fcf11SAneesh Kumar K.V 	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
70981ab4201SAndi Kleen 	if (unlikely(!page)) {
71081ab4201SAndi Kleen 		count_vm_event(THP_FAULT_FALLBACK);
711c0292554SKirill A. Shutemov 		return VM_FAULT_FALLBACK;
71281ab4201SAndi Kleen 	}
7139a982250SKirill A. Shutemov 	prep_transhuge_page(page);
71482b0f8c3SJan Kara 	return __do_huge_pmd_anonymous_page(vmf, page, gfp);
71571e3aac0SAndrea Arcangeli }
71671e3aac0SAndrea Arcangeli 
717ae18d6dcSMatthew Wilcox static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
718f25748e3SDan Williams 		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write)
7195cad465dSMatthew Wilcox {
7205cad465dSMatthew Wilcox 	struct mm_struct *mm = vma->vm_mm;
7215cad465dSMatthew Wilcox 	pmd_t entry;
7225cad465dSMatthew Wilcox 	spinlock_t *ptl;
7235cad465dSMatthew Wilcox 
7245cad465dSMatthew Wilcox 	ptl = pmd_lock(mm, pmd);
725f25748e3SDan Williams 	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
726f25748e3SDan Williams 	if (pfn_t_devmap(pfn))
727f25748e3SDan Williams 		entry = pmd_mkdevmap(entry);
7285cad465dSMatthew Wilcox 	if (write) {
7295cad465dSMatthew Wilcox 		entry = pmd_mkyoung(pmd_mkdirty(entry));
7305cad465dSMatthew Wilcox 		entry = maybe_pmd_mkwrite(entry, vma);
7315cad465dSMatthew Wilcox 	}
7325cad465dSMatthew Wilcox 	set_pmd_at(mm, addr, pmd, entry);
7335cad465dSMatthew Wilcox 	update_mmu_cache_pmd(vma, addr, pmd);
7345cad465dSMatthew Wilcox 	spin_unlock(ptl);
7355cad465dSMatthew Wilcox }
7365cad465dSMatthew Wilcox 
7375cad465dSMatthew Wilcox int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
738f25748e3SDan Williams 			pmd_t *pmd, pfn_t pfn, bool write)
7395cad465dSMatthew Wilcox {
7405cad465dSMatthew Wilcox 	pgprot_t pgprot = vma->vm_page_prot;
7415cad465dSMatthew Wilcox 	/*
7425cad465dSMatthew Wilcox 	 * If we had pmd_special, we could avoid all these restrictions,
7435cad465dSMatthew Wilcox 	 * but we need to be consistent with PTEs and architectures that
7445cad465dSMatthew Wilcox 	 * can't support a 'special' bit.
7455cad465dSMatthew Wilcox 	 */
7465cad465dSMatthew Wilcox 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
7475cad465dSMatthew Wilcox 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
7485cad465dSMatthew Wilcox 						(VM_PFNMAP|VM_MIXEDMAP));
7495cad465dSMatthew Wilcox 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
750f25748e3SDan Williams 	BUG_ON(!pfn_t_devmap(pfn));
7515cad465dSMatthew Wilcox 
7525cad465dSMatthew Wilcox 	if (addr < vma->vm_start || addr >= vma->vm_end)
7535cad465dSMatthew Wilcox 		return VM_FAULT_SIGBUS;
754308a047cSBorislav Petkov 
755308a047cSBorislav Petkov 	track_pfn_insert(vma, &pgprot, pfn);
756308a047cSBorislav Petkov 
757ae18d6dcSMatthew Wilcox 	insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write);
758ae18d6dcSMatthew Wilcox 	return VM_FAULT_NOPAGE;
7595cad465dSMatthew Wilcox }
760dee41079SDan Williams EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
7615cad465dSMatthew Wilcox 
762a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
763a00cc7d9SMatthew Wilcox static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
764a00cc7d9SMatthew Wilcox {
765a00cc7d9SMatthew Wilcox 	if (likely(vma->vm_flags & VM_WRITE))
766a00cc7d9SMatthew Wilcox 		pud = pud_mkwrite(pud);
767a00cc7d9SMatthew Wilcox 	return pud;
768a00cc7d9SMatthew Wilcox }
769a00cc7d9SMatthew Wilcox 
770a00cc7d9SMatthew Wilcox static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
771a00cc7d9SMatthew Wilcox 		pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
772a00cc7d9SMatthew Wilcox {
773a00cc7d9SMatthew Wilcox 	struct mm_struct *mm = vma->vm_mm;
774a00cc7d9SMatthew Wilcox 	pud_t entry;
775a00cc7d9SMatthew Wilcox 	spinlock_t *ptl;
776a00cc7d9SMatthew Wilcox 
777a00cc7d9SMatthew Wilcox 	ptl = pud_lock(mm, pud);
778a00cc7d9SMatthew Wilcox 	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
779a00cc7d9SMatthew Wilcox 	if (pfn_t_devmap(pfn))
780a00cc7d9SMatthew Wilcox 		entry = pud_mkdevmap(entry);
781a00cc7d9SMatthew Wilcox 	if (write) {
782a00cc7d9SMatthew Wilcox 		entry = pud_mkyoung(pud_mkdirty(entry));
783a00cc7d9SMatthew Wilcox 		entry = maybe_pud_mkwrite(entry, vma);
784a00cc7d9SMatthew Wilcox 	}
785a00cc7d9SMatthew Wilcox 	set_pud_at(mm, addr, pud, entry);
786a00cc7d9SMatthew Wilcox 	update_mmu_cache_pud(vma, addr, pud);
787a00cc7d9SMatthew Wilcox 	spin_unlock(ptl);
788a00cc7d9SMatthew Wilcox }
789a00cc7d9SMatthew Wilcox 
790a00cc7d9SMatthew Wilcox int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
791a00cc7d9SMatthew Wilcox 			pud_t *pud, pfn_t pfn, bool write)
792a00cc7d9SMatthew Wilcox {
793a00cc7d9SMatthew Wilcox 	pgprot_t pgprot = vma->vm_page_prot;
794a00cc7d9SMatthew Wilcox 	/*
795a00cc7d9SMatthew Wilcox 	 * If we had pud_special, we could avoid all these restrictions,
796a00cc7d9SMatthew Wilcox 	 * but we need to be consistent with PTEs and architectures that
797a00cc7d9SMatthew Wilcox 	 * can't support a 'special' bit.
798a00cc7d9SMatthew Wilcox 	 */
799a00cc7d9SMatthew Wilcox 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
800a00cc7d9SMatthew Wilcox 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
801a00cc7d9SMatthew Wilcox 						(VM_PFNMAP|VM_MIXEDMAP));
802a00cc7d9SMatthew Wilcox 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
803a00cc7d9SMatthew Wilcox 	BUG_ON(!pfn_t_devmap(pfn));
804a00cc7d9SMatthew Wilcox 
805a00cc7d9SMatthew Wilcox 	if (addr < vma->vm_start || addr >= vma->vm_end)
806a00cc7d9SMatthew Wilcox 		return VM_FAULT_SIGBUS;
807a00cc7d9SMatthew Wilcox 
808a00cc7d9SMatthew Wilcox 	track_pfn_insert(vma, &pgprot, pfn);
809a00cc7d9SMatthew Wilcox 
810a00cc7d9SMatthew Wilcox 	insert_pfn_pud(vma, addr, pud, pfn, pgprot, write);
811a00cc7d9SMatthew Wilcox 	return VM_FAULT_NOPAGE;
812a00cc7d9SMatthew Wilcox }
813a00cc7d9SMatthew Wilcox EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
814a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
815a00cc7d9SMatthew Wilcox 
8163565fce3SDan Williams static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
8173565fce3SDan Williams 		pmd_t *pmd)
8183565fce3SDan Williams {
8193565fce3SDan Williams 	pmd_t _pmd;
8203565fce3SDan Williams 
8213565fce3SDan Williams 	/*
8223565fce3SDan Williams 	 * We should set the dirty bit only for FOLL_WRITE but for now
8233565fce3SDan Williams 	 * the dirty bit in the pmd is meaningless.  And if the dirty
8243565fce3SDan Williams 	 * bit will become meaningful and we'll only set it with
8253565fce3SDan Williams 	 * FOLL_WRITE, an atomic set_bit will be required on the pmd to
8263565fce3SDan Williams 	 * set the young bit, instead of the current set_pmd_at.
8273565fce3SDan Williams 	 */
8283565fce3SDan Williams 	_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
8293565fce3SDan Williams 	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
8303565fce3SDan Williams 				pmd, _pmd,  1))
8313565fce3SDan Williams 		update_mmu_cache_pmd(vma, addr, pmd);
8323565fce3SDan Williams }
8333565fce3SDan Williams 
8343565fce3SDan Williams struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
8353565fce3SDan Williams 		pmd_t *pmd, int flags)
8363565fce3SDan Williams {
8373565fce3SDan Williams 	unsigned long pfn = pmd_pfn(*pmd);
8383565fce3SDan Williams 	struct mm_struct *mm = vma->vm_mm;
8393565fce3SDan Williams 	struct dev_pagemap *pgmap;
8403565fce3SDan Williams 	struct page *page;
8413565fce3SDan Williams 
8423565fce3SDan Williams 	assert_spin_locked(pmd_lockptr(mm, pmd));
8433565fce3SDan Williams 
8448310d48bSKeno Fischer 	/*
8458310d48bSKeno Fischer 	 * When we COW a devmap PMD entry, we split it into PTEs, so we should
8468310d48bSKeno Fischer 	 * not be in this function with `flags & FOLL_COW` set.
8478310d48bSKeno Fischer 	 */
8488310d48bSKeno Fischer 	WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
8498310d48bSKeno Fischer 
8503565fce3SDan Williams 	if (flags & FOLL_WRITE && !pmd_write(*pmd))
8513565fce3SDan Williams 		return NULL;
8523565fce3SDan Williams 
8533565fce3SDan Williams 	if (pmd_present(*pmd) && pmd_devmap(*pmd))
8543565fce3SDan Williams 		/* pass */;
8553565fce3SDan Williams 	else
8563565fce3SDan Williams 		return NULL;
8573565fce3SDan Williams 
8583565fce3SDan Williams 	if (flags & FOLL_TOUCH)
8593565fce3SDan Williams 		touch_pmd(vma, addr, pmd);
8603565fce3SDan Williams 
8613565fce3SDan Williams 	/*
8623565fce3SDan Williams 	 * device mapped pages can only be returned if the
8633565fce3SDan Williams 	 * caller will manage the page reference count.
8643565fce3SDan Williams 	 */
8653565fce3SDan Williams 	if (!(flags & FOLL_GET))
8663565fce3SDan Williams 		return ERR_PTR(-EEXIST);
8673565fce3SDan Williams 
8683565fce3SDan Williams 	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
8693565fce3SDan Williams 	pgmap = get_dev_pagemap(pfn, NULL);
8703565fce3SDan Williams 	if (!pgmap)
8713565fce3SDan Williams 		return ERR_PTR(-EFAULT);
8723565fce3SDan Williams 	page = pfn_to_page(pfn);
8733565fce3SDan Williams 	get_page(page);
8743565fce3SDan Williams 	put_dev_pagemap(pgmap);
8753565fce3SDan Williams 
8763565fce3SDan Williams 	return page;
8773565fce3SDan Williams }
8783565fce3SDan Williams 
87971e3aac0SAndrea Arcangeli int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
88071e3aac0SAndrea Arcangeli 		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
88171e3aac0SAndrea Arcangeli 		  struct vm_area_struct *vma)
88271e3aac0SAndrea Arcangeli {
883c4088ebdSKirill A. Shutemov 	spinlock_t *dst_ptl, *src_ptl;
88471e3aac0SAndrea Arcangeli 	struct page *src_page;
88571e3aac0SAndrea Arcangeli 	pmd_t pmd;
88612c9d70bSMatthew Wilcox 	pgtable_t pgtable = NULL;
887628d47ceSKirill A. Shutemov 	int ret = -ENOMEM;
88871e3aac0SAndrea Arcangeli 
889628d47ceSKirill A. Shutemov 	/* Skip if it can be re-filled on fault */
890628d47ceSKirill A. Shutemov 	if (!vma_is_anonymous(vma))
891628d47ceSKirill A. Shutemov 		return 0;
892628d47ceSKirill A. Shutemov 
89371e3aac0SAndrea Arcangeli 	pgtable = pte_alloc_one(dst_mm, addr);
89471e3aac0SAndrea Arcangeli 	if (unlikely(!pgtable))
89571e3aac0SAndrea Arcangeli 		goto out;
89671e3aac0SAndrea Arcangeli 
897c4088ebdSKirill A. Shutemov 	dst_ptl = pmd_lock(dst_mm, dst_pmd);
898c4088ebdSKirill A. Shutemov 	src_ptl = pmd_lockptr(src_mm, src_pmd);
899c4088ebdSKirill A. Shutemov 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
90071e3aac0SAndrea Arcangeli 
90171e3aac0SAndrea Arcangeli 	ret = -EAGAIN;
90271e3aac0SAndrea Arcangeli 	pmd = *src_pmd;
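	/*
	 * If the pmd stopped being a huge pmd since the caller's unlocked
	 * check, -EAGAIN makes the caller retry the copy at the pte level.
	 */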
903628d47ceSKirill A. Shutemov 	if (unlikely(!pmd_trans_huge(pmd))) {
90471e3aac0SAndrea Arcangeli 		pte_free(dst_mm, pgtable);
90571e3aac0SAndrea Arcangeli 		goto out_unlock;
90671e3aac0SAndrea Arcangeli 	}
907fc9fe822SKirill A. Shutemov 	/*
908c4088ebdSKirill A. Shutemov 	 * When page table lock is held, the huge zero pmd should not be
909fc9fe822SKirill A. Shutemov 	 * under splitting since we don't split the page itself, only the pmd
910fc9fe822SKirill A. Shutemov 	 * into a page table.
911fc9fe822SKirill A. Shutemov 	 */
912fc9fe822SKirill A. Shutemov 	if (is_huge_zero_pmd(pmd)) {
9135918d10aSKirill A. Shutemov 		struct page *zero_page;
91497ae1749SKirill A. Shutemov 		/*
91597ae1749SKirill A. Shutemov 		 * get_huge_zero_page() will never allocate a new page here,
91697ae1749SKirill A. Shutemov 		 * since we already have a zero page to copy. It just takes a
91797ae1749SKirill A. Shutemov 		 * reference.
91897ae1749SKirill A. Shutemov 		 */
9196fcb52a5SAaron Lu 		zero_page = mm_get_huge_zero_page(dst_mm);
9206b251fc9SAndrea Arcangeli 		set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
9215918d10aSKirill A. Shutemov 				zero_page);
922fc9fe822SKirill A. Shutemov 		ret = 0;
923fc9fe822SKirill A. Shutemov 		goto out_unlock;
924fc9fe822SKirill A. Shutemov 	}
925de466bd6SMel Gorman 
92671e3aac0SAndrea Arcangeli 	src_page = pmd_page(pmd);
927309381feSSasha Levin 	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
92871e3aac0SAndrea Arcangeli 	get_page(src_page);
92953f9263bSKirill A. Shutemov 	page_dup_rmap(src_page, true);
93071e3aac0SAndrea Arcangeli 	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
9315c7fb56eSDan Williams 	atomic_long_inc(&dst_mm->nr_ptes);
9325c7fb56eSDan Williams 	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
93371e3aac0SAndrea Arcangeli 
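	/*
	 * Write-protect the pmd in both parent and child so that the first
	 * write from either side faults and triggers copy-on-write of the
	 * huge page.
	 */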
93471e3aac0SAndrea Arcangeli 	pmdp_set_wrprotect(src_mm, addr, src_pmd);
93571e3aac0SAndrea Arcangeli 	pmd = pmd_mkold(pmd_wrprotect(pmd));
93671e3aac0SAndrea Arcangeli 	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
93771e3aac0SAndrea Arcangeli 
93871e3aac0SAndrea Arcangeli 	ret = 0;
93971e3aac0SAndrea Arcangeli out_unlock:
940c4088ebdSKirill A. Shutemov 	spin_unlock(src_ptl);
941c4088ebdSKirill A. Shutemov 	spin_unlock(dst_ptl);
94271e3aac0SAndrea Arcangeli out:
94371e3aac0SAndrea Arcangeli 	return ret;
94471e3aac0SAndrea Arcangeli }
94571e3aac0SAndrea Arcangeli 
946a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
947a00cc7d9SMatthew Wilcox static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
948a00cc7d9SMatthew Wilcox 		pud_t *pud)
949a00cc7d9SMatthew Wilcox {
950a00cc7d9SMatthew Wilcox 	pud_t _pud;
951a00cc7d9SMatthew Wilcox 
952a00cc7d9SMatthew Wilcox 	/*
953a00cc7d9SMatthew Wilcox 	 * We should set the dirty bit only for FOLL_WRITE but for now
954a00cc7d9SMatthew Wilcox 	 * the dirty bit in the pud is meaningless.  And if the dirty
955a00cc7d9SMatthew Wilcox 	 * bit will become meaningful and we'll only set it with
956a00cc7d9SMatthew Wilcox 	 * FOLL_WRITE, an atomic set_bit will be required on the pud to
957a00cc7d9SMatthew Wilcox 	 * set the young bit, instead of the current set_pud_at.
958a00cc7d9SMatthew Wilcox 	 */
959a00cc7d9SMatthew Wilcox 	_pud = pud_mkyoung(pud_mkdirty(*pud));
960a00cc7d9SMatthew Wilcox 	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
961a00cc7d9SMatthew Wilcox 				pud, _pud,  1))
962a00cc7d9SMatthew Wilcox 		update_mmu_cache_pud(vma, addr, pud);
963a00cc7d9SMatthew Wilcox }
964a00cc7d9SMatthew Wilcox 
965a00cc7d9SMatthew Wilcox struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
966a00cc7d9SMatthew Wilcox 		pud_t *pud, int flags)
967a00cc7d9SMatthew Wilcox {
968a00cc7d9SMatthew Wilcox 	unsigned long pfn = pud_pfn(*pud);
969a00cc7d9SMatthew Wilcox 	struct mm_struct *mm = vma->vm_mm;
970a00cc7d9SMatthew Wilcox 	struct dev_pagemap *pgmap;
971a00cc7d9SMatthew Wilcox 	struct page *page;
972a00cc7d9SMatthew Wilcox 
973a00cc7d9SMatthew Wilcox 	assert_spin_locked(pud_lockptr(mm, pud));
974a00cc7d9SMatthew Wilcox 
975a00cc7d9SMatthew Wilcox 	if (flags & FOLL_WRITE && !pud_write(*pud))
976a00cc7d9SMatthew Wilcox 		return NULL;
977a00cc7d9SMatthew Wilcox 
978a00cc7d9SMatthew Wilcox 	if (pud_present(*pud) && pud_devmap(*pud))
979a00cc7d9SMatthew Wilcox 		/* pass */;
980a00cc7d9SMatthew Wilcox 	else
981a00cc7d9SMatthew Wilcox 		return NULL;
982a00cc7d9SMatthew Wilcox 
983a00cc7d9SMatthew Wilcox 	if (flags & FOLL_TOUCH)
984a00cc7d9SMatthew Wilcox 		touch_pud(vma, addr, pud);
985a00cc7d9SMatthew Wilcox 
986a00cc7d9SMatthew Wilcox 	/*
987a00cc7d9SMatthew Wilcox 	 * device mapped pages can only be returned if the
988a00cc7d9SMatthew Wilcox 	 * caller will manage the page reference count.
989a00cc7d9SMatthew Wilcox 	 */
990a00cc7d9SMatthew Wilcox 	if (!(flags & FOLL_GET))
991a00cc7d9SMatthew Wilcox 		return ERR_PTR(-EEXIST);
992a00cc7d9SMatthew Wilcox 
993a00cc7d9SMatthew Wilcox 	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
994a00cc7d9SMatthew Wilcox 	pgmap = get_dev_pagemap(pfn, NULL);
995a00cc7d9SMatthew Wilcox 	if (!pgmap)
996a00cc7d9SMatthew Wilcox 		return ERR_PTR(-EFAULT);
997a00cc7d9SMatthew Wilcox 	page = pfn_to_page(pfn);
998a00cc7d9SMatthew Wilcox 	get_page(page);
999a00cc7d9SMatthew Wilcox 	put_dev_pagemap(pgmap);
1000a00cc7d9SMatthew Wilcox 
1001a00cc7d9SMatthew Wilcox 	return page;
1002a00cc7d9SMatthew Wilcox }
1003a00cc7d9SMatthew Wilcox 
1004a00cc7d9SMatthew Wilcox int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1005a00cc7d9SMatthew Wilcox 		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1006a00cc7d9SMatthew Wilcox 		  struct vm_area_struct *vma)
1007a00cc7d9SMatthew Wilcox {
1008a00cc7d9SMatthew Wilcox 	spinlock_t *dst_ptl, *src_ptl;
1009a00cc7d9SMatthew Wilcox 	pud_t pud;
1010a00cc7d9SMatthew Wilcox 	int ret;
1011a00cc7d9SMatthew Wilcox 
1012a00cc7d9SMatthew Wilcox 	dst_ptl = pud_lock(dst_mm, dst_pud);
1013a00cc7d9SMatthew Wilcox 	src_ptl = pud_lockptr(src_mm, src_pud);
1014a00cc7d9SMatthew Wilcox 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1015a00cc7d9SMatthew Wilcox 
1016a00cc7d9SMatthew Wilcox 	ret = -EAGAIN;
1017a00cc7d9SMatthew Wilcox 	pud = *src_pud;
1018a00cc7d9SMatthew Wilcox 	if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
1019a00cc7d9SMatthew Wilcox 		goto out_unlock;
1020a00cc7d9SMatthew Wilcox 
1021a00cc7d9SMatthew Wilcox 	/*
1022a00cc7d9SMatthew Wilcox 	 * While the page table lock is held, the huge zero pud cannot be
1023a00cc7d9SMatthew Wilcox 	 * split under us, since we never split the page itself, only the
1024a00cc7d9SMatthew Wilcox 	 * pud into a page table.
1025a00cc7d9SMatthew Wilcox 	 */
1026a00cc7d9SMatthew Wilcox 	if (is_huge_zero_pud(pud)) {
1027a00cc7d9SMatthew Wilcox 		/* No huge zero pud yet */
1028a00cc7d9SMatthew Wilcox 	}
1029a00cc7d9SMatthew Wilcox 
1030a00cc7d9SMatthew Wilcox 	pudp_set_wrprotect(src_mm, addr, src_pud);
1031a00cc7d9SMatthew Wilcox 	pud = pud_mkold(pud_wrprotect(pud));
1032a00cc7d9SMatthew Wilcox 	set_pud_at(dst_mm, addr, dst_pud, pud);
1033a00cc7d9SMatthew Wilcox 
1034a00cc7d9SMatthew Wilcox 	ret = 0;
1035a00cc7d9SMatthew Wilcox out_unlock:
1036a00cc7d9SMatthew Wilcox 	spin_unlock(src_ptl);
1037a00cc7d9SMatthew Wilcox 	spin_unlock(dst_ptl);
1038a00cc7d9SMatthew Wilcox 	return ret;
1039a00cc7d9SMatthew Wilcox }
1040a00cc7d9SMatthew Wilcox 
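/*
 * huge_pud_set_accessed() - minor fault on a present huge PUD: revalidate the
 * entry under the pud lock, then mark it young (and dirty for write faults).
 */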
1041a00cc7d9SMatthew Wilcox void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
1042a00cc7d9SMatthew Wilcox {
1043a00cc7d9SMatthew Wilcox 	pud_t entry;
1044a00cc7d9SMatthew Wilcox 	unsigned long haddr;
1045a00cc7d9SMatthew Wilcox 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1046a00cc7d9SMatthew Wilcox 
1047a00cc7d9SMatthew Wilcox 	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
1048a00cc7d9SMatthew Wilcox 	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
1049a00cc7d9SMatthew Wilcox 		goto unlock;
1050a00cc7d9SMatthew Wilcox 
1051a00cc7d9SMatthew Wilcox 	entry = pud_mkyoung(orig_pud);
1052a00cc7d9SMatthew Wilcox 	if (write)
1053a00cc7d9SMatthew Wilcox 		entry = pud_mkdirty(entry);
1054a00cc7d9SMatthew Wilcox 	haddr = vmf->address & HPAGE_PUD_MASK;
1055a00cc7d9SMatthew Wilcox 	if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write))
1056a00cc7d9SMatthew Wilcox 		update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud);
1057a00cc7d9SMatthew Wilcox 
1058a00cc7d9SMatthew Wilcox unlock:
1059a00cc7d9SMatthew Wilcox 	spin_unlock(vmf->ptl);
1060a00cc7d9SMatthew Wilcox }
1061a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1062a00cc7d9SMatthew Wilcox 
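/*
 * huge_pmd_set_accessed() - the PMD counterpart of the above: update the
 * young/dirty bits once pmd_same() confirms the entry has not changed.
 */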
106382b0f8c3SJan Kara void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
1064a1dd450bSWill Deacon {
1065a1dd450bSWill Deacon 	pmd_t entry;
1066a1dd450bSWill Deacon 	unsigned long haddr;
106720f664aaSMinchan Kim 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1068a1dd450bSWill Deacon 
106982b0f8c3SJan Kara 	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
107082b0f8c3SJan Kara 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
1071a1dd450bSWill Deacon 		goto unlock;
1072a1dd450bSWill Deacon 
1073a1dd450bSWill Deacon 	entry = pmd_mkyoung(orig_pmd);
107420f664aaSMinchan Kim 	if (write)
107520f664aaSMinchan Kim 		entry = pmd_mkdirty(entry);
107682b0f8c3SJan Kara 	haddr = vmf->address & HPAGE_PMD_MASK;
107720f664aaSMinchan Kim 	if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
107882b0f8c3SJan Kara 		update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
1079a1dd450bSWill Deacon 
1080a1dd450bSWill Deacon unlock:
108182b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
1082a1dd450bSWill Deacon }
1083a1dd450bSWill Deacon 
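/*
 * COW fallback used when the huge page could not be reused and no replacement
 * huge page could be allocated: copy the data into HPAGE_PMD_NR order-0 pages
 * and remap the range with a regular page table in place of the huge pmd.
 */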
108482b0f8c3SJan Kara static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
1085bae473a4SKirill A. Shutemov 		struct page *page)
108671e3aac0SAndrea Arcangeli {
108782b0f8c3SJan Kara 	struct vm_area_struct *vma = vmf->vma;
108882b0f8c3SJan Kara 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
108900501b53SJohannes Weiner 	struct mem_cgroup *memcg;
109071e3aac0SAndrea Arcangeli 	pgtable_t pgtable;
109171e3aac0SAndrea Arcangeli 	pmd_t _pmd;
109271e3aac0SAndrea Arcangeli 	int ret = 0, i;
109371e3aac0SAndrea Arcangeli 	struct page **pages;
10942ec74c3eSSagi Grimberg 	unsigned long mmun_start;	/* For mmu_notifiers */
10952ec74c3eSSagi Grimberg 	unsigned long mmun_end;		/* For mmu_notifiers */
109671e3aac0SAndrea Arcangeli 
109771e3aac0SAndrea Arcangeli 	pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
109871e3aac0SAndrea Arcangeli 			GFP_KERNEL);
109971e3aac0SAndrea Arcangeli 	if (unlikely(!pages)) {
110071e3aac0SAndrea Arcangeli 		ret |= VM_FAULT_OOM;
110171e3aac0SAndrea Arcangeli 		goto out;
110271e3aac0SAndrea Arcangeli 	}
110371e3aac0SAndrea Arcangeli 
110471e3aac0SAndrea Arcangeli 	for (i = 0; i < HPAGE_PMD_NR; i++) {
110541b6167eSMichal Hocko 		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma,
110682b0f8c3SJan Kara 					       vmf->address, page_to_nid(page));
1107b9bbfbe3SAndrea Arcangeli 		if (unlikely(!pages[i] ||
1108bae473a4SKirill A. Shutemov 			     mem_cgroup_try_charge(pages[i], vma->vm_mm,
1109bae473a4SKirill A. Shutemov 				     GFP_KERNEL, &memcg, false))) {
1110b9bbfbe3SAndrea Arcangeli 			if (pages[i])
111171e3aac0SAndrea Arcangeli 				put_page(pages[i]);
1112b9bbfbe3SAndrea Arcangeli 			while (--i >= 0) {
111300501b53SJohannes Weiner 				memcg = (void *)page_private(pages[i]);
111400501b53SJohannes Weiner 				set_page_private(pages[i], 0);
1115f627c2f5SKirill A. Shutemov 				mem_cgroup_cancel_charge(pages[i], memcg,
1116f627c2f5SKirill A. Shutemov 						false);
1117b9bbfbe3SAndrea Arcangeli 				put_page(pages[i]);
1118b9bbfbe3SAndrea Arcangeli 			}
111971e3aac0SAndrea Arcangeli 			kfree(pages);
112071e3aac0SAndrea Arcangeli 			ret |= VM_FAULT_OOM;
112171e3aac0SAndrea Arcangeli 			goto out;
112271e3aac0SAndrea Arcangeli 		}
112300501b53SJohannes Weiner 		set_page_private(pages[i], (unsigned long)memcg);
112471e3aac0SAndrea Arcangeli 	}
112571e3aac0SAndrea Arcangeli 
112671e3aac0SAndrea Arcangeli 	for (i = 0; i < HPAGE_PMD_NR; i++) {
112771e3aac0SAndrea Arcangeli 		copy_user_highpage(pages[i], page + i,
11280089e485SHillf Danton 				   haddr + PAGE_SIZE * i, vma);
112971e3aac0SAndrea Arcangeli 		__SetPageUptodate(pages[i]);
113071e3aac0SAndrea Arcangeli 		cond_resched();
113171e3aac0SAndrea Arcangeli 	}
113271e3aac0SAndrea Arcangeli 
11332ec74c3eSSagi Grimberg 	mmun_start = haddr;
11342ec74c3eSSagi Grimberg 	mmun_end   = haddr + HPAGE_PMD_SIZE;
1135bae473a4SKirill A. Shutemov 	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
11362ec74c3eSSagi Grimberg 
113782b0f8c3SJan Kara 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
113882b0f8c3SJan Kara 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
113971e3aac0SAndrea Arcangeli 		goto out_free_pages;
1140309381feSSasha Levin 	VM_BUG_ON_PAGE(!PageHead(page), page);
114171e3aac0SAndrea Arcangeli 
114282b0f8c3SJan Kara 	pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
114371e3aac0SAndrea Arcangeli 	/* leave pmd empty until pte is filled */
114471e3aac0SAndrea Arcangeli 
114582b0f8c3SJan Kara 	pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd);
1146bae473a4SKirill A. Shutemov 	pmd_populate(vma->vm_mm, &_pmd, pgtable);
114771e3aac0SAndrea Arcangeli 
114871e3aac0SAndrea Arcangeli 	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
1149bae473a4SKirill A. Shutemov 		pte_t entry;
115071e3aac0SAndrea Arcangeli 		entry = mk_pte(pages[i], vma->vm_page_prot);
115171e3aac0SAndrea Arcangeli 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
115200501b53SJohannes Weiner 		memcg = (void *)page_private(pages[i]);
115300501b53SJohannes Weiner 		set_page_private(pages[i], 0);
115482b0f8c3SJan Kara 		page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false);
1155f627c2f5SKirill A. Shutemov 		mem_cgroup_commit_charge(pages[i], memcg, false, false);
115600501b53SJohannes Weiner 		lru_cache_add_active_or_unevictable(pages[i], vma);
115782b0f8c3SJan Kara 		vmf->pte = pte_offset_map(&_pmd, haddr);
115882b0f8c3SJan Kara 		VM_BUG_ON(!pte_none(*vmf->pte));
115982b0f8c3SJan Kara 		set_pte_at(vma->vm_mm, haddr, vmf->pte, entry);
116082b0f8c3SJan Kara 		pte_unmap(vmf->pte);
116171e3aac0SAndrea Arcangeli 	}
116271e3aac0SAndrea Arcangeli 	kfree(pages);
116371e3aac0SAndrea Arcangeli 
116471e3aac0SAndrea Arcangeli 	smp_wmb(); /* make pte visible before pmd */
116582b0f8c3SJan Kara 	pmd_populate(vma->vm_mm, vmf->pmd, pgtable);
1166d281ee61SKirill A. Shutemov 	page_remove_rmap(page, true);
116782b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
116871e3aac0SAndrea Arcangeli 
1169bae473a4SKirill A. Shutemov 	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
11702ec74c3eSSagi Grimberg 
117171e3aac0SAndrea Arcangeli 	ret |= VM_FAULT_WRITE;
117271e3aac0SAndrea Arcangeli 	put_page(page);
117371e3aac0SAndrea Arcangeli 
117471e3aac0SAndrea Arcangeli out:
117571e3aac0SAndrea Arcangeli 	return ret;
117671e3aac0SAndrea Arcangeli 
117771e3aac0SAndrea Arcangeli out_free_pages:
117882b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
1179bae473a4SKirill A. Shutemov 	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
1180b9bbfbe3SAndrea Arcangeli 	for (i = 0; i < HPAGE_PMD_NR; i++) {
118100501b53SJohannes Weiner 		memcg = (void *)page_private(pages[i]);
118200501b53SJohannes Weiner 		set_page_private(pages[i], 0);
1183f627c2f5SKirill A. Shutemov 		mem_cgroup_cancel_charge(pages[i], memcg, false);
118471e3aac0SAndrea Arcangeli 		put_page(pages[i]);
1185b9bbfbe3SAndrea Arcangeli 	}
118671e3aac0SAndrea Arcangeli 	kfree(pages);
118771e3aac0SAndrea Arcangeli 	goto out;
118871e3aac0SAndrea Arcangeli }
118971e3aac0SAndrea Arcangeli 
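/*
 * Write-protect fault on a huge pmd. If we are the only mapper, the page is
 * reused in place; otherwise a fresh huge page is allocated and charged,
 * falling back to splitting the pmd (or the per-page copy above) on failure.
 */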
119082b0f8c3SJan Kara int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
119171e3aac0SAndrea Arcangeli {
119282b0f8c3SJan Kara 	struct vm_area_struct *vma = vmf->vma;
119393b4796dSKirill A. Shutemov 	struct page *page = NULL, *new_page;
119400501b53SJohannes Weiner 	struct mem_cgroup *memcg;
119582b0f8c3SJan Kara 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
11962ec74c3eSSagi Grimberg 	unsigned long mmun_start;	/* For mmu_notifiers */
11972ec74c3eSSagi Grimberg 	unsigned long mmun_end;		/* For mmu_notifiers */
11983b363692SMichal Hocko 	gfp_t huge_gfp;			/* for allocation and charge */
1199bae473a4SKirill A. Shutemov 	int ret = 0;
120071e3aac0SAndrea Arcangeli 
120182b0f8c3SJan Kara 	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
120281d1b09cSSasha Levin 	VM_BUG_ON_VMA(!vma->anon_vma, vma);
120393b4796dSKirill A. Shutemov 	if (is_huge_zero_pmd(orig_pmd))
120493b4796dSKirill A. Shutemov 		goto alloc;
120582b0f8c3SJan Kara 	spin_lock(vmf->ptl);
120682b0f8c3SJan Kara 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
120771e3aac0SAndrea Arcangeli 		goto out_unlock;
120871e3aac0SAndrea Arcangeli 
120971e3aac0SAndrea Arcangeli 	page = pmd_page(orig_pmd);
1210309381feSSasha Levin 	VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
12111f25fe20SKirill A. Shutemov 	/*
12121f25fe20SKirill A. Shutemov 	 * We can only reuse the page if nobody else maps the huge page or
12136d0a07edSAndrea Arcangeli 	 * any part of it.
12141f25fe20SKirill A. Shutemov 	 */
12156d0a07edSAndrea Arcangeli 	if (page_trans_huge_mapcount(page, NULL) == 1) {
121671e3aac0SAndrea Arcangeli 		pmd_t entry;
121771e3aac0SAndrea Arcangeli 		entry = pmd_mkyoung(orig_pmd);
121871e3aac0SAndrea Arcangeli 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
121982b0f8c3SJan Kara 		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry,  1))
122082b0f8c3SJan Kara 			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
122171e3aac0SAndrea Arcangeli 		ret |= VM_FAULT_WRITE;
122271e3aac0SAndrea Arcangeli 		goto out_unlock;
122371e3aac0SAndrea Arcangeli 	}
1224ddc58f27SKirill A. Shutemov 	get_page(page);
122582b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
122693b4796dSKirill A. Shutemov alloc:
122771e3aac0SAndrea Arcangeli 	if (transparent_hugepage_enabled(vma) &&
1228077fcf11SAneesh Kumar K.V 	    !transparent_hugepage_debug_cow()) {
1229444eb2a4SMel Gorman 		huge_gfp = alloc_hugepage_direct_gfpmask(vma);
12303b363692SMichal Hocko 		new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
1231077fcf11SAneesh Kumar K.V 	} else
123271e3aac0SAndrea Arcangeli 		new_page = NULL;
123371e3aac0SAndrea Arcangeli 
12349a982250SKirill A. Shutemov 	if (likely(new_page)) {
12359a982250SKirill A. Shutemov 		prep_transhuge_page(new_page);
12369a982250SKirill A. Shutemov 	} else {
1237eecc1e42SHugh Dickins 		if (!page) {
123882b0f8c3SJan Kara 			split_huge_pmd(vma, vmf->pmd, vmf->address);
1239e9b71ca9SKirill A. Shutemov 			ret |= VM_FAULT_FALLBACK;
124093b4796dSKirill A. Shutemov 		} else {
124182b0f8c3SJan Kara 			ret = do_huge_pmd_wp_page_fallback(vmf, orig_pmd, page);
12429845cbbdSKirill A. Shutemov 			if (ret & VM_FAULT_OOM) {
124382b0f8c3SJan Kara 				split_huge_pmd(vma, vmf->pmd, vmf->address);
12449845cbbdSKirill A. Shutemov 				ret |= VM_FAULT_FALLBACK;
12459845cbbdSKirill A. Shutemov 			}
1246ddc58f27SKirill A. Shutemov 			put_page(page);
124793b4796dSKirill A. Shutemov 		}
124817766ddeSDavid Rientjes 		count_vm_event(THP_FAULT_FALLBACK);
124971e3aac0SAndrea Arcangeli 		goto out;
125071e3aac0SAndrea Arcangeli 	}
125171e3aac0SAndrea Arcangeli 
1252bae473a4SKirill A. Shutemov 	if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm,
1253bae473a4SKirill A. Shutemov 					huge_gfp, &memcg, true))) {
1254b9bbfbe3SAndrea Arcangeli 		put_page(new_page);
125582b0f8c3SJan Kara 		split_huge_pmd(vma, vmf->pmd, vmf->address);
1256bae473a4SKirill A. Shutemov 		if (page)
1257ddc58f27SKirill A. Shutemov 			put_page(page);
12589845cbbdSKirill A. Shutemov 		ret |= VM_FAULT_FALLBACK;
125917766ddeSDavid Rientjes 		count_vm_event(THP_FAULT_FALLBACK);
1260b9bbfbe3SAndrea Arcangeli 		goto out;
1261b9bbfbe3SAndrea Arcangeli 	}
1262b9bbfbe3SAndrea Arcangeli 
126317766ddeSDavid Rientjes 	count_vm_event(THP_FAULT_ALLOC);
126417766ddeSDavid Rientjes 
1265eecc1e42SHugh Dickins 	if (!page)
126693b4796dSKirill A. Shutemov 		clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
126793b4796dSKirill A. Shutemov 	else
126871e3aac0SAndrea Arcangeli 		copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
126971e3aac0SAndrea Arcangeli 	__SetPageUptodate(new_page);
127071e3aac0SAndrea Arcangeli 
12712ec74c3eSSagi Grimberg 	mmun_start = haddr;
12722ec74c3eSSagi Grimberg 	mmun_end   = haddr + HPAGE_PMD_SIZE;
1273bae473a4SKirill A. Shutemov 	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
12742ec74c3eSSagi Grimberg 
127582b0f8c3SJan Kara 	spin_lock(vmf->ptl);
127693b4796dSKirill A. Shutemov 	if (page)
1277ddc58f27SKirill A. Shutemov 		put_page(page);
127882b0f8c3SJan Kara 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
127982b0f8c3SJan Kara 		spin_unlock(vmf->ptl);
1280f627c2f5SKirill A. Shutemov 		mem_cgroup_cancel_charge(new_page, memcg, true);
128171e3aac0SAndrea Arcangeli 		put_page(new_page);
12822ec74c3eSSagi Grimberg 		goto out_mn;
1283b9bbfbe3SAndrea Arcangeli 	} else {
128471e3aac0SAndrea Arcangeli 		pmd_t entry;
12853122359aSKirill A. Shutemov 		entry = mk_huge_pmd(new_page, vma->vm_page_prot);
12863122359aSKirill A. Shutemov 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
128782b0f8c3SJan Kara 		pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
1288d281ee61SKirill A. Shutemov 		page_add_new_anon_rmap(new_page, vma, haddr, true);
1289f627c2f5SKirill A. Shutemov 		mem_cgroup_commit_charge(new_page, memcg, false, true);
129000501b53SJohannes Weiner 		lru_cache_add_active_or_unevictable(new_page, vma);
129182b0f8c3SJan Kara 		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
129282b0f8c3SJan Kara 		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1293eecc1e42SHugh Dickins 		if (!page) {
1294bae473a4SKirill A. Shutemov 			add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
129597ae1749SKirill A. Shutemov 		} else {
1296309381feSSasha Levin 			VM_BUG_ON_PAGE(!PageHead(page), page);
1297d281ee61SKirill A. Shutemov 			page_remove_rmap(page, true);
129871e3aac0SAndrea Arcangeli 			put_page(page);
129993b4796dSKirill A. Shutemov 		}
130071e3aac0SAndrea Arcangeli 		ret |= VM_FAULT_WRITE;
130171e3aac0SAndrea Arcangeli 	}
130282b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
13032ec74c3eSSagi Grimberg out_mn:
1304bae473a4SKirill A. Shutemov 	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
13052ec74c3eSSagi Grimberg out:
13062ec74c3eSSagi Grimberg 	return ret;
130771e3aac0SAndrea Arcangeli out_unlock:
130882b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
130971e3aac0SAndrea Arcangeli 	return ret;
131071e3aac0SAndrea Arcangeli }
131171e3aac0SAndrea Arcangeli 
13128310d48bSKeno Fischer /*
13138310d48bSKeno Fischer  * FOLL_FORCE can write to even unwritable pmd's, but only
13148310d48bSKeno Fischer  * after we've gone through a COW cycle and they are dirty.
13158310d48bSKeno Fischer  */
13168310d48bSKeno Fischer static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
13178310d48bSKeno Fischer {
13188310d48bSKeno Fischer 	return pmd_write(pmd) ||
13198310d48bSKeno Fischer 	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
13208310d48bSKeno Fischer }
13218310d48bSKeno Fischer 
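/*
 * follow_trans_huge_pmd() - get_user_pages() support for huge pmds. Returns
 * the sub-page of the THP backing @addr, handling FOLL_TOUCH, FOLL_MLOCK and
 * FOLL_GET; the caller holds the pmd lock.
 */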
1322b676b293SDavid Rientjes struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
132371e3aac0SAndrea Arcangeli 				   unsigned long addr,
132471e3aac0SAndrea Arcangeli 				   pmd_t *pmd,
132571e3aac0SAndrea Arcangeli 				   unsigned int flags)
132671e3aac0SAndrea Arcangeli {
1327b676b293SDavid Rientjes 	struct mm_struct *mm = vma->vm_mm;
132871e3aac0SAndrea Arcangeli 	struct page *page = NULL;
132971e3aac0SAndrea Arcangeli 
1330c4088ebdSKirill A. Shutemov 	assert_spin_locked(pmd_lockptr(mm, pmd));
133171e3aac0SAndrea Arcangeli 
13328310d48bSKeno Fischer 	if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
133371e3aac0SAndrea Arcangeli 		goto out;
133471e3aac0SAndrea Arcangeli 
133585facf25SKirill A. Shutemov 	/* Avoid dumping huge zero page */
133685facf25SKirill A. Shutemov 	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
133785facf25SKirill A. Shutemov 		return ERR_PTR(-EFAULT);
133885facf25SKirill A. Shutemov 
13392b4847e7SMel Gorman 	/* Full NUMA hinting faults to serialise migration in fault paths */
13408a0516edSMel Gorman 	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
13412b4847e7SMel Gorman 		goto out;
13422b4847e7SMel Gorman 
134371e3aac0SAndrea Arcangeli 	page = pmd_page(*pmd);
1344ca120cf6SDan Williams 	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
13453565fce3SDan Williams 	if (flags & FOLL_TOUCH)
13463565fce3SDan Williams 		touch_pmd(vma, addr, pmd);
1347de60f5f1SEric B Munson 	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
1348e90309c9SKirill A. Shutemov 		/*
1349e90309c9SKirill A. Shutemov 		 * We don't mlock() pte-mapped THPs. This way we can avoid
1350e90309c9SKirill A. Shutemov 		 * leaking mlocked pages into non-VM_LOCKED VMAs.
1351e90309c9SKirill A. Shutemov 		 *
13529a73f61bSKirill A. Shutemov 		 * For anon THP:
13539a73f61bSKirill A. Shutemov 		 *
1354e90309c9SKirill A. Shutemov 		 * In most cases the pmd is the only mapping of the page as we
1355e90309c9SKirill A. Shutemov 		 * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for
1356e90309c9SKirill A. Shutemov 		 * writable private mappings in populate_vma_page_range().
1357e90309c9SKirill A. Shutemov 		 *
1358e90309c9SKirill A. Shutemov 		 * The only scenario where the page is shared here is if we are
1359e90309c9SKirill A. Shutemov 		 * mlocking a read-only mapping shared over fork(). We skip
1360e90309c9SKirill A. Shutemov 		 * mlocking such pages.
13619a73f61bSKirill A. Shutemov 		 *
13629a73f61bSKirill A. Shutemov 		 * For file THP:
13639a73f61bSKirill A. Shutemov 		 *
13649a73f61bSKirill A. Shutemov 		 * We can expect PageDoubleMap() to be stable under page lock:
13659a73f61bSKirill A. Shutemov 		 * for file pages we set it in page_add_file_rmap(), which
13669a73f61bSKirill A. Shutemov 		 * requires page to be locked.
1367e90309c9SKirill A. Shutemov 		 */
13689a73f61bSKirill A. Shutemov 
13699a73f61bSKirill A. Shutemov 		if (PageAnon(page) && compound_mapcount(page) != 1)
13709a73f61bSKirill A. Shutemov 			goto skip_mlock;
13719a73f61bSKirill A. Shutemov 		if (PageDoubleMap(page) || !page->mapping)
13729a73f61bSKirill A. Shutemov 			goto skip_mlock;
13739a73f61bSKirill A. Shutemov 		if (!trylock_page(page))
13749a73f61bSKirill A. Shutemov 			goto skip_mlock;
1375b676b293SDavid Rientjes 		lru_add_drain();
13769a73f61bSKirill A. Shutemov 		if (page->mapping && !PageDoubleMap(page))
1377b676b293SDavid Rientjes 			mlock_vma_page(page);
1378b676b293SDavid Rientjes 		unlock_page(page);
1379b676b293SDavid Rientjes 	}
13809a73f61bSKirill A. Shutemov skip_mlock:
138171e3aac0SAndrea Arcangeli 	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
1382ca120cf6SDan Williams 	VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
138371e3aac0SAndrea Arcangeli 	if (flags & FOLL_GET)
1384ddc58f27SKirill A. Shutemov 		get_page(page);
138571e3aac0SAndrea Arcangeli 
138671e3aac0SAndrea Arcangeli out:
138771e3aac0SAndrea Arcangeli 	return page;
138871e3aac0SAndrea Arcangeli }
138971e3aac0SAndrea Arcangeli 
1390d10e63f2SMel Gorman /* NUMA hinting page fault entry point for trans huge pmds */
139182b0f8c3SJan Kara int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
1392d10e63f2SMel Gorman {
139382b0f8c3SJan Kara 	struct vm_area_struct *vma = vmf->vma;
1394b8916634SMel Gorman 	struct anon_vma *anon_vma = NULL;
1395b32967ffSMel Gorman 	struct page *page;
139682b0f8c3SJan Kara 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
13978191acbdSMel Gorman 	int page_nid = -1, this_nid = numa_node_id();
139890572890SPeter Zijlstra 	int target_nid, last_cpupid = -1;
13998191acbdSMel Gorman 	bool page_locked;
14008191acbdSMel Gorman 	bool migrated = false;
1401b191f9b1SMel Gorman 	bool was_writable;
14026688cc05SPeter Zijlstra 	int flags = 0;
1403d10e63f2SMel Gorman 
140482b0f8c3SJan Kara 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
140582b0f8c3SJan Kara 	if (unlikely(!pmd_same(pmd, *vmf->pmd)))
1406d10e63f2SMel Gorman 		goto out_unlock;
1407d10e63f2SMel Gorman 
1408de466bd6SMel Gorman 	/*
1409de466bd6SMel Gorman 	 * If there are potential migrations, wait for completion and retry
1410de466bd6SMel Gorman 	 * without disrupting NUMA hinting information. Do not relock and
1411de466bd6SMel Gorman 	 * check_same as the page may no longer be mapped.
1412de466bd6SMel Gorman 	 */
141382b0f8c3SJan Kara 	if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
141482b0f8c3SJan Kara 		page = pmd_page(*vmf->pmd);
141582b0f8c3SJan Kara 		spin_unlock(vmf->ptl);
14165d833062SMel Gorman 		wait_on_page_locked(page);
1417de466bd6SMel Gorman 		goto out;
1418de466bd6SMel Gorman 	}
1419de466bd6SMel Gorman 
1420d10e63f2SMel Gorman 	page = pmd_page(pmd);
1421a1a46184SMel Gorman 	BUG_ON(is_huge_zero_page(page));
14228191acbdSMel Gorman 	page_nid = page_to_nid(page);
142390572890SPeter Zijlstra 	last_cpupid = page_cpupid_last(page);
142403c5a6e1SMel Gorman 	count_vm_numa_event(NUMA_HINT_FAULTS);
142504bb2f94SRik van Riel 	if (page_nid == this_nid) {
142603c5a6e1SMel Gorman 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
142704bb2f94SRik van Riel 		flags |= TNF_FAULT_LOCAL;
142804bb2f94SRik van Riel 	}
14294daae3b4SMel Gorman 
1430bea66fbdSMel Gorman 	/* See similar comment in do_numa_page for explanation */
1431288bc549SAneesh Kumar K.V 	if (!pmd_savedwrite(pmd))
14326688cc05SPeter Zijlstra 		flags |= TNF_NO_GROUP;
14336688cc05SPeter Zijlstra 
14346688cc05SPeter Zijlstra 	/*
1435ff9042b1SMel Gorman 	 * Acquire the page lock to serialise THP migrations but avoid dropping
1436ff9042b1SMel Gorman 	 * page_table_lock if at all possible
1437ff9042b1SMel Gorman 	 */
1438b8916634SMel Gorman 	page_locked = trylock_page(page);
1439b8916634SMel Gorman 	target_nid = mpol_misplaced(page, vma, haddr);
1440b8916634SMel Gorman 	if (target_nid == -1) {
1441b8916634SMel Gorman 		/* If the page was locked, there are no parallel migrations */
1442a54a407fSMel Gorman 		if (page_locked)
1443b8916634SMel Gorman 			goto clear_pmdnuma;
14442b4847e7SMel Gorman 	}
1445cbee9f88SPeter Zijlstra 
1446de466bd6SMel Gorman 	/* Migration could have started since the pmd_trans_migrating check */
14472b4847e7SMel Gorman 	if (!page_locked) {
144882b0f8c3SJan Kara 		spin_unlock(vmf->ptl);
1449b8916634SMel Gorman 		wait_on_page_locked(page);
1450a54a407fSMel Gorman 		page_nid = -1;
1451b8916634SMel Gorman 		goto out;
1452b8916634SMel Gorman 	}
1453b8916634SMel Gorman 
14542b4847e7SMel Gorman 	/*
14552b4847e7SMel Gorman 	 * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
14562b4847e7SMel Gorman 	 * to serialise splits.
14572b4847e7SMel Gorman 	 */
1458b8916634SMel Gorman 	get_page(page);
145982b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
1460b8916634SMel Gorman 	anon_vma = page_lock_anon_vma_read(page);
1461b32967ffSMel Gorman 
1462c69307d5SPeter Zijlstra 	/* Confirm the PMD did not change while page_table_lock was released */
146382b0f8c3SJan Kara 	spin_lock(vmf->ptl);
146482b0f8c3SJan Kara 	if (unlikely(!pmd_same(pmd, *vmf->pmd))) {
1465b32967ffSMel Gorman 		unlock_page(page);
1466b32967ffSMel Gorman 		put_page(page);
1467a54a407fSMel Gorman 		page_nid = -1;
1468b32967ffSMel Gorman 		goto out_unlock;
1469b32967ffSMel Gorman 	}
1470ff9042b1SMel Gorman 
1471c3a489caSMel Gorman 	/* Bail if we fail to protect against THP splits for any reason */
1472c3a489caSMel Gorman 	if (unlikely(!anon_vma)) {
1473c3a489caSMel Gorman 		put_page(page);
1474c3a489caSMel Gorman 		page_nid = -1;
1475c3a489caSMel Gorman 		goto clear_pmdnuma;
1476c3a489caSMel Gorman 	}
1477c3a489caSMel Gorman 
1478a54a407fSMel Gorman 	/*
1479a54a407fSMel Gorman 	 * Migrate the THP to the requested node, returns with page unlocked
14808a0516edSMel Gorman 	 * and access rights restored.
1481a54a407fSMel Gorman 	 */
148282b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
1483bae473a4SKirill A. Shutemov 	migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
148482b0f8c3SJan Kara 				vmf->pmd, pmd, vmf->address, page, target_nid);
14856688cc05SPeter Zijlstra 	if (migrated) {
14866688cc05SPeter Zijlstra 		flags |= TNF_MIGRATED;
14878191acbdSMel Gorman 		page_nid = target_nid;
1488074c2381SMel Gorman 	} else
1489074c2381SMel Gorman 		flags |= TNF_MIGRATE_FAIL;
1490b32967ffSMel Gorman 
14918191acbdSMel Gorman 	goto out;
14924daae3b4SMel Gorman clear_pmdnuma:
1493a54a407fSMel Gorman 	BUG_ON(!PageLocked(page));
1494288bc549SAneesh Kumar K.V 	was_writable = pmd_savedwrite(pmd);
14954d942466SMel Gorman 	pmd = pmd_modify(pmd, vma->vm_page_prot);
1496b7b04004SMel Gorman 	pmd = pmd_mkyoung(pmd);
1497b191f9b1SMel Gorman 	if (was_writable)
1498b191f9b1SMel Gorman 		pmd = pmd_mkwrite(pmd);
149982b0f8c3SJan Kara 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
150082b0f8c3SJan Kara 	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1501a54a407fSMel Gorman 	unlock_page(page);
1502d10e63f2SMel Gorman out_unlock:
150382b0f8c3SJan Kara 	spin_unlock(vmf->ptl);
1504b8916634SMel Gorman 
1505b8916634SMel Gorman out:
1506b8916634SMel Gorman 	if (anon_vma)
1507b8916634SMel Gorman 		page_unlock_anon_vma_read(anon_vma);
1508b8916634SMel Gorman 
15098191acbdSMel Gorman 	if (page_nid != -1)
151082b0f8c3SJan Kara 		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
15119a8b300fSAneesh Kumar K.V 				flags);
15128191acbdSMel Gorman 
1513d10e63f2SMel Gorman 	return 0;
1514d10e63f2SMel Gorman }
1515d10e63f2SMel Gorman 
1516319904adSHuang Ying /*
1517319904adSHuang Ying  * Return true if we do MADV_FREE successfully on entire pmd page.
1518319904adSHuang Ying  * Otherwise, return false.
1519319904adSHuang Ying  */
1520319904adSHuang Ying bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1521b8d3c4c3SMinchan Kim 		pmd_t *pmd, unsigned long addr, unsigned long next)
1522b8d3c4c3SMinchan Kim {
1523b8d3c4c3SMinchan Kim 	spinlock_t *ptl;
1524b8d3c4c3SMinchan Kim 	pmd_t orig_pmd;
1525b8d3c4c3SMinchan Kim 	struct page *page;
1526b8d3c4c3SMinchan Kim 	struct mm_struct *mm = tlb->mm;
1527319904adSHuang Ying 	bool ret = false;
1528b8d3c4c3SMinchan Kim 
152907e32661SAneesh Kumar K.V 	tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE);
153007e32661SAneesh Kumar K.V 
1531b6ec57f4SKirill A. Shutemov 	ptl = pmd_trans_huge_lock(pmd, vma);
1532b6ec57f4SKirill A. Shutemov 	if (!ptl)
153325eedabeSLinus Torvalds 		goto out_unlocked;
1534b8d3c4c3SMinchan Kim 
1535b8d3c4c3SMinchan Kim 	orig_pmd = *pmd;
1536319904adSHuang Ying 	if (is_huge_zero_pmd(orig_pmd))
1537b8d3c4c3SMinchan Kim 		goto out;
1538b8d3c4c3SMinchan Kim 
1539b8d3c4c3SMinchan Kim 	page = pmd_page(orig_pmd);
1540b8d3c4c3SMinchan Kim 	/*
1541b8d3c4c3SMinchan Kim 	 * If other processes are mapping this page, we cannot discard
1542b8d3c4c3SMinchan Kim 	 * the page unless they all do MADV_FREE, so let's skip the page.
1543b8d3c4c3SMinchan Kim 	 */
1544b8d3c4c3SMinchan Kim 	if (page_mapcount(page) != 1)
1545b8d3c4c3SMinchan Kim 		goto out;
1546b8d3c4c3SMinchan Kim 
1547b8d3c4c3SMinchan Kim 	if (!trylock_page(page))
1548b8d3c4c3SMinchan Kim 		goto out;
1549b8d3c4c3SMinchan Kim 
1550b8d3c4c3SMinchan Kim 	/*
1551b8d3c4c3SMinchan Kim 	 * If the user wants to discard only part of the THP, split it so
1552b8d3c4c3SMinchan Kim 	 * MADV_FREE will deactivate just those pages.
1553b8d3c4c3SMinchan Kim 	 */
1554b8d3c4c3SMinchan Kim 	if (next - addr != HPAGE_PMD_SIZE) {
1555b8d3c4c3SMinchan Kim 		get_page(page);
1556b8d3c4c3SMinchan Kim 		spin_unlock(ptl);
15579818b8cdSHuang Ying 		split_huge_page(page);
1558b8d3c4c3SMinchan Kim 		put_page(page);
1559b8d3c4c3SMinchan Kim 		unlock_page(page);
1560b8d3c4c3SMinchan Kim 		goto out_unlocked;
1561b8d3c4c3SMinchan Kim 	}
1562b8d3c4c3SMinchan Kim 
1563b8d3c4c3SMinchan Kim 	if (PageDirty(page))
1564b8d3c4c3SMinchan Kim 		ClearPageDirty(page);
1565b8d3c4c3SMinchan Kim 	unlock_page(page);
1566b8d3c4c3SMinchan Kim 
1567b8d3c4c3SMinchan Kim 	if (PageActive(page))
1568b8d3c4c3SMinchan Kim 		deactivate_page(page);
1569b8d3c4c3SMinchan Kim 
1570b8d3c4c3SMinchan Kim 	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
1571b8d3c4c3SMinchan Kim 		orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
1572b8d3c4c3SMinchan Kim 			tlb->fullmm);
1573b8d3c4c3SMinchan Kim 		orig_pmd = pmd_mkold(orig_pmd);
1574b8d3c4c3SMinchan Kim 		orig_pmd = pmd_mkclean(orig_pmd);
1575b8d3c4c3SMinchan Kim 
1576b8d3c4c3SMinchan Kim 		set_pmd_at(mm, addr, pmd, orig_pmd);
1577b8d3c4c3SMinchan Kim 		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1578b8d3c4c3SMinchan Kim 	}
1579319904adSHuang Ying 	ret = true;
1580b8d3c4c3SMinchan Kim out:
1581b8d3c4c3SMinchan Kim 	spin_unlock(ptl);
1582b8d3c4c3SMinchan Kim out_unlocked:
1583b8d3c4c3SMinchan Kim 	return ret;
1584b8d3c4c3SMinchan Kim }
1585b8d3c4c3SMinchan Kim 
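/* Free the page table page deposited for this pmd and drop the accounting. */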
1586953c66c2SAneesh Kumar K.V static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
1587953c66c2SAneesh Kumar K.V {
1588953c66c2SAneesh Kumar K.V 	pgtable_t pgtable;
1589953c66c2SAneesh Kumar K.V 
1590953c66c2SAneesh Kumar K.V 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1591953c66c2SAneesh Kumar K.V 	pte_free(mm, pgtable);
1592953c66c2SAneesh Kumar K.V 	atomic_long_dec(&mm->nr_ptes);
1593953c66c2SAneesh Kumar K.V }
1594953c66c2SAneesh Kumar K.V 
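/*
 * zap_huge_pmd() - tear down one huge pmd during unmap. Returns 1 if a huge
 * entry (zero page, DAX or regular THP) was cleared, 0 if the pmd was not
 * huge and the caller must walk the pte level instead.
 */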
159571e3aac0SAndrea Arcangeli int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1596f21760b1SShaohua Li 		 pmd_t *pmd, unsigned long addr)
159771e3aac0SAndrea Arcangeli {
1598f5c8ad47SDavid Miller 	pmd_t orig_pmd;
1599da146769SKirill A. Shutemov 	spinlock_t *ptl;
1600da146769SKirill A. Shutemov 
160107e32661SAneesh Kumar K.V 	tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE);
160207e32661SAneesh Kumar K.V 
1603b6ec57f4SKirill A. Shutemov 	ptl = __pmd_trans_huge_lock(pmd, vma);
1604b6ec57f4SKirill A. Shutemov 	if (!ptl)
1605da146769SKirill A. Shutemov 		return 0;
1606a6bf2bb0SAneesh Kumar K.V 	/*
1607a6bf2bb0SAneesh Kumar K.V 	 * For architectures like ppc64 we look at deposited pgtable
16088809aa2dSAneesh Kumar K.V 	 * when calling pmdp_huge_get_and_clear. So do the
1609a6bf2bb0SAneesh Kumar K.V 	 * pgtable_trans_huge_withdraw after finishing pmdp related
1610a6bf2bb0SAneesh Kumar K.V 	 * operations.
1611a6bf2bb0SAneesh Kumar K.V 	 */
16128809aa2dSAneesh Kumar K.V 	orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
1613fcbe08d6SMartin Schwidefsky 			tlb->fullmm);
1614f21760b1SShaohua Li 	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
16154897c765SMatthew Wilcox 	if (vma_is_dax(vma)) {
16164897c765SMatthew Wilcox 		spin_unlock(ptl);
1617da146769SKirill A. Shutemov 		if (is_huge_zero_pmd(orig_pmd))
1618c0f2e176SAneesh Kumar K.V 			tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
1619da146769SKirill A. Shutemov 	} else if (is_huge_zero_pmd(orig_pmd)) {
1620da146769SKirill A. Shutemov 		pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
1621e1f56c89SKirill A. Shutemov 		atomic_long_dec(&tlb->mm->nr_ptes);
1622bf929152SKirill A. Shutemov 		spin_unlock(ptl);
1623c0f2e176SAneesh Kumar K.V 		tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
1624479f0abbSKirill A. Shutemov 	} else {
16254897c765SMatthew Wilcox 		struct page *page = pmd_page(orig_pmd);
1626d281ee61SKirill A. Shutemov 		page_remove_rmap(page, true);
1627309381feSSasha Levin 		VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1628309381feSSasha Levin 		VM_BUG_ON_PAGE(!PageHead(page), page);
1629b5072380SKirill A. Shutemov 		if (PageAnon(page)) {
1630b5072380SKirill A. Shutemov 			pgtable_t pgtable;
1631b5072380SKirill A. Shutemov 			pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
1632b5072380SKirill A. Shutemov 			pte_free(tlb->mm, pgtable);
1633e1f56c89SKirill A. Shutemov 			atomic_long_dec(&tlb->mm->nr_ptes);
1634b5072380SKirill A. Shutemov 			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1635b5072380SKirill A. Shutemov 		} else {
1636953c66c2SAneesh Kumar K.V 			if (arch_needs_pgtable_deposit())
1637953c66c2SAneesh Kumar K.V 				zap_deposited_table(tlb->mm, pmd);
1638b5072380SKirill A. Shutemov 			add_mm_counter(tlb->mm, MM_FILEPAGES, -HPAGE_PMD_NR);
1639b5072380SKirill A. Shutemov 		}
1640bf929152SKirill A. Shutemov 		spin_unlock(ptl);
1641e77b0852SAneesh Kumar K.V 		tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
1642479f0abbSKirill A. Shutemov 	}
1643da146769SKirill A. Shutemov 	return 1;
164471e3aac0SAndrea Arcangeli }
164571e3aac0SAndrea Arcangeli 
16461dd38b6cSAneesh Kumar K.V #ifndef pmd_move_must_withdraw
16471dd38b6cSAneesh Kumar K.V static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
16481dd38b6cSAneesh Kumar K.V 					 spinlock_t *old_pmd_ptl,
16491dd38b6cSAneesh Kumar K.V 					 struct vm_area_struct *vma)
16501dd38b6cSAneesh Kumar K.V {
16511dd38b6cSAneesh Kumar K.V 	/*
16521dd38b6cSAneesh Kumar K.V 	 * With the split pmd lock we also need to move the preallocated
16531dd38b6cSAneesh Kumar K.V 	 * PTE page table if new_pmd is on a different PMD page table.
16541dd38b6cSAneesh Kumar K.V 	 *
16551dd38b6cSAneesh Kumar K.V 	 * We also don't deposit and withdraw tables for file pages.
16561dd38b6cSAneesh Kumar K.V 	 */
16571dd38b6cSAneesh Kumar K.V 	return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
16581dd38b6cSAneesh Kumar K.V }
16591dd38b6cSAneesh Kumar K.V #endif
16601dd38b6cSAneesh Kumar K.V 
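/*
 * move_huge_pmd() - mremap() support: move a huge pmd entry to new_addr
 * without splitting it. Returns false when the addresses are not
 * HPAGE_PMD_SIZE aligned or the pmd is not huge, so the caller can fall
 * back to moving individual ptes.
 */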
1661bf8616d5SHugh Dickins bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
166237a1c49aSAndrea Arcangeli 		  unsigned long new_addr, unsigned long old_end,
16635d190420SAaron Lu 		  pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
166437a1c49aSAndrea Arcangeli {
1665bf929152SKirill A. Shutemov 	spinlock_t *old_ptl, *new_ptl;
166637a1c49aSAndrea Arcangeli 	pmd_t pmd;
166737a1c49aSAndrea Arcangeli 	struct mm_struct *mm = vma->vm_mm;
16685d190420SAaron Lu 	bool force_flush = false;
166937a1c49aSAndrea Arcangeli 
167037a1c49aSAndrea Arcangeli 	if ((old_addr & ~HPAGE_PMD_MASK) ||
167137a1c49aSAndrea Arcangeli 	    (new_addr & ~HPAGE_PMD_MASK) ||
1672bf8616d5SHugh Dickins 	    old_end - old_addr < HPAGE_PMD_SIZE)
16734b471e88SKirill A. Shutemov 		return false;
167437a1c49aSAndrea Arcangeli 
167537a1c49aSAndrea Arcangeli 	/*
167637a1c49aSAndrea Arcangeli 	 * The destination pmd shouldn't be established, free_pgtables()
167737a1c49aSAndrea Arcangeli 	 * should have released it.
167837a1c49aSAndrea Arcangeli 	 */
167937a1c49aSAndrea Arcangeli 	if (WARN_ON(!pmd_none(*new_pmd))) {
168037a1c49aSAndrea Arcangeli 		VM_BUG_ON(pmd_trans_huge(*new_pmd));
16814b471e88SKirill A. Shutemov 		return false;
168237a1c49aSAndrea Arcangeli 	}
168337a1c49aSAndrea Arcangeli 
1684bf929152SKirill A. Shutemov 	/*
1685bf929152SKirill A. Shutemov 	 * We don't have to worry about the ordering of src and dst
1686bf929152SKirill A. Shutemov 	 * ptlocks because exclusive mmap_sem prevents deadlock.
1687bf929152SKirill A. Shutemov 	 */
1688b6ec57f4SKirill A. Shutemov 	old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
1689b6ec57f4SKirill A. Shutemov 	if (old_ptl) {
1690bf929152SKirill A. Shutemov 		new_ptl = pmd_lockptr(mm, new_pmd);
1691bf929152SKirill A. Shutemov 		if (new_ptl != old_ptl)
1692bf929152SKirill A. Shutemov 			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
16938809aa2dSAneesh Kumar K.V 		pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1694a2ce2666SAaron Lu 		if (pmd_present(pmd) && pmd_dirty(pmd))
1695a2ce2666SAaron Lu 			force_flush = true;
169637a1c49aSAndrea Arcangeli 		VM_BUG_ON(!pmd_none(*new_pmd));
16973592806cSKirill A. Shutemov 
16981dd38b6cSAneesh Kumar K.V 		if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
1699b3084f4dSAneesh Kumar K.V 			pgtable_t pgtable;
17003592806cSKirill A. Shutemov 			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
17013592806cSKirill A. Shutemov 			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
17023592806cSKirill A. Shutemov 		}
1703b3084f4dSAneesh Kumar K.V 		set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
1704b3084f4dSAneesh Kumar K.V 		if (new_ptl != old_ptl)
1705b3084f4dSAneesh Kumar K.V 			spin_unlock(new_ptl);
17065d190420SAaron Lu 		if (force_flush)
17075d190420SAaron Lu 			flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
17085d190420SAaron Lu 		else
17095d190420SAaron Lu 			*need_flush = true;
1710bf929152SKirill A. Shutemov 		spin_unlock(old_ptl);
17114b471e88SKirill A. Shutemov 		return true;
171237a1c49aSAndrea Arcangeli 	}
17134b471e88SKirill A. Shutemov 	return false;
171437a1c49aSAndrea Arcangeli }
171537a1c49aSAndrea Arcangeli 
1716f123d74aSMel Gorman /*
1717f123d74aSMel Gorman  * Returns
1718f123d74aSMel Gorman  *  - 0 if PMD could not be locked
1719f123d74aSMel Gorman  *  - 1 if PMD was locked but protections are unchanged and TLB flush is unnecessary
1720f123d74aSMel Gorman  *  - HPAGE_PMD_NR if protections changed and TLB flush is necessary
1721f123d74aSMel Gorman  */
1722cd7548abSJohannes Weiner int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1723e944fd67SMel Gorman 		unsigned long addr, pgprot_t newprot, int prot_numa)
1724cd7548abSJohannes Weiner {
1725cd7548abSJohannes Weiner 	struct mm_struct *mm = vma->vm_mm;
1726bf929152SKirill A. Shutemov 	spinlock_t *ptl;
1727cd7548abSJohannes Weiner 	int ret = 0;
1728cd7548abSJohannes Weiner 
1729b6ec57f4SKirill A. Shutemov 	ptl = __pmd_trans_huge_lock(pmd, vma);
1730b6ec57f4SKirill A. Shutemov 	if (ptl) {
1731cd7548abSJohannes Weiner 		pmd_t entry;
1732b191f9b1SMel Gorman 		bool preserve_write = prot_numa && pmd_write(*pmd);
1733ba68bc01SMel Gorman 		ret = 1;
1734e944fd67SMel Gorman 
1735e944fd67SMel Gorman 		/*
1736e944fd67SMel Gorman 		 * Avoid trapping faults against the zero page. The read-only
1737e944fd67SMel Gorman 		 * data is likely to be read-cached on the local CPU and
1738e944fd67SMel Gorman 		 * local/remote hits to the zero page are not interesting.
1739e944fd67SMel Gorman 		 */
1740e944fd67SMel Gorman 		if (prot_numa && is_huge_zero_pmd(*pmd)) {
1741e944fd67SMel Gorman 			spin_unlock(ptl);
1742ba68bc01SMel Gorman 			return ret;
1743e944fd67SMel Gorman 		}
1744e944fd67SMel Gorman 
174510c1045fSMel Gorman 		if (!prot_numa || !pmd_protnone(*pmd)) {
17468809aa2dSAneesh Kumar K.V 			entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
1747cd7548abSJohannes Weiner 			entry = pmd_modify(entry, newprot);
1748b191f9b1SMel Gorman 			if (preserve_write)
1749288bc549SAneesh Kumar K.V 				entry = pmd_mk_savedwrite(entry);
1750f123d74aSMel Gorman 			ret = HPAGE_PMD_NR;
175156eecdb9SAneesh Kumar K.V 			set_pmd_at(mm, addr, pmd, entry);
1752b237adedSKirill A. Shutemov 			BUG_ON(vma_is_anonymous(vma) && !preserve_write &&
1753b237adedSKirill A. Shutemov 					pmd_write(entry));
175410c1045fSMel Gorman 		}
1755bf929152SKirill A. Shutemov 		spin_unlock(ptl);
1756cd7548abSJohannes Weiner 	}
1757cd7548abSJohannes Weiner 
1758cd7548abSJohannes Weiner 	return ret;
1759cd7548abSJohannes Weiner }
1760cd7548abSJohannes Weiner 
1761025c5b24SNaoya Horiguchi /*
17628f19b0c0SHuang Ying  * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
1763025c5b24SNaoya Horiguchi  *
17648f19b0c0SHuang Ying  * Note that if it returns page table lock pointer, this routine returns without
17658f19b0c0SHuang Ying  * unlocking page table lock. So callers must unlock it.
1766025c5b24SNaoya Horiguchi  */
1767b6ec57f4SKirill A. Shutemov spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
1768025c5b24SNaoya Horiguchi {
1769b6ec57f4SKirill A. Shutemov 	spinlock_t *ptl;
1770b6ec57f4SKirill A. Shutemov 	ptl = pmd_lock(vma->vm_mm, pmd);
17715c7fb56eSDan Williams 	if (likely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
1772b6ec57f4SKirill A. Shutemov 		return ptl;
1773b6ec57f4SKirill A. Shutemov 	spin_unlock(ptl);
1774b6ec57f4SKirill A. Shutemov 	return NULL;
1775025c5b24SNaoya Horiguchi }
1776025c5b24SNaoya Horiguchi 
1777a00cc7d9SMatthew Wilcox /*
1778a00cc7d9SMatthew Wilcox  * Returns page table lock pointer if a given pud maps a thp, NULL otherwise.
1779a00cc7d9SMatthew Wilcox  *
1780a00cc7d9SMatthew Wilcox  * Note that if it returns page table lock pointer, this routine returns without
1781a00cc7d9SMatthew Wilcox  * table lock. So callers must unlock it.
1782a00cc7d9SMatthew Wilcox  */
1783a00cc7d9SMatthew Wilcox spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
1784a00cc7d9SMatthew Wilcox {
1785a00cc7d9SMatthew Wilcox 	spinlock_t *ptl;
1786a00cc7d9SMatthew Wilcox 
1787a00cc7d9SMatthew Wilcox 	ptl = pud_lock(vma->vm_mm, pud);
1788a00cc7d9SMatthew Wilcox 	if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
1789a00cc7d9SMatthew Wilcox 		return ptl;
1790a00cc7d9SMatthew Wilcox 	spin_unlock(ptl);
1791a00cc7d9SMatthew Wilcox 	return NULL;
1792a00cc7d9SMatthew Wilcox }
1793a00cc7d9SMatthew Wilcox 
1794a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1795a00cc7d9SMatthew Wilcox int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
1796a00cc7d9SMatthew Wilcox 		 pud_t *pud, unsigned long addr)
1797a00cc7d9SMatthew Wilcox {
1798a00cc7d9SMatthew Wilcox 	pud_t orig_pud;
1799a00cc7d9SMatthew Wilcox 	spinlock_t *ptl;
1800a00cc7d9SMatthew Wilcox 
1801a00cc7d9SMatthew Wilcox 	ptl = __pud_trans_huge_lock(pud, vma);
1802a00cc7d9SMatthew Wilcox 	if (!ptl)
1803a00cc7d9SMatthew Wilcox 		return 0;
1804a00cc7d9SMatthew Wilcox 	/*
1805a00cc7d9SMatthew Wilcox 	 * For architectures like ppc64 we look at deposited pgtable
1806a00cc7d9SMatthew Wilcox 	 * when calling pudp_huge_get_and_clear. So do the
1807a00cc7d9SMatthew Wilcox 	 * pgtable_trans_huge_withdraw after finishing pudp related
1808a00cc7d9SMatthew Wilcox 	 * operations.
1809a00cc7d9SMatthew Wilcox 	 */
1810a00cc7d9SMatthew Wilcox 	orig_pud = pudp_huge_get_and_clear_full(tlb->mm, addr, pud,
1811a00cc7d9SMatthew Wilcox 			tlb->fullmm);
1812a00cc7d9SMatthew Wilcox 	tlb_remove_pud_tlb_entry(tlb, pud, addr);
1813a00cc7d9SMatthew Wilcox 	if (vma_is_dax(vma)) {
1814a00cc7d9SMatthew Wilcox 		spin_unlock(ptl);
1815a00cc7d9SMatthew Wilcox 		/* No zero page support yet */
1816a00cc7d9SMatthew Wilcox 	} else {
1817a00cc7d9SMatthew Wilcox 		/* No support for anonymous PUD pages yet */
1818a00cc7d9SMatthew Wilcox 		BUG();
1819a00cc7d9SMatthew Wilcox 	}
1820a00cc7d9SMatthew Wilcox 	return 1;
1821a00cc7d9SMatthew Wilcox }
1822a00cc7d9SMatthew Wilcox 
1823a00cc7d9SMatthew Wilcox static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
1824a00cc7d9SMatthew Wilcox 		unsigned long haddr)
1825a00cc7d9SMatthew Wilcox {
1826a00cc7d9SMatthew Wilcox 	VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
1827a00cc7d9SMatthew Wilcox 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
1828a00cc7d9SMatthew Wilcox 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
1829a00cc7d9SMatthew Wilcox 	VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
1830a00cc7d9SMatthew Wilcox 
1831a00cc7d9SMatthew Wilcox 	count_vm_event(THP_SPLIT_PMD);
1832a00cc7d9SMatthew Wilcox 
1833a00cc7d9SMatthew Wilcox 	pudp_huge_clear_flush_notify(vma, haddr, pud);
1834a00cc7d9SMatthew Wilcox }
1835a00cc7d9SMatthew Wilcox 
1836a00cc7d9SMatthew Wilcox void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
1837a00cc7d9SMatthew Wilcox 		unsigned long address)
1838a00cc7d9SMatthew Wilcox {
1839a00cc7d9SMatthew Wilcox 	spinlock_t *ptl;
1840a00cc7d9SMatthew Wilcox 	struct mm_struct *mm = vma->vm_mm;
1841a00cc7d9SMatthew Wilcox 	unsigned long haddr = address & HPAGE_PUD_MASK;
1842a00cc7d9SMatthew Wilcox 
1843a00cc7d9SMatthew Wilcox 	mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PUD_SIZE);
1844a00cc7d9SMatthew Wilcox 	ptl = pud_lock(mm, pud);
1845a00cc7d9SMatthew Wilcox 	if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
1846a00cc7d9SMatthew Wilcox 		goto out;
1847a00cc7d9SMatthew Wilcox 	__split_huge_pud_locked(vma, pud, haddr);
1848a00cc7d9SMatthew Wilcox 
1849a00cc7d9SMatthew Wilcox out:
1850a00cc7d9SMatthew Wilcox 	spin_unlock(ptl);
1851a00cc7d9SMatthew Wilcox 	mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PUD_SIZE);
1852a00cc7d9SMatthew Wilcox }
1853a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1854a00cc7d9SMatthew Wilcox 
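/*
 * Split a pmd mapping the huge zero page: repopulate it with a page table
 * whose ptes all point at the small zero page, so no data needs copying.
 */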
1855eef1b3baSKirill A. Shutemov static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
1856eef1b3baSKirill A. Shutemov 		unsigned long haddr, pmd_t *pmd)
1857eef1b3baSKirill A. Shutemov {
1858eef1b3baSKirill A. Shutemov 	struct mm_struct *mm = vma->vm_mm;
1859eef1b3baSKirill A. Shutemov 	pgtable_t pgtable;
1860eef1b3baSKirill A. Shutemov 	pmd_t _pmd;
1861eef1b3baSKirill A. Shutemov 	int i;
1862eef1b3baSKirill A. Shutemov 
1863eef1b3baSKirill A. Shutemov 	/* leave pmd empty until pte is filled */
1864eef1b3baSKirill A. Shutemov 	pmdp_huge_clear_flush_notify(vma, haddr, pmd);
1865eef1b3baSKirill A. Shutemov 
1866eef1b3baSKirill A. Shutemov 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1867eef1b3baSKirill A. Shutemov 	pmd_populate(mm, &_pmd, pgtable);
1868eef1b3baSKirill A. Shutemov 
1869eef1b3baSKirill A. Shutemov 	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
1870eef1b3baSKirill A. Shutemov 		pte_t *pte, entry;
1871eef1b3baSKirill A. Shutemov 		entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
1872eef1b3baSKirill A. Shutemov 		entry = pte_mkspecial(entry);
1873eef1b3baSKirill A. Shutemov 		pte = pte_offset_map(&_pmd, haddr);
1874eef1b3baSKirill A. Shutemov 		VM_BUG_ON(!pte_none(*pte));
1875eef1b3baSKirill A. Shutemov 		set_pte_at(mm, haddr, pte, entry);
1876eef1b3baSKirill A. Shutemov 		pte_unmap(pte);
1877eef1b3baSKirill A. Shutemov 	}
1878eef1b3baSKirill A. Shutemov 	smp_wmb(); /* make pte visible before pmd */
1879eef1b3baSKirill A. Shutemov 	pmd_populate(mm, pmd, pgtable);
1880eef1b3baSKirill A. Shutemov }
1881eef1b3baSKirill A. Shutemov 
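/*
 * Split one huge pmd while holding the pmd lock: file-backed pmds are simply
 * zapped, the huge zero page is remapped through __split_huge_zero_page_pmd(),
 * and anonymous THPs are remapped with per-page ptes. With freeze == true the
 * new ptes are migration entries so the compound page can be split afterwards.
 */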
1882eef1b3baSKirill A. Shutemov static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
1883ba988280SKirill A. Shutemov 		unsigned long haddr, bool freeze)
1884eef1b3baSKirill A. Shutemov {
1885eef1b3baSKirill A. Shutemov 	struct mm_struct *mm = vma->vm_mm;
1886eef1b3baSKirill A. Shutemov 	struct page *page;
1887eef1b3baSKirill A. Shutemov 	pgtable_t pgtable;
1888eef1b3baSKirill A. Shutemov 	pmd_t _pmd;
1889804dd150SAndrea Arcangeli 	bool young, write, dirty, soft_dirty;
18902ac015e2SKirill A. Shutemov 	unsigned long addr;
1891eef1b3baSKirill A. Shutemov 	int i;
1892eef1b3baSKirill A. Shutemov 
1893eef1b3baSKirill A. Shutemov 	VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
1894eef1b3baSKirill A. Shutemov 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
1895eef1b3baSKirill A. Shutemov 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
18965c7fb56eSDan Williams 	VM_BUG_ON(!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd));
1897eef1b3baSKirill A. Shutemov 
1898eef1b3baSKirill A. Shutemov 	count_vm_event(THP_SPLIT_PMD);
1899eef1b3baSKirill A. Shutemov 
1900d21b9e57SKirill A. Shutemov 	if (!vma_is_anonymous(vma)) {
1901d21b9e57SKirill A. Shutemov 		_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
1902953c66c2SAneesh Kumar K.V 		/*
1903953c66c2SAneesh Kumar K.V 		 * We are going to unmap this huge page. So
1904953c66c2SAneesh Kumar K.V 		 * just go ahead and zap it
1905953c66c2SAneesh Kumar K.V 		 */
1906953c66c2SAneesh Kumar K.V 		if (arch_needs_pgtable_deposit())
1907953c66c2SAneesh Kumar K.V 			zap_deposited_table(mm, pmd);
1908d21b9e57SKirill A. Shutemov 		if (vma_is_dax(vma))
1909d21b9e57SKirill A. Shutemov 			return;
1910d21b9e57SKirill A. Shutemov 		page = pmd_page(_pmd);
1911d21b9e57SKirill A. Shutemov 		if (!PageReferenced(page) && pmd_young(_pmd))
1912d21b9e57SKirill A. Shutemov 			SetPageReferenced(page);
1913d21b9e57SKirill A. Shutemov 		page_remove_rmap(page, true);
1914d21b9e57SKirill A. Shutemov 		put_page(page);
1915d21b9e57SKirill A. Shutemov 		add_mm_counter(mm, MM_FILEPAGES, -HPAGE_PMD_NR);
1916eef1b3baSKirill A. Shutemov 		return;
1917eef1b3baSKirill A. Shutemov 	} else if (is_huge_zero_pmd(*pmd)) {
1918eef1b3baSKirill A. Shutemov 		return __split_huge_zero_page_pmd(vma, haddr, pmd);
1919eef1b3baSKirill A. Shutemov 	}
1920eef1b3baSKirill A. Shutemov 
1921eef1b3baSKirill A. Shutemov 	page = pmd_page(*pmd);
1922eef1b3baSKirill A. Shutemov 	VM_BUG_ON_PAGE(!page_count(page), page);
1923fe896d18SJoonsoo Kim 	page_ref_add(page, HPAGE_PMD_NR - 1);
1924eef1b3baSKirill A. Shutemov 	write = pmd_write(*pmd);
1925eef1b3baSKirill A. Shutemov 	young = pmd_young(*pmd);
1926b8d3c4c3SMinchan Kim 	dirty = pmd_dirty(*pmd);
1927804dd150SAndrea Arcangeli 	soft_dirty = pmd_soft_dirty(*pmd);
1928eef1b3baSKirill A. Shutemov 
1929c777e2a8SAneesh Kumar K.V 	pmdp_huge_split_prepare(vma, haddr, pmd);
1930eef1b3baSKirill A. Shutemov 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1931eef1b3baSKirill A. Shutemov 	pmd_populate(mm, &_pmd, pgtable);
1932eef1b3baSKirill A. Shutemov 
19332ac015e2SKirill A. Shutemov 	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
1934eef1b3baSKirill A. Shutemov 		pte_t entry, *pte;
1935eef1b3baSKirill A. Shutemov 		/*
1936eef1b3baSKirill A. Shutemov 		 * Note that NUMA hinting access restrictions are not
1937eef1b3baSKirill A. Shutemov 		 * transferred to avoid any possibility of altering
1938eef1b3baSKirill A. Shutemov 		 * permissions across VMAs.
1939eef1b3baSKirill A. Shutemov 		 */
1940ba988280SKirill A. Shutemov 		if (freeze) {
1941ba988280SKirill A. Shutemov 			swp_entry_t swp_entry;
1942ba988280SKirill A. Shutemov 			swp_entry = make_migration_entry(page + i, write);
1943ba988280SKirill A. Shutemov 			entry = swp_entry_to_pte(swp_entry);
1944804dd150SAndrea Arcangeli 			if (soft_dirty)
1945804dd150SAndrea Arcangeli 				entry = pte_swp_mksoft_dirty(entry);
1946ba988280SKirill A. Shutemov 		} else {
19476d2329f8SAndrea Arcangeli 			entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
1948b8d3c4c3SMinchan Kim 			entry = maybe_mkwrite(entry, vma);
1949eef1b3baSKirill A. Shutemov 			if (!write)
1950eef1b3baSKirill A. Shutemov 				entry = pte_wrprotect(entry);
1951eef1b3baSKirill A. Shutemov 			if (!young)
1952eef1b3baSKirill A. Shutemov 				entry = pte_mkold(entry);
1953804dd150SAndrea Arcangeli 			if (soft_dirty)
1954804dd150SAndrea Arcangeli 				entry = pte_mksoft_dirty(entry);
1955ba988280SKirill A. Shutemov 		}
1956b8d3c4c3SMinchan Kim 		if (dirty)
1957b8d3c4c3SMinchan Kim 			SetPageDirty(page + i);
19582ac015e2SKirill A. Shutemov 		pte = pte_offset_map(&_pmd, addr);
1959eef1b3baSKirill A. Shutemov 		BUG_ON(!pte_none(*pte));
19602ac015e2SKirill A. Shutemov 		set_pte_at(mm, addr, pte, entry);
1961eef1b3baSKirill A. Shutemov 		atomic_inc(&page[i]._mapcount);
1962eef1b3baSKirill A. Shutemov 		pte_unmap(pte);
1963eef1b3baSKirill A. Shutemov 	}
1964eef1b3baSKirill A. Shutemov 
1965eef1b3baSKirill A. Shutemov 	/*
1966eef1b3baSKirill A. Shutemov 	 * Set PG_double_map before dropping compound_mapcount to avoid
1967eef1b3baSKirill A. Shutemov 	 * false-negative page_mapped().
1968eef1b3baSKirill A. Shutemov 	 */
1969eef1b3baSKirill A. Shutemov 	if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
1970eef1b3baSKirill A. Shutemov 		for (i = 0; i < HPAGE_PMD_NR; i++)
1971eef1b3baSKirill A. Shutemov 			atomic_inc(&page[i]._mapcount);
1972eef1b3baSKirill A. Shutemov 	}
1973eef1b3baSKirill A. Shutemov 
1974eef1b3baSKirill A. Shutemov 	if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
1975eef1b3baSKirill A. Shutemov 		/* Last compound_mapcount is gone. */
197611fb9989SMel Gorman 		__dec_node_page_state(page, NR_ANON_THPS);
1977eef1b3baSKirill A. Shutemov 		if (TestClearPageDoubleMap(page)) {
1978eef1b3baSKirill A. Shutemov 			/* No need in mapcount reference anymore */
1979eef1b3baSKirill A. Shutemov 			for (i = 0; i < HPAGE_PMD_NR; i++)
1980eef1b3baSKirill A. Shutemov 				atomic_dec(&page[i]._mapcount);
1981eef1b3baSKirill A. Shutemov 		}
1982eef1b3baSKirill A. Shutemov 	}
1983eef1b3baSKirill A. Shutemov 
1984eef1b3baSKirill A. Shutemov 	smp_wmb(); /* make pte visible before pmd */
1985e9b61f19SKirill A. Shutemov 	/*
1986e9b61f19SKirill A. Shutemov 	 * Up to this point the pmd is present and huge and userland has the
1987e9b61f19SKirill A. Shutemov 	 * whole access to the hugepage during the split (which happens in
1988e9b61f19SKirill A. Shutemov 	 * place). If we overwrite the pmd with the not-huge version pointing
1989e9b61f19SKirill A. Shutemov 	 * to the pte here (which of course we could if all CPUs were bug
1990e9b61f19SKirill A. Shutemov 	 * free), userland could trigger a small page size TLB miss on the
1991e9b61f19SKirill A. Shutemov 	 * small sized TLB while the hugepage TLB entry is still established in
1992e9b61f19SKirill A. Shutemov 	 * the huge TLB. Some CPUs don't like that.
1993e9b61f19SKirill A. Shutemov 	 * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
1994e9b61f19SKirill A. Shutemov 	 * 383 on page 93. Intel should be safe but also warns that it's
1995e9b61f19SKirill A. Shutemov 	 * only safe if the permission and cache attributes of the two entries
1996e9b61f19SKirill A. Shutemov 	 * loaded in the two TLBs are identical (which should be the case here).
1997e9b61f19SKirill A. Shutemov 	 * But it is generally safer to never allow small and huge TLB entries
1998e9b61f19SKirill A. Shutemov 	 * for the same virtual address to be loaded simultaneously. So instead
1999e9b61f19SKirill A. Shutemov 	 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
2000e9b61f19SKirill A. Shutemov 	 * current pmd notpresent (atomically because here the pmd_trans_huge
2001e9b61f19SKirill A. Shutemov 	 * and pmd_trans_splitting must remain set at all times on the pmd
2002e9b61f19SKirill A. Shutemov 	 * until the split is complete for this pmd), then we flush the SMP TLB
2003e9b61f19SKirill A. Shutemov 	 * and finally we write the non-huge version of the pmd entry with
2004e9b61f19SKirill A. Shutemov 	 * pmd_populate.
2005e9b61f19SKirill A. Shutemov 	 */
2006e9b61f19SKirill A. Shutemov 	pmdp_invalidate(vma, haddr, pmd);
2007eef1b3baSKirill A. Shutemov 	pmd_populate(mm, pmd, pgtable);
2008e9b61f19SKirill A. Shutemov 
2009e9b61f19SKirill A. Shutemov 	if (freeze) {
20102ac015e2SKirill A. Shutemov 		for (i = 0; i < HPAGE_PMD_NR; i++) {
2011e9b61f19SKirill A. Shutemov 			page_remove_rmap(page + i, false);
2012e9b61f19SKirill A. Shutemov 			put_page(page + i);
2013e9b61f19SKirill A. Shutemov 		}
2014e9b61f19SKirill A. Shutemov 	}
2015eef1b3baSKirill A. Shutemov }
2016eef1b3baSKirill A. Shutemov 
2017eef1b3baSKirill A. Shutemov void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
201833f4751eSNaoya Horiguchi 		unsigned long address, bool freeze, struct page *page)
2019eef1b3baSKirill A. Shutemov {
2020eef1b3baSKirill A. Shutemov 	spinlock_t *ptl;
2021eef1b3baSKirill A. Shutemov 	struct mm_struct *mm = vma->vm_mm;
2022eef1b3baSKirill A. Shutemov 	unsigned long haddr = address & HPAGE_PMD_MASK;
2023eef1b3baSKirill A. Shutemov 
2024eef1b3baSKirill A. Shutemov 	mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
2025eef1b3baSKirill A. Shutemov 	ptl = pmd_lock(mm, pmd);
202633f4751eSNaoya Horiguchi 
202733f4751eSNaoya Horiguchi 	/*
202833f4751eSNaoya Horiguchi 	 * If the caller asks to set up migration entries, we need a page to check
202933f4751eSNaoya Horiguchi 	 * the pmd against. Otherwise we can end up replacing the wrong page.
203033f4751eSNaoya Horiguchi 	 */
203133f4751eSNaoya Horiguchi 	VM_BUG_ON(freeze && !page);
203233f4751eSNaoya Horiguchi 	if (page && page != pmd_page(*pmd))
203333f4751eSNaoya Horiguchi 	        goto out;
203433f4751eSNaoya Horiguchi 
20355c7fb56eSDan Williams 	if (pmd_trans_huge(*pmd)) {
203633f4751eSNaoya Horiguchi 		page = pmd_page(*pmd);
2037e90309c9SKirill A. Shutemov 		if (PageMlocked(page))
20385f737714SKirill A. Shutemov 			clear_page_mlock(page);
20395c7fb56eSDan Williams 	} else if (!pmd_devmap(*pmd))
20405c7fb56eSDan Williams 		goto out;
2041fec89c10SKirill A. Shutemov 	__split_huge_pmd_locked(vma, pmd, haddr, freeze);
2042e90309c9SKirill A. Shutemov out:
2043eef1b3baSKirill A. Shutemov 	spin_unlock(ptl);
2044eef1b3baSKirill A. Shutemov 	mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
2045eef1b3baSKirill A. Shutemov }
2046eef1b3baSKirill A. Shutemov 
2047fec89c10SKirill A. Shutemov void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
2048fec89c10SKirill A. Shutemov 		bool freeze, struct page *page)
204994fcc585SAndrea Arcangeli {
2050f72e7dcdSHugh Dickins 	pgd_t *pgd;
2051f72e7dcdSHugh Dickins 	pud_t *pud;
205294fcc585SAndrea Arcangeli 	pmd_t *pmd;
205394fcc585SAndrea Arcangeli 
205478ddc534SKirill A. Shutemov 	pgd = pgd_offset(vma->vm_mm, address);
2055f72e7dcdSHugh Dickins 	if (!pgd_present(*pgd))
2056f72e7dcdSHugh Dickins 		return;
2057f72e7dcdSHugh Dickins 
2058f72e7dcdSHugh Dickins 	pud = pud_offset(pgd, address);
2059f72e7dcdSHugh Dickins 	if (!pud_present(*pud))
2060f72e7dcdSHugh Dickins 		return;
2061f72e7dcdSHugh Dickins 
2062f72e7dcdSHugh Dickins 	pmd = pmd_offset(pud, address);
2063fec89c10SKirill A. Shutemov 
206433f4751eSNaoya Horiguchi 	__split_huge_pmd(vma, pmd, address, freeze, page);
206594fcc585SAndrea Arcangeli }
206694fcc585SAndrea Arcangeli 
2067e1b9996bSKirill A. Shutemov void vma_adjust_trans_huge(struct vm_area_struct *vma,
206894fcc585SAndrea Arcangeli 			     unsigned long start,
206994fcc585SAndrea Arcangeli 			     unsigned long end,
207094fcc585SAndrea Arcangeli 			     long adjust_next)
207194fcc585SAndrea Arcangeli {
207294fcc585SAndrea Arcangeli 	/*
207394fcc585SAndrea Arcangeli 	 * If the new start address isn't hpage aligned and it could
207494fcc585SAndrea Arcangeli 	 * previously contain a hugepage: check if we need to split
207594fcc585SAndrea Arcangeli 	 * a huge pmd.
207694fcc585SAndrea Arcangeli 	 */
207794fcc585SAndrea Arcangeli 	if (start & ~HPAGE_PMD_MASK &&
207894fcc585SAndrea Arcangeli 	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
207994fcc585SAndrea Arcangeli 	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2080fec89c10SKirill A. Shutemov 		split_huge_pmd_address(vma, start, false, NULL);
208194fcc585SAndrea Arcangeli 
208294fcc585SAndrea Arcangeli 	/*
208394fcc585SAndrea Arcangeli 	 * If the new end address isn't hpage aligned and it could
208494fcc585SAndrea Arcangeli 	 * previously contain a hugepage: check if we need to split
208594fcc585SAndrea Arcangeli 	 * a huge pmd.
208694fcc585SAndrea Arcangeli 	 */
208794fcc585SAndrea Arcangeli 	if (end & ~HPAGE_PMD_MASK &&
208894fcc585SAndrea Arcangeli 	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
208994fcc585SAndrea Arcangeli 	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2090fec89c10SKirill A. Shutemov 		split_huge_pmd_address(vma, end, false, NULL);
209194fcc585SAndrea Arcangeli 
209294fcc585SAndrea Arcangeli 	/*
209394fcc585SAndrea Arcangeli 	 * If we're also updating vma->vm_next->vm_start, and if the new
209494fcc585SAndrea Arcangeli 	 * vm_next->vm_start isn't hpage aligned and it could previously
209594fcc585SAndrea Arcangeli 	 * contain a hugepage: check if we need to split a huge pmd.
209694fcc585SAndrea Arcangeli 	 */
209794fcc585SAndrea Arcangeli 	if (adjust_next > 0) {
209894fcc585SAndrea Arcangeli 		struct vm_area_struct *next = vma->vm_next;
209994fcc585SAndrea Arcangeli 		unsigned long nstart = next->vm_start;
210094fcc585SAndrea Arcangeli 		nstart += adjust_next << PAGE_SHIFT;
210194fcc585SAndrea Arcangeli 		if (nstart & ~HPAGE_PMD_MASK &&
210294fcc585SAndrea Arcangeli 		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
210394fcc585SAndrea Arcangeli 		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
2104fec89c10SKirill A. Shutemov 			split_huge_pmd_address(next, nstart, false, NULL);
210594fcc585SAndrea Arcangeli 	}
210694fcc585SAndrea Arcangeli }
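/*
 * A worked example of the alignment checks above, assuming HPAGE_PMD_SIZE
 * is 2MB (x86-64 with 4K pages):
 *
 *	start = 0x2001000	not 2MB aligned, so (start & ~HPAGE_PMD_MASK) != 0
 *	start & HPAGE_PMD_MASK = 0x2000000
 *
 * If [0x2000000, 0x2200000) still falls inside the vma, a huge pmd may
 * straddle the new boundary and has to be split; otherwise that range
 * cannot contain a hugepage and no split is needed.
 */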
2107e9b61f19SKirill A. Shutemov 
2108fec89c10SKirill A. Shutemov static void freeze_page(struct page *page)
2109e9b61f19SKirill A. Shutemov {
2110baa355fdSKirill A. Shutemov 	enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
2111c7ab0d2fSKirill A. Shutemov 		TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
2112c7ab0d2fSKirill A. Shutemov 	int ret;
2113e9b61f19SKirill A. Shutemov 
2114e9b61f19SKirill A. Shutemov 	VM_BUG_ON_PAGE(!PageHead(page), page);
2115e9b61f19SKirill A. Shutemov 
2116baa355fdSKirill A. Shutemov 	if (PageAnon(page))
2117baa355fdSKirill A. Shutemov 		ttu_flags |= TTU_MIGRATION;
2118baa355fdSKirill A. Shutemov 
2119c7ab0d2fSKirill A. Shutemov 	ret = try_to_unmap(page, ttu_flags);
2120c7ab0d2fSKirill A. Shutemov 	VM_BUG_ON_PAGE(ret, page);
2121bd56086fSKirill A. Shutemov }
2122bd56086fSKirill A. Shutemov 
2123fec89c10SKirill A. Shutemov static void unfreeze_page(struct page *page)
2124e9b61f19SKirill A. Shutemov {
2125fec89c10SKirill A. Shutemov 	int i;
2126ace71a19SKirill A. Shutemov 	if (PageTransHuge(page)) {
2127ace71a19SKirill A. Shutemov 		remove_migration_ptes(page, page, true);
2128ace71a19SKirill A. Shutemov 	} else {
2129fec89c10SKirill A. Shutemov 		for (i = 0; i < HPAGE_PMD_NR; i++)
2130fec89c10SKirill A. Shutemov 			remove_migration_ptes(page + i, page + i, true);
2131e9b61f19SKirill A. Shutemov 	}
2132ace71a19SKirill A. Shutemov }
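/*
 * "Freezing" above means unmapping the huge page from every page table:
 * for anonymous pages try_to_unmap() is called with TTU_MIGRATION, so the
 * ptes are replaced with migration entries that block new accesses until
 * unfreeze_page() restores them via remove_migration_ptes(); file pages
 * are simply unmapped and can be refaulted from the page cache afterwards.
 */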
2133e9b61f19SKirill A. Shutemov 
21348df651c7SKirill A. Shutemov static void __split_huge_page_tail(struct page *head, int tail,
2135e9b61f19SKirill A. Shutemov 		struct lruvec *lruvec, struct list_head *list)
2136e9b61f19SKirill A. Shutemov {
2137e9b61f19SKirill A. Shutemov 	struct page *page_tail = head + tail;
2138e9b61f19SKirill A. Shutemov 
21398df651c7SKirill A. Shutemov 	VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
2140fe896d18SJoonsoo Kim 	VM_BUG_ON_PAGE(page_ref_count(page_tail) != 0, page_tail);
2141e9b61f19SKirill A. Shutemov 
2142e9b61f19SKirill A. Shutemov 	/*
21430139aa7bSJoonsoo Kim 	 * tail_page->_refcount is zero and not changing from under us. But
2144e9b61f19SKirill A. Shutemov 	 * get_page_unless_zero() may be running from under us on the
2145baa355fdSKirill A. Shutemov 	 * tail_page. If we used atomic_set() below instead of atomic_inc() or
2146baa355fdSKirill A. Shutemov 	 * atomic_add(), we would then run atomic_set() concurrently with
2147e9b61f19SKirill A. Shutemov 	 * get_page_unless_zero(), and atomic_set() is implemented in C not
2148e9b61f19SKirill A. Shutemov 	 * using locked ops. spin_unlock on x86 sometimes uses locked ops
2149e9b61f19SKirill A. Shutemov 	 * because of PPro errata 66, 92, so unless somebody can guarantee
2150e9b61f19SKirill A. Shutemov 	 * atomic_set() here would be safe on all archs (and not only on x86),
2151baa355fdSKirill A. Shutemov 	 * it's safer to use atomic_inc()/atomic_add().
2152e9b61f19SKirill A. Shutemov 	 */
2153baa355fdSKirill A. Shutemov 	if (PageAnon(head)) {
2154fe896d18SJoonsoo Kim 		page_ref_inc(page_tail);
2155baa355fdSKirill A. Shutemov 	} else {
2156baa355fdSKirill A. Shutemov 		/* Additional pin to radix tree */
2157baa355fdSKirill A. Shutemov 		page_ref_add(page_tail, 2);
2158baa355fdSKirill A. Shutemov 	}
2159e9b61f19SKirill A. Shutemov 
2160e9b61f19SKirill A. Shutemov 	page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
2161e9b61f19SKirill A. Shutemov 	page_tail->flags |= (head->flags &
2162e9b61f19SKirill A. Shutemov 			((1L << PG_referenced) |
2163e9b61f19SKirill A. Shutemov 			 (1L << PG_swapbacked) |
2164e9b61f19SKirill A. Shutemov 			 (1L << PG_mlocked) |
2165e9b61f19SKirill A. Shutemov 			 (1L << PG_uptodate) |
2166e9b61f19SKirill A. Shutemov 			 (1L << PG_active) |
2167e9b61f19SKirill A. Shutemov 			 (1L << PG_locked) |
2168b8d3c4c3SMinchan Kim 			 (1L << PG_unevictable) |
2169b8d3c4c3SMinchan Kim 			 (1L << PG_dirty)));
2170e9b61f19SKirill A. Shutemov 
2171e9b61f19SKirill A. Shutemov 	/*
2172e9b61f19SKirill A. Shutemov 	 * After clearing PageTail the gup refcount can be released.
2173e9b61f19SKirill A. Shutemov 	 * Page flags also must be visible before we make the page non-compound.
2174e9b61f19SKirill A. Shutemov 	 */
2175e9b61f19SKirill A. Shutemov 	smp_wmb();
2176e9b61f19SKirill A. Shutemov 
2177e9b61f19SKirill A. Shutemov 	clear_compound_head(page_tail);
2178e9b61f19SKirill A. Shutemov 
2179e9b61f19SKirill A. Shutemov 	if (page_is_young(head))
2180e9b61f19SKirill A. Shutemov 		set_page_young(page_tail);
2181e9b61f19SKirill A. Shutemov 	if (page_is_idle(head))
2182e9b61f19SKirill A. Shutemov 		set_page_idle(page_tail);
2183e9b61f19SKirill A. Shutemov 
2184e9b61f19SKirill A. Shutemov 	/* ->mapping in first tail page is compound_mapcount */
21859a982250SKirill A. Shutemov 	VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
2186e9b61f19SKirill A. Shutemov 			page_tail);
2187e9b61f19SKirill A. Shutemov 	page_tail->mapping = head->mapping;
2188e9b61f19SKirill A. Shutemov 
2189e9b61f19SKirill A. Shutemov 	page_tail->index = head->index + tail;
2190e9b61f19SKirill A. Shutemov 	page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
2191e9b61f19SKirill A. Shutemov 	lru_add_page_tail(head, page_tail, lruvec, list);
2192e9b61f19SKirill A. Shutemov }
2193e9b61f19SKirill A. Shutemov 
2194baa355fdSKirill A. Shutemov static void __split_huge_page(struct page *page, struct list_head *list,
2195baa355fdSKirill A. Shutemov 		unsigned long flags)
2196e9b61f19SKirill A. Shutemov {
2197e9b61f19SKirill A. Shutemov 	struct page *head = compound_head(page);
2198e9b61f19SKirill A. Shutemov 	struct zone *zone = page_zone(head);
2199e9b61f19SKirill A. Shutemov 	struct lruvec *lruvec;
2200baa355fdSKirill A. Shutemov 	pgoff_t end = -1;
22018df651c7SKirill A. Shutemov 	int i;
2202e9b61f19SKirill A. Shutemov 
2203599d0c95SMel Gorman 	lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);
2204e9b61f19SKirill A. Shutemov 
2205e9b61f19SKirill A. Shutemov 	/* complete memcg work before adding pages to the LRU */
2206e9b61f19SKirill A. Shutemov 	mem_cgroup_split_huge_fixup(head);
2207e9b61f19SKirill A. Shutemov 
2208baa355fdSKirill A. Shutemov 	if (!PageAnon(page))
2209baa355fdSKirill A. Shutemov 		end = DIV_ROUND_UP(i_size_read(head->mapping->host), PAGE_SIZE);
2210baa355fdSKirill A. Shutemov 
2211baa355fdSKirill A. Shutemov 	for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
22128df651c7SKirill A. Shutemov 		__split_huge_page_tail(head, i, lruvec, list);
2213baa355fdSKirill A. Shutemov 		/* Some pages can be beyond i_size: drop them from page cache */
2214baa355fdSKirill A. Shutemov 		if (head[i].index >= end) {
2215baa355fdSKirill A. Shutemov 			__ClearPageDirty(head + i);
2216baa355fdSKirill A. Shutemov 			__delete_from_page_cache(head + i, NULL);
2217800d8c63SKirill A. Shutemov 			if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
2218800d8c63SKirill A. Shutemov 				shmem_uncharge(head->mapping->host, 1);
2219baa355fdSKirill A. Shutemov 			put_page(head + i);
2220baa355fdSKirill A. Shutemov 		}
2221baa355fdSKirill A. Shutemov 	}
2222e9b61f19SKirill A. Shutemov 
2223e9b61f19SKirill A. Shutemov 	ClearPageCompound(head);
2224baa355fdSKirill A. Shutemov 	/* See comment in __split_huge_page_tail() */
2225baa355fdSKirill A. Shutemov 	if (PageAnon(head)) {
2226baa355fdSKirill A. Shutemov 		page_ref_inc(head);
2227baa355fdSKirill A. Shutemov 	} else {
2228baa355fdSKirill A. Shutemov 		/* Additional pin to radix tree */
2229baa355fdSKirill A. Shutemov 		page_ref_add(head, 2);
2230baa355fdSKirill A. Shutemov 		spin_unlock(&head->mapping->tree_lock);
2231baa355fdSKirill A. Shutemov 	}
2232baa355fdSKirill A. Shutemov 
2233a52633d8SMel Gorman 	spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
2234e9b61f19SKirill A. Shutemov 
2235fec89c10SKirill A. Shutemov 	unfreeze_page(head);
2236e9b61f19SKirill A. Shutemov 
2237e9b61f19SKirill A. Shutemov 	for (i = 0; i < HPAGE_PMD_NR; i++) {
2238e9b61f19SKirill A. Shutemov 		struct page *subpage = head + i;
2239e9b61f19SKirill A. Shutemov 		if (subpage == page)
2240e9b61f19SKirill A. Shutemov 			continue;
2241e9b61f19SKirill A. Shutemov 		unlock_page(subpage);
2242e9b61f19SKirill A. Shutemov 
2243e9b61f19SKirill A. Shutemov 		/*
2244e9b61f19SKirill A. Shutemov 		 * Subpages may be freed if there wasn't any mapping,
2245e9b61f19SKirill A. Shutemov 		 * e.g. if add_to_swap() is running on an lru page that
2246e9b61f19SKirill A. Shutemov 		 * had its mapping zapped. Freeing these pages
2247e9b61f19SKirill A. Shutemov 		 * requires taking the lru_lock, so we do the put_page
2248e9b61f19SKirill A. Shutemov 		 * of the tail pages after the split is complete.
2249e9b61f19SKirill A. Shutemov 		 */
2250e9b61f19SKirill A. Shutemov 		put_page(subpage);
2251e9b61f19SKirill A. Shutemov 	}
2252e9b61f19SKirill A. Shutemov }
2253e9b61f19SKirill A. Shutemov 
2254b20ce5e0SKirill A. Shutemov int total_mapcount(struct page *page)
2255b20ce5e0SKirill A. Shutemov {
2256dd78feddSKirill A. Shutemov 	int i, compound, ret;
2257b20ce5e0SKirill A. Shutemov 
2258b20ce5e0SKirill A. Shutemov 	VM_BUG_ON_PAGE(PageTail(page), page);
2259b20ce5e0SKirill A. Shutemov 
2260b20ce5e0SKirill A. Shutemov 	if (likely(!PageCompound(page)))
2261b20ce5e0SKirill A. Shutemov 		return atomic_read(&page->_mapcount) + 1;
2262b20ce5e0SKirill A. Shutemov 
2263dd78feddSKirill A. Shutemov 	compound = compound_mapcount(page);
2264b20ce5e0SKirill A. Shutemov 	if (PageHuge(page))
2265dd78feddSKirill A. Shutemov 		return compound;
2266dd78feddSKirill A. Shutemov 	ret = compound;
2267b20ce5e0SKirill A. Shutemov 	for (i = 0; i < HPAGE_PMD_NR; i++)
2268b20ce5e0SKirill A. Shutemov 		ret += atomic_read(&page[i]._mapcount) + 1;
2269dd78feddSKirill A. Shutemov 	/* File pages have compound_mapcount included in _mapcount */
2270dd78feddSKirill A. Shutemov 	if (!PageAnon(page))
2271dd78feddSKirill A. Shutemov 		return ret - compound * HPAGE_PMD_NR;
2272b20ce5e0SKirill A. Shutemov 	if (PageDoubleMap(page))
2273b20ce5e0SKirill A. Shutemov 		ret -= HPAGE_PMD_NR;
2274b20ce5e0SKirill A. Shutemov 	return ret;
2275b20ce5e0SKirill A. Shutemov }
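/*
 * A worked example of the accounting above, assuming HPAGE_PMD_NR == 512
 * and an anonymous THP that is PMD-mapped by two processes with no pte
 * mappings:
 *
 *	compound_mapcount(page) == 2
 *	each page[i]._mapcount reads -1, so each subpage contributes 0
 *	ret = 2 + 512 * 0 = 2
 *
 * The file-page and PageDoubleMap() adjustments only compensate for the
 * per-subpage _mapcount increments that accompany a PMD mapping (always
 * for file pages, only while DoubleMap is set for anon), so each real
 * mapping ends up counted exactly once.
 */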
2276b20ce5e0SKirill A. Shutemov 
2277e9b61f19SKirill A. Shutemov /*
22786d0a07edSAndrea Arcangeli  * This calculates accurately how many mappings a transparent hugepage
22796d0a07edSAndrea Arcangeli  * has (unlike page_mapcount() which isn't fully accurate). This full
22806d0a07edSAndrea Arcangeli  * accuracy is primarily needed to know if copy-on-write faults can
22816d0a07edSAndrea Arcangeli  * reuse the page and change the mapping to read-write instead of
22826d0a07edSAndrea Arcangeli  * copying it. At the same time this returns the total_mapcount too.
22836d0a07edSAndrea Arcangeli  *
22846d0a07edSAndrea Arcangeli  * The function returns the highest mapcount any one of the subpages
22856d0a07edSAndrea Arcangeli  * has. If the return value is one, even if different processes are
22866d0a07edSAndrea Arcangeli  * mapping different subpages of the transparent hugepage, they can
22876d0a07edSAndrea Arcangeli  * all reuse it, because each process is reusing a different subpage.
22886d0a07edSAndrea Arcangeli  *
22896d0a07edSAndrea Arcangeli  * The total_mapcount is instead counting all virtual mappings of the
22906d0a07edSAndrea Arcangeli  * subpages. If the total_mapcount is equal to "one", it tells the
22916d0a07edSAndrea Arcangeli  * caller all mappings belong to the same "mm" and in turn the
22926d0a07edSAndrea Arcangeli  * anon_vma of the transparent hugepage can become the vma->anon_vma
22936d0a07edSAndrea Arcangeli  * local one as no other process may be mapping any of the subpages.
22946d0a07edSAndrea Arcangeli  *
22956d0a07edSAndrea Arcangeli  * It would be more accurate to replace page_mapcount() with
22966d0a07edSAndrea Arcangeli  * page_trans_huge_mapcount(); however, we only use
22976d0a07edSAndrea Arcangeli  * page_trans_huge_mapcount() in the copy-on-write faults, where we
22986d0a07edSAndrea Arcangeli  * need full accuracy to avoid breaking page pinning, because
22996d0a07edSAndrea Arcangeli  * page_trans_huge_mapcount() is slower than page_mapcount().
23006d0a07edSAndrea Arcangeli  */
23016d0a07edSAndrea Arcangeli int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
23026d0a07edSAndrea Arcangeli {
23036d0a07edSAndrea Arcangeli 	int i, ret, _total_mapcount, mapcount;
23046d0a07edSAndrea Arcangeli 
23056d0a07edSAndrea Arcangeli 	/* hugetlbfs shouldn't call it */
23066d0a07edSAndrea Arcangeli 	VM_BUG_ON_PAGE(PageHuge(page), page);
23076d0a07edSAndrea Arcangeli 
23086d0a07edSAndrea Arcangeli 	if (likely(!PageTransCompound(page))) {
23096d0a07edSAndrea Arcangeli 		mapcount = atomic_read(&page->_mapcount) + 1;
23106d0a07edSAndrea Arcangeli 		if (total_mapcount)
23116d0a07edSAndrea Arcangeli 			*total_mapcount = mapcount;
23126d0a07edSAndrea Arcangeli 		return mapcount;
23136d0a07edSAndrea Arcangeli 	}
23146d0a07edSAndrea Arcangeli 
23156d0a07edSAndrea Arcangeli 	page = compound_head(page);
23166d0a07edSAndrea Arcangeli 
23176d0a07edSAndrea Arcangeli 	_total_mapcount = ret = 0;
23186d0a07edSAndrea Arcangeli 	for (i = 0; i < HPAGE_PMD_NR; i++) {
23196d0a07edSAndrea Arcangeli 		mapcount = atomic_read(&page[i]._mapcount) + 1;
23206d0a07edSAndrea Arcangeli 		ret = max(ret, mapcount);
23216d0a07edSAndrea Arcangeli 		_total_mapcount += mapcount;
23226d0a07edSAndrea Arcangeli 	}
23236d0a07edSAndrea Arcangeli 	if (PageDoubleMap(page)) {
23246d0a07edSAndrea Arcangeli 		ret -= 1;
23256d0a07edSAndrea Arcangeli 		_total_mapcount -= HPAGE_PMD_NR;
23266d0a07edSAndrea Arcangeli 	}
23276d0a07edSAndrea Arcangeli 	mapcount = compound_mapcount(page);
23286d0a07edSAndrea Arcangeli 	ret += mapcount;
23296d0a07edSAndrea Arcangeli 	_total_mapcount += mapcount;
23306d0a07edSAndrea Arcangeli 	if (total_mapcount)
23316d0a07edSAndrea Arcangeli 		*total_mapcount = _total_mapcount;
23326d0a07edSAndrea Arcangeli 	return ret;
23336d0a07edSAndrea Arcangeli }
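/*
 * A minimal usage sketch: the COW reuse check in the swap code (at this
 * point reuse_swap_page() in mm/swapfile.c) is the kind of caller that
 * needs this precision, roughly along the lines of:
 *
 *	int total;
 *
 *	if (page_trans_huge_mapcount(page, &total) == 1 && total == 1)
 *		the faulting process is the only user, reuse the page
 *	else
 *		copy instead of reusing
 */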
23346d0a07edSAndrea Arcangeli 
23356d0a07edSAndrea Arcangeli /*
2336e9b61f19SKirill A. Shutemov  * This function splits a huge page into normal pages. @page can point to any
2337e9b61f19SKirill A. Shutemov  * subpage of the huge page to split. Splitting doesn't change the position of @page.
2338e9b61f19SKirill A. Shutemov  *
2339e9b61f19SKirill A. Shutemov  * The caller must hold a pin on the @page, otherwise the split fails with -EBUSY.
2340e9b61f19SKirill A. Shutemov  * The huge page must be locked.
2341e9b61f19SKirill A. Shutemov  *
2342e9b61f19SKirill A. Shutemov  * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
2343e9b61f19SKirill A. Shutemov  *
2344e9b61f19SKirill A. Shutemov  * Both head page and tail pages will inherit mapping, flags, and so on from
2345e9b61f19SKirill A. Shutemov  * the hugepage.
2346e9b61f19SKirill A. Shutemov  *
2347e9b61f19SKirill A. Shutemov  * The GUP pin and PG_locked are transferred to @page. The rest of the subpages
2348e9b61f19SKirill A. Shutemov  * can be freed if they are not mapped.
2349e9b61f19SKirill A. Shutemov  *
2350e9b61f19SKirill A. Shutemov  * Returns 0 if the hugepage is split successfully.
2351e9b61f19SKirill A. Shutemov  * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
2352e9b61f19SKirill A. Shutemov  * us.
2353e9b61f19SKirill A. Shutemov  */
2354e9b61f19SKirill A. Shutemov int split_huge_page_to_list(struct page *page, struct list_head *list)
2355e9b61f19SKirill A. Shutemov {
2356e9b61f19SKirill A. Shutemov 	struct page *head = compound_head(page);
2357a3d0a918SKirill A. Shutemov 	struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
2358baa355fdSKirill A. Shutemov 	struct anon_vma *anon_vma = NULL;
2359baa355fdSKirill A. Shutemov 	struct address_space *mapping = NULL;
2360baa355fdSKirill A. Shutemov 	int count, mapcount, extra_pins, ret;
2361d9654322SKirill A. Shutemov 	bool mlocked;
23620b9b6fffSKirill A. Shutemov 	unsigned long flags;
2363e9b61f19SKirill A. Shutemov 
2364e9b61f19SKirill A. Shutemov 	VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
2365e9b61f19SKirill A. Shutemov 	VM_BUG_ON_PAGE(!PageLocked(page), page);
2366e9b61f19SKirill A. Shutemov 	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
2367e9b61f19SKirill A. Shutemov 	VM_BUG_ON_PAGE(!PageCompound(page), page);
2368e9b61f19SKirill A. Shutemov 
2369baa355fdSKirill A. Shutemov 	if (PageAnon(head)) {
2370e9b61f19SKirill A. Shutemov 		/*
2371baa355fdSKirill A. Shutemov 		 * The caller does not necessarily hold an mmap_sem that would
2372baa355fdSKirill A. Shutemov 		 * prevent the anon_vma disappearing, so we first take a
2373baa355fdSKirill A. Shutemov 		 * reference to it and then lock the anon_vma for write. This
2374baa355fdSKirill A. Shutemov 		 * is similar to page_lock_anon_vma_read except the write lock
2375baa355fdSKirill A. Shutemov 		 * is taken to serialise against parallel split or collapse
2376baa355fdSKirill A. Shutemov 		 * operations.
2377e9b61f19SKirill A. Shutemov 		 */
2378e9b61f19SKirill A. Shutemov 		anon_vma = page_get_anon_vma(head);
2379e9b61f19SKirill A. Shutemov 		if (!anon_vma) {
2380e9b61f19SKirill A. Shutemov 			ret = -EBUSY;
2381e9b61f19SKirill A. Shutemov 			goto out;
2382e9b61f19SKirill A. Shutemov 		}
2383baa355fdSKirill A. Shutemov 		extra_pins = 0;
2384baa355fdSKirill A. Shutemov 		mapping = NULL;
2385e9b61f19SKirill A. Shutemov 		anon_vma_lock_write(anon_vma);
2386baa355fdSKirill A. Shutemov 	} else {
2387baa355fdSKirill A. Shutemov 		mapping = head->mapping;
2388baa355fdSKirill A. Shutemov 
2389baa355fdSKirill A. Shutemov 		/* Truncated ? */
2390baa355fdSKirill A. Shutemov 		if (!mapping) {
2391baa355fdSKirill A. Shutemov 			ret = -EBUSY;
2392baa355fdSKirill A. Shutemov 			goto out;
2393baa355fdSKirill A. Shutemov 		}
2394baa355fdSKirill A. Shutemov 
2395baa355fdSKirill A. Shutemov 		/* Additional pins from radix tree */
2396baa355fdSKirill A. Shutemov 		extra_pins = HPAGE_PMD_NR;
2397baa355fdSKirill A. Shutemov 		anon_vma = NULL;
2398baa355fdSKirill A. Shutemov 		i_mmap_lock_read(mapping);
2399baa355fdSKirill A. Shutemov 	}
2400e9b61f19SKirill A. Shutemov 
2401e9b61f19SKirill A. Shutemov 	/*
2402e9b61f19SKirill A. Shutemov 	 * Racy check whether we can split the page, before freeze_page()
2403e9b61f19SKirill A. Shutemov 	 * splits PMDs.
2404e9b61f19SKirill A. Shutemov 	 */
2405baa355fdSKirill A. Shutemov 	if (total_mapcount(head) != page_count(head) - extra_pins - 1) {
2406e9b61f19SKirill A. Shutemov 		ret = -EBUSY;
2407e9b61f19SKirill A. Shutemov 		goto out_unlock;
2408e9b61f19SKirill A. Shutemov 	}
2409e9b61f19SKirill A. Shutemov 
2410d9654322SKirill A. Shutemov 	mlocked = PageMlocked(page);
2411fec89c10SKirill A. Shutemov 	freeze_page(head);
2412e9b61f19SKirill A. Shutemov 	VM_BUG_ON_PAGE(compound_mapcount(head), head);
2413e9b61f19SKirill A. Shutemov 
2414d9654322SKirill A. Shutemov 	/* Make sure the page is not on a per-CPU pagevec, as that takes a pin */
2415d9654322SKirill A. Shutemov 	if (mlocked)
2416d9654322SKirill A. Shutemov 		lru_add_drain();
2417d9654322SKirill A. Shutemov 
2418baa355fdSKirill A. Shutemov 	/* prevent PageLRU from going away from under us, and freeze lru stats */
2419a52633d8SMel Gorman 	spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags);
2420baa355fdSKirill A. Shutemov 
2421baa355fdSKirill A. Shutemov 	if (mapping) {
2422baa355fdSKirill A. Shutemov 		void **pslot;
2423baa355fdSKirill A. Shutemov 
2424baa355fdSKirill A. Shutemov 		spin_lock(&mapping->tree_lock);
2425baa355fdSKirill A. Shutemov 		pslot = radix_tree_lookup_slot(&mapping->page_tree,
2426baa355fdSKirill A. Shutemov 				page_index(head));
2427baa355fdSKirill A. Shutemov 		/*
2428baa355fdSKirill A. Shutemov 		 * Check if the head page is present in radix tree.
2429baa355fdSKirill A. Shutemov 		 * We assume all tail pages are present too, if the head is there.
2430baa355fdSKirill A. Shutemov 		 */
2431baa355fdSKirill A. Shutemov 		if (radix_tree_deref_slot_protected(pslot,
2432baa355fdSKirill A. Shutemov 					&mapping->tree_lock) != head)
2433baa355fdSKirill A. Shutemov 			goto fail;
2434baa355fdSKirill A. Shutemov 	}
2435baa355fdSKirill A. Shutemov 
24360139aa7bSJoonsoo Kim 	/* Prevent deferred_split_scan() from touching ->_refcount */
2437baa355fdSKirill A. Shutemov 	spin_lock(&pgdata->split_queue_lock);
2438e9b61f19SKirill A. Shutemov 	count = page_count(head);
2439e9b61f19SKirill A. Shutemov 	mapcount = total_mapcount(head);
2440baa355fdSKirill A. Shutemov 	if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
24419a982250SKirill A. Shutemov 		if (!list_empty(page_deferred_list(head))) {
2442a3d0a918SKirill A. Shutemov 			pgdata->split_queue_len--;
24439a982250SKirill A. Shutemov 			list_del(page_deferred_list(head));
24449a982250SKirill A. Shutemov 		}
244565c45377SKirill A. Shutemov 		if (mapping)
244611fb9989SMel Gorman 			__dec_node_page_state(page, NR_SHMEM_THPS);
2447baa355fdSKirill A. Shutemov 		spin_unlock(&pgdata->split_queue_lock);
2448baa355fdSKirill A. Shutemov 		__split_huge_page(page, list, flags);
2449e9b61f19SKirill A. Shutemov 		ret = 0;
2450baa355fdSKirill A. Shutemov 	} else {
2451baa355fdSKirill A. Shutemov 		if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
2452e9b61f19SKirill A. Shutemov 			pr_alert("total_mapcount: %u, page_count(): %u\n",
2453e9b61f19SKirill A. Shutemov 					mapcount, count);
2454e9b61f19SKirill A. Shutemov 			if (PageTail(page))
2455e9b61f19SKirill A. Shutemov 				dump_page(head, NULL);
2456bd56086fSKirill A. Shutemov 			dump_page(page, "total_mapcount(head) > 0");
2457e9b61f19SKirill A. Shutemov 			BUG();
2458baa355fdSKirill A. Shutemov 		}
2459baa355fdSKirill A. Shutemov 		spin_unlock(&pgdata->split_queue_lock);
2460baa355fdSKirill A. Shutemov fail:		if (mapping)
2461baa355fdSKirill A. Shutemov 			spin_unlock(&mapping->tree_lock);
2462a52633d8SMel Gorman 		spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
2463fec89c10SKirill A. Shutemov 		unfreeze_page(head);
2464e9b61f19SKirill A. Shutemov 		ret = -EBUSY;
2465e9b61f19SKirill A. Shutemov 	}
2466e9b61f19SKirill A. Shutemov 
2467e9b61f19SKirill A. Shutemov out_unlock:
2468baa355fdSKirill A. Shutemov 	if (anon_vma) {
2469e9b61f19SKirill A. Shutemov 		anon_vma_unlock_write(anon_vma);
2470e9b61f19SKirill A. Shutemov 		put_anon_vma(anon_vma);
2471baa355fdSKirill A. Shutemov 	}
2472baa355fdSKirill A. Shutemov 	if (mapping)
2473baa355fdSKirill A. Shutemov 		i_mmap_unlock_read(mapping);
2474e9b61f19SKirill A. Shutemov out:
2475e9b61f19SKirill A. Shutemov 	count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
2476e9b61f19SKirill A. Shutemov 	return ret;
2477e9b61f19SKirill A. Shutemov }
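/*
 * split_huge_page() in <linux/huge_mm.h> is the usual wrapper around this
 * function with list == NULL. A typical call sequence, mirroring
 * deferred_split_scan() below, looks roughly like:
 *
 *	if (get_page_unless_zero(page)) {
 *		lock_page(page);
 *		if (!split_huge_page(page))
 *			;	split succeeded, tail pages went to the LRU
 *		unlock_page(page);
 *		put_page(page);
 *	}
 */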
24789a982250SKirill A. Shutemov 
24799a982250SKirill A. Shutemov void free_transhuge_page(struct page *page)
24809a982250SKirill A. Shutemov {
2481a3d0a918SKirill A. Shutemov 	struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
24829a982250SKirill A. Shutemov 	unsigned long flags;
24839a982250SKirill A. Shutemov 
2484a3d0a918SKirill A. Shutemov 	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
24859a982250SKirill A. Shutemov 	if (!list_empty(page_deferred_list(page))) {
2486a3d0a918SKirill A. Shutemov 		pgdata->split_queue_len--;
24879a982250SKirill A. Shutemov 		list_del(page_deferred_list(page));
24889a982250SKirill A. Shutemov 	}
2489a3d0a918SKirill A. Shutemov 	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
24909a982250SKirill A. Shutemov 	free_compound_page(page);
24919a982250SKirill A. Shutemov }
24929a982250SKirill A. Shutemov 
24939a982250SKirill A. Shutemov void deferred_split_huge_page(struct page *page)
24949a982250SKirill A. Shutemov {
2495a3d0a918SKirill A. Shutemov 	struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
24969a982250SKirill A. Shutemov 	unsigned long flags;
24979a982250SKirill A. Shutemov 
24989a982250SKirill A. Shutemov 	VM_BUG_ON_PAGE(!PageTransHuge(page), page);
24999a982250SKirill A. Shutemov 
2500a3d0a918SKirill A. Shutemov 	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
25019a982250SKirill A. Shutemov 	if (list_empty(page_deferred_list(page))) {
2502f9719a03SKirill A. Shutemov 		count_vm_event(THP_DEFERRED_SPLIT_PAGE);
2503a3d0a918SKirill A. Shutemov 		list_add_tail(page_deferred_list(page), &pgdata->split_queue);
2504a3d0a918SKirill A. Shutemov 		pgdata->split_queue_len++;
25059a982250SKirill A. Shutemov 	}
2506a3d0a918SKirill A. Shutemov 	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
25079a982250SKirill A. Shutemov }
25089a982250SKirill A. Shutemov 
25099a982250SKirill A. Shutemov static unsigned long deferred_split_count(struct shrinker *shrink,
25109a982250SKirill A. Shutemov 		struct shrink_control *sc)
25119a982250SKirill A. Shutemov {
2512a3d0a918SKirill A. Shutemov 	struct pglist_data *pgdata = NODE_DATA(sc->nid);
2513cb8d68ecSKirill A. Shutemov 	return ACCESS_ONCE(pgdata->split_queue_len);
25149a982250SKirill A. Shutemov }
25159a982250SKirill A. Shutemov 
25169a982250SKirill A. Shutemov static unsigned long deferred_split_scan(struct shrinker *shrink,
25179a982250SKirill A. Shutemov 		struct shrink_control *sc)
25189a982250SKirill A. Shutemov {
2519a3d0a918SKirill A. Shutemov 	struct pglist_data *pgdata = NODE_DATA(sc->nid);
25209a982250SKirill A. Shutemov 	unsigned long flags;
25219a982250SKirill A. Shutemov 	LIST_HEAD(list), *pos, *next;
25229a982250SKirill A. Shutemov 	struct page *page;
25239a982250SKirill A. Shutemov 	int split = 0;
25249a982250SKirill A. Shutemov 
2525a3d0a918SKirill A. Shutemov 	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
25269a982250SKirill A. Shutemov 	/* Take a pin on all head pages to avoid freeing them under us */
2527ae026204SKirill A. Shutemov 	list_for_each_safe(pos, next, &pgdata->split_queue) {
25289a982250SKirill A. Shutemov 		page = list_entry((void *)pos, struct page, mapping);
25299a982250SKirill A. Shutemov 		page = compound_head(page);
2530e3ae1953SKirill A. Shutemov 		if (get_page_unless_zero(page)) {
2531e3ae1953SKirill A. Shutemov 			list_move(page_deferred_list(page), &list);
2532e3ae1953SKirill A. Shutemov 		} else {
2533e3ae1953SKirill A. Shutemov 			/* We lost the race with put_compound_page() */
25349a982250SKirill A. Shutemov 			list_del_init(page_deferred_list(page));
2535a3d0a918SKirill A. Shutemov 			pgdata->split_queue_len--;
25369a982250SKirill A. Shutemov 		}
2537e3ae1953SKirill A. Shutemov 		if (!--sc->nr_to_scan)
2538e3ae1953SKirill A. Shutemov 			break;
25399a982250SKirill A. Shutemov 	}
2540a3d0a918SKirill A. Shutemov 	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
25419a982250SKirill A. Shutemov 
25429a982250SKirill A. Shutemov 	list_for_each_safe(pos, next, &list) {
25439a982250SKirill A. Shutemov 		page = list_entry((void *)pos, struct page, mapping);
25449a982250SKirill A. Shutemov 		lock_page(page);
25459a982250SKirill A. Shutemov 		/* split_huge_page() removes page from list on success */
25469a982250SKirill A. Shutemov 		if (!split_huge_page(page))
25479a982250SKirill A. Shutemov 			split++;
25489a982250SKirill A. Shutemov 		unlock_page(page);
25499a982250SKirill A. Shutemov 		put_page(page);
25509a982250SKirill A. Shutemov 	}
25519a982250SKirill A. Shutemov 
2552a3d0a918SKirill A. Shutemov 	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
2553a3d0a918SKirill A. Shutemov 	list_splice_tail(&list, &pgdata->split_queue);
2554a3d0a918SKirill A. Shutemov 	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
25559a982250SKirill A. Shutemov 
2556cb8d68ecSKirill A. Shutemov 	/*
2557cb8d68ecSKirill A. Shutemov 	 * Stop the shrinker if we didn't split any page and the queue is now empty.
2558cb8d68ecSKirill A. Shutemov 	 * This can happen if pages were freed under us.
2559cb8d68ecSKirill A. Shutemov 	 */
2560cb8d68ecSKirill A. Shutemov 	if (!split && list_empty(&pgdata->split_queue))
2561cb8d68ecSKirill A. Shutemov 		return SHRINK_STOP;
2562cb8d68ecSKirill A. Shutemov 	return split;
25639a982250SKirill A. Shutemov }
25649a982250SKirill A. Shutemov 
25659a982250SKirill A. Shutemov static struct shrinker deferred_split_shrinker = {
25669a982250SKirill A. Shutemov 	.count_objects = deferred_split_count,
25679a982250SKirill A. Shutemov 	.scan_objects = deferred_split_scan,
25689a982250SKirill A. Shutemov 	.seeks = DEFAULT_SEEKS,
2569a3d0a918SKirill A. Shutemov 	.flags = SHRINKER_NUMA_AWARE,
25709a982250SKirill A. Shutemov };
257149071d43SKirill A. Shutemov 
257249071d43SKirill A. Shutemov #ifdef CONFIG_DEBUG_FS
257349071d43SKirill A. Shutemov static int split_huge_pages_set(void *data, u64 val)
257449071d43SKirill A. Shutemov {
257549071d43SKirill A. Shutemov 	struct zone *zone;
257649071d43SKirill A. Shutemov 	struct page *page;
257749071d43SKirill A. Shutemov 	unsigned long pfn, max_zone_pfn;
257849071d43SKirill A. Shutemov 	unsigned long total = 0, split = 0;
257949071d43SKirill A. Shutemov 
258049071d43SKirill A. Shutemov 	if (val != 1)
258149071d43SKirill A. Shutemov 		return -EINVAL;
258249071d43SKirill A. Shutemov 
258349071d43SKirill A. Shutemov 	for_each_populated_zone(zone) {
258449071d43SKirill A. Shutemov 		max_zone_pfn = zone_end_pfn(zone);
258549071d43SKirill A. Shutemov 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
258649071d43SKirill A. Shutemov 			if (!pfn_valid(pfn))
258749071d43SKirill A. Shutemov 				continue;
258849071d43SKirill A. Shutemov 
258949071d43SKirill A. Shutemov 			page = pfn_to_page(pfn);
259049071d43SKirill A. Shutemov 			if (!get_page_unless_zero(page))
259149071d43SKirill A. Shutemov 				continue;
259249071d43SKirill A. Shutemov 
259349071d43SKirill A. Shutemov 			if (zone != page_zone(page))
259449071d43SKirill A. Shutemov 				goto next;
259549071d43SKirill A. Shutemov 
2596baa355fdSKirill A. Shutemov 			if (!PageHead(page) || PageHuge(page) || !PageLRU(page))
259749071d43SKirill A. Shutemov 				goto next;
259849071d43SKirill A. Shutemov 
259949071d43SKirill A. Shutemov 			total++;
260049071d43SKirill A. Shutemov 			lock_page(page);
260149071d43SKirill A. Shutemov 			if (!split_huge_page(page))
260249071d43SKirill A. Shutemov 				split++;
260349071d43SKirill A. Shutemov 			unlock_page(page);
260449071d43SKirill A. Shutemov next:
260549071d43SKirill A. Shutemov 			put_page(page);
260649071d43SKirill A. Shutemov 		}
260749071d43SKirill A. Shutemov 	}
260849071d43SKirill A. Shutemov 
2609145bdaa1SYang Shi 	pr_info("%lu of %lu THP split\n", split, total);
261049071d43SKirill A. Shutemov 
261149071d43SKirill A. Shutemov 	return 0;
261249071d43SKirill A. Shutemov }
261349071d43SKirill A. Shutemov DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set,
261449071d43SKirill A. Shutemov 		"%llu\n");
261549071d43SKirill A. Shutemov 
261649071d43SKirill A. Shutemov static int __init split_huge_pages_debugfs(void)
261749071d43SKirill A. Shutemov {
261849071d43SKirill A. Shutemov 	void *ret;
261949071d43SKirill A. Shutemov 
2620145bdaa1SYang Shi 	ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
262149071d43SKirill A. Shutemov 			&split_huge_pages_fops);
262249071d43SKirill A. Shutemov 	if (!ret)
262349071d43SKirill A. Shutemov 		pr_warn("Failed to create split_huge_pages in debugfs\n");
262449071d43SKirill A. Shutemov 	return 0;
262549071d43SKirill A. Shutemov }
262649071d43SKirill A. Shutemov late_initcall(split_huge_pages_debugfs);
262749071d43SKirill A. Shutemov #endif
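/*
 * With CONFIG_DEBUG_FS enabled, the knob above can be exercised from
 * userspace; only the value 1 is accepted and it triggers a scan of all
 * populated zones (assuming debugfs is mounted in the usual place):
 *
 *	echo 1 > /sys/kernel/debug/split_huge_pages
 *
 * The "N of M THP split" summary is then printed via pr_info().
 */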
2628