171e3aac0SAndrea Arcangeli /* 271e3aac0SAndrea Arcangeli * Copyright (C) 2009 Red Hat, Inc. 371e3aac0SAndrea Arcangeli * 471e3aac0SAndrea Arcangeli * This work is licensed under the terms of the GNU GPL, version 2. See 571e3aac0SAndrea Arcangeli * the COPYING file in the top-level directory. 671e3aac0SAndrea Arcangeli */ 771e3aac0SAndrea Arcangeli 8ae3a8c1cSAndrew Morton #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 9ae3a8c1cSAndrew Morton 1071e3aac0SAndrea Arcangeli #include <linux/mm.h> 1171e3aac0SAndrea Arcangeli #include <linux/sched.h> 12f7ccbae4SIngo Molnar #include <linux/sched/coredump.h> 136a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h> 1471e3aac0SAndrea Arcangeli #include <linux/highmem.h> 1571e3aac0SAndrea Arcangeli #include <linux/hugetlb.h> 1671e3aac0SAndrea Arcangeli #include <linux/mmu_notifier.h> 1771e3aac0SAndrea Arcangeli #include <linux/rmap.h> 1871e3aac0SAndrea Arcangeli #include <linux/swap.h> 1997ae1749SKirill A. Shutemov #include <linux/shrinker.h> 20ba76149fSAndrea Arcangeli #include <linux/mm_inline.h> 21e9b61f19SKirill A. Shutemov #include <linux/swapops.h> 224897c765SMatthew Wilcox #include <linux/dax.h> 23ba76149fSAndrea Arcangeli #include <linux/khugepaged.h> 24878aee7dSAndrea Arcangeli #include <linux/freezer.h> 25f25748e3SDan Williams #include <linux/pfn_t.h> 26a664b2d8SAndrea Arcangeli #include <linux/mman.h> 273565fce3SDan Williams #include <linux/memremap.h> 28325adeb5SRalf Baechle #include <linux/pagemap.h> 2949071d43SKirill A. Shutemov #include <linux/debugfs.h> 304daae3b4SMel Gorman #include <linux/migrate.h> 3143b5fbbdSSasha Levin #include <linux/hashtable.h> 326b251fc9SAndrea Arcangeli #include <linux/userfaultfd_k.h> 3333c3fc71SVladimir Davydov #include <linux/page_idle.h> 34baa355fdSKirill A. Shutemov #include <linux/shmem_fs.h> 356b31d595SMichal Hocko #include <linux/oom.h> 3697ae1749SKirill A. Shutemov 3771e3aac0SAndrea Arcangeli #include <asm/tlb.h> 3871e3aac0SAndrea Arcangeli #include <asm/pgalloc.h> 3971e3aac0SAndrea Arcangeli #include "internal.h" 4071e3aac0SAndrea Arcangeli 41ba76149fSAndrea Arcangeli /* 42b14d595aSMichael DeGuzis * By default, transparent hugepage support is disabled in order to avoid 43b14d595aSMichael DeGuzis * risking an increased memory footprint for applications that are not 44b14d595aSMichael DeGuzis * guaranteed to benefit from it. When transparent hugepage support is 45b14d595aSMichael DeGuzis * enabled, it is for all mappings, and khugepaged scans all mappings. 468bfa3f9aSJianguo Wu * Defrag is invoked by khugepaged hugepage allocations and by page faults 478bfa3f9aSJianguo Wu * for all hugepage allocations. 48ba76149fSAndrea Arcangeli */ 4971e3aac0SAndrea Arcangeli unsigned long transparent_hugepage_flags __read_mostly = 5013ece886SAndrea Arcangeli #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS 51ba76149fSAndrea Arcangeli (1<<TRANSPARENT_HUGEPAGE_FLAG)| 5213ece886SAndrea Arcangeli #endif 5313ece886SAndrea Arcangeli #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE 5413ece886SAndrea Arcangeli (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)| 5513ece886SAndrea Arcangeli #endif 56444eb2a4SMel Gorman (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)| 5779da5407SKirill A. Shutemov (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)| 5879da5407SKirill A. Shutemov (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 59ba76149fSAndrea Arcangeli 609a982250SKirill A. Shutemov static struct shrinker deferred_split_shrinker; 61f000565aSAndrea Arcangeli 6297ae1749SKirill A. 
Shutemov static atomic_t huge_zero_refcount; 6356873f43SWang, Yalin struct page *huge_zero_page __read_mostly; 644a6c1297SKirill A. Shutemov 656fcb52a5SAaron Lu static struct page *get_huge_zero_page(void) 6697ae1749SKirill A. Shutemov { 6797ae1749SKirill A. Shutemov struct page *zero_page; 6897ae1749SKirill A. Shutemov retry: 6997ae1749SKirill A. Shutemov if (likely(atomic_inc_not_zero(&huge_zero_refcount))) 704db0c3c2SJason Low return READ_ONCE(huge_zero_page); 7197ae1749SKirill A. Shutemov 7297ae1749SKirill A. Shutemov zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE, 7397ae1749SKirill A. Shutemov HPAGE_PMD_ORDER); 74d8a8e1f0SKirill A. Shutemov if (!zero_page) { 75d8a8e1f0SKirill A. Shutemov count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED); 765918d10aSKirill A. Shutemov return NULL; 77d8a8e1f0SKirill A. Shutemov } 78d8a8e1f0SKirill A. Shutemov count_vm_event(THP_ZERO_PAGE_ALLOC); 7997ae1749SKirill A. Shutemov preempt_disable(); 805918d10aSKirill A. Shutemov if (cmpxchg(&huge_zero_page, NULL, zero_page)) { 8197ae1749SKirill A. Shutemov preempt_enable(); 825ddacbe9SYu Zhao __free_pages(zero_page, compound_order(zero_page)); 8397ae1749SKirill A. Shutemov goto retry; 8497ae1749SKirill A. Shutemov } 8597ae1749SKirill A. Shutemov 8697ae1749SKirill A. Shutemov /* We take additional reference here. It will be put back by shrinker */ 8797ae1749SKirill A. Shutemov atomic_set(&huge_zero_refcount, 2); 8897ae1749SKirill A. Shutemov preempt_enable(); 894db0c3c2SJason Low return READ_ONCE(huge_zero_page); 9097ae1749SKirill A. Shutemov } 9197ae1749SKirill A. Shutemov 926fcb52a5SAaron Lu static void put_huge_zero_page(void) 9397ae1749SKirill A. Shutemov { 9497ae1749SKirill A. Shutemov /* 9597ae1749SKirill A. Shutemov * Counter should never go to zero here. Only shrinker can put 9697ae1749SKirill A. Shutemov * last reference. 9797ae1749SKirill A. Shutemov */ 9897ae1749SKirill A. Shutemov BUG_ON(atomic_dec_and_test(&huge_zero_refcount)); 9997ae1749SKirill A. Shutemov } 10097ae1749SKirill A. Shutemov 1016fcb52a5SAaron Lu struct page *mm_get_huge_zero_page(struct mm_struct *mm) 1026fcb52a5SAaron Lu { 1036fcb52a5SAaron Lu if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) 1046fcb52a5SAaron Lu return READ_ONCE(huge_zero_page); 1056fcb52a5SAaron Lu 1066fcb52a5SAaron Lu if (!get_huge_zero_page()) 1076fcb52a5SAaron Lu return NULL; 1086fcb52a5SAaron Lu 1096fcb52a5SAaron Lu if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) 1106fcb52a5SAaron Lu put_huge_zero_page(); 1116fcb52a5SAaron Lu 1126fcb52a5SAaron Lu return READ_ONCE(huge_zero_page); 1136fcb52a5SAaron Lu } 1146fcb52a5SAaron Lu 1156fcb52a5SAaron Lu void mm_put_huge_zero_page(struct mm_struct *mm) 1166fcb52a5SAaron Lu { 1176fcb52a5SAaron Lu if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) 1186fcb52a5SAaron Lu put_huge_zero_page(); 1196fcb52a5SAaron Lu } 1206fcb52a5SAaron Lu 12148896466SGlauber Costa static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink, 12297ae1749SKirill A. Shutemov struct shrink_control *sc) 12397ae1749SKirill A. Shutemov { 12497ae1749SKirill A. Shutemov /* we can free zero page only if last reference remains */ 12597ae1749SKirill A. Shutemov return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0; 12648896466SGlauber Costa } 12797ae1749SKirill A. Shutemov 12848896466SGlauber Costa static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink, 12948896466SGlauber Costa struct shrink_control *sc) 13048896466SGlauber Costa { 13197ae1749SKirill A. 
Shutemov if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) { 1325918d10aSKirill A. Shutemov struct page *zero_page = xchg(&huge_zero_page, NULL); 1335918d10aSKirill A. Shutemov BUG_ON(zero_page == NULL); 1345ddacbe9SYu Zhao __free_pages(zero_page, compound_order(zero_page)); 13548896466SGlauber Costa return HPAGE_PMD_NR; 13697ae1749SKirill A. Shutemov } 13797ae1749SKirill A. Shutemov 13897ae1749SKirill A. Shutemov return 0; 13997ae1749SKirill A. Shutemov } 14097ae1749SKirill A. Shutemov 14197ae1749SKirill A. Shutemov static struct shrinker huge_zero_page_shrinker = { 14248896466SGlauber Costa .count_objects = shrink_huge_zero_page_count, 14348896466SGlauber Costa .scan_objects = shrink_huge_zero_page_scan, 14497ae1749SKirill A. Shutemov .seeks = DEFAULT_SEEKS, 14597ae1749SKirill A. Shutemov }; 14697ae1749SKirill A. Shutemov 14771e3aac0SAndrea Arcangeli #ifdef CONFIG_SYSFS 14871e3aac0SAndrea Arcangeli static ssize_t enabled_show(struct kobject *kobj, 14971e3aac0SAndrea Arcangeli struct kobj_attribute *attr, char *buf) 15071e3aac0SAndrea Arcangeli { 151444eb2a4SMel Gorman if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags)) 152444eb2a4SMel Gorman return sprintf(buf, "[always] madvise never\n"); 153444eb2a4SMel Gorman else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags)) 154444eb2a4SMel Gorman return sprintf(buf, "always [madvise] never\n"); 155444eb2a4SMel Gorman else 156444eb2a4SMel Gorman return sprintf(buf, "always madvise [never]\n"); 15771e3aac0SAndrea Arcangeli } 158444eb2a4SMel Gorman 15971e3aac0SAndrea Arcangeli static ssize_t enabled_store(struct kobject *kobj, 16071e3aac0SAndrea Arcangeli struct kobj_attribute *attr, 16171e3aac0SAndrea Arcangeli const char *buf, size_t count) 16271e3aac0SAndrea Arcangeli { 16321440d7eSDavid Rientjes ssize_t ret = count; 164ba76149fSAndrea Arcangeli 16521440d7eSDavid Rientjes if (!memcmp("always", buf, 16621440d7eSDavid Rientjes min(sizeof("always")-1, count))) { 16721440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); 16821440d7eSDavid Rientjes set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); 16921440d7eSDavid Rientjes } else if (!memcmp("madvise", buf, 17021440d7eSDavid Rientjes min(sizeof("madvise")-1, count))) { 17121440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); 17221440d7eSDavid Rientjes set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); 17321440d7eSDavid Rientjes } else if (!memcmp("never", buf, 17421440d7eSDavid Rientjes min(sizeof("never")-1, count))) { 17521440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); 17621440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); 17721440d7eSDavid Rientjes } else 17821440d7eSDavid Rientjes ret = -EINVAL; 179ba76149fSAndrea Arcangeli 180ba76149fSAndrea Arcangeli if (ret > 0) { 181b46e756fSKirill A. Shutemov int err = start_stop_khugepaged(); 182ba76149fSAndrea Arcangeli if (err) 183ba76149fSAndrea Arcangeli ret = err; 184ba76149fSAndrea Arcangeli } 185ba76149fSAndrea Arcangeli return ret; 18671e3aac0SAndrea Arcangeli } 18771e3aac0SAndrea Arcangeli static struct kobj_attribute enabled_attr = 18871e3aac0SAndrea Arcangeli __ATTR(enabled, 0644, enabled_show, enabled_store); 18971e3aac0SAndrea Arcangeli 190b46e756fSKirill A. 
Shutemov ssize_t single_hugepage_flag_show(struct kobject *kobj, 19171e3aac0SAndrea Arcangeli struct kobj_attribute *attr, char *buf, 19271e3aac0SAndrea Arcangeli enum transparent_hugepage_flag flag) 19371e3aac0SAndrea Arcangeli { 194e27e6151SBen Hutchings return sprintf(buf, "%d\n", 195e27e6151SBen Hutchings !!test_bit(flag, &transparent_hugepage_flags)); 19671e3aac0SAndrea Arcangeli } 197e27e6151SBen Hutchings 198b46e756fSKirill A. Shutemov ssize_t single_hugepage_flag_store(struct kobject *kobj, 19971e3aac0SAndrea Arcangeli struct kobj_attribute *attr, 20071e3aac0SAndrea Arcangeli const char *buf, size_t count, 20171e3aac0SAndrea Arcangeli enum transparent_hugepage_flag flag) 20271e3aac0SAndrea Arcangeli { 203e27e6151SBen Hutchings unsigned long value; 204e27e6151SBen Hutchings int ret; 205e27e6151SBen Hutchings 206e27e6151SBen Hutchings ret = kstrtoul(buf, 10, &value); 207e27e6151SBen Hutchings if (ret < 0) 208e27e6151SBen Hutchings return ret; 209e27e6151SBen Hutchings if (value > 1) 21071e3aac0SAndrea Arcangeli return -EINVAL; 21171e3aac0SAndrea Arcangeli 212e27e6151SBen Hutchings if (value) 213e27e6151SBen Hutchings set_bit(flag, &transparent_hugepage_flags); 214e27e6151SBen Hutchings else 215e27e6151SBen Hutchings clear_bit(flag, &transparent_hugepage_flags); 216e27e6151SBen Hutchings 21771e3aac0SAndrea Arcangeli return count; 21871e3aac0SAndrea Arcangeli } 21971e3aac0SAndrea Arcangeli 22071e3aac0SAndrea Arcangeli static ssize_t defrag_show(struct kobject *kobj, 22171e3aac0SAndrea Arcangeli struct kobj_attribute *attr, char *buf) 22271e3aac0SAndrea Arcangeli { 223444eb2a4SMel Gorman if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) 22421440d7eSDavid Rientjes return sprintf(buf, "[always] defer defer+madvise madvise never\n"); 225444eb2a4SMel Gorman if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) 22621440d7eSDavid Rientjes return sprintf(buf, "always [defer] defer+madvise madvise never\n"); 22721440d7eSDavid Rientjes if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags)) 22821440d7eSDavid Rientjes return sprintf(buf, "always defer [defer+madvise] madvise never\n"); 22921440d7eSDavid Rientjes if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) 23021440d7eSDavid Rientjes return sprintf(buf, "always defer defer+madvise [madvise] never\n"); 23121440d7eSDavid Rientjes return sprintf(buf, "always defer defer+madvise madvise [never]\n"); 23271e3aac0SAndrea Arcangeli } 23321440d7eSDavid Rientjes 23471e3aac0SAndrea Arcangeli static ssize_t defrag_store(struct kobject *kobj, 23571e3aac0SAndrea Arcangeli struct kobj_attribute *attr, 23671e3aac0SAndrea Arcangeli const char *buf, size_t count) 23771e3aac0SAndrea Arcangeli { 23821440d7eSDavid Rientjes if (!memcmp("always", buf, 23921440d7eSDavid Rientjes min(sizeof("always")-1, count))) { 24021440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 24121440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 24221440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 24321440d7eSDavid Rientjes set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 24421440d7eSDavid Rientjes } else if (!memcmp("defer+madvise", buf, 24521440d7eSDavid Rientjes min(sizeof("defer+madvise")-1, count))) { 24621440d7eSDavid Rientjes 
clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 24721440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 24821440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 24921440d7eSDavid Rientjes set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 2504fad7fb6SDavid Rientjes } else if (!memcmp("defer", buf, 2514fad7fb6SDavid Rientjes min(sizeof("defer")-1, count))) { 2524fad7fb6SDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 2534fad7fb6SDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 2544fad7fb6SDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 2554fad7fb6SDavid Rientjes set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 25621440d7eSDavid Rientjes } else if (!memcmp("madvise", buf, 25721440d7eSDavid Rientjes min(sizeof("madvise")-1, count))) { 25821440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 25921440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 26021440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 26121440d7eSDavid Rientjes set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 26221440d7eSDavid Rientjes } else if (!memcmp("never", buf, 26321440d7eSDavid Rientjes min(sizeof("never")-1, count))) { 26421440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 26521440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 26621440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 26721440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 26821440d7eSDavid Rientjes } else 26921440d7eSDavid Rientjes return -EINVAL; 27021440d7eSDavid Rientjes 27121440d7eSDavid Rientjes return count; 27271e3aac0SAndrea Arcangeli } 27371e3aac0SAndrea Arcangeli static struct kobj_attribute defrag_attr = 27471e3aac0SAndrea Arcangeli __ATTR(defrag, 0644, defrag_show, defrag_store); 27571e3aac0SAndrea Arcangeli 27679da5407SKirill A. Shutemov static ssize_t use_zero_page_show(struct kobject *kobj, 27779da5407SKirill A. Shutemov struct kobj_attribute *attr, char *buf) 27879da5407SKirill A. Shutemov { 279b46e756fSKirill A. Shutemov return single_hugepage_flag_show(kobj, attr, buf, 28079da5407SKirill A. Shutemov TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 28179da5407SKirill A. Shutemov } 28279da5407SKirill A. Shutemov static ssize_t use_zero_page_store(struct kobject *kobj, 28379da5407SKirill A. Shutemov struct kobj_attribute *attr, const char *buf, size_t count) 28479da5407SKirill A. Shutemov { 285b46e756fSKirill A. Shutemov return single_hugepage_flag_store(kobj, attr, buf, count, 28679da5407SKirill A. Shutemov TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 28779da5407SKirill A. Shutemov } 28879da5407SKirill A. Shutemov static struct kobj_attribute use_zero_page_attr = 28979da5407SKirill A. 
Shutemov __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store); 29049920d28SHugh Dickins 29149920d28SHugh Dickins static ssize_t hpage_pmd_size_show(struct kobject *kobj, 29249920d28SHugh Dickins struct kobj_attribute *attr, char *buf) 29349920d28SHugh Dickins { 29449920d28SHugh Dickins return sprintf(buf, "%lu\n", HPAGE_PMD_SIZE); 29549920d28SHugh Dickins } 29649920d28SHugh Dickins static struct kobj_attribute hpage_pmd_size_attr = 29749920d28SHugh Dickins __ATTR_RO(hpage_pmd_size); 29849920d28SHugh Dickins 29971e3aac0SAndrea Arcangeli #ifdef CONFIG_DEBUG_VM 30071e3aac0SAndrea Arcangeli static ssize_t debug_cow_show(struct kobject *kobj, 30171e3aac0SAndrea Arcangeli struct kobj_attribute *attr, char *buf) 30271e3aac0SAndrea Arcangeli { 303b46e756fSKirill A. Shutemov return single_hugepage_flag_show(kobj, attr, buf, 30471e3aac0SAndrea Arcangeli TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); 30571e3aac0SAndrea Arcangeli } 30671e3aac0SAndrea Arcangeli static ssize_t debug_cow_store(struct kobject *kobj, 30771e3aac0SAndrea Arcangeli struct kobj_attribute *attr, 30871e3aac0SAndrea Arcangeli const char *buf, size_t count) 30971e3aac0SAndrea Arcangeli { 310b46e756fSKirill A. Shutemov return single_hugepage_flag_store(kobj, attr, buf, count, 31171e3aac0SAndrea Arcangeli TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); 31271e3aac0SAndrea Arcangeli } 31371e3aac0SAndrea Arcangeli static struct kobj_attribute debug_cow_attr = 31471e3aac0SAndrea Arcangeli __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store); 31571e3aac0SAndrea Arcangeli #endif /* CONFIG_DEBUG_VM */ 31671e3aac0SAndrea Arcangeli 31771e3aac0SAndrea Arcangeli static struct attribute *hugepage_attr[] = { 31871e3aac0SAndrea Arcangeli &enabled_attr.attr, 31971e3aac0SAndrea Arcangeli &defrag_attr.attr, 32079da5407SKirill A. Shutemov &use_zero_page_attr.attr, 32149920d28SHugh Dickins &hpage_pmd_size_attr.attr, 322e496cf3dSKirill A. Shutemov #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) 3235a6e75f8SKirill A. Shutemov &shmem_enabled_attr.attr, 3245a6e75f8SKirill A. 
Shutemov #endif 32571e3aac0SAndrea Arcangeli #ifdef CONFIG_DEBUG_VM 32671e3aac0SAndrea Arcangeli &debug_cow_attr.attr, 32771e3aac0SAndrea Arcangeli #endif 32871e3aac0SAndrea Arcangeli NULL, 32971e3aac0SAndrea Arcangeli }; 33071e3aac0SAndrea Arcangeli 3318aa95a21SArvind Yadav static const struct attribute_group hugepage_attr_group = { 33271e3aac0SAndrea Arcangeli .attrs = hugepage_attr, 333ba76149fSAndrea Arcangeli }; 334ba76149fSAndrea Arcangeli 335569e5590SShaohua Li static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) 336569e5590SShaohua Li { 337569e5590SShaohua Li int err; 338569e5590SShaohua Li 339569e5590SShaohua Li *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj); 340569e5590SShaohua Li if (unlikely(!*hugepage_kobj)) { 341ae3a8c1cSAndrew Morton pr_err("failed to create transparent hugepage kobject\n"); 342569e5590SShaohua Li return -ENOMEM; 343569e5590SShaohua Li } 344569e5590SShaohua Li 345569e5590SShaohua Li err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group); 346569e5590SShaohua Li if (err) { 347ae3a8c1cSAndrew Morton pr_err("failed to register transparent hugepage group\n"); 348569e5590SShaohua Li goto delete_obj; 349569e5590SShaohua Li } 350569e5590SShaohua Li 351569e5590SShaohua Li err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group); 352569e5590SShaohua Li if (err) { 353ae3a8c1cSAndrew Morton pr_err("failed to register transparent hugepage group\n"); 354569e5590SShaohua Li goto remove_hp_group; 355569e5590SShaohua Li } 356569e5590SShaohua Li 357569e5590SShaohua Li return 0; 358569e5590SShaohua Li 359569e5590SShaohua Li remove_hp_group: 360569e5590SShaohua Li sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group); 361569e5590SShaohua Li delete_obj: 362569e5590SShaohua Li kobject_put(*hugepage_kobj); 363569e5590SShaohua Li return err; 364569e5590SShaohua Li } 365569e5590SShaohua Li 366569e5590SShaohua Li static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj) 367569e5590SShaohua Li { 368569e5590SShaohua Li sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group); 369569e5590SShaohua Li sysfs_remove_group(hugepage_kobj, &hugepage_attr_group); 370569e5590SShaohua Li kobject_put(hugepage_kobj); 371569e5590SShaohua Li } 372569e5590SShaohua Li #else 373569e5590SShaohua Li static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj) 374569e5590SShaohua Li { 375569e5590SShaohua Li return 0; 376569e5590SShaohua Li } 377569e5590SShaohua Li 378569e5590SShaohua Li static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj) 379569e5590SShaohua Li { 380569e5590SShaohua Li } 38171e3aac0SAndrea Arcangeli #endif /* CONFIG_SYSFS */ 38271e3aac0SAndrea Arcangeli 38371e3aac0SAndrea Arcangeli static int __init hugepage_init(void) 38471e3aac0SAndrea Arcangeli { 38571e3aac0SAndrea Arcangeli int err; 386569e5590SShaohua Li struct kobject *hugepage_kobj; 38771e3aac0SAndrea Arcangeli 3884b7167b9SAndrea Arcangeli if (!has_transparent_hugepage()) { 3894b7167b9SAndrea Arcangeli transparent_hugepage_flags = 0; 390569e5590SShaohua Li return -EINVAL; 3914b7167b9SAndrea Arcangeli } 3924b7167b9SAndrea Arcangeli 393ff20c2e0SKirill A. Shutemov /* 394ff20c2e0SKirill A. Shutemov * hugepages can't be allocated by the buddy allocator 395ff20c2e0SKirill A. Shutemov */ 396ff20c2e0SKirill A. Shutemov MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER); 397ff20c2e0SKirill A. Shutemov /* 398ff20c2e0SKirill A. Shutemov * we use page->mapping and page->index in second tail page 399ff20c2e0SKirill A. 
Shutemov * as list_head: assuming THP order >= 2 400ff20c2e0SKirill A. Shutemov */ 401ff20c2e0SKirill A. Shutemov MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2); 402ff20c2e0SKirill A. Shutemov 403569e5590SShaohua Li err = hugepage_init_sysfs(&hugepage_kobj); 404569e5590SShaohua Li if (err) 40565ebb64fSKirill A. Shutemov goto err_sysfs; 406ba76149fSAndrea Arcangeli 407b46e756fSKirill A. Shutemov err = khugepaged_init(); 408ba76149fSAndrea Arcangeli if (err) 40965ebb64fSKirill A. Shutemov goto err_slab; 410ba76149fSAndrea Arcangeli 41165ebb64fSKirill A. Shutemov err = register_shrinker(&huge_zero_page_shrinker); 41265ebb64fSKirill A. Shutemov if (err) 41365ebb64fSKirill A. Shutemov goto err_hzp_shrinker; 4149a982250SKirill A. Shutemov err = register_shrinker(&deferred_split_shrinker); 4159a982250SKirill A. Shutemov if (err) 4169a982250SKirill A. Shutemov goto err_split_shrinker; 41797ae1749SKirill A. Shutemov 41897562cd2SRik van Riel /* 41997562cd2SRik van Riel * By default disable transparent hugepages on smaller systems, 42097562cd2SRik van Riel * where the extra memory used could hurt more than TLB overhead 42197562cd2SRik van Riel * is likely to save. The admin can still enable it through /sys. 42297562cd2SRik van Riel */ 42379553da2SKirill A. Shutemov if (totalram_pages < (512 << (20 - PAGE_SHIFT))) { 42497562cd2SRik van Riel transparent_hugepage_flags = 0; 42579553da2SKirill A. Shutemov return 0; 42679553da2SKirill A. Shutemov } 42797562cd2SRik van Riel 42879553da2SKirill A. Shutemov err = start_stop_khugepaged(); 42965ebb64fSKirill A. Shutemov if (err) 43065ebb64fSKirill A. Shutemov goto err_khugepaged; 431ba76149fSAndrea Arcangeli 432569e5590SShaohua Li return 0; 43365ebb64fSKirill A. Shutemov err_khugepaged: 4349a982250SKirill A. Shutemov unregister_shrinker(&deferred_split_shrinker); 4359a982250SKirill A. Shutemov err_split_shrinker: 43665ebb64fSKirill A. Shutemov unregister_shrinker(&huge_zero_page_shrinker); 43765ebb64fSKirill A. Shutemov err_hzp_shrinker: 438b46e756fSKirill A. Shutemov khugepaged_destroy(); 43965ebb64fSKirill A. Shutemov err_slab: 440569e5590SShaohua Li hugepage_exit_sysfs(hugepage_kobj); 44165ebb64fSKirill A. 
Shutemov err_sysfs: 442ba76149fSAndrea Arcangeli return err; 44371e3aac0SAndrea Arcangeli } 444a64fb3cdSPaul Gortmaker subsys_initcall(hugepage_init); 44571e3aac0SAndrea Arcangeli 44671e3aac0SAndrea Arcangeli static int __init setup_transparent_hugepage(char *str) 44771e3aac0SAndrea Arcangeli { 44871e3aac0SAndrea Arcangeli int ret = 0; 44971e3aac0SAndrea Arcangeli if (!str) 45071e3aac0SAndrea Arcangeli goto out; 45171e3aac0SAndrea Arcangeli if (!strcmp(str, "always")) { 45271e3aac0SAndrea Arcangeli set_bit(TRANSPARENT_HUGEPAGE_FLAG, 45371e3aac0SAndrea Arcangeli &transparent_hugepage_flags); 45471e3aac0SAndrea Arcangeli clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 45571e3aac0SAndrea Arcangeli &transparent_hugepage_flags); 45671e3aac0SAndrea Arcangeli ret = 1; 45771e3aac0SAndrea Arcangeli } else if (!strcmp(str, "madvise")) { 45871e3aac0SAndrea Arcangeli clear_bit(TRANSPARENT_HUGEPAGE_FLAG, 45971e3aac0SAndrea Arcangeli &transparent_hugepage_flags); 46071e3aac0SAndrea Arcangeli set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 46171e3aac0SAndrea Arcangeli &transparent_hugepage_flags); 46271e3aac0SAndrea Arcangeli ret = 1; 46371e3aac0SAndrea Arcangeli } else if (!strcmp(str, "never")) { 46471e3aac0SAndrea Arcangeli clear_bit(TRANSPARENT_HUGEPAGE_FLAG, 46571e3aac0SAndrea Arcangeli &transparent_hugepage_flags); 46671e3aac0SAndrea Arcangeli clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 46771e3aac0SAndrea Arcangeli &transparent_hugepage_flags); 46871e3aac0SAndrea Arcangeli ret = 1; 46971e3aac0SAndrea Arcangeli } 47071e3aac0SAndrea Arcangeli out: 47171e3aac0SAndrea Arcangeli if (!ret) 472ae3a8c1cSAndrew Morton pr_warn("transparent_hugepage= cannot parse, ignored\n"); 47371e3aac0SAndrea Arcangeli return ret; 47471e3aac0SAndrea Arcangeli } 47571e3aac0SAndrea Arcangeli __setup("transparent_hugepage=", setup_transparent_hugepage); 47671e3aac0SAndrea Arcangeli 477f55e1014SLinus Torvalds pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) 47871e3aac0SAndrea Arcangeli { 479f55e1014SLinus Torvalds if (likely(vma->vm_flags & VM_WRITE)) 48071e3aac0SAndrea Arcangeli pmd = pmd_mkwrite(pmd); 48171e3aac0SAndrea Arcangeli return pmd; 48271e3aac0SAndrea Arcangeli } 48371e3aac0SAndrea Arcangeli 4849a982250SKirill A. Shutemov static inline struct list_head *page_deferred_list(struct page *page) 4859a982250SKirill A. Shutemov { 486fa3015b7SMatthew Wilcox /* ->lru in the tail pages is occupied by compound_head. */ 487fa3015b7SMatthew Wilcox return &page[2].deferred_list; 4889a982250SKirill A. Shutemov } 4899a982250SKirill A. Shutemov 4909a982250SKirill A. Shutemov void prep_transhuge_page(struct page *page) 4919a982250SKirill A. Shutemov { 4929a982250SKirill A. Shutemov /* 4939a982250SKirill A. Shutemov * we use page->mapping and page->indexlru in second tail page 4949a982250SKirill A. Shutemov * as list_head: assuming THP order >= 2 4959a982250SKirill A. Shutemov */ 4969a982250SKirill A. Shutemov 4979a982250SKirill A. Shutemov INIT_LIST_HEAD(page_deferred_list(page)); 4989a982250SKirill A. Shutemov set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR); 4999a982250SKirill A. Shutemov } 5009a982250SKirill A. 
Shutemov 50174d2fad1SToshi Kani unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len, 50274d2fad1SToshi Kani loff_t off, unsigned long flags, unsigned long size) 50374d2fad1SToshi Kani { 50474d2fad1SToshi Kani unsigned long addr; 50574d2fad1SToshi Kani loff_t off_end = off + len; 50674d2fad1SToshi Kani loff_t off_align = round_up(off, size); 50774d2fad1SToshi Kani unsigned long len_pad; 50874d2fad1SToshi Kani 50974d2fad1SToshi Kani if (off_end <= off_align || (off_end - off_align) < size) 51074d2fad1SToshi Kani return 0; 51174d2fad1SToshi Kani 51274d2fad1SToshi Kani len_pad = len + size; 51374d2fad1SToshi Kani if (len_pad < len || (off + len_pad) < off) 51474d2fad1SToshi Kani return 0; 51574d2fad1SToshi Kani 51674d2fad1SToshi Kani addr = current->mm->get_unmapped_area(filp, 0, len_pad, 51774d2fad1SToshi Kani off >> PAGE_SHIFT, flags); 51874d2fad1SToshi Kani if (IS_ERR_VALUE(addr)) 51974d2fad1SToshi Kani return 0; 52074d2fad1SToshi Kani 52174d2fad1SToshi Kani addr += (off - addr) & (size - 1); 52274d2fad1SToshi Kani return addr; 52374d2fad1SToshi Kani } 52474d2fad1SToshi Kani 52574d2fad1SToshi Kani unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, 52674d2fad1SToshi Kani unsigned long len, unsigned long pgoff, unsigned long flags) 52774d2fad1SToshi Kani { 52874d2fad1SToshi Kani loff_t off = (loff_t)pgoff << PAGE_SHIFT; 52974d2fad1SToshi Kani 53074d2fad1SToshi Kani if (addr) 53174d2fad1SToshi Kani goto out; 53274d2fad1SToshi Kani if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD)) 53374d2fad1SToshi Kani goto out; 53474d2fad1SToshi Kani 53574d2fad1SToshi Kani addr = __thp_get_unmapped_area(filp, len, off, flags, PMD_SIZE); 53674d2fad1SToshi Kani if (addr) 53774d2fad1SToshi Kani return addr; 53874d2fad1SToshi Kani 53974d2fad1SToshi Kani out: 54074d2fad1SToshi Kani return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); 54174d2fad1SToshi Kani } 54274d2fad1SToshi Kani EXPORT_SYMBOL_GPL(thp_get_unmapped_area); 54374d2fad1SToshi Kani 5442b740303SSouptick Joarder static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf, 5452b740303SSouptick Joarder struct page *page, gfp_t gfp) 54671e3aac0SAndrea Arcangeli { 54782b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 54800501b53SJohannes Weiner struct mem_cgroup *memcg; 54971e3aac0SAndrea Arcangeli pgtable_t pgtable; 55082b0f8c3SJan Kara unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 5512b740303SSouptick Joarder vm_fault_t ret = 0; 55271e3aac0SAndrea Arcangeli 553309381feSSasha Levin VM_BUG_ON_PAGE(!PageCompound(page), page); 55400501b53SJohannes Weiner 5552cf85583STejun Heo if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) { 5566b251fc9SAndrea Arcangeli put_page(page); 5576b251fc9SAndrea Arcangeli count_vm_event(THP_FAULT_FALLBACK); 5586b251fc9SAndrea Arcangeli return VM_FAULT_FALLBACK; 5596b251fc9SAndrea Arcangeli } 56071e3aac0SAndrea Arcangeli 561bae473a4SKirill A. Shutemov pgtable = pte_alloc_one(vma->vm_mm, haddr); 56200501b53SJohannes Weiner if (unlikely(!pgtable)) { 5636b31d595SMichal Hocko ret = VM_FAULT_OOM; 5646b31d595SMichal Hocko goto release; 56500501b53SJohannes Weiner } 56600501b53SJohannes Weiner 567c79b57e4SHuang Ying clear_huge_page(page, vmf->address, HPAGE_PMD_NR); 56852f37629SMinchan Kim /* 56952f37629SMinchan Kim * The memory barrier inside __SetPageUptodate makes sure that 57052f37629SMinchan Kim * clear_huge_page writes become visible before the set_pmd_at() 57152f37629SMinchan Kim * write. 
57252f37629SMinchan Kim */ 57371e3aac0SAndrea Arcangeli __SetPageUptodate(page); 57471e3aac0SAndrea Arcangeli 57582b0f8c3SJan Kara vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 57682b0f8c3SJan Kara if (unlikely(!pmd_none(*vmf->pmd))) { 5776b31d595SMichal Hocko goto unlock_release; 57871e3aac0SAndrea Arcangeli } else { 57971e3aac0SAndrea Arcangeli pmd_t entry; 5806b251fc9SAndrea Arcangeli 5816b31d595SMichal Hocko ret = check_stable_address_space(vma->vm_mm); 5826b31d595SMichal Hocko if (ret) 5836b31d595SMichal Hocko goto unlock_release; 5846b31d595SMichal Hocko 5856b251fc9SAndrea Arcangeli /* Deliver the page fault to userland */ 5866b251fc9SAndrea Arcangeli if (userfaultfd_missing(vma)) { 5872b740303SSouptick Joarder vm_fault_t ret2; 5886b251fc9SAndrea Arcangeli 58982b0f8c3SJan Kara spin_unlock(vmf->ptl); 590f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(page, memcg, true); 5916b251fc9SAndrea Arcangeli put_page(page); 592bae473a4SKirill A. Shutemov pte_free(vma->vm_mm, pgtable); 5932b740303SSouptick Joarder ret2 = handle_userfault(vmf, VM_UFFD_MISSING); 5942b740303SSouptick Joarder VM_BUG_ON(ret2 & VM_FAULT_FALLBACK); 5952b740303SSouptick Joarder return ret2; 5966b251fc9SAndrea Arcangeli } 5976b251fc9SAndrea Arcangeli 5983122359aSKirill A. Shutemov entry = mk_huge_pmd(page, vma->vm_page_prot); 599f55e1014SLinus Torvalds entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 600d281ee61SKirill A. Shutemov page_add_new_anon_rmap(page, vma, haddr, true); 601f627c2f5SKirill A. Shutemov mem_cgroup_commit_charge(page, memcg, false, true); 60200501b53SJohannes Weiner lru_cache_add_active_or_unevictable(page, vma); 60382b0f8c3SJan Kara pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); 60482b0f8c3SJan Kara set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); 605bae473a4SKirill A. Shutemov add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); 606c4812909SKirill A. 
Shutemov mm_inc_nr_ptes(vma->vm_mm); 60782b0f8c3SJan Kara spin_unlock(vmf->ptl); 6086b251fc9SAndrea Arcangeli count_vm_event(THP_FAULT_ALLOC); 60971e3aac0SAndrea Arcangeli } 61071e3aac0SAndrea Arcangeli 611aa2e878eSDavid Rientjes return 0; 6126b31d595SMichal Hocko unlock_release: 6136b31d595SMichal Hocko spin_unlock(vmf->ptl); 6146b31d595SMichal Hocko release: 6156b31d595SMichal Hocko if (pgtable) 6166b31d595SMichal Hocko pte_free(vma->vm_mm, pgtable); 6176b31d595SMichal Hocko mem_cgroup_cancel_charge(page, memcg, true); 6186b31d595SMichal Hocko put_page(page); 6196b31d595SMichal Hocko return ret; 6206b31d595SMichal Hocko 62171e3aac0SAndrea Arcangeli } 62271e3aac0SAndrea Arcangeli 623444eb2a4SMel Gorman /* 62421440d7eSDavid Rientjes * always: directly stall for all thp allocations 62521440d7eSDavid Rientjes * defer: wake kswapd and fail if not immediately available 62621440d7eSDavid Rientjes * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise 62721440d7eSDavid Rientjes * fail if not immediately available 62821440d7eSDavid Rientjes * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately 62921440d7eSDavid Rientjes * available 63021440d7eSDavid Rientjes * never: never stall for any thp allocation 631444eb2a4SMel Gorman */ 63289c83fb5SMichal Hocko static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma, unsigned long addr) 6330bbbc0b3SAndrea Arcangeli { 63421440d7eSDavid Rientjes const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE); 63589c83fb5SMichal Hocko gfp_t this_node = 0; 63689c83fb5SMichal Hocko 63789c83fb5SMichal Hocko #ifdef CONFIG_NUMA 63889c83fb5SMichal Hocko struct mempolicy *pol; 63989c83fb5SMichal Hocko /* 64089c83fb5SMichal Hocko * __GFP_THISNODE is used only when __GFP_DIRECT_RECLAIM is not 64189c83fb5SMichal Hocko * specified, to express a general desire to stay on the current 64289c83fb5SMichal Hocko * node for optimistic allocation attempts. If the defrag mode 64389c83fb5SMichal Hocko * and/or madvise hint requires the direct reclaim then we prefer 64489c83fb5SMichal Hocko * to fallback to other node rather than node reclaim because that 64589c83fb5SMichal Hocko * can lead to excessive reclaim even though there is free memory 64689c83fb5SMichal Hocko * on other nodes. We expect that NUMA preferences are specified 64789c83fb5SMichal Hocko * by memory policies. 64889c83fb5SMichal Hocko */ 64989c83fb5SMichal Hocko pol = get_vma_policy(vma, addr); 65089c83fb5SMichal Hocko if (pol->mode != MPOL_BIND) 65189c83fb5SMichal Hocko this_node = __GFP_THISNODE; 65289c83fb5SMichal Hocko mpol_cond_put(pol); 65389c83fb5SMichal Hocko #endif 654444eb2a4SMel Gorman 65521440d7eSDavid Rientjes if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) 65625160354SVlastimil Babka return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY); 65721440d7eSDavid Rientjes if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) 65889c83fb5SMichal Hocko return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM | this_node; 65921440d7eSDavid Rientjes if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags)) 66021440d7eSDavid Rientjes return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM : 66189c83fb5SMichal Hocko __GFP_KSWAPD_RECLAIM | this_node); 66221440d7eSDavid Rientjes if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) 66321440d7eSDavid Rientjes return GFP_TRANSHUGE_LIGHT | (vma_madvised ? 
__GFP_DIRECT_RECLAIM : 66489c83fb5SMichal Hocko this_node); 66589c83fb5SMichal Hocko return GFP_TRANSHUGE_LIGHT | this_node; 666444eb2a4SMel Gorman } 667444eb2a4SMel Gorman 668c4088ebdSKirill A. Shutemov /* Caller must hold page table lock. */ 669d295e341SKirill A. Shutemov static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, 67097ae1749SKirill A. Shutemov struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, 6715918d10aSKirill A. Shutemov struct page *zero_page) 672fc9fe822SKirill A. Shutemov { 673fc9fe822SKirill A. Shutemov pmd_t entry; 6747c414164SAndrew Morton if (!pmd_none(*pmd)) 6757c414164SAndrew Morton return false; 6765918d10aSKirill A. Shutemov entry = mk_pmd(zero_page, vma->vm_page_prot); 677fc9fe822SKirill A. Shutemov entry = pmd_mkhuge(entry); 67812c9d70bSMatthew Wilcox if (pgtable) 6796b0b50b0SAneesh Kumar K.V pgtable_trans_huge_deposit(mm, pmd, pgtable); 680fc9fe822SKirill A. Shutemov set_pmd_at(mm, haddr, pmd, entry); 681c4812909SKirill A. Shutemov mm_inc_nr_ptes(mm); 6827c414164SAndrew Morton return true; 683fc9fe822SKirill A. Shutemov } 684fc9fe822SKirill A. Shutemov 6852b740303SSouptick Joarder vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf) 68671e3aac0SAndrea Arcangeli { 68782b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 688077fcf11SAneesh Kumar K.V gfp_t gfp; 68971e3aac0SAndrea Arcangeli struct page *page; 69082b0f8c3SJan Kara unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 69171e3aac0SAndrea Arcangeli 692128ec037SKirill A. Shutemov if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) 693c0292554SKirill A. Shutemov return VM_FAULT_FALLBACK; 69471e3aac0SAndrea Arcangeli if (unlikely(anon_vma_prepare(vma))) 69571e3aac0SAndrea Arcangeli return VM_FAULT_OOM; 6966d50e60cSDavid Rientjes if (unlikely(khugepaged_enter(vma, vma->vm_flags))) 697ba76149fSAndrea Arcangeli return VM_FAULT_OOM; 69882b0f8c3SJan Kara if (!(vmf->flags & FAULT_FLAG_WRITE) && 699bae473a4SKirill A. Shutemov !mm_forbids_zeropage(vma->vm_mm) && 70079da5407SKirill A. Shutemov transparent_hugepage_use_zero_page()) { 70180371957SKirill A. Shutemov pgtable_t pgtable; 7025918d10aSKirill A. Shutemov struct page *zero_page; 7033ea41e62SKirill A. Shutemov bool set; 7042b740303SSouptick Joarder vm_fault_t ret; 705bae473a4SKirill A. Shutemov pgtable = pte_alloc_one(vma->vm_mm, haddr); 70680371957SKirill A. Shutemov if (unlikely(!pgtable)) 70780371957SKirill A. Shutemov return VM_FAULT_OOM; 7086fcb52a5SAaron Lu zero_page = mm_get_huge_zero_page(vma->vm_mm); 7095918d10aSKirill A. Shutemov if (unlikely(!zero_page)) { 710bae473a4SKirill A. Shutemov pte_free(vma->vm_mm, pgtable); 71197ae1749SKirill A. Shutemov count_vm_event(THP_FAULT_FALLBACK); 712c0292554SKirill A. Shutemov return VM_FAULT_FALLBACK; 71397ae1749SKirill A. Shutemov } 71482b0f8c3SJan Kara vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 7156b251fc9SAndrea Arcangeli ret = 0; 7166b251fc9SAndrea Arcangeli set = false; 71782b0f8c3SJan Kara if (pmd_none(*vmf->pmd)) { 7186b31d595SMichal Hocko ret = check_stable_address_space(vma->vm_mm); 7196b31d595SMichal Hocko if (ret) { 7206b31d595SMichal Hocko spin_unlock(vmf->ptl); 7216b31d595SMichal Hocko } else if (userfaultfd_missing(vma)) { 72282b0f8c3SJan Kara spin_unlock(vmf->ptl); 72382b0f8c3SJan Kara ret = handle_userfault(vmf, VM_UFFD_MISSING); 7246b251fc9SAndrea Arcangeli VM_BUG_ON(ret & VM_FAULT_FALLBACK); 7256b251fc9SAndrea Arcangeli } else { 726bae473a4SKirill A. 
Shutemov set_huge_zero_page(pgtable, vma->vm_mm, vma, 72782b0f8c3SJan Kara haddr, vmf->pmd, zero_page); 72882b0f8c3SJan Kara spin_unlock(vmf->ptl); 7296b251fc9SAndrea Arcangeli set = true; 7306b251fc9SAndrea Arcangeli } 7316b251fc9SAndrea Arcangeli } else 73282b0f8c3SJan Kara spin_unlock(vmf->ptl); 7336fcb52a5SAaron Lu if (!set) 734bae473a4SKirill A. Shutemov pte_free(vma->vm_mm, pgtable); 7356b251fc9SAndrea Arcangeli return ret; 73680371957SKirill A. Shutemov } 73789c83fb5SMichal Hocko gfp = alloc_hugepage_direct_gfpmask(vma, haddr); 73889c83fb5SMichal Hocko page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, vma, haddr, numa_node_id()); 73981ab4201SAndi Kleen if (unlikely(!page)) { 74081ab4201SAndi Kleen count_vm_event(THP_FAULT_FALLBACK); 741c0292554SKirill A. Shutemov return VM_FAULT_FALLBACK; 74281ab4201SAndi Kleen } 7439a982250SKirill A. Shutemov prep_transhuge_page(page); 74482b0f8c3SJan Kara return __do_huge_pmd_anonymous_page(vmf, page, gfp); 74571e3aac0SAndrea Arcangeli } 74671e3aac0SAndrea Arcangeli 747ae18d6dcSMatthew Wilcox static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, 7483b6521f5SOliver O'Halloran pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write, 7493b6521f5SOliver O'Halloran pgtable_t pgtable) 7505cad465dSMatthew Wilcox { 7515cad465dSMatthew Wilcox struct mm_struct *mm = vma->vm_mm; 7525cad465dSMatthew Wilcox pmd_t entry; 7535cad465dSMatthew Wilcox spinlock_t *ptl; 7545cad465dSMatthew Wilcox 7555cad465dSMatthew Wilcox ptl = pmd_lock(mm, pmd); 756f25748e3SDan Williams entry = pmd_mkhuge(pfn_t_pmd(pfn, prot)); 757f25748e3SDan Williams if (pfn_t_devmap(pfn)) 758f25748e3SDan Williams entry = pmd_mkdevmap(entry); 7595cad465dSMatthew Wilcox if (write) { 760f55e1014SLinus Torvalds entry = pmd_mkyoung(pmd_mkdirty(entry)); 761f55e1014SLinus Torvalds entry = maybe_pmd_mkwrite(entry, vma); 7625cad465dSMatthew Wilcox } 7633b6521f5SOliver O'Halloran 7643b6521f5SOliver O'Halloran if (pgtable) { 7653b6521f5SOliver O'Halloran pgtable_trans_huge_deposit(mm, pmd, pgtable); 766c4812909SKirill A. Shutemov mm_inc_nr_ptes(mm); 7673b6521f5SOliver O'Halloran } 7683b6521f5SOliver O'Halloran 7695cad465dSMatthew Wilcox set_pmd_at(mm, addr, pmd, entry); 7705cad465dSMatthew Wilcox update_mmu_cache_pmd(vma, addr, pmd); 7715cad465dSMatthew Wilcox spin_unlock(ptl); 7725cad465dSMatthew Wilcox } 7735cad465dSMatthew Wilcox 774226ab561SDan Williams vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, 775f25748e3SDan Williams pmd_t *pmd, pfn_t pfn, bool write) 7765cad465dSMatthew Wilcox { 7775cad465dSMatthew Wilcox pgprot_t pgprot = vma->vm_page_prot; 7783b6521f5SOliver O'Halloran pgtable_t pgtable = NULL; 7795cad465dSMatthew Wilcox /* 7805cad465dSMatthew Wilcox * If we had pmd_special, we could avoid all these restrictions, 7815cad465dSMatthew Wilcox * but we need to be consistent with PTEs and architectures that 7825cad465dSMatthew Wilcox * can't support a 'special' bit. 
7835cad465dSMatthew Wilcox */ 784e1fb4a08SDave Jiang BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && 785e1fb4a08SDave Jiang !pfn_t_devmap(pfn)); 7865cad465dSMatthew Wilcox BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == 7875cad465dSMatthew Wilcox (VM_PFNMAP|VM_MIXEDMAP)); 7885cad465dSMatthew Wilcox BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); 7895cad465dSMatthew Wilcox 7905cad465dSMatthew Wilcox if (addr < vma->vm_start || addr >= vma->vm_end) 7915cad465dSMatthew Wilcox return VM_FAULT_SIGBUS; 792308a047cSBorislav Petkov 7933b6521f5SOliver O'Halloran if (arch_needs_pgtable_deposit()) { 7943b6521f5SOliver O'Halloran pgtable = pte_alloc_one(vma->vm_mm, addr); 7953b6521f5SOliver O'Halloran if (!pgtable) 7963b6521f5SOliver O'Halloran return VM_FAULT_OOM; 7973b6521f5SOliver O'Halloran } 7983b6521f5SOliver O'Halloran 799308a047cSBorislav Petkov track_pfn_insert(vma, &pgprot, pfn); 800308a047cSBorislav Petkov 8013b6521f5SOliver O'Halloran insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write, pgtable); 802ae18d6dcSMatthew Wilcox return VM_FAULT_NOPAGE; 8035cad465dSMatthew Wilcox } 804dee41079SDan Williams EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd); 8055cad465dSMatthew Wilcox 806a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 807f55e1014SLinus Torvalds static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma) 808a00cc7d9SMatthew Wilcox { 809f55e1014SLinus Torvalds if (likely(vma->vm_flags & VM_WRITE)) 810a00cc7d9SMatthew Wilcox pud = pud_mkwrite(pud); 811a00cc7d9SMatthew Wilcox return pud; 812a00cc7d9SMatthew Wilcox } 813a00cc7d9SMatthew Wilcox 814a00cc7d9SMatthew Wilcox static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, 815a00cc7d9SMatthew Wilcox pud_t *pud, pfn_t pfn, pgprot_t prot, bool write) 816a00cc7d9SMatthew Wilcox { 817a00cc7d9SMatthew Wilcox struct mm_struct *mm = vma->vm_mm; 818a00cc7d9SMatthew Wilcox pud_t entry; 819a00cc7d9SMatthew Wilcox spinlock_t *ptl; 820a00cc7d9SMatthew Wilcox 821a00cc7d9SMatthew Wilcox ptl = pud_lock(mm, pud); 822a00cc7d9SMatthew Wilcox entry = pud_mkhuge(pfn_t_pud(pfn, prot)); 823a00cc7d9SMatthew Wilcox if (pfn_t_devmap(pfn)) 824a00cc7d9SMatthew Wilcox entry = pud_mkdevmap(entry); 825a00cc7d9SMatthew Wilcox if (write) { 826f55e1014SLinus Torvalds entry = pud_mkyoung(pud_mkdirty(entry)); 827f55e1014SLinus Torvalds entry = maybe_pud_mkwrite(entry, vma); 828a00cc7d9SMatthew Wilcox } 829a00cc7d9SMatthew Wilcox set_pud_at(mm, addr, pud, entry); 830a00cc7d9SMatthew Wilcox update_mmu_cache_pud(vma, addr, pud); 831a00cc7d9SMatthew Wilcox spin_unlock(ptl); 832a00cc7d9SMatthew Wilcox } 833a00cc7d9SMatthew Wilcox 834226ab561SDan Williams vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, 835a00cc7d9SMatthew Wilcox pud_t *pud, pfn_t pfn, bool write) 836a00cc7d9SMatthew Wilcox { 837a00cc7d9SMatthew Wilcox pgprot_t pgprot = vma->vm_page_prot; 838a00cc7d9SMatthew Wilcox /* 839a00cc7d9SMatthew Wilcox * If we had pud_special, we could avoid all these restrictions, 840a00cc7d9SMatthew Wilcox * but we need to be consistent with PTEs and architectures that 841a00cc7d9SMatthew Wilcox * can't support a 'special' bit. 
842a00cc7d9SMatthew Wilcox */ 84362ec0d8cSDave Jiang BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && 84462ec0d8cSDave Jiang !pfn_t_devmap(pfn)); 845a00cc7d9SMatthew Wilcox BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == 846a00cc7d9SMatthew Wilcox (VM_PFNMAP|VM_MIXEDMAP)); 847a00cc7d9SMatthew Wilcox BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); 848a00cc7d9SMatthew Wilcox 849a00cc7d9SMatthew Wilcox if (addr < vma->vm_start || addr >= vma->vm_end) 850a00cc7d9SMatthew Wilcox return VM_FAULT_SIGBUS; 851a00cc7d9SMatthew Wilcox 852a00cc7d9SMatthew Wilcox track_pfn_insert(vma, &pgprot, pfn); 853a00cc7d9SMatthew Wilcox 854a00cc7d9SMatthew Wilcox insert_pfn_pud(vma, addr, pud, pfn, pgprot, write); 855a00cc7d9SMatthew Wilcox return VM_FAULT_NOPAGE; 856a00cc7d9SMatthew Wilcox } 857a00cc7d9SMatthew Wilcox EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud); 858a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 859a00cc7d9SMatthew Wilcox 8603565fce3SDan Williams static void touch_pmd(struct vm_area_struct *vma, unsigned long addr, 861a8f97366SKirill A. Shutemov pmd_t *pmd, int flags) 8623565fce3SDan Williams { 8633565fce3SDan Williams pmd_t _pmd; 8643565fce3SDan Williams 865a8f97366SKirill A. Shutemov _pmd = pmd_mkyoung(*pmd); 866a8f97366SKirill A. Shutemov if (flags & FOLL_WRITE) 867a8f97366SKirill A. Shutemov _pmd = pmd_mkdirty(_pmd); 8683565fce3SDan Williams if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, 869a8f97366SKirill A. Shutemov pmd, _pmd, flags & FOLL_WRITE)) 8703565fce3SDan Williams update_mmu_cache_pmd(vma, addr, pmd); 8713565fce3SDan Williams } 8723565fce3SDan Williams 8733565fce3SDan Williams struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, 874df06b37fSKeith Busch pmd_t *pmd, int flags, struct dev_pagemap **pgmap) 8753565fce3SDan Williams { 8763565fce3SDan Williams unsigned long pfn = pmd_pfn(*pmd); 8773565fce3SDan Williams struct mm_struct *mm = vma->vm_mm; 8783565fce3SDan Williams struct page *page; 8793565fce3SDan Williams 8803565fce3SDan Williams assert_spin_locked(pmd_lockptr(mm, pmd)); 8813565fce3SDan Williams 8828310d48bSKeno Fischer /* 8838310d48bSKeno Fischer * When we COW a devmap PMD entry, we split it into PTEs, so we should 8848310d48bSKeno Fischer * not be in this function with `flags & FOLL_COW` set. 8858310d48bSKeno Fischer */ 8868310d48bSKeno Fischer WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set"); 8878310d48bSKeno Fischer 888f6f37321SLinus Torvalds if (flags & FOLL_WRITE && !pmd_write(*pmd)) 8893565fce3SDan Williams return NULL; 8903565fce3SDan Williams 8913565fce3SDan Williams if (pmd_present(*pmd) && pmd_devmap(*pmd)) 8923565fce3SDan Williams /* pass */; 8933565fce3SDan Williams else 8943565fce3SDan Williams return NULL; 8953565fce3SDan Williams 8963565fce3SDan Williams if (flags & FOLL_TOUCH) 897a8f97366SKirill A. Shutemov touch_pmd(vma, addr, pmd, flags); 8983565fce3SDan Williams 8993565fce3SDan Williams /* 9003565fce3SDan Williams * device mapped pages can only be returned if the 9013565fce3SDan Williams * caller will manage the page reference count. 
9023565fce3SDan Williams */ 9033565fce3SDan Williams if (!(flags & FOLL_GET)) 9043565fce3SDan Williams return ERR_PTR(-EEXIST); 9053565fce3SDan Williams 9063565fce3SDan Williams pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT; 907df06b37fSKeith Busch *pgmap = get_dev_pagemap(pfn, *pgmap); 908df06b37fSKeith Busch if (!*pgmap) 9093565fce3SDan Williams return ERR_PTR(-EFAULT); 9103565fce3SDan Williams page = pfn_to_page(pfn); 9113565fce3SDan Williams get_page(page); 9123565fce3SDan Williams 9133565fce3SDan Williams return page; 9143565fce3SDan Williams } 9153565fce3SDan Williams 91671e3aac0SAndrea Arcangeli int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, 91771e3aac0SAndrea Arcangeli pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, 91871e3aac0SAndrea Arcangeli struct vm_area_struct *vma) 91971e3aac0SAndrea Arcangeli { 920c4088ebdSKirill A. Shutemov spinlock_t *dst_ptl, *src_ptl; 92171e3aac0SAndrea Arcangeli struct page *src_page; 92271e3aac0SAndrea Arcangeli pmd_t pmd; 92312c9d70bSMatthew Wilcox pgtable_t pgtable = NULL; 924628d47ceSKirill A. Shutemov int ret = -ENOMEM; 92571e3aac0SAndrea Arcangeli 926628d47ceSKirill A. Shutemov /* Skip if can be re-fill on fault */ 927628d47ceSKirill A. Shutemov if (!vma_is_anonymous(vma)) 928628d47ceSKirill A. Shutemov return 0; 929628d47ceSKirill A. Shutemov 93071e3aac0SAndrea Arcangeli pgtable = pte_alloc_one(dst_mm, addr); 93171e3aac0SAndrea Arcangeli if (unlikely(!pgtable)) 93271e3aac0SAndrea Arcangeli goto out; 93371e3aac0SAndrea Arcangeli 934c4088ebdSKirill A. Shutemov dst_ptl = pmd_lock(dst_mm, dst_pmd); 935c4088ebdSKirill A. Shutemov src_ptl = pmd_lockptr(src_mm, src_pmd); 936c4088ebdSKirill A. Shutemov spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 93771e3aac0SAndrea Arcangeli 93871e3aac0SAndrea Arcangeli ret = -EAGAIN; 93971e3aac0SAndrea Arcangeli pmd = *src_pmd; 94084c3fc4eSZi Yan 94184c3fc4eSZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 94284c3fc4eSZi Yan if (unlikely(is_swap_pmd(pmd))) { 94384c3fc4eSZi Yan swp_entry_t entry = pmd_to_swp_entry(pmd); 94484c3fc4eSZi Yan 94584c3fc4eSZi Yan VM_BUG_ON(!is_pmd_migration_entry(pmd)); 94684c3fc4eSZi Yan if (is_write_migration_entry(entry)) { 94784c3fc4eSZi Yan make_migration_entry_read(&entry); 94884c3fc4eSZi Yan pmd = swp_entry_to_pmd(entry); 949ab6e3d09SNaoya Horiguchi if (pmd_swp_soft_dirty(*src_pmd)) 950ab6e3d09SNaoya Horiguchi pmd = pmd_swp_mksoft_dirty(pmd); 95184c3fc4eSZi Yan set_pmd_at(src_mm, addr, src_pmd, pmd); 95284c3fc4eSZi Yan } 953dd8a67f9SZi Yan add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); 954af5b0f6aSKirill A. Shutemov mm_inc_nr_ptes(dst_mm); 955dd8a67f9SZi Yan pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); 95684c3fc4eSZi Yan set_pmd_at(dst_mm, addr, dst_pmd, pmd); 95784c3fc4eSZi Yan ret = 0; 95884c3fc4eSZi Yan goto out_unlock; 95984c3fc4eSZi Yan } 96084c3fc4eSZi Yan #endif 96184c3fc4eSZi Yan 962628d47ceSKirill A. Shutemov if (unlikely(!pmd_trans_huge(pmd))) { 96371e3aac0SAndrea Arcangeli pte_free(dst_mm, pgtable); 96471e3aac0SAndrea Arcangeli goto out_unlock; 96571e3aac0SAndrea Arcangeli } 966fc9fe822SKirill A. Shutemov /* 967c4088ebdSKirill A. Shutemov * When page table lock is held, the huge zero pmd should not be 968fc9fe822SKirill A. Shutemov * under splitting since we don't split the page itself, only pmd to 969fc9fe822SKirill A. Shutemov * a page table. 970fc9fe822SKirill A. Shutemov */ 971fc9fe822SKirill A. Shutemov if (is_huge_zero_pmd(pmd)) { 9725918d10aSKirill A. Shutemov struct page *zero_page; 97397ae1749SKirill A. 
Shutemov /* 97497ae1749SKirill A. Shutemov * get_huge_zero_page() will never allocate a new page here, 97597ae1749SKirill A. Shutemov * since we already have a zero page to copy. It just takes a 97697ae1749SKirill A. Shutemov * reference. 97797ae1749SKirill A. Shutemov */ 9786fcb52a5SAaron Lu zero_page = mm_get_huge_zero_page(dst_mm); 9796b251fc9SAndrea Arcangeli set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd, 9805918d10aSKirill A. Shutemov zero_page); 981fc9fe822SKirill A. Shutemov ret = 0; 982fc9fe822SKirill A. Shutemov goto out_unlock; 983fc9fe822SKirill A. Shutemov } 984de466bd6SMel Gorman 98571e3aac0SAndrea Arcangeli src_page = pmd_page(pmd); 986309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(src_page), src_page); 98771e3aac0SAndrea Arcangeli get_page(src_page); 98853f9263bSKirill A. Shutemov page_dup_rmap(src_page, true); 98971e3aac0SAndrea Arcangeli add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); 990c4812909SKirill A. Shutemov mm_inc_nr_ptes(dst_mm); 9915c7fb56eSDan Williams pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); 99271e3aac0SAndrea Arcangeli 99371e3aac0SAndrea Arcangeli pmdp_set_wrprotect(src_mm, addr, src_pmd); 99471e3aac0SAndrea Arcangeli pmd = pmd_mkold(pmd_wrprotect(pmd)); 99571e3aac0SAndrea Arcangeli set_pmd_at(dst_mm, addr, dst_pmd, pmd); 99671e3aac0SAndrea Arcangeli 99771e3aac0SAndrea Arcangeli ret = 0; 99871e3aac0SAndrea Arcangeli out_unlock: 999c4088ebdSKirill A. Shutemov spin_unlock(src_ptl); 1000c4088ebdSKirill A. Shutemov spin_unlock(dst_ptl); 100171e3aac0SAndrea Arcangeli out: 100271e3aac0SAndrea Arcangeli return ret; 100371e3aac0SAndrea Arcangeli } 100471e3aac0SAndrea Arcangeli 1005a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 1006a00cc7d9SMatthew Wilcox static void touch_pud(struct vm_area_struct *vma, unsigned long addr, 1007a8f97366SKirill A. Shutemov pud_t *pud, int flags) 1008a00cc7d9SMatthew Wilcox { 1009a00cc7d9SMatthew Wilcox pud_t _pud; 1010a00cc7d9SMatthew Wilcox 1011a8f97366SKirill A. Shutemov _pud = pud_mkyoung(*pud); 1012a8f97366SKirill A. Shutemov if (flags & FOLL_WRITE) 1013a8f97366SKirill A. Shutemov _pud = pud_mkdirty(_pud); 1014a00cc7d9SMatthew Wilcox if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK, 1015a8f97366SKirill A. Shutemov pud, _pud, flags & FOLL_WRITE)) 1016a00cc7d9SMatthew Wilcox update_mmu_cache_pud(vma, addr, pud); 1017a00cc7d9SMatthew Wilcox } 1018a00cc7d9SMatthew Wilcox 1019a00cc7d9SMatthew Wilcox struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, 1020df06b37fSKeith Busch pud_t *pud, int flags, struct dev_pagemap **pgmap) 1021a00cc7d9SMatthew Wilcox { 1022a00cc7d9SMatthew Wilcox unsigned long pfn = pud_pfn(*pud); 1023a00cc7d9SMatthew Wilcox struct mm_struct *mm = vma->vm_mm; 1024a00cc7d9SMatthew Wilcox struct page *page; 1025a00cc7d9SMatthew Wilcox 1026a00cc7d9SMatthew Wilcox assert_spin_locked(pud_lockptr(mm, pud)); 1027a00cc7d9SMatthew Wilcox 1028f6f37321SLinus Torvalds if (flags & FOLL_WRITE && !pud_write(*pud)) 1029a00cc7d9SMatthew Wilcox return NULL; 1030a00cc7d9SMatthew Wilcox 1031a00cc7d9SMatthew Wilcox if (pud_present(*pud) && pud_devmap(*pud)) 1032a00cc7d9SMatthew Wilcox /* pass */; 1033a00cc7d9SMatthew Wilcox else 1034a00cc7d9SMatthew Wilcox return NULL; 1035a00cc7d9SMatthew Wilcox 1036a00cc7d9SMatthew Wilcox if (flags & FOLL_TOUCH) 1037a8f97366SKirill A. 
Shutemov touch_pud(vma, addr, pud, flags); 1038a00cc7d9SMatthew Wilcox 1039a00cc7d9SMatthew Wilcox /* 1040a00cc7d9SMatthew Wilcox * device mapped pages can only be returned if the 1041a00cc7d9SMatthew Wilcox * caller will manage the page reference count. 1042a00cc7d9SMatthew Wilcox */ 1043a00cc7d9SMatthew Wilcox if (!(flags & FOLL_GET)) 1044a00cc7d9SMatthew Wilcox return ERR_PTR(-EEXIST); 1045a00cc7d9SMatthew Wilcox 1046a00cc7d9SMatthew Wilcox pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT; 1047df06b37fSKeith Busch *pgmap = get_dev_pagemap(pfn, *pgmap); 1048df06b37fSKeith Busch if (!*pgmap) 1049a00cc7d9SMatthew Wilcox return ERR_PTR(-EFAULT); 1050a00cc7d9SMatthew Wilcox page = pfn_to_page(pfn); 1051a00cc7d9SMatthew Wilcox get_page(page); 1052a00cc7d9SMatthew Wilcox 1053a00cc7d9SMatthew Wilcox return page; 1054a00cc7d9SMatthew Wilcox } 1055a00cc7d9SMatthew Wilcox 1056a00cc7d9SMatthew Wilcox int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, 1057a00cc7d9SMatthew Wilcox pud_t *dst_pud, pud_t *src_pud, unsigned long addr, 1058a00cc7d9SMatthew Wilcox struct vm_area_struct *vma) 1059a00cc7d9SMatthew Wilcox { 1060a00cc7d9SMatthew Wilcox spinlock_t *dst_ptl, *src_ptl; 1061a00cc7d9SMatthew Wilcox pud_t pud; 1062a00cc7d9SMatthew Wilcox int ret; 1063a00cc7d9SMatthew Wilcox 1064a00cc7d9SMatthew Wilcox dst_ptl = pud_lock(dst_mm, dst_pud); 1065a00cc7d9SMatthew Wilcox src_ptl = pud_lockptr(src_mm, src_pud); 1066a00cc7d9SMatthew Wilcox spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 1067a00cc7d9SMatthew Wilcox 1068a00cc7d9SMatthew Wilcox ret = -EAGAIN; 1069a00cc7d9SMatthew Wilcox pud = *src_pud; 1070a00cc7d9SMatthew Wilcox if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud))) 1071a00cc7d9SMatthew Wilcox goto out_unlock; 1072a00cc7d9SMatthew Wilcox 1073a00cc7d9SMatthew Wilcox /* 1074a00cc7d9SMatthew Wilcox * When page table lock is held, the huge zero pud should not be 1075a00cc7d9SMatthew Wilcox * under splitting since we don't split the page itself, only pud to 1076a00cc7d9SMatthew Wilcox * a page table. 
1077a00cc7d9SMatthew Wilcox */ 1078a00cc7d9SMatthew Wilcox if (is_huge_zero_pud(pud)) { 1079a00cc7d9SMatthew Wilcox /* No huge zero pud yet */ 1080a00cc7d9SMatthew Wilcox } 1081a00cc7d9SMatthew Wilcox 1082a00cc7d9SMatthew Wilcox pudp_set_wrprotect(src_mm, addr, src_pud); 1083a00cc7d9SMatthew Wilcox pud = pud_mkold(pud_wrprotect(pud)); 1084a00cc7d9SMatthew Wilcox set_pud_at(dst_mm, addr, dst_pud, pud); 1085a00cc7d9SMatthew Wilcox 1086a00cc7d9SMatthew Wilcox ret = 0; 1087a00cc7d9SMatthew Wilcox out_unlock: 1088a00cc7d9SMatthew Wilcox spin_unlock(src_ptl); 1089a00cc7d9SMatthew Wilcox spin_unlock(dst_ptl); 1090a00cc7d9SMatthew Wilcox return ret; 1091a00cc7d9SMatthew Wilcox } 1092a00cc7d9SMatthew Wilcox 1093a00cc7d9SMatthew Wilcox void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) 1094a00cc7d9SMatthew Wilcox { 1095a00cc7d9SMatthew Wilcox pud_t entry; 1096a00cc7d9SMatthew Wilcox unsigned long haddr; 1097a00cc7d9SMatthew Wilcox bool write = vmf->flags & FAULT_FLAG_WRITE; 1098a00cc7d9SMatthew Wilcox 1099a00cc7d9SMatthew Wilcox vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud); 1100a00cc7d9SMatthew Wilcox if (unlikely(!pud_same(*vmf->pud, orig_pud))) 1101a00cc7d9SMatthew Wilcox goto unlock; 1102a00cc7d9SMatthew Wilcox 1103a00cc7d9SMatthew Wilcox entry = pud_mkyoung(orig_pud); 1104a00cc7d9SMatthew Wilcox if (write) 1105a00cc7d9SMatthew Wilcox entry = pud_mkdirty(entry); 1106a00cc7d9SMatthew Wilcox haddr = vmf->address & HPAGE_PUD_MASK; 1107a00cc7d9SMatthew Wilcox if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write)) 1108a00cc7d9SMatthew Wilcox update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud); 1109a00cc7d9SMatthew Wilcox 1110a00cc7d9SMatthew Wilcox unlock: 1111a00cc7d9SMatthew Wilcox spin_unlock(vmf->ptl); 1112a00cc7d9SMatthew Wilcox } 1113a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 1114a00cc7d9SMatthew Wilcox 111582b0f8c3SJan Kara void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd) 1116a1dd450bSWill Deacon { 1117a1dd450bSWill Deacon pmd_t entry; 1118a1dd450bSWill Deacon unsigned long haddr; 111920f664aaSMinchan Kim bool write = vmf->flags & FAULT_FLAG_WRITE; 1120a1dd450bSWill Deacon 112182b0f8c3SJan Kara vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); 112282b0f8c3SJan Kara if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) 1123a1dd450bSWill Deacon goto unlock; 1124a1dd450bSWill Deacon 1125a1dd450bSWill Deacon entry = pmd_mkyoung(orig_pmd); 112620f664aaSMinchan Kim if (write) 112720f664aaSMinchan Kim entry = pmd_mkdirty(entry); 112882b0f8c3SJan Kara haddr = vmf->address & HPAGE_PMD_MASK; 112920f664aaSMinchan Kim if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write)) 113082b0f8c3SJan Kara update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd); 1131a1dd450bSWill Deacon 1132a1dd450bSWill Deacon unlock: 113382b0f8c3SJan Kara spin_unlock(vmf->ptl); 1134a1dd450bSWill Deacon } 1135a1dd450bSWill Deacon 11362b740303SSouptick Joarder static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, 11372b740303SSouptick Joarder pmd_t orig_pmd, struct page *page) 113871e3aac0SAndrea Arcangeli { 113982b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 114082b0f8c3SJan Kara unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 114100501b53SJohannes Weiner struct mem_cgroup *memcg; 114271e3aac0SAndrea Arcangeli pgtable_t pgtable; 114371e3aac0SAndrea Arcangeli pmd_t _pmd; 11442b740303SSouptick Joarder int i; 11452b740303SSouptick Joarder vm_fault_t ret = 0; 114671e3aac0SAndrea Arcangeli struct page **pages; 
11472ec74c3eSSagi Grimberg unsigned long mmun_start; /* For mmu_notifiers */ 11482ec74c3eSSagi Grimberg unsigned long mmun_end; /* For mmu_notifiers */ 114971e3aac0SAndrea Arcangeli 11506da2ec56SKees Cook pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *), 115171e3aac0SAndrea Arcangeli GFP_KERNEL); 115271e3aac0SAndrea Arcangeli if (unlikely(!pages)) { 115371e3aac0SAndrea Arcangeli ret |= VM_FAULT_OOM; 115471e3aac0SAndrea Arcangeli goto out; 115571e3aac0SAndrea Arcangeli } 115671e3aac0SAndrea Arcangeli 115771e3aac0SAndrea Arcangeli for (i = 0; i < HPAGE_PMD_NR; i++) { 115841b6167eSMichal Hocko pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma, 115982b0f8c3SJan Kara vmf->address, page_to_nid(page)); 1160b9bbfbe3SAndrea Arcangeli if (unlikely(!pages[i] || 11612cf85583STejun Heo mem_cgroup_try_charge_delay(pages[i], vma->vm_mm, 1162bae473a4SKirill A. Shutemov GFP_KERNEL, &memcg, false))) { 1163b9bbfbe3SAndrea Arcangeli if (pages[i]) 116471e3aac0SAndrea Arcangeli put_page(pages[i]); 1165b9bbfbe3SAndrea Arcangeli while (--i >= 0) { 116600501b53SJohannes Weiner memcg = (void *)page_private(pages[i]); 116700501b53SJohannes Weiner set_page_private(pages[i], 0); 1168f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(pages[i], memcg, 1169f627c2f5SKirill A. Shutemov false); 1170b9bbfbe3SAndrea Arcangeli put_page(pages[i]); 1171b9bbfbe3SAndrea Arcangeli } 117271e3aac0SAndrea Arcangeli kfree(pages); 117371e3aac0SAndrea Arcangeli ret |= VM_FAULT_OOM; 117471e3aac0SAndrea Arcangeli goto out; 117571e3aac0SAndrea Arcangeli } 117600501b53SJohannes Weiner set_page_private(pages[i], (unsigned long)memcg); 117771e3aac0SAndrea Arcangeli } 117871e3aac0SAndrea Arcangeli 117971e3aac0SAndrea Arcangeli for (i = 0; i < HPAGE_PMD_NR; i++) { 118071e3aac0SAndrea Arcangeli copy_user_highpage(pages[i], page + i, 11810089e485SHillf Danton haddr + PAGE_SIZE * i, vma); 118271e3aac0SAndrea Arcangeli __SetPageUptodate(pages[i]); 118371e3aac0SAndrea Arcangeli cond_resched(); 118471e3aac0SAndrea Arcangeli } 118571e3aac0SAndrea Arcangeli 11862ec74c3eSSagi Grimberg mmun_start = haddr; 11872ec74c3eSSagi Grimberg mmun_end = haddr + HPAGE_PMD_SIZE; 1188bae473a4SKirill A. Shutemov mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); 11892ec74c3eSSagi Grimberg 119082b0f8c3SJan Kara vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 119182b0f8c3SJan Kara if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) 119271e3aac0SAndrea Arcangeli goto out_free_pages; 1193309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(page), page); 119471e3aac0SAndrea Arcangeli 11950f10851eSJérôme Glisse /* 11960f10851eSJérôme Glisse * Leave pmd empty until pte is filled note we must notify here as 11970f10851eSJérôme Glisse * concurrent CPU thread might write to new page before the call to 11980f10851eSJérôme Glisse * mmu_notifier_invalidate_range_end() happens which can lead to a 11990f10851eSJérôme Glisse * device seeing memory write in different order than CPU. 12000f10851eSJérôme Glisse * 1201ad56b738SMike Rapoport * See Documentation/vm/mmu_notifier.rst 12020f10851eSJérôme Glisse */ 120382b0f8c3SJan Kara pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd); 120471e3aac0SAndrea Arcangeli 120582b0f8c3SJan Kara pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd); 1206bae473a4SKirill A. Shutemov pmd_populate(vma->vm_mm, &_pmd, pgtable); 120771e3aac0SAndrea Arcangeli 120871e3aac0SAndrea Arcangeli for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 1209bae473a4SKirill A. 
Shutemov pte_t entry; 121071e3aac0SAndrea Arcangeli entry = mk_pte(pages[i], vma->vm_page_prot); 121171e3aac0SAndrea Arcangeli entry = maybe_mkwrite(pte_mkdirty(entry), vma); 121200501b53SJohannes Weiner memcg = (void *)page_private(pages[i]); 121300501b53SJohannes Weiner set_page_private(pages[i], 0); 121482b0f8c3SJan Kara page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false); 1215f627c2f5SKirill A. Shutemov mem_cgroup_commit_charge(pages[i], memcg, false, false); 121600501b53SJohannes Weiner lru_cache_add_active_or_unevictable(pages[i], vma); 121782b0f8c3SJan Kara vmf->pte = pte_offset_map(&_pmd, haddr); 121882b0f8c3SJan Kara VM_BUG_ON(!pte_none(*vmf->pte)); 121982b0f8c3SJan Kara set_pte_at(vma->vm_mm, haddr, vmf->pte, entry); 122082b0f8c3SJan Kara pte_unmap(vmf->pte); 122171e3aac0SAndrea Arcangeli } 122271e3aac0SAndrea Arcangeli kfree(pages); 122371e3aac0SAndrea Arcangeli 122471e3aac0SAndrea Arcangeli smp_wmb(); /* make pte visible before pmd */ 122582b0f8c3SJan Kara pmd_populate(vma->vm_mm, vmf->pmd, pgtable); 1226d281ee61SKirill A. Shutemov page_remove_rmap(page, true); 122782b0f8c3SJan Kara spin_unlock(vmf->ptl); 122871e3aac0SAndrea Arcangeli 12294645b9feSJérôme Glisse /* 12304645b9feSJérôme Glisse * No need to double call mmu_notifier->invalidate_range() callback as 12314645b9feSJérôme Glisse * the above pmdp_huge_clear_flush_notify() did already call it. 12324645b9feSJérôme Glisse */ 12334645b9feSJérôme Glisse mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start, 12344645b9feSJérôme Glisse mmun_end); 12352ec74c3eSSagi Grimberg 123671e3aac0SAndrea Arcangeli ret |= VM_FAULT_WRITE; 123771e3aac0SAndrea Arcangeli put_page(page); 123871e3aac0SAndrea Arcangeli 123971e3aac0SAndrea Arcangeli out: 124071e3aac0SAndrea Arcangeli return ret; 124171e3aac0SAndrea Arcangeli 124271e3aac0SAndrea Arcangeli out_free_pages: 124382b0f8c3SJan Kara spin_unlock(vmf->ptl); 1244bae473a4SKirill A. Shutemov mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); 1245b9bbfbe3SAndrea Arcangeli for (i = 0; i < HPAGE_PMD_NR; i++) { 124600501b53SJohannes Weiner memcg = (void *)page_private(pages[i]); 124700501b53SJohannes Weiner set_page_private(pages[i], 0); 1248f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(pages[i], memcg, false); 124971e3aac0SAndrea Arcangeli put_page(pages[i]); 1250b9bbfbe3SAndrea Arcangeli } 125171e3aac0SAndrea Arcangeli kfree(pages); 125271e3aac0SAndrea Arcangeli goto out; 125371e3aac0SAndrea Arcangeli } 125471e3aac0SAndrea Arcangeli 12552b740303SSouptick Joarder vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd) 125671e3aac0SAndrea Arcangeli { 125782b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 125893b4796dSKirill A. Shutemov struct page *page = NULL, *new_page; 125900501b53SJohannes Weiner struct mem_cgroup *memcg; 126082b0f8c3SJan Kara unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 12612ec74c3eSSagi Grimberg unsigned long mmun_start; /* For mmu_notifiers */ 12622ec74c3eSSagi Grimberg unsigned long mmun_end; /* For mmu_notifiers */ 12633b363692SMichal Hocko gfp_t huge_gfp; /* for allocation and charge */ 12642b740303SSouptick Joarder vm_fault_t ret = 0; 126571e3aac0SAndrea Arcangeli 126682b0f8c3SJan Kara vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); 126781d1b09cSSasha Levin VM_BUG_ON_VMA(!vma->anon_vma, vma); 126893b4796dSKirill A. Shutemov if (is_huge_zero_pmd(orig_pmd)) 126993b4796dSKirill A. 
Shutemov goto alloc; 127082b0f8c3SJan Kara spin_lock(vmf->ptl); 127182b0f8c3SJan Kara if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) 127271e3aac0SAndrea Arcangeli goto out_unlock; 127371e3aac0SAndrea Arcangeli 127471e3aac0SAndrea Arcangeli page = pmd_page(orig_pmd); 1275309381feSSasha Levin VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page); 12761f25fe20SKirill A. Shutemov /* 12771f25fe20SKirill A. Shutemov * We can only reuse the page if nobody else maps the huge page or it's 12786d0a07edSAndrea Arcangeli * part. 12791f25fe20SKirill A. Shutemov */ 1280ba3c4ce6SHuang Ying if (!trylock_page(page)) { 1281ba3c4ce6SHuang Ying get_page(page); 1282ba3c4ce6SHuang Ying spin_unlock(vmf->ptl); 1283ba3c4ce6SHuang Ying lock_page(page); 1284ba3c4ce6SHuang Ying spin_lock(vmf->ptl); 1285ba3c4ce6SHuang Ying if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { 1286ba3c4ce6SHuang Ying unlock_page(page); 1287ba3c4ce6SHuang Ying put_page(page); 1288ba3c4ce6SHuang Ying goto out_unlock; 1289ba3c4ce6SHuang Ying } 1290ba3c4ce6SHuang Ying put_page(page); 1291ba3c4ce6SHuang Ying } 1292ba3c4ce6SHuang Ying if (reuse_swap_page(page, NULL)) { 129371e3aac0SAndrea Arcangeli pmd_t entry; 129471e3aac0SAndrea Arcangeli entry = pmd_mkyoung(orig_pmd); 1295f55e1014SLinus Torvalds entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 129682b0f8c3SJan Kara if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) 129782b0f8c3SJan Kara update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 129871e3aac0SAndrea Arcangeli ret |= VM_FAULT_WRITE; 1299ba3c4ce6SHuang Ying unlock_page(page); 130071e3aac0SAndrea Arcangeli goto out_unlock; 130171e3aac0SAndrea Arcangeli } 1302ba3c4ce6SHuang Ying unlock_page(page); 1303ddc58f27SKirill A. Shutemov get_page(page); 130482b0f8c3SJan Kara spin_unlock(vmf->ptl); 130593b4796dSKirill A. Shutemov alloc: 130671e3aac0SAndrea Arcangeli if (transparent_hugepage_enabled(vma) && 1307077fcf11SAneesh Kumar K.V !transparent_hugepage_debug_cow()) { 130889c83fb5SMichal Hocko huge_gfp = alloc_hugepage_direct_gfpmask(vma, haddr); 130989c83fb5SMichal Hocko new_page = alloc_pages_vma(huge_gfp, HPAGE_PMD_ORDER, vma, 131089c83fb5SMichal Hocko haddr, numa_node_id()); 1311077fcf11SAneesh Kumar K.V } else 131271e3aac0SAndrea Arcangeli new_page = NULL; 131371e3aac0SAndrea Arcangeli 13149a982250SKirill A. Shutemov if (likely(new_page)) { 13159a982250SKirill A. Shutemov prep_transhuge_page(new_page); 13169a982250SKirill A. Shutemov } else { 1317eecc1e42SHugh Dickins if (!page) { 131882b0f8c3SJan Kara split_huge_pmd(vma, vmf->pmd, vmf->address); 1319e9b71ca9SKirill A. Shutemov ret |= VM_FAULT_FALLBACK; 132093b4796dSKirill A. Shutemov } else { 132182b0f8c3SJan Kara ret = do_huge_pmd_wp_page_fallback(vmf, orig_pmd, page); 13229845cbbdSKirill A. Shutemov if (ret & VM_FAULT_OOM) { 132382b0f8c3SJan Kara split_huge_pmd(vma, vmf->pmd, vmf->address); 13249845cbbdSKirill A. Shutemov ret |= VM_FAULT_FALLBACK; 13259845cbbdSKirill A. Shutemov } 1326ddc58f27SKirill A. Shutemov put_page(page); 132793b4796dSKirill A. Shutemov } 132817766ddeSDavid Rientjes count_vm_event(THP_FAULT_FALLBACK); 132971e3aac0SAndrea Arcangeli goto out; 133071e3aac0SAndrea Arcangeli } 133171e3aac0SAndrea Arcangeli 13322cf85583STejun Heo if (unlikely(mem_cgroup_try_charge_delay(new_page, vma->vm_mm, 13332a70f6a7SMichal Hocko huge_gfp, &memcg, true))) { 1334b9bbfbe3SAndrea Arcangeli put_page(new_page); 133582b0f8c3SJan Kara split_huge_pmd(vma, vmf->pmd, vmf->address); 1336bae473a4SKirill A. Shutemov if (page) 1337ddc58f27SKirill A. 
Shutemov put_page(page); 13389845cbbdSKirill A. Shutemov ret |= VM_FAULT_FALLBACK; 133917766ddeSDavid Rientjes count_vm_event(THP_FAULT_FALLBACK); 1340b9bbfbe3SAndrea Arcangeli goto out; 1341b9bbfbe3SAndrea Arcangeli } 1342b9bbfbe3SAndrea Arcangeli 134317766ddeSDavid Rientjes count_vm_event(THP_FAULT_ALLOC); 134417766ddeSDavid Rientjes 1345eecc1e42SHugh Dickins if (!page) 1346c79b57e4SHuang Ying clear_huge_page(new_page, vmf->address, HPAGE_PMD_NR); 134793b4796dSKirill A. Shutemov else 1348c9f4cd71SHuang Ying copy_user_huge_page(new_page, page, vmf->address, 1349c9f4cd71SHuang Ying vma, HPAGE_PMD_NR); 135071e3aac0SAndrea Arcangeli __SetPageUptodate(new_page); 135171e3aac0SAndrea Arcangeli 13522ec74c3eSSagi Grimberg mmun_start = haddr; 13532ec74c3eSSagi Grimberg mmun_end = haddr + HPAGE_PMD_SIZE; 1354bae473a4SKirill A. Shutemov mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); 13552ec74c3eSSagi Grimberg 135682b0f8c3SJan Kara spin_lock(vmf->ptl); 135793b4796dSKirill A. Shutemov if (page) 1358ddc58f27SKirill A. Shutemov put_page(page); 135982b0f8c3SJan Kara if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { 136082b0f8c3SJan Kara spin_unlock(vmf->ptl); 1361f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(new_page, memcg, true); 136271e3aac0SAndrea Arcangeli put_page(new_page); 13632ec74c3eSSagi Grimberg goto out_mn; 1364b9bbfbe3SAndrea Arcangeli } else { 136571e3aac0SAndrea Arcangeli pmd_t entry; 13663122359aSKirill A. Shutemov entry = mk_huge_pmd(new_page, vma->vm_page_prot); 1367f55e1014SLinus Torvalds entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 136882b0f8c3SJan Kara pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd); 1369d281ee61SKirill A. Shutemov page_add_new_anon_rmap(new_page, vma, haddr, true); 1370f627c2f5SKirill A. Shutemov mem_cgroup_commit_charge(new_page, memcg, false, true); 137100501b53SJohannes Weiner lru_cache_add_active_or_unevictable(new_page, vma); 137282b0f8c3SJan Kara set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); 137382b0f8c3SJan Kara update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 1374eecc1e42SHugh Dickins if (!page) { 1375bae473a4SKirill A. Shutemov add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); 137697ae1749SKirill A. Shutemov } else { 1377309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(page), page); 1378d281ee61SKirill A. Shutemov page_remove_rmap(page, true); 137971e3aac0SAndrea Arcangeli put_page(page); 138093b4796dSKirill A. Shutemov } 138171e3aac0SAndrea Arcangeli ret |= VM_FAULT_WRITE; 138271e3aac0SAndrea Arcangeli } 138382b0f8c3SJan Kara spin_unlock(vmf->ptl); 13842ec74c3eSSagi Grimberg out_mn: 13854645b9feSJérôme Glisse /* 13864645b9feSJérôme Glisse * No need to double call mmu_notifier->invalidate_range() callback as 13874645b9feSJérôme Glisse * the above pmdp_huge_clear_flush_notify() did already call it. 13884645b9feSJérôme Glisse */ 13894645b9feSJérôme Glisse mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start, 13904645b9feSJérôme Glisse mmun_end); 13912ec74c3eSSagi Grimberg out: 13922ec74c3eSSagi Grimberg return ret; 139371e3aac0SAndrea Arcangeli out_unlock: 139482b0f8c3SJan Kara spin_unlock(vmf->ptl); 139571e3aac0SAndrea Arcangeli return ret; 139671e3aac0SAndrea Arcangeli } 139771e3aac0SAndrea Arcangeli 13988310d48bSKeno Fischer /* 13998310d48bSKeno Fischer * FOLL_FORCE can write to even unwritable pmd's, but only 14008310d48bSKeno Fischer * after we've gone through a COW cycle and they are dirty. 
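 * (FOLL_COW is set by the get_user_pages() fault path once it has broken COW for this access, so FOLL_FORCE + FOLL_COW + a dirty pmd is taken here as evidence that the page is now private to this mm.)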
14018310d48bSKeno Fischer */ 14028310d48bSKeno Fischer static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) 14038310d48bSKeno Fischer { 1404f6f37321SLinus Torvalds return pmd_write(pmd) || 14058310d48bSKeno Fischer ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd)); 14068310d48bSKeno Fischer } 14078310d48bSKeno Fischer 1408b676b293SDavid Rientjes struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, 140971e3aac0SAndrea Arcangeli unsigned long addr, 141071e3aac0SAndrea Arcangeli pmd_t *pmd, 141171e3aac0SAndrea Arcangeli unsigned int flags) 141271e3aac0SAndrea Arcangeli { 1413b676b293SDavid Rientjes struct mm_struct *mm = vma->vm_mm; 141471e3aac0SAndrea Arcangeli struct page *page = NULL; 141571e3aac0SAndrea Arcangeli 1416c4088ebdSKirill A. Shutemov assert_spin_locked(pmd_lockptr(mm, pmd)); 141771e3aac0SAndrea Arcangeli 14188310d48bSKeno Fischer if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags)) 141971e3aac0SAndrea Arcangeli goto out; 142071e3aac0SAndrea Arcangeli 142185facf25SKirill A. Shutemov /* Avoid dumping huge zero page */ 142285facf25SKirill A. Shutemov if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) 142385facf25SKirill A. Shutemov return ERR_PTR(-EFAULT); 142485facf25SKirill A. Shutemov 14252b4847e7SMel Gorman /* Full NUMA hinting faults to serialise migration in fault paths */ 14268a0516edSMel Gorman if ((flags & FOLL_NUMA) && pmd_protnone(*pmd)) 14272b4847e7SMel Gorman goto out; 14282b4847e7SMel Gorman 142971e3aac0SAndrea Arcangeli page = pmd_page(*pmd); 1430ca120cf6SDan Williams VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page); 14313565fce3SDan Williams if (flags & FOLL_TOUCH) 1432a8f97366SKirill A. Shutemov touch_pmd(vma, addr, pmd, flags); 1433de60f5f1SEric B Munson if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { 1434e90309c9SKirill A. Shutemov /* 1435e90309c9SKirill A. Shutemov * We don't mlock() pte-mapped THPs. This way we can avoid 1436e90309c9SKirill A. Shutemov * leaking mlocked pages into non-VM_LOCKED VMAs. 1437e90309c9SKirill A. Shutemov * 14389a73f61bSKirill A. Shutemov * For anon THP: 14399a73f61bSKirill A. Shutemov * 1440e90309c9SKirill A. Shutemov * In most cases the pmd is the only mapping of the page as we 1441e90309c9SKirill A. Shutemov * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for 1442e90309c9SKirill A. Shutemov * writable private mappings in populate_vma_page_range(). 1443e90309c9SKirill A. Shutemov * 1444e90309c9SKirill A. Shutemov * The only scenario when we have the page shared here is if we 1445e90309c9SKirill A. Shutemov * mlocking read-only mapping shared over fork(). We skip 1446e90309c9SKirill A. Shutemov * mlocking such pages. 14479a73f61bSKirill A. Shutemov * 14489a73f61bSKirill A. Shutemov * For file THP: 14499a73f61bSKirill A. Shutemov * 14509a73f61bSKirill A. Shutemov * We can expect PageDoubleMap() to be stable under page lock: 14519a73f61bSKirill A. Shutemov * for file pages we set it in page_add_file_rmap(), which 14529a73f61bSKirill A. Shutemov * requires page to be locked. 1453e90309c9SKirill A. Shutemov */ 14549a73f61bSKirill A. Shutemov 14559a73f61bSKirill A. Shutemov if (PageAnon(page) && compound_mapcount(page) != 1) 14569a73f61bSKirill A. Shutemov goto skip_mlock; 14579a73f61bSKirill A. Shutemov if (PageDoubleMap(page) || !page->mapping) 14589a73f61bSKirill A. Shutemov goto skip_mlock; 14599a73f61bSKirill A. Shutemov if (!trylock_page(page)) 14609a73f61bSKirill A. 
Shutemov goto skip_mlock; 1461b676b293SDavid Rientjes lru_add_drain(); 14629a73f61bSKirill A. Shutemov if (page->mapping && !PageDoubleMap(page)) 1463b676b293SDavid Rientjes mlock_vma_page(page); 1464b676b293SDavid Rientjes unlock_page(page); 1465b676b293SDavid Rientjes } 14669a73f61bSKirill A. Shutemov skip_mlock: 146771e3aac0SAndrea Arcangeli page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; 1468ca120cf6SDan Williams VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page); 146971e3aac0SAndrea Arcangeli if (flags & FOLL_GET) 1470ddc58f27SKirill A. Shutemov get_page(page); 147171e3aac0SAndrea Arcangeli 147271e3aac0SAndrea Arcangeli out: 147371e3aac0SAndrea Arcangeli return page; 147471e3aac0SAndrea Arcangeli } 147571e3aac0SAndrea Arcangeli 1476d10e63f2SMel Gorman /* NUMA hinting page fault entry point for trans huge pmds */ 14772b740303SSouptick Joarder vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd) 1478d10e63f2SMel Gorman { 147982b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 1480b8916634SMel Gorman struct anon_vma *anon_vma = NULL; 1481b32967ffSMel Gorman struct page *page; 148282b0f8c3SJan Kara unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 14838191acbdSMel Gorman int page_nid = -1, this_nid = numa_node_id(); 148490572890SPeter Zijlstra int target_nid, last_cpupid = -1; 14858191acbdSMel Gorman bool page_locked; 14868191acbdSMel Gorman bool migrated = false; 1487b191f9b1SMel Gorman bool was_writable; 14886688cc05SPeter Zijlstra int flags = 0; 1489d10e63f2SMel Gorman 149082b0f8c3SJan Kara vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 149182b0f8c3SJan Kara if (unlikely(!pmd_same(pmd, *vmf->pmd))) 1492d10e63f2SMel Gorman goto out_unlock; 1493d10e63f2SMel Gorman 1494de466bd6SMel Gorman /* 1495de466bd6SMel Gorman * If there are potential migrations, wait for completion and retry 1496de466bd6SMel Gorman * without disrupting NUMA hinting information. Do not relock and 1497de466bd6SMel Gorman * check_same as the page may no longer be mapped. 
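 * Returning from the fault after the wait is enough: if the access still needs service, the task simply takes the fault again against the now up-to-date pmd.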
1498de466bd6SMel Gorman */ 149982b0f8c3SJan Kara if (unlikely(pmd_trans_migrating(*vmf->pmd))) { 150082b0f8c3SJan Kara page = pmd_page(*vmf->pmd); 15013c226c63SMark Rutland if (!get_page_unless_zero(page)) 15023c226c63SMark Rutland goto out_unlock; 150382b0f8c3SJan Kara spin_unlock(vmf->ptl); 15045d833062SMel Gorman wait_on_page_locked(page); 15053c226c63SMark Rutland put_page(page); 1506de466bd6SMel Gorman goto out; 1507de466bd6SMel Gorman } 1508de466bd6SMel Gorman 1509d10e63f2SMel Gorman page = pmd_page(pmd); 1510a1a46184SMel Gorman BUG_ON(is_huge_zero_page(page)); 15118191acbdSMel Gorman page_nid = page_to_nid(page); 151290572890SPeter Zijlstra last_cpupid = page_cpupid_last(page); 151303c5a6e1SMel Gorman count_vm_numa_event(NUMA_HINT_FAULTS); 151404bb2f94SRik van Riel if (page_nid == this_nid) { 151503c5a6e1SMel Gorman count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); 151604bb2f94SRik van Riel flags |= TNF_FAULT_LOCAL; 151704bb2f94SRik van Riel } 15184daae3b4SMel Gorman 1519bea66fbdSMel Gorman /* See similar comment in do_numa_page for explanation */ 1520288bc549SAneesh Kumar K.V if (!pmd_savedwrite(pmd)) 15216688cc05SPeter Zijlstra flags |= TNF_NO_GROUP; 15226688cc05SPeter Zijlstra 15236688cc05SPeter Zijlstra /* 1524ff9042b1SMel Gorman * Acquire the page lock to serialise THP migrations but avoid dropping 1525ff9042b1SMel Gorman * page_table_lock if at all possible 1526ff9042b1SMel Gorman */ 1527b8916634SMel Gorman page_locked = trylock_page(page); 1528b8916634SMel Gorman target_nid = mpol_misplaced(page, vma, haddr); 1529b8916634SMel Gorman if (target_nid == -1) { 1530b8916634SMel Gorman /* If the page was locked, there are no parallel migrations */ 1531a54a407fSMel Gorman if (page_locked) 1532b8916634SMel Gorman goto clear_pmdnuma; 15332b4847e7SMel Gorman } 1534cbee9f88SPeter Zijlstra 1535de466bd6SMel Gorman /* Migration could have started since the pmd_trans_migrating check */ 15362b4847e7SMel Gorman if (!page_locked) { 15373c226c63SMark Rutland page_nid = -1; 15383c226c63SMark Rutland if (!get_page_unless_zero(page)) 15393c226c63SMark Rutland goto out_unlock; 154082b0f8c3SJan Kara spin_unlock(vmf->ptl); 1541b8916634SMel Gorman wait_on_page_locked(page); 15423c226c63SMark Rutland put_page(page); 1543b8916634SMel Gorman goto out; 1544b8916634SMel Gorman } 1545b8916634SMel Gorman 15462b4847e7SMel Gorman /* 15472b4847e7SMel Gorman * Page is misplaced. Page lock serialises migrations. 
Acquire anon_vma 15482b4847e7SMel Gorman * to serialises splits 15492b4847e7SMel Gorman */ 1550b8916634SMel Gorman get_page(page); 155182b0f8c3SJan Kara spin_unlock(vmf->ptl); 1552b8916634SMel Gorman anon_vma = page_lock_anon_vma_read(page); 1553b32967ffSMel Gorman 1554c69307d5SPeter Zijlstra /* Confirm the PMD did not change while page_table_lock was released */ 155582b0f8c3SJan Kara spin_lock(vmf->ptl); 155682b0f8c3SJan Kara if (unlikely(!pmd_same(pmd, *vmf->pmd))) { 1557b32967ffSMel Gorman unlock_page(page); 1558b32967ffSMel Gorman put_page(page); 1559a54a407fSMel Gorman page_nid = -1; 1560b32967ffSMel Gorman goto out_unlock; 1561b32967ffSMel Gorman } 1562ff9042b1SMel Gorman 1563c3a489caSMel Gorman /* Bail if we fail to protect against THP splits for any reason */ 1564c3a489caSMel Gorman if (unlikely(!anon_vma)) { 1565c3a489caSMel Gorman put_page(page); 1566c3a489caSMel Gorman page_nid = -1; 1567c3a489caSMel Gorman goto clear_pmdnuma; 1568c3a489caSMel Gorman } 1569c3a489caSMel Gorman 1570a54a407fSMel Gorman /* 15718b1b436dSPeter Zijlstra * Since we took the NUMA fault, we must have observed the !accessible 15728b1b436dSPeter Zijlstra * bit. Make sure all other CPUs agree with that, to avoid them 15738b1b436dSPeter Zijlstra * modifying the page we're about to migrate. 15748b1b436dSPeter Zijlstra * 15758b1b436dSPeter Zijlstra * Must be done under PTL such that we'll observe the relevant 1576ccde85baSPeter Zijlstra * inc_tlb_flush_pending(). 1577ccde85baSPeter Zijlstra * 1578ccde85baSPeter Zijlstra * We are not sure a pending tlb flush here is for a huge page 1579ccde85baSPeter Zijlstra * mapping or not. Hence use the tlb range variant 15808b1b436dSPeter Zijlstra */ 15817066f0f9SAndrea Arcangeli if (mm_tlb_flush_pending(vma->vm_mm)) { 1582ccde85baSPeter Zijlstra flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE); 15837066f0f9SAndrea Arcangeli /* 15847066f0f9SAndrea Arcangeli * change_huge_pmd() released the pmd lock before 15857066f0f9SAndrea Arcangeli * invalidating the secondary MMUs sharing the primary 15867066f0f9SAndrea Arcangeli * MMU pagetables (with ->invalidate_range()). The 15877066f0f9SAndrea Arcangeli * mmu_notifier_invalidate_range_end() (which 15887066f0f9SAndrea Arcangeli * internally calls ->invalidate_range()) in 15897066f0f9SAndrea Arcangeli * change_pmd_range() will run after us, so we can't 15907066f0f9SAndrea Arcangeli * rely on it here and we need an explicit invalidate. 15917066f0f9SAndrea Arcangeli */ 15927066f0f9SAndrea Arcangeli mmu_notifier_invalidate_range(vma->vm_mm, haddr, 15937066f0f9SAndrea Arcangeli haddr + HPAGE_PMD_SIZE); 15947066f0f9SAndrea Arcangeli } 15958b1b436dSPeter Zijlstra 15968b1b436dSPeter Zijlstra /* 1597a54a407fSMel Gorman * Migrate the THP to the requested node, returns with page unlocked 15988a0516edSMel Gorman * and access rights restored. 1599a54a407fSMel Gorman */ 160082b0f8c3SJan Kara spin_unlock(vmf->ptl); 16018b1b436dSPeter Zijlstra 1602bae473a4SKirill A. 
Shutemov migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma, 160382b0f8c3SJan Kara vmf->pmd, pmd, vmf->address, page, target_nid); 16046688cc05SPeter Zijlstra if (migrated) { 16056688cc05SPeter Zijlstra flags |= TNF_MIGRATED; 16068191acbdSMel Gorman page_nid = target_nid; 1607074c2381SMel Gorman } else 1608074c2381SMel Gorman flags |= TNF_MIGRATE_FAIL; 1609b32967ffSMel Gorman 16108191acbdSMel Gorman goto out; 16114daae3b4SMel Gorman clear_pmdnuma: 1612a54a407fSMel Gorman BUG_ON(!PageLocked(page)); 1613288bc549SAneesh Kumar K.V was_writable = pmd_savedwrite(pmd); 16144d942466SMel Gorman pmd = pmd_modify(pmd, vma->vm_page_prot); 1615b7b04004SMel Gorman pmd = pmd_mkyoung(pmd); 1616b191f9b1SMel Gorman if (was_writable) 1617b191f9b1SMel Gorman pmd = pmd_mkwrite(pmd); 161882b0f8c3SJan Kara set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); 161982b0f8c3SJan Kara update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 1620a54a407fSMel Gorman unlock_page(page); 1621d10e63f2SMel Gorman out_unlock: 162282b0f8c3SJan Kara spin_unlock(vmf->ptl); 1623b8916634SMel Gorman 1624b8916634SMel Gorman out: 1625b8916634SMel Gorman if (anon_vma) 1626b8916634SMel Gorman page_unlock_anon_vma_read(anon_vma); 1627b8916634SMel Gorman 16288191acbdSMel Gorman if (page_nid != -1) 162982b0f8c3SJan Kara task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, 16309a8b300fSAneesh Kumar K.V flags); 16318191acbdSMel Gorman 1632d10e63f2SMel Gorman return 0; 1633d10e63f2SMel Gorman } 1634d10e63f2SMel Gorman 1635319904adSHuang Ying /* 1636319904adSHuang Ying * Return true if we do MADV_FREE successfully on entire pmd page. 1637319904adSHuang Ying * Otherwise, return false. 1638319904adSHuang Ying */ 1639319904adSHuang Ying bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1640b8d3c4c3SMinchan Kim pmd_t *pmd, unsigned long addr, unsigned long next) 1641b8d3c4c3SMinchan Kim { 1642b8d3c4c3SMinchan Kim spinlock_t *ptl; 1643b8d3c4c3SMinchan Kim pmd_t orig_pmd; 1644b8d3c4c3SMinchan Kim struct page *page; 1645b8d3c4c3SMinchan Kim struct mm_struct *mm = tlb->mm; 1646319904adSHuang Ying bool ret = false; 1647b8d3c4c3SMinchan Kim 164807e32661SAneesh Kumar K.V tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE); 164907e32661SAneesh Kumar K.V 1650b6ec57f4SKirill A. Shutemov ptl = pmd_trans_huge_lock(pmd, vma); 1651b6ec57f4SKirill A. Shutemov if (!ptl) 165225eedabeSLinus Torvalds goto out_unlocked; 1653b8d3c4c3SMinchan Kim 1654b8d3c4c3SMinchan Kim orig_pmd = *pmd; 1655319904adSHuang Ying if (is_huge_zero_pmd(orig_pmd)) 1656b8d3c4c3SMinchan Kim goto out; 1657b8d3c4c3SMinchan Kim 165884c3fc4eSZi Yan if (unlikely(!pmd_present(orig_pmd))) { 165984c3fc4eSZi Yan VM_BUG_ON(thp_migration_supported() && 166084c3fc4eSZi Yan !is_pmd_migration_entry(orig_pmd)); 166184c3fc4eSZi Yan goto out; 166284c3fc4eSZi Yan } 166384c3fc4eSZi Yan 1664b8d3c4c3SMinchan Kim page = pmd_page(orig_pmd); 1665b8d3c4c3SMinchan Kim /* 1666b8d3c4c3SMinchan Kim * If other processes are mapping this page, we couldn't discard 1667b8d3c4c3SMinchan Kim * the page unless they all do MADV_FREE so let's skip the page. 1668b8d3c4c3SMinchan Kim */ 1669b8d3c4c3SMinchan Kim if (page_mapcount(page) != 1) 1670b8d3c4c3SMinchan Kim goto out; 1671b8d3c4c3SMinchan Kim 1672b8d3c4c3SMinchan Kim if (!trylock_page(page)) 1673b8d3c4c3SMinchan Kim goto out; 1674b8d3c4c3SMinchan Kim 1675b8d3c4c3SMinchan Kim /* 1676b8d3c4c3SMinchan Kim * If user want to discard part-pages of THP, split it so MADV_FREE 1677b8d3c4c3SMinchan Kim * will deactivate only them. 
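 * For example, a hypothetical madvise(buf, thp_size / 2, MADV_FREE) call covering
 * only half of a mapped THP arrives here with next - addr != HPAGE_PMD_SIZE; the
 * page is then split below and only the covered subpages are later marked lazyfree
 * by the caller.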
1678b8d3c4c3SMinchan Kim */ 1679b8d3c4c3SMinchan Kim if (next - addr != HPAGE_PMD_SIZE) { 1680b8d3c4c3SMinchan Kim get_page(page); 1681b8d3c4c3SMinchan Kim spin_unlock(ptl); 16829818b8cdSHuang Ying split_huge_page(page); 1683b8d3c4c3SMinchan Kim unlock_page(page); 1684bbf29ffcSKirill A. Shutemov put_page(page); 1685b8d3c4c3SMinchan Kim goto out_unlocked; 1686b8d3c4c3SMinchan Kim } 1687b8d3c4c3SMinchan Kim 1688b8d3c4c3SMinchan Kim if (PageDirty(page)) 1689b8d3c4c3SMinchan Kim ClearPageDirty(page); 1690b8d3c4c3SMinchan Kim unlock_page(page); 1691b8d3c4c3SMinchan Kim 1692b8d3c4c3SMinchan Kim if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) { 169358ceeb6bSKirill A. Shutemov pmdp_invalidate(vma, addr, pmd); 1694b8d3c4c3SMinchan Kim orig_pmd = pmd_mkold(orig_pmd); 1695b8d3c4c3SMinchan Kim orig_pmd = pmd_mkclean(orig_pmd); 1696b8d3c4c3SMinchan Kim 1697b8d3c4c3SMinchan Kim set_pmd_at(mm, addr, pmd, orig_pmd); 1698b8d3c4c3SMinchan Kim tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 1699b8d3c4c3SMinchan Kim } 1700802a3a92SShaohua Li 1701802a3a92SShaohua Li mark_page_lazyfree(page); 1702319904adSHuang Ying ret = true; 1703b8d3c4c3SMinchan Kim out: 1704b8d3c4c3SMinchan Kim spin_unlock(ptl); 1705b8d3c4c3SMinchan Kim out_unlocked: 1706b8d3c4c3SMinchan Kim return ret; 1707b8d3c4c3SMinchan Kim } 1708b8d3c4c3SMinchan Kim 1709953c66c2SAneesh Kumar K.V static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) 1710953c66c2SAneesh Kumar K.V { 1711953c66c2SAneesh Kumar K.V pgtable_t pgtable; 1712953c66c2SAneesh Kumar K.V 1713953c66c2SAneesh Kumar K.V pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1714953c66c2SAneesh Kumar K.V pte_free(mm, pgtable); 1715c4812909SKirill A. Shutemov mm_dec_nr_ptes(mm); 1716953c66c2SAneesh Kumar K.V } 1717953c66c2SAneesh Kumar K.V 171871e3aac0SAndrea Arcangeli int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1719f21760b1SShaohua Li pmd_t *pmd, unsigned long addr) 172071e3aac0SAndrea Arcangeli { 1721f5c8ad47SDavid Miller pmd_t orig_pmd; 1722da146769SKirill A. Shutemov spinlock_t *ptl; 1723da146769SKirill A. Shutemov 172407e32661SAneesh Kumar K.V tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE); 172507e32661SAneesh Kumar K.V 1726b6ec57f4SKirill A. Shutemov ptl = __pmd_trans_huge_lock(pmd, vma); 1727b6ec57f4SKirill A. Shutemov if (!ptl) 1728da146769SKirill A. Shutemov return 0; 1729a6bf2bb0SAneesh Kumar K.V /* 1730a6bf2bb0SAneesh Kumar K.V * For architectures like ppc64 we look at deposited pgtable 17318809aa2dSAneesh Kumar K.V * when calling pmdp_huge_get_and_clear. So do the 1732a6bf2bb0SAneesh Kumar K.V * pgtable_trans_huge_withdraw after finishing pmdp related 1733a6bf2bb0SAneesh Kumar K.V * operations. 1734a6bf2bb0SAneesh Kumar K.V */ 17358809aa2dSAneesh Kumar K.V orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, 1736fcbe08d6SMartin Schwidefsky tlb->fullmm); 1737f21760b1SShaohua Li tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 17384897c765SMatthew Wilcox if (vma_is_dax(vma)) { 17393b6521f5SOliver O'Halloran if (arch_needs_pgtable_deposit()) 17403b6521f5SOliver O'Halloran zap_deposited_table(tlb->mm, pmd); 17414897c765SMatthew Wilcox spin_unlock(ptl); 1742da146769SKirill A. Shutemov if (is_huge_zero_pmd(orig_pmd)) 1743c0f2e176SAneesh Kumar K.V tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE); 1744da146769SKirill A. Shutemov } else if (is_huge_zero_pmd(orig_pmd)) { 1745c14a6eb4SOliver O'Halloran zap_deposited_table(tlb->mm, pmd); 1746bf929152SKirill A. 
Shutemov spin_unlock(ptl); 1747c0f2e176SAneesh Kumar K.V tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE); 1748479f0abbSKirill A. Shutemov } else { 1749616b8371SZi Yan struct page *page = NULL; 1750616b8371SZi Yan int flush_needed = 1; 1751616b8371SZi Yan 1752616b8371SZi Yan if (pmd_present(orig_pmd)) { 1753616b8371SZi Yan page = pmd_page(orig_pmd); 1754d281ee61SKirill A. Shutemov page_remove_rmap(page, true); 1755309381feSSasha Levin VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); 1756309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(page), page); 1757616b8371SZi Yan } else if (thp_migration_supported()) { 1758616b8371SZi Yan swp_entry_t entry; 1759616b8371SZi Yan 1760616b8371SZi Yan VM_BUG_ON(!is_pmd_migration_entry(orig_pmd)); 1761616b8371SZi Yan entry = pmd_to_swp_entry(orig_pmd); 1762616b8371SZi Yan page = pfn_to_page(swp_offset(entry)); 1763616b8371SZi Yan flush_needed = 0; 1764616b8371SZi Yan } else 1765616b8371SZi Yan WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); 1766616b8371SZi Yan 1767b5072380SKirill A. Shutemov if (PageAnon(page)) { 1768c14a6eb4SOliver O'Halloran zap_deposited_table(tlb->mm, pmd); 1769b5072380SKirill A. Shutemov add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); 1770b5072380SKirill A. Shutemov } else { 1771953c66c2SAneesh Kumar K.V if (arch_needs_pgtable_deposit()) 1772953c66c2SAneesh Kumar K.V zap_deposited_table(tlb->mm, pmd); 1773fadae295SYang Shi add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR); 1774b5072380SKirill A. Shutemov } 1775616b8371SZi Yan 1776bf929152SKirill A. Shutemov spin_unlock(ptl); 1777616b8371SZi Yan if (flush_needed) 1778e77b0852SAneesh Kumar K.V tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE); 1779479f0abbSKirill A. Shutemov } 1780da146769SKirill A. Shutemov return 1; 178171e3aac0SAndrea Arcangeli } 178271e3aac0SAndrea Arcangeli 17831dd38b6cSAneesh Kumar K.V #ifndef pmd_move_must_withdraw 17841dd38b6cSAneesh Kumar K.V static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl, 17851dd38b6cSAneesh Kumar K.V spinlock_t *old_pmd_ptl, 17861dd38b6cSAneesh Kumar K.V struct vm_area_struct *vma) 17871dd38b6cSAneesh Kumar K.V { 17881dd38b6cSAneesh Kumar K.V /* 17891dd38b6cSAneesh Kumar K.V * With split pmd lock we also need to move preallocated 17901dd38b6cSAneesh Kumar K.V * PTE page table if new_pmd is on different PMD page table. 17911dd38b6cSAneesh Kumar K.V * 17921dd38b6cSAneesh Kumar K.V * We also don't deposit and withdraw tables for file pages. 17931dd38b6cSAneesh Kumar K.V */ 17941dd38b6cSAneesh Kumar K.V return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); 17951dd38b6cSAneesh Kumar K.V } 17961dd38b6cSAneesh Kumar K.V #endif 17971dd38b6cSAneesh Kumar K.V 1798ab6e3d09SNaoya Horiguchi static pmd_t move_soft_dirty_pmd(pmd_t pmd) 1799ab6e3d09SNaoya Horiguchi { 1800ab6e3d09SNaoya Horiguchi #ifdef CONFIG_MEM_SOFT_DIRTY 1801ab6e3d09SNaoya Horiguchi if (unlikely(is_pmd_migration_entry(pmd))) 1802ab6e3d09SNaoya Horiguchi pmd = pmd_swp_mksoft_dirty(pmd); 1803ab6e3d09SNaoya Horiguchi else if (pmd_present(pmd)) 1804ab6e3d09SNaoya Horiguchi pmd = pmd_mksoft_dirty(pmd); 1805ab6e3d09SNaoya Horiguchi #endif 1806ab6e3d09SNaoya Horiguchi return pmd; 1807ab6e3d09SNaoya Horiguchi } 1808ab6e3d09SNaoya Horiguchi 1809bf8616d5SHugh Dickins bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, 181037a1c49aSAndrea Arcangeli unsigned long new_addr, unsigned long old_end, 1811eb66ae03SLinus Torvalds pmd_t *old_pmd, pmd_t *new_pmd) 181237a1c49aSAndrea Arcangeli { 1813bf929152SKirill A. 
Shutemov spinlock_t *old_ptl, *new_ptl; 181437a1c49aSAndrea Arcangeli pmd_t pmd; 181537a1c49aSAndrea Arcangeli struct mm_struct *mm = vma->vm_mm; 18165d190420SAaron Lu bool force_flush = false; 181737a1c49aSAndrea Arcangeli 181837a1c49aSAndrea Arcangeli if ((old_addr & ~HPAGE_PMD_MASK) || 181937a1c49aSAndrea Arcangeli (new_addr & ~HPAGE_PMD_MASK) || 1820bf8616d5SHugh Dickins old_end - old_addr < HPAGE_PMD_SIZE) 18214b471e88SKirill A. Shutemov return false; 182237a1c49aSAndrea Arcangeli 182337a1c49aSAndrea Arcangeli /* 182437a1c49aSAndrea Arcangeli * The destination pmd shouldn't be established, free_pgtables() 182537a1c49aSAndrea Arcangeli * should have released it. 182637a1c49aSAndrea Arcangeli */ 182737a1c49aSAndrea Arcangeli if (WARN_ON(!pmd_none(*new_pmd))) { 182837a1c49aSAndrea Arcangeli VM_BUG_ON(pmd_trans_huge(*new_pmd)); 18294b471e88SKirill A. Shutemov return false; 183037a1c49aSAndrea Arcangeli } 183137a1c49aSAndrea Arcangeli 1832bf929152SKirill A. Shutemov /* 1833bf929152SKirill A. Shutemov * We don't have to worry about the ordering of src and dst 1834bf929152SKirill A. Shutemov * ptlocks because exclusive mmap_sem prevents deadlock. 1835bf929152SKirill A. Shutemov */ 1836b6ec57f4SKirill A. Shutemov old_ptl = __pmd_trans_huge_lock(old_pmd, vma); 1837b6ec57f4SKirill A. Shutemov if (old_ptl) { 1838bf929152SKirill A. Shutemov new_ptl = pmd_lockptr(mm, new_pmd); 1839bf929152SKirill A. Shutemov if (new_ptl != old_ptl) 1840bf929152SKirill A. Shutemov spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); 18418809aa2dSAneesh Kumar K.V pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); 1842eb66ae03SLinus Torvalds if (pmd_present(pmd)) 1843a2ce2666SAaron Lu force_flush = true; 184437a1c49aSAndrea Arcangeli VM_BUG_ON(!pmd_none(*new_pmd)); 18453592806cSKirill A. Shutemov 18461dd38b6cSAneesh Kumar K.V if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) { 1847b3084f4dSAneesh Kumar K.V pgtable_t pgtable; 18483592806cSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); 18493592806cSKirill A. Shutemov pgtable_trans_huge_deposit(mm, new_pmd, pgtable); 18503592806cSKirill A. Shutemov } 1851ab6e3d09SNaoya Horiguchi pmd = move_soft_dirty_pmd(pmd); 1852ab6e3d09SNaoya Horiguchi set_pmd_at(mm, new_addr, new_pmd, pmd); 18535d190420SAaron Lu if (force_flush) 18545d190420SAaron Lu flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); 1855eb66ae03SLinus Torvalds if (new_ptl != old_ptl) 1856eb66ae03SLinus Torvalds spin_unlock(new_ptl); 1857bf929152SKirill A. Shutemov spin_unlock(old_ptl); 18584b471e88SKirill A. Shutemov return true; 185937a1c49aSAndrea Arcangeli } 18604b471e88SKirill A. Shutemov return false; 186137a1c49aSAndrea Arcangeli } 186237a1c49aSAndrea Arcangeli 1863f123d74aSMel Gorman /* 1864f123d74aSMel Gorman * Returns 1865f123d74aSMel Gorman * - 0 if PMD could not be locked 1866f123d74aSMel Gorman * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary 1867f123d74aSMel Gorman * - HPAGE_PMD_NR if protections changed and TLB flush necessary 1868f123d74aSMel Gorman */ 1869cd7548abSJohannes Weiner int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 1870e944fd67SMel Gorman unsigned long addr, pgprot_t newprot, int prot_numa) 1871cd7548abSJohannes Weiner { 1872cd7548abSJohannes Weiner struct mm_struct *mm = vma->vm_mm; 1873bf929152SKirill A. Shutemov spinlock_t *ptl; 18740a85e51dSKirill A. Shutemov pmd_t entry; 18750a85e51dSKirill A. Shutemov bool preserve_write; 18760a85e51dSKirill A. Shutemov int ret; 1877cd7548abSJohannes Weiner 1878b6ec57f4SKirill A.
Shutemov ptl = __pmd_trans_huge_lock(pmd, vma); 18790a85e51dSKirill A. Shutemov if (!ptl) 18800a85e51dSKirill A. Shutemov return 0; 18810a85e51dSKirill A. Shutemov 18820a85e51dSKirill A. Shutemov preserve_write = prot_numa && pmd_write(*pmd); 1883ba68bc01SMel Gorman ret = 1; 1884e944fd67SMel Gorman 188584c3fc4eSZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 188684c3fc4eSZi Yan if (is_swap_pmd(*pmd)) { 188784c3fc4eSZi Yan swp_entry_t entry = pmd_to_swp_entry(*pmd); 188884c3fc4eSZi Yan 188984c3fc4eSZi Yan VM_BUG_ON(!is_pmd_migration_entry(*pmd)); 189084c3fc4eSZi Yan if (is_write_migration_entry(entry)) { 189184c3fc4eSZi Yan pmd_t newpmd; 189284c3fc4eSZi Yan /* 189384c3fc4eSZi Yan * A protection check is difficult so 189484c3fc4eSZi Yan * just be safe and disable write 189584c3fc4eSZi Yan */ 189684c3fc4eSZi Yan make_migration_entry_read(&entry); 189784c3fc4eSZi Yan newpmd = swp_entry_to_pmd(entry); 1898ab6e3d09SNaoya Horiguchi if (pmd_swp_soft_dirty(*pmd)) 1899ab6e3d09SNaoya Horiguchi newpmd = pmd_swp_mksoft_dirty(newpmd); 190084c3fc4eSZi Yan set_pmd_at(mm, addr, pmd, newpmd); 190184c3fc4eSZi Yan } 190284c3fc4eSZi Yan goto unlock; 190384c3fc4eSZi Yan } 190484c3fc4eSZi Yan #endif 190584c3fc4eSZi Yan 1906e944fd67SMel Gorman /* 1907e944fd67SMel Gorman * Avoid trapping faults against the zero page. The read-only 1908e944fd67SMel Gorman * data is likely to be read-cached on the local CPU and 1909e944fd67SMel Gorman * local/remote hits to the zero page are not interesting. 1910e944fd67SMel Gorman */ 19110a85e51dSKirill A. Shutemov if (prot_numa && is_huge_zero_pmd(*pmd)) 19120a85e51dSKirill A. Shutemov goto unlock; 1913e944fd67SMel Gorman 19140a85e51dSKirill A. Shutemov if (prot_numa && pmd_protnone(*pmd)) 19150a85e51dSKirill A. Shutemov goto unlock; 19160a85e51dSKirill A. Shutemov 1917ced10803SKirill A. Shutemov /* 1918ced10803SKirill A. Shutemov * In case prot_numa, we are under down_read(mmap_sem). It's critical 1919ced10803SKirill A. Shutemov * to not clear pmd intermittently to avoid race with MADV_DONTNEED 1920ced10803SKirill A. Shutemov * which is also under down_read(mmap_sem): 1921ced10803SKirill A. Shutemov * 1922ced10803SKirill A. Shutemov * CPU0: CPU1: 1923ced10803SKirill A. Shutemov * change_huge_pmd(prot_numa=1) 1924ced10803SKirill A. Shutemov * pmdp_huge_get_and_clear_notify() 1925ced10803SKirill A. Shutemov * madvise_dontneed() 1926ced10803SKirill A. Shutemov * zap_pmd_range() 1927ced10803SKirill A. Shutemov * pmd_trans_huge(*pmd) == 0 (without ptl) 1928ced10803SKirill A. Shutemov * // skip the pmd 1929ced10803SKirill A. Shutemov * set_pmd_at(); 1930ced10803SKirill A. Shutemov * // pmd is re-established 1931ced10803SKirill A. Shutemov * 1932ced10803SKirill A. Shutemov * The race makes MADV_DONTNEED miss the huge pmd and don't clear it 1933ced10803SKirill A. Shutemov * which may break userspace. 1934ced10803SKirill A. Shutemov * 1935ced10803SKirill A. Shutemov * pmdp_invalidate() is required to make sure we don't miss 1936ced10803SKirill A. Shutemov * dirty/young flags set by hardware. 1937ced10803SKirill A. Shutemov */ 1938a3cf988fSKirill A. Shutemov entry = pmdp_invalidate(vma, addr, pmd); 1939ced10803SKirill A. Shutemov 1940cd7548abSJohannes Weiner entry = pmd_modify(entry, newprot); 1941b191f9b1SMel Gorman if (preserve_write) 1942288bc549SAneesh Kumar K.V entry = pmd_mk_savedwrite(entry); 1943f123d74aSMel Gorman ret = HPAGE_PMD_NR; 194456eecdb9SAneesh Kumar K.V set_pmd_at(mm, addr, pmd, entry); 19450a85e51dSKirill A. 
Shutemov BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry)); 19460a85e51dSKirill A. Shutemov unlock: 1947bf929152SKirill A. Shutemov spin_unlock(ptl); 1948cd7548abSJohannes Weiner return ret; 1949cd7548abSJohannes Weiner } 1950cd7548abSJohannes Weiner 1951025c5b24SNaoya Horiguchi /* 19528f19b0c0SHuang Ying * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise. 1953025c5b24SNaoya Horiguchi * 19548f19b0c0SHuang Ying * Note that if it returns page table lock pointer, this routine returns without 19558f19b0c0SHuang Ying * unlocking page table lock. So callers must unlock it. 1956025c5b24SNaoya Horiguchi */ 1957b6ec57f4SKirill A. Shutemov spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) 1958025c5b24SNaoya Horiguchi { 1959b6ec57f4SKirill A. Shutemov spinlock_t *ptl; 1960b6ec57f4SKirill A. Shutemov ptl = pmd_lock(vma->vm_mm, pmd); 196184c3fc4eSZi Yan if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || 196284c3fc4eSZi Yan pmd_devmap(*pmd))) 1963b6ec57f4SKirill A. Shutemov return ptl; 1964b6ec57f4SKirill A. Shutemov spin_unlock(ptl); 1965b6ec57f4SKirill A. Shutemov return NULL; 1966025c5b24SNaoya Horiguchi } 1967025c5b24SNaoya Horiguchi 1968a00cc7d9SMatthew Wilcox /* 1969a00cc7d9SMatthew Wilcox * Returns true if a given pud maps a thp, false otherwise. 1970a00cc7d9SMatthew Wilcox * 1971a00cc7d9SMatthew Wilcox * Note that if it returns true, this routine returns without unlocking page 1972a00cc7d9SMatthew Wilcox * table lock. So callers must unlock it. 1973a00cc7d9SMatthew Wilcox */ 1974a00cc7d9SMatthew Wilcox spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) 1975a00cc7d9SMatthew Wilcox { 1976a00cc7d9SMatthew Wilcox spinlock_t *ptl; 1977a00cc7d9SMatthew Wilcox 1978a00cc7d9SMatthew Wilcox ptl = pud_lock(vma->vm_mm, pud); 1979a00cc7d9SMatthew Wilcox if (likely(pud_trans_huge(*pud) || pud_devmap(*pud))) 1980a00cc7d9SMatthew Wilcox return ptl; 1981a00cc7d9SMatthew Wilcox spin_unlock(ptl); 1982a00cc7d9SMatthew Wilcox return NULL; 1983a00cc7d9SMatthew Wilcox } 1984a00cc7d9SMatthew Wilcox 1985a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 1986a00cc7d9SMatthew Wilcox int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, 1987a00cc7d9SMatthew Wilcox pud_t *pud, unsigned long addr) 1988a00cc7d9SMatthew Wilcox { 1989a00cc7d9SMatthew Wilcox pud_t orig_pud; 1990a00cc7d9SMatthew Wilcox spinlock_t *ptl; 1991a00cc7d9SMatthew Wilcox 1992a00cc7d9SMatthew Wilcox ptl = __pud_trans_huge_lock(pud, vma); 1993a00cc7d9SMatthew Wilcox if (!ptl) 1994a00cc7d9SMatthew Wilcox return 0; 1995a00cc7d9SMatthew Wilcox /* 1996a00cc7d9SMatthew Wilcox * For architectures like ppc64 we look at deposited pgtable 1997a00cc7d9SMatthew Wilcox * when calling pudp_huge_get_and_clear. So do the 1998a00cc7d9SMatthew Wilcox * pgtable_trans_huge_withdraw after finishing pudp related 1999a00cc7d9SMatthew Wilcox * operations. 
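 * (This mirrors the ordering requirement documented in zap_huge_pmd() above.)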
2000a00cc7d9SMatthew Wilcox */ 2001a00cc7d9SMatthew Wilcox orig_pud = pudp_huge_get_and_clear_full(tlb->mm, addr, pud, 2002a00cc7d9SMatthew Wilcox tlb->fullmm); 2003a00cc7d9SMatthew Wilcox tlb_remove_pud_tlb_entry(tlb, pud, addr); 2004a00cc7d9SMatthew Wilcox if (vma_is_dax(vma)) { 2005a00cc7d9SMatthew Wilcox spin_unlock(ptl); 2006a00cc7d9SMatthew Wilcox /* No zero page support yet */ 2007a00cc7d9SMatthew Wilcox } else { 2008a00cc7d9SMatthew Wilcox /* No support for anonymous PUD pages yet */ 2009a00cc7d9SMatthew Wilcox BUG(); 2010a00cc7d9SMatthew Wilcox } 2011a00cc7d9SMatthew Wilcox return 1; 2012a00cc7d9SMatthew Wilcox } 2013a00cc7d9SMatthew Wilcox 2014a00cc7d9SMatthew Wilcox static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, 2015a00cc7d9SMatthew Wilcox unsigned long haddr) 2016a00cc7d9SMatthew Wilcox { 2017a00cc7d9SMatthew Wilcox VM_BUG_ON(haddr & ~HPAGE_PUD_MASK); 2018a00cc7d9SMatthew Wilcox VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 2019a00cc7d9SMatthew Wilcox VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); 2020a00cc7d9SMatthew Wilcox VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud)); 2021a00cc7d9SMatthew Wilcox 2022ce9311cfSYisheng Xie count_vm_event(THP_SPLIT_PUD); 2023a00cc7d9SMatthew Wilcox 2024a00cc7d9SMatthew Wilcox pudp_huge_clear_flush_notify(vma, haddr, pud); 2025a00cc7d9SMatthew Wilcox } 2026a00cc7d9SMatthew Wilcox 2027a00cc7d9SMatthew Wilcox void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, 2028a00cc7d9SMatthew Wilcox unsigned long address) 2029a00cc7d9SMatthew Wilcox { 2030a00cc7d9SMatthew Wilcox spinlock_t *ptl; 2031a00cc7d9SMatthew Wilcox struct mm_struct *mm = vma->vm_mm; 2032a00cc7d9SMatthew Wilcox unsigned long haddr = address & HPAGE_PUD_MASK; 2033a00cc7d9SMatthew Wilcox 2034a00cc7d9SMatthew Wilcox mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PUD_SIZE); 2035a00cc7d9SMatthew Wilcox ptl = pud_lock(mm, pud); 2036a00cc7d9SMatthew Wilcox if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud))) 2037a00cc7d9SMatthew Wilcox goto out; 2038a00cc7d9SMatthew Wilcox __split_huge_pud_locked(vma, pud, haddr); 2039a00cc7d9SMatthew Wilcox 2040a00cc7d9SMatthew Wilcox out: 2041a00cc7d9SMatthew Wilcox spin_unlock(ptl); 20424645b9feSJérôme Glisse /* 20434645b9feSJérôme Glisse * No need to double call mmu_notifier->invalidate_range() callback as 20444645b9feSJérôme Glisse * the above pudp_huge_clear_flush_notify() did already call it. 20454645b9feSJérôme Glisse */ 20464645b9feSJérôme Glisse mmu_notifier_invalidate_range_only_end(mm, haddr, haddr + 20474645b9feSJérôme Glisse HPAGE_PUD_SIZE); 2048a00cc7d9SMatthew Wilcox } 2049a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 2050a00cc7d9SMatthew Wilcox 2051eef1b3baSKirill A. Shutemov static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, 2052eef1b3baSKirill A. Shutemov unsigned long haddr, pmd_t *pmd) 2053eef1b3baSKirill A. Shutemov { 2054eef1b3baSKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 2055eef1b3baSKirill A. Shutemov pgtable_t pgtable; 2056eef1b3baSKirill A. Shutemov pmd_t _pmd; 2057eef1b3baSKirill A. Shutemov int i; 2058eef1b3baSKirill A. Shutemov 20590f10851eSJérôme Glisse /* 20600f10851eSJérôme Glisse * Leave pmd empty until pte is filled note that it is fine to delay 20610f10851eSJérôme Glisse * notification until mmu_notifier_invalidate_range_end() as we are 20620f10851eSJérôme Glisse * replacing a zero pmd write protected page with a zero pte write 20630f10851eSJérôme Glisse * protected page. 
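 * In other words, both the old and the new mapping point at read-only, zero-filled memory, so a secondary MMU that briefly keeps the stale translation can only ever read zeros.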
20640f10851eSJérôme Glisse * 2065ad56b738SMike Rapoport * See Documentation/vm/mmu_notifier.rst 20660f10851eSJérôme Glisse */ 20670f10851eSJérôme Glisse pmdp_huge_clear_flush(vma, haddr, pmd); 2068eef1b3baSKirill A. Shutemov 2069eef1b3baSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2070eef1b3baSKirill A. Shutemov pmd_populate(mm, &_pmd, pgtable); 2071eef1b3baSKirill A. Shutemov 2072eef1b3baSKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 2073eef1b3baSKirill A. Shutemov pte_t *pte, entry; 2074eef1b3baSKirill A. Shutemov entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); 2075eef1b3baSKirill A. Shutemov entry = pte_mkspecial(entry); 2076eef1b3baSKirill A. Shutemov pte = pte_offset_map(&_pmd, haddr); 2077eef1b3baSKirill A. Shutemov VM_BUG_ON(!pte_none(*pte)); 2078eef1b3baSKirill A. Shutemov set_pte_at(mm, haddr, pte, entry); 2079eef1b3baSKirill A. Shutemov pte_unmap(pte); 2080eef1b3baSKirill A. Shutemov } 2081eef1b3baSKirill A. Shutemov smp_wmb(); /* make pte visible before pmd */ 2082eef1b3baSKirill A. Shutemov pmd_populate(mm, pmd, pgtable); 2083eef1b3baSKirill A. Shutemov } 2084eef1b3baSKirill A. Shutemov 2085eef1b3baSKirill A. Shutemov static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, 2086ba988280SKirill A. Shutemov unsigned long haddr, bool freeze) 2087eef1b3baSKirill A. Shutemov { 2088eef1b3baSKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 2089eef1b3baSKirill A. Shutemov struct page *page; 2090eef1b3baSKirill A. Shutemov pgtable_t pgtable; 2091423ac9afSAneesh Kumar K.V pmd_t old_pmd, _pmd; 2092a3cf988fSKirill A. Shutemov bool young, write, soft_dirty, pmd_migration = false; 20932ac015e2SKirill A. Shutemov unsigned long addr; 2094eef1b3baSKirill A. Shutemov int i; 2095eef1b3baSKirill A. Shutemov 2096eef1b3baSKirill A. Shutemov VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); 2097eef1b3baSKirill A. Shutemov VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 2098eef1b3baSKirill A. Shutemov VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); 209984c3fc4eSZi Yan VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd) 210084c3fc4eSZi Yan && !pmd_devmap(*pmd)); 2101eef1b3baSKirill A. Shutemov 2102eef1b3baSKirill A. Shutemov count_vm_event(THP_SPLIT_PMD); 2103eef1b3baSKirill A. Shutemov 2104d21b9e57SKirill A. Shutemov if (!vma_is_anonymous(vma)) { 2105d21b9e57SKirill A. Shutemov _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); 2106953c66c2SAneesh Kumar K.V /* 2107953c66c2SAneesh Kumar K.V * We are going to unmap this huge page. So 2108953c66c2SAneesh Kumar K.V * just go ahead and zap it 2109953c66c2SAneesh Kumar K.V */ 2110953c66c2SAneesh Kumar K.V if (arch_needs_pgtable_deposit()) 2111953c66c2SAneesh Kumar K.V zap_deposited_table(mm, pmd); 2112d21b9e57SKirill A. Shutemov if (vma_is_dax(vma)) 2113d21b9e57SKirill A. Shutemov return; 2114d21b9e57SKirill A. Shutemov page = pmd_page(_pmd); 2115e1f1b157SHugh Dickins if (!PageDirty(page) && pmd_dirty(_pmd)) 2116e1f1b157SHugh Dickins set_page_dirty(page); 2117d21b9e57SKirill A. Shutemov if (!PageReferenced(page) && pmd_young(_pmd)) 2118d21b9e57SKirill A. Shutemov SetPageReferenced(page); 2119d21b9e57SKirill A. Shutemov page_remove_rmap(page, true); 2120d21b9e57SKirill A. Shutemov put_page(page); 2121fadae295SYang Shi add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR); 2122eef1b3baSKirill A. Shutemov return; 2123eef1b3baSKirill A. 
Shutemov } else if (is_huge_zero_pmd(*pmd)) { 21244645b9feSJérôme Glisse /* 21254645b9feSJérôme Glisse * FIXME: Do we want to invalidate secondary mmu by calling 21264645b9feSJérôme Glisse * mmu_notifier_invalidate_range()? See comments below inside 21274645b9feSJérôme Glisse * __split_huge_pmd(). 21284645b9feSJérôme Glisse * 21294645b9feSJérôme Glisse * We are going from a zero huge page write protected to zero 21304645b9feSJérôme Glisse * small page also write protected so it does not seem useful 21314645b9feSJérôme Glisse * to invalidate secondary mmu at this time. 21324645b9feSJérôme Glisse */ 2133eef1b3baSKirill A. Shutemov return __split_huge_zero_page_pmd(vma, haddr, pmd); 2134eef1b3baSKirill A. Shutemov } 2135eef1b3baSKirill A. Shutemov 2136423ac9afSAneesh Kumar K.V /* 2137423ac9afSAneesh Kumar K.V * Up to this point the pmd is present and huge and userland has the 2138423ac9afSAneesh Kumar K.V * whole access to the hugepage during the split (which happens in 2139423ac9afSAneesh Kumar K.V * place). If we overwrite the pmd with the not-huge version pointing 2140423ac9afSAneesh Kumar K.V * to the pte here (which of course we could if all CPUs were bug 2141423ac9afSAneesh Kumar K.V * free), userland could trigger a small page size TLB miss on the 2142423ac9afSAneesh Kumar K.V * small sized TLB while the hugepage TLB entry is still established in 2143423ac9afSAneesh Kumar K.V * the huge TLB. Some CPUs don't like that. 2144423ac9afSAneesh Kumar K.V * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum 2145423ac9afSAneesh Kumar K.V * 383 on page 93. Intel should be safe but also warns that it's 2146423ac9afSAneesh Kumar K.V * only safe if the permission and cache attributes of the two entries 2147423ac9afSAneesh Kumar K.V * loaded in the two TLBs are identical (which should be the case here). 2148423ac9afSAneesh Kumar K.V * But it is generally safer to never allow small and huge TLB entries 2149423ac9afSAneesh Kumar K.V * for the same virtual address to be loaded simultaneously. So instead 2150423ac9afSAneesh Kumar K.V * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the 2151423ac9afSAneesh Kumar K.V * current pmd notpresent (atomically because here the pmd_trans_huge 2152423ac9afSAneesh Kumar K.V * must remain set at all times on the pmd until the split is complete 2153423ac9afSAneesh Kumar K.V * for this pmd), then we flush the SMP TLB and finally we write the 2154423ac9afSAneesh Kumar K.V * non-huge version of the pmd entry with pmd_populate. 2155423ac9afSAneesh Kumar K.V */ 2156423ac9afSAneesh Kumar K.V old_pmd = pmdp_invalidate(vma, haddr, pmd); 2157423ac9afSAneesh Kumar K.V 215884c3fc4eSZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 2159423ac9afSAneesh Kumar K.V pmd_migration = is_pmd_migration_entry(old_pmd); 216084c3fc4eSZi Yan if (pmd_migration) { 216184c3fc4eSZi Yan swp_entry_t entry; 216284c3fc4eSZi Yan 2163423ac9afSAneesh Kumar K.V entry = pmd_to_swp_entry(old_pmd); 216484c3fc4eSZi Yan page = pfn_to_page(swp_offset(entry)); 216584c3fc4eSZi Yan } else 216684c3fc4eSZi Yan #endif 2167423ac9afSAneesh Kumar K.V page = pmd_page(old_pmd); 2168eef1b3baSKirill A.
Shutemov VM_BUG_ON_PAGE(!page_count(page), page); 2169fe896d18SJoonsoo Kim page_ref_add(page, HPAGE_PMD_NR - 1); 2170423ac9afSAneesh Kumar K.V if (pmd_dirty(old_pmd)) 2171423ac9afSAneesh Kumar K.V SetPageDirty(page); 2172423ac9afSAneesh Kumar K.V write = pmd_write(old_pmd); 2173423ac9afSAneesh Kumar K.V young = pmd_young(old_pmd); 2174423ac9afSAneesh Kumar K.V soft_dirty = pmd_soft_dirty(old_pmd); 2175eef1b3baSKirill A. Shutemov 2176423ac9afSAneesh Kumar K.V /* 2177423ac9afSAneesh Kumar K.V * Withdraw the table only after we mark the pmd entry invalid. 2178423ac9afSAneesh Kumar K.V * This is critical for some architectures (Power). 2179423ac9afSAneesh Kumar K.V */ 2180eef1b3baSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2181eef1b3baSKirill A. Shutemov pmd_populate(mm, &_pmd, pgtable); 2182eef1b3baSKirill A. Shutemov 21832ac015e2SKirill A. Shutemov for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { 2184eef1b3baSKirill A. Shutemov pte_t entry, *pte; 2185eef1b3baSKirill A. Shutemov /* 2186eef1b3baSKirill A. Shutemov * Note that NUMA hinting access restrictions are not 2187eef1b3baSKirill A. Shutemov * transferred to avoid any possibility of altering 2188eef1b3baSKirill A. Shutemov * permissions across VMAs. 2189eef1b3baSKirill A. Shutemov */ 219084c3fc4eSZi Yan if (freeze || pmd_migration) { 2191ba988280SKirill A. Shutemov swp_entry_t swp_entry; 2192ba988280SKirill A. Shutemov swp_entry = make_migration_entry(page + i, write); 2193ba988280SKirill A. Shutemov entry = swp_entry_to_pte(swp_entry); 2194804dd150SAndrea Arcangeli if (soft_dirty) 2195804dd150SAndrea Arcangeli entry = pte_swp_mksoft_dirty(entry); 2196ba988280SKirill A. Shutemov } else { 21976d2329f8SAndrea Arcangeli entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); 2198b8d3c4c3SMinchan Kim entry = maybe_mkwrite(entry, vma); 2199eef1b3baSKirill A. Shutemov if (!write) 2200eef1b3baSKirill A. Shutemov entry = pte_wrprotect(entry); 2201eef1b3baSKirill A. Shutemov if (!young) 2202eef1b3baSKirill A. Shutemov entry = pte_mkold(entry); 2203804dd150SAndrea Arcangeli if (soft_dirty) 2204804dd150SAndrea Arcangeli entry = pte_mksoft_dirty(entry); 2205ba988280SKirill A. Shutemov } 22062ac015e2SKirill A. Shutemov pte = pte_offset_map(&_pmd, addr); 2207eef1b3baSKirill A. Shutemov BUG_ON(!pte_none(*pte)); 22082ac015e2SKirill A. Shutemov set_pte_at(mm, addr, pte, entry); 2209eef1b3baSKirill A. Shutemov atomic_inc(&page[i]._mapcount); 2210eef1b3baSKirill A. Shutemov pte_unmap(pte); 2211eef1b3baSKirill A. Shutemov } 2212eef1b3baSKirill A. Shutemov 2213eef1b3baSKirill A. Shutemov /* 2214eef1b3baSKirill A. Shutemov * Set PG_double_map before dropping compound_mapcount to avoid 2215eef1b3baSKirill A. Shutemov * false-negative page_mapped(). 2216eef1b3baSKirill A. Shutemov */ 2217eef1b3baSKirill A. Shutemov if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) { 2218eef1b3baSKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) 2219eef1b3baSKirill A. Shutemov atomic_inc(&page[i]._mapcount); 2220eef1b3baSKirill A. Shutemov } 2221eef1b3baSKirill A. Shutemov 2222eef1b3baSKirill A. Shutemov if (atomic_add_negative(-1, compound_mapcount_ptr(page))) { 2223eef1b3baSKirill A. Shutemov /* Last compound_mapcount is gone. */ 222411fb9989SMel Gorman __dec_node_page_state(page, NR_ANON_THPS); 2225eef1b3baSKirill A. Shutemov if (TestClearPageDoubleMap(page)) { 2226eef1b3baSKirill A. Shutemov /* No need for the mapcount reference anymore */ 2227eef1b3baSKirill A.
Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) 2228eef1b3baSKirill A. Shutemov atomic_dec(&page[i]._mapcount); 2229eef1b3baSKirill A. Shutemov } 2230eef1b3baSKirill A. Shutemov } 2231eef1b3baSKirill A. Shutemov 2232eef1b3baSKirill A. Shutemov smp_wmb(); /* make pte visible before pmd */ 2233eef1b3baSKirill A. Shutemov pmd_populate(mm, pmd, pgtable); 2234e9b61f19SKirill A. Shutemov 2235e9b61f19SKirill A. Shutemov if (freeze) { 22362ac015e2SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) { 2237e9b61f19SKirill A. Shutemov page_remove_rmap(page + i, false); 2238e9b61f19SKirill A. Shutemov put_page(page + i); 2239e9b61f19SKirill A. Shutemov } 2240e9b61f19SKirill A. Shutemov } 2241eef1b3baSKirill A. Shutemov } 2242eef1b3baSKirill A. Shutemov 2243eef1b3baSKirill A. Shutemov void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 224433f4751eSNaoya Horiguchi unsigned long address, bool freeze, struct page *page) 2245eef1b3baSKirill A. Shutemov { 2246eef1b3baSKirill A. Shutemov spinlock_t *ptl; 2247eef1b3baSKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 2248eef1b3baSKirill A. Shutemov unsigned long haddr = address & HPAGE_PMD_MASK; 2249eef1b3baSKirill A. Shutemov 2250eef1b3baSKirill A. Shutemov mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE); 2251eef1b3baSKirill A. Shutemov ptl = pmd_lock(mm, pmd); 225233f4751eSNaoya Horiguchi 225333f4751eSNaoya Horiguchi /* 225433f4751eSNaoya Horiguchi * If the caller asks to set up migration entries, we need a page to check 225533f4751eSNaoya Horiguchi * the pmd against. Otherwise we can end up replacing the wrong page. 225633f4751eSNaoya Horiguchi */ 225733f4751eSNaoya Horiguchi VM_BUG_ON(freeze && !page); 225833f4751eSNaoya Horiguchi if (page && page != pmd_page(*pmd)) 225933f4751eSNaoya Horiguchi goto out; 226033f4751eSNaoya Horiguchi 22615c7fb56eSDan Williams if (pmd_trans_huge(*pmd)) { 226233f4751eSNaoya Horiguchi page = pmd_page(*pmd); 2263e90309c9SKirill A. Shutemov if (PageMlocked(page)) 22645f737714SKirill A. Shutemov clear_page_mlock(page); 226584c3fc4eSZi Yan } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd))) 22665c7fb56eSDan Williams goto out; 2267fec89c10SKirill A. Shutemov __split_huge_pmd_locked(vma, pmd, haddr, freeze); 2268e90309c9SKirill A. Shutemov out: 2269eef1b3baSKirill A. Shutemov spin_unlock(ptl); 22704645b9feSJérôme Glisse /* 22714645b9feSJérôme Glisse * No need to double call mmu_notifier->invalidate_range() callback. 22724645b9feSJérôme Glisse * There are 3 cases to consider inside __split_huge_pmd_locked(): 22734645b9feSJérôme Glisse * 1) pmdp_huge_clear_flush_notify() calls invalidate_range(), the obvious case 22744645b9feSJérôme Glisse * 2) __split_huge_zero_page_pmd() maps the read-only zero page and any write 22754645b9feSJérôme Glisse * fault will trigger a flush_notify before pointing to a new page 22764645b9feSJérôme Glisse * (it is fine if the secondary mmu keeps pointing to the old zero 22774645b9feSJérôme Glisse * page in the meantime) 22784645b9feSJérôme Glisse * 3) Splitting a huge pmd into ptes pointing to the same page. No need 22794645b9feSJérôme Glisse * to invalidate secondary tlb entries, they are all still valid. 22804645b9feSJérôme Glisse * Any further changes to individual ptes will notify. So no need 22814645b9feSJérôme Glisse * to call mmu_notifier->invalidate_range() 22824645b9feSJérôme Glisse */ 22834645b9feSJérôme Glisse mmu_notifier_invalidate_range_only_end(mm, haddr, haddr + 22844645b9feSJérôme Glisse HPAGE_PMD_SIZE); 2285eef1b3baSKirill A. Shutemov } 2286eef1b3baSKirill A.
Shutemov 2287fec89c10SKirill A. Shutemov void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, 2288fec89c10SKirill A. Shutemov bool freeze, struct page *page) 228994fcc585SAndrea Arcangeli { 2290f72e7dcdSHugh Dickins pgd_t *pgd; 2291c2febafcSKirill A. Shutemov p4d_t *p4d; 2292f72e7dcdSHugh Dickins pud_t *pud; 229394fcc585SAndrea Arcangeli pmd_t *pmd; 229494fcc585SAndrea Arcangeli 229578ddc534SKirill A. Shutemov pgd = pgd_offset(vma->vm_mm, address); 2296f72e7dcdSHugh Dickins if (!pgd_present(*pgd)) 2297f72e7dcdSHugh Dickins return; 2298f72e7dcdSHugh Dickins 2299c2febafcSKirill A. Shutemov p4d = p4d_offset(pgd, address); 2300c2febafcSKirill A. Shutemov if (!p4d_present(*p4d)) 2301c2febafcSKirill A. Shutemov return; 2302c2febafcSKirill A. Shutemov 2303c2febafcSKirill A. Shutemov pud = pud_offset(p4d, address); 2304f72e7dcdSHugh Dickins if (!pud_present(*pud)) 2305f72e7dcdSHugh Dickins return; 2306f72e7dcdSHugh Dickins 2307f72e7dcdSHugh Dickins pmd = pmd_offset(pud, address); 2308fec89c10SKirill A. Shutemov 230933f4751eSNaoya Horiguchi __split_huge_pmd(vma, pmd, address, freeze, page); 231094fcc585SAndrea Arcangeli } 231194fcc585SAndrea Arcangeli 2312e1b9996bSKirill A. Shutemov void vma_adjust_trans_huge(struct vm_area_struct *vma, 231394fcc585SAndrea Arcangeli unsigned long start, 231494fcc585SAndrea Arcangeli unsigned long end, 231594fcc585SAndrea Arcangeli long adjust_next) 231694fcc585SAndrea Arcangeli { 231794fcc585SAndrea Arcangeli /* 231894fcc585SAndrea Arcangeli * If the new start address isn't hpage aligned and it could 231994fcc585SAndrea Arcangeli * previously contain a hugepage: check if we need to split 232094fcc585SAndrea Arcangeli * a huge pmd. 232194fcc585SAndrea Arcangeli */ 232294fcc585SAndrea Arcangeli if (start & ~HPAGE_PMD_MASK && 232394fcc585SAndrea Arcangeli (start & HPAGE_PMD_MASK) >= vma->vm_start && 232494fcc585SAndrea Arcangeli (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) 2325fec89c10SKirill A. Shutemov split_huge_pmd_address(vma, start, false, NULL); 232694fcc585SAndrea Arcangeli 232794fcc585SAndrea Arcangeli /* 232894fcc585SAndrea Arcangeli * If the new end address isn't hpage aligned and it could 232994fcc585SAndrea Arcangeli * previously contain a hugepage: check if we need to split 233094fcc585SAndrea Arcangeli * a huge pmd. 233194fcc585SAndrea Arcangeli */ 233294fcc585SAndrea Arcangeli if (end & ~HPAGE_PMD_MASK && 233394fcc585SAndrea Arcangeli (end & HPAGE_PMD_MASK) >= vma->vm_start && 233494fcc585SAndrea Arcangeli (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) 2335fec89c10SKirill A. Shutemov split_huge_pmd_address(vma, end, false, NULL); 233694fcc585SAndrea Arcangeli 233794fcc585SAndrea Arcangeli /* 233894fcc585SAndrea Arcangeli * If we're also updating the vma->vm_next->vm_start and the new 233994fcc585SAndrea Arcangeli * vm_next->vm_start isn't page aligned and it could previously 234094fcc585SAndrea Arcangeli * contain a hugepage: check if we need to split a huge pmd.
234194fcc585SAndrea Arcangeli */ 234294fcc585SAndrea Arcangeli if (adjust_next > 0) { 234394fcc585SAndrea Arcangeli struct vm_area_struct *next = vma->vm_next; 234494fcc585SAndrea Arcangeli unsigned long nstart = next->vm_start; 234594fcc585SAndrea Arcangeli nstart += adjust_next << PAGE_SHIFT; 234694fcc585SAndrea Arcangeli if (nstart & ~HPAGE_PMD_MASK && 234794fcc585SAndrea Arcangeli (nstart & HPAGE_PMD_MASK) >= next->vm_start && 234894fcc585SAndrea Arcangeli (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end) 2349fec89c10SKirill A. Shutemov split_huge_pmd_address(next, nstart, false, NULL); 235094fcc585SAndrea Arcangeli } 235194fcc585SAndrea Arcangeli } 2352e9b61f19SKirill A. Shutemov 2353906f9cdfSHugh Dickins static void unmap_page(struct page *page) 2354e9b61f19SKirill A. Shutemov { 2355baa355fdSKirill A. Shutemov enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS | 2356c7ab0d2fSKirill A. Shutemov TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD; 2357666e5a40SMinchan Kim bool unmap_success; 2358e9b61f19SKirill A. Shutemov 2359e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(!PageHead(page), page); 2360e9b61f19SKirill A. Shutemov 2361baa355fdSKirill A. Shutemov if (PageAnon(page)) 2362b5ff8161SNaoya Horiguchi ttu_flags |= TTU_SPLIT_FREEZE; 2363baa355fdSKirill A. Shutemov 2364666e5a40SMinchan Kim unmap_success = try_to_unmap(page, ttu_flags); 2365666e5a40SMinchan Kim VM_BUG_ON_PAGE(!unmap_success, page); 2366bd56086fSKirill A. Shutemov } 2367bd56086fSKirill A. Shutemov 2368906f9cdfSHugh Dickins static void remap_page(struct page *page) 2369e9b61f19SKirill A. Shutemov { 2370fec89c10SKirill A. Shutemov int i; 2371ace71a19SKirill A. Shutemov if (PageTransHuge(page)) { 2372ace71a19SKirill A. Shutemov remove_migration_ptes(page, page, true); 2373ace71a19SKirill A. Shutemov } else { 2374fec89c10SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) 2375fec89c10SKirill A. Shutemov remove_migration_ptes(page + i, page + i, true); 2376e9b61f19SKirill A. Shutemov } 2377ace71a19SKirill A. Shutemov } 2378e9b61f19SKirill A. Shutemov 23798df651c7SKirill A. Shutemov static void __split_huge_page_tail(struct page *head, int tail, 2380e9b61f19SKirill A. Shutemov struct lruvec *lruvec, struct list_head *list) 2381e9b61f19SKirill A. Shutemov { 2382e9b61f19SKirill A. Shutemov struct page *page_tail = head + tail; 2383e9b61f19SKirill A. Shutemov 23848df651c7SKirill A. Shutemov VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail); 2385e9b61f19SKirill A. Shutemov 2386e9b61f19SKirill A. Shutemov /* 2387605ca5edSKonstantin Khlebnikov * Clone page flags before unfreezing refcount. 2388605ca5edSKonstantin Khlebnikov * 2389605ca5edSKonstantin Khlebnikov * After a successful get_page_unless_zero(), flags may change, 2390605ca5edSKonstantin Khlebnikov * for example lock_page() which sets PG_waiters. 2391e9b61f19SKirill A. Shutemov */ 2392e9b61f19SKirill A. Shutemov page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 2393e9b61f19SKirill A. Shutemov page_tail->flags |= (head->flags & 2394e9b61f19SKirill A. Shutemov ((1L << PG_referenced) | 2395e9b61f19SKirill A. Shutemov (1L << PG_swapbacked) | 239638d8b4e6SHuang Ying (1L << PG_swapcache) | 2397e9b61f19SKirill A. Shutemov (1L << PG_mlocked) | 2398e9b61f19SKirill A. Shutemov (1L << PG_uptodate) | 2399e9b61f19SKirill A. Shutemov (1L << PG_active) | 24001899ad18SJohannes Weiner (1L << PG_workingset) | 2401e9b61f19SKirill A.
Shutemov (1L << PG_locked) | 2402b8d3c4c3SMinchan Kim (1L << PG_unevictable) | 2403b8d3c4c3SMinchan Kim (1L << PG_dirty))); 2404e9b61f19SKirill A. Shutemov 2405173d9d9fSHugh Dickins /* ->mapping in first tail page is compound_mapcount */ 2406173d9d9fSHugh Dickins VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING, 2407173d9d9fSHugh Dickins page_tail); 2408173d9d9fSHugh Dickins page_tail->mapping = head->mapping; 2409173d9d9fSHugh Dickins page_tail->index = head->index + tail; 2410173d9d9fSHugh Dickins 2411605ca5edSKonstantin Khlebnikov /* Page flags must be visible before we make the page non-compound. */ 2412e9b61f19SKirill A. Shutemov smp_wmb(); 2413e9b61f19SKirill A. Shutemov 2414605ca5edSKonstantin Khlebnikov /* 2415605ca5edSKonstantin Khlebnikov * Clear PageTail before unfreezing page refcount. 2416605ca5edSKonstantin Khlebnikov * 2417605ca5edSKonstantin Khlebnikov * After successful get_page_unless_zero() might follow put_page() 2418605ca5edSKonstantin Khlebnikov * which needs correct compound_head(). 2419605ca5edSKonstantin Khlebnikov */ 2420e9b61f19SKirill A. Shutemov clear_compound_head(page_tail); 2421e9b61f19SKirill A. Shutemov 2422605ca5edSKonstantin Khlebnikov /* Finally unfreeze refcount. Additional reference from page cache. */ 2423605ca5edSKonstantin Khlebnikov page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) || 2424605ca5edSKonstantin Khlebnikov PageSwapCache(head))); 2425605ca5edSKonstantin Khlebnikov 2426e9b61f19SKirill A. Shutemov if (page_is_young(head)) 2427e9b61f19SKirill A. Shutemov set_page_young(page_tail); 2428e9b61f19SKirill A. Shutemov if (page_is_idle(head)) 2429e9b61f19SKirill A. Shutemov set_page_idle(page_tail); 2430e9b61f19SKirill A. Shutemov 2431e9b61f19SKirill A. Shutemov page_cpupid_xchg_last(page_tail, page_cpupid_last(head)); 243294723aafSMichal Hocko 243394723aafSMichal Hocko /* 243494723aafSMichal Hocko * always add to the tail because some iterators expect new 243594723aafSMichal Hocko * pages to show after the currently processed elements - e.g. 243694723aafSMichal Hocko * migrate_pages 243794723aafSMichal Hocko */ 2438e9b61f19SKirill A. Shutemov lru_add_page_tail(head, page_tail, lruvec, list); 2439e9b61f19SKirill A. Shutemov } 2440e9b61f19SKirill A. Shutemov 2441baa355fdSKirill A. Shutemov static void __split_huge_page(struct page *page, struct list_head *list, 2442*006d3ff2SHugh Dickins pgoff_t end, unsigned long flags) 2443e9b61f19SKirill A. Shutemov { 2444e9b61f19SKirill A. Shutemov struct page *head = compound_head(page); 2445e9b61f19SKirill A. Shutemov struct zone *zone = page_zone(head); 2446e9b61f19SKirill A. Shutemov struct lruvec *lruvec; 24478df651c7SKirill A. Shutemov int i; 2448e9b61f19SKirill A. Shutemov 2449599d0c95SMel Gorman lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat); 2450e9b61f19SKirill A. Shutemov 2451e9b61f19SKirill A. Shutemov /* complete memcg works before add pages to LRU */ 2452e9b61f19SKirill A. Shutemov mem_cgroup_split_huge_fixup(head); 2453e9b61f19SKirill A. Shutemov 2454baa355fdSKirill A. Shutemov for (i = HPAGE_PMD_NR - 1; i >= 1; i--) { 24558df651c7SKirill A. Shutemov __split_huge_page_tail(head, i, lruvec, list); 2456baa355fdSKirill A. Shutemov /* Some pages can be beyond i_size: drop them from page cache */ 2457baa355fdSKirill A. Shutemov if (head[i].index >= end) { 24582d077d4bSHugh Dickins ClearPageDirty(head + i); 2459baa355fdSKirill A. Shutemov __delete_from_page_cache(head + i, NULL); 2460800d8c63SKirill A. 
Shutemov if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head)) 2461800d8c63SKirill A. Shutemov shmem_uncharge(head->mapping->host, 1); 2462baa355fdSKirill A. Shutemov put_page(head + i); 2463baa355fdSKirill A. Shutemov } 2464baa355fdSKirill A. Shutemov } 2465e9b61f19SKirill A. Shutemov 2466e9b61f19SKirill A. Shutemov ClearPageCompound(head); 2467baa355fdSKirill A. Shutemov /* See comment in __split_huge_page_tail() */ 2468baa355fdSKirill A. Shutemov if (PageAnon(head)) { 2469aa5dc07fSMatthew Wilcox /* Additional pin to swap cache */ 247038d8b4e6SHuang Ying if (PageSwapCache(head)) 247138d8b4e6SHuang Ying page_ref_add(head, 2); 247238d8b4e6SHuang Ying else 2473baa355fdSKirill A. Shutemov page_ref_inc(head); 2474baa355fdSKirill A. Shutemov } else { 2475aa5dc07fSMatthew Wilcox /* Additional pin to page cache */ 2476baa355fdSKirill A. Shutemov page_ref_add(head, 2); 2477b93b0163SMatthew Wilcox xa_unlock(&head->mapping->i_pages); 2478baa355fdSKirill A. Shutemov } 2479baa355fdSKirill A. Shutemov 2480a52633d8SMel Gorman spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags); 2481e9b61f19SKirill A. Shutemov 2482906f9cdfSHugh Dickins remap_page(head); 2483e9b61f19SKirill A. Shutemov 2484e9b61f19SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) { 2485e9b61f19SKirill A. Shutemov struct page *subpage = head + i; 2486e9b61f19SKirill A. Shutemov if (subpage == page) 2487e9b61f19SKirill A. Shutemov continue; 2488e9b61f19SKirill A. Shutemov unlock_page(subpage); 2489e9b61f19SKirill A. Shutemov 2490e9b61f19SKirill A. Shutemov /* 2491e9b61f19SKirill A. Shutemov * Subpages may be freed if there wasn't any mapping, 2492e9b61f19SKirill A. Shutemov * for example if add_to_swap() is running on a lru page that 2493e9b61f19SKirill A. Shutemov * had its mapping zapped. And freeing these pages 2494e9b61f19SKirill A. Shutemov * requires taking the lru_lock, so we do the put_page 2495e9b61f19SKirill A. Shutemov * of the tail pages after the split is complete. 2496e9b61f19SKirill A. Shutemov */ 2497e9b61f19SKirill A. Shutemov put_page(subpage); 2498e9b61f19SKirill A. Shutemov } 2499e9b61f19SKirill A. Shutemov } 2500e9b61f19SKirill A. Shutemov 2501b20ce5e0SKirill A. Shutemov int total_mapcount(struct page *page) 2502b20ce5e0SKirill A. Shutemov { 2503dd78feddSKirill A. Shutemov int i, compound, ret; 2504b20ce5e0SKirill A. Shutemov 2505b20ce5e0SKirill A. Shutemov VM_BUG_ON_PAGE(PageTail(page), page); 2506b20ce5e0SKirill A. Shutemov 2507b20ce5e0SKirill A. Shutemov if (likely(!PageCompound(page))) 2508b20ce5e0SKirill A. Shutemov return atomic_read(&page->_mapcount) + 1; 2509b20ce5e0SKirill A. Shutemov 2510dd78feddSKirill A. Shutemov compound = compound_mapcount(page); 2511b20ce5e0SKirill A. Shutemov if (PageHuge(page)) 2512dd78feddSKirill A. Shutemov return compound; 2513dd78feddSKirill A. Shutemov ret = compound; 2514b20ce5e0SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) 2515b20ce5e0SKirill A. Shutemov ret += atomic_read(&page[i]._mapcount) + 1; 2516dd78feddSKirill A. Shutemov /* File pages have compound_mapcount included in _mapcount */ 2517dd78feddSKirill A. Shutemov if (!PageAnon(page)) 2518dd78feddSKirill A. Shutemov return ret - compound * HPAGE_PMD_NR; 2519b20ce5e0SKirill A. Shutemov if (PageDoubleMap(page)) 2520b20ce5e0SKirill A. Shutemov ret -= HPAGE_PMD_NR; 2521b20ce5e0SKirill A. Shutemov return ret; 2522b20ce5e0SKirill A. Shutemov } 2523b20ce5e0SKirill A. Shutemov 2524e9b61f19SKirill A.
Shutemov /* 25256d0a07edSAndrea Arcangeli * This calculates accurately how many mappings a transparent hugepage 25266d0a07edSAndrea Arcangeli * has (unlike page_mapcount() which isn't fully accurate). This full 25276d0a07edSAndrea Arcangeli * accuracy is primarily needed to know if copy-on-write faults can 25286d0a07edSAndrea Arcangeli * reuse the page and change the mapping to read-write instead of 25296d0a07edSAndrea Arcangeli * copying them. At the same time this returns the total_mapcount too. 25306d0a07edSAndrea Arcangeli * 25316d0a07edSAndrea Arcangeli * The function returns the highest mapcount any one of the subpages 25326d0a07edSAndrea Arcangeli * has. If the return value is one, even if different processes are 25336d0a07edSAndrea Arcangeli * mapping different subpages of the transparent hugepage, they can 25346d0a07edSAndrea Arcangeli * all reuse it, because each process is reusing a different subpage. 25356d0a07edSAndrea Arcangeli * 25366d0a07edSAndrea Arcangeli * The total_mapcount is instead counting all virtual mappings of the 25376d0a07edSAndrea Arcangeli * subpages. If the total_mapcount is equal to "one", it tells the 25386d0a07edSAndrea Arcangeli * caller all mappings belong to the same "mm" and in turn the 25396d0a07edSAndrea Arcangeli * anon_vma of the transparent hugepage can become the vma->anon_vma 25406d0a07edSAndrea Arcangeli * local one as no other process may be mapping any of the subpages. 25416d0a07edSAndrea Arcangeli * 25426d0a07edSAndrea Arcangeli * It would be more accurate to replace page_mapcount() with 25436d0a07edSAndrea Arcangeli * page_trans_huge_mapcount(), however we only use 25446d0a07edSAndrea Arcangeli * page_trans_huge_mapcount() in the copy-on-write faults where we 25456d0a07edSAndrea Arcangeli * need full accuracy to avoid breaking page pinning, because 25466d0a07edSAndrea Arcangeli * page_trans_huge_mapcount() is slower than page_mapcount(). 
25476d0a07edSAndrea Arcangeli */ 25486d0a07edSAndrea Arcangeli int page_trans_huge_mapcount(struct page *page, int *total_mapcount) 25496d0a07edSAndrea Arcangeli { 25506d0a07edSAndrea Arcangeli int i, ret, _total_mapcount, mapcount; 25516d0a07edSAndrea Arcangeli 25526d0a07edSAndrea Arcangeli /* hugetlbfs shouldn't call it */ 25536d0a07edSAndrea Arcangeli VM_BUG_ON_PAGE(PageHuge(page), page); 25546d0a07edSAndrea Arcangeli 25556d0a07edSAndrea Arcangeli if (likely(!PageTransCompound(page))) { 25566d0a07edSAndrea Arcangeli mapcount = atomic_read(&page->_mapcount) + 1; 25576d0a07edSAndrea Arcangeli if (total_mapcount) 25586d0a07edSAndrea Arcangeli *total_mapcount = mapcount; 25596d0a07edSAndrea Arcangeli return mapcount; 25606d0a07edSAndrea Arcangeli } 25616d0a07edSAndrea Arcangeli 25626d0a07edSAndrea Arcangeli page = compound_head(page); 25636d0a07edSAndrea Arcangeli 25646d0a07edSAndrea Arcangeli _total_mapcount = ret = 0; 25656d0a07edSAndrea Arcangeli for (i = 0; i < HPAGE_PMD_NR; i++) { 25666d0a07edSAndrea Arcangeli mapcount = atomic_read(&page[i]._mapcount) + 1; 25676d0a07edSAndrea Arcangeli ret = max(ret, mapcount); 25686d0a07edSAndrea Arcangeli _total_mapcount += mapcount; 25696d0a07edSAndrea Arcangeli } 25706d0a07edSAndrea Arcangeli if (PageDoubleMap(page)) { 25716d0a07edSAndrea Arcangeli ret -= 1; 25726d0a07edSAndrea Arcangeli _total_mapcount -= HPAGE_PMD_NR; 25736d0a07edSAndrea Arcangeli } 25746d0a07edSAndrea Arcangeli mapcount = compound_mapcount(page); 25756d0a07edSAndrea Arcangeli ret += mapcount; 25766d0a07edSAndrea Arcangeli _total_mapcount += mapcount; 25776d0a07edSAndrea Arcangeli if (total_mapcount) 25786d0a07edSAndrea Arcangeli *total_mapcount = _total_mapcount; 25796d0a07edSAndrea Arcangeli return ret; 25806d0a07edSAndrea Arcangeli } 25816d0a07edSAndrea Arcangeli 2582b8f593cdSHuang Ying /* Racy check whether the huge page can be split */ 2583b8f593cdSHuang Ying bool can_split_huge_page(struct page *page, int *pextra_pins) 2584b8f593cdSHuang Ying { 2585b8f593cdSHuang Ying int extra_pins; 2586b8f593cdSHuang Ying 2587aa5dc07fSMatthew Wilcox /* Additional pins from page cache */ 2588b8f593cdSHuang Ying if (PageAnon(page)) 2589b8f593cdSHuang Ying extra_pins = PageSwapCache(page) ? HPAGE_PMD_NR : 0; 2590b8f593cdSHuang Ying else 2591b8f593cdSHuang Ying extra_pins = HPAGE_PMD_NR; 2592b8f593cdSHuang Ying if (pextra_pins) 2593b8f593cdSHuang Ying *pextra_pins = extra_pins; 2594b8f593cdSHuang Ying return total_mapcount(page) == page_count(page) - extra_pins - 1; 2595b8f593cdSHuang Ying } 2596b8f593cdSHuang Ying 25976d0a07edSAndrea Arcangeli /* 2598e9b61f19SKirill A. Shutemov * This function splits huge page into normal pages. @page can point to any 2599e9b61f19SKirill A. Shutemov * subpage of huge page to split. Split doesn't change the position of @page. 2600e9b61f19SKirill A. Shutemov * 2601e9b61f19SKirill A. Shutemov * Only caller must hold pin on the @page, otherwise split fails with -EBUSY. 2602e9b61f19SKirill A. Shutemov * The huge page must be locked. 2603e9b61f19SKirill A. Shutemov * 2604e9b61f19SKirill A. Shutemov * If @list is null, tail pages will be added to LRU list, otherwise, to @list. 2605e9b61f19SKirill A. Shutemov * 2606e9b61f19SKirill A. Shutemov * Both head page and tail pages will inherit mapping, flags, and so on from 2607e9b61f19SKirill A. Shutemov * the hugepage. 2608e9b61f19SKirill A. Shutemov * 2609e9b61f19SKirill A. Shutemov * GUP pin and PG_locked transferred to @page. Rest subpages can be freed if 2610e9b61f19SKirill A. Shutemov * they are not mapped. 
2611e9b61f19SKirill A. Shutemov * 2612e9b61f19SKirill A. Shutemov * Returns 0 if the hugepage is split successfully. 2613e9b61f19SKirill A. Shutemov * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under 2614e9b61f19SKirill A. Shutemov * us. 2615e9b61f19SKirill A. Shutemov */ 2616e9b61f19SKirill A. Shutemov int split_huge_page_to_list(struct page *page, struct list_head *list) 2617e9b61f19SKirill A. Shutemov { 2618e9b61f19SKirill A. Shutemov struct page *head = compound_head(page); 2619a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(page_to_nid(head)); 2620baa355fdSKirill A. Shutemov struct anon_vma *anon_vma = NULL; 2621baa355fdSKirill A. Shutemov struct address_space *mapping = NULL; 2622baa355fdSKirill A. Shutemov int count, mapcount, extra_pins, ret; 2623d9654322SKirill A. Shutemov bool mlocked; 26240b9b6fffSKirill A. Shutemov unsigned long flags; 2625*006d3ff2SHugh Dickins pgoff_t end; 2626e9b61f19SKirill A. Shutemov 2627e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(is_huge_zero_page(page), page); 2628e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(!PageLocked(page), page); 2629e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(!PageCompound(page), page); 2630e9b61f19SKirill A. Shutemov 263159807685SHuang Ying if (PageWriteback(page)) 263259807685SHuang Ying return -EBUSY; 263359807685SHuang Ying 2634baa355fdSKirill A. Shutemov if (PageAnon(head)) { 2635e9b61f19SKirill A. Shutemov /* 2636baa355fdSKirill A. Shutemov * The caller does not necessarily hold an mmap_sem that would 2637baa355fdSKirill A. Shutemov * prevent the anon_vma from disappearing, so we first take a 2638baa355fdSKirill A. Shutemov * reference to it and then lock the anon_vma for write. This 2639baa355fdSKirill A. Shutemov * is similar to page_lock_anon_vma_read except the write lock 2640baa355fdSKirill A. Shutemov * is taken to serialise against parallel split or collapse 2641baa355fdSKirill A. Shutemov * operations. 2642e9b61f19SKirill A. Shutemov */ 2643e9b61f19SKirill A. Shutemov anon_vma = page_get_anon_vma(head); 2644e9b61f19SKirill A. Shutemov if (!anon_vma) { 2645e9b61f19SKirill A. Shutemov ret = -EBUSY; 2646e9b61f19SKirill A. Shutemov goto out; 2647e9b61f19SKirill A. Shutemov } 2648*006d3ff2SHugh Dickins end = -1; 2649baa355fdSKirill A. Shutemov mapping = NULL; 2650e9b61f19SKirill A. Shutemov anon_vma_lock_write(anon_vma); 2651baa355fdSKirill A. Shutemov } else { 2652baa355fdSKirill A. Shutemov mapping = head->mapping; 2653baa355fdSKirill A. Shutemov 2654baa355fdSKirill A. Shutemov /* Truncated ? */ 2655baa355fdSKirill A. Shutemov if (!mapping) { 2656baa355fdSKirill A. Shutemov ret = -EBUSY; 2657baa355fdSKirill A. Shutemov goto out; 2658baa355fdSKirill A. Shutemov } 2659baa355fdSKirill A. Shutemov 2660baa355fdSKirill A. Shutemov anon_vma = NULL; 2661baa355fdSKirill A. Shutemov i_mmap_lock_read(mapping); 2662*006d3ff2SHugh Dickins 2663*006d3ff2SHugh Dickins /* 2664*006d3ff2SHugh Dickins * __split_huge_page() may need to trim off pages beyond EOF: 2665*006d3ff2SHugh Dickins * but on 32-bit, i_size_read() takes an irq-unsafe seqlock, 2666*006d3ff2SHugh Dickins * which cannot be nested inside the page tree lock. So note 2667*006d3ff2SHugh Dickins * end now: i_size itself may be changed at any moment, but 2668*006d3ff2SHugh Dickins * head page lock is good enough to serialize the trimming. 2669*006d3ff2SHugh Dickins */ 2670*006d3ff2SHugh Dickins end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); 2671baa355fdSKirill A. Shutemov } 2672e9b61f19SKirill A. Shutemov 2673e9b61f19SKirill A.
Shutemov /* 2674906f9cdfSHugh Dickins * Racy check if we can split the page, before unmap_page() will 2675e9b61f19SKirill A. Shutemov * split PMDs 2676e9b61f19SKirill A. Shutemov */ 2677b8f593cdSHuang Ying if (!can_split_huge_page(head, &extra_pins)) { 2678e9b61f19SKirill A. Shutemov ret = -EBUSY; 2679e9b61f19SKirill A. Shutemov goto out_unlock; 2680e9b61f19SKirill A. Shutemov } 2681e9b61f19SKirill A. Shutemov 2682d9654322SKirill A. Shutemov mlocked = PageMlocked(page); 2683906f9cdfSHugh Dickins unmap_page(head); 2684e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(compound_mapcount(head), head); 2685e9b61f19SKirill A. Shutemov 2686d9654322SKirill A. Shutemov /* Make sure the page is not on per-CPU pagevec as it takes pin */ 2687d9654322SKirill A. Shutemov if (mlocked) 2688d9654322SKirill A. Shutemov lru_add_drain(); 2689d9654322SKirill A. Shutemov 2690baa355fdSKirill A. Shutemov /* prevent PageLRU to go away from under us, and freeze lru stats */ 2691a52633d8SMel Gorman spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags); 2692baa355fdSKirill A. Shutemov 2693baa355fdSKirill A. Shutemov if (mapping) { 2694aa5dc07fSMatthew Wilcox XA_STATE(xas, &mapping->i_pages, page_index(head)); 2695baa355fdSKirill A. Shutemov 2696baa355fdSKirill A. Shutemov /* 2697aa5dc07fSMatthew Wilcox * Check if the head page is present in page cache. 2698baa355fdSKirill A. Shutemov * We assume all tail are present too, if head is there. 2699baa355fdSKirill A. Shutemov */ 2700aa5dc07fSMatthew Wilcox xa_lock(&mapping->i_pages); 2701aa5dc07fSMatthew Wilcox if (xas_load(&xas) != head) 2702baa355fdSKirill A. Shutemov goto fail; 2703baa355fdSKirill A. Shutemov } 2704baa355fdSKirill A. Shutemov 27050139aa7bSJoonsoo Kim /* Prevent deferred_split_scan() touching ->_refcount */ 2706baa355fdSKirill A. Shutemov spin_lock(&pgdata->split_queue_lock); 2707e9b61f19SKirill A. Shutemov count = page_count(head); 2708e9b61f19SKirill A. Shutemov mapcount = total_mapcount(head); 2709baa355fdSKirill A. Shutemov if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) { 27109a982250SKirill A. Shutemov if (!list_empty(page_deferred_list(head))) { 2711a3d0a918SKirill A. Shutemov pgdata->split_queue_len--; 27129a982250SKirill A. Shutemov list_del(page_deferred_list(head)); 27139a982250SKirill A. Shutemov } 271465c45377SKirill A. Shutemov if (mapping) 271511fb9989SMel Gorman __dec_node_page_state(page, NR_SHMEM_THPS); 2716baa355fdSKirill A. Shutemov spin_unlock(&pgdata->split_queue_lock); 2717*006d3ff2SHugh Dickins __split_huge_page(page, list, end, flags); 271859807685SHuang Ying if (PageSwapCache(head)) { 271959807685SHuang Ying swp_entry_t entry = { .val = page_private(head) }; 272059807685SHuang Ying 272159807685SHuang Ying ret = split_swap_cluster(entry); 272259807685SHuang Ying } else 2723e9b61f19SKirill A. Shutemov ret = 0; 2724baa355fdSKirill A. Shutemov } else { 2725baa355fdSKirill A. Shutemov if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) { 2726e9b61f19SKirill A. Shutemov pr_alert("total_mapcount: %u, page_count(): %u\n", 2727e9b61f19SKirill A. Shutemov mapcount, count); 2728e9b61f19SKirill A. Shutemov if (PageTail(page)) 2729e9b61f19SKirill A. Shutemov dump_page(head, NULL); 2730bd56086fSKirill A. Shutemov dump_page(page, "total_mapcount(head) > 0"); 2731e9b61f19SKirill A. Shutemov BUG(); 2732baa355fdSKirill A. Shutemov } 2733baa355fdSKirill A. Shutemov spin_unlock(&pgdata->split_queue_lock); 2734baa355fdSKirill A. 
Shutemov fail: if (mapping) 2735b93b0163SMatthew Wilcox xa_unlock(&mapping->i_pages); 2736a52633d8SMel Gorman spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags); 2737906f9cdfSHugh Dickins remap_page(head); 2738e9b61f19SKirill A. Shutemov ret = -EBUSY; 2739e9b61f19SKirill A. Shutemov } 2740e9b61f19SKirill A. Shutemov 2741e9b61f19SKirill A. Shutemov out_unlock: 2742baa355fdSKirill A. Shutemov if (anon_vma) { 2743e9b61f19SKirill A. Shutemov anon_vma_unlock_write(anon_vma); 2744e9b61f19SKirill A. Shutemov put_anon_vma(anon_vma); 2745baa355fdSKirill A. Shutemov } 2746baa355fdSKirill A. Shutemov if (mapping) 2747baa355fdSKirill A. Shutemov i_mmap_unlock_read(mapping); 2748e9b61f19SKirill A. Shutemov out: 2749e9b61f19SKirill A. Shutemov count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); 2750e9b61f19SKirill A. Shutemov return ret; 2751e9b61f19SKirill A. Shutemov } 27529a982250SKirill A. Shutemov 27539a982250SKirill A. Shutemov void free_transhuge_page(struct page *page) 27549a982250SKirill A. Shutemov { 2755a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(page_to_nid(page)); 27569a982250SKirill A. Shutemov unsigned long flags; 27579a982250SKirill A. Shutemov 2758a3d0a918SKirill A. Shutemov spin_lock_irqsave(&pgdata->split_queue_lock, flags); 27599a982250SKirill A. Shutemov if (!list_empty(page_deferred_list(page))) { 2760a3d0a918SKirill A. Shutemov pgdata->split_queue_len--; 27619a982250SKirill A. Shutemov list_del(page_deferred_list(page)); 27629a982250SKirill A. Shutemov } 2763a3d0a918SKirill A. Shutemov spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); 27649a982250SKirill A. Shutemov free_compound_page(page); 27659a982250SKirill A. Shutemov } 27669a982250SKirill A. Shutemov 27679a982250SKirill A. Shutemov void deferred_split_huge_page(struct page *page) 27689a982250SKirill A. Shutemov { 2769a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(page_to_nid(page)); 27709a982250SKirill A. Shutemov unsigned long flags; 27719a982250SKirill A. Shutemov 27729a982250SKirill A. Shutemov VM_BUG_ON_PAGE(!PageTransHuge(page), page); 27739a982250SKirill A. Shutemov 2774a3d0a918SKirill A. Shutemov spin_lock_irqsave(&pgdata->split_queue_lock, flags); 27759a982250SKirill A. Shutemov if (list_empty(page_deferred_list(page))) { 2776f9719a03SKirill A. Shutemov count_vm_event(THP_DEFERRED_SPLIT_PAGE); 2777a3d0a918SKirill A. Shutemov list_add_tail(page_deferred_list(page), &pgdata->split_queue); 2778a3d0a918SKirill A. Shutemov pgdata->split_queue_len++; 27799a982250SKirill A. Shutemov } 2780a3d0a918SKirill A. Shutemov spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); 27819a982250SKirill A. Shutemov } 27829a982250SKirill A. Shutemov 27839a982250SKirill A. Shutemov static unsigned long deferred_split_count(struct shrinker *shrink, 27849a982250SKirill A. Shutemov struct shrink_control *sc) 27859a982250SKirill A. Shutemov { 2786a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(sc->nid); 27876aa7de05SMark Rutland return READ_ONCE(pgdata->split_queue_len); 27889a982250SKirill A. Shutemov } 27899a982250SKirill A. Shutemov 27909a982250SKirill A. Shutemov static unsigned long deferred_split_scan(struct shrinker *shrink, 27919a982250SKirill A. Shutemov struct shrink_control *sc) 27929a982250SKirill A. Shutemov { 2793a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(sc->nid); 27949a982250SKirill A. Shutemov unsigned long flags; 27959a982250SKirill A. Shutemov LIST_HEAD(list), *pos, *next; 27969a982250SKirill A. 
Shutemov struct page *page; 27979a982250SKirill A. Shutemov int split = 0; 27989a982250SKirill A. Shutemov 2799a3d0a918SKirill A. Shutemov spin_lock_irqsave(&pgdata->split_queue_lock, flags); 28009a982250SKirill A. Shutemov /* Take pin on all head pages to avoid freeing them under us */ 2801ae026204SKirill A. Shutemov list_for_each_safe(pos, next, &pgdata->split_queue) { 28029a982250SKirill A. Shutemov page = list_entry((void *)pos, struct page, mapping); 28039a982250SKirill A. Shutemov page = compound_head(page); 2804e3ae1953SKirill A. Shutemov if (get_page_unless_zero(page)) { 2805e3ae1953SKirill A. Shutemov list_move(page_deferred_list(page), &list); 2806e3ae1953SKirill A. Shutemov } else { 2807e3ae1953SKirill A. Shutemov /* We lost race with put_compound_page() */ 28089a982250SKirill A. Shutemov list_del_init(page_deferred_list(page)); 2809a3d0a918SKirill A. Shutemov pgdata->split_queue_len--; 28109a982250SKirill A. Shutemov } 2811e3ae1953SKirill A. Shutemov if (!--sc->nr_to_scan) 2812e3ae1953SKirill A. Shutemov break; 28139a982250SKirill A. Shutemov } 2814a3d0a918SKirill A. Shutemov spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); 28159a982250SKirill A. Shutemov 28169a982250SKirill A. Shutemov list_for_each_safe(pos, next, &list) { 28179a982250SKirill A. Shutemov page = list_entry((void *)pos, struct page, mapping); 2818fa41b900SKirill A. Shutemov if (!trylock_page(page)) 2819fa41b900SKirill A. Shutemov goto next; 28209a982250SKirill A. Shutemov /* split_huge_page() removes page from list on success */ 28219a982250SKirill A. Shutemov if (!split_huge_page(page)) 28229a982250SKirill A. Shutemov split++; 28239a982250SKirill A. Shutemov unlock_page(page); 2824fa41b900SKirill A. Shutemov next: 28259a982250SKirill A. Shutemov put_page(page); 28269a982250SKirill A. Shutemov } 28279a982250SKirill A. Shutemov 2828a3d0a918SKirill A. Shutemov spin_lock_irqsave(&pgdata->split_queue_lock, flags); 2829a3d0a918SKirill A. Shutemov list_splice_tail(&list, &pgdata->split_queue); 2830a3d0a918SKirill A. Shutemov spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); 28319a982250SKirill A. Shutemov 2832cb8d68ecSKirill A. Shutemov /* 2833cb8d68ecSKirill A. Shutemov * Stop shrinker if we didn't split any page, but the queue is empty. 2834cb8d68ecSKirill A. Shutemov * This can happen if pages were freed under us. 2835cb8d68ecSKirill A. Shutemov */ 2836cb8d68ecSKirill A. Shutemov if (!split && list_empty(&pgdata->split_queue)) 2837cb8d68ecSKirill A. Shutemov return SHRINK_STOP; 2838cb8d68ecSKirill A. Shutemov return split; 28399a982250SKirill A. Shutemov } 28409a982250SKirill A. Shutemov 28419a982250SKirill A. Shutemov static struct shrinker deferred_split_shrinker = { 28429a982250SKirill A. Shutemov .count_objects = deferred_split_count, 28439a982250SKirill A. Shutemov .scan_objects = deferred_split_scan, 28449a982250SKirill A. Shutemov .seeks = DEFAULT_SEEKS, 2845a3d0a918SKirill A. Shutemov .flags = SHRINKER_NUMA_AWARE, 28469a982250SKirill A. Shutemov }; 284749071d43SKirill A. Shutemov 284849071d43SKirill A. Shutemov #ifdef CONFIG_DEBUG_FS 284949071d43SKirill A. Shutemov static int split_huge_pages_set(void *data, u64 val) 285049071d43SKirill A. Shutemov { 285149071d43SKirill A. Shutemov struct zone *zone; 285249071d43SKirill A. Shutemov struct page *page; 285349071d43SKirill A. Shutemov unsigned long pfn, max_zone_pfn; 285449071d43SKirill A. Shutemov unsigned long total = 0, split = 0; 285549071d43SKirill A. Shutemov 285649071d43SKirill A. Shutemov if (val != 1) 285749071d43SKirill A. 
Shutemov return -EINVAL; 285849071d43SKirill A. Shutemov 285949071d43SKirill A. Shutemov for_each_populated_zone(zone) { 286049071d43SKirill A. Shutemov max_zone_pfn = zone_end_pfn(zone); 286149071d43SKirill A. Shutemov for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { 286249071d43SKirill A. Shutemov if (!pfn_valid(pfn)) 286349071d43SKirill A. Shutemov continue; 286449071d43SKirill A. Shutemov 286549071d43SKirill A. Shutemov page = pfn_to_page(pfn); 286649071d43SKirill A. Shutemov if (!get_page_unless_zero(page)) 286749071d43SKirill A. Shutemov continue; 286849071d43SKirill A. Shutemov 286949071d43SKirill A. Shutemov if (zone != page_zone(page)) 287049071d43SKirill A. Shutemov goto next; 287149071d43SKirill A. Shutemov 2872baa355fdSKirill A. Shutemov if (!PageHead(page) || PageHuge(page) || !PageLRU(page)) 287349071d43SKirill A. Shutemov goto next; 287449071d43SKirill A. Shutemov 287549071d43SKirill A. Shutemov total++; 287649071d43SKirill A. Shutemov lock_page(page); 287749071d43SKirill A. Shutemov if (!split_huge_page(page)) 287849071d43SKirill A. Shutemov split++; 287949071d43SKirill A. Shutemov unlock_page(page); 288049071d43SKirill A. Shutemov next: 288149071d43SKirill A. Shutemov put_page(page); 288249071d43SKirill A. Shutemov } 288349071d43SKirill A. Shutemov } 288449071d43SKirill A. Shutemov 2885145bdaa1SYang Shi pr_info("%lu of %lu THP split\n", split, total); 288649071d43SKirill A. Shutemov 288749071d43SKirill A. Shutemov return 0; 288849071d43SKirill A. Shutemov } 288949071d43SKirill A. Shutemov DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set, 289049071d43SKirill A. Shutemov "%llu\n"); 289149071d43SKirill A. Shutemov 289249071d43SKirill A. Shutemov static int __init split_huge_pages_debugfs(void) 289349071d43SKirill A. Shutemov { 289449071d43SKirill A. Shutemov void *ret; 289549071d43SKirill A. Shutemov 2896145bdaa1SYang Shi ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL, 289749071d43SKirill A. Shutemov &split_huge_pages_fops); 289849071d43SKirill A. Shutemov if (!ret) 289949071d43SKirill A. Shutemov pr_warn("Failed to create split_huge_pages in debugfs"); 290049071d43SKirill A. Shutemov return 0; 290149071d43SKirill A. Shutemov } 290249071d43SKirill A. Shutemov late_initcall(split_huge_pages_debugfs); 290349071d43SKirill A. 
Shutemov #endif 2904616b8371SZi Yan 2905616b8371SZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 2906616b8371SZi Yan void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, 2907616b8371SZi Yan struct page *page) 2908616b8371SZi Yan { 2909616b8371SZi Yan struct vm_area_struct *vma = pvmw->vma; 2910616b8371SZi Yan struct mm_struct *mm = vma->vm_mm; 2911616b8371SZi Yan unsigned long address = pvmw->address; 2912616b8371SZi Yan pmd_t pmdval; 2913616b8371SZi Yan swp_entry_t entry; 2914ab6e3d09SNaoya Horiguchi pmd_t pmdswp; 2915616b8371SZi Yan 2916616b8371SZi Yan if (!(pvmw->pmd && !pvmw->pte)) 2917616b8371SZi Yan return; 2918616b8371SZi Yan 2919616b8371SZi Yan flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); 2920616b8371SZi Yan pmdval = *pvmw->pmd; 2921616b8371SZi Yan pmdp_invalidate(vma, address, pvmw->pmd); 2922616b8371SZi Yan if (pmd_dirty(pmdval)) 2923616b8371SZi Yan set_page_dirty(page); 2924616b8371SZi Yan entry = make_migration_entry(page, pmd_write(pmdval)); 2925ab6e3d09SNaoya Horiguchi pmdswp = swp_entry_to_pmd(entry); 2926ab6e3d09SNaoya Horiguchi if (pmd_soft_dirty(pmdval)) 2927ab6e3d09SNaoya Horiguchi pmdswp = pmd_swp_mksoft_dirty(pmdswp); 2928ab6e3d09SNaoya Horiguchi set_pmd_at(mm, address, pvmw->pmd, pmdswp); 2929616b8371SZi Yan page_remove_rmap(page, true); 2930616b8371SZi Yan put_page(page); 2931616b8371SZi Yan } 2932616b8371SZi Yan 2933616b8371SZi Yan void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) 2934616b8371SZi Yan { 2935616b8371SZi Yan struct vm_area_struct *vma = pvmw->vma; 2936616b8371SZi Yan struct mm_struct *mm = vma->vm_mm; 2937616b8371SZi Yan unsigned long address = pvmw->address; 2938616b8371SZi Yan unsigned long mmun_start = address & HPAGE_PMD_MASK; 2939616b8371SZi Yan pmd_t pmde; 2940616b8371SZi Yan swp_entry_t entry; 2941616b8371SZi Yan 2942616b8371SZi Yan if (!(pvmw->pmd && !pvmw->pte)) 2943616b8371SZi Yan return; 2944616b8371SZi Yan 2945616b8371SZi Yan entry = pmd_to_swp_entry(*pvmw->pmd); 2946616b8371SZi Yan get_page(new); 2947616b8371SZi Yan pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot)); 2948ab6e3d09SNaoya Horiguchi if (pmd_swp_soft_dirty(*pvmw->pmd)) 2949ab6e3d09SNaoya Horiguchi pmde = pmd_mksoft_dirty(pmde); 2950616b8371SZi Yan if (is_write_migration_entry(entry)) 2951f55e1014SLinus Torvalds pmde = maybe_pmd_mkwrite(pmde, vma); 2952616b8371SZi Yan 2953616b8371SZi Yan flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE); 2954e71769aeSNaoya Horiguchi if (PageAnon(new)) 2955616b8371SZi Yan page_add_anon_rmap(new, vma, mmun_start, true); 2956e71769aeSNaoya Horiguchi else 2957e71769aeSNaoya Horiguchi page_add_file_rmap(new, true); 2958616b8371SZi Yan set_pmd_at(mm, mmun_start, pvmw->pmd, pmde); 2959e125fe40SKirill A. Shutemov if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new)) 2960616b8371SZi Yan mlock_vma_page(new); 2961616b8371SZi Yan update_mmu_cache_pmd(vma, address, pvmw->pmd); 2962616b8371SZi Yan } 2963616b8371SZi Yan #endif 2964
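
A usage illustration for the debugfs knob registered earlier by split_huge_pages_debugfs(): split_huge_pages_set() accepts only the value 1, then walks every populated zone, tries to split each transparent huge page it can pin, and reports the totals via pr_info(). The sketch below is a minimal user-space program and is not part of huge_memory.c; it assumes debugfs is mounted at the conventional /sys/kernel/debug and that the kernel was built with CONFIG_DEBUG_FS and CONFIG_TRANSPARENT_HUGEPAGE.

/* Minimal user-space sketch (assumes root and a mounted debugfs). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/split_huge_pages";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");		/* debugfs not mounted, or not root */
		return 1;
	}
	/* split_huge_pages_set() rejects any value other than 1 with -EINVAL */
	if (write(fd, "1", 1) != 1) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	/* the "%lu of %lu THP split" summary is printed to the kernel log */
	printf("THP split requested; see dmesg for the result\n");
	return 0;
}

Because the file is a simple attribute, a root shell one-liner such as "echo 1 > /sys/kernel/debug/split_huge_pages" is equivalent to the program above.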