// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2009  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

static struct shrinker deferred_split_shrinker;

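/*
 * Editor's note (not in the original source): the huge zero page below is
 * allocated lazily on first use and freed again by huge_zero_page_shrinker
 * once only the internal reference remains.
 */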
static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;

bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	/* The addr is used to check if the vma size fits */
	unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE;

	if (!transhuge_vma_suitable(vma, addr))
		return false;
	if (vma_is_anonymous(vma))
		return __transparent_hugepage_enabled(vma);
	if (vma_is_shmem(vma))
		return shmem_huge_enabled(vma);

	return false;
}

static struct page *get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return READ_ONCE(huge_zero_page);

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return NULL;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return READ_ONCE(huge_zero_page);
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

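/*
 * Editor's note (not in the original source): the per-mm wrappers below let
 * each mm take at most one reference on the global huge zero page, recorded
 * via MMF_HUGE_ZERO_PAGE and dropped from mm_put_huge_zero_page().
 */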
struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}

void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};

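/*
 * Editor's note (not in the original source): the sysfs attributes defined
 * below appear under /sys/kernel/mm/transparent_hugepage/.
 */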
#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "[always] madvise never\n");
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}

static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);

ssize_t single_hugepage_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	return sprintf(buf, "%d\n",
		       !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "[always] defer defer+madvise madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always [defer] defer+madvise madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always defer [defer+madvise] madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always defer defer+madvise [madvise] never\n");
	return sprintf(buf, "always defer defer+madvise madvise [never]\n");
}

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer+madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);

static ssize_t use_zero_page_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr =
	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);

#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#ifdef CONFIG_SHMEM
	&shmem_enabled_attr.attr,
#endif
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 0;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = register_shrinker(&huge_zero_page_shrinker);
	if (err)
		goto err_hzp_shrinker;
	err = register_shrinker(&deferred_split_shrinker);
	if (err)
		goto err_split_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save.  The admin can still enable it through /sys.
	 */
	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
	unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

#ifdef CONFIG_MEMCG
static inline struct deferred_split *get_deferred_split_queue(struct page *page)
{
	struct mem_cgroup *memcg = compound_head(page)->mem_cgroup;
	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));

	if (memcg)
		return &memcg->deferred_split_queue;
	else
		return &pgdat->deferred_split_queue;
}
#else
static inline struct deferred_split *get_deferred_split_queue(struct page *page)
{
	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));

	return &pgdat->deferred_split_queue;
}
#endif

void prep_transhuge_page(struct page *page)
{
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */

	INIT_LIST_HEAD(page_deferred_list(page));
	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}

bool is_transparent_hugepage(struct page *page)
{
	if (!PageCompound(page))
		return false;

	page = compound_head(page);
	return is_huge_zero_page(page) ||
	       page[1].compound_dtor == TRANSHUGE_PAGE_DTOR;
}
EXPORT_SYMBOL_GPL(is_transparent_hugepage);

static unsigned long __thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size)
{
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad, ret;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
					      off >> PAGE_SHIFT, flags);

	/*
	 * The failure might be due to length padding. The caller will retry
	 * without the padding.
	 */
	if (IS_ERR_VALUE(ret))
		return 0;

	/*
	 * Do not try to align to THP boundary if allocation at the address
	 * hint succeeds.
	 */
	if (ret == addr)
		return addr;

	ret += (off - ret) & (size - 1);
	return ret;
}

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long ret;
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
		goto out;

	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
	if (ret)
		return ret;
out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);

static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
			struct page *page, gfp_t gfp)
{
	struct vm_area_struct *vma = vmf->vma;
	pgtable_t pgtable;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	vm_fault_t ret = 0;

	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (mem_cgroup_charge(page, vma->vm_mm, gfp)) {
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
		return VM_FAULT_FALLBACK;
	}
	cgroup_throttle_swaprate(page, gfp);

	pgtable = pte_alloc_one(vma->vm_mm);
	if (unlikely(!pgtable)) {
		ret = VM_FAULT_OOM;
		goto release;
	}

	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__SetPageUptodate(page);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_none(*vmf->pmd))) {
		goto unlock_release;
	} else {
		pmd_t entry;

		ret = check_stable_address_space(vma->vm_mm);
		if (ret)
			goto unlock_release;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			vm_fault_t ret2;

			spin_unlock(vmf->ptl);
			put_page(page);
			pte_free(vma->vm_mm, pgtable);
			ret2 = handle_userfault(vmf, VM_UFFD_MISSING);
			VM_BUG_ON(ret2 & VM_FAULT_FALLBACK);
			return ret2;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr, true);
		lru_cache_add_active_or_unevictable(page, vma);
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(vma->vm_mm);
		spin_unlock(vmf->ptl);
		count_vm_event(THP_FAULT_ALLOC);
		count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
	}

	return 0;
unlock_release:
	spin_unlock(vmf->ptl);
release:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	put_page(page);
	return ret;

}

/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */
static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
{
	const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);

	/* Always do synchronous compaction */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);

	/* Kick kcompactd and fail quickly */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;

	/* Synchronous compaction if madvised, otherwise kick kcompactd */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM :
					__GFP_KSWAPD_RECLAIM);

	/* Only do synchronous compaction if madvised */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
		       (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);

	return GFP_TRANSHUGE_LIGHT;
}

/* Caller must hold page table lock. */
static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return false;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	if (pgtable)
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
	return true;
}

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	gfp_t gfp;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

	if (!transhuge_vma_suitable(vma, haddr))
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
		return VM_FAULT_OOM;
	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
			!mm_forbids_zeropage(vma->vm_mm) &&
			transparent_hugepage_use_zero_page()) {
		pgtable_t pgtable;
		struct page *zero_page;
		bool set;
		vm_fault_t ret;
		pgtable = pte_alloc_one(vma->vm_mm);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = mm_get_huge_zero_page(vma->vm_mm);
		if (unlikely(!zero_page)) {
			pte_free(vma->vm_mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		ret = 0;
		set = false;
		if (pmd_none(*vmf->pmd)) {
			ret = check_stable_address_space(vma->vm_mm);
			if (ret) {
				spin_unlock(vmf->ptl);
			} else if (userfaultfd_missing(vma)) {
				spin_unlock(vmf->ptl);
				ret = handle_userfault(vmf, VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_page(pgtable, vma->vm_mm, vma,
						   haddr, vmf->pmd, zero_page);
				spin_unlock(vmf->ptl);
				set = true;
			}
		} else
			spin_unlock(vmf->ptl);
		if (!set)
			pte_free(vma->vm_mm, pgtable);
		return ret;
	}
	gfp = alloc_hugepage_direct_gfpmask(vma);
	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
	if (unlikely(!page)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	prep_transhuge_page(page);
	return __do_huge_pmd_anonymous_page(vmf, page, gfp);
}

static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
		pgtable_t pgtable)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!pmd_none(*pmd)) {
		if (write) {
			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
				goto out_unlock;
			}
			entry = pmd_mkyoung(*pmd);
			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
				update_mmu_cache_pmd(vma, addr, pmd);
		}

		goto out_unlock;
	}

	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pmd_mkdevmap(entry);
	if (write) {
		entry = pmd_mkyoung(pmd_mkdirty(entry));
		entry = maybe_pmd_mkwrite(entry, vma);
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		mm_inc_nr_ptes(mm);
		pgtable = NULL;
	}

	set_pmd_at(mm, addr, pmd, entry);
	update_mmu_cache_pmd(vma, addr, pmd);

out_unlock:
	spin_unlock(ptl);
	if (pgtable)
		pte_free(mm, pgtable);
}

/**
 * vmf_insert_pfn_pmd_prot - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @pgprot: page protection to use
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info and
 * also consult the vmf_insert_mixed_prot() documentation when
 * @pgprot != @vmf->vma->vm_page_prot.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write)
{
	unsigned long addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgtable_t pgtable = NULL;

	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd_prot);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);
	return pud;
}

static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	pud_t entry;
	spinlock_t *ptl;

	ptl = pud_lock(mm, pud);
	if (!pud_none(*pud)) {
		if (write) {
			if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pud(*pud));
				goto out_unlock;
			}
			entry = pud_mkyoung(*pud);
			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
				update_mmu_cache_pud(vma, addr, pud);
		}
		goto out_unlock;
	}

	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pud_mkdevmap(entry);
	if (write) {
		entry = pud_mkyoung(pud_mkdirty(entry));
		entry = maybe_pud_mkwrite(entry, vma);
	}
	set_pud_at(mm, addr, pud, entry);
	update_mmu_cache_pud(vma, addr, pud);

out_unlock:
	spin_unlock(ptl);
}

/**
 * vmf_insert_pfn_pud_prot - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @pgprot: page protection to use
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info and
 * also consult the vmf_insert_mixed_prot() documentation when
 * @pgprot != @vmf->vma->vm_page_prot.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write)
{
	unsigned long addr = vmf->address & PUD_MASK;
	struct vm_area_struct *vma = vmf->vma;

	/*
	 * If we had pud_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud_prot);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags)
{
	pmd_t _pmd;

	_pmd = pmd_mkyoung(*pmd);
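	/* Editor's note (not in the original source): a FOLL_WRITE touch also dirties the entry, like a real write. */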
	if (flags & FOLL_WRITE)
		_pmd = pmd_mkdirty(_pmd);
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				pmd, _pmd, flags & FOLL_WRITE))
		update_mmu_cache_pmd(vma, addr, pmd);
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pmd_pfn(*pmd);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	/*
	 * When we COW a devmap PMD entry, we split it into PTEs, so we should
	 * not be in this function with `flags & FOLL_COW` set.
	 */
	WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return NULL;

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		return NULL;

	if (pmd_present(*pmd) && pmd_devmap(*pmd))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	if (!try_grab_page(page, flags))
		page = ERR_PTR(-ENOMEM);

	return page;
}

int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable = NULL;
	int ret = -ENOMEM;

	/* Skip if can be re-filled on fault */
	if (!vma_is_anonymous(vma))
		return 0;

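	/*
	 * Editor's note (not in the original source): a page table is
	 * preallocated here and deposited with the copied huge pmd below,
	 * so a later split of the child mapping cannot fail on allocation.
	 */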
	pgtable = pte_alloc_one(dst_mm);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
	src_ptl = pmd_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;

	/*
	 * Make sure the _PAGE_UFFD_WP bit is cleared if the new VMA
	 * does not have the VM_UFFD_WP, which means that the uffd
	 * fork event is not enabled.
	 */
	if (!(vma->vm_flags & VM_UFFD_WP))
		pmd = pmd_clear_uffd_wp(pmd);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	if (unlikely(is_swap_pmd(pmd))) {
		swp_entry_t entry = pmd_to_swp_entry(pmd);

		VM_BUG_ON(!is_pmd_migration_entry(pmd));
		if (is_write_migration_entry(entry)) {
			make_migration_entry_read(&entry);
			pmd = swp_entry_to_pmd(entry);
			if (pmd_swp_soft_dirty(*src_pmd))
				pmd = pmd_swp_mksoft_dirty(pmd);
			set_pmd_at(src_mm, addr, src_pmd, pmd);
		}
		add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(dst_mm);
		pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
		ret = 0;
		goto out_unlock;
	}
#endif

	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	/*
	 * When page table lock is held, the huge zero pmd should not be
	 * under splitting since we don't split the page itself, only pmd to
	 * a page table.
	 */
	if (is_huge_zero_pmd(pmd)) {
		struct page *zero_page;
		/*
		 * get_huge_zero_page() will never allocate a new page here,
		 * since we already have a zero page to copy. It just takes a
		 * reference.
		 */
		zero_page = mm_get_huge_zero_page(dst_mm);
		set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
				zero_page);
		ret = 0;
		goto out_unlock;
	}

	src_page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
	get_page(src_page);
Shutemov page_dup_rmap(src_page, true); 110071e3aac0SAndrea Arcangeli add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); 1101c4812909SKirill A. Shutemov mm_inc_nr_ptes(dst_mm); 11025c7fb56eSDan Williams pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); 110371e3aac0SAndrea Arcangeli 110471e3aac0SAndrea Arcangeli pmdp_set_wrprotect(src_mm, addr, src_pmd); 110571e3aac0SAndrea Arcangeli pmd = pmd_mkold(pmd_wrprotect(pmd)); 110671e3aac0SAndrea Arcangeli set_pmd_at(dst_mm, addr, dst_pmd, pmd); 110771e3aac0SAndrea Arcangeli 110871e3aac0SAndrea Arcangeli ret = 0; 110971e3aac0SAndrea Arcangeli out_unlock: 1110c4088ebdSKirill A. Shutemov spin_unlock(src_ptl); 1111c4088ebdSKirill A. Shutemov spin_unlock(dst_ptl); 111271e3aac0SAndrea Arcangeli out: 111371e3aac0SAndrea Arcangeli return ret; 111471e3aac0SAndrea Arcangeli } 111571e3aac0SAndrea Arcangeli 1116a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 1117a00cc7d9SMatthew Wilcox static void touch_pud(struct vm_area_struct *vma, unsigned long addr, 1118a8f97366SKirill A. Shutemov pud_t *pud, int flags) 1119a00cc7d9SMatthew Wilcox { 1120a00cc7d9SMatthew Wilcox pud_t _pud; 1121a00cc7d9SMatthew Wilcox 1122a8f97366SKirill A. Shutemov _pud = pud_mkyoung(*pud); 1123a8f97366SKirill A. Shutemov if (flags & FOLL_WRITE) 1124a8f97366SKirill A. Shutemov _pud = pud_mkdirty(_pud); 1125a00cc7d9SMatthew Wilcox if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK, 1126a8f97366SKirill A. Shutemov pud, _pud, flags & FOLL_WRITE)) 1127a00cc7d9SMatthew Wilcox update_mmu_cache_pud(vma, addr, pud); 1128a00cc7d9SMatthew Wilcox } 1129a00cc7d9SMatthew Wilcox 1130a00cc7d9SMatthew Wilcox struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, 1131df06b37fSKeith Busch pud_t *pud, int flags, struct dev_pagemap **pgmap) 1132a00cc7d9SMatthew Wilcox { 1133a00cc7d9SMatthew Wilcox unsigned long pfn = pud_pfn(*pud); 1134a00cc7d9SMatthew Wilcox struct mm_struct *mm = vma->vm_mm; 1135a00cc7d9SMatthew Wilcox struct page *page; 1136a00cc7d9SMatthew Wilcox 1137a00cc7d9SMatthew Wilcox assert_spin_locked(pud_lockptr(mm, pud)); 1138a00cc7d9SMatthew Wilcox 1139f6f37321SLinus Torvalds if (flags & FOLL_WRITE && !pud_write(*pud)) 1140a00cc7d9SMatthew Wilcox return NULL; 1141a00cc7d9SMatthew Wilcox 11423faa52c0SJohn Hubbard /* FOLL_GET and FOLL_PIN are mutually exclusive. */ 11433faa52c0SJohn Hubbard if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) == 11443faa52c0SJohn Hubbard (FOLL_PIN | FOLL_GET))) 11453faa52c0SJohn Hubbard return NULL; 11463faa52c0SJohn Hubbard 1147a00cc7d9SMatthew Wilcox if (pud_present(*pud) && pud_devmap(*pud)) 1148a00cc7d9SMatthew Wilcox /* pass */; 1149a00cc7d9SMatthew Wilcox else 1150a00cc7d9SMatthew Wilcox return NULL; 1151a00cc7d9SMatthew Wilcox 1152a00cc7d9SMatthew Wilcox if (flags & FOLL_TOUCH) 1153a8f97366SKirill A. Shutemov touch_pud(vma, addr, pud, flags); 1154a00cc7d9SMatthew Wilcox 1155a00cc7d9SMatthew Wilcox /* 1156a00cc7d9SMatthew Wilcox * device mapped pages can only be returned if the 1157a00cc7d9SMatthew Wilcox * caller will manage the page reference count. 
11583faa52c0SJohn Hubbard * 11593faa52c0SJohn Hubbard * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here: 1160a00cc7d9SMatthew Wilcox */ 11613faa52c0SJohn Hubbard if (!(flags & (FOLL_GET | FOLL_PIN))) 1162a00cc7d9SMatthew Wilcox return ERR_PTR(-EEXIST); 1163a00cc7d9SMatthew Wilcox 1164a00cc7d9SMatthew Wilcox pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT; 1165df06b37fSKeith Busch *pgmap = get_dev_pagemap(pfn, *pgmap); 1166df06b37fSKeith Busch if (!*pgmap) 1167a00cc7d9SMatthew Wilcox return ERR_PTR(-EFAULT); 1168a00cc7d9SMatthew Wilcox page = pfn_to_page(pfn); 11693faa52c0SJohn Hubbard if (!try_grab_page(page, flags)) 11703faa52c0SJohn Hubbard page = ERR_PTR(-ENOMEM); 1171a00cc7d9SMatthew Wilcox 1172a00cc7d9SMatthew Wilcox return page; 1173a00cc7d9SMatthew Wilcox } 1174a00cc7d9SMatthew Wilcox 1175a00cc7d9SMatthew Wilcox int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, 1176a00cc7d9SMatthew Wilcox pud_t *dst_pud, pud_t *src_pud, unsigned long addr, 1177a00cc7d9SMatthew Wilcox struct vm_area_struct *vma) 1178a00cc7d9SMatthew Wilcox { 1179a00cc7d9SMatthew Wilcox spinlock_t *dst_ptl, *src_ptl; 1180a00cc7d9SMatthew Wilcox pud_t pud; 1181a00cc7d9SMatthew Wilcox int ret; 1182a00cc7d9SMatthew Wilcox 1183a00cc7d9SMatthew Wilcox dst_ptl = pud_lock(dst_mm, dst_pud); 1184a00cc7d9SMatthew Wilcox src_ptl = pud_lockptr(src_mm, src_pud); 1185a00cc7d9SMatthew Wilcox spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 1186a00cc7d9SMatthew Wilcox 1187a00cc7d9SMatthew Wilcox ret = -EAGAIN; 1188a00cc7d9SMatthew Wilcox pud = *src_pud; 1189a00cc7d9SMatthew Wilcox if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud))) 1190a00cc7d9SMatthew Wilcox goto out_unlock; 1191a00cc7d9SMatthew Wilcox 1192a00cc7d9SMatthew Wilcox /* 1193a00cc7d9SMatthew Wilcox * When page table lock is held, the huge zero pud should not be 1194a00cc7d9SMatthew Wilcox * under splitting since we don't split the page itself, only pud to 1195a00cc7d9SMatthew Wilcox * a page table. 
1196a00cc7d9SMatthew Wilcox */ 1197a00cc7d9SMatthew Wilcox if (is_huge_zero_pud(pud)) { 1198a00cc7d9SMatthew Wilcox /* No huge zero pud yet */ 1199a00cc7d9SMatthew Wilcox } 1200a00cc7d9SMatthew Wilcox 1201a00cc7d9SMatthew Wilcox pudp_set_wrprotect(src_mm, addr, src_pud); 1202a00cc7d9SMatthew Wilcox pud = pud_mkold(pud_wrprotect(pud)); 1203a00cc7d9SMatthew Wilcox set_pud_at(dst_mm, addr, dst_pud, pud); 1204a00cc7d9SMatthew Wilcox 1205a00cc7d9SMatthew Wilcox ret = 0; 1206a00cc7d9SMatthew Wilcox out_unlock: 1207a00cc7d9SMatthew Wilcox spin_unlock(src_ptl); 1208a00cc7d9SMatthew Wilcox spin_unlock(dst_ptl); 1209a00cc7d9SMatthew Wilcox return ret; 1210a00cc7d9SMatthew Wilcox } 1211a00cc7d9SMatthew Wilcox 1212a00cc7d9SMatthew Wilcox void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) 1213a00cc7d9SMatthew Wilcox { 1214a00cc7d9SMatthew Wilcox pud_t entry; 1215a00cc7d9SMatthew Wilcox unsigned long haddr; 1216a00cc7d9SMatthew Wilcox bool write = vmf->flags & FAULT_FLAG_WRITE; 1217a00cc7d9SMatthew Wilcox 1218a00cc7d9SMatthew Wilcox vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud); 1219a00cc7d9SMatthew Wilcox if (unlikely(!pud_same(*vmf->pud, orig_pud))) 1220a00cc7d9SMatthew Wilcox goto unlock; 1221a00cc7d9SMatthew Wilcox 1222a00cc7d9SMatthew Wilcox entry = pud_mkyoung(orig_pud); 1223a00cc7d9SMatthew Wilcox if (write) 1224a00cc7d9SMatthew Wilcox entry = pud_mkdirty(entry); 1225a00cc7d9SMatthew Wilcox haddr = vmf->address & HPAGE_PUD_MASK; 1226a00cc7d9SMatthew Wilcox if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write)) 1227a00cc7d9SMatthew Wilcox update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud); 1228a00cc7d9SMatthew Wilcox 1229a00cc7d9SMatthew Wilcox unlock: 1230a00cc7d9SMatthew Wilcox spin_unlock(vmf->ptl); 1231a00cc7d9SMatthew Wilcox } 1232a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 1233a00cc7d9SMatthew Wilcox 123482b0f8c3SJan Kara void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd) 1235a1dd450bSWill Deacon { 1236a1dd450bSWill Deacon pmd_t entry; 1237a1dd450bSWill Deacon unsigned long haddr; 123820f664aaSMinchan Kim bool write = vmf->flags & FAULT_FLAG_WRITE; 1239a1dd450bSWill Deacon 124082b0f8c3SJan Kara vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); 124182b0f8c3SJan Kara if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) 1242a1dd450bSWill Deacon goto unlock; 1243a1dd450bSWill Deacon 1244a1dd450bSWill Deacon entry = pmd_mkyoung(orig_pmd); 124520f664aaSMinchan Kim if (write) 124620f664aaSMinchan Kim entry = pmd_mkdirty(entry); 124782b0f8c3SJan Kara haddr = vmf->address & HPAGE_PMD_MASK; 124820f664aaSMinchan Kim if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write)) 124982b0f8c3SJan Kara update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd); 1250a1dd450bSWill Deacon 1251a1dd450bSWill Deacon unlock: 125282b0f8c3SJan Kara spin_unlock(vmf->ptl); 1253a1dd450bSWill Deacon } 1254a1dd450bSWill Deacon 12552b740303SSouptick Joarder vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd) 125671e3aac0SAndrea Arcangeli { 125782b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 12583917c802SKirill A. Shutemov struct page *page; 125982b0f8c3SJan Kara unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 126071e3aac0SAndrea Arcangeli 126182b0f8c3SJan Kara vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); 126281d1b09cSSasha Levin VM_BUG_ON_VMA(!vma->anon_vma, vma); 12633917c802SKirill A. Shutemov 126493b4796dSKirill A. Shutemov if (is_huge_zero_pmd(orig_pmd)) 12653917c802SKirill A. 
Shutemov goto fallback; 12663917c802SKirill A. Shutemov 126782b0f8c3SJan Kara spin_lock(vmf->ptl); 12683917c802SKirill A. Shutemov 12693917c802SKirill A. Shutemov if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { 12703917c802SKirill A. Shutemov spin_unlock(vmf->ptl); 12713917c802SKirill A. Shutemov return 0; 12723917c802SKirill A. Shutemov } 127371e3aac0SAndrea Arcangeli 127471e3aac0SAndrea Arcangeli page = pmd_page(orig_pmd); 1275309381feSSasha Levin VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page); 12763917c802SKirill A. Shutemov 12773917c802SKirill A. Shutemov /* Lock page for reuse_swap_page() */ 1278ba3c4ce6SHuang Ying if (!trylock_page(page)) { 1279ba3c4ce6SHuang Ying get_page(page); 1280ba3c4ce6SHuang Ying spin_unlock(vmf->ptl); 1281ba3c4ce6SHuang Ying lock_page(page); 1282ba3c4ce6SHuang Ying spin_lock(vmf->ptl); 1283ba3c4ce6SHuang Ying if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { 12843917c802SKirill A. Shutemov spin_unlock(vmf->ptl); 1285ba3c4ce6SHuang Ying unlock_page(page); 1286ba3c4ce6SHuang Ying put_page(page); 12873917c802SKirill A. Shutemov return 0; 1288ba3c4ce6SHuang Ying } 1289ba3c4ce6SHuang Ying put_page(page); 1290ba3c4ce6SHuang Ying } 12913917c802SKirill A. Shutemov 12923917c802SKirill A. Shutemov /* 12933917c802SKirill A. Shutemov * We can only reuse the page if nobody else maps the huge page or it's 12943917c802SKirill A. Shutemov * part. 12953917c802SKirill A. Shutemov */ 1296ba3c4ce6SHuang Ying if (reuse_swap_page(page, NULL)) { 129771e3aac0SAndrea Arcangeli pmd_t entry; 129871e3aac0SAndrea Arcangeli entry = pmd_mkyoung(orig_pmd); 1299f55e1014SLinus Torvalds entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 130082b0f8c3SJan Kara if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) 130182b0f8c3SJan Kara update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 1302ba3c4ce6SHuang Ying unlock_page(page); 13033917c802SKirill A. Shutemov spin_unlock(vmf->ptl); 13043917c802SKirill A. Shutemov return VM_FAULT_WRITE; 130571e3aac0SAndrea Arcangeli } 13063917c802SKirill A. Shutemov 1307ba3c4ce6SHuang Ying unlock_page(page); 130882b0f8c3SJan Kara spin_unlock(vmf->ptl); 13093917c802SKirill A. Shutemov fallback: 13103917c802SKirill A. Shutemov __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); 13113917c802SKirill A. Shutemov return VM_FAULT_FALLBACK; 131271e3aac0SAndrea Arcangeli } 131371e3aac0SAndrea Arcangeli 13148310d48bSKeno Fischer /* 131517839856SLinus Torvalds * FOLL_FORCE or a forced COW break can write even to unwritable pmd's, 131617839856SLinus Torvalds * but only after we've gone through a COW cycle and they are dirty. 13178310d48bSKeno Fischer */ 13188310d48bSKeno Fischer static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) 13198310d48bSKeno Fischer { 132017839856SLinus Torvalds return pmd_write(pmd) || ((flags & FOLL_COW) && pmd_dirty(pmd)); 13218310d48bSKeno Fischer } 13228310d48bSKeno Fischer 1323b676b293SDavid Rientjes struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, 132471e3aac0SAndrea Arcangeli unsigned long addr, 132571e3aac0SAndrea Arcangeli pmd_t *pmd, 132671e3aac0SAndrea Arcangeli unsigned int flags) 132771e3aac0SAndrea Arcangeli { 1328b676b293SDavid Rientjes struct mm_struct *mm = vma->vm_mm; 132971e3aac0SAndrea Arcangeli struct page *page = NULL; 133071e3aac0SAndrea Arcangeli 1331c4088ebdSKirill A. 
Shutemov assert_spin_locked(pmd_lockptr(mm, pmd)); 133271e3aac0SAndrea Arcangeli 13338310d48bSKeno Fischer if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags)) 133471e3aac0SAndrea Arcangeli goto out; 133571e3aac0SAndrea Arcangeli 133685facf25SKirill A. Shutemov /* Avoid dumping huge zero page */ 133785facf25SKirill A. Shutemov if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) 133885facf25SKirill A. Shutemov return ERR_PTR(-EFAULT); 133985facf25SKirill A. Shutemov 13402b4847e7SMel Gorman /* Full NUMA hinting faults to serialise migration in fault paths */ 13418a0516edSMel Gorman if ((flags & FOLL_NUMA) && pmd_protnone(*pmd)) 13422b4847e7SMel Gorman goto out; 13432b4847e7SMel Gorman 134471e3aac0SAndrea Arcangeli page = pmd_page(*pmd); 1345ca120cf6SDan Williams VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page); 13463faa52c0SJohn Hubbard 13473faa52c0SJohn Hubbard if (!try_grab_page(page, flags)) 13483faa52c0SJohn Hubbard return ERR_PTR(-ENOMEM); 13493faa52c0SJohn Hubbard 13503565fce3SDan Williams if (flags & FOLL_TOUCH) 1351a8f97366SKirill A. Shutemov touch_pmd(vma, addr, pmd, flags); 13523faa52c0SJohn Hubbard 1353de60f5f1SEric B Munson if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { 1354e90309c9SKirill A. Shutemov /* 1355e90309c9SKirill A. Shutemov * We don't mlock() pte-mapped THPs. This way we can avoid 1356e90309c9SKirill A. Shutemov * leaking mlocked pages into non-VM_LOCKED VMAs. 1357e90309c9SKirill A. Shutemov * 13589a73f61bSKirill A. Shutemov * For anon THP: 13599a73f61bSKirill A. Shutemov * 1360e90309c9SKirill A. Shutemov * In most cases the pmd is the only mapping of the page as we 1361e90309c9SKirill A. Shutemov * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for 1362e90309c9SKirill A. Shutemov * writable private mappings in populate_vma_page_range(). 1363e90309c9SKirill A. Shutemov * 1364e90309c9SKirill A. Shutemov * The only scenario when we have the page shared here is if we 1365e90309c9SKirill A. Shutemov * mlocking read-only mapping shared over fork(). We skip 1366e90309c9SKirill A. Shutemov * mlocking such pages. 13679a73f61bSKirill A. Shutemov * 13689a73f61bSKirill A. Shutemov * For file THP: 13699a73f61bSKirill A. Shutemov * 13709a73f61bSKirill A. Shutemov * We can expect PageDoubleMap() to be stable under page lock: 13719a73f61bSKirill A. Shutemov * for file pages we set it in page_add_file_rmap(), which 13729a73f61bSKirill A. Shutemov * requires page to be locked. 1373e90309c9SKirill A. Shutemov */ 13749a73f61bSKirill A. Shutemov 13759a73f61bSKirill A. Shutemov if (PageAnon(page) && compound_mapcount(page) != 1) 13769a73f61bSKirill A. Shutemov goto skip_mlock; 13779a73f61bSKirill A. Shutemov if (PageDoubleMap(page) || !page->mapping) 13789a73f61bSKirill A. Shutemov goto skip_mlock; 13799a73f61bSKirill A. Shutemov if (!trylock_page(page)) 13809a73f61bSKirill A. Shutemov goto skip_mlock; 13819a73f61bSKirill A. Shutemov if (page->mapping && !PageDoubleMap(page)) 1382b676b293SDavid Rientjes mlock_vma_page(page); 1383b676b293SDavid Rientjes unlock_page(page); 1384b676b293SDavid Rientjes } 13859a73f61bSKirill A. 
Shutemov skip_mlock: 138671e3aac0SAndrea Arcangeli page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; 1387ca120cf6SDan Williams VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page); 138871e3aac0SAndrea Arcangeli 138971e3aac0SAndrea Arcangeli out: 139071e3aac0SAndrea Arcangeli return page; 139171e3aac0SAndrea Arcangeli } 139271e3aac0SAndrea Arcangeli 1393d10e63f2SMel Gorman /* NUMA hinting page fault entry point for trans huge pmds */ 13942b740303SSouptick Joarder vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd) 1395d10e63f2SMel Gorman { 139682b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 1397b8916634SMel Gorman struct anon_vma *anon_vma = NULL; 1398b32967ffSMel Gorman struct page *page; 139982b0f8c3SJan Kara unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 140098fa15f3SAnshuman Khandual int page_nid = NUMA_NO_NODE, this_nid = numa_node_id(); 140190572890SPeter Zijlstra int target_nid, last_cpupid = -1; 14028191acbdSMel Gorman bool page_locked; 14038191acbdSMel Gorman bool migrated = false; 1404b191f9b1SMel Gorman bool was_writable; 14056688cc05SPeter Zijlstra int flags = 0; 1406d10e63f2SMel Gorman 140782b0f8c3SJan Kara vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 140882b0f8c3SJan Kara if (unlikely(!pmd_same(pmd, *vmf->pmd))) 1409d10e63f2SMel Gorman goto out_unlock; 1410d10e63f2SMel Gorman 1411de466bd6SMel Gorman /* 1412de466bd6SMel Gorman * If there are potential migrations, wait for completion and retry 1413de466bd6SMel Gorman * without disrupting NUMA hinting information. Do not relock and 1414de466bd6SMel Gorman * check_same as the page may no longer be mapped. 1415de466bd6SMel Gorman */ 141682b0f8c3SJan Kara if (unlikely(pmd_trans_migrating(*vmf->pmd))) { 141782b0f8c3SJan Kara page = pmd_page(*vmf->pmd); 14183c226c63SMark Rutland if (!get_page_unless_zero(page)) 14193c226c63SMark Rutland goto out_unlock; 142082b0f8c3SJan Kara spin_unlock(vmf->ptl); 14219a1ea439SHugh Dickins put_and_wait_on_page_locked(page); 1422de466bd6SMel Gorman goto out; 1423de466bd6SMel Gorman } 1424de466bd6SMel Gorman 1425d10e63f2SMel Gorman page = pmd_page(pmd); 1426a1a46184SMel Gorman BUG_ON(is_huge_zero_page(page)); 14278191acbdSMel Gorman page_nid = page_to_nid(page); 142890572890SPeter Zijlstra last_cpupid = page_cpupid_last(page); 142903c5a6e1SMel Gorman count_vm_numa_event(NUMA_HINT_FAULTS); 143004bb2f94SRik van Riel if (page_nid == this_nid) { 143103c5a6e1SMel Gorman count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); 143204bb2f94SRik van Riel flags |= TNF_FAULT_LOCAL; 143304bb2f94SRik van Riel } 14344daae3b4SMel Gorman 1435bea66fbdSMel Gorman /* See similar comment in do_numa_page for explanation */ 1436288bc549SAneesh Kumar K.V if (!pmd_savedwrite(pmd)) 14376688cc05SPeter Zijlstra flags |= TNF_NO_GROUP; 14386688cc05SPeter Zijlstra 14396688cc05SPeter Zijlstra /* 1440ff9042b1SMel Gorman * Acquire the page lock to serialise THP migrations but avoid dropping 1441ff9042b1SMel Gorman * page_table_lock if at all possible 1442ff9042b1SMel Gorman */ 1443b8916634SMel Gorman page_locked = trylock_page(page); 1444b8916634SMel Gorman target_nid = mpol_misplaced(page, vma, haddr); 144598fa15f3SAnshuman Khandual if (target_nid == NUMA_NO_NODE) { 1446b8916634SMel Gorman /* If the page was locked, there are no parallel migrations */ 1447a54a407fSMel Gorman if (page_locked) 1448b8916634SMel Gorman goto clear_pmdnuma; 14492b4847e7SMel Gorman } 1450cbee9f88SPeter Zijlstra 1451de466bd6SMel Gorman /* Migration could have started since the pmd_trans_migrating check */ 14522b4847e7SMel 
Gorman if (!page_locked) { 145398fa15f3SAnshuman Khandual page_nid = NUMA_NO_NODE; 14543c226c63SMark Rutland if (!get_page_unless_zero(page)) 14553c226c63SMark Rutland goto out_unlock; 145682b0f8c3SJan Kara spin_unlock(vmf->ptl); 14579a1ea439SHugh Dickins put_and_wait_on_page_locked(page); 1458b8916634SMel Gorman goto out; 1459b8916634SMel Gorman } 1460b8916634SMel Gorman 14612b4847e7SMel Gorman /* 14622b4847e7SMel Gorman * Page is misplaced. Page lock serialises migrations. Acquire anon_vma 14632b4847e7SMel Gorman * to serialise splits 14642b4847e7SMel Gorman */ 1465b8916634SMel Gorman get_page(page); 146682b0f8c3SJan Kara spin_unlock(vmf->ptl); 1467b8916634SMel Gorman anon_vma = page_lock_anon_vma_read(page); 1468b32967ffSMel Gorman 1469c69307d5SPeter Zijlstra /* Confirm the PMD did not change while page_table_lock was released */ 147082b0f8c3SJan Kara spin_lock(vmf->ptl); 147182b0f8c3SJan Kara if (unlikely(!pmd_same(pmd, *vmf->pmd))) { 1472b32967ffSMel Gorman unlock_page(page); 1473b32967ffSMel Gorman put_page(page); 147498fa15f3SAnshuman Khandual page_nid = NUMA_NO_NODE; 1475b32967ffSMel Gorman goto out_unlock; 1476b32967ffSMel Gorman } 1477ff9042b1SMel Gorman 1478c3a489caSMel Gorman /* Bail if we fail to protect against THP splits for any reason */ 1479c3a489caSMel Gorman if (unlikely(!anon_vma)) { 1480c3a489caSMel Gorman put_page(page); 148198fa15f3SAnshuman Khandual page_nid = NUMA_NO_NODE; 1482c3a489caSMel Gorman goto clear_pmdnuma; 1483c3a489caSMel Gorman } 1484c3a489caSMel Gorman 1485a54a407fSMel Gorman /* 14868b1b436dSPeter Zijlstra * Since we took the NUMA fault, we must have observed the !accessible 14878b1b436dSPeter Zijlstra * bit. Make sure all other CPUs agree with that, to avoid them 14888b1b436dSPeter Zijlstra * modifying the page we're about to migrate. 14898b1b436dSPeter Zijlstra * 14908b1b436dSPeter Zijlstra * Must be done under PTL such that we'll observe the relevant 1491ccde85baSPeter Zijlstra * inc_tlb_flush_pending(). 1492ccde85baSPeter Zijlstra * 1493ccde85baSPeter Zijlstra * We are not sure a pending tlb flush here is for a huge page 1494ccde85baSPeter Zijlstra * mapping or not. Hence use the tlb range variant 14958b1b436dSPeter Zijlstra */ 14967066f0f9SAndrea Arcangeli if (mm_tlb_flush_pending(vma->vm_mm)) { 1497ccde85baSPeter Zijlstra flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE); 14987066f0f9SAndrea Arcangeli /* 14997066f0f9SAndrea Arcangeli * change_huge_pmd() released the pmd lock before 15007066f0f9SAndrea Arcangeli * invalidating the secondary MMUs sharing the primary 15017066f0f9SAndrea Arcangeli * MMU pagetables (with ->invalidate_range()). The 15027066f0f9SAndrea Arcangeli * mmu_notifier_invalidate_range_end() (which 15037066f0f9SAndrea Arcangeli * internally calls ->invalidate_range()) in 15047066f0f9SAndrea Arcangeli * change_pmd_range() will run after us, so we can't 15057066f0f9SAndrea Arcangeli * rely on it here and we need an explicit invalidate. 15067066f0f9SAndrea Arcangeli */ 15077066f0f9SAndrea Arcangeli mmu_notifier_invalidate_range(vma->vm_mm, haddr, 15087066f0f9SAndrea Arcangeli haddr + HPAGE_PMD_SIZE); 15097066f0f9SAndrea Arcangeli } 15108b1b436dSPeter Zijlstra 15118b1b436dSPeter Zijlstra /* 1512a54a407fSMel Gorman * Migrate the THP to the requested node, returns with page unlocked 15138a0516edSMel Gorman * and access rights restored. 1514a54a407fSMel Gorman */ 151582b0f8c3SJan Kara spin_unlock(vmf->ptl); 15168b1b436dSPeter Zijlstra 1517bae473a4SKirill A. 
Shutemov migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma, 151882b0f8c3SJan Kara vmf->pmd, pmd, vmf->address, page, target_nid); 15196688cc05SPeter Zijlstra if (migrated) { 15206688cc05SPeter Zijlstra flags |= TNF_MIGRATED; 15218191acbdSMel Gorman page_nid = target_nid; 1522074c2381SMel Gorman } else 1523074c2381SMel Gorman flags |= TNF_MIGRATE_FAIL; 1524b32967ffSMel Gorman 15258191acbdSMel Gorman goto out; 15264daae3b4SMel Gorman clear_pmdnuma: 1527a54a407fSMel Gorman BUG_ON(!PageLocked(page)); 1528288bc549SAneesh Kumar K.V was_writable = pmd_savedwrite(pmd); 15294d942466SMel Gorman pmd = pmd_modify(pmd, vma->vm_page_prot); 1530b7b04004SMel Gorman pmd = pmd_mkyoung(pmd); 1531b191f9b1SMel Gorman if (was_writable) 1532b191f9b1SMel Gorman pmd = pmd_mkwrite(pmd); 153382b0f8c3SJan Kara set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); 153482b0f8c3SJan Kara update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 1535a54a407fSMel Gorman unlock_page(page); 1536d10e63f2SMel Gorman out_unlock: 153782b0f8c3SJan Kara spin_unlock(vmf->ptl); 1538b8916634SMel Gorman 1539b8916634SMel Gorman out: 1540b8916634SMel Gorman if (anon_vma) 1541b8916634SMel Gorman page_unlock_anon_vma_read(anon_vma); 1542b8916634SMel Gorman 154398fa15f3SAnshuman Khandual if (page_nid != NUMA_NO_NODE) 154482b0f8c3SJan Kara task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, 15459a8b300fSAneesh Kumar K.V flags); 15468191acbdSMel Gorman 1547d10e63f2SMel Gorman return 0; 1548d10e63f2SMel Gorman } 1549d10e63f2SMel Gorman 1550319904adSHuang Ying /* 1551319904adSHuang Ying * Return true if we do MADV_FREE successfully on entire pmd page. 1552319904adSHuang Ying * Otherwise, return false. 1553319904adSHuang Ying */ 1554319904adSHuang Ying bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1555b8d3c4c3SMinchan Kim pmd_t *pmd, unsigned long addr, unsigned long next) 1556b8d3c4c3SMinchan Kim { 1557b8d3c4c3SMinchan Kim spinlock_t *ptl; 1558b8d3c4c3SMinchan Kim pmd_t orig_pmd; 1559b8d3c4c3SMinchan Kim struct page *page; 1560b8d3c4c3SMinchan Kim struct mm_struct *mm = tlb->mm; 1561319904adSHuang Ying bool ret = false; 1562b8d3c4c3SMinchan Kim 1563ed6a7935SPeter Zijlstra tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 156407e32661SAneesh Kumar K.V 1565b6ec57f4SKirill A. Shutemov ptl = pmd_trans_huge_lock(pmd, vma); 1566b6ec57f4SKirill A. Shutemov if (!ptl) 156725eedabeSLinus Torvalds goto out_unlocked; 1568b8d3c4c3SMinchan Kim 1569b8d3c4c3SMinchan Kim orig_pmd = *pmd; 1570319904adSHuang Ying if (is_huge_zero_pmd(orig_pmd)) 1571b8d3c4c3SMinchan Kim goto out; 1572b8d3c4c3SMinchan Kim 157384c3fc4eSZi Yan if (unlikely(!pmd_present(orig_pmd))) { 157484c3fc4eSZi Yan VM_BUG_ON(thp_migration_supported() && 157584c3fc4eSZi Yan !is_pmd_migration_entry(orig_pmd)); 157684c3fc4eSZi Yan goto out; 157784c3fc4eSZi Yan } 157884c3fc4eSZi Yan 1579b8d3c4c3SMinchan Kim page = pmd_page(orig_pmd); 1580b8d3c4c3SMinchan Kim /* 1581b8d3c4c3SMinchan Kim * If other processes are mapping this page, we couldn't discard 1582b8d3c4c3SMinchan Kim * the page unless they all do MADV_FREE so let's skip the page. 1583b8d3c4c3SMinchan Kim */ 1584b8d3c4c3SMinchan Kim if (page_mapcount(page) != 1) 1585b8d3c4c3SMinchan Kim goto out; 1586b8d3c4c3SMinchan Kim 1587b8d3c4c3SMinchan Kim if (!trylock_page(page)) 1588b8d3c4c3SMinchan Kim goto out; 1589b8d3c4c3SMinchan Kim 1590b8d3c4c3SMinchan Kim /* 1591b8d3c4c3SMinchan Kim * If user want to discard part-pages of THP, split it so MADV_FREE 1592b8d3c4c3SMinchan Kim * will deactivate only them. 
1593b8d3c4c3SMinchan Kim */ 1594b8d3c4c3SMinchan Kim if (next - addr != HPAGE_PMD_SIZE) { 1595b8d3c4c3SMinchan Kim get_page(page); 1596b8d3c4c3SMinchan Kim spin_unlock(ptl); 15979818b8cdSHuang Ying split_huge_page(page); 1598b8d3c4c3SMinchan Kim unlock_page(page); 1599bbf29ffcSKirill A. Shutemov put_page(page); 1600b8d3c4c3SMinchan Kim goto out_unlocked; 1601b8d3c4c3SMinchan Kim } 1602b8d3c4c3SMinchan Kim 1603b8d3c4c3SMinchan Kim if (PageDirty(page)) 1604b8d3c4c3SMinchan Kim ClearPageDirty(page); 1605b8d3c4c3SMinchan Kim unlock_page(page); 1606b8d3c4c3SMinchan Kim 1607b8d3c4c3SMinchan Kim if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) { 160858ceeb6bSKirill A. Shutemov pmdp_invalidate(vma, addr, pmd); 1609b8d3c4c3SMinchan Kim orig_pmd = pmd_mkold(orig_pmd); 1610b8d3c4c3SMinchan Kim orig_pmd = pmd_mkclean(orig_pmd); 1611b8d3c4c3SMinchan Kim 1612b8d3c4c3SMinchan Kim set_pmd_at(mm, addr, pmd, orig_pmd); 1613b8d3c4c3SMinchan Kim tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 1614b8d3c4c3SMinchan Kim } 1615802a3a92SShaohua Li 1616802a3a92SShaohua Li mark_page_lazyfree(page); 1617319904adSHuang Ying ret = true; 1618b8d3c4c3SMinchan Kim out: 1619b8d3c4c3SMinchan Kim spin_unlock(ptl); 1620b8d3c4c3SMinchan Kim out_unlocked: 1621b8d3c4c3SMinchan Kim return ret; 1622b8d3c4c3SMinchan Kim } 1623b8d3c4c3SMinchan Kim 1624953c66c2SAneesh Kumar K.V static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) 1625953c66c2SAneesh Kumar K.V { 1626953c66c2SAneesh Kumar K.V pgtable_t pgtable; 1627953c66c2SAneesh Kumar K.V 1628953c66c2SAneesh Kumar K.V pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1629953c66c2SAneesh Kumar K.V pte_free(mm, pgtable); 1630c4812909SKirill A. Shutemov mm_dec_nr_ptes(mm); 1631953c66c2SAneesh Kumar K.V } 1632953c66c2SAneesh Kumar K.V 163371e3aac0SAndrea Arcangeli int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1634f21760b1SShaohua Li pmd_t *pmd, unsigned long addr) 163571e3aac0SAndrea Arcangeli { 1636f5c8ad47SDavid Miller pmd_t orig_pmd; 1637da146769SKirill A. Shutemov spinlock_t *ptl; 1638da146769SKirill A. Shutemov 1639ed6a7935SPeter Zijlstra tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 164007e32661SAneesh Kumar K.V 1641b6ec57f4SKirill A. Shutemov ptl = __pmd_trans_huge_lock(pmd, vma); 1642b6ec57f4SKirill A. Shutemov if (!ptl) 1643da146769SKirill A. Shutemov return 0; 1644a6bf2bb0SAneesh Kumar K.V /* 1645a6bf2bb0SAneesh Kumar K.V * For architectures like ppc64 we look at deposited pgtable 16468809aa2dSAneesh Kumar K.V * when calling pmdp_huge_get_and_clear. So do the 1647a6bf2bb0SAneesh Kumar K.V * pgtable_trans_huge_withdraw after finishing pmdp related 1648a6bf2bb0SAneesh Kumar K.V * operations. 1649a6bf2bb0SAneesh Kumar K.V */ 165093a98695SAneesh Kumar K.V orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd, 1651fcbe08d6SMartin Schwidefsky tlb->fullmm); 1652f21760b1SShaohua Li tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 16532484ca9bSThomas Hellstrom (VMware) if (vma_is_special_huge(vma)) { 16543b6521f5SOliver O'Halloran if (arch_needs_pgtable_deposit()) 16553b6521f5SOliver O'Halloran zap_deposited_table(tlb->mm, pmd); 16564897c765SMatthew Wilcox spin_unlock(ptl); 1657da146769SKirill A. Shutemov if (is_huge_zero_pmd(orig_pmd)) 1658c0f2e176SAneesh Kumar K.V tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE); 1659da146769SKirill A. Shutemov } else if (is_huge_zero_pmd(orig_pmd)) { 1660c14a6eb4SOliver O'Halloran zap_deposited_table(tlb->mm, pmd); 1661bf929152SKirill A. 
Shutemov spin_unlock(ptl); 1662c0f2e176SAneesh Kumar K.V tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE); 1663479f0abbSKirill A. Shutemov } else { 1664616b8371SZi Yan struct page *page = NULL; 1665616b8371SZi Yan int flush_needed = 1; 1666616b8371SZi Yan 1667616b8371SZi Yan if (pmd_present(orig_pmd)) { 1668616b8371SZi Yan page = pmd_page(orig_pmd); 1669d281ee61SKirill A. Shutemov page_remove_rmap(page, true); 1670309381feSSasha Levin VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); 1671309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(page), page); 1672616b8371SZi Yan } else if (thp_migration_supported()) { 1673616b8371SZi Yan swp_entry_t entry; 1674616b8371SZi Yan 1675616b8371SZi Yan VM_BUG_ON(!is_pmd_migration_entry(orig_pmd)); 1676616b8371SZi Yan entry = pmd_to_swp_entry(orig_pmd); 1677616b8371SZi Yan page = pfn_to_page(swp_offset(entry)); 1678616b8371SZi Yan flush_needed = 0; 1679616b8371SZi Yan } else 1680616b8371SZi Yan WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); 1681616b8371SZi Yan 1682b5072380SKirill A. Shutemov if (PageAnon(page)) { 1683c14a6eb4SOliver O'Halloran zap_deposited_table(tlb->mm, pmd); 1684b5072380SKirill A. Shutemov add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); 1685b5072380SKirill A. Shutemov } else { 1686953c66c2SAneesh Kumar K.V if (arch_needs_pgtable_deposit()) 1687953c66c2SAneesh Kumar K.V zap_deposited_table(tlb->mm, pmd); 1688fadae295SYang Shi add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR); 1689b5072380SKirill A. Shutemov } 1690616b8371SZi Yan 1691bf929152SKirill A. Shutemov spin_unlock(ptl); 1692616b8371SZi Yan if (flush_needed) 1693e77b0852SAneesh Kumar K.V tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE); 1694479f0abbSKirill A. Shutemov } 1695da146769SKirill A. Shutemov return 1; 169671e3aac0SAndrea Arcangeli } 169771e3aac0SAndrea Arcangeli 16981dd38b6cSAneesh Kumar K.V #ifndef pmd_move_must_withdraw 16991dd38b6cSAneesh Kumar K.V static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl, 17001dd38b6cSAneesh Kumar K.V spinlock_t *old_pmd_ptl, 17011dd38b6cSAneesh Kumar K.V struct vm_area_struct *vma) 17021dd38b6cSAneesh Kumar K.V { 17031dd38b6cSAneesh Kumar K.V /* 17041dd38b6cSAneesh Kumar K.V * With split pmd lock we also need to move preallocated 17051dd38b6cSAneesh Kumar K.V * PTE page table if new_pmd is on different PMD page table. 17061dd38b6cSAneesh Kumar K.V * 17071dd38b6cSAneesh Kumar K.V * We also don't deposit and withdraw tables for file pages. 17081dd38b6cSAneesh Kumar K.V */ 17091dd38b6cSAneesh Kumar K.V return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); 17101dd38b6cSAneesh Kumar K.V } 17111dd38b6cSAneesh Kumar K.V #endif 17121dd38b6cSAneesh Kumar K.V 1713ab6e3d09SNaoya Horiguchi static pmd_t move_soft_dirty_pmd(pmd_t pmd) 1714ab6e3d09SNaoya Horiguchi { 1715ab6e3d09SNaoya Horiguchi #ifdef CONFIG_MEM_SOFT_DIRTY 1716ab6e3d09SNaoya Horiguchi if (unlikely(is_pmd_migration_entry(pmd))) 1717ab6e3d09SNaoya Horiguchi pmd = pmd_swp_mksoft_dirty(pmd); 1718ab6e3d09SNaoya Horiguchi else if (pmd_present(pmd)) 1719ab6e3d09SNaoya Horiguchi pmd = pmd_mksoft_dirty(pmd); 1720ab6e3d09SNaoya Horiguchi #endif 1721ab6e3d09SNaoya Horiguchi return pmd; 1722ab6e3d09SNaoya Horiguchi } 1723ab6e3d09SNaoya Horiguchi 1724bf8616d5SHugh Dickins bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, 172537a1c49aSAndrea Arcangeli unsigned long new_addr, unsigned long old_end, 1726eb66ae03SLinus Torvalds pmd_t *old_pmd, pmd_t *new_pmd) 172737a1c49aSAndrea Arcangeli { 1728bf929152SKirill A. 
Shutemov spinlock_t *old_ptl, *new_ptl; 172937a1c49aSAndrea Arcangeli pmd_t pmd; 173037a1c49aSAndrea Arcangeli struct mm_struct *mm = vma->vm_mm; 17315d190420SAaron Lu bool force_flush = false; 173237a1c49aSAndrea Arcangeli 173337a1c49aSAndrea Arcangeli if ((old_addr & ~HPAGE_PMD_MASK) || 173437a1c49aSAndrea Arcangeli (new_addr & ~HPAGE_PMD_MASK) || 1735bf8616d5SHugh Dickins old_end - old_addr < HPAGE_PMD_SIZE) 17364b471e88SKirill A. Shutemov return false; 173737a1c49aSAndrea Arcangeli 173837a1c49aSAndrea Arcangeli /* 173937a1c49aSAndrea Arcangeli * The destination pmd shouldn't be established, free_pgtables() 174037a1c49aSAndrea Arcangeli * should have released it. 174137a1c49aSAndrea Arcangeli */ 174237a1c49aSAndrea Arcangeli if (WARN_ON(!pmd_none(*new_pmd))) { 174337a1c49aSAndrea Arcangeli VM_BUG_ON(pmd_trans_huge(*new_pmd)); 17444b471e88SKirill A. Shutemov return false; 174537a1c49aSAndrea Arcangeli } 174637a1c49aSAndrea Arcangeli 1747bf929152SKirill A. Shutemov /* 1748bf929152SKirill A. Shutemov * We don't have to worry about the ordering of src and dst 1749*c1e8d7c6SMichel Lespinasse * ptlocks because exclusive mmap_lock prevents deadlock. 1750bf929152SKirill A. Shutemov */ 1751b6ec57f4SKirill A. Shutemov old_ptl = __pmd_trans_huge_lock(old_pmd, vma); 1752b6ec57f4SKirill A. Shutemov if (old_ptl) { 1753bf929152SKirill A. Shutemov new_ptl = pmd_lockptr(mm, new_pmd); 1754bf929152SKirill A. Shutemov if (new_ptl != old_ptl) 1755bf929152SKirill A. Shutemov spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); 17568809aa2dSAneesh Kumar K.V pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); 1757eb66ae03SLinus Torvalds if (pmd_present(pmd)) 1758a2ce2666SAaron Lu force_flush = true; 175937a1c49aSAndrea Arcangeli VM_BUG_ON(!pmd_none(*new_pmd)); 17603592806cSKirill A. Shutemov 17611dd38b6cSAneesh Kumar K.V if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) { 1762b3084f4dSAneesh Kumar K.V pgtable_t pgtable; 17633592806cSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); 17643592806cSKirill A. Shutemov pgtable_trans_huge_deposit(mm, new_pmd, pgtable); 17653592806cSKirill A. Shutemov } 1766ab6e3d09SNaoya Horiguchi pmd = move_soft_dirty_pmd(pmd); 1767ab6e3d09SNaoya Horiguchi set_pmd_at(mm, new_addr, new_pmd, pmd); 17685d190420SAaron Lu if (force_flush) 17695d190420SAaron Lu flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); 1770eb66ae03SLinus Torvalds if (new_ptl != old_ptl) 1771eb66ae03SLinus Torvalds spin_unlock(new_ptl); 1772bf929152SKirill A. Shutemov spin_unlock(old_ptl); 17734b471e88SKirill A. Shutemov return true; 177437a1c49aSAndrea Arcangeli } 17754b471e88SKirill A. Shutemov return false; 177637a1c49aSAndrea Arcangeli } 177737a1c49aSAndrea Arcangeli 1778f123d74aSMel Gorman /* 1779f123d74aSMel Gorman * Returns 1780f123d74aSMel Gorman * - 0 if PMD could not be locked 1781f123d74aSMel Gorman * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary 1782f123d74aSMel Gorman * - HPAGE_PMD_NR if protections changed and TLB flush necessary 1783f123d74aSMel Gorman */ 1784cd7548abSJohannes Weiner int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 178558705444SPeter Xu unsigned long addr, pgprot_t newprot, unsigned long cp_flags) 1786cd7548abSJohannes Weiner { 1787cd7548abSJohannes Weiner struct mm_struct *mm = vma->vm_mm; 1788bf929152SKirill A. Shutemov spinlock_t *ptl; 17890a85e51dSKirill A. Shutemov pmd_t entry; 17900a85e51dSKirill A. Shutemov bool preserve_write; 17910a85e51dSKirill A. 
Shutemov int ret; 179258705444SPeter Xu bool prot_numa = cp_flags & MM_CP_PROT_NUMA; 1793292924b2SPeter Xu bool uffd_wp = cp_flags & MM_CP_UFFD_WP; 1794292924b2SPeter Xu bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; 1795cd7548abSJohannes Weiner 1796b6ec57f4SKirill A. Shutemov ptl = __pmd_trans_huge_lock(pmd, vma); 17970a85e51dSKirill A. Shutemov if (!ptl) 17980a85e51dSKirill A. Shutemov return 0; 17990a85e51dSKirill A. Shutemov 18000a85e51dSKirill A. Shutemov preserve_write = prot_numa && pmd_write(*pmd); 1801ba68bc01SMel Gorman ret = 1; 1802e944fd67SMel Gorman 180384c3fc4eSZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 180484c3fc4eSZi Yan if (is_swap_pmd(*pmd)) { 180584c3fc4eSZi Yan swp_entry_t entry = pmd_to_swp_entry(*pmd); 180684c3fc4eSZi Yan 180784c3fc4eSZi Yan VM_BUG_ON(!is_pmd_migration_entry(*pmd)); 180884c3fc4eSZi Yan if (is_write_migration_entry(entry)) { 180984c3fc4eSZi Yan pmd_t newpmd; 181084c3fc4eSZi Yan /* 181184c3fc4eSZi Yan * A protection check is difficult so 181284c3fc4eSZi Yan * just be safe and disable write 181384c3fc4eSZi Yan */ 181484c3fc4eSZi Yan make_migration_entry_read(&entry); 181584c3fc4eSZi Yan newpmd = swp_entry_to_pmd(entry); 1816ab6e3d09SNaoya Horiguchi if (pmd_swp_soft_dirty(*pmd)) 1817ab6e3d09SNaoya Horiguchi newpmd = pmd_swp_mksoft_dirty(newpmd); 181884c3fc4eSZi Yan set_pmd_at(mm, addr, pmd, newpmd); 181984c3fc4eSZi Yan } 182084c3fc4eSZi Yan goto unlock; 182184c3fc4eSZi Yan } 182284c3fc4eSZi Yan #endif 182384c3fc4eSZi Yan 1824e944fd67SMel Gorman /* 1825e944fd67SMel Gorman * Avoid trapping faults against the zero page. The read-only 1826e944fd67SMel Gorman * data is likely to be read-cached on the local CPU and 1827e944fd67SMel Gorman * local/remote hits to the zero page are not interesting. 1828e944fd67SMel Gorman */ 18290a85e51dSKirill A. Shutemov if (prot_numa && is_huge_zero_pmd(*pmd)) 18300a85e51dSKirill A. Shutemov goto unlock; 1831e944fd67SMel Gorman 18320a85e51dSKirill A. Shutemov if (prot_numa && pmd_protnone(*pmd)) 18330a85e51dSKirill A. Shutemov goto unlock; 18340a85e51dSKirill A. Shutemov 1835ced10803SKirill A. Shutemov /* 18363e4e28c5SMichel Lespinasse * In case prot_numa, we are under mmap_read_lock(mm). It's critical 1837ced10803SKirill A. Shutemov * to not clear pmd intermittently to avoid race with MADV_DONTNEED 18383e4e28c5SMichel Lespinasse * which is also under mmap_read_lock(mm): 1839ced10803SKirill A. Shutemov * 1840ced10803SKirill A. Shutemov * CPU0: CPU1: 1841ced10803SKirill A. Shutemov * change_huge_pmd(prot_numa=1) 1842ced10803SKirill A. Shutemov * pmdp_huge_get_and_clear_notify() 1843ced10803SKirill A. Shutemov * madvise_dontneed() 1844ced10803SKirill A. Shutemov * zap_pmd_range() 1845ced10803SKirill A. Shutemov * pmd_trans_huge(*pmd) == 0 (without ptl) 1846ced10803SKirill A. Shutemov * // skip the pmd 1847ced10803SKirill A. Shutemov * set_pmd_at(); 1848ced10803SKirill A. Shutemov * // pmd is re-established 1849ced10803SKirill A. Shutemov * 1850ced10803SKirill A. Shutemov * The race makes MADV_DONTNEED miss the huge pmd and don't clear it 1851ced10803SKirill A. Shutemov * which may break userspace. 1852ced10803SKirill A. Shutemov * 1853ced10803SKirill A. Shutemov * pmdp_invalidate() is required to make sure we don't miss 1854ced10803SKirill A. Shutemov * dirty/young flags set by hardware. 1855ced10803SKirill A. Shutemov */ 1856a3cf988fSKirill A. Shutemov entry = pmdp_invalidate(vma, addr, pmd); 1857ced10803SKirill A. 
Shutemov 1858cd7548abSJohannes Weiner entry = pmd_modify(entry, newprot); 1859b191f9b1SMel Gorman if (preserve_write) 1860288bc549SAneesh Kumar K.V entry = pmd_mk_savedwrite(entry); 1861292924b2SPeter Xu if (uffd_wp) { 1862292924b2SPeter Xu entry = pmd_wrprotect(entry); 1863292924b2SPeter Xu entry = pmd_mkuffd_wp(entry); 1864292924b2SPeter Xu } else if (uffd_wp_resolve) { 1865292924b2SPeter Xu /* 1866292924b2SPeter Xu * Leave the write bit to be handled by PF interrupt 1867292924b2SPeter Xu * handler, then things like COW could be properly 1868292924b2SPeter Xu * handled. 1869292924b2SPeter Xu */ 1870292924b2SPeter Xu entry = pmd_clear_uffd_wp(entry); 1871292924b2SPeter Xu } 1872f123d74aSMel Gorman ret = HPAGE_PMD_NR; 187356eecdb9SAneesh Kumar K.V set_pmd_at(mm, addr, pmd, entry); 18740a85e51dSKirill A. Shutemov BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry)); 18750a85e51dSKirill A. Shutemov unlock: 1876bf929152SKirill A. Shutemov spin_unlock(ptl); 1877cd7548abSJohannes Weiner return ret; 1878cd7548abSJohannes Weiner } 1879cd7548abSJohannes Weiner 1880025c5b24SNaoya Horiguchi /* 18818f19b0c0SHuang Ying * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise. 1882025c5b24SNaoya Horiguchi * 18838f19b0c0SHuang Ying * Note that if it returns page table lock pointer, this routine returns without 18848f19b0c0SHuang Ying * unlocking page table lock. So callers must unlock it. 1885025c5b24SNaoya Horiguchi */ 1886b6ec57f4SKirill A. Shutemov spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) 1887025c5b24SNaoya Horiguchi { 1888b6ec57f4SKirill A. Shutemov spinlock_t *ptl; 1889b6ec57f4SKirill A. Shutemov ptl = pmd_lock(vma->vm_mm, pmd); 189084c3fc4eSZi Yan if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || 189184c3fc4eSZi Yan pmd_devmap(*pmd))) 1892b6ec57f4SKirill A. Shutemov return ptl; 1893b6ec57f4SKirill A. Shutemov spin_unlock(ptl); 1894b6ec57f4SKirill A. Shutemov return NULL; 1895025c5b24SNaoya Horiguchi } 1896025c5b24SNaoya Horiguchi 1897a00cc7d9SMatthew Wilcox /* 1898a00cc7d9SMatthew Wilcox * Returns page table lock pointer if a given pud maps a thp, NULL otherwise. 1899a00cc7d9SMatthew Wilcox * 1900a00cc7d9SMatthew Wilcox * Note that if it returns page table lock pointer, this routine returns without unlocking page 1901a00cc7d9SMatthew Wilcox * table lock. So callers must unlock it. 
1902a00cc7d9SMatthew Wilcox */ 1903a00cc7d9SMatthew Wilcox spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) 1904a00cc7d9SMatthew Wilcox { 1905a00cc7d9SMatthew Wilcox spinlock_t *ptl; 1906a00cc7d9SMatthew Wilcox 1907a00cc7d9SMatthew Wilcox ptl = pud_lock(vma->vm_mm, pud); 1908a00cc7d9SMatthew Wilcox if (likely(pud_trans_huge(*pud) || pud_devmap(*pud))) 1909a00cc7d9SMatthew Wilcox return ptl; 1910a00cc7d9SMatthew Wilcox spin_unlock(ptl); 1911a00cc7d9SMatthew Wilcox return NULL; 1912a00cc7d9SMatthew Wilcox } 1913a00cc7d9SMatthew Wilcox 1914a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 1915a00cc7d9SMatthew Wilcox int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, 1916a00cc7d9SMatthew Wilcox pud_t *pud, unsigned long addr) 1917a00cc7d9SMatthew Wilcox { 1918a00cc7d9SMatthew Wilcox spinlock_t *ptl; 1919a00cc7d9SMatthew Wilcox 1920a00cc7d9SMatthew Wilcox ptl = __pud_trans_huge_lock(pud, vma); 1921a00cc7d9SMatthew Wilcox if (!ptl) 1922a00cc7d9SMatthew Wilcox return 0; 1923a00cc7d9SMatthew Wilcox /* 1924a00cc7d9SMatthew Wilcox * For architectures like ppc64 we look at deposited pgtable 1925a00cc7d9SMatthew Wilcox * when calling pudp_huge_get_and_clear. So do the 1926a00cc7d9SMatthew Wilcox * pgtable_trans_huge_withdraw after finishing pudp related 1927a00cc7d9SMatthew Wilcox * operations. 1928a00cc7d9SMatthew Wilcox */ 192970516b93SQian Cai pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm); 1930a00cc7d9SMatthew Wilcox tlb_remove_pud_tlb_entry(tlb, pud, addr); 19312484ca9bSThomas Hellstrom (VMware) if (vma_is_special_huge(vma)) { 1932a00cc7d9SMatthew Wilcox spin_unlock(ptl); 1933a00cc7d9SMatthew Wilcox /* No zero page support yet */ 1934a00cc7d9SMatthew Wilcox } else { 1935a00cc7d9SMatthew Wilcox /* No support for anonymous PUD pages yet */ 1936a00cc7d9SMatthew Wilcox BUG(); 1937a00cc7d9SMatthew Wilcox } 1938a00cc7d9SMatthew Wilcox return 1; 1939a00cc7d9SMatthew Wilcox } 1940a00cc7d9SMatthew Wilcox 1941a00cc7d9SMatthew Wilcox static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, 1942a00cc7d9SMatthew Wilcox unsigned long haddr) 1943a00cc7d9SMatthew Wilcox { 1944a00cc7d9SMatthew Wilcox VM_BUG_ON(haddr & ~HPAGE_PUD_MASK); 1945a00cc7d9SMatthew Wilcox VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 1946a00cc7d9SMatthew Wilcox VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); 1947a00cc7d9SMatthew Wilcox VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud)); 1948a00cc7d9SMatthew Wilcox 1949ce9311cfSYisheng Xie count_vm_event(THP_SPLIT_PUD); 1950a00cc7d9SMatthew Wilcox 1951a00cc7d9SMatthew Wilcox pudp_huge_clear_flush_notify(vma, haddr, pud); 1952a00cc7d9SMatthew Wilcox } 1953a00cc7d9SMatthew Wilcox 1954a00cc7d9SMatthew Wilcox void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, 1955a00cc7d9SMatthew Wilcox unsigned long address) 1956a00cc7d9SMatthew Wilcox { 1957a00cc7d9SMatthew Wilcox spinlock_t *ptl; 1958ac46d4f3SJérôme Glisse struct mmu_notifier_range range; 1959a00cc7d9SMatthew Wilcox 19607269f999SJérôme Glisse mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, 19616f4f13e8SJérôme Glisse address & HPAGE_PUD_MASK, 1962ac46d4f3SJérôme Glisse (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE); 1963ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 1964ac46d4f3SJérôme Glisse ptl = pud_lock(vma->vm_mm, pud); 1965a00cc7d9SMatthew Wilcox if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud))) 1966a00cc7d9SMatthew Wilcox goto out; 1967ac46d4f3SJérôme Glisse 
__split_huge_pud_locked(vma, pud, range.start); 1968a00cc7d9SMatthew Wilcox 1969a00cc7d9SMatthew Wilcox out: 1970a00cc7d9SMatthew Wilcox spin_unlock(ptl); 19714645b9feSJérôme Glisse /* 19724645b9feSJérôme Glisse * No need to double call mmu_notifier->invalidate_range() callback as 19734645b9feSJérôme Glisse * the above pudp_huge_clear_flush_notify() did already call it. 19744645b9feSJérôme Glisse */ 1975ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_only_end(&range); 1976a00cc7d9SMatthew Wilcox } 1977a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 1978a00cc7d9SMatthew Wilcox 1979eef1b3baSKirill A. Shutemov static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, 1980eef1b3baSKirill A. Shutemov unsigned long haddr, pmd_t *pmd) 1981eef1b3baSKirill A. Shutemov { 1982eef1b3baSKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 1983eef1b3baSKirill A. Shutemov pgtable_t pgtable; 1984eef1b3baSKirill A. Shutemov pmd_t _pmd; 1985eef1b3baSKirill A. Shutemov int i; 1986eef1b3baSKirill A. Shutemov 19870f10851eSJérôme Glisse /* 19880f10851eSJérôme Glisse * Leave pmd empty until pte is filled note that it is fine to delay 19890f10851eSJérôme Glisse * notification until mmu_notifier_invalidate_range_end() as we are 19900f10851eSJérôme Glisse * replacing a zero pmd write protected page with a zero pte write 19910f10851eSJérôme Glisse * protected page. 19920f10851eSJérôme Glisse * 1993ad56b738SMike Rapoport * See Documentation/vm/mmu_notifier.rst 19940f10851eSJérôme Glisse */ 19950f10851eSJérôme Glisse pmdp_huge_clear_flush(vma, haddr, pmd); 1996eef1b3baSKirill A. Shutemov 1997eef1b3baSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1998eef1b3baSKirill A. Shutemov pmd_populate(mm, &_pmd, pgtable); 1999eef1b3baSKirill A. Shutemov 2000eef1b3baSKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 2001eef1b3baSKirill A. Shutemov pte_t *pte, entry; 2002eef1b3baSKirill A. Shutemov entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); 2003eef1b3baSKirill A. Shutemov entry = pte_mkspecial(entry); 2004eef1b3baSKirill A. Shutemov pte = pte_offset_map(&_pmd, haddr); 2005eef1b3baSKirill A. Shutemov VM_BUG_ON(!pte_none(*pte)); 2006eef1b3baSKirill A. Shutemov set_pte_at(mm, haddr, pte, entry); 2007eef1b3baSKirill A. Shutemov pte_unmap(pte); 2008eef1b3baSKirill A. Shutemov } 2009eef1b3baSKirill A. Shutemov smp_wmb(); /* make pte visible before pmd */ 2010eef1b3baSKirill A. Shutemov pmd_populate(mm, pmd, pgtable); 2011eef1b3baSKirill A. Shutemov } 2012eef1b3baSKirill A. Shutemov 2013eef1b3baSKirill A. Shutemov static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, 2014ba988280SKirill A. Shutemov unsigned long haddr, bool freeze) 2015eef1b3baSKirill A. Shutemov { 2016eef1b3baSKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 2017eef1b3baSKirill A. Shutemov struct page *page; 2018eef1b3baSKirill A. Shutemov pgtable_t pgtable; 2019423ac9afSAneesh Kumar K.V pmd_t old_pmd, _pmd; 2020292924b2SPeter Xu bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false; 20212ac015e2SKirill A. Shutemov unsigned long addr; 2022eef1b3baSKirill A. Shutemov int i; 2023eef1b3baSKirill A. Shutemov 2024eef1b3baSKirill A. Shutemov VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); 2025eef1b3baSKirill A. Shutemov VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 2026eef1b3baSKirill A. 
Shutemov VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); 202784c3fc4eSZi Yan VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd) 202884c3fc4eSZi Yan && !pmd_devmap(*pmd)); 2029eef1b3baSKirill A. Shutemov 2030eef1b3baSKirill A. Shutemov count_vm_event(THP_SPLIT_PMD); 2031eef1b3baSKirill A. Shutemov 2032d21b9e57SKirill A. Shutemov if (!vma_is_anonymous(vma)) { 2033d21b9e57SKirill A. Shutemov _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); 2034953c66c2SAneesh Kumar K.V /* 2035953c66c2SAneesh Kumar K.V * We are going to unmap this huge page. So 2036953c66c2SAneesh Kumar K.V * just go ahead and zap it 2037953c66c2SAneesh Kumar K.V */ 2038953c66c2SAneesh Kumar K.V if (arch_needs_pgtable_deposit()) 2039953c66c2SAneesh Kumar K.V zap_deposited_table(mm, pmd); 20402484ca9bSThomas Hellstrom (VMware) if (vma_is_special_huge(vma)) 2041d21b9e57SKirill A. Shutemov return; 2042d21b9e57SKirill A. Shutemov page = pmd_page(_pmd); 2043e1f1b157SHugh Dickins if (!PageDirty(page) && pmd_dirty(_pmd)) 2044e1f1b157SHugh Dickins set_page_dirty(page); 2045d21b9e57SKirill A. Shutemov if (!PageReferenced(page) && pmd_young(_pmd)) 2046d21b9e57SKirill A. Shutemov SetPageReferenced(page); 2047d21b9e57SKirill A. Shutemov page_remove_rmap(page, true); 2048d21b9e57SKirill A. Shutemov put_page(page); 2049fadae295SYang Shi add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR); 2050eef1b3baSKirill A. Shutemov return; 2051eef1b3baSKirill A. Shutemov } else if (is_huge_zero_pmd(*pmd)) { 20524645b9feSJérôme Glisse /* 20534645b9feSJérôme Glisse * FIXME: Do we want to invalidate secondary mmu by calling 20544645b9feSJérôme Glisse * mmu_notifier_invalidate_range() see comments below inside 20554645b9feSJérôme Glisse * __split_huge_pmd() ? 20564645b9feSJérôme Glisse * 20574645b9feSJérôme Glisse * We are going from a zero huge page write protected to zero 20584645b9feSJérôme Glisse * small page also write protected so it does not seem useful 20594645b9feSJérôme Glisse * to invalidate secondary mmu at this time. 20604645b9feSJérôme Glisse */ 2061eef1b3baSKirill A. Shutemov return __split_huge_zero_page_pmd(vma, haddr, pmd); 2062eef1b3baSKirill A. Shutemov } 2063eef1b3baSKirill A. Shutemov 2064423ac9afSAneesh Kumar K.V /* 2065423ac9afSAneesh Kumar K.V * Up to this point the pmd is present and huge and userland has the 2066423ac9afSAneesh Kumar K.V * whole access to the hugepage during the split (which happens in 2067423ac9afSAneesh Kumar K.V * place). If we overwrite the pmd with the not-huge version pointing 2068423ac9afSAneesh Kumar K.V * to the pte here (which of course we could if all CPUs were bug 2069423ac9afSAneesh Kumar K.V * free), userland could trigger a small page size TLB miss on the 2070423ac9afSAneesh Kumar K.V * small sized TLB while the hugepage TLB entry is still established in 2071423ac9afSAneesh Kumar K.V * the huge TLB. Some CPU doesn't like that. 2072423ac9afSAneesh Kumar K.V * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum 2073423ac9afSAneesh Kumar K.V * 383 on page 93. Intel should be safe but it also warns that it's 2074423ac9afSAneesh Kumar K.V * only safe if the permission and cache attributes of the two entries 2075423ac9afSAneesh Kumar K.V * loaded in the two TLB are identical (which should be the case here). 2076423ac9afSAneesh Kumar K.V * But it is generally safer to never allow small and huge TLB entries 2077423ac9afSAneesh Kumar K.V * for the same virtual address to be loaded simultaneously. 
So instead 2078423ac9afSAneesh Kumar K.V * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the 2079423ac9afSAneesh Kumar K.V * current pmd notpresent (atomically because here the pmd_trans_huge 2080423ac9afSAneesh Kumar K.V * must remain set at all times on the pmd until the split is complete 2081423ac9afSAneesh Kumar K.V * for this pmd), then we flush the SMP TLB and finally we write the 2082423ac9afSAneesh Kumar K.V * non-huge version of the pmd entry with pmd_populate. 2083423ac9afSAneesh Kumar K.V */ 2084423ac9afSAneesh Kumar K.V old_pmd = pmdp_invalidate(vma, haddr, pmd); 2085423ac9afSAneesh Kumar K.V 2086423ac9afSAneesh Kumar K.V pmd_migration = is_pmd_migration_entry(old_pmd); 20872e83ee1dSPeter Xu if (unlikely(pmd_migration)) { 208884c3fc4eSZi Yan swp_entry_t entry; 208984c3fc4eSZi Yan 2090423ac9afSAneesh Kumar K.V entry = pmd_to_swp_entry(old_pmd); 209184c3fc4eSZi Yan page = pfn_to_page(swp_offset(entry)); 20922e83ee1dSPeter Xu write = is_write_migration_entry(entry); 20932e83ee1dSPeter Xu young = false; 20942e83ee1dSPeter Xu soft_dirty = pmd_swp_soft_dirty(old_pmd); 2095f45ec5ffSPeter Xu uffd_wp = pmd_swp_uffd_wp(old_pmd); 20962e83ee1dSPeter Xu } else { 2097423ac9afSAneesh Kumar K.V page = pmd_page(old_pmd); 2098423ac9afSAneesh Kumar K.V if (pmd_dirty(old_pmd)) 2099423ac9afSAneesh Kumar K.V SetPageDirty(page); 2100423ac9afSAneesh Kumar K.V write = pmd_write(old_pmd); 2101423ac9afSAneesh Kumar K.V young = pmd_young(old_pmd); 2102423ac9afSAneesh Kumar K.V soft_dirty = pmd_soft_dirty(old_pmd); 2103292924b2SPeter Xu uffd_wp = pmd_uffd_wp(old_pmd); 21042e83ee1dSPeter Xu } 21052e83ee1dSPeter Xu VM_BUG_ON_PAGE(!page_count(page), page); 21062e83ee1dSPeter Xu page_ref_add(page, HPAGE_PMD_NR - 1); 2107eef1b3baSKirill A. Shutemov 2108423ac9afSAneesh Kumar K.V /* 2109423ac9afSAneesh Kumar K.V * Withdraw the table only after we mark the pmd entry invalid. 2110423ac9afSAneesh Kumar K.V * This's critical for some architectures (Power). 2111423ac9afSAneesh Kumar K.V */ 2112eef1b3baSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2113eef1b3baSKirill A. Shutemov pmd_populate(mm, &_pmd, pgtable); 2114eef1b3baSKirill A. Shutemov 21152ac015e2SKirill A. Shutemov for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { 2116eef1b3baSKirill A. Shutemov pte_t entry, *pte; 2117eef1b3baSKirill A. Shutemov /* 2118eef1b3baSKirill A. Shutemov * Note that NUMA hinting access restrictions are not 2119eef1b3baSKirill A. Shutemov * transferred to avoid any possibility of altering 2120eef1b3baSKirill A. Shutemov * permissions across VMAs. 2121eef1b3baSKirill A. Shutemov */ 212284c3fc4eSZi Yan if (freeze || pmd_migration) { 2123ba988280SKirill A. Shutemov swp_entry_t swp_entry; 2124ba988280SKirill A. Shutemov swp_entry = make_migration_entry(page + i, write); 2125ba988280SKirill A. Shutemov entry = swp_entry_to_pte(swp_entry); 2126804dd150SAndrea Arcangeli if (soft_dirty) 2127804dd150SAndrea Arcangeli entry = pte_swp_mksoft_dirty(entry); 2128f45ec5ffSPeter Xu if (uffd_wp) 2129f45ec5ffSPeter Xu entry = pte_swp_mkuffd_wp(entry); 2130ba988280SKirill A. Shutemov } else { 21316d2329f8SAndrea Arcangeli entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); 2132b8d3c4c3SMinchan Kim entry = maybe_mkwrite(entry, vma); 2133eef1b3baSKirill A. Shutemov if (!write) 2134eef1b3baSKirill A. Shutemov entry = pte_wrprotect(entry); 2135eef1b3baSKirill A. Shutemov if (!young) 2136eef1b3baSKirill A. 
Shutemov entry = pte_mkold(entry); 2137804dd150SAndrea Arcangeli if (soft_dirty) 2138804dd150SAndrea Arcangeli entry = pte_mksoft_dirty(entry); 2139292924b2SPeter Xu if (uffd_wp) 2140292924b2SPeter Xu entry = pte_mkuffd_wp(entry); 2141ba988280SKirill A. Shutemov } 21422ac015e2SKirill A. Shutemov pte = pte_offset_map(&_pmd, addr); 2143eef1b3baSKirill A. Shutemov BUG_ON(!pte_none(*pte)); 21442ac015e2SKirill A. Shutemov set_pte_at(mm, addr, pte, entry); 2145eef1b3baSKirill A. Shutemov atomic_inc(&page[i]._mapcount); 2146eef1b3baSKirill A. Shutemov pte_unmap(pte); 2147eef1b3baSKirill A. Shutemov } 2148eef1b3baSKirill A. Shutemov 2149eef1b3baSKirill A. Shutemov /* 2150eef1b3baSKirill A. Shutemov * Set PG_double_map before dropping compound_mapcount to avoid 2151eef1b3baSKirill A. Shutemov * false-negative page_mapped(). 2152eef1b3baSKirill A. Shutemov */ 2153eef1b3baSKirill A. Shutemov if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) { 2154eef1b3baSKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) 2155eef1b3baSKirill A. Shutemov atomic_inc(&page[i]._mapcount); 2156eef1b3baSKirill A. Shutemov } 2157eef1b3baSKirill A. Shutemov 2158468c3982SJohannes Weiner lock_page_memcg(page); 2159eef1b3baSKirill A. Shutemov if (atomic_add_negative(-1, compound_mapcount_ptr(page))) { 2160eef1b3baSKirill A. Shutemov /* Last compound_mapcount is gone. */ 2161468c3982SJohannes Weiner __dec_lruvec_page_state(page, NR_ANON_THPS); 2162eef1b3baSKirill A. Shutemov if (TestClearPageDoubleMap(page)) { 2163eef1b3baSKirill A. Shutemov /* No need in mapcount reference anymore */ 2164eef1b3baSKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) 2165eef1b3baSKirill A. Shutemov atomic_dec(&page[i]._mapcount); 2166eef1b3baSKirill A. Shutemov } 2167eef1b3baSKirill A. Shutemov } 2168468c3982SJohannes Weiner unlock_page_memcg(page); 2169eef1b3baSKirill A. Shutemov 2170eef1b3baSKirill A. Shutemov smp_wmb(); /* make pte visible before pmd */ 2171eef1b3baSKirill A. Shutemov pmd_populate(mm, pmd, pgtable); 2172e9b61f19SKirill A. Shutemov 2173e9b61f19SKirill A. Shutemov if (freeze) { 21742ac015e2SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) { 2175e9b61f19SKirill A. Shutemov page_remove_rmap(page + i, false); 2176e9b61f19SKirill A. Shutemov put_page(page + i); 2177e9b61f19SKirill A. Shutemov } 2178e9b61f19SKirill A. Shutemov } 2179eef1b3baSKirill A. Shutemov } 2180eef1b3baSKirill A. Shutemov 2181eef1b3baSKirill A. Shutemov void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 218233f4751eSNaoya Horiguchi unsigned long address, bool freeze, struct page *page) 2183eef1b3baSKirill A. Shutemov { 2184eef1b3baSKirill A. Shutemov spinlock_t *ptl; 2185ac46d4f3SJérôme Glisse struct mmu_notifier_range range; 2186c444eb56SAndrea Arcangeli bool was_locked = false; 2187c444eb56SAndrea Arcangeli pmd_t _pmd; 2188eef1b3baSKirill A. Shutemov 21897269f999SJérôme Glisse mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, 21906f4f13e8SJérôme Glisse address & HPAGE_PMD_MASK, 2191ac46d4f3SJérôme Glisse (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE); 2192ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 2193ac46d4f3SJérôme Glisse ptl = pmd_lock(vma->vm_mm, pmd); 219433f4751eSNaoya Horiguchi 219533f4751eSNaoya Horiguchi /* 219633f4751eSNaoya Horiguchi * If caller asks to setup a migration entries, we need a page to check 219733f4751eSNaoya Horiguchi * pmd against. Otherwise we can end up replacing wrong page. 
219833f4751eSNaoya Horiguchi */ 219933f4751eSNaoya Horiguchi VM_BUG_ON(freeze && !page); 2200c444eb56SAndrea Arcangeli if (page) { 2201c444eb56SAndrea Arcangeli VM_WARN_ON_ONCE(!PageLocked(page)); 2202c444eb56SAndrea Arcangeli was_locked = true; 2203c444eb56SAndrea Arcangeli if (page != pmd_page(*pmd)) 220433f4751eSNaoya Horiguchi goto out; 2205c444eb56SAndrea Arcangeli } 220633f4751eSNaoya Horiguchi 2207c444eb56SAndrea Arcangeli repeat: 22085c7fb56eSDan Williams if (pmd_trans_huge(*pmd)) { 2209c444eb56SAndrea Arcangeli if (!page) { 221033f4751eSNaoya Horiguchi page = pmd_page(*pmd); 2211c444eb56SAndrea Arcangeli if (unlikely(!trylock_page(page))) { 2212c444eb56SAndrea Arcangeli get_page(page); 2213c444eb56SAndrea Arcangeli _pmd = *pmd; 2214c444eb56SAndrea Arcangeli spin_unlock(ptl); 2215c444eb56SAndrea Arcangeli lock_page(page); 2216c444eb56SAndrea Arcangeli spin_lock(ptl); 2217c444eb56SAndrea Arcangeli if (unlikely(!pmd_same(*pmd, _pmd))) { 2218c444eb56SAndrea Arcangeli unlock_page(page); 2219c444eb56SAndrea Arcangeli put_page(page); 2220c444eb56SAndrea Arcangeli page = NULL; 2221c444eb56SAndrea Arcangeli goto repeat; 2222c444eb56SAndrea Arcangeli } 2223c444eb56SAndrea Arcangeli put_page(page); 2224c444eb56SAndrea Arcangeli } 2225c444eb56SAndrea Arcangeli } 2226e90309c9SKirill A. Shutemov if (PageMlocked(page)) 22275f737714SKirill A. Shutemov clear_page_mlock(page); 222884c3fc4eSZi Yan } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd))) 22295c7fb56eSDan Williams goto out; 2230ac46d4f3SJérôme Glisse __split_huge_pmd_locked(vma, pmd, range.start, freeze); 2231e90309c9SKirill A. Shutemov out: 2232eef1b3baSKirill A. Shutemov spin_unlock(ptl); 2233c444eb56SAndrea Arcangeli if (!was_locked && page) 2234c444eb56SAndrea Arcangeli unlock_page(page); 22354645b9feSJérôme Glisse /* 22364645b9feSJérôme Glisse * No need to call the mmu_notifier->invalidate_range() callback twice. 22374645b9feSJérôme Glisse * There are 3 cases to consider inside __split_huge_pmd_locked(): 22384645b9feSJérôme Glisse * 1) pmdp_huge_clear_flush_notify() obviously calls invalidate_range() 22394645b9feSJérôme Glisse * 2) __split_huge_zero_page_pmd() only deals with the read-only zero page; 22404645b9feSJérôme Glisse * any write fault will trigger a flush_notify before pointing to a new page 22414645b9feSJérôme Glisse * (it is fine if the secondary mmu keeps pointing to the old zero 22424645b9feSJérôme Glisse * page in the meantime) 22434645b9feSJérôme Glisse * 3) Splitting a huge pmd into ptes pointing to the same page. No need 22444645b9feSJérôme Glisse * to invalidate the secondary tlb entries, they are all still valid; 22454645b9feSJérôme Glisse * any further change to an individual pte will notify. So no need 22464645b9feSJérôme Glisse * to call mmu_notifier->invalidate_range() 22474645b9feSJérôme Glisse */ 2248ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_only_end(&range); 2249eef1b3baSKirill A. Shutemov } 2250eef1b3baSKirill A. Shutemov 2251fec89c10SKirill A. Shutemov void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, 2252fec89c10SKirill A. Shutemov bool freeze, struct page *page) 225394fcc585SAndrea Arcangeli { 2254f72e7dcdSHugh Dickins pgd_t *pgd; 2255c2febafcSKirill A. Shutemov p4d_t *p4d; 2256f72e7dcdSHugh Dickins pud_t *pud; 225794fcc585SAndrea Arcangeli pmd_t *pmd; 225894fcc585SAndrea Arcangeli 225978ddc534SKirill A. Shutemov pgd = pgd_offset(vma->vm_mm, address); 2260f72e7dcdSHugh Dickins if (!pgd_present(*pgd)) 2261f72e7dcdSHugh Dickins return; 2262f72e7dcdSHugh Dickins 2263c2febafcSKirill A.
Shutemov p4d = p4d_offset(pgd, address); 2264c2febafcSKirill A. Shutemov if (!p4d_present(*p4d)) 2265c2febafcSKirill A. Shutemov return; 2266c2febafcSKirill A. Shutemov 2267c2febafcSKirill A. Shutemov pud = pud_offset(p4d, address); 2268f72e7dcdSHugh Dickins if (!pud_present(*pud)) 2269f72e7dcdSHugh Dickins return; 2270f72e7dcdSHugh Dickins 2271f72e7dcdSHugh Dickins pmd = pmd_offset(pud, address); 2272fec89c10SKirill A. Shutemov 227333f4751eSNaoya Horiguchi __split_huge_pmd(vma, pmd, address, freeze, page); 227494fcc585SAndrea Arcangeli } 227594fcc585SAndrea Arcangeli 2276e1b9996bSKirill A. Shutemov void vma_adjust_trans_huge(struct vm_area_struct *vma, 227794fcc585SAndrea Arcangeli unsigned long start, 227894fcc585SAndrea Arcangeli unsigned long end, 227994fcc585SAndrea Arcangeli long adjust_next) 228094fcc585SAndrea Arcangeli { 228194fcc585SAndrea Arcangeli /* 228294fcc585SAndrea Arcangeli * If the new start address isn't hpage aligned and it could 228394fcc585SAndrea Arcangeli * previously contain an hugepage: check if we need to split 228494fcc585SAndrea Arcangeli * an huge pmd. 228594fcc585SAndrea Arcangeli */ 228694fcc585SAndrea Arcangeli if (start & ~HPAGE_PMD_MASK && 228794fcc585SAndrea Arcangeli (start & HPAGE_PMD_MASK) >= vma->vm_start && 228894fcc585SAndrea Arcangeli (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) 2289fec89c10SKirill A. Shutemov split_huge_pmd_address(vma, start, false, NULL); 229094fcc585SAndrea Arcangeli 229194fcc585SAndrea Arcangeli /* 229294fcc585SAndrea Arcangeli * If the new end address isn't hpage aligned and it could 229394fcc585SAndrea Arcangeli * previously contain an hugepage: check if we need to split 229494fcc585SAndrea Arcangeli * an huge pmd. 229594fcc585SAndrea Arcangeli */ 229694fcc585SAndrea Arcangeli if (end & ~HPAGE_PMD_MASK && 229794fcc585SAndrea Arcangeli (end & HPAGE_PMD_MASK) >= vma->vm_start && 229894fcc585SAndrea Arcangeli (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) 2299fec89c10SKirill A. Shutemov split_huge_pmd_address(vma, end, false, NULL); 230094fcc585SAndrea Arcangeli 230194fcc585SAndrea Arcangeli /* 230294fcc585SAndrea Arcangeli * If we're also updating the vma->vm_next->vm_start, if the new 230394fcc585SAndrea Arcangeli * vm_next->vm_start isn't page aligned and it could previously 230494fcc585SAndrea Arcangeli * contain an hugepage: check if we need to split an huge pmd. 230594fcc585SAndrea Arcangeli */ 230694fcc585SAndrea Arcangeli if (adjust_next > 0) { 230794fcc585SAndrea Arcangeli struct vm_area_struct *next = vma->vm_next; 230894fcc585SAndrea Arcangeli unsigned long nstart = next->vm_start; 230994fcc585SAndrea Arcangeli nstart += adjust_next << PAGE_SHIFT; 231094fcc585SAndrea Arcangeli if (nstart & ~HPAGE_PMD_MASK && 231194fcc585SAndrea Arcangeli (nstart & HPAGE_PMD_MASK) >= next->vm_start && 231294fcc585SAndrea Arcangeli (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end) 2313fec89c10SKirill A. Shutemov split_huge_pmd_address(next, nstart, false, NULL); 231494fcc585SAndrea Arcangeli } 231594fcc585SAndrea Arcangeli } 2316e9b61f19SKirill A. Shutemov 2317906f9cdfSHugh Dickins static void unmap_page(struct page *page) 2318e9b61f19SKirill A. Shutemov { 2319baa355fdSKirill A. Shutemov enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS | 2320c7ab0d2fSKirill A. Shutemov TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD; 2321666e5a40SMinchan Kim bool unmap_success; 2322e9b61f19SKirill A. Shutemov 2323e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(!PageHead(page), page); 2324e9b61f19SKirill A. 
Shutemov 2325baa355fdSKirill A. Shutemov if (PageAnon(page)) 2326b5ff8161SNaoya Horiguchi ttu_flags |= TTU_SPLIT_FREEZE; 2327baa355fdSKirill A. Shutemov 2328666e5a40SMinchan Kim unmap_success = try_to_unmap(page, ttu_flags); 2329666e5a40SMinchan Kim VM_BUG_ON_PAGE(!unmap_success, page); 2330bd56086fSKirill A. Shutemov } 2331bd56086fSKirill A. Shutemov 2332906f9cdfSHugh Dickins static void remap_page(struct page *page) 2333e9b61f19SKirill A. Shutemov { 2334fec89c10SKirill A. Shutemov int i; 2335ace71a19SKirill A. Shutemov if (PageTransHuge(page)) { 2336ace71a19SKirill A. Shutemov remove_migration_ptes(page, page, true); 2337ace71a19SKirill A. Shutemov } else { 2338fec89c10SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) 2339fec89c10SKirill A. Shutemov remove_migration_ptes(page + i, page + i, true); 2340e9b61f19SKirill A. Shutemov } 2341ace71a19SKirill A. Shutemov } 2342e9b61f19SKirill A. Shutemov 23438df651c7SKirill A. Shutemov static void __split_huge_page_tail(struct page *head, int tail, 2344e9b61f19SKirill A. Shutemov struct lruvec *lruvec, struct list_head *list) 2345e9b61f19SKirill A. Shutemov { 2346e9b61f19SKirill A. Shutemov struct page *page_tail = head + tail; 2347e9b61f19SKirill A. Shutemov 23488df651c7SKirill A. Shutemov VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail); 2349e9b61f19SKirill A. Shutemov 2350e9b61f19SKirill A. Shutemov /* 2351605ca5edSKonstantin Khlebnikov * Clone page flags before unfreezing refcount. 2352605ca5edSKonstantin Khlebnikov * 2353605ca5edSKonstantin Khlebnikov * After successful get_page_unless_zero() might follow flags change, 2354605ca5edSKonstantin Khlebnikov * for example lock_page() which sets PG_waiters. 2355e9b61f19SKirill A. Shutemov */ 2356e9b61f19SKirill A. Shutemov page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 2357e9b61f19SKirill A. Shutemov page_tail->flags |= (head->flags & 2358e9b61f19SKirill A. Shutemov ((1L << PG_referenced) | 2359e9b61f19SKirill A. Shutemov (1L << PG_swapbacked) | 236038d8b4e6SHuang Ying (1L << PG_swapcache) | 2361e9b61f19SKirill A. Shutemov (1L << PG_mlocked) | 2362e9b61f19SKirill A. Shutemov (1L << PG_uptodate) | 2363e9b61f19SKirill A. Shutemov (1L << PG_active) | 23641899ad18SJohannes Weiner (1L << PG_workingset) | 2365e9b61f19SKirill A. Shutemov (1L << PG_locked) | 2366b8d3c4c3SMinchan Kim (1L << PG_unevictable) | 2367b8d3c4c3SMinchan Kim (1L << PG_dirty))); 2368e9b61f19SKirill A. Shutemov 2369173d9d9fSHugh Dickins /* ->mapping in first tail page is compound_mapcount */ 2370173d9d9fSHugh Dickins VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING, 2371173d9d9fSHugh Dickins page_tail); 2372173d9d9fSHugh Dickins page_tail->mapping = head->mapping; 2373173d9d9fSHugh Dickins page_tail->index = head->index + tail; 2374173d9d9fSHugh Dickins 2375605ca5edSKonstantin Khlebnikov /* Page flags must be visible before we make the page non-compound. */ 2376e9b61f19SKirill A. Shutemov smp_wmb(); 2377e9b61f19SKirill A. Shutemov 2378605ca5edSKonstantin Khlebnikov /* 2379605ca5edSKonstantin Khlebnikov * Clear PageTail before unfreezing page refcount. 2380605ca5edSKonstantin Khlebnikov * 2381605ca5edSKonstantin Khlebnikov * After successful get_page_unless_zero() might follow put_page() 2382605ca5edSKonstantin Khlebnikov * which needs correct compound_head(). 2383605ca5edSKonstantin Khlebnikov */ 2384e9b61f19SKirill A. Shutemov clear_compound_head(page_tail); 2385e9b61f19SKirill A. Shutemov 2386605ca5edSKonstantin Khlebnikov /* Finally unfreeze refcount. Additional reference from page cache.
*/ 2387605ca5edSKonstantin Khlebnikov page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) || 2388605ca5edSKonstantin Khlebnikov PageSwapCache(head))); 2389605ca5edSKonstantin Khlebnikov 2390e9b61f19SKirill A. Shutemov if (page_is_young(head)) 2391e9b61f19SKirill A. Shutemov set_page_young(page_tail); 2392e9b61f19SKirill A. Shutemov if (page_is_idle(head)) 2393e9b61f19SKirill A. Shutemov set_page_idle(page_tail); 2394e9b61f19SKirill A. Shutemov 2395e9b61f19SKirill A. Shutemov page_cpupid_xchg_last(page_tail, page_cpupid_last(head)); 239694723aafSMichal Hocko 239794723aafSMichal Hocko /* 239894723aafSMichal Hocko * always add to the tail because some iterators expect new 239994723aafSMichal Hocko * pages to show after the currently processed elements - e.g. 240094723aafSMichal Hocko * migrate_pages 240194723aafSMichal Hocko */ 2402e9b61f19SKirill A. Shutemov lru_add_page_tail(head, page_tail, lruvec, list); 2403e9b61f19SKirill A. Shutemov } 2404e9b61f19SKirill A. Shutemov 2405baa355fdSKirill A. Shutemov static void __split_huge_page(struct page *page, struct list_head *list, 2406006d3ff2SHugh Dickins pgoff_t end, unsigned long flags) 2407e9b61f19SKirill A. Shutemov { 2408e9b61f19SKirill A. Shutemov struct page *head = compound_head(page); 2409f4b7e272SAndrey Ryabinin pg_data_t *pgdat = page_pgdat(head); 2410e9b61f19SKirill A. Shutemov struct lruvec *lruvec; 24114101196bSMatthew Wilcox (Oracle) struct address_space *swap_cache = NULL; 24124101196bSMatthew Wilcox (Oracle) unsigned long offset = 0; 24138df651c7SKirill A. Shutemov int i; 2414e9b61f19SKirill A. Shutemov 2415f4b7e272SAndrey Ryabinin lruvec = mem_cgroup_page_lruvec(head, pgdat); 2416e9b61f19SKirill A. Shutemov 2417e9b61f19SKirill A. Shutemov /* complete memcg works before add pages to LRU */ 2418e9b61f19SKirill A. Shutemov mem_cgroup_split_huge_fixup(head); 2419e9b61f19SKirill A. Shutemov 24204101196bSMatthew Wilcox (Oracle) if (PageAnon(head) && PageSwapCache(head)) { 24214101196bSMatthew Wilcox (Oracle) swp_entry_t entry = { .val = page_private(head) }; 24224101196bSMatthew Wilcox (Oracle) 24234101196bSMatthew Wilcox (Oracle) offset = swp_offset(entry); 24244101196bSMatthew Wilcox (Oracle) swap_cache = swap_address_space(entry); 24254101196bSMatthew Wilcox (Oracle) xa_lock(&swap_cache->i_pages); 24264101196bSMatthew Wilcox (Oracle) } 24274101196bSMatthew Wilcox (Oracle) 2428baa355fdSKirill A. Shutemov for (i = HPAGE_PMD_NR - 1; i >= 1; i--) { 24298df651c7SKirill A. Shutemov __split_huge_page_tail(head, i, lruvec, list); 2430baa355fdSKirill A. Shutemov /* Some pages can be beyond i_size: drop them from page cache */ 2431baa355fdSKirill A. Shutemov if (head[i].index >= end) { 24322d077d4bSHugh Dickins ClearPageDirty(head + i); 2433baa355fdSKirill A. Shutemov __delete_from_page_cache(head + i, NULL); 2434800d8c63SKirill A. Shutemov if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head)) 2435800d8c63SKirill A. Shutemov shmem_uncharge(head->mapping->host, 1); 2436baa355fdSKirill A. Shutemov put_page(head + i); 24374101196bSMatthew Wilcox (Oracle) } else if (!PageAnon(page)) { 24384101196bSMatthew Wilcox (Oracle) __xa_store(&head->mapping->i_pages, head[i].index, 24394101196bSMatthew Wilcox (Oracle) head + i, 0); 24404101196bSMatthew Wilcox (Oracle) } else if (swap_cache) { 24414101196bSMatthew Wilcox (Oracle) __xa_store(&swap_cache->i_pages, offset + i, 24424101196bSMatthew Wilcox (Oracle) head + i, 0); 2443baa355fdSKirill A. Shutemov } 2444baa355fdSKirill A. Shutemov } 2445e9b61f19SKirill A. Shutemov 2446e9b61f19SKirill A. 
Shutemov ClearPageCompound(head); 2447f7da677bSVlastimil Babka 2448f7da677bSVlastimil Babka split_page_owner(head, HPAGE_PMD_ORDER); 2449f7da677bSVlastimil Babka 2450baa355fdSKirill A. Shutemov /* See comment in __split_huge_page_tail() */ 2451baa355fdSKirill A. Shutemov if (PageAnon(head)) { 2452aa5dc07fSMatthew Wilcox /* Additional pin to swap cache */ 24534101196bSMatthew Wilcox (Oracle) if (PageSwapCache(head)) { 245438d8b4e6SHuang Ying page_ref_add(head, 2); 24554101196bSMatthew Wilcox (Oracle) xa_unlock(&swap_cache->i_pages); 24564101196bSMatthew Wilcox (Oracle) } else { 2457baa355fdSKirill A. Shutemov page_ref_inc(head); 24584101196bSMatthew Wilcox (Oracle) } 2459baa355fdSKirill A. Shutemov } else { 2460aa5dc07fSMatthew Wilcox /* Additional pin to page cache */ 2461baa355fdSKirill A. Shutemov page_ref_add(head, 2); 2462b93b0163SMatthew Wilcox xa_unlock(&head->mapping->i_pages); 2463baa355fdSKirill A. Shutemov } 2464baa355fdSKirill A. Shutemov 2465f4b7e272SAndrey Ryabinin spin_unlock_irqrestore(&pgdat->lru_lock, flags); 2466e9b61f19SKirill A. Shutemov 2467906f9cdfSHugh Dickins remap_page(head); 2468e9b61f19SKirill A. Shutemov 2469e9b61f19SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) { 2470e9b61f19SKirill A. Shutemov struct page *subpage = head + i; 2471e9b61f19SKirill A. Shutemov if (subpage == page) 2472e9b61f19SKirill A. Shutemov continue; 2473e9b61f19SKirill A. Shutemov unlock_page(subpage); 2474e9b61f19SKirill A. Shutemov 2475e9b61f19SKirill A. Shutemov /* 2476e9b61f19SKirill A. Shutemov * Subpages may be freed if there wasn't any mapping 2477e9b61f19SKirill A. Shutemov * for example if add_to_swap() is running on an lru page that 2478e9b61f19SKirill A. Shutemov * had its mapping zapped. And freeing these pages 2479e9b61f19SKirill A. Shutemov * requires taking the lru_lock so we do the put_page 2480e9b61f19SKirill A. Shutemov * of the tail pages after the split is complete. 2481e9b61f19SKirill A. Shutemov */ 2482e9b61f19SKirill A. Shutemov put_page(subpage); 2483e9b61f19SKirill A. Shutemov } 2484e9b61f19SKirill A. Shutemov } 2485e9b61f19SKirill A. Shutemov 2486b20ce5e0SKirill A. Shutemov int total_mapcount(struct page *page) 2487b20ce5e0SKirill A. Shutemov { 2488dd78feddSKirill A. Shutemov int i, compound, ret; 2489b20ce5e0SKirill A. Shutemov 2490b20ce5e0SKirill A. Shutemov VM_BUG_ON_PAGE(PageTail(page), page); 2491b20ce5e0SKirill A. Shutemov 2492b20ce5e0SKirill A. Shutemov if (likely(!PageCompound(page))) 2493b20ce5e0SKirill A. Shutemov return atomic_read(&page->_mapcount) + 1; 2494b20ce5e0SKirill A. Shutemov 2495dd78feddSKirill A. Shutemov compound = compound_mapcount(page); 2496b20ce5e0SKirill A. Shutemov if (PageHuge(page)) 2497dd78feddSKirill A. Shutemov return compound; 2498dd78feddSKirill A. Shutemov ret = compound; 2499b20ce5e0SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) 2500b20ce5e0SKirill A. Shutemov ret += atomic_read(&page[i]._mapcount) + 1; 2501dd78feddSKirill A. Shutemov /* File pages have compound_mapcount included in _mapcount */ 2502dd78feddSKirill A. Shutemov if (!PageAnon(page)) 2503dd78feddSKirill A. Shutemov return ret - compound * HPAGE_PMD_NR; 2504b20ce5e0SKirill A. Shutemov if (PageDoubleMap(page)) 2505b20ce5e0SKirill A. Shutemov ret -= HPAGE_PMD_NR; 2506b20ce5e0SKirill A. Shutemov return ret; 2507b20ce5e0SKirill A. Shutemov } 2508b20ce5e0SKirill A. Shutemov 2509e9b61f19SKirill A.
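/*
 * Worked example (illustrative, not from the original source): consider an
 * anon THP with HPAGE_PMD_NR == 512 (a 2M THP on 4K base pages) that is
 * PMD-mapped by two processes, after one of those PMDs has been split into
 * ptes by __split_huge_pmd_locked() above.  PG_double_map is set,
 * compound_mapcount() is 1 (the remaining PMD mapping), and every subpage
 * carries one PTE mapping plus the extra count taken when PG_double_map was
 * set, so page_mapcount() of each subpage reads 2.  total_mapcount() then
 * returns 1 + 512 * 2 - 512 = 513, i.e. one PMD mapping plus 512 PTE
 * mappings, while page_trans_huge_mapcount() below reports 2 as the highest
 * per-subpage mapcount.
 */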
Shutemov /* 25106d0a07edSAndrea Arcangeli * This calculates accurately how many mappings a transparent hugepage 25116d0a07edSAndrea Arcangeli * has (unlike page_mapcount() which isn't fully accurate). This full 25126d0a07edSAndrea Arcangeli * accuracy is primarily needed to know if copy-on-write faults can 25136d0a07edSAndrea Arcangeli * reuse the page and change the mapping to read-write instead of 25146d0a07edSAndrea Arcangeli * copying them. At the same time this returns the total_mapcount too. 25156d0a07edSAndrea Arcangeli * 25166d0a07edSAndrea Arcangeli * The function returns the highest mapcount any one of the subpages 25176d0a07edSAndrea Arcangeli * has. If the return value is one, even if different processes are 25186d0a07edSAndrea Arcangeli * mapping different subpages of the transparent hugepage, they can 25196d0a07edSAndrea Arcangeli * all reuse it, because each process is reusing a different subpage. 25206d0a07edSAndrea Arcangeli * 25216d0a07edSAndrea Arcangeli * The total_mapcount is instead counting all virtual mappings of the 25226d0a07edSAndrea Arcangeli * subpages. If the total_mapcount is equal to "one", it tells the 25236d0a07edSAndrea Arcangeli * caller all mappings belong to the same "mm" and in turn the 25246d0a07edSAndrea Arcangeli * anon_vma of the transparent hugepage can become the vma->anon_vma 25256d0a07edSAndrea Arcangeli * local one as no other process may be mapping any of the subpages. 25266d0a07edSAndrea Arcangeli * 25276d0a07edSAndrea Arcangeli * It would be more accurate to replace page_mapcount() with 25286d0a07edSAndrea Arcangeli * page_trans_huge_mapcount(), however we only use 25296d0a07edSAndrea Arcangeli * page_trans_huge_mapcount() in the copy-on-write faults where we 25306d0a07edSAndrea Arcangeli * need full accuracy to avoid breaking page pinning, because 25316d0a07edSAndrea Arcangeli * page_trans_huge_mapcount() is slower than page_mapcount(). 
25326d0a07edSAndrea Arcangeli */ 25336d0a07edSAndrea Arcangeli int page_trans_huge_mapcount(struct page *page, int *total_mapcount) 25346d0a07edSAndrea Arcangeli { 25356d0a07edSAndrea Arcangeli int i, ret, _total_mapcount, mapcount; 25366d0a07edSAndrea Arcangeli 25376d0a07edSAndrea Arcangeli /* hugetlbfs shouldn't call it */ 25386d0a07edSAndrea Arcangeli VM_BUG_ON_PAGE(PageHuge(page), page); 25396d0a07edSAndrea Arcangeli 25406d0a07edSAndrea Arcangeli if (likely(!PageTransCompound(page))) { 25416d0a07edSAndrea Arcangeli mapcount = atomic_read(&page->_mapcount) + 1; 25426d0a07edSAndrea Arcangeli if (total_mapcount) 25436d0a07edSAndrea Arcangeli *total_mapcount = mapcount; 25446d0a07edSAndrea Arcangeli return mapcount; 25456d0a07edSAndrea Arcangeli } 25466d0a07edSAndrea Arcangeli 25476d0a07edSAndrea Arcangeli page = compound_head(page); 25486d0a07edSAndrea Arcangeli 25496d0a07edSAndrea Arcangeli _total_mapcount = ret = 0; 25506d0a07edSAndrea Arcangeli for (i = 0; i < HPAGE_PMD_NR; i++) { 25516d0a07edSAndrea Arcangeli mapcount = atomic_read(&page[i]._mapcount) + 1; 25526d0a07edSAndrea Arcangeli ret = max(ret, mapcount); 25536d0a07edSAndrea Arcangeli _total_mapcount += mapcount; 25546d0a07edSAndrea Arcangeli } 25556d0a07edSAndrea Arcangeli if (PageDoubleMap(page)) { 25566d0a07edSAndrea Arcangeli ret -= 1; 25576d0a07edSAndrea Arcangeli _total_mapcount -= HPAGE_PMD_NR; 25586d0a07edSAndrea Arcangeli } 25596d0a07edSAndrea Arcangeli mapcount = compound_mapcount(page); 25606d0a07edSAndrea Arcangeli ret += mapcount; 25616d0a07edSAndrea Arcangeli _total_mapcount += mapcount; 25626d0a07edSAndrea Arcangeli if (total_mapcount) 25636d0a07edSAndrea Arcangeli *total_mapcount = _total_mapcount; 25646d0a07edSAndrea Arcangeli return ret; 25656d0a07edSAndrea Arcangeli } 25666d0a07edSAndrea Arcangeli 2567b8f593cdSHuang Ying /* Racy check whether the huge page can be split */ 2568b8f593cdSHuang Ying bool can_split_huge_page(struct page *page, int *pextra_pins) 2569b8f593cdSHuang Ying { 2570b8f593cdSHuang Ying int extra_pins; 2571b8f593cdSHuang Ying 2572aa5dc07fSMatthew Wilcox /* Additional pins from page cache */ 2573b8f593cdSHuang Ying if (PageAnon(page)) 2574b8f593cdSHuang Ying extra_pins = PageSwapCache(page) ? HPAGE_PMD_NR : 0; 2575b8f593cdSHuang Ying else 2576b8f593cdSHuang Ying extra_pins = HPAGE_PMD_NR; 2577b8f593cdSHuang Ying if (pextra_pins) 2578b8f593cdSHuang Ying *pextra_pins = extra_pins; 2579b8f593cdSHuang Ying return total_mapcount(page) == page_count(page) - extra_pins - 1; 2580b8f593cdSHuang Ying } 2581b8f593cdSHuang Ying 25826d0a07edSAndrea Arcangeli /* 2583e9b61f19SKirill A. Shutemov * This function splits huge page into normal pages. @page can point to any 2584e9b61f19SKirill A. Shutemov * subpage of huge page to split. Split doesn't change the position of @page. 2585e9b61f19SKirill A. Shutemov * 2586e9b61f19SKirill A. Shutemov * Only caller must hold pin on the @page, otherwise split fails with -EBUSY. 2587e9b61f19SKirill A. Shutemov * The huge page must be locked. 2588e9b61f19SKirill A. Shutemov * 2589e9b61f19SKirill A. Shutemov * If @list is null, tail pages will be added to LRU list, otherwise, to @list. 2590e9b61f19SKirill A. Shutemov * 2591e9b61f19SKirill A. Shutemov * Both head page and tail pages will inherit mapping, flags, and so on from 2592e9b61f19SKirill A. Shutemov * the hugepage. 2593e9b61f19SKirill A. Shutemov * 2594e9b61f19SKirill A. Shutemov * GUP pin and PG_locked transferred to @page. Rest subpages can be freed if 2595e9b61f19SKirill A. Shutemov * they are not mapped. 
2596e9b61f19SKirill A. Shutemov * 2597e9b61f19SKirill A. Shutemov * Returns 0 if the hugepage is split successfully. 2598e9b61f19SKirill A. Shutemov * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under 2599e9b61f19SKirill A. Shutemov * us. 2600e9b61f19SKirill A. Shutemov */ 2601e9b61f19SKirill A. Shutemov int split_huge_page_to_list(struct page *page, struct list_head *list) 2602e9b61f19SKirill A. Shutemov { 2603e9b61f19SKirill A. Shutemov struct page *head = compound_head(page); 2604a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(page_to_nid(head)); 2605a8803e6cSWei Yang struct deferred_split *ds_queue = get_deferred_split_queue(head); 2606baa355fdSKirill A. Shutemov struct anon_vma *anon_vma = NULL; 2607baa355fdSKirill A. Shutemov struct address_space *mapping = NULL; 2608baa355fdSKirill A. Shutemov int count, mapcount, extra_pins, ret; 26090b9b6fffSKirill A. Shutemov unsigned long flags; 2610006d3ff2SHugh Dickins pgoff_t end; 2611e9b61f19SKirill A. Shutemov 2612cb829624SWei Yang VM_BUG_ON_PAGE(is_huge_zero_page(head), head); 2613a8803e6cSWei Yang VM_BUG_ON_PAGE(!PageLocked(head), head); 2614a8803e6cSWei Yang VM_BUG_ON_PAGE(!PageCompound(head), head); 2615e9b61f19SKirill A. Shutemov 2616a8803e6cSWei Yang if (PageWriteback(head)) 261759807685SHuang Ying return -EBUSY; 261859807685SHuang Ying 2619baa355fdSKirill A. Shutemov if (PageAnon(head)) { 2620e9b61f19SKirill A. Shutemov /* 2621*c1e8d7c6SMichel Lespinasse * The caller does not necessarily hold an mmap_lock that would 2622baa355fdSKirill A. Shutemov * prevent the anon_vma from disappearing, so we first take a 2623baa355fdSKirill A. Shutemov * reference to it and then lock the anon_vma for write. This 2624baa355fdSKirill A. Shutemov * is similar to page_lock_anon_vma_read except the write lock 2625baa355fdSKirill A. Shutemov * is taken to serialise against parallel split or collapse 2626baa355fdSKirill A. Shutemov * operations. 2627e9b61f19SKirill A. Shutemov */ 2628e9b61f19SKirill A. Shutemov anon_vma = page_get_anon_vma(head); 2629e9b61f19SKirill A. Shutemov if (!anon_vma) { 2630e9b61f19SKirill A. Shutemov ret = -EBUSY; 2631e9b61f19SKirill A. Shutemov goto out; 2632e9b61f19SKirill A. Shutemov } 2633006d3ff2SHugh Dickins end = -1; 2634baa355fdSKirill A. Shutemov mapping = NULL; 2635e9b61f19SKirill A. Shutemov anon_vma_lock_write(anon_vma); 2636baa355fdSKirill A. Shutemov } else { 2637baa355fdSKirill A. Shutemov mapping = head->mapping; 2638baa355fdSKirill A. Shutemov 2639baa355fdSKirill A. Shutemov /* Truncated? */ 2640baa355fdSKirill A. Shutemov if (!mapping) { 2641baa355fdSKirill A. Shutemov ret = -EBUSY; 2642baa355fdSKirill A. Shutemov goto out; 2643baa355fdSKirill A. Shutemov } 2644baa355fdSKirill A. Shutemov 2645baa355fdSKirill A. Shutemov anon_vma = NULL; 2646baa355fdSKirill A. Shutemov i_mmap_lock_read(mapping); 2647006d3ff2SHugh Dickins 2648006d3ff2SHugh Dickins /* 2649006d3ff2SHugh Dickins * __split_huge_page() may need to trim off pages beyond EOF: 2650006d3ff2SHugh Dickins * but on 32-bit, i_size_read() takes an irq-unsafe seqlock, 2651006d3ff2SHugh Dickins * which cannot be nested inside the page tree lock. So note 2652006d3ff2SHugh Dickins * end now: i_size itself may be changed at any moment, but 2653006d3ff2SHugh Dickins * head page lock is good enough to serialize the trimming. 2654006d3ff2SHugh Dickins */ 2655006d3ff2SHugh Dickins end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); 2656baa355fdSKirill A. Shutemov } 2657e9b61f19SKirill A. Shutemov 2658e9b61f19SKirill A.
Shutemov /* 2659906f9cdfSHugh Dickins * Racy check if we can split the page, before unmap_page() will 2660e9b61f19SKirill A. Shutemov * split PMDs 2661e9b61f19SKirill A. Shutemov */ 2662b8f593cdSHuang Ying if (!can_split_huge_page(head, &extra_pins)) { 2663e9b61f19SKirill A. Shutemov ret = -EBUSY; 2664e9b61f19SKirill A. Shutemov goto out_unlock; 2665e9b61f19SKirill A. Shutemov } 2666e9b61f19SKirill A. Shutemov 2667906f9cdfSHugh Dickins unmap_page(head); 2668e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(compound_mapcount(head), head); 2669e9b61f19SKirill A. Shutemov 2670baa355fdSKirill A. Shutemov /* prevent PageLRU to go away from under us, and freeze lru stats */ 2671f4b7e272SAndrey Ryabinin spin_lock_irqsave(&pgdata->lru_lock, flags); 2672baa355fdSKirill A. Shutemov 2673baa355fdSKirill A. Shutemov if (mapping) { 2674aa5dc07fSMatthew Wilcox XA_STATE(xas, &mapping->i_pages, page_index(head)); 2675baa355fdSKirill A. Shutemov 2676baa355fdSKirill A. Shutemov /* 2677aa5dc07fSMatthew Wilcox * Check if the head page is present in page cache. 2678baa355fdSKirill A. Shutemov * We assume all tail are present too, if head is there. 2679baa355fdSKirill A. Shutemov */ 2680aa5dc07fSMatthew Wilcox xa_lock(&mapping->i_pages); 2681aa5dc07fSMatthew Wilcox if (xas_load(&xas) != head) 2682baa355fdSKirill A. Shutemov goto fail; 2683baa355fdSKirill A. Shutemov } 2684baa355fdSKirill A. Shutemov 26850139aa7bSJoonsoo Kim /* Prevent deferred_split_scan() touching ->_refcount */ 2686364c1eebSYang Shi spin_lock(&ds_queue->split_queue_lock); 2687e9b61f19SKirill A. Shutemov count = page_count(head); 2688e9b61f19SKirill A. Shutemov mapcount = total_mapcount(head); 2689baa355fdSKirill A. Shutemov if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) { 26909a982250SKirill A. Shutemov if (!list_empty(page_deferred_list(head))) { 2691364c1eebSYang Shi ds_queue->split_queue_len--; 26929a982250SKirill A. Shutemov list_del(page_deferred_list(head)); 26939a982250SKirill A. Shutemov } 2694afb97172SWei Yang spin_unlock(&ds_queue->split_queue_lock); 269506d3eff6SKirill A. Shutemov if (mapping) { 2696a8803e6cSWei Yang if (PageSwapBacked(head)) 2697a8803e6cSWei Yang __dec_node_page_state(head, NR_SHMEM_THPS); 269806d3eff6SKirill A. Shutemov else 2699a8803e6cSWei Yang __dec_node_page_state(head, NR_FILE_THPS); 270006d3eff6SKirill A. Shutemov } 270106d3eff6SKirill A. Shutemov 2702006d3ff2SHugh Dickins __split_huge_page(page, list, end, flags); 270359807685SHuang Ying if (PageSwapCache(head)) { 270459807685SHuang Ying swp_entry_t entry = { .val = page_private(head) }; 270559807685SHuang Ying 270659807685SHuang Ying ret = split_swap_cluster(entry); 270759807685SHuang Ying } else 2708e9b61f19SKirill A. Shutemov ret = 0; 2709baa355fdSKirill A. Shutemov } else { 2710baa355fdSKirill A. Shutemov if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) { 2711e9b61f19SKirill A. Shutemov pr_alert("total_mapcount: %u, page_count(): %u\n", 2712e9b61f19SKirill A. Shutemov mapcount, count); 2713e9b61f19SKirill A. Shutemov if (PageTail(page)) 2714e9b61f19SKirill A. Shutemov dump_page(head, NULL); 2715bd56086fSKirill A. Shutemov dump_page(page, "total_mapcount(head) > 0"); 2716e9b61f19SKirill A. Shutemov BUG(); 2717baa355fdSKirill A. Shutemov } 2718364c1eebSYang Shi spin_unlock(&ds_queue->split_queue_lock); 2719baa355fdSKirill A. Shutemov fail: if (mapping) 2720b93b0163SMatthew Wilcox xa_unlock(&mapping->i_pages); 2721f4b7e272SAndrey Ryabinin spin_unlock_irqrestore(&pgdata->lru_lock, flags); 2722906f9cdfSHugh Dickins remap_page(head); 2723e9b61f19SKirill A. 
Shutemov ret = -EBUSY; 2724e9b61f19SKirill A. Shutemov } 2725e9b61f19SKirill A. Shutemov 2726e9b61f19SKirill A. Shutemov out_unlock: 2727baa355fdSKirill A. Shutemov if (anon_vma) { 2728e9b61f19SKirill A. Shutemov anon_vma_unlock_write(anon_vma); 2729e9b61f19SKirill A. Shutemov put_anon_vma(anon_vma); 2730baa355fdSKirill A. Shutemov } 2731baa355fdSKirill A. Shutemov if (mapping) 2732baa355fdSKirill A. Shutemov i_mmap_unlock_read(mapping); 2733e9b61f19SKirill A. Shutemov out: 2734e9b61f19SKirill A. Shutemov count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); 2735e9b61f19SKirill A. Shutemov return ret; 2736e9b61f19SKirill A. Shutemov } 27379a982250SKirill A. Shutemov 27389a982250SKirill A. Shutemov void free_transhuge_page(struct page *page) 27399a982250SKirill A. Shutemov { 274087eaceb3SYang Shi struct deferred_split *ds_queue = get_deferred_split_queue(page); 27419a982250SKirill A. Shutemov unsigned long flags; 27429a982250SKirill A. Shutemov 2743364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 27449a982250SKirill A. Shutemov if (!list_empty(page_deferred_list(page))) { 2745364c1eebSYang Shi ds_queue->split_queue_len--; 27469a982250SKirill A. Shutemov list_del(page_deferred_list(page)); 27479a982250SKirill A. Shutemov } 2748364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 27499a982250SKirill A. Shutemov free_compound_page(page); 27509a982250SKirill A. Shutemov } 27519a982250SKirill A. Shutemov 27529a982250SKirill A. Shutemov void deferred_split_huge_page(struct page *page) 27539a982250SKirill A. Shutemov { 275487eaceb3SYang Shi struct deferred_split *ds_queue = get_deferred_split_queue(page); 275587eaceb3SYang Shi #ifdef CONFIG_MEMCG 275687eaceb3SYang Shi struct mem_cgroup *memcg = compound_head(page)->mem_cgroup; 275787eaceb3SYang Shi #endif 27589a982250SKirill A. Shutemov unsigned long flags; 27599a982250SKirill A. Shutemov 27609a982250SKirill A. Shutemov VM_BUG_ON_PAGE(!PageTransHuge(page), page); 27619a982250SKirill A. Shutemov 276287eaceb3SYang Shi /* 276387eaceb3SYang Shi * The try_to_unmap() in page reclaim path might reach here too, 276487eaceb3SYang Shi * this may cause a race condition to corrupt deferred split queue. 276587eaceb3SYang Shi * And, if page reclaim is already handling the same page, it is 276687eaceb3SYang Shi * unnecessary to handle it again in shrinker. 276787eaceb3SYang Shi * 276887eaceb3SYang Shi * Check PageSwapCache to determine if the page is being 276987eaceb3SYang Shi * handled by page reclaim since THP swap would add the page into 277087eaceb3SYang Shi * swap cache before calling try_to_unmap(). 277187eaceb3SYang Shi */ 277287eaceb3SYang Shi if (PageSwapCache(page)) 277387eaceb3SYang Shi return; 277487eaceb3SYang Shi 2775364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 27769a982250SKirill A. Shutemov if (list_empty(page_deferred_list(page))) { 2777f9719a03SKirill A. Shutemov count_vm_event(THP_DEFERRED_SPLIT_PAGE); 2778364c1eebSYang Shi list_add_tail(page_deferred_list(page), &ds_queue->split_queue); 2779364c1eebSYang Shi ds_queue->split_queue_len++; 278087eaceb3SYang Shi #ifdef CONFIG_MEMCG 278187eaceb3SYang Shi if (memcg) 278287eaceb3SYang Shi memcg_set_shrinker_bit(memcg, page_to_nid(page), 278387eaceb3SYang Shi deferred_split_shrinker.id); 278487eaceb3SYang Shi #endif 27859a982250SKirill A. Shutemov } 2786364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 27879a982250SKirill A. Shutemov } 27889a982250SKirill A. 
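/*
 * Illustrative summary (added comment, not from the original source):
 * deferred_split_huge_page() above only queues the compound page; the
 * actual split is performed later, under memory pressure, by the shrinker
 * below.  deferred_split_count() reports the current queue length to the
 * shrinker core, while deferred_split_scan() pins each queued head page,
 * tries to lock it and calls split_huge_page() on it, splicing whatever
 * could not be split back onto the queue.
 */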
Shutemov 27899a982250SKirill A. Shutemov static unsigned long deferred_split_count(struct shrinker *shrink, 27909a982250SKirill A. Shutemov struct shrink_control *sc) 27919a982250SKirill A. Shutemov { 2792a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(sc->nid); 2793364c1eebSYang Shi struct deferred_split *ds_queue = &pgdata->deferred_split_queue; 279487eaceb3SYang Shi 279587eaceb3SYang Shi #ifdef CONFIG_MEMCG 279687eaceb3SYang Shi if (sc->memcg) 279787eaceb3SYang Shi ds_queue = &sc->memcg->deferred_split_queue; 279887eaceb3SYang Shi #endif 2799364c1eebSYang Shi return READ_ONCE(ds_queue->split_queue_len); 28009a982250SKirill A. Shutemov } 28019a982250SKirill A. Shutemov 28029a982250SKirill A. Shutemov static unsigned long deferred_split_scan(struct shrinker *shrink, 28039a982250SKirill A. Shutemov struct shrink_control *sc) 28049a982250SKirill A. Shutemov { 2805a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(sc->nid); 2806364c1eebSYang Shi struct deferred_split *ds_queue = &pgdata->deferred_split_queue; 28079a982250SKirill A. Shutemov unsigned long flags; 28089a982250SKirill A. Shutemov LIST_HEAD(list), *pos, *next; 28099a982250SKirill A. Shutemov struct page *page; 28109a982250SKirill A. Shutemov int split = 0; 28119a982250SKirill A. Shutemov 281287eaceb3SYang Shi #ifdef CONFIG_MEMCG 281387eaceb3SYang Shi if (sc->memcg) 281487eaceb3SYang Shi ds_queue = &sc->memcg->deferred_split_queue; 281587eaceb3SYang Shi #endif 281687eaceb3SYang Shi 2817364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 28189a982250SKirill A. Shutemov /* Take pin on all head pages to avoid freeing them under us */ 2819364c1eebSYang Shi list_for_each_safe(pos, next, &ds_queue->split_queue) { 28209a982250SKirill A. Shutemov page = list_entry((void *)pos, struct page, mapping); 28219a982250SKirill A. Shutemov page = compound_head(page); 2822e3ae1953SKirill A. Shutemov if (get_page_unless_zero(page)) { 2823e3ae1953SKirill A. Shutemov list_move(page_deferred_list(page), &list); 2824e3ae1953SKirill A. Shutemov } else { 2825e3ae1953SKirill A. Shutemov /* We lost race with put_compound_page() */ 28269a982250SKirill A. Shutemov list_del_init(page_deferred_list(page)); 2827364c1eebSYang Shi ds_queue->split_queue_len--; 28289a982250SKirill A. Shutemov } 2829e3ae1953SKirill A. Shutemov if (!--sc->nr_to_scan) 2830e3ae1953SKirill A. Shutemov break; 28319a982250SKirill A. Shutemov } 2832364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 28339a982250SKirill A. Shutemov 28349a982250SKirill A. Shutemov list_for_each_safe(pos, next, &list) { 28359a982250SKirill A. Shutemov page = list_entry((void *)pos, struct page, mapping); 2836fa41b900SKirill A. Shutemov if (!trylock_page(page)) 2837fa41b900SKirill A. Shutemov goto next; 28389a982250SKirill A. Shutemov /* split_huge_page() removes page from list on success */ 28399a982250SKirill A. Shutemov if (!split_huge_page(page)) 28409a982250SKirill A. Shutemov split++; 28419a982250SKirill A. Shutemov unlock_page(page); 2842fa41b900SKirill A. Shutemov next: 28439a982250SKirill A. Shutemov put_page(page); 28449a982250SKirill A. Shutemov } 28459a982250SKirill A. Shutemov 2846364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 2847364c1eebSYang Shi list_splice_tail(&list, &ds_queue->split_queue); 2848364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 28499a982250SKirill A. Shutemov 2850cb8d68ecSKirill A. Shutemov /* 2851cb8d68ecSKirill A. 
Shutemov * Stop shrinker if we didn't split any page, but the queue is empty. 2852cb8d68ecSKirill A. Shutemov * This can happen if pages were freed under us. 2853cb8d68ecSKirill A. Shutemov */ 2854364c1eebSYang Shi if (!split && list_empty(&ds_queue->split_queue)) 2855cb8d68ecSKirill A. Shutemov return SHRINK_STOP; 2856cb8d68ecSKirill A. Shutemov return split; 28579a982250SKirill A. Shutemov } 28589a982250SKirill A. Shutemov 28599a982250SKirill A. Shutemov static struct shrinker deferred_split_shrinker = { 28609a982250SKirill A. Shutemov .count_objects = deferred_split_count, 28619a982250SKirill A. Shutemov .scan_objects = deferred_split_scan, 28629a982250SKirill A. Shutemov .seeks = DEFAULT_SEEKS, 286387eaceb3SYang Shi .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE | 286487eaceb3SYang Shi SHRINKER_NONSLAB, 28659a982250SKirill A. Shutemov }; 286649071d43SKirill A. Shutemov 286749071d43SKirill A. Shutemov #ifdef CONFIG_DEBUG_FS 286849071d43SKirill A. Shutemov static int split_huge_pages_set(void *data, u64 val) 286949071d43SKirill A. Shutemov { 287049071d43SKirill A. Shutemov struct zone *zone; 287149071d43SKirill A. Shutemov struct page *page; 287249071d43SKirill A. Shutemov unsigned long pfn, max_zone_pfn; 287349071d43SKirill A. Shutemov unsigned long total = 0, split = 0; 287449071d43SKirill A. Shutemov 287549071d43SKirill A. Shutemov if (val != 1) 287649071d43SKirill A. Shutemov return -EINVAL; 287749071d43SKirill A. Shutemov 287849071d43SKirill A. Shutemov for_each_populated_zone(zone) { 287949071d43SKirill A. Shutemov max_zone_pfn = zone_end_pfn(zone); 288049071d43SKirill A. Shutemov for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { 288149071d43SKirill A. Shutemov if (!pfn_valid(pfn)) 288249071d43SKirill A. Shutemov continue; 288349071d43SKirill A. Shutemov 288449071d43SKirill A. Shutemov page = pfn_to_page(pfn); 288549071d43SKirill A. Shutemov if (!get_page_unless_zero(page)) 288649071d43SKirill A. Shutemov continue; 288749071d43SKirill A. Shutemov 288849071d43SKirill A. Shutemov if (zone != page_zone(page)) 288949071d43SKirill A. Shutemov goto next; 289049071d43SKirill A. Shutemov 2891baa355fdSKirill A. Shutemov if (!PageHead(page) || PageHuge(page) || !PageLRU(page)) 289249071d43SKirill A. Shutemov goto next; 289349071d43SKirill A. Shutemov 289449071d43SKirill A. Shutemov total++; 289549071d43SKirill A. Shutemov lock_page(page); 289649071d43SKirill A. Shutemov if (!split_huge_page(page)) 289749071d43SKirill A. Shutemov split++; 289849071d43SKirill A. Shutemov unlock_page(page); 289949071d43SKirill A. Shutemov next: 290049071d43SKirill A. Shutemov put_page(page); 290149071d43SKirill A. Shutemov } 290249071d43SKirill A. Shutemov } 290349071d43SKirill A. Shutemov 2904145bdaa1SYang Shi pr_info("%lu of %lu THP split\n", split, total); 290549071d43SKirill A. Shutemov 290649071d43SKirill A. Shutemov return 0; 290749071d43SKirill A. Shutemov } 2908f1287869Szhong jiang DEFINE_DEBUGFS_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set, 290949071d43SKirill A. Shutemov "%llu\n"); 291049071d43SKirill A. Shutemov 291149071d43SKirill A. Shutemov static int __init split_huge_pages_debugfs(void) 291249071d43SKirill A. Shutemov { 2913d9f7979cSGreg Kroah-Hartman debugfs_create_file("split_huge_pages", 0200, NULL, NULL, 291449071d43SKirill A. Shutemov &split_huge_pages_fops); 291549071d43SKirill A. Shutemov return 0; 291649071d43SKirill A. Shutemov } 291749071d43SKirill A. Shutemov late_initcall(split_huge_pages_debugfs); 291849071d43SKirill A. 
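/*
 * Usage sketch (illustrative): with CONFIG_DEBUG_FS enabled, writing the
 * value 1 to the file created above scans every populated zone and tries to
 * split all THP head pages found on the LRU, reporting the result via
 * pr_info().  Any other value is rejected with -EINVAL.  Assuming debugfs is
 * mounted at the conventional /sys/kernel/debug:
 *
 *	echo 1 > /sys/kernel/debug/split_huge_pages
 */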
Shutemov #endif 2919616b8371SZi Yan 2920616b8371SZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 2921616b8371SZi Yan void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, 2922616b8371SZi Yan struct page *page) 2923616b8371SZi Yan { 2924616b8371SZi Yan struct vm_area_struct *vma = pvmw->vma; 2925616b8371SZi Yan struct mm_struct *mm = vma->vm_mm; 2926616b8371SZi Yan unsigned long address = pvmw->address; 2927616b8371SZi Yan pmd_t pmdval; 2928616b8371SZi Yan swp_entry_t entry; 2929ab6e3d09SNaoya Horiguchi pmd_t pmdswp; 2930616b8371SZi Yan 2931616b8371SZi Yan if (!(pvmw->pmd && !pvmw->pte)) 2932616b8371SZi Yan return; 2933616b8371SZi Yan 2934616b8371SZi Yan flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); 29358a8683adSHuang Ying pmdval = pmdp_invalidate(vma, address, pvmw->pmd); 2936616b8371SZi Yan if (pmd_dirty(pmdval)) 2937616b8371SZi Yan set_page_dirty(page); 2938616b8371SZi Yan entry = make_migration_entry(page, pmd_write(pmdval)); 2939ab6e3d09SNaoya Horiguchi pmdswp = swp_entry_to_pmd(entry); 2940ab6e3d09SNaoya Horiguchi if (pmd_soft_dirty(pmdval)) 2941ab6e3d09SNaoya Horiguchi pmdswp = pmd_swp_mksoft_dirty(pmdswp); 2942ab6e3d09SNaoya Horiguchi set_pmd_at(mm, address, pvmw->pmd, pmdswp); 2943616b8371SZi Yan page_remove_rmap(page, true); 2944616b8371SZi Yan put_page(page); 2945616b8371SZi Yan } 2946616b8371SZi Yan 2947616b8371SZi Yan void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) 2948616b8371SZi Yan { 2949616b8371SZi Yan struct vm_area_struct *vma = pvmw->vma; 2950616b8371SZi Yan struct mm_struct *mm = vma->vm_mm; 2951616b8371SZi Yan unsigned long address = pvmw->address; 2952616b8371SZi Yan unsigned long mmun_start = address & HPAGE_PMD_MASK; 2953616b8371SZi Yan pmd_t pmde; 2954616b8371SZi Yan swp_entry_t entry; 2955616b8371SZi Yan 2956616b8371SZi Yan if (!(pvmw->pmd && !pvmw->pte)) 2957616b8371SZi Yan return; 2958616b8371SZi Yan 2959616b8371SZi Yan entry = pmd_to_swp_entry(*pvmw->pmd); 2960616b8371SZi Yan get_page(new); 2961616b8371SZi Yan pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot)); 2962ab6e3d09SNaoya Horiguchi if (pmd_swp_soft_dirty(*pvmw->pmd)) 2963ab6e3d09SNaoya Horiguchi pmde = pmd_mksoft_dirty(pmde); 2964616b8371SZi Yan if (is_write_migration_entry(entry)) 2965f55e1014SLinus Torvalds pmde = maybe_pmd_mkwrite(pmde, vma); 2966616b8371SZi Yan 2967616b8371SZi Yan flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE); 2968e71769aeSNaoya Horiguchi if (PageAnon(new)) 2969616b8371SZi Yan page_add_anon_rmap(new, vma, mmun_start, true); 2970e71769aeSNaoya Horiguchi else 2971e71769aeSNaoya Horiguchi page_add_file_rmap(new, true); 2972616b8371SZi Yan set_pmd_at(mm, mmun_start, pvmw->pmd, pmde); 2973e125fe40SKirill A. Shutemov if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new)) 2974616b8371SZi Yan mlock_vma_page(new); 2975616b8371SZi Yan update_mmu_cache_pmd(vma, address, pvmw->pmd); 2976616b8371SZi Yan } 2977616b8371SZi Yan #endif 2978
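/*
 * Minimal usage sketch (illustrative only, not part of the original file):
 * how a caller that found a candidate THP head page might try to split it,
 * mirroring what deferred_split_scan() and the debugfs helper above do.
 * The function name below is hypothetical.
 */
static int __maybe_unused thp_try_split_example(struct page *page)
{
	int ret = -EBUSY;

	/* Pin the page so it cannot be freed under us. */
	if (!get_page_unless_zero(page))
		return ret;
	/* Only compound (THP) head pages can be split; hugetlb is excluded. */
	if (PageHead(page) && !PageHuge(page)) {
		/* split_huge_page() requires the page to be locked. */
		lock_page(page);
		ret = split_huge_page(page);
		unlock_page(page);
	}
	put_page(page);
	return ret;
}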