120c8ccb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 271e3aac0SAndrea Arcangeli /* 371e3aac0SAndrea Arcangeli * Copyright (C) 2009 Red Hat, Inc. 471e3aac0SAndrea Arcangeli */ 571e3aac0SAndrea Arcangeli 6ae3a8c1cSAndrew Morton #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7ae3a8c1cSAndrew Morton 871e3aac0SAndrea Arcangeli #include <linux/mm.h> 971e3aac0SAndrea Arcangeli #include <linux/sched.h> 10fa6c0231SZi Yan #include <linux/sched/mm.h> 11f7ccbae4SIngo Molnar #include <linux/sched/coredump.h> 126a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h> 1371e3aac0SAndrea Arcangeli #include <linux/highmem.h> 1471e3aac0SAndrea Arcangeli #include <linux/hugetlb.h> 1571e3aac0SAndrea Arcangeli #include <linux/mmu_notifier.h> 1671e3aac0SAndrea Arcangeli #include <linux/rmap.h> 1771e3aac0SAndrea Arcangeli #include <linux/swap.h> 1897ae1749SKirill A. Shutemov #include <linux/shrinker.h> 19ba76149fSAndrea Arcangeli #include <linux/mm_inline.h> 20e9b61f19SKirill A. Shutemov #include <linux/swapops.h> 214897c765SMatthew Wilcox #include <linux/dax.h> 22ba76149fSAndrea Arcangeli #include <linux/khugepaged.h> 23878aee7dSAndrea Arcangeli #include <linux/freezer.h> 24f25748e3SDan Williams #include <linux/pfn_t.h> 25a664b2d8SAndrea Arcangeli #include <linux/mman.h> 263565fce3SDan Williams #include <linux/memremap.h> 27325adeb5SRalf Baechle #include <linux/pagemap.h> 2849071d43SKirill A. Shutemov #include <linux/debugfs.h> 294daae3b4SMel Gorman #include <linux/migrate.h> 3043b5fbbdSSasha Levin #include <linux/hashtable.h> 316b251fc9SAndrea Arcangeli #include <linux/userfaultfd_k.h> 3233c3fc71SVladimir Davydov #include <linux/page_idle.h> 33baa355fdSKirill A. Shutemov #include <linux/shmem_fs.h> 346b31d595SMichal Hocko #include <linux/oom.h> 3598fa15f3SAnshuman Khandual #include <linux/numa.h> 36f7da677bSVlastimil Babka #include <linux/page_owner.h> 3797ae1749SKirill A. Shutemov 3871e3aac0SAndrea Arcangeli #include <asm/tlb.h> 3971e3aac0SAndrea Arcangeli #include <asm/pgalloc.h> 4071e3aac0SAndrea Arcangeli #include "internal.h" 4171e3aac0SAndrea Arcangeli 42ba76149fSAndrea Arcangeli /* 43b14d595aSMichael DeGuzis * By default, transparent hugepage support is disabled in order to avoid 44b14d595aSMichael DeGuzis * risking an increased memory footprint for applications that are not 45b14d595aSMichael DeGuzis * guaranteed to benefit from it. When transparent hugepage support is 46b14d595aSMichael DeGuzis * enabled, it is for all mappings, and khugepaged scans all mappings. 478bfa3f9aSJianguo Wu * Defrag is invoked by khugepaged hugepage allocations and by page faults 488bfa3f9aSJianguo Wu * for all hugepage allocations. 49ba76149fSAndrea Arcangeli */ 5071e3aac0SAndrea Arcangeli unsigned long transparent_hugepage_flags __read_mostly = 5113ece886SAndrea Arcangeli #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS 52ba76149fSAndrea Arcangeli (1<<TRANSPARENT_HUGEPAGE_FLAG)| 5313ece886SAndrea Arcangeli #endif 5413ece886SAndrea Arcangeli #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE 5513ece886SAndrea Arcangeli (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)| 5613ece886SAndrea Arcangeli #endif 57444eb2a4SMel Gorman (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)| 5879da5407SKirill A. Shutemov (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)| 5979da5407SKirill A. Shutemov (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 60ba76149fSAndrea Arcangeli 619a982250SKirill A. Shutemov static struct shrinker deferred_split_shrinker; 62f000565aSAndrea Arcangeli 6397ae1749SKirill A. 
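/*
 * For illustration: with CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y the initial
 * value above corresponds to these defaults under
 * /sys/kernel/mm/transparent_hugepage/ (the sysfs handlers are defined
 * further down in this file):
 *
 *	enabled       -> "madvise"	(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)
 *	defrag        -> "madvise"	(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)
 *	use_zero_page -> 1		(TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG)
 *
 * TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG is the default for the
 * separate khugepaged/defrag knob, which only affects khugepaged's own
 * allocations (see khugepaged.c).
 */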
Shutemov static atomic_t huge_zero_refcount; 6456873f43SWang, Yalin struct page *huge_zero_page __read_mostly; 653b77e8c8SHugh Dickins unsigned long huge_zero_pfn __read_mostly = ~0UL; 664a6c1297SKirill A. Shutemov 67e6be37b2SMiaohe Lin static inline bool file_thp_enabled(struct vm_area_struct *vma) 68e6be37b2SMiaohe Lin { 69e6be37b2SMiaohe Lin return transhuge_vma_enabled(vma, vma->vm_flags) && vma->vm_file && 70e6be37b2SMiaohe Lin !inode_is_open_for_write(vma->vm_file->f_inode) && 71e6be37b2SMiaohe Lin (vma->vm_flags & VM_EXEC); 72e6be37b2SMiaohe Lin } 73e6be37b2SMiaohe Lin 74e6be37b2SMiaohe Lin bool transparent_hugepage_active(struct vm_area_struct *vma) 757635d9cbSMichal Hocko { 76c0630669SYang Shi /* The addr is used to check if the vma size fits */ 77c0630669SYang Shi unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE; 78c0630669SYang Shi 79c0630669SYang Shi if (!transhuge_vma_suitable(vma, addr)) 80c0630669SYang Shi return false; 817635d9cbSMichal Hocko if (vma_is_anonymous(vma)) 827635d9cbSMichal Hocko return __transparent_hugepage_enabled(vma); 83c0630669SYang Shi if (vma_is_shmem(vma)) 84c0630669SYang Shi return shmem_huge_enabled(vma); 85e6be37b2SMiaohe Lin if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) 86e6be37b2SMiaohe Lin return file_thp_enabled(vma); 877635d9cbSMichal Hocko 887635d9cbSMichal Hocko return false; 897635d9cbSMichal Hocko } 907635d9cbSMichal Hocko 91aaa9705bSMiaohe Lin static bool get_huge_zero_page(void) 9297ae1749SKirill A. Shutemov { 9397ae1749SKirill A. Shutemov struct page *zero_page; 9497ae1749SKirill A. Shutemov retry: 9597ae1749SKirill A. Shutemov if (likely(atomic_inc_not_zero(&huge_zero_refcount))) 96aaa9705bSMiaohe Lin return true; 9797ae1749SKirill A. Shutemov 9897ae1749SKirill A. Shutemov zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE, 9997ae1749SKirill A. Shutemov HPAGE_PMD_ORDER); 100d8a8e1f0SKirill A. Shutemov if (!zero_page) { 101d8a8e1f0SKirill A. Shutemov count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED); 102aaa9705bSMiaohe Lin return false; 103d8a8e1f0SKirill A. Shutemov } 104d8a8e1f0SKirill A. Shutemov count_vm_event(THP_ZERO_PAGE_ALLOC); 10597ae1749SKirill A. Shutemov preempt_disable(); 1065918d10aSKirill A. Shutemov if (cmpxchg(&huge_zero_page, NULL, zero_page)) { 10797ae1749SKirill A. Shutemov preempt_enable(); 1085ddacbe9SYu Zhao __free_pages(zero_page, compound_order(zero_page)); 10997ae1749SKirill A. Shutemov goto retry; 11097ae1749SKirill A. Shutemov } 1113b77e8c8SHugh Dickins WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page)); 11297ae1749SKirill A. Shutemov 11397ae1749SKirill A. Shutemov /* We take additional reference here. It will be put back by shrinker */ 11497ae1749SKirill A. Shutemov atomic_set(&huge_zero_refcount, 2); 11597ae1749SKirill A. Shutemov preempt_enable(); 116aaa9705bSMiaohe Lin return true; 11797ae1749SKirill A. Shutemov } 11897ae1749SKirill A. Shutemov 1196fcb52a5SAaron Lu static void put_huge_zero_page(void) 12097ae1749SKirill A. Shutemov { 12197ae1749SKirill A. Shutemov /* 12297ae1749SKirill A. Shutemov * Counter should never go to zero here. Only shrinker can put 12397ae1749SKirill A. Shutemov * last reference. 12497ae1749SKirill A. Shutemov */ 12597ae1749SKirill A. Shutemov BUG_ON(atomic_dec_and_test(&huge_zero_refcount)); 12697ae1749SKirill A. Shutemov } 12797ae1749SKirill A. 
Shutemov 1286fcb52a5SAaron Lu struct page *mm_get_huge_zero_page(struct mm_struct *mm) 1296fcb52a5SAaron Lu { 1306fcb52a5SAaron Lu if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) 1316fcb52a5SAaron Lu return READ_ONCE(huge_zero_page); 1326fcb52a5SAaron Lu 1336fcb52a5SAaron Lu if (!get_huge_zero_page()) 1346fcb52a5SAaron Lu return NULL; 1356fcb52a5SAaron Lu 1366fcb52a5SAaron Lu if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) 1376fcb52a5SAaron Lu put_huge_zero_page(); 1386fcb52a5SAaron Lu 1396fcb52a5SAaron Lu return READ_ONCE(huge_zero_page); 1406fcb52a5SAaron Lu } 1416fcb52a5SAaron Lu 1426fcb52a5SAaron Lu void mm_put_huge_zero_page(struct mm_struct *mm) 1436fcb52a5SAaron Lu { 1446fcb52a5SAaron Lu if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) 1456fcb52a5SAaron Lu put_huge_zero_page(); 1466fcb52a5SAaron Lu } 1476fcb52a5SAaron Lu 14848896466SGlauber Costa static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink, 14997ae1749SKirill A. Shutemov struct shrink_control *sc) 15097ae1749SKirill A. Shutemov { 15197ae1749SKirill A. Shutemov /* we can free zero page only if last reference remains */ 15297ae1749SKirill A. Shutemov return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0; 15348896466SGlauber Costa } 15497ae1749SKirill A. Shutemov 15548896466SGlauber Costa static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink, 15648896466SGlauber Costa struct shrink_control *sc) 15748896466SGlauber Costa { 15897ae1749SKirill A. Shutemov if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) { 1595918d10aSKirill A. Shutemov struct page *zero_page = xchg(&huge_zero_page, NULL); 1605918d10aSKirill A. Shutemov BUG_ON(zero_page == NULL); 1613b77e8c8SHugh Dickins WRITE_ONCE(huge_zero_pfn, ~0UL); 1625ddacbe9SYu Zhao __free_pages(zero_page, compound_order(zero_page)); 16348896466SGlauber Costa return HPAGE_PMD_NR; 16497ae1749SKirill A. Shutemov } 16597ae1749SKirill A. Shutemov 16697ae1749SKirill A. Shutemov return 0; 16797ae1749SKirill A. Shutemov } 16897ae1749SKirill A. Shutemov 16997ae1749SKirill A. Shutemov static struct shrinker huge_zero_page_shrinker = { 17048896466SGlauber Costa .count_objects = shrink_huge_zero_page_count, 17148896466SGlauber Costa .scan_objects = shrink_huge_zero_page_scan, 17297ae1749SKirill A. Shutemov .seeks = DEFAULT_SEEKS, 17397ae1749SKirill A. Shutemov }; 17497ae1749SKirill A. 
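/*
 * Lifetime of the huge zero page, as implemented above: the first user
 * allocates it and leaves huge_zero_refcount at 2, i.e. one reference for
 * that user plus the extra reference that only the shrinker may drop.  Each
 * mm takes a single reference on first use and tags itself with
 * MMF_HUGE_ZERO_PAGE; mm_put_huge_zero_page() drops that reference when the
 * mm goes away.  shrink_huge_zero_page_count() therefore reports
 * HPAGE_PMD_NR reclaimable pages only once no mm uses the page any more
 * (refcount == 1), and shrink_huge_zero_page_scan() frees it by moving the
 * count from 1 to 0 with cmpxchg.
 */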
Shutemov 17571e3aac0SAndrea Arcangeli #ifdef CONFIG_SYSFS 17671e3aac0SAndrea Arcangeli static ssize_t enabled_show(struct kobject *kobj, 17771e3aac0SAndrea Arcangeli struct kobj_attribute *attr, char *buf) 17871e3aac0SAndrea Arcangeli { 179bfb0ffebSJoe Perches const char *output; 180bfb0ffebSJoe Perches 181444eb2a4SMel Gorman if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags)) 182bfb0ffebSJoe Perches output = "[always] madvise never"; 183bfb0ffebSJoe Perches else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 184bfb0ffebSJoe Perches &transparent_hugepage_flags)) 185bfb0ffebSJoe Perches output = "always [madvise] never"; 186444eb2a4SMel Gorman else 187bfb0ffebSJoe Perches output = "always madvise [never]"; 188bfb0ffebSJoe Perches 189bfb0ffebSJoe Perches return sysfs_emit(buf, "%s\n", output); 19071e3aac0SAndrea Arcangeli } 191444eb2a4SMel Gorman 19271e3aac0SAndrea Arcangeli static ssize_t enabled_store(struct kobject *kobj, 19371e3aac0SAndrea Arcangeli struct kobj_attribute *attr, 19471e3aac0SAndrea Arcangeli const char *buf, size_t count) 19571e3aac0SAndrea Arcangeli { 19621440d7eSDavid Rientjes ssize_t ret = count; 197ba76149fSAndrea Arcangeli 198f42f2552SDavid Rientjes if (sysfs_streq(buf, "always")) { 19921440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); 20021440d7eSDavid Rientjes set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); 201f42f2552SDavid Rientjes } else if (sysfs_streq(buf, "madvise")) { 20221440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); 20321440d7eSDavid Rientjes set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); 204f42f2552SDavid Rientjes } else if (sysfs_streq(buf, "never")) { 20521440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); 20621440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); 20721440d7eSDavid Rientjes } else 20821440d7eSDavid Rientjes ret = -EINVAL; 209ba76149fSAndrea Arcangeli 210ba76149fSAndrea Arcangeli if (ret > 0) { 211b46e756fSKirill A. Shutemov int err = start_stop_khugepaged(); 212ba76149fSAndrea Arcangeli if (err) 213ba76149fSAndrea Arcangeli ret = err; 214ba76149fSAndrea Arcangeli } 215ba76149fSAndrea Arcangeli return ret; 21671e3aac0SAndrea Arcangeli } 21771e3aac0SAndrea Arcangeli static struct kobj_attribute enabled_attr = 21871e3aac0SAndrea Arcangeli __ATTR(enabled, 0644, enabled_show, enabled_store); 21971e3aac0SAndrea Arcangeli 220b46e756fSKirill A. Shutemov ssize_t single_hugepage_flag_show(struct kobject *kobj, 22171e3aac0SAndrea Arcangeli struct kobj_attribute *attr, char *buf, 22271e3aac0SAndrea Arcangeli enum transparent_hugepage_flag flag) 22371e3aac0SAndrea Arcangeli { 224bfb0ffebSJoe Perches return sysfs_emit(buf, "%d\n", 225e27e6151SBen Hutchings !!test_bit(flag, &transparent_hugepage_flags)); 22671e3aac0SAndrea Arcangeli } 227e27e6151SBen Hutchings 228b46e756fSKirill A. 
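/*
 * enabled_store() above backs the usual admin interface, e.g.:
 *
 *	echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 *
 * "always" and "madvise" are mutually exclusive bits, so each branch clears
 * the other bit before setting its own, and "never" clears both.  Any
 * accepted value also starts or stops khugepaged as needed via
 * start_stop_khugepaged().
 */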
Shutemov ssize_t single_hugepage_flag_store(struct kobject *kobj, 22971e3aac0SAndrea Arcangeli struct kobj_attribute *attr, 23071e3aac0SAndrea Arcangeli const char *buf, size_t count, 23171e3aac0SAndrea Arcangeli enum transparent_hugepage_flag flag) 23271e3aac0SAndrea Arcangeli { 233e27e6151SBen Hutchings unsigned long value; 234e27e6151SBen Hutchings int ret; 235e27e6151SBen Hutchings 236e27e6151SBen Hutchings ret = kstrtoul(buf, 10, &value); 237e27e6151SBen Hutchings if (ret < 0) 238e27e6151SBen Hutchings return ret; 239e27e6151SBen Hutchings if (value > 1) 24071e3aac0SAndrea Arcangeli return -EINVAL; 24171e3aac0SAndrea Arcangeli 242e27e6151SBen Hutchings if (value) 243e27e6151SBen Hutchings set_bit(flag, &transparent_hugepage_flags); 244e27e6151SBen Hutchings else 245e27e6151SBen Hutchings clear_bit(flag, &transparent_hugepage_flags); 246e27e6151SBen Hutchings 24771e3aac0SAndrea Arcangeli return count; 24871e3aac0SAndrea Arcangeli } 24971e3aac0SAndrea Arcangeli 25071e3aac0SAndrea Arcangeli static ssize_t defrag_show(struct kobject *kobj, 25171e3aac0SAndrea Arcangeli struct kobj_attribute *attr, char *buf) 25271e3aac0SAndrea Arcangeli { 253bfb0ffebSJoe Perches const char *output; 254bfb0ffebSJoe Perches 255bfb0ffebSJoe Perches if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, 256bfb0ffebSJoe Perches &transparent_hugepage_flags)) 257bfb0ffebSJoe Perches output = "[always] defer defer+madvise madvise never"; 258bfb0ffebSJoe Perches else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, 259bfb0ffebSJoe Perches &transparent_hugepage_flags)) 260bfb0ffebSJoe Perches output = "always [defer] defer+madvise madvise never"; 261bfb0ffebSJoe Perches else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, 262bfb0ffebSJoe Perches &transparent_hugepage_flags)) 263bfb0ffebSJoe Perches output = "always defer [defer+madvise] madvise never"; 264bfb0ffebSJoe Perches else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, 265bfb0ffebSJoe Perches &transparent_hugepage_flags)) 266bfb0ffebSJoe Perches output = "always defer defer+madvise [madvise] never"; 267bfb0ffebSJoe Perches else 268bfb0ffebSJoe Perches output = "always defer defer+madvise madvise [never]"; 269bfb0ffebSJoe Perches 270bfb0ffebSJoe Perches return sysfs_emit(buf, "%s\n", output); 27171e3aac0SAndrea Arcangeli } 27221440d7eSDavid Rientjes 27371e3aac0SAndrea Arcangeli static ssize_t defrag_store(struct kobject *kobj, 27471e3aac0SAndrea Arcangeli struct kobj_attribute *attr, 27571e3aac0SAndrea Arcangeli const char *buf, size_t count) 27671e3aac0SAndrea Arcangeli { 277f42f2552SDavid Rientjes if (sysfs_streq(buf, "always")) { 27821440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 27921440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 28021440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 28121440d7eSDavid Rientjes set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 282f42f2552SDavid Rientjes } else if (sysfs_streq(buf, "defer+madvise")) { 28321440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 28421440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 28521440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 28621440d7eSDavid Rientjes 
set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 287f42f2552SDavid Rientjes } else if (sysfs_streq(buf, "defer")) { 2884fad7fb6SDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 2894fad7fb6SDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 2904fad7fb6SDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 2914fad7fb6SDavid Rientjes set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 292f42f2552SDavid Rientjes } else if (sysfs_streq(buf, "madvise")) { 29321440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 29421440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 29521440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 29621440d7eSDavid Rientjes set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 297f42f2552SDavid Rientjes } else if (sysfs_streq(buf, "never")) { 29821440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 29921440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 30021440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 30121440d7eSDavid Rientjes clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 30221440d7eSDavid Rientjes } else 30321440d7eSDavid Rientjes return -EINVAL; 30421440d7eSDavid Rientjes 30521440d7eSDavid Rientjes return count; 30671e3aac0SAndrea Arcangeli } 30771e3aac0SAndrea Arcangeli static struct kobj_attribute defrag_attr = 30871e3aac0SAndrea Arcangeli __ATTR(defrag, 0644, defrag_show, defrag_store); 30971e3aac0SAndrea Arcangeli 31079da5407SKirill A. Shutemov static ssize_t use_zero_page_show(struct kobject *kobj, 31179da5407SKirill A. Shutemov struct kobj_attribute *attr, char *buf) 31279da5407SKirill A. Shutemov { 313b46e756fSKirill A. Shutemov return single_hugepage_flag_show(kobj, attr, buf, 31479da5407SKirill A. Shutemov TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 31579da5407SKirill A. Shutemov } 31679da5407SKirill A. Shutemov static ssize_t use_zero_page_store(struct kobject *kobj, 31779da5407SKirill A. Shutemov struct kobj_attribute *attr, const char *buf, size_t count) 31879da5407SKirill A. Shutemov { 319b46e756fSKirill A. Shutemov return single_hugepage_flag_store(kobj, attr, buf, count, 32079da5407SKirill A. Shutemov TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 32179da5407SKirill A. Shutemov } 32279da5407SKirill A. Shutemov static struct kobj_attribute use_zero_page_attr = 32379da5407SKirill A. 
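/*
 * As with "enabled", the five defrag modes handled by defrag_store() above
 * are encoded as mutually exclusive flag bits, which is why every branch
 * clears the competing bits before setting its own ("never" simply clears
 * all four).  For example:
 *
 *	echo defer+madvise > /sys/kernel/mm/transparent_hugepage/defrag
 *
 * selects TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG; how each mode
 * maps to allocation behaviour at fault time is spelled out in
 * vma_thp_gfp_mask() below.
 */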
Shutemov __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store); 32449920d28SHugh Dickins 32549920d28SHugh Dickins static ssize_t hpage_pmd_size_show(struct kobject *kobj, 32649920d28SHugh Dickins struct kobj_attribute *attr, char *buf) 32749920d28SHugh Dickins { 328ae7a927dSJoe Perches return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE); 32949920d28SHugh Dickins } 33049920d28SHugh Dickins static struct kobj_attribute hpage_pmd_size_attr = 33149920d28SHugh Dickins __ATTR_RO(hpage_pmd_size); 33249920d28SHugh Dickins 33371e3aac0SAndrea Arcangeli static struct attribute *hugepage_attr[] = { 33471e3aac0SAndrea Arcangeli &enabled_attr.attr, 33571e3aac0SAndrea Arcangeli &defrag_attr.attr, 33679da5407SKirill A. Shutemov &use_zero_page_attr.attr, 33749920d28SHugh Dickins &hpage_pmd_size_attr.attr, 338396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_SHMEM 3395a6e75f8SKirill A. Shutemov &shmem_enabled_attr.attr, 3405a6e75f8SKirill A. Shutemov #endif 34171e3aac0SAndrea Arcangeli NULL, 34271e3aac0SAndrea Arcangeli }; 34371e3aac0SAndrea Arcangeli 3448aa95a21SArvind Yadav static const struct attribute_group hugepage_attr_group = { 34571e3aac0SAndrea Arcangeli .attrs = hugepage_attr, 346ba76149fSAndrea Arcangeli }; 347ba76149fSAndrea Arcangeli 348569e5590SShaohua Li static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) 349569e5590SShaohua Li { 350569e5590SShaohua Li int err; 351569e5590SShaohua Li 352569e5590SShaohua Li *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj); 353569e5590SShaohua Li if (unlikely(!*hugepage_kobj)) { 354ae3a8c1cSAndrew Morton pr_err("failed to create transparent hugepage kobject\n"); 355569e5590SShaohua Li return -ENOMEM; 356569e5590SShaohua Li } 357569e5590SShaohua Li 358569e5590SShaohua Li err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group); 359569e5590SShaohua Li if (err) { 360ae3a8c1cSAndrew Morton pr_err("failed to register transparent hugepage group\n"); 361569e5590SShaohua Li goto delete_obj; 362569e5590SShaohua Li } 363569e5590SShaohua Li 364569e5590SShaohua Li err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group); 365569e5590SShaohua Li if (err) { 366ae3a8c1cSAndrew Morton pr_err("failed to register transparent hugepage group\n"); 367569e5590SShaohua Li goto remove_hp_group; 368569e5590SShaohua Li } 369569e5590SShaohua Li 370569e5590SShaohua Li return 0; 371569e5590SShaohua Li 372569e5590SShaohua Li remove_hp_group: 373569e5590SShaohua Li sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group); 374569e5590SShaohua Li delete_obj: 375569e5590SShaohua Li kobject_put(*hugepage_kobj); 376569e5590SShaohua Li return err; 377569e5590SShaohua Li } 378569e5590SShaohua Li 379569e5590SShaohua Li static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj) 380569e5590SShaohua Li { 381569e5590SShaohua Li sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group); 382569e5590SShaohua Li sysfs_remove_group(hugepage_kobj, &hugepage_attr_group); 383569e5590SShaohua Li kobject_put(hugepage_kobj); 384569e5590SShaohua Li } 385569e5590SShaohua Li #else 386569e5590SShaohua Li static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj) 387569e5590SShaohua Li { 388569e5590SShaohua Li return 0; 389569e5590SShaohua Li } 390569e5590SShaohua Li 391569e5590SShaohua Li static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj) 392569e5590SShaohua Li { 393569e5590SShaohua Li } 39471e3aac0SAndrea Arcangeli #endif /* CONFIG_SYSFS */ 39571e3aac0SAndrea Arcangeli 39671e3aac0SAndrea Arcangeli static int __init 
hugepage_init(void) 39771e3aac0SAndrea Arcangeli { 39871e3aac0SAndrea Arcangeli int err; 399569e5590SShaohua Li struct kobject *hugepage_kobj; 40071e3aac0SAndrea Arcangeli 4014b7167b9SAndrea Arcangeli if (!has_transparent_hugepage()) { 402bae84953SAneesh Kumar K.V /* 403bae84953SAneesh Kumar K.V * Hardware doesn't support hugepages, hence disable 404bae84953SAneesh Kumar K.V * DAX PMD support. 405bae84953SAneesh Kumar K.V */ 406bae84953SAneesh Kumar K.V transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_NEVER_DAX; 407569e5590SShaohua Li return -EINVAL; 4084b7167b9SAndrea Arcangeli } 4094b7167b9SAndrea Arcangeli 410ff20c2e0SKirill A. Shutemov /* 411ff20c2e0SKirill A. Shutemov * hugepages can't be allocated by the buddy allocator 412ff20c2e0SKirill A. Shutemov */ 413ff20c2e0SKirill A. Shutemov MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER); 414ff20c2e0SKirill A. Shutemov /* 415ff20c2e0SKirill A. Shutemov * we use page->mapping and page->index in second tail page 416ff20c2e0SKirill A. Shutemov * as list_head: assuming THP order >= 2 417ff20c2e0SKirill A. Shutemov */ 418ff20c2e0SKirill A. Shutemov MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2); 419ff20c2e0SKirill A. Shutemov 420569e5590SShaohua Li err = hugepage_init_sysfs(&hugepage_kobj); 421569e5590SShaohua Li if (err) 42265ebb64fSKirill A. Shutemov goto err_sysfs; 423ba76149fSAndrea Arcangeli 424b46e756fSKirill A. Shutemov err = khugepaged_init(); 425ba76149fSAndrea Arcangeli if (err) 42665ebb64fSKirill A. Shutemov goto err_slab; 427ba76149fSAndrea Arcangeli 42865ebb64fSKirill A. Shutemov err = register_shrinker(&huge_zero_page_shrinker); 42965ebb64fSKirill A. Shutemov if (err) 43065ebb64fSKirill A. Shutemov goto err_hzp_shrinker; 4319a982250SKirill A. Shutemov err = register_shrinker(&deferred_split_shrinker); 4329a982250SKirill A. Shutemov if (err) 4339a982250SKirill A. Shutemov goto err_split_shrinker; 43497ae1749SKirill A. Shutemov 43597562cd2SRik van Riel /* 43697562cd2SRik van Riel * By default disable transparent hugepages on smaller systems, 43797562cd2SRik van Riel * where the extra memory used could hurt more than TLB overhead 43897562cd2SRik van Riel * is likely to save. The admin can still enable it through /sys. 43997562cd2SRik van Riel */ 440ca79b0c2SArun KS if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) { 44197562cd2SRik van Riel transparent_hugepage_flags = 0; 44279553da2SKirill A. Shutemov return 0; 44379553da2SKirill A. Shutemov } 44497562cd2SRik van Riel 44579553da2SKirill A. Shutemov err = start_stop_khugepaged(); 44665ebb64fSKirill A. Shutemov if (err) 44765ebb64fSKirill A. Shutemov goto err_khugepaged; 448ba76149fSAndrea Arcangeli 449569e5590SShaohua Li return 0; 45065ebb64fSKirill A. Shutemov err_khugepaged: 4519a982250SKirill A. Shutemov unregister_shrinker(&deferred_split_shrinker); 4529a982250SKirill A. Shutemov err_split_shrinker: 45365ebb64fSKirill A. Shutemov unregister_shrinker(&huge_zero_page_shrinker); 45465ebb64fSKirill A. Shutemov err_hzp_shrinker: 455b46e756fSKirill A. Shutemov khugepaged_destroy(); 45665ebb64fSKirill A. Shutemov err_slab: 457569e5590SShaohua Li hugepage_exit_sysfs(hugepage_kobj); 45865ebb64fSKirill A. 
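/*
 * A worked example for the memory-size check above: 512 << (20 - PAGE_SHIFT)
 * is the number of pages making up 512MB regardless of page size.  With 4K
 * pages that is 512 << 8 = 131072 pages, with 64K pages 512 << 4 = 8192
 * pages.  Smaller machines therefore boot with transparent_hugepage_flags
 * cleared, but the admin can still turn THP back on through sysfs.
 */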
Shutemov err_sysfs: 459ba76149fSAndrea Arcangeli return err; 46071e3aac0SAndrea Arcangeli } 461a64fb3cdSPaul Gortmaker subsys_initcall(hugepage_init); 46271e3aac0SAndrea Arcangeli 46371e3aac0SAndrea Arcangeli static int __init setup_transparent_hugepage(char *str) 46471e3aac0SAndrea Arcangeli { 46571e3aac0SAndrea Arcangeli int ret = 0; 46671e3aac0SAndrea Arcangeli if (!str) 46771e3aac0SAndrea Arcangeli goto out; 46871e3aac0SAndrea Arcangeli if (!strcmp(str, "always")) { 46971e3aac0SAndrea Arcangeli set_bit(TRANSPARENT_HUGEPAGE_FLAG, 47071e3aac0SAndrea Arcangeli &transparent_hugepage_flags); 47171e3aac0SAndrea Arcangeli clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 47271e3aac0SAndrea Arcangeli &transparent_hugepage_flags); 47371e3aac0SAndrea Arcangeli ret = 1; 47471e3aac0SAndrea Arcangeli } else if (!strcmp(str, "madvise")) { 47571e3aac0SAndrea Arcangeli clear_bit(TRANSPARENT_HUGEPAGE_FLAG, 47671e3aac0SAndrea Arcangeli &transparent_hugepage_flags); 47771e3aac0SAndrea Arcangeli set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 47871e3aac0SAndrea Arcangeli &transparent_hugepage_flags); 47971e3aac0SAndrea Arcangeli ret = 1; 48071e3aac0SAndrea Arcangeli } else if (!strcmp(str, "never")) { 48171e3aac0SAndrea Arcangeli clear_bit(TRANSPARENT_HUGEPAGE_FLAG, 48271e3aac0SAndrea Arcangeli &transparent_hugepage_flags); 48371e3aac0SAndrea Arcangeli clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 48471e3aac0SAndrea Arcangeli &transparent_hugepage_flags); 48571e3aac0SAndrea Arcangeli ret = 1; 48671e3aac0SAndrea Arcangeli } 48771e3aac0SAndrea Arcangeli out: 48871e3aac0SAndrea Arcangeli if (!ret) 489ae3a8c1cSAndrew Morton pr_warn("transparent_hugepage= cannot parse, ignored\n"); 49071e3aac0SAndrea Arcangeli return ret; 49171e3aac0SAndrea Arcangeli } 49271e3aac0SAndrea Arcangeli __setup("transparent_hugepage=", setup_transparent_hugepage); 49371e3aac0SAndrea Arcangeli 494f55e1014SLinus Torvalds pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) 49571e3aac0SAndrea Arcangeli { 496f55e1014SLinus Torvalds if (likely(vma->vm_flags & VM_WRITE)) 49771e3aac0SAndrea Arcangeli pmd = pmd_mkwrite(pmd); 49871e3aac0SAndrea Arcangeli return pmd; 49971e3aac0SAndrea Arcangeli } 50071e3aac0SAndrea Arcangeli 50187eaceb3SYang Shi #ifdef CONFIG_MEMCG 50287eaceb3SYang Shi static inline struct deferred_split *get_deferred_split_queue(struct page *page) 5039a982250SKirill A. Shutemov { 504bcfe06bfSRoman Gushchin struct mem_cgroup *memcg = page_memcg(compound_head(page)); 50587eaceb3SYang Shi struct pglist_data *pgdat = NODE_DATA(page_to_nid(page)); 50687eaceb3SYang Shi 50787eaceb3SYang Shi if (memcg) 50887eaceb3SYang Shi return &memcg->deferred_split_queue; 50987eaceb3SYang Shi else 51087eaceb3SYang Shi return &pgdat->deferred_split_queue; 5119a982250SKirill A. Shutemov } 51287eaceb3SYang Shi #else 51387eaceb3SYang Shi static inline struct deferred_split *get_deferred_split_queue(struct page *page) 51487eaceb3SYang Shi { 51587eaceb3SYang Shi struct pglist_data *pgdat = NODE_DATA(page_to_nid(page)); 51687eaceb3SYang Shi 51787eaceb3SYang Shi return &pgdat->deferred_split_queue; 51887eaceb3SYang Shi } 51987eaceb3SYang Shi #endif 5209a982250SKirill A. Shutemov 5219a982250SKirill A. Shutemov void prep_transhuge_page(struct page *page) 5229a982250SKirill A. Shutemov { 5239a982250SKirill A. Shutemov /* 5249a982250SKirill A. Shutemov * we use page->mapping and page->indexlru in second tail page 5259a982250SKirill A. Shutemov * as list_head: assuming THP order >= 2 5269a982250SKirill A. Shutemov */ 5279a982250SKirill A. 
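/*
 * prep_transhuge_page() only sets up the deferred-split machinery for a
 * freshly allocated THP: the list_head that lives in the second tail page
 * and the TRANSHUGE_PAGE_DTOR destructor.  Which queue the page lands on
 * when it is eventually deferred is decided by get_deferred_split_queue()
 * above: with CONFIG_MEMCG a memcg-charged page uses that memcg's
 * deferred_split_queue, everything else falls back to the per-node queue.
 */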
Shutemov 5289a982250SKirill A. Shutemov INIT_LIST_HEAD(page_deferred_list(page)); 5299a982250SKirill A. Shutemov set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR); 5309a982250SKirill A. Shutemov } 5319a982250SKirill A. Shutemov 532005ba37cSSean Christopherson bool is_transparent_hugepage(struct page *page) 533005ba37cSSean Christopherson { 534005ba37cSSean Christopherson if (!PageCompound(page)) 535fa1f68ccSZou Wei return false; 536005ba37cSSean Christopherson 537005ba37cSSean Christopherson page = compound_head(page); 538005ba37cSSean Christopherson return is_huge_zero_page(page) || 539005ba37cSSean Christopherson page[1].compound_dtor == TRANSHUGE_PAGE_DTOR; 540005ba37cSSean Christopherson } 541005ba37cSSean Christopherson EXPORT_SYMBOL_GPL(is_transparent_hugepage); 542005ba37cSSean Christopherson 54397d3d0f9SKirill A. Shutemov static unsigned long __thp_get_unmapped_area(struct file *filp, 54497d3d0f9SKirill A. Shutemov unsigned long addr, unsigned long len, 54574d2fad1SToshi Kani loff_t off, unsigned long flags, unsigned long size) 54674d2fad1SToshi Kani { 54774d2fad1SToshi Kani loff_t off_end = off + len; 54874d2fad1SToshi Kani loff_t off_align = round_up(off, size); 54997d3d0f9SKirill A. Shutemov unsigned long len_pad, ret; 55074d2fad1SToshi Kani 55174d2fad1SToshi Kani if (off_end <= off_align || (off_end - off_align) < size) 55274d2fad1SToshi Kani return 0; 55374d2fad1SToshi Kani 55474d2fad1SToshi Kani len_pad = len + size; 55574d2fad1SToshi Kani if (len_pad < len || (off + len_pad) < off) 55674d2fad1SToshi Kani return 0; 55774d2fad1SToshi Kani 55897d3d0f9SKirill A. Shutemov ret = current->mm->get_unmapped_area(filp, addr, len_pad, 55974d2fad1SToshi Kani off >> PAGE_SHIFT, flags); 56097d3d0f9SKirill A. Shutemov 56197d3d0f9SKirill A. Shutemov /* 56297d3d0f9SKirill A. Shutemov * The failure might be due to length padding. The caller will retry 56397d3d0f9SKirill A. Shutemov * without the padding. 56497d3d0f9SKirill A. Shutemov */ 56597d3d0f9SKirill A. Shutemov if (IS_ERR_VALUE(ret)) 56674d2fad1SToshi Kani return 0; 56774d2fad1SToshi Kani 56897d3d0f9SKirill A. Shutemov /* 56997d3d0f9SKirill A. Shutemov * Do not try to align to THP boundary if allocation at the address 57097d3d0f9SKirill A. Shutemov * hint succeeds. 57197d3d0f9SKirill A. Shutemov */ 57297d3d0f9SKirill A. Shutemov if (ret == addr) 57374d2fad1SToshi Kani return addr; 57497d3d0f9SKirill A. Shutemov 57597d3d0f9SKirill A. Shutemov ret += (off - ret) & (size - 1); 57697d3d0f9SKirill A. Shutemov return ret; 57774d2fad1SToshi Kani } 57874d2fad1SToshi Kani 57974d2fad1SToshi Kani unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, 58074d2fad1SToshi Kani unsigned long len, unsigned long pgoff, unsigned long flags) 58174d2fad1SToshi Kani { 58297d3d0f9SKirill A. Shutemov unsigned long ret; 58374d2fad1SToshi Kani loff_t off = (loff_t)pgoff << PAGE_SHIFT; 58474d2fad1SToshi Kani 58574d2fad1SToshi Kani if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD)) 58674d2fad1SToshi Kani goto out; 58774d2fad1SToshi Kani 58897d3d0f9SKirill A. Shutemov ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE); 58997d3d0f9SKirill A. Shutemov if (ret) 59097d3d0f9SKirill A. 
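/*
 * Worked example for the padding logic in __thp_get_unmapped_area() above,
 * with size = PMD_SIZE (2MB on x86-64): if the file offset satisfies
 * off & (size - 1) == 0x1000 and the area found for the padded length
 * happens to start on a 2MB boundary, then (off - ret) & (size - 1) is
 * 0x1000 and ret is advanced by that much.  Afterwards ret and off are
 * congruent modulo 2MB, which is what later allows the file to be mapped
 * with PMD entries; the extra "size" bytes of padding guarantee the
 * advanced address still fits inside the area that was found.
 */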
Shutemov return ret; 59174d2fad1SToshi Kani out: 59274d2fad1SToshi Kani return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); 59374d2fad1SToshi Kani } 59474d2fad1SToshi Kani EXPORT_SYMBOL_GPL(thp_get_unmapped_area); 59574d2fad1SToshi Kani 5962b740303SSouptick Joarder static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf, 5972b740303SSouptick Joarder struct page *page, gfp_t gfp) 59871e3aac0SAndrea Arcangeli { 59982b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 60071e3aac0SAndrea Arcangeli pgtable_t pgtable; 60182b0f8c3SJan Kara unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 6022b740303SSouptick Joarder vm_fault_t ret = 0; 60371e3aac0SAndrea Arcangeli 604309381feSSasha Levin VM_BUG_ON_PAGE(!PageCompound(page), page); 60500501b53SJohannes Weiner 606d9eb1ea2SJohannes Weiner if (mem_cgroup_charge(page, vma->vm_mm, gfp)) { 6076b251fc9SAndrea Arcangeli put_page(page); 6086b251fc9SAndrea Arcangeli count_vm_event(THP_FAULT_FALLBACK); 60985b9f46eSDavid Rientjes count_vm_event(THP_FAULT_FALLBACK_CHARGE); 6106b251fc9SAndrea Arcangeli return VM_FAULT_FALLBACK; 6116b251fc9SAndrea Arcangeli } 6129d82c694SJohannes Weiner cgroup_throttle_swaprate(page, gfp); 61371e3aac0SAndrea Arcangeli 6144cf58924SJoel Fernandes (Google) pgtable = pte_alloc_one(vma->vm_mm); 61500501b53SJohannes Weiner if (unlikely(!pgtable)) { 6166b31d595SMichal Hocko ret = VM_FAULT_OOM; 6176b31d595SMichal Hocko goto release; 61800501b53SJohannes Weiner } 61900501b53SJohannes Weiner 620c79b57e4SHuang Ying clear_huge_page(page, vmf->address, HPAGE_PMD_NR); 62152f37629SMinchan Kim /* 62252f37629SMinchan Kim * The memory barrier inside __SetPageUptodate makes sure that 62352f37629SMinchan Kim * clear_huge_page writes become visible before the set_pmd_at() 62452f37629SMinchan Kim * write. 62552f37629SMinchan Kim */ 62671e3aac0SAndrea Arcangeli __SetPageUptodate(page); 62771e3aac0SAndrea Arcangeli 62882b0f8c3SJan Kara vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 62982b0f8c3SJan Kara if (unlikely(!pmd_none(*vmf->pmd))) { 6306b31d595SMichal Hocko goto unlock_release; 63171e3aac0SAndrea Arcangeli } else { 63271e3aac0SAndrea Arcangeli pmd_t entry; 6336b251fc9SAndrea Arcangeli 6346b31d595SMichal Hocko ret = check_stable_address_space(vma->vm_mm); 6356b31d595SMichal Hocko if (ret) 6366b31d595SMichal Hocko goto unlock_release; 6376b31d595SMichal Hocko 6386b251fc9SAndrea Arcangeli /* Deliver the page fault to userland */ 6396b251fc9SAndrea Arcangeli if (userfaultfd_missing(vma)) { 64082b0f8c3SJan Kara spin_unlock(vmf->ptl); 6416b251fc9SAndrea Arcangeli put_page(page); 642bae473a4SKirill A. Shutemov pte_free(vma->vm_mm, pgtable); 6438fd5eda4SMiaohe Lin ret = handle_userfault(vmf, VM_UFFD_MISSING); 6448fd5eda4SMiaohe Lin VM_BUG_ON(ret & VM_FAULT_FALLBACK); 6458fd5eda4SMiaohe Lin return ret; 6466b251fc9SAndrea Arcangeli } 6476b251fc9SAndrea Arcangeli 6483122359aSKirill A. Shutemov entry = mk_huge_pmd(page, vma->vm_page_prot); 649f55e1014SLinus Torvalds entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 650d281ee61SKirill A. Shutemov page_add_new_anon_rmap(page, vma, haddr, true); 651b518154eSJoonsoo Kim lru_cache_add_inactive_or_unevictable(page, vma); 65282b0f8c3SJan Kara pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); 65382b0f8c3SJan Kara set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); 654fca40573SBibo Mao update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 655bae473a4SKirill A. Shutemov add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); 656c4812909SKirill A. 
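/*
 * Bookkeeping for a successful anonymous huge fault: HPAGE_PMD_NR pages are
 * accounted in MM_ANONPAGES in one go, and the page-table accounting is
 * bumped for the pgtable deposited above, which __split_huge_pmd() can
 * later withdraw and reuse if this mapping has to be split back to ptes.
 */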
Shutemov mm_inc_nr_ptes(vma->vm_mm); 65782b0f8c3SJan Kara spin_unlock(vmf->ptl); 6586b251fc9SAndrea Arcangeli count_vm_event(THP_FAULT_ALLOC); 6599d82c694SJohannes Weiner count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC); 66071e3aac0SAndrea Arcangeli } 66171e3aac0SAndrea Arcangeli 662aa2e878eSDavid Rientjes return 0; 6636b31d595SMichal Hocko unlock_release: 6646b31d595SMichal Hocko spin_unlock(vmf->ptl); 6656b31d595SMichal Hocko release: 6666b31d595SMichal Hocko if (pgtable) 6676b31d595SMichal Hocko pte_free(vma->vm_mm, pgtable); 6686b31d595SMichal Hocko put_page(page); 6696b31d595SMichal Hocko return ret; 6706b31d595SMichal Hocko 67171e3aac0SAndrea Arcangeli } 67271e3aac0SAndrea Arcangeli 673444eb2a4SMel Gorman /* 67421440d7eSDavid Rientjes * always: directly stall for all thp allocations 67521440d7eSDavid Rientjes * defer: wake kswapd and fail if not immediately available 67621440d7eSDavid Rientjes * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise 67721440d7eSDavid Rientjes * fail if not immediately available 67821440d7eSDavid Rientjes * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately 67921440d7eSDavid Rientjes * available 68021440d7eSDavid Rientjes * never: never stall for any thp allocation 681444eb2a4SMel Gorman */ 682164cc4feSRik van Riel gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma) 6830bbbc0b3SAndrea Arcangeli { 684164cc4feSRik van Riel const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE); 68589c83fb5SMichal Hocko 686ac79f78dSDavid Rientjes /* Always do synchronous compaction */ 68721440d7eSDavid Rientjes if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) 688a8282608SAndrea Arcangeli return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY); 689ac79f78dSDavid Rientjes 690ac79f78dSDavid Rientjes /* Kick kcompactd and fail quickly */ 69121440d7eSDavid Rientjes if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) 69219deb769SDavid Rientjes return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM; 693ac79f78dSDavid Rientjes 694ac79f78dSDavid Rientjes /* Synchronous compaction if madvised, otherwise kick kcompactd */ 69521440d7eSDavid Rientjes if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags)) 69619deb769SDavid Rientjes return GFP_TRANSHUGE_LIGHT | 69719deb769SDavid Rientjes (vma_madvised ? __GFP_DIRECT_RECLAIM : 698ac79f78dSDavid Rientjes __GFP_KSWAPD_RECLAIM); 699ac79f78dSDavid Rientjes 700ac79f78dSDavid Rientjes /* Only do synchronous compaction if madvised */ 70121440d7eSDavid Rientjes if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) 70219deb769SDavid Rientjes return GFP_TRANSHUGE_LIGHT | 70319deb769SDavid Rientjes (vma_madvised ? __GFP_DIRECT_RECLAIM : 0); 704ac79f78dSDavid Rientjes 70519deb769SDavid Rientjes return GFP_TRANSHUGE_LIGHT; 706444eb2a4SMel Gorman } 707444eb2a4SMel Gorman 708c4088ebdSKirill A. Shutemov /* Caller must hold page table lock. */ 7092efeb8daSMiaohe Lin static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, 71097ae1749SKirill A. Shutemov struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, 7115918d10aSKirill A. Shutemov struct page *zero_page) 712fc9fe822SKirill A. Shutemov { 713fc9fe822SKirill A. Shutemov pmd_t entry; 7147c414164SAndrew Morton if (!pmd_none(*pmd)) 7152efeb8daSMiaohe Lin return; 7165918d10aSKirill A. Shutemov entry = mk_pmd(zero_page, vma->vm_page_prot); 717fc9fe822SKirill A. 
Shutemov entry = pmd_mkhuge(entry); 71812c9d70bSMatthew Wilcox if (pgtable) 7196b0b50b0SAneesh Kumar K.V pgtable_trans_huge_deposit(mm, pmd, pgtable); 720fc9fe822SKirill A. Shutemov set_pmd_at(mm, haddr, pmd, entry); 721c4812909SKirill A. Shutemov mm_inc_nr_ptes(mm); 722fc9fe822SKirill A. Shutemov } 723fc9fe822SKirill A. Shutemov 7242b740303SSouptick Joarder vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf) 72571e3aac0SAndrea Arcangeli { 72682b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 727077fcf11SAneesh Kumar K.V gfp_t gfp; 72871e3aac0SAndrea Arcangeli struct page *page; 72982b0f8c3SJan Kara unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 73071e3aac0SAndrea Arcangeli 73143675e6fSYang Shi if (!transhuge_vma_suitable(vma, haddr)) 732c0292554SKirill A. Shutemov return VM_FAULT_FALLBACK; 73371e3aac0SAndrea Arcangeli if (unlikely(anon_vma_prepare(vma))) 73471e3aac0SAndrea Arcangeli return VM_FAULT_OOM; 7356d50e60cSDavid Rientjes if (unlikely(khugepaged_enter(vma, vma->vm_flags))) 736ba76149fSAndrea Arcangeli return VM_FAULT_OOM; 73782b0f8c3SJan Kara if (!(vmf->flags & FAULT_FLAG_WRITE) && 738bae473a4SKirill A. Shutemov !mm_forbids_zeropage(vma->vm_mm) && 73979da5407SKirill A. Shutemov transparent_hugepage_use_zero_page()) { 74080371957SKirill A. Shutemov pgtable_t pgtable; 7415918d10aSKirill A. Shutemov struct page *zero_page; 7422b740303SSouptick Joarder vm_fault_t ret; 7434cf58924SJoel Fernandes (Google) pgtable = pte_alloc_one(vma->vm_mm); 74480371957SKirill A. Shutemov if (unlikely(!pgtable)) 74580371957SKirill A. Shutemov return VM_FAULT_OOM; 7466fcb52a5SAaron Lu zero_page = mm_get_huge_zero_page(vma->vm_mm); 7475918d10aSKirill A. Shutemov if (unlikely(!zero_page)) { 748bae473a4SKirill A. Shutemov pte_free(vma->vm_mm, pgtable); 74997ae1749SKirill A. Shutemov count_vm_event(THP_FAULT_FALLBACK); 750c0292554SKirill A. Shutemov return VM_FAULT_FALLBACK; 75197ae1749SKirill A. Shutemov } 75282b0f8c3SJan Kara vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 7536b251fc9SAndrea Arcangeli ret = 0; 75482b0f8c3SJan Kara if (pmd_none(*vmf->pmd)) { 7556b31d595SMichal Hocko ret = check_stable_address_space(vma->vm_mm); 7566b31d595SMichal Hocko if (ret) { 7576b31d595SMichal Hocko spin_unlock(vmf->ptl); 758bfe8cc1dSGerald Schaefer pte_free(vma->vm_mm, pgtable); 7596b31d595SMichal Hocko } else if (userfaultfd_missing(vma)) { 76082b0f8c3SJan Kara spin_unlock(vmf->ptl); 761bfe8cc1dSGerald Schaefer pte_free(vma->vm_mm, pgtable); 76282b0f8c3SJan Kara ret = handle_userfault(vmf, VM_UFFD_MISSING); 7636b251fc9SAndrea Arcangeli VM_BUG_ON(ret & VM_FAULT_FALLBACK); 7646b251fc9SAndrea Arcangeli } else { 765bae473a4SKirill A. Shutemov set_huge_zero_page(pgtable, vma->vm_mm, vma, 76682b0f8c3SJan Kara haddr, vmf->pmd, zero_page); 767fca40573SBibo Mao update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 76882b0f8c3SJan Kara spin_unlock(vmf->ptl); 7696b251fc9SAndrea Arcangeli } 770bfe8cc1dSGerald Schaefer } else { 77182b0f8c3SJan Kara spin_unlock(vmf->ptl); 772bae473a4SKirill A. Shutemov pte_free(vma->vm_mm, pgtable); 773bfe8cc1dSGerald Schaefer } 7746b251fc9SAndrea Arcangeli return ret; 77580371957SKirill A. Shutemov } 776164cc4feSRik van Riel gfp = vma_thp_gfp_mask(vma); 77719deb769SDavid Rientjes page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); 77881ab4201SAndi Kleen if (unlikely(!page)) { 77981ab4201SAndi Kleen count_vm_event(THP_FAULT_FALLBACK); 780c0292554SKirill A. Shutemov return VM_FAULT_FALLBACK; 78181ab4201SAndi Kleen } 7829a982250SKirill A. 
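/*
 * How hard the allocation above tries is decided by vma_thp_gfp_mask().
 * For example, with "defrag" set to defer+madvise an MADV_HUGEPAGE vma
 * allocates with GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM and may stall
 * in compaction, while any other vma only gets __GFP_KSWAPD_RECLAIM, so it
 * merely wakes kswapd/kcompactd and falls back to small pages on failure.
 */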
Shutemov prep_transhuge_page(page); 78382b0f8c3SJan Kara return __do_huge_pmd_anonymous_page(vmf, page, gfp); 78471e3aac0SAndrea Arcangeli } 78571e3aac0SAndrea Arcangeli 786ae18d6dcSMatthew Wilcox static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, 7873b6521f5SOliver O'Halloran pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write, 7883b6521f5SOliver O'Halloran pgtable_t pgtable) 7895cad465dSMatthew Wilcox { 7905cad465dSMatthew Wilcox struct mm_struct *mm = vma->vm_mm; 7915cad465dSMatthew Wilcox pmd_t entry; 7925cad465dSMatthew Wilcox spinlock_t *ptl; 7935cad465dSMatthew Wilcox 7945cad465dSMatthew Wilcox ptl = pmd_lock(mm, pmd); 795c6f3c5eeSAneesh Kumar K.V if (!pmd_none(*pmd)) { 796c6f3c5eeSAneesh Kumar K.V if (write) { 797c6f3c5eeSAneesh Kumar K.V if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) { 798c6f3c5eeSAneesh Kumar K.V WARN_ON_ONCE(!is_huge_zero_pmd(*pmd)); 799c6f3c5eeSAneesh Kumar K.V goto out_unlock; 800c6f3c5eeSAneesh Kumar K.V } 801c6f3c5eeSAneesh Kumar K.V entry = pmd_mkyoung(*pmd); 802c6f3c5eeSAneesh Kumar K.V entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 803c6f3c5eeSAneesh Kumar K.V if (pmdp_set_access_flags(vma, addr, pmd, entry, 1)) 804c6f3c5eeSAneesh Kumar K.V update_mmu_cache_pmd(vma, addr, pmd); 805c6f3c5eeSAneesh Kumar K.V } 806c6f3c5eeSAneesh Kumar K.V 807c6f3c5eeSAneesh Kumar K.V goto out_unlock; 808c6f3c5eeSAneesh Kumar K.V } 809c6f3c5eeSAneesh Kumar K.V 810f25748e3SDan Williams entry = pmd_mkhuge(pfn_t_pmd(pfn, prot)); 811f25748e3SDan Williams if (pfn_t_devmap(pfn)) 812f25748e3SDan Williams entry = pmd_mkdevmap(entry); 8135cad465dSMatthew Wilcox if (write) { 814f55e1014SLinus Torvalds entry = pmd_mkyoung(pmd_mkdirty(entry)); 815f55e1014SLinus Torvalds entry = maybe_pmd_mkwrite(entry, vma); 8165cad465dSMatthew Wilcox } 8173b6521f5SOliver O'Halloran 8183b6521f5SOliver O'Halloran if (pgtable) { 8193b6521f5SOliver O'Halloran pgtable_trans_huge_deposit(mm, pmd, pgtable); 820c4812909SKirill A. Shutemov mm_inc_nr_ptes(mm); 821c6f3c5eeSAneesh Kumar K.V pgtable = NULL; 8223b6521f5SOliver O'Halloran } 8233b6521f5SOliver O'Halloran 8245cad465dSMatthew Wilcox set_pmd_at(mm, addr, pmd, entry); 8255cad465dSMatthew Wilcox update_mmu_cache_pmd(vma, addr, pmd); 826c6f3c5eeSAneesh Kumar K.V 827c6f3c5eeSAneesh Kumar K.V out_unlock: 8285cad465dSMatthew Wilcox spin_unlock(ptl); 829c6f3c5eeSAneesh Kumar K.V if (pgtable) 830c6f3c5eeSAneesh Kumar K.V pte_free(mm, pgtable); 8315cad465dSMatthew Wilcox } 8325cad465dSMatthew Wilcox 8339a9731b1SThomas Hellstrom (VMware) /** 8349a9731b1SThomas Hellstrom (VMware) * vmf_insert_pfn_pmd_prot - insert a pmd size pfn 8359a9731b1SThomas Hellstrom (VMware) * @vmf: Structure describing the fault 8369a9731b1SThomas Hellstrom (VMware) * @pfn: pfn to insert 8379a9731b1SThomas Hellstrom (VMware) * @pgprot: page protection to use 8389a9731b1SThomas Hellstrom (VMware) * @write: whether it's a write fault 8399a9731b1SThomas Hellstrom (VMware) * 8409a9731b1SThomas Hellstrom (VMware) * Insert a pmd size pfn. See vmf_insert_pfn() for additional info and 8419a9731b1SThomas Hellstrom (VMware) * also consult the vmf_insert_mixed_prot() documentation when 8429a9731b1SThomas Hellstrom (VMware) * @pgprot != @vmf->vma->vm_page_prot. 8439a9731b1SThomas Hellstrom (VMware) * 8449a9731b1SThomas Hellstrom (VMware) * Return: vm_fault_t value. 
8459a9731b1SThomas Hellstrom (VMware) */ 8469a9731b1SThomas Hellstrom (VMware) vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn, 8479a9731b1SThomas Hellstrom (VMware) pgprot_t pgprot, bool write) 8485cad465dSMatthew Wilcox { 849fce86ff5SDan Williams unsigned long addr = vmf->address & PMD_MASK; 850fce86ff5SDan Williams struct vm_area_struct *vma = vmf->vma; 8513b6521f5SOliver O'Halloran pgtable_t pgtable = NULL; 852fce86ff5SDan Williams 8535cad465dSMatthew Wilcox /* 8545cad465dSMatthew Wilcox * If we had pmd_special, we could avoid all these restrictions, 8555cad465dSMatthew Wilcox * but we need to be consistent with PTEs and architectures that 8565cad465dSMatthew Wilcox * can't support a 'special' bit. 8575cad465dSMatthew Wilcox */ 858e1fb4a08SDave Jiang BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && 859e1fb4a08SDave Jiang !pfn_t_devmap(pfn)); 8605cad465dSMatthew Wilcox BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == 8615cad465dSMatthew Wilcox (VM_PFNMAP|VM_MIXEDMAP)); 8625cad465dSMatthew Wilcox BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); 8635cad465dSMatthew Wilcox 8645cad465dSMatthew Wilcox if (addr < vma->vm_start || addr >= vma->vm_end) 8655cad465dSMatthew Wilcox return VM_FAULT_SIGBUS; 866308a047cSBorislav Petkov 8673b6521f5SOliver O'Halloran if (arch_needs_pgtable_deposit()) { 8684cf58924SJoel Fernandes (Google) pgtable = pte_alloc_one(vma->vm_mm); 8693b6521f5SOliver O'Halloran if (!pgtable) 8703b6521f5SOliver O'Halloran return VM_FAULT_OOM; 8713b6521f5SOliver O'Halloran } 8723b6521f5SOliver O'Halloran 873308a047cSBorislav Petkov track_pfn_insert(vma, &pgprot, pfn); 874308a047cSBorislav Petkov 875fce86ff5SDan Williams insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable); 876ae18d6dcSMatthew Wilcox return VM_FAULT_NOPAGE; 8775cad465dSMatthew Wilcox } 8789a9731b1SThomas Hellstrom (VMware) EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd_prot); 8795cad465dSMatthew Wilcox 880a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 881f55e1014SLinus Torvalds static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma) 882a00cc7d9SMatthew Wilcox { 883f55e1014SLinus Torvalds if (likely(vma->vm_flags & VM_WRITE)) 884a00cc7d9SMatthew Wilcox pud = pud_mkwrite(pud); 885a00cc7d9SMatthew Wilcox return pud; 886a00cc7d9SMatthew Wilcox } 887a00cc7d9SMatthew Wilcox 888a00cc7d9SMatthew Wilcox static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, 889a00cc7d9SMatthew Wilcox pud_t *pud, pfn_t pfn, pgprot_t prot, bool write) 890a00cc7d9SMatthew Wilcox { 891a00cc7d9SMatthew Wilcox struct mm_struct *mm = vma->vm_mm; 892a00cc7d9SMatthew Wilcox pud_t entry; 893a00cc7d9SMatthew Wilcox spinlock_t *ptl; 894a00cc7d9SMatthew Wilcox 895a00cc7d9SMatthew Wilcox ptl = pud_lock(mm, pud); 896c6f3c5eeSAneesh Kumar K.V if (!pud_none(*pud)) { 897c6f3c5eeSAneesh Kumar K.V if (write) { 898c6f3c5eeSAneesh Kumar K.V if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) { 899c6f3c5eeSAneesh Kumar K.V WARN_ON_ONCE(!is_huge_zero_pud(*pud)); 900c6f3c5eeSAneesh Kumar K.V goto out_unlock; 901c6f3c5eeSAneesh Kumar K.V } 902c6f3c5eeSAneesh Kumar K.V entry = pud_mkyoung(*pud); 903c6f3c5eeSAneesh Kumar K.V entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma); 904c6f3c5eeSAneesh Kumar K.V if (pudp_set_access_flags(vma, addr, pud, entry, 1)) 905c6f3c5eeSAneesh Kumar K.V update_mmu_cache_pud(vma, addr, pud); 906c6f3c5eeSAneesh Kumar K.V } 907c6f3c5eeSAneesh Kumar K.V goto out_unlock; 908c6f3c5eeSAneesh Kumar K.V } 909c6f3c5eeSAneesh Kumar K.V 
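/*
 * Typical use of vmf_insert_pfn_pmd_prot() above is from a driver's
 * ->huge_fault() handler.  Illustrative sketch only - the mydev_* names are
 * made up, and a real driver must make sure the pfn is PMD aligned and the
 * vma is VM_PFNMAP or VM_MIXEDMAP:
 *
 *	static vm_fault_t mydev_huge_fault(struct vm_fault *vmf,
 *					   enum page_entry_size pe_size)
 *	{
 *		unsigned long pfn;
 *
 *		if (pe_size != PE_SIZE_PMD)
 *			return VM_FAULT_FALLBACK;
 *		pfn = mydev_pgoff_to_pfn(vmf->vma, vmf->pgoff);
 *		return vmf_insert_pfn_pmd_prot(vmf, pfn_to_pfn_t(pfn),
 *					       vmf->vma->vm_page_prot,
 *					       vmf->flags & FAULT_FLAG_WRITE);
 *	}
 */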
910a00cc7d9SMatthew Wilcox entry = pud_mkhuge(pfn_t_pud(pfn, prot)); 911a00cc7d9SMatthew Wilcox if (pfn_t_devmap(pfn)) 912a00cc7d9SMatthew Wilcox entry = pud_mkdevmap(entry); 913a00cc7d9SMatthew Wilcox if (write) { 914f55e1014SLinus Torvalds entry = pud_mkyoung(pud_mkdirty(entry)); 915f55e1014SLinus Torvalds entry = maybe_pud_mkwrite(entry, vma); 916a00cc7d9SMatthew Wilcox } 917a00cc7d9SMatthew Wilcox set_pud_at(mm, addr, pud, entry); 918a00cc7d9SMatthew Wilcox update_mmu_cache_pud(vma, addr, pud); 919c6f3c5eeSAneesh Kumar K.V 920c6f3c5eeSAneesh Kumar K.V out_unlock: 921a00cc7d9SMatthew Wilcox spin_unlock(ptl); 922a00cc7d9SMatthew Wilcox } 923a00cc7d9SMatthew Wilcox 9249a9731b1SThomas Hellstrom (VMware) /** 9259a9731b1SThomas Hellstrom (VMware) * vmf_insert_pfn_pud_prot - insert a pud size pfn 9269a9731b1SThomas Hellstrom (VMware) * @vmf: Structure describing the fault 9279a9731b1SThomas Hellstrom (VMware) * @pfn: pfn to insert 9289a9731b1SThomas Hellstrom (VMware) * @pgprot: page protection to use 9299a9731b1SThomas Hellstrom (VMware) * @write: whether it's a write fault 9309a9731b1SThomas Hellstrom (VMware) * 9319a9731b1SThomas Hellstrom (VMware) * Insert a pud size pfn. See vmf_insert_pfn() for additional info and 9329a9731b1SThomas Hellstrom (VMware) * also consult the vmf_insert_mixed_prot() documentation when 9339a9731b1SThomas Hellstrom (VMware) * @pgprot != @vmf->vma->vm_page_prot. 9349a9731b1SThomas Hellstrom (VMware) * 9359a9731b1SThomas Hellstrom (VMware) * Return: vm_fault_t value. 9369a9731b1SThomas Hellstrom (VMware) */ 9379a9731b1SThomas Hellstrom (VMware) vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn, 9389a9731b1SThomas Hellstrom (VMware) pgprot_t pgprot, bool write) 939a00cc7d9SMatthew Wilcox { 940fce86ff5SDan Williams unsigned long addr = vmf->address & PUD_MASK; 941fce86ff5SDan Williams struct vm_area_struct *vma = vmf->vma; 942fce86ff5SDan Williams 943a00cc7d9SMatthew Wilcox /* 944a00cc7d9SMatthew Wilcox * If we had pud_special, we could avoid all these restrictions, 945a00cc7d9SMatthew Wilcox * but we need to be consistent with PTEs and architectures that 946a00cc7d9SMatthew Wilcox * can't support a 'special' bit. 947a00cc7d9SMatthew Wilcox */ 94862ec0d8cSDave Jiang BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && 94962ec0d8cSDave Jiang !pfn_t_devmap(pfn)); 950a00cc7d9SMatthew Wilcox BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == 951a00cc7d9SMatthew Wilcox (VM_PFNMAP|VM_MIXEDMAP)); 952a00cc7d9SMatthew Wilcox BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); 953a00cc7d9SMatthew Wilcox 954a00cc7d9SMatthew Wilcox if (addr < vma->vm_start || addr >= vma->vm_end) 955a00cc7d9SMatthew Wilcox return VM_FAULT_SIGBUS; 956a00cc7d9SMatthew Wilcox 957a00cc7d9SMatthew Wilcox track_pfn_insert(vma, &pgprot, pfn); 958a00cc7d9SMatthew Wilcox 959fce86ff5SDan Williams insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write); 960a00cc7d9SMatthew Wilcox return VM_FAULT_NOPAGE; 961a00cc7d9SMatthew Wilcox } 9629a9731b1SThomas Hellstrom (VMware) EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud_prot); 963a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 964a00cc7d9SMatthew Wilcox 9653565fce3SDan Williams static void touch_pmd(struct vm_area_struct *vma, unsigned long addr, 966a8f97366SKirill A. Shutemov pmd_t *pmd, int flags) 9673565fce3SDan Williams { 9683565fce3SDan Williams pmd_t _pmd; 9693565fce3SDan Williams 970a8f97366SKirill A. Shutemov _pmd = pmd_mkyoung(*pmd); 971a8f97366SKirill A. 
Shutemov if (flags & FOLL_WRITE) 972a8f97366SKirill A. Shutemov _pmd = pmd_mkdirty(_pmd); 9733565fce3SDan Williams if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, 974a8f97366SKirill A. Shutemov pmd, _pmd, flags & FOLL_WRITE)) 9753565fce3SDan Williams update_mmu_cache_pmd(vma, addr, pmd); 9763565fce3SDan Williams } 9773565fce3SDan Williams 9783565fce3SDan Williams struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, 979df06b37fSKeith Busch pmd_t *pmd, int flags, struct dev_pagemap **pgmap) 9803565fce3SDan Williams { 9813565fce3SDan Williams unsigned long pfn = pmd_pfn(*pmd); 9823565fce3SDan Williams struct mm_struct *mm = vma->vm_mm; 9833565fce3SDan Williams struct page *page; 9843565fce3SDan Williams 9853565fce3SDan Williams assert_spin_locked(pmd_lockptr(mm, pmd)); 9863565fce3SDan Williams 9878310d48bSKeno Fischer /* 9888310d48bSKeno Fischer * When we COW a devmap PMD entry, we split it into PTEs, so we should 9898310d48bSKeno Fischer * not be in this function with `flags & FOLL_COW` set. 9908310d48bSKeno Fischer */ 9918310d48bSKeno Fischer WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set"); 9928310d48bSKeno Fischer 9933faa52c0SJohn Hubbard /* FOLL_GET and FOLL_PIN are mutually exclusive. */ 9943faa52c0SJohn Hubbard if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) == 9953faa52c0SJohn Hubbard (FOLL_PIN | FOLL_GET))) 9963faa52c0SJohn Hubbard return NULL; 9973faa52c0SJohn Hubbard 998f6f37321SLinus Torvalds if (flags & FOLL_WRITE && !pmd_write(*pmd)) 9993565fce3SDan Williams return NULL; 10003565fce3SDan Williams 10013565fce3SDan Williams if (pmd_present(*pmd) && pmd_devmap(*pmd)) 10023565fce3SDan Williams /* pass */; 10033565fce3SDan Williams else 10043565fce3SDan Williams return NULL; 10053565fce3SDan Williams 10063565fce3SDan Williams if (flags & FOLL_TOUCH) 1007a8f97366SKirill A. Shutemov touch_pmd(vma, addr, pmd, flags); 10083565fce3SDan Williams 10093565fce3SDan Williams /* 10103565fce3SDan Williams * device mapped pages can only be returned if the 10113565fce3SDan Williams * caller will manage the page reference count. 10123565fce3SDan Williams */ 10133faa52c0SJohn Hubbard if (!(flags & (FOLL_GET | FOLL_PIN))) 10143565fce3SDan Williams return ERR_PTR(-EEXIST); 10153565fce3SDan Williams 10163565fce3SDan Williams pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT; 1017df06b37fSKeith Busch *pgmap = get_dev_pagemap(pfn, *pgmap); 1018df06b37fSKeith Busch if (!*pgmap) 10193565fce3SDan Williams return ERR_PTR(-EFAULT); 10203565fce3SDan Williams page = pfn_to_page(pfn); 10213faa52c0SJohn Hubbard if (!try_grab_page(page, flags)) 10223faa52c0SJohn Hubbard page = ERR_PTR(-ENOMEM); 10233565fce3SDan Williams 10243565fce3SDan Williams return page; 10253565fce3SDan Williams } 10263565fce3SDan Williams 102771e3aac0SAndrea Arcangeli int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, 102871e3aac0SAndrea Arcangeli pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, 10298f34f1eaSPeter Xu struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) 103071e3aac0SAndrea Arcangeli { 1031c4088ebdSKirill A. Shutemov spinlock_t *dst_ptl, *src_ptl; 103271e3aac0SAndrea Arcangeli struct page *src_page; 103371e3aac0SAndrea Arcangeli pmd_t pmd; 103412c9d70bSMatthew Wilcox pgtable_t pgtable = NULL; 1035628d47ceSKirill A. Shutemov int ret = -ENOMEM; 103671e3aac0SAndrea Arcangeli 1037628d47ceSKirill A. Shutemov /* Skip if can be re-fill on fault */ 10388f34f1eaSPeter Xu if (!vma_is_anonymous(dst_vma)) 1039628d47ceSKirill A. 
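/*
 * File-backed mappings are skipped here because their huge pmds can simply
 * be re-created from the page cache at the next fault.  For anonymous pmds
 * the cases handled below are: a pmd migration entry (a writable entry is
 * downgraded to read-only in both mms so that COW still works once the
 * migration finishes), the huge zero page (only an extra reference is
 * taken), and a page that may be pinned for DMA, where the pmd is split and
 * -EAGAIN tells the caller to copy that range pte by pte instead.
 */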
Shutemov return 0; 1040628d47ceSKirill A. Shutemov 10414cf58924SJoel Fernandes (Google) pgtable = pte_alloc_one(dst_mm); 104271e3aac0SAndrea Arcangeli if (unlikely(!pgtable)) 104371e3aac0SAndrea Arcangeli goto out; 104471e3aac0SAndrea Arcangeli 1045c4088ebdSKirill A. Shutemov dst_ptl = pmd_lock(dst_mm, dst_pmd); 1046c4088ebdSKirill A. Shutemov src_ptl = pmd_lockptr(src_mm, src_pmd); 1047c4088ebdSKirill A. Shutemov spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 104871e3aac0SAndrea Arcangeli 104971e3aac0SAndrea Arcangeli ret = -EAGAIN; 105071e3aac0SAndrea Arcangeli pmd = *src_pmd; 105184c3fc4eSZi Yan 105284c3fc4eSZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 105384c3fc4eSZi Yan if (unlikely(is_swap_pmd(pmd))) { 105484c3fc4eSZi Yan swp_entry_t entry = pmd_to_swp_entry(pmd); 105584c3fc4eSZi Yan 105684c3fc4eSZi Yan VM_BUG_ON(!is_pmd_migration_entry(pmd)); 10574dd845b5SAlistair Popple if (is_writable_migration_entry(entry)) { 10584dd845b5SAlistair Popple entry = make_readable_migration_entry( 10594dd845b5SAlistair Popple swp_offset(entry)); 106084c3fc4eSZi Yan pmd = swp_entry_to_pmd(entry); 1061ab6e3d09SNaoya Horiguchi if (pmd_swp_soft_dirty(*src_pmd)) 1062ab6e3d09SNaoya Horiguchi pmd = pmd_swp_mksoft_dirty(pmd); 10638f34f1eaSPeter Xu if (pmd_swp_uffd_wp(*src_pmd)) 10648f34f1eaSPeter Xu pmd = pmd_swp_mkuffd_wp(pmd); 106584c3fc4eSZi Yan set_pmd_at(src_mm, addr, src_pmd, pmd); 106684c3fc4eSZi Yan } 1067dd8a67f9SZi Yan add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); 1068af5b0f6aSKirill A. Shutemov mm_inc_nr_ptes(dst_mm); 1069dd8a67f9SZi Yan pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); 10708f34f1eaSPeter Xu if (!userfaultfd_wp(dst_vma)) 10718f34f1eaSPeter Xu pmd = pmd_swp_clear_uffd_wp(pmd); 107284c3fc4eSZi Yan set_pmd_at(dst_mm, addr, dst_pmd, pmd); 107384c3fc4eSZi Yan ret = 0; 107484c3fc4eSZi Yan goto out_unlock; 107584c3fc4eSZi Yan } 107684c3fc4eSZi Yan #endif 107784c3fc4eSZi Yan 1078628d47ceSKirill A. Shutemov if (unlikely(!pmd_trans_huge(pmd))) { 107971e3aac0SAndrea Arcangeli pte_free(dst_mm, pgtable); 108071e3aac0SAndrea Arcangeli goto out_unlock; 108171e3aac0SAndrea Arcangeli } 1082fc9fe822SKirill A. Shutemov /* 1083c4088ebdSKirill A. Shutemov * When page table lock is held, the huge zero pmd should not be 1084fc9fe822SKirill A. Shutemov * under splitting since we don't split the page itself, only pmd to 1085fc9fe822SKirill A. Shutemov * a page table. 1086fc9fe822SKirill A. Shutemov */ 1087fc9fe822SKirill A. Shutemov if (is_huge_zero_pmd(pmd)) { 108897ae1749SKirill A. Shutemov /* 108997ae1749SKirill A. Shutemov * get_huge_zero_page() will never allocate a new page here, 109097ae1749SKirill A. Shutemov * since we already have a zero page to copy. It just takes a 109197ae1749SKirill A. Shutemov * reference. 109297ae1749SKirill A. Shutemov */ 10935fc7a5f6SPeter Xu mm_get_huge_zero_page(dst_mm); 10945fc7a5f6SPeter Xu goto out_zero_page; 1095fc9fe822SKirill A. Shutemov } 1096de466bd6SMel Gorman 109771e3aac0SAndrea Arcangeli src_page = pmd_page(pmd); 1098309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(src_page), src_page); 1099d042035eSPeter Xu 1100d042035eSPeter Xu /* 1101d042035eSPeter Xu * If this page is a potentially pinned page, split and retry the fault 1102d042035eSPeter Xu * with smaller page size. Normally this should not happen because the 1103d042035eSPeter Xu * userspace should use MADV_DONTFORK upon pinned regions. 
This is a 1104d042035eSPeter Xu * best effort that the pinned pages won't be replaced by another 1105d042035eSPeter Xu * random page during the coming copy-on-write. 1106d042035eSPeter Xu */ 11078f34f1eaSPeter Xu if (unlikely(page_needs_cow_for_dma(src_vma, src_page))) { 1108d042035eSPeter Xu pte_free(dst_mm, pgtable); 1109d042035eSPeter Xu spin_unlock(src_ptl); 1110d042035eSPeter Xu spin_unlock(dst_ptl); 11118f34f1eaSPeter Xu __split_huge_pmd(src_vma, src_pmd, addr, false, NULL); 1112d042035eSPeter Xu return -EAGAIN; 1113d042035eSPeter Xu } 1114d042035eSPeter Xu 111571e3aac0SAndrea Arcangeli get_page(src_page); 111653f9263bSKirill A. Shutemov page_dup_rmap(src_page, true); 111771e3aac0SAndrea Arcangeli add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); 11185fc7a5f6SPeter Xu out_zero_page: 1119c4812909SKirill A. Shutemov mm_inc_nr_ptes(dst_mm); 11205c7fb56eSDan Williams pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); 112171e3aac0SAndrea Arcangeli pmdp_set_wrprotect(src_mm, addr, src_pmd); 11228f34f1eaSPeter Xu if (!userfaultfd_wp(dst_vma)) 11238f34f1eaSPeter Xu pmd = pmd_clear_uffd_wp(pmd); 112471e3aac0SAndrea Arcangeli pmd = pmd_mkold(pmd_wrprotect(pmd)); 112571e3aac0SAndrea Arcangeli set_pmd_at(dst_mm, addr, dst_pmd, pmd); 112671e3aac0SAndrea Arcangeli 112771e3aac0SAndrea Arcangeli ret = 0; 112871e3aac0SAndrea Arcangeli out_unlock: 1129c4088ebdSKirill A. Shutemov spin_unlock(src_ptl); 1130c4088ebdSKirill A. Shutemov spin_unlock(dst_ptl); 113171e3aac0SAndrea Arcangeli out: 113271e3aac0SAndrea Arcangeli return ret; 113371e3aac0SAndrea Arcangeli } 113471e3aac0SAndrea Arcangeli 1135a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 1136a00cc7d9SMatthew Wilcox static void touch_pud(struct vm_area_struct *vma, unsigned long addr, 1137a8f97366SKirill A. Shutemov pud_t *pud, int flags) 1138a00cc7d9SMatthew Wilcox { 1139a00cc7d9SMatthew Wilcox pud_t _pud; 1140a00cc7d9SMatthew Wilcox 1141a8f97366SKirill A. Shutemov _pud = pud_mkyoung(*pud); 1142a8f97366SKirill A. Shutemov if (flags & FOLL_WRITE) 1143a8f97366SKirill A. Shutemov _pud = pud_mkdirty(_pud); 1144a00cc7d9SMatthew Wilcox if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK, 1145a8f97366SKirill A. Shutemov pud, _pud, flags & FOLL_WRITE)) 1146a00cc7d9SMatthew Wilcox update_mmu_cache_pud(vma, addr, pud); 1147a00cc7d9SMatthew Wilcox } 1148a00cc7d9SMatthew Wilcox 1149a00cc7d9SMatthew Wilcox struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, 1150df06b37fSKeith Busch pud_t *pud, int flags, struct dev_pagemap **pgmap) 1151a00cc7d9SMatthew Wilcox { 1152a00cc7d9SMatthew Wilcox unsigned long pfn = pud_pfn(*pud); 1153a00cc7d9SMatthew Wilcox struct mm_struct *mm = vma->vm_mm; 1154a00cc7d9SMatthew Wilcox struct page *page; 1155a00cc7d9SMatthew Wilcox 1156a00cc7d9SMatthew Wilcox assert_spin_locked(pud_lockptr(mm, pud)); 1157a00cc7d9SMatthew Wilcox 1158f6f37321SLinus Torvalds if (flags & FOLL_WRITE && !pud_write(*pud)) 1159a00cc7d9SMatthew Wilcox return NULL; 1160a00cc7d9SMatthew Wilcox 11613faa52c0SJohn Hubbard /* FOLL_GET and FOLL_PIN are mutually exclusive. 
*/ 11623faa52c0SJohn Hubbard if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) == 11633faa52c0SJohn Hubbard (FOLL_PIN | FOLL_GET))) 11643faa52c0SJohn Hubbard return NULL; 11653faa52c0SJohn Hubbard 1166a00cc7d9SMatthew Wilcox if (pud_present(*pud) && pud_devmap(*pud)) 1167a00cc7d9SMatthew Wilcox /* pass */; 1168a00cc7d9SMatthew Wilcox else 1169a00cc7d9SMatthew Wilcox return NULL; 1170a00cc7d9SMatthew Wilcox 1171a00cc7d9SMatthew Wilcox if (flags & FOLL_TOUCH) 1172a8f97366SKirill A. Shutemov touch_pud(vma, addr, pud, flags); 1173a00cc7d9SMatthew Wilcox 1174a00cc7d9SMatthew Wilcox /* 1175a00cc7d9SMatthew Wilcox * device mapped pages can only be returned if the 1176a00cc7d9SMatthew Wilcox * caller will manage the page reference count. 11773faa52c0SJohn Hubbard * 11783faa52c0SJohn Hubbard * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here: 1179a00cc7d9SMatthew Wilcox */ 11803faa52c0SJohn Hubbard if (!(flags & (FOLL_GET | FOLL_PIN))) 1181a00cc7d9SMatthew Wilcox return ERR_PTR(-EEXIST); 1182a00cc7d9SMatthew Wilcox 1183a00cc7d9SMatthew Wilcox pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT; 1184df06b37fSKeith Busch *pgmap = get_dev_pagemap(pfn, *pgmap); 1185df06b37fSKeith Busch if (!*pgmap) 1186a00cc7d9SMatthew Wilcox return ERR_PTR(-EFAULT); 1187a00cc7d9SMatthew Wilcox page = pfn_to_page(pfn); 11883faa52c0SJohn Hubbard if (!try_grab_page(page, flags)) 11893faa52c0SJohn Hubbard page = ERR_PTR(-ENOMEM); 1190a00cc7d9SMatthew Wilcox 1191a00cc7d9SMatthew Wilcox return page; 1192a00cc7d9SMatthew Wilcox } 1193a00cc7d9SMatthew Wilcox 1194a00cc7d9SMatthew Wilcox int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, 1195a00cc7d9SMatthew Wilcox pud_t *dst_pud, pud_t *src_pud, unsigned long addr, 1196a00cc7d9SMatthew Wilcox struct vm_area_struct *vma) 1197a00cc7d9SMatthew Wilcox { 1198a00cc7d9SMatthew Wilcox spinlock_t *dst_ptl, *src_ptl; 1199a00cc7d9SMatthew Wilcox pud_t pud; 1200a00cc7d9SMatthew Wilcox int ret; 1201a00cc7d9SMatthew Wilcox 1202a00cc7d9SMatthew Wilcox dst_ptl = pud_lock(dst_mm, dst_pud); 1203a00cc7d9SMatthew Wilcox src_ptl = pud_lockptr(src_mm, src_pud); 1204a00cc7d9SMatthew Wilcox spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 1205a00cc7d9SMatthew Wilcox 1206a00cc7d9SMatthew Wilcox ret = -EAGAIN; 1207a00cc7d9SMatthew Wilcox pud = *src_pud; 1208a00cc7d9SMatthew Wilcox if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud))) 1209a00cc7d9SMatthew Wilcox goto out_unlock; 1210a00cc7d9SMatthew Wilcox 1211a00cc7d9SMatthew Wilcox /* 1212a00cc7d9SMatthew Wilcox * When page table lock is held, the huge zero pud should not be 1213a00cc7d9SMatthew Wilcox * under splitting since we don't split the page itself, only pud to 1214a00cc7d9SMatthew Wilcox * a page table. 
1215a00cc7d9SMatthew Wilcox */ 1216a00cc7d9SMatthew Wilcox if (is_huge_zero_pud(pud)) { 1217a00cc7d9SMatthew Wilcox /* No huge zero pud yet */ 1218a00cc7d9SMatthew Wilcox } 1219a00cc7d9SMatthew Wilcox 1220d042035eSPeter Xu /* Please refer to comments in copy_huge_pmd() */ 122197a7e473SPeter Xu if (unlikely(page_needs_cow_for_dma(vma, pud_page(pud)))) { 1222d042035eSPeter Xu spin_unlock(src_ptl); 1223d042035eSPeter Xu spin_unlock(dst_ptl); 1224d042035eSPeter Xu __split_huge_pud(vma, src_pud, addr); 1225d042035eSPeter Xu return -EAGAIN; 1226d042035eSPeter Xu } 1227d042035eSPeter Xu 1228a00cc7d9SMatthew Wilcox pudp_set_wrprotect(src_mm, addr, src_pud); 1229a00cc7d9SMatthew Wilcox pud = pud_mkold(pud_wrprotect(pud)); 1230a00cc7d9SMatthew Wilcox set_pud_at(dst_mm, addr, dst_pud, pud); 1231a00cc7d9SMatthew Wilcox 1232a00cc7d9SMatthew Wilcox ret = 0; 1233a00cc7d9SMatthew Wilcox out_unlock: 1234a00cc7d9SMatthew Wilcox spin_unlock(src_ptl); 1235a00cc7d9SMatthew Wilcox spin_unlock(dst_ptl); 1236a00cc7d9SMatthew Wilcox return ret; 1237a00cc7d9SMatthew Wilcox } 1238a00cc7d9SMatthew Wilcox 1239a00cc7d9SMatthew Wilcox void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) 1240a00cc7d9SMatthew Wilcox { 1241a00cc7d9SMatthew Wilcox pud_t entry; 1242a00cc7d9SMatthew Wilcox unsigned long haddr; 1243a00cc7d9SMatthew Wilcox bool write = vmf->flags & FAULT_FLAG_WRITE; 1244a00cc7d9SMatthew Wilcox 1245a00cc7d9SMatthew Wilcox vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud); 1246a00cc7d9SMatthew Wilcox if (unlikely(!pud_same(*vmf->pud, orig_pud))) 1247a00cc7d9SMatthew Wilcox goto unlock; 1248a00cc7d9SMatthew Wilcox 1249a00cc7d9SMatthew Wilcox entry = pud_mkyoung(orig_pud); 1250a00cc7d9SMatthew Wilcox if (write) 1251a00cc7d9SMatthew Wilcox entry = pud_mkdirty(entry); 1252a00cc7d9SMatthew Wilcox haddr = vmf->address & HPAGE_PUD_MASK; 1253a00cc7d9SMatthew Wilcox if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write)) 1254a00cc7d9SMatthew Wilcox update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud); 1255a00cc7d9SMatthew Wilcox 1256a00cc7d9SMatthew Wilcox unlock: 1257a00cc7d9SMatthew Wilcox spin_unlock(vmf->ptl); 1258a00cc7d9SMatthew Wilcox } 1259a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 1260a00cc7d9SMatthew Wilcox 12615db4f15cSYang Shi void huge_pmd_set_accessed(struct vm_fault *vmf) 1262a1dd450bSWill Deacon { 1263a1dd450bSWill Deacon pmd_t entry; 1264a1dd450bSWill Deacon unsigned long haddr; 126520f664aaSMinchan Kim bool write = vmf->flags & FAULT_FLAG_WRITE; 12665db4f15cSYang Shi pmd_t orig_pmd = vmf->orig_pmd; 1267a1dd450bSWill Deacon 126882b0f8c3SJan Kara vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); 126982b0f8c3SJan Kara if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) 1270a1dd450bSWill Deacon goto unlock; 1271a1dd450bSWill Deacon 1272a1dd450bSWill Deacon entry = pmd_mkyoung(orig_pmd); 127320f664aaSMinchan Kim if (write) 127420f664aaSMinchan Kim entry = pmd_mkdirty(entry); 127582b0f8c3SJan Kara haddr = vmf->address & HPAGE_PMD_MASK; 127620f664aaSMinchan Kim if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write)) 127782b0f8c3SJan Kara update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd); 1278a1dd450bSWill Deacon 1279a1dd450bSWill Deacon unlock: 128082b0f8c3SJan Kara spin_unlock(vmf->ptl); 1281a1dd450bSWill Deacon } 1282a1dd450bSWill Deacon 12835db4f15cSYang Shi vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf) 128471e3aac0SAndrea Arcangeli { 128582b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 12863917c802SKirill A. 
Shutemov struct page *page; 128782b0f8c3SJan Kara unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 12885db4f15cSYang Shi pmd_t orig_pmd = vmf->orig_pmd; 128971e3aac0SAndrea Arcangeli 129082b0f8c3SJan Kara vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); 129181d1b09cSSasha Levin VM_BUG_ON_VMA(!vma->anon_vma, vma); 12923917c802SKirill A. Shutemov 129393b4796dSKirill A. Shutemov if (is_huge_zero_pmd(orig_pmd)) 12943917c802SKirill A. Shutemov goto fallback; 12953917c802SKirill A. Shutemov 129682b0f8c3SJan Kara spin_lock(vmf->ptl); 12973917c802SKirill A. Shutemov 12983917c802SKirill A. Shutemov if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { 12993917c802SKirill A. Shutemov spin_unlock(vmf->ptl); 13003917c802SKirill A. Shutemov return 0; 13013917c802SKirill A. Shutemov } 130271e3aac0SAndrea Arcangeli 130371e3aac0SAndrea Arcangeli page = pmd_page(orig_pmd); 1304f6004e73SMiaohe Lin VM_BUG_ON_PAGE(!PageHead(page), page); 13053917c802SKirill A. Shutemov 13063917c802SKirill A. Shutemov /* Lock page for reuse_swap_page() */ 1307ba3c4ce6SHuang Ying if (!trylock_page(page)) { 1308ba3c4ce6SHuang Ying get_page(page); 1309ba3c4ce6SHuang Ying spin_unlock(vmf->ptl); 1310ba3c4ce6SHuang Ying lock_page(page); 1311ba3c4ce6SHuang Ying spin_lock(vmf->ptl); 1312ba3c4ce6SHuang Ying if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { 13133917c802SKirill A. Shutemov spin_unlock(vmf->ptl); 1314ba3c4ce6SHuang Ying unlock_page(page); 1315ba3c4ce6SHuang Ying put_page(page); 13163917c802SKirill A. Shutemov return 0; 1317ba3c4ce6SHuang Ying } 1318ba3c4ce6SHuang Ying put_page(page); 1319ba3c4ce6SHuang Ying } 13203917c802SKirill A. Shutemov 13213917c802SKirill A. Shutemov /* 13223917c802SKirill A. Shutemov * We can only reuse the page if nobody else maps the huge page or it's 13233917c802SKirill A. Shutemov * part. 13243917c802SKirill A. Shutemov */ 1325ba3c4ce6SHuang Ying if (reuse_swap_page(page, NULL)) { 132671e3aac0SAndrea Arcangeli pmd_t entry; 132771e3aac0SAndrea Arcangeli entry = pmd_mkyoung(orig_pmd); 1328f55e1014SLinus Torvalds entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 132982b0f8c3SJan Kara if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) 133082b0f8c3SJan Kara update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 1331ba3c4ce6SHuang Ying unlock_page(page); 13323917c802SKirill A. Shutemov spin_unlock(vmf->ptl); 13333917c802SKirill A. Shutemov return VM_FAULT_WRITE; 133471e3aac0SAndrea Arcangeli } 13353917c802SKirill A. Shutemov 1336ba3c4ce6SHuang Ying unlock_page(page); 133782b0f8c3SJan Kara spin_unlock(vmf->ptl); 13383917c802SKirill A. Shutemov fallback: 13393917c802SKirill A. Shutemov __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); 13403917c802SKirill A. Shutemov return VM_FAULT_FALLBACK; 134171e3aac0SAndrea Arcangeli } 134271e3aac0SAndrea Arcangeli 13438310d48bSKeno Fischer /* 1344a308c71bSPeter Xu * FOLL_FORCE can write to even unwritable pmd's, but only 1345a308c71bSPeter Xu * after we've gone through a COW cycle and they are dirty. 
13468310d48bSKeno Fischer */ 13478310d48bSKeno Fischer static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) 13488310d48bSKeno Fischer { 1349a308c71bSPeter Xu return pmd_write(pmd) || 1350a308c71bSPeter Xu ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd)); 13518310d48bSKeno Fischer } 13528310d48bSKeno Fischer 1353b676b293SDavid Rientjes struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, 135471e3aac0SAndrea Arcangeli unsigned long addr, 135571e3aac0SAndrea Arcangeli pmd_t *pmd, 135671e3aac0SAndrea Arcangeli unsigned int flags) 135771e3aac0SAndrea Arcangeli { 1358b676b293SDavid Rientjes struct mm_struct *mm = vma->vm_mm; 135971e3aac0SAndrea Arcangeli struct page *page = NULL; 136071e3aac0SAndrea Arcangeli 1361c4088ebdSKirill A. Shutemov assert_spin_locked(pmd_lockptr(mm, pmd)); 136271e3aac0SAndrea Arcangeli 13638310d48bSKeno Fischer if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags)) 136471e3aac0SAndrea Arcangeli goto out; 136571e3aac0SAndrea Arcangeli 136685facf25SKirill A. Shutemov /* Avoid dumping huge zero page */ 136785facf25SKirill A. Shutemov if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) 136885facf25SKirill A. Shutemov return ERR_PTR(-EFAULT); 136985facf25SKirill A. Shutemov 13702b4847e7SMel Gorman /* Full NUMA hinting faults to serialise migration in fault paths */ 13718a0516edSMel Gorman if ((flags & FOLL_NUMA) && pmd_protnone(*pmd)) 13722b4847e7SMel Gorman goto out; 13732b4847e7SMel Gorman 137471e3aac0SAndrea Arcangeli page = pmd_page(*pmd); 1375ca120cf6SDan Williams VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page); 13763faa52c0SJohn Hubbard 13773faa52c0SJohn Hubbard if (!try_grab_page(page, flags)) 13783faa52c0SJohn Hubbard return ERR_PTR(-ENOMEM); 13793faa52c0SJohn Hubbard 13803565fce3SDan Williams if (flags & FOLL_TOUCH) 1381a8f97366SKirill A. Shutemov touch_pmd(vma, addr, pmd, flags); 13823faa52c0SJohn Hubbard 1383de60f5f1SEric B Munson if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { 1384e90309c9SKirill A. Shutemov /* 1385e90309c9SKirill A. Shutemov * We don't mlock() pte-mapped THPs. This way we can avoid 1386e90309c9SKirill A. Shutemov * leaking mlocked pages into non-VM_LOCKED VMAs. 1387e90309c9SKirill A. Shutemov * 13889a73f61bSKirill A. Shutemov * For anon THP: 13899a73f61bSKirill A. Shutemov * 1390e90309c9SKirill A. Shutemov * In most cases the pmd is the only mapping of the page as we 1391e90309c9SKirill A. Shutemov * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for 1392e90309c9SKirill A. Shutemov * writable private mappings in populate_vma_page_range(). 1393e90309c9SKirill A. Shutemov * 1394e90309c9SKirill A. Shutemov * The only scenario when we have the page shared here is if we 1395e90309c9SKirill A. Shutemov * mlocking read-only mapping shared over fork(). We skip 1396e90309c9SKirill A. Shutemov * mlocking such pages. 13979a73f61bSKirill A. Shutemov * 13989a73f61bSKirill A. Shutemov * For file THP: 13999a73f61bSKirill A. Shutemov * 14009a73f61bSKirill A. Shutemov * We can expect PageDoubleMap() to be stable under page lock: 14019a73f61bSKirill A. Shutemov * for file pages we set it in page_add_file_rmap(), which 14029a73f61bSKirill A. Shutemov * requires page to be locked. 1403e90309c9SKirill A. Shutemov */ 14049a73f61bSKirill A. Shutemov 14059a73f61bSKirill A. Shutemov if (PageAnon(page) && compound_mapcount(page) != 1) 14069a73f61bSKirill A. Shutemov goto skip_mlock; 14079a73f61bSKirill A. Shutemov if (PageDoubleMap(page) || !page->mapping) 14089a73f61bSKirill A. 
Shutemov goto skip_mlock; 14099a73f61bSKirill A. Shutemov if (!trylock_page(page)) 14109a73f61bSKirill A. Shutemov goto skip_mlock; 14119a73f61bSKirill A. Shutemov if (page->mapping && !PageDoubleMap(page)) 1412b676b293SDavid Rientjes mlock_vma_page(page); 1413b676b293SDavid Rientjes unlock_page(page); 1414b676b293SDavid Rientjes } 14159a73f61bSKirill A. Shutemov skip_mlock: 141671e3aac0SAndrea Arcangeli page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; 1417ca120cf6SDan Williams VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page); 141871e3aac0SAndrea Arcangeli 141971e3aac0SAndrea Arcangeli out: 142071e3aac0SAndrea Arcangeli return page; 142171e3aac0SAndrea Arcangeli } 142271e3aac0SAndrea Arcangeli 1423d10e63f2SMel Gorman /* NUMA hinting page fault entry point for trans huge pmds */ 14245db4f15cSYang Shi vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) 1425d10e63f2SMel Gorman { 142682b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 1427c5b5a3ddSYang Shi pmd_t oldpmd = vmf->orig_pmd; 1428c5b5a3ddSYang Shi pmd_t pmd; 1429b32967ffSMel Gorman struct page *page; 143082b0f8c3SJan Kara unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 1431c5b5a3ddSYang Shi int page_nid = NUMA_NO_NODE; 143290572890SPeter Zijlstra int target_nid, last_cpupid = -1; 14338191acbdSMel Gorman bool migrated = false; 1434c5b5a3ddSYang Shi bool was_writable = pmd_savedwrite(oldpmd); 14356688cc05SPeter Zijlstra int flags = 0; 1436d10e63f2SMel Gorman 143782b0f8c3SJan Kara vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 1438c5b5a3ddSYang Shi if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) { 143982b0f8c3SJan Kara spin_unlock(vmf->ptl); 1440de466bd6SMel Gorman goto out; 1441de466bd6SMel Gorman } 1442de466bd6SMel Gorman 1443a54a407fSMel Gorman /* 14448b1b436dSPeter Zijlstra * Since we took the NUMA fault, we must have observed the !accessible 14458b1b436dSPeter Zijlstra * bit. Make sure all other CPUs agree with that, to avoid them 14468b1b436dSPeter Zijlstra * modifying the page we're about to migrate. 14478b1b436dSPeter Zijlstra * 14488b1b436dSPeter Zijlstra * Must be done under PTL such that we'll observe the relevant 1449ccde85baSPeter Zijlstra * inc_tlb_flush_pending(). 1450ccde85baSPeter Zijlstra * 1451ccde85baSPeter Zijlstra * We are not sure a pending tlb flush here is for a huge page 1452ccde85baSPeter Zijlstra * mapping or not. Hence use the tlb range variant 14538b1b436dSPeter Zijlstra */ 14547066f0f9SAndrea Arcangeli if (mm_tlb_flush_pending(vma->vm_mm)) { 1455ccde85baSPeter Zijlstra flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE); 14567066f0f9SAndrea Arcangeli /* 14577066f0f9SAndrea Arcangeli * change_huge_pmd() released the pmd lock before 14587066f0f9SAndrea Arcangeli * invalidating the secondary MMUs sharing the primary 14597066f0f9SAndrea Arcangeli * MMU pagetables (with ->invalidate_range()). The 14607066f0f9SAndrea Arcangeli * mmu_notifier_invalidate_range_end() (which 14617066f0f9SAndrea Arcangeli * internally calls ->invalidate_range()) in 14627066f0f9SAndrea Arcangeli * change_pmd_range() will run after us, so we can't 14637066f0f9SAndrea Arcangeli * rely on it here and we need an explicit invalidate. 
14647066f0f9SAndrea Arcangeli */ 14657066f0f9SAndrea Arcangeli mmu_notifier_invalidate_range(vma->vm_mm, haddr, 14667066f0f9SAndrea Arcangeli haddr + HPAGE_PMD_SIZE); 14677066f0f9SAndrea Arcangeli } 14688b1b436dSPeter Zijlstra 1469c5b5a3ddSYang Shi pmd = pmd_modify(oldpmd, vma->vm_page_prot); 1470c5b5a3ddSYang Shi page = vm_normal_page_pmd(vma, haddr, pmd); 1471c5b5a3ddSYang Shi if (!page) 1472c5b5a3ddSYang Shi goto out_map; 1473c5b5a3ddSYang Shi 1474c5b5a3ddSYang Shi /* See similar comment in do_numa_page for explanation */ 1475c5b5a3ddSYang Shi if (!was_writable) 1476c5b5a3ddSYang Shi flags |= TNF_NO_GROUP; 1477c5b5a3ddSYang Shi 1478c5b5a3ddSYang Shi page_nid = page_to_nid(page); 1479c5b5a3ddSYang Shi last_cpupid = page_cpupid_last(page); 1480c5b5a3ddSYang Shi target_nid = numa_migrate_prep(page, vma, haddr, page_nid, 1481c5b5a3ddSYang Shi &flags); 1482c5b5a3ddSYang Shi 1483c5b5a3ddSYang Shi if (target_nid == NUMA_NO_NODE) { 1484c5b5a3ddSYang Shi put_page(page); 1485c5b5a3ddSYang Shi goto out_map; 1486c5b5a3ddSYang Shi } 1487c5b5a3ddSYang Shi 148882b0f8c3SJan Kara spin_unlock(vmf->ptl); 14898b1b436dSPeter Zijlstra 1490c5b5a3ddSYang Shi migrated = migrate_misplaced_page(page, vma, target_nid); 14916688cc05SPeter Zijlstra if (migrated) { 14926688cc05SPeter Zijlstra flags |= TNF_MIGRATED; 14938191acbdSMel Gorman page_nid = target_nid; 1494c5b5a3ddSYang Shi } else { 1495074c2381SMel Gorman flags |= TNF_MIGRATE_FAIL; 1496c5b5a3ddSYang Shi vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 1497c5b5a3ddSYang Shi if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) { 149882b0f8c3SJan Kara spin_unlock(vmf->ptl); 1499c5b5a3ddSYang Shi goto out; 1500c5b5a3ddSYang Shi } 1501c5b5a3ddSYang Shi goto out_map; 1502c5b5a3ddSYang Shi } 1503b8916634SMel Gorman 1504b8916634SMel Gorman out: 150598fa15f3SAnshuman Khandual if (page_nid != NUMA_NO_NODE) 150682b0f8c3SJan Kara task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, 15079a8b300fSAneesh Kumar K.V flags); 15088191acbdSMel Gorman 1509d10e63f2SMel Gorman return 0; 1510c5b5a3ddSYang Shi 1511c5b5a3ddSYang Shi out_map: 1512c5b5a3ddSYang Shi /* Restore the PMD */ 1513c5b5a3ddSYang Shi pmd = pmd_modify(oldpmd, vma->vm_page_prot); 1514c5b5a3ddSYang Shi pmd = pmd_mkyoung(pmd); 1515c5b5a3ddSYang Shi if (was_writable) 1516c5b5a3ddSYang Shi pmd = pmd_mkwrite(pmd); 1517c5b5a3ddSYang Shi set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); 1518c5b5a3ddSYang Shi update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 1519c5b5a3ddSYang Shi spin_unlock(vmf->ptl); 1520c5b5a3ddSYang Shi goto out; 1521d10e63f2SMel Gorman } 1522d10e63f2SMel Gorman 1523319904adSHuang Ying /* 1524319904adSHuang Ying * Return true if we do MADV_FREE successfully on entire pmd page. 1525319904adSHuang Ying * Otherwise, return false. 1526319904adSHuang Ying */ 1527319904adSHuang Ying bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1528b8d3c4c3SMinchan Kim pmd_t *pmd, unsigned long addr, unsigned long next) 1529b8d3c4c3SMinchan Kim { 1530b8d3c4c3SMinchan Kim spinlock_t *ptl; 1531b8d3c4c3SMinchan Kim pmd_t orig_pmd; 1532b8d3c4c3SMinchan Kim struct page *page; 1533b8d3c4c3SMinchan Kim struct mm_struct *mm = tlb->mm; 1534319904adSHuang Ying bool ret = false; 1535b8d3c4c3SMinchan Kim 1536ed6a7935SPeter Zijlstra tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 153707e32661SAneesh Kumar K.V 1538b6ec57f4SKirill A. Shutemov ptl = pmd_trans_huge_lock(pmd, vma); 1539b6ec57f4SKirill A. 
Shutemov if (!ptl) 154025eedabeSLinus Torvalds goto out_unlocked; 1541b8d3c4c3SMinchan Kim 1542b8d3c4c3SMinchan Kim orig_pmd = *pmd; 1543319904adSHuang Ying if (is_huge_zero_pmd(orig_pmd)) 1544b8d3c4c3SMinchan Kim goto out; 1545b8d3c4c3SMinchan Kim 154684c3fc4eSZi Yan if (unlikely(!pmd_present(orig_pmd))) { 154784c3fc4eSZi Yan VM_BUG_ON(thp_migration_supported() && 154884c3fc4eSZi Yan !is_pmd_migration_entry(orig_pmd)); 154984c3fc4eSZi Yan goto out; 155084c3fc4eSZi Yan } 155184c3fc4eSZi Yan 1552b8d3c4c3SMinchan Kim page = pmd_page(orig_pmd); 1553b8d3c4c3SMinchan Kim /* 1554b8d3c4c3SMinchan Kim * If other processes are mapping this page, we can't discard 1555b8d3c4c3SMinchan Kim * the page unless they all do MADV_FREE, so let's skip the page. 1556b8d3c4c3SMinchan Kim */ 1557babbbdd0SMiaohe Lin if (total_mapcount(page) != 1) 1558b8d3c4c3SMinchan Kim goto out; 1559b8d3c4c3SMinchan Kim 1560b8d3c4c3SMinchan Kim if (!trylock_page(page)) 1561b8d3c4c3SMinchan Kim goto out; 1562b8d3c4c3SMinchan Kim 1563b8d3c4c3SMinchan Kim /* 1564b8d3c4c3SMinchan Kim * If the user wants to discard part of the pages of the THP, split it so MADV_FREE 1565b8d3c4c3SMinchan Kim * will deactivate only them. 1566b8d3c4c3SMinchan Kim */ 1567b8d3c4c3SMinchan Kim if (next - addr != HPAGE_PMD_SIZE) { 1568b8d3c4c3SMinchan Kim get_page(page); 1569b8d3c4c3SMinchan Kim spin_unlock(ptl); 15709818b8cdSHuang Ying split_huge_page(page); 1571b8d3c4c3SMinchan Kim unlock_page(page); 1572bbf29ffcSKirill A. Shutemov put_page(page); 1573b8d3c4c3SMinchan Kim goto out_unlocked; 1574b8d3c4c3SMinchan Kim } 1575b8d3c4c3SMinchan Kim 1576b8d3c4c3SMinchan Kim if (PageDirty(page)) 1577b8d3c4c3SMinchan Kim ClearPageDirty(page); 1578b8d3c4c3SMinchan Kim unlock_page(page); 1579b8d3c4c3SMinchan Kim 1580b8d3c4c3SMinchan Kim if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) { 158158ceeb6bSKirill A. Shutemov pmdp_invalidate(vma, addr, pmd); 1582b8d3c4c3SMinchan Kim orig_pmd = pmd_mkold(orig_pmd); 1583b8d3c4c3SMinchan Kim orig_pmd = pmd_mkclean(orig_pmd); 1584b8d3c4c3SMinchan Kim 1585b8d3c4c3SMinchan Kim set_pmd_at(mm, addr, pmd, orig_pmd); 1586b8d3c4c3SMinchan Kim tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 1587b8d3c4c3SMinchan Kim } 1588802a3a92SShaohua Li 1589802a3a92SShaohua Li mark_page_lazyfree(page); 1590319904adSHuang Ying ret = true; 1591b8d3c4c3SMinchan Kim out: 1592b8d3c4c3SMinchan Kim spin_unlock(ptl); 1593b8d3c4c3SMinchan Kim out_unlocked: 1594b8d3c4c3SMinchan Kim return ret; 1595b8d3c4c3SMinchan Kim } 1596b8d3c4c3SMinchan Kim 1597953c66c2SAneesh Kumar K.V static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) 1598953c66c2SAneesh Kumar K.V { 1599953c66c2SAneesh Kumar K.V pgtable_t pgtable; 1600953c66c2SAneesh Kumar K.V 1601953c66c2SAneesh Kumar K.V pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1602953c66c2SAneesh Kumar K.V pte_free(mm, pgtable); 1603c4812909SKirill A. Shutemov mm_dec_nr_ptes(mm); 1604953c66c2SAneesh Kumar K.V } 1605953c66c2SAneesh Kumar K.V 160671e3aac0SAndrea Arcangeli int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1607f21760b1SShaohua Li pmd_t *pmd, unsigned long addr) 160871e3aac0SAndrea Arcangeli { 1609f5c8ad47SDavid Miller pmd_t orig_pmd; 1610da146769SKirill A. Shutemov spinlock_t *ptl; 1611da146769SKirill A. Shutemov 1612ed6a7935SPeter Zijlstra tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 161307e32661SAneesh Kumar K.V 1614b6ec57f4SKirill A. Shutemov ptl = __pmd_trans_huge_lock(pmd, vma); 1615b6ec57f4SKirill A. Shutemov if (!ptl) 1616da146769SKirill A.
Shutemov return 0; 1617a6bf2bb0SAneesh Kumar K.V /* 1618a6bf2bb0SAneesh Kumar K.V * For architectures like ppc64 we look at deposited pgtable 16198809aa2dSAneesh Kumar K.V * when calling pmdp_huge_get_and_clear. So do the 1620a6bf2bb0SAneesh Kumar K.V * pgtable_trans_huge_withdraw after finishing pmdp related 1621a6bf2bb0SAneesh Kumar K.V * operations. 1622a6bf2bb0SAneesh Kumar K.V */ 162393a98695SAneesh Kumar K.V orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd, 1624fcbe08d6SMartin Schwidefsky tlb->fullmm); 1625f21760b1SShaohua Li tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 16262484ca9bSThomas Hellstrom (VMware) if (vma_is_special_huge(vma)) { 16273b6521f5SOliver O'Halloran if (arch_needs_pgtable_deposit()) 16283b6521f5SOliver O'Halloran zap_deposited_table(tlb->mm, pmd); 16294897c765SMatthew Wilcox spin_unlock(ptl); 1630da146769SKirill A. Shutemov } else if (is_huge_zero_pmd(orig_pmd)) { 1631c14a6eb4SOliver O'Halloran zap_deposited_table(tlb->mm, pmd); 1632bf929152SKirill A. Shutemov spin_unlock(ptl); 1633479f0abbSKirill A. Shutemov } else { 1634616b8371SZi Yan struct page *page = NULL; 1635616b8371SZi Yan int flush_needed = 1; 1636616b8371SZi Yan 1637616b8371SZi Yan if (pmd_present(orig_pmd)) { 1638616b8371SZi Yan page = pmd_page(orig_pmd); 1639d281ee61SKirill A. Shutemov page_remove_rmap(page, true); 1640309381feSSasha Levin VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); 1641309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(page), page); 1642616b8371SZi Yan } else if (thp_migration_supported()) { 1643616b8371SZi Yan swp_entry_t entry; 1644616b8371SZi Yan 1645616b8371SZi Yan VM_BUG_ON(!is_pmd_migration_entry(orig_pmd)); 1646616b8371SZi Yan entry = pmd_to_swp_entry(orig_pmd); 1647af5cdaf8SAlistair Popple page = pfn_swap_entry_to_page(entry); 1648616b8371SZi Yan flush_needed = 0; 1649616b8371SZi Yan } else 1650616b8371SZi Yan WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); 1651616b8371SZi Yan 1652b5072380SKirill A. Shutemov if (PageAnon(page)) { 1653c14a6eb4SOliver O'Halloran zap_deposited_table(tlb->mm, pmd); 1654b5072380SKirill A. Shutemov add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); 1655b5072380SKirill A. Shutemov } else { 1656953c66c2SAneesh Kumar K.V if (arch_needs_pgtable_deposit()) 1657953c66c2SAneesh Kumar K.V zap_deposited_table(tlb->mm, pmd); 1658fadae295SYang Shi add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR); 1659b5072380SKirill A. Shutemov } 1660616b8371SZi Yan 1661bf929152SKirill A. Shutemov spin_unlock(ptl); 1662616b8371SZi Yan if (flush_needed) 1663e77b0852SAneesh Kumar K.V tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE); 1664479f0abbSKirill A. Shutemov } 1665da146769SKirill A. Shutemov return 1; 166671e3aac0SAndrea Arcangeli } 166771e3aac0SAndrea Arcangeli 16681dd38b6cSAneesh Kumar K.V #ifndef pmd_move_must_withdraw 16691dd38b6cSAneesh Kumar K.V static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl, 16701dd38b6cSAneesh Kumar K.V spinlock_t *old_pmd_ptl, 16711dd38b6cSAneesh Kumar K.V struct vm_area_struct *vma) 16721dd38b6cSAneesh Kumar K.V { 16731dd38b6cSAneesh Kumar K.V /* 16741dd38b6cSAneesh Kumar K.V * With split pmd lock we also need to move preallocated 16751dd38b6cSAneesh Kumar K.V * PTE page table if new_pmd is on different PMD page table. 16761dd38b6cSAneesh Kumar K.V * 16771dd38b6cSAneesh Kumar K.V * We also don't deposit and withdraw tables for file pages. 
16781dd38b6cSAneesh Kumar K.V */ 16791dd38b6cSAneesh Kumar K.V return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); 16801dd38b6cSAneesh Kumar K.V } 16811dd38b6cSAneesh Kumar K.V #endif 16821dd38b6cSAneesh Kumar K.V 1683ab6e3d09SNaoya Horiguchi static pmd_t move_soft_dirty_pmd(pmd_t pmd) 1684ab6e3d09SNaoya Horiguchi { 1685ab6e3d09SNaoya Horiguchi #ifdef CONFIG_MEM_SOFT_DIRTY 1686ab6e3d09SNaoya Horiguchi if (unlikely(is_pmd_migration_entry(pmd))) 1687ab6e3d09SNaoya Horiguchi pmd = pmd_swp_mksoft_dirty(pmd); 1688ab6e3d09SNaoya Horiguchi else if (pmd_present(pmd)) 1689ab6e3d09SNaoya Horiguchi pmd = pmd_mksoft_dirty(pmd); 1690ab6e3d09SNaoya Horiguchi #endif 1691ab6e3d09SNaoya Horiguchi return pmd; 1692ab6e3d09SNaoya Horiguchi } 1693ab6e3d09SNaoya Horiguchi 1694bf8616d5SHugh Dickins bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, 1695b8aa9d9dSWei Yang unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd) 169637a1c49aSAndrea Arcangeli { 1697bf929152SKirill A. Shutemov spinlock_t *old_ptl, *new_ptl; 169837a1c49aSAndrea Arcangeli pmd_t pmd; 169937a1c49aSAndrea Arcangeli struct mm_struct *mm = vma->vm_mm; 17005d190420SAaron Lu bool force_flush = false; 170137a1c49aSAndrea Arcangeli 170237a1c49aSAndrea Arcangeli /* 170337a1c49aSAndrea Arcangeli * The destination pmd shouldn't be established, free_pgtables() 170437a1c49aSAndrea Arcangeli * should have released it. 170537a1c49aSAndrea Arcangeli */ 170637a1c49aSAndrea Arcangeli if (WARN_ON(!pmd_none(*new_pmd))) { 170737a1c49aSAndrea Arcangeli VM_BUG_ON(pmd_trans_huge(*new_pmd)); 17084b471e88SKirill A. Shutemov return false; 170937a1c49aSAndrea Arcangeli } 171037a1c49aSAndrea Arcangeli 1711bf929152SKirill A. Shutemov /* 1712bf929152SKirill A. Shutemov * We don't have to worry about the ordering of src and dst 1713c1e8d7c6SMichel Lespinasse * ptlocks because exclusive mmap_lock prevents deadlock. 1714bf929152SKirill A. Shutemov */ 1715b6ec57f4SKirill A. Shutemov old_ptl = __pmd_trans_huge_lock(old_pmd, vma); 1716b6ec57f4SKirill A. Shutemov if (old_ptl) { 1717bf929152SKirill A. Shutemov new_ptl = pmd_lockptr(mm, new_pmd); 1718bf929152SKirill A. Shutemov if (new_ptl != old_ptl) 1719bf929152SKirill A. Shutemov spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); 17208809aa2dSAneesh Kumar K.V pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); 1721eb66ae03SLinus Torvalds if (pmd_present(pmd)) 1722a2ce2666SAaron Lu force_flush = true; 172337a1c49aSAndrea Arcangeli VM_BUG_ON(!pmd_none(*new_pmd)); 17243592806cSKirill A. Shutemov 17251dd38b6cSAneesh Kumar K.V if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) { 1726b3084f4dSAneesh Kumar K.V pgtable_t pgtable; 17273592806cSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); 17283592806cSKirill A. Shutemov pgtable_trans_huge_deposit(mm, new_pmd, pgtable); 17293592806cSKirill A. Shutemov } 1730ab6e3d09SNaoya Horiguchi pmd = move_soft_dirty_pmd(pmd); 1731ab6e3d09SNaoya Horiguchi set_pmd_at(mm, new_addr, new_pmd, pmd); 17325d190420SAaron Lu if (force_flush) 17335d190420SAaron Lu flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); 1734eb66ae03SLinus Torvalds if (new_ptl != old_ptl) 1735eb66ae03SLinus Torvalds spin_unlock(new_ptl); 1736bf929152SKirill A. Shutemov spin_unlock(old_ptl); 17374b471e88SKirill A. Shutemov return true; 173837a1c49aSAndrea Arcangeli } 17394b471e88SKirill A.
Shutemov return false; 174037a1c49aSAndrea Arcangeli } 174137a1c49aSAndrea Arcangeli 1742f123d74aSMel Gorman /* 1743f123d74aSMel Gorman * Returns 1744f123d74aSMel Gorman * - 0 if PMD could not be locked 1745f0953a1bSIngo Molnar * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary 1746e346e668SYang Shi * or if prot_numa but THP migration is not supported 1747f0953a1bSIngo Molnar * - HPAGE_PMD_NR if protections changed and TLB flush necessary 1748f123d74aSMel Gorman */ 1749cd7548abSJohannes Weiner int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 175058705444SPeter Xu unsigned long addr, pgprot_t newprot, unsigned long cp_flags) 1751cd7548abSJohannes Weiner { 1752cd7548abSJohannes Weiner struct mm_struct *mm = vma->vm_mm; 1753bf929152SKirill A. Shutemov spinlock_t *ptl; 17540a85e51dSKirill A. Shutemov pmd_t entry; 17550a85e51dSKirill A. Shutemov bool preserve_write; 17560a85e51dSKirill A. Shutemov int ret; 175758705444SPeter Xu bool prot_numa = cp_flags & MM_CP_PROT_NUMA; 1758292924b2SPeter Xu bool uffd_wp = cp_flags & MM_CP_UFFD_WP; 1759292924b2SPeter Xu bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; 1760cd7548abSJohannes Weiner 1761e346e668SYang Shi if (prot_numa && !thp_migration_supported()) 1762e346e668SYang Shi return 1; 1763e346e668SYang Shi 1764b6ec57f4SKirill A. Shutemov ptl = __pmd_trans_huge_lock(pmd, vma); 17650a85e51dSKirill A. Shutemov if (!ptl) 17660a85e51dSKirill A. Shutemov return 0; 17670a85e51dSKirill A. Shutemov 17680a85e51dSKirill A. Shutemov preserve_write = prot_numa && pmd_write(*pmd); 1769ba68bc01SMel Gorman ret = 1; 1770e944fd67SMel Gorman 177184c3fc4eSZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 177284c3fc4eSZi Yan if (is_swap_pmd(*pmd)) { 177384c3fc4eSZi Yan swp_entry_t entry = pmd_to_swp_entry(*pmd); 177484c3fc4eSZi Yan 177584c3fc4eSZi Yan VM_BUG_ON(!is_pmd_migration_entry(*pmd)); 17764dd845b5SAlistair Popple if (is_writable_migration_entry(entry)) { 177784c3fc4eSZi Yan pmd_t newpmd; 177884c3fc4eSZi Yan /* 177984c3fc4eSZi Yan * A protection check is difficult so 178084c3fc4eSZi Yan * just be safe and disable write 178184c3fc4eSZi Yan */ 17824dd845b5SAlistair Popple entry = make_readable_migration_entry( 17834dd845b5SAlistair Popple swp_offset(entry)); 178484c3fc4eSZi Yan newpmd = swp_entry_to_pmd(entry); 1785ab6e3d09SNaoya Horiguchi if (pmd_swp_soft_dirty(*pmd)) 1786ab6e3d09SNaoya Horiguchi newpmd = pmd_swp_mksoft_dirty(newpmd); 17878f34f1eaSPeter Xu if (pmd_swp_uffd_wp(*pmd)) 17888f34f1eaSPeter Xu newpmd = pmd_swp_mkuffd_wp(newpmd); 178984c3fc4eSZi Yan set_pmd_at(mm, addr, pmd, newpmd); 179084c3fc4eSZi Yan } 179184c3fc4eSZi Yan goto unlock; 179284c3fc4eSZi Yan } 179384c3fc4eSZi Yan #endif 179484c3fc4eSZi Yan 1795e944fd67SMel Gorman /* 1796e944fd67SMel Gorman * Avoid trapping faults against the zero page. The read-only 1797e944fd67SMel Gorman * data is likely to be read-cached on the local CPU and 1798e944fd67SMel Gorman * local/remote hits to the zero page are not interesting. 1799e944fd67SMel Gorman */ 18000a85e51dSKirill A. Shutemov if (prot_numa && is_huge_zero_pmd(*pmd)) 18010a85e51dSKirill A. Shutemov goto unlock; 1802e944fd67SMel Gorman 18030a85e51dSKirill A. Shutemov if (prot_numa && pmd_protnone(*pmd)) 18040a85e51dSKirill A. Shutemov goto unlock; 18050a85e51dSKirill A. Shutemov 1806ced10803SKirill A. Shutemov /* 18073e4e28c5SMichel Lespinasse * In case prot_numa, we are under mmap_read_lock(mm). It's critical 1808ced10803SKirill A. 
Shutemov * to not clear pmd intermittently to avoid race with MADV_DONTNEED 18093e4e28c5SMichel Lespinasse * which is also under mmap_read_lock(mm): 1810ced10803SKirill A. Shutemov * 1811ced10803SKirill A. Shutemov * CPU0: CPU1: 1812ced10803SKirill A. Shutemov * change_huge_pmd(prot_numa=1) 1813ced10803SKirill A. Shutemov * pmdp_huge_get_and_clear_notify() 1814ced10803SKirill A. Shutemov * madvise_dontneed() 1815ced10803SKirill A. Shutemov * zap_pmd_range() 1816ced10803SKirill A. Shutemov * pmd_trans_huge(*pmd) == 0 (without ptl) 1817ced10803SKirill A. Shutemov * // skip the pmd 1818ced10803SKirill A. Shutemov * set_pmd_at(); 1819ced10803SKirill A. Shutemov * // pmd is re-established 1820ced10803SKirill A. Shutemov * 1821ced10803SKirill A. Shutemov * The race makes MADV_DONTNEED miss the huge pmd and don't clear it 1822ced10803SKirill A. Shutemov * which may break userspace. 1823ced10803SKirill A. Shutemov * 1824ced10803SKirill A. Shutemov * pmdp_invalidate() is required to make sure we don't miss 1825ced10803SKirill A. Shutemov * dirty/young flags set by hardware. 1826ced10803SKirill A. Shutemov */ 1827a3cf988fSKirill A. Shutemov entry = pmdp_invalidate(vma, addr, pmd); 1828ced10803SKirill A. Shutemov 1829cd7548abSJohannes Weiner entry = pmd_modify(entry, newprot); 1830b191f9b1SMel Gorman if (preserve_write) 1831288bc549SAneesh Kumar K.V entry = pmd_mk_savedwrite(entry); 1832292924b2SPeter Xu if (uffd_wp) { 1833292924b2SPeter Xu entry = pmd_wrprotect(entry); 1834292924b2SPeter Xu entry = pmd_mkuffd_wp(entry); 1835292924b2SPeter Xu } else if (uffd_wp_resolve) { 1836292924b2SPeter Xu /* 1837292924b2SPeter Xu * Leave the write bit to be handled by PF interrupt 1838292924b2SPeter Xu * handler, then things like COW could be properly 1839292924b2SPeter Xu * handled. 1840292924b2SPeter Xu */ 1841292924b2SPeter Xu entry = pmd_clear_uffd_wp(entry); 1842292924b2SPeter Xu } 1843f123d74aSMel Gorman ret = HPAGE_PMD_NR; 184456eecdb9SAneesh Kumar K.V set_pmd_at(mm, addr, pmd, entry); 18450a85e51dSKirill A. Shutemov BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry)); 18460a85e51dSKirill A. Shutemov unlock: 1847bf929152SKirill A. Shutemov spin_unlock(ptl); 1848cd7548abSJohannes Weiner return ret; 1849cd7548abSJohannes Weiner } 1850cd7548abSJohannes Weiner 1851025c5b24SNaoya Horiguchi /* 18528f19b0c0SHuang Ying * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise. 1853025c5b24SNaoya Horiguchi * 18548f19b0c0SHuang Ying * Note that if it returns page table lock pointer, this routine returns without 18558f19b0c0SHuang Ying * unlocking page table lock. So callers must unlock it. 1856025c5b24SNaoya Horiguchi */ 1857b6ec57f4SKirill A. Shutemov spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) 1858025c5b24SNaoya Horiguchi { 1859b6ec57f4SKirill A. Shutemov spinlock_t *ptl; 1860b6ec57f4SKirill A. Shutemov ptl = pmd_lock(vma->vm_mm, pmd); 186184c3fc4eSZi Yan if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || 186284c3fc4eSZi Yan pmd_devmap(*pmd))) 1863b6ec57f4SKirill A. Shutemov return ptl; 1864b6ec57f4SKirill A. Shutemov spin_unlock(ptl); 1865b6ec57f4SKirill A. Shutemov return NULL; 1866025c5b24SNaoya Horiguchi } 1867025c5b24SNaoya Horiguchi 1868a00cc7d9SMatthew Wilcox /* 1869a00cc7d9SMatthew Wilcox * Returns true if a given pud maps a thp, false otherwise. 1870a00cc7d9SMatthew Wilcox * 1871a00cc7d9SMatthew Wilcox * Note that if it returns true, this routine returns without unlocking page 1872a00cc7d9SMatthew Wilcox * table lock. 
So callers must unlock it. 1873a00cc7d9SMatthew Wilcox */ 1874a00cc7d9SMatthew Wilcox spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) 1875a00cc7d9SMatthew Wilcox { 1876a00cc7d9SMatthew Wilcox spinlock_t *ptl; 1877a00cc7d9SMatthew Wilcox 1878a00cc7d9SMatthew Wilcox ptl = pud_lock(vma->vm_mm, pud); 1879a00cc7d9SMatthew Wilcox if (likely(pud_trans_huge(*pud) || pud_devmap(*pud))) 1880a00cc7d9SMatthew Wilcox return ptl; 1881a00cc7d9SMatthew Wilcox spin_unlock(ptl); 1882a00cc7d9SMatthew Wilcox return NULL; 1883a00cc7d9SMatthew Wilcox } 1884a00cc7d9SMatthew Wilcox 1885a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 1886a00cc7d9SMatthew Wilcox int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, 1887a00cc7d9SMatthew Wilcox pud_t *pud, unsigned long addr) 1888a00cc7d9SMatthew Wilcox { 1889a00cc7d9SMatthew Wilcox spinlock_t *ptl; 1890a00cc7d9SMatthew Wilcox 1891a00cc7d9SMatthew Wilcox ptl = __pud_trans_huge_lock(pud, vma); 1892a00cc7d9SMatthew Wilcox if (!ptl) 1893a00cc7d9SMatthew Wilcox return 0; 1894a00cc7d9SMatthew Wilcox /* 1895a00cc7d9SMatthew Wilcox * For architectures like ppc64 we look at deposited pgtable 1896a00cc7d9SMatthew Wilcox * when calling pudp_huge_get_and_clear. So do the 1897a00cc7d9SMatthew Wilcox * pgtable_trans_huge_withdraw after finishing pudp related 1898a00cc7d9SMatthew Wilcox * operations. 1899a00cc7d9SMatthew Wilcox */ 190070516b93SQian Cai pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm); 1901a00cc7d9SMatthew Wilcox tlb_remove_pud_tlb_entry(tlb, pud, addr); 19022484ca9bSThomas Hellstrom (VMware) if (vma_is_special_huge(vma)) { 1903a00cc7d9SMatthew Wilcox spin_unlock(ptl); 1904a00cc7d9SMatthew Wilcox /* No zero page support yet */ 1905a00cc7d9SMatthew Wilcox } else { 1906a00cc7d9SMatthew Wilcox /* No support for anonymous PUD pages yet */ 1907a00cc7d9SMatthew Wilcox BUG(); 1908a00cc7d9SMatthew Wilcox } 1909a00cc7d9SMatthew Wilcox return 1; 1910a00cc7d9SMatthew Wilcox } 1911a00cc7d9SMatthew Wilcox 1912a00cc7d9SMatthew Wilcox static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, 1913a00cc7d9SMatthew Wilcox unsigned long haddr) 1914a00cc7d9SMatthew Wilcox { 1915a00cc7d9SMatthew Wilcox VM_BUG_ON(haddr & ~HPAGE_PUD_MASK); 1916a00cc7d9SMatthew Wilcox VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 1917a00cc7d9SMatthew Wilcox VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); 1918a00cc7d9SMatthew Wilcox VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud)); 1919a00cc7d9SMatthew Wilcox 1920ce9311cfSYisheng Xie count_vm_event(THP_SPLIT_PUD); 1921a00cc7d9SMatthew Wilcox 1922a00cc7d9SMatthew Wilcox pudp_huge_clear_flush_notify(vma, haddr, pud); 1923a00cc7d9SMatthew Wilcox } 1924a00cc7d9SMatthew Wilcox 1925a00cc7d9SMatthew Wilcox void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, 1926a00cc7d9SMatthew Wilcox unsigned long address) 1927a00cc7d9SMatthew Wilcox { 1928a00cc7d9SMatthew Wilcox spinlock_t *ptl; 1929ac46d4f3SJérôme Glisse struct mmu_notifier_range range; 1930a00cc7d9SMatthew Wilcox 19317269f999SJérôme Glisse mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, 19326f4f13e8SJérôme Glisse address & HPAGE_PUD_MASK, 1933ac46d4f3SJérôme Glisse (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE); 1934ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 1935ac46d4f3SJérôme Glisse ptl = pud_lock(vma->vm_mm, pud); 1936a00cc7d9SMatthew Wilcox if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud))) 1937a00cc7d9SMatthew Wilcox goto out; 
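/*
 * Reaching this point means the pud still maps a huge page
 * (pud_trans_huge() or pud_devmap()), so perform the actual split
 * under the pud lock taken above.
 */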
1938ac46d4f3SJérôme Glisse __split_huge_pud_locked(vma, pud, range.start); 1939a00cc7d9SMatthew Wilcox 1940a00cc7d9SMatthew Wilcox out: 1941a00cc7d9SMatthew Wilcox spin_unlock(ptl); 19424645b9feSJérôme Glisse /* 19434645b9feSJérôme Glisse * No need to double call mmu_notifier->invalidate_range() callback as 19444645b9feSJérôme Glisse * the above pudp_huge_clear_flush_notify() did already call it. 19454645b9feSJérôme Glisse */ 1946ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_only_end(&range); 1947a00cc7d9SMatthew Wilcox } 1948a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 1949a00cc7d9SMatthew Wilcox 1950eef1b3baSKirill A. Shutemov static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, 1951eef1b3baSKirill A. Shutemov unsigned long haddr, pmd_t *pmd) 1952eef1b3baSKirill A. Shutemov { 1953eef1b3baSKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 1954eef1b3baSKirill A. Shutemov pgtable_t pgtable; 1955eef1b3baSKirill A. Shutemov pmd_t _pmd; 1956eef1b3baSKirill A. Shutemov int i; 1957eef1b3baSKirill A. Shutemov 19580f10851eSJérôme Glisse /* 19590f10851eSJérôme Glisse * Leave pmd empty until pte is filled note that it is fine to delay 19600f10851eSJérôme Glisse * notification until mmu_notifier_invalidate_range_end() as we are 19610f10851eSJérôme Glisse * replacing a zero pmd write protected page with a zero pte write 19620f10851eSJérôme Glisse * protected page. 19630f10851eSJérôme Glisse * 1964ad56b738SMike Rapoport * See Documentation/vm/mmu_notifier.rst 19650f10851eSJérôme Glisse */ 19660f10851eSJérôme Glisse pmdp_huge_clear_flush(vma, haddr, pmd); 1967eef1b3baSKirill A. Shutemov 1968eef1b3baSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1969eef1b3baSKirill A. Shutemov pmd_populate(mm, &_pmd, pgtable); 1970eef1b3baSKirill A. Shutemov 1971eef1b3baSKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 1972eef1b3baSKirill A. Shutemov pte_t *pte, entry; 1973eef1b3baSKirill A. Shutemov entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); 1974eef1b3baSKirill A. Shutemov entry = pte_mkspecial(entry); 1975eef1b3baSKirill A. Shutemov pte = pte_offset_map(&_pmd, haddr); 1976eef1b3baSKirill A. Shutemov VM_BUG_ON(!pte_none(*pte)); 1977eef1b3baSKirill A. Shutemov set_pte_at(mm, haddr, pte, entry); 1978eef1b3baSKirill A. Shutemov pte_unmap(pte); 1979eef1b3baSKirill A. Shutemov } 1980eef1b3baSKirill A. Shutemov smp_wmb(); /* make pte visible before pmd */ 1981eef1b3baSKirill A. Shutemov pmd_populate(mm, pmd, pgtable); 1982eef1b3baSKirill A. Shutemov } 1983eef1b3baSKirill A. Shutemov 1984eef1b3baSKirill A. Shutemov static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, 1985ba988280SKirill A. Shutemov unsigned long haddr, bool freeze) 1986eef1b3baSKirill A. Shutemov { 1987eef1b3baSKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 1988eef1b3baSKirill A. Shutemov struct page *page; 1989eef1b3baSKirill A. Shutemov pgtable_t pgtable; 1990423ac9afSAneesh Kumar K.V pmd_t old_pmd, _pmd; 1991292924b2SPeter Xu bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false; 19922ac015e2SKirill A. Shutemov unsigned long addr; 1993eef1b3baSKirill A. Shutemov int i; 1994eef1b3baSKirill A. Shutemov 1995eef1b3baSKirill A. Shutemov VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); 1996eef1b3baSKirill A. Shutemov VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 1997eef1b3baSKirill A. 
Shutemov VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); 199884c3fc4eSZi Yan VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd) 199984c3fc4eSZi Yan && !pmd_devmap(*pmd)); 2000eef1b3baSKirill A. Shutemov 2001eef1b3baSKirill A. Shutemov count_vm_event(THP_SPLIT_PMD); 2002eef1b3baSKirill A. Shutemov 2003d21b9e57SKirill A. Shutemov if (!vma_is_anonymous(vma)) { 200499fa8a48SHugh Dickins old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); 2005953c66c2SAneesh Kumar K.V /* 2006953c66c2SAneesh Kumar K.V * We are going to unmap this huge page. So 2007953c66c2SAneesh Kumar K.V * just go ahead and zap it 2008953c66c2SAneesh Kumar K.V */ 2009953c66c2SAneesh Kumar K.V if (arch_needs_pgtable_deposit()) 2010953c66c2SAneesh Kumar K.V zap_deposited_table(mm, pmd); 20112484ca9bSThomas Hellstrom (VMware) if (vma_is_special_huge(vma)) 2012d21b9e57SKirill A. Shutemov return; 201399fa8a48SHugh Dickins if (unlikely(is_pmd_migration_entry(old_pmd))) { 201499fa8a48SHugh Dickins swp_entry_t entry; 201599fa8a48SHugh Dickins 201699fa8a48SHugh Dickins entry = pmd_to_swp_entry(old_pmd); 2017af5cdaf8SAlistair Popple page = pfn_swap_entry_to_page(entry); 201899fa8a48SHugh Dickins } else { 201999fa8a48SHugh Dickins page = pmd_page(old_pmd); 202099fa8a48SHugh Dickins if (!PageDirty(page) && pmd_dirty(old_pmd)) 2021e1f1b157SHugh Dickins set_page_dirty(page); 202299fa8a48SHugh Dickins if (!PageReferenced(page) && pmd_young(old_pmd)) 2023d21b9e57SKirill A. Shutemov SetPageReferenced(page); 2024d21b9e57SKirill A. Shutemov page_remove_rmap(page, true); 2025d21b9e57SKirill A. Shutemov put_page(page); 202699fa8a48SHugh Dickins } 2027fadae295SYang Shi add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR); 2028eef1b3baSKirill A. Shutemov return; 202999fa8a48SHugh Dickins } 203099fa8a48SHugh Dickins 20313b77e8c8SHugh Dickins if (is_huge_zero_pmd(*pmd)) { 20324645b9feSJérôme Glisse /* 20334645b9feSJérôme Glisse * FIXME: Do we want to invalidate secondary mmu by calling 20344645b9feSJérôme Glisse * mmu_notifier_invalidate_range() see comments below inside 20354645b9feSJérôme Glisse * __split_huge_pmd() ? 20364645b9feSJérôme Glisse * 20374645b9feSJérôme Glisse * We are going from a zero huge page write protected to zero 20384645b9feSJérôme Glisse * small page also write protected so it does not seems useful 20394645b9feSJérôme Glisse * to invalidate secondary mmu at this time. 20404645b9feSJérôme Glisse */ 2041eef1b3baSKirill A. Shutemov return __split_huge_zero_page_pmd(vma, haddr, pmd); 2042eef1b3baSKirill A. Shutemov } 2043eef1b3baSKirill A. Shutemov 2044423ac9afSAneesh Kumar K.V /* 2045423ac9afSAneesh Kumar K.V * Up to this point the pmd is present and huge and userland has the 2046423ac9afSAneesh Kumar K.V * whole access to the hugepage during the split (which happens in 2047423ac9afSAneesh Kumar K.V * place). If we overwrite the pmd with the not-huge version pointing 2048423ac9afSAneesh Kumar K.V * to the pte here (which of course we could if all CPUs were bug 2049423ac9afSAneesh Kumar K.V * free), userland could trigger a small page size TLB miss on the 2050423ac9afSAneesh Kumar K.V * small sized TLB while the hugepage TLB entry is still established in 2051423ac9afSAneesh Kumar K.V * the huge TLB. Some CPU doesn't like that. 205242742d9bSAlexander A. Klimov * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum 205342742d9bSAlexander A. Klimov * 383 on page 105. 
Intel should be safe, but it likewise warns that it's 2054423ac9afSAneesh Kumar K.V * only safe if the permission and cache attributes of the two entries 2055423ac9afSAneesh Kumar K.V * loaded into the two TLBs are identical (which should be the case here). 2056423ac9afSAneesh Kumar K.V * But it is generally safer to never allow small and huge TLB entries 2057423ac9afSAneesh Kumar K.V * for the same virtual address to be loaded simultaneously. So instead 2058423ac9afSAneesh Kumar K.V * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the 2059423ac9afSAneesh Kumar K.V * current pmd notpresent (atomically because here the pmd_trans_huge 2060423ac9afSAneesh Kumar K.V * must remain set at all times on the pmd until the split is complete 2061423ac9afSAneesh Kumar K.V * for this pmd), then we flush the SMP TLB and finally we write the 2062423ac9afSAneesh Kumar K.V * non-huge version of the pmd entry with pmd_populate. 2063423ac9afSAneesh Kumar K.V */ 2064423ac9afSAneesh Kumar K.V old_pmd = pmdp_invalidate(vma, haddr, pmd); 2065423ac9afSAneesh Kumar K.V 2066423ac9afSAneesh Kumar K.V pmd_migration = is_pmd_migration_entry(old_pmd); 20672e83ee1dSPeter Xu if (unlikely(pmd_migration)) { 206884c3fc4eSZi Yan swp_entry_t entry; 206984c3fc4eSZi Yan 2070423ac9afSAneesh Kumar K.V entry = pmd_to_swp_entry(old_pmd); 2071af5cdaf8SAlistair Popple page = pfn_swap_entry_to_page(entry); 20724dd845b5SAlistair Popple write = is_writable_migration_entry(entry); 20732e83ee1dSPeter Xu young = false; 20742e83ee1dSPeter Xu soft_dirty = pmd_swp_soft_dirty(old_pmd); 2075f45ec5ffSPeter Xu uffd_wp = pmd_swp_uffd_wp(old_pmd); 20762e83ee1dSPeter Xu } else { 2077423ac9afSAneesh Kumar K.V page = pmd_page(old_pmd); 2078423ac9afSAneesh Kumar K.V if (pmd_dirty(old_pmd)) 2079423ac9afSAneesh Kumar K.V SetPageDirty(page); 2080423ac9afSAneesh Kumar K.V write = pmd_write(old_pmd); 2081423ac9afSAneesh Kumar K.V young = pmd_young(old_pmd); 2082423ac9afSAneesh Kumar K.V soft_dirty = pmd_soft_dirty(old_pmd); 2083292924b2SPeter Xu uffd_wp = pmd_uffd_wp(old_pmd); 20842e83ee1dSPeter Xu } 20852e83ee1dSPeter Xu VM_BUG_ON_PAGE(!page_count(page), page); 20862e83ee1dSPeter Xu page_ref_add(page, HPAGE_PMD_NR - 1); 2087eef1b3baSKirill A. Shutemov 2088423ac9afSAneesh Kumar K.V /* 2089423ac9afSAneesh Kumar K.V * Withdraw the table only after we mark the pmd entry invalid. 2090423ac9afSAneesh Kumar K.V * This is critical for some architectures (Power). 2091423ac9afSAneesh Kumar K.V */ 2092eef1b3baSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2093eef1b3baSKirill A. Shutemov pmd_populate(mm, &_pmd, pgtable); 2094eef1b3baSKirill A. Shutemov 20952ac015e2SKirill A. Shutemov for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { 2096eef1b3baSKirill A. Shutemov pte_t entry, *pte; 2097eef1b3baSKirill A. Shutemov /* 2098eef1b3baSKirill A. Shutemov * Note that NUMA hinting access restrictions are not 2099eef1b3baSKirill A. Shutemov * transferred to avoid any possibility of altering 2100eef1b3baSKirill A. Shutemov * permissions across VMAs. 2101eef1b3baSKirill A. Shutemov */ 210284c3fc4eSZi Yan if (freeze || pmd_migration) { 2103ba988280SKirill A. Shutemov swp_entry_t swp_entry; 21044dd845b5SAlistair Popple if (write) 21054dd845b5SAlistair Popple swp_entry = make_writable_migration_entry( 21064dd845b5SAlistair Popple page_to_pfn(page + i)); 21074dd845b5SAlistair Popple else 21084dd845b5SAlistair Popple swp_entry = make_readable_migration_entry( 21094dd845b5SAlistair Popple page_to_pfn(page + i)); 2110ba988280SKirill A.
Shutemov entry = swp_entry_to_pte(swp_entry); 2111804dd150SAndrea Arcangeli if (soft_dirty) 2112804dd150SAndrea Arcangeli entry = pte_swp_mksoft_dirty(entry); 2113f45ec5ffSPeter Xu if (uffd_wp) 2114f45ec5ffSPeter Xu entry = pte_swp_mkuffd_wp(entry); 2115ba988280SKirill A. Shutemov } else { 21166d2329f8SAndrea Arcangeli entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); 2117b8d3c4c3SMinchan Kim entry = maybe_mkwrite(entry, vma); 2118eef1b3baSKirill A. Shutemov if (!write) 2119eef1b3baSKirill A. Shutemov entry = pte_wrprotect(entry); 2120eef1b3baSKirill A. Shutemov if (!young) 2121eef1b3baSKirill A. Shutemov entry = pte_mkold(entry); 2122804dd150SAndrea Arcangeli if (soft_dirty) 2123804dd150SAndrea Arcangeli entry = pte_mksoft_dirty(entry); 2124292924b2SPeter Xu if (uffd_wp) 2125292924b2SPeter Xu entry = pte_mkuffd_wp(entry); 2126ba988280SKirill A. Shutemov } 21272ac015e2SKirill A. Shutemov pte = pte_offset_map(&_pmd, addr); 2128eef1b3baSKirill A. Shutemov BUG_ON(!pte_none(*pte)); 21292ac015e2SKirill A. Shutemov set_pte_at(mm, addr, pte, entry); 2130ec0abae6SRalph Campbell if (!pmd_migration) 2131eef1b3baSKirill A. Shutemov atomic_inc(&page[i]._mapcount); 2132eef1b3baSKirill A. Shutemov pte_unmap(pte); 2133eef1b3baSKirill A. Shutemov } 2134eef1b3baSKirill A. Shutemov 2135ec0abae6SRalph Campbell if (!pmd_migration) { 2136eef1b3baSKirill A. Shutemov /* 2137eef1b3baSKirill A. Shutemov * Set PG_double_map before dropping compound_mapcount to avoid 2138eef1b3baSKirill A. Shutemov * false-negative page_mapped(). 2139eef1b3baSKirill A. Shutemov */ 2140ec0abae6SRalph Campbell if (compound_mapcount(page) > 1 && 2141ec0abae6SRalph Campbell !TestSetPageDoubleMap(page)) { 2142eef1b3baSKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) 2143eef1b3baSKirill A. Shutemov atomic_inc(&page[i]._mapcount); 2144eef1b3baSKirill A. Shutemov } 2145eef1b3baSKirill A. Shutemov 2146468c3982SJohannes Weiner lock_page_memcg(page); 2147eef1b3baSKirill A. Shutemov if (atomic_add_negative(-1, compound_mapcount_ptr(page))) { 2148eef1b3baSKirill A. Shutemov /* Last compound_mapcount is gone. */ 214969473e5dSMuchun Song __mod_lruvec_page_state(page, NR_ANON_THPS, 215069473e5dSMuchun Song -HPAGE_PMD_NR); 2151eef1b3baSKirill A. Shutemov if (TestClearPageDoubleMap(page)) { 2152eef1b3baSKirill A. Shutemov /* No need in mapcount reference anymore */ 2153eef1b3baSKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) 2154eef1b3baSKirill A. Shutemov atomic_dec(&page[i]._mapcount); 2155eef1b3baSKirill A. Shutemov } 2156eef1b3baSKirill A. Shutemov } 2157468c3982SJohannes Weiner unlock_page_memcg(page); 2158ec0abae6SRalph Campbell } 2159eef1b3baSKirill A. Shutemov 2160eef1b3baSKirill A. Shutemov smp_wmb(); /* make pte visible before pmd */ 2161eef1b3baSKirill A. Shutemov pmd_populate(mm, pmd, pgtable); 2162e9b61f19SKirill A. Shutemov 2163e9b61f19SKirill A. Shutemov if (freeze) { 21642ac015e2SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) { 2165e9b61f19SKirill A. Shutemov page_remove_rmap(page + i, false); 2166e9b61f19SKirill A. Shutemov put_page(page + i); 2167e9b61f19SKirill A. Shutemov } 2168e9b61f19SKirill A. Shutemov } 2169eef1b3baSKirill A. Shutemov } 2170eef1b3baSKirill A. Shutemov 2171eef1b3baSKirill A. Shutemov void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 217233f4751eSNaoya Horiguchi unsigned long address, bool freeze, struct page *page) 2173eef1b3baSKirill A. Shutemov { 2174eef1b3baSKirill A. 
Shutemov spinlock_t *ptl; 2175ac46d4f3SJérôme Glisse struct mmu_notifier_range range; 21761c2f6730SHugh Dickins bool do_unlock_page = false; 2177c444eb56SAndrea Arcangeli pmd_t _pmd; 2178eef1b3baSKirill A. Shutemov 21797269f999SJérôme Glisse mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, 21806f4f13e8SJérôme Glisse address & HPAGE_PMD_MASK, 2181ac46d4f3SJérôme Glisse (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE); 2182ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 2183ac46d4f3SJérôme Glisse ptl = pmd_lock(vma->vm_mm, pmd); 218433f4751eSNaoya Horiguchi 218533f4751eSNaoya Horiguchi /* 218633f4751eSNaoya Horiguchi * If caller asks to setup a migration entries, we need a page to check 218733f4751eSNaoya Horiguchi * pmd against. Otherwise we can end up replacing wrong page. 218833f4751eSNaoya Horiguchi */ 218933f4751eSNaoya Horiguchi VM_BUG_ON(freeze && !page); 2190c444eb56SAndrea Arcangeli if (page) { 2191c444eb56SAndrea Arcangeli VM_WARN_ON_ONCE(!PageLocked(page)); 2192c444eb56SAndrea Arcangeli if (page != pmd_page(*pmd)) 219333f4751eSNaoya Horiguchi goto out; 2194c444eb56SAndrea Arcangeli } 219533f4751eSNaoya Horiguchi 2196c444eb56SAndrea Arcangeli repeat: 21975c7fb56eSDan Williams if (pmd_trans_huge(*pmd)) { 2198c444eb56SAndrea Arcangeli if (!page) { 219933f4751eSNaoya Horiguchi page = pmd_page(*pmd); 22001c2f6730SHugh Dickins /* 22011c2f6730SHugh Dickins * An anonymous page must be locked, to ensure that a 22021c2f6730SHugh Dickins * concurrent reuse_swap_page() sees stable mapcount; 22031c2f6730SHugh Dickins * but reuse_swap_page() is not used on shmem or file, 22041c2f6730SHugh Dickins * and page lock must not be taken when zap_pmd_range() 22051c2f6730SHugh Dickins * calls __split_huge_pmd() while i_mmap_lock is held. 22061c2f6730SHugh Dickins */ 22071c2f6730SHugh Dickins if (PageAnon(page)) { 2208c444eb56SAndrea Arcangeli if (unlikely(!trylock_page(page))) { 2209c444eb56SAndrea Arcangeli get_page(page); 2210c444eb56SAndrea Arcangeli _pmd = *pmd; 2211c444eb56SAndrea Arcangeli spin_unlock(ptl); 2212c444eb56SAndrea Arcangeli lock_page(page); 2213c444eb56SAndrea Arcangeli spin_lock(ptl); 2214c444eb56SAndrea Arcangeli if (unlikely(!pmd_same(*pmd, _pmd))) { 2215c444eb56SAndrea Arcangeli unlock_page(page); 2216c444eb56SAndrea Arcangeli put_page(page); 2217c444eb56SAndrea Arcangeli page = NULL; 2218c444eb56SAndrea Arcangeli goto repeat; 2219c444eb56SAndrea Arcangeli } 2220c444eb56SAndrea Arcangeli put_page(page); 2221c444eb56SAndrea Arcangeli } 22221c2f6730SHugh Dickins do_unlock_page = true; 22231c2f6730SHugh Dickins } 2224c444eb56SAndrea Arcangeli } 2225e90309c9SKirill A. Shutemov if (PageMlocked(page)) 22265f737714SKirill A. Shutemov clear_page_mlock(page); 222784c3fc4eSZi Yan } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd))) 22285c7fb56eSDan Williams goto out; 2229ac46d4f3SJérôme Glisse __split_huge_pmd_locked(vma, pmd, range.start, freeze); 2230e90309c9SKirill A. Shutemov out: 2231eef1b3baSKirill A. Shutemov spin_unlock(ptl); 22321c2f6730SHugh Dickins if (do_unlock_page) 2233c444eb56SAndrea Arcangeli unlock_page(page); 22344645b9feSJérôme Glisse /* 22354645b9feSJérôme Glisse * No need to double call mmu_notifier->invalidate_range() callback. 
22364645b9feSJérôme Glisse * There are 3 cases to consider inside __split_huge_pmd_locked(): 22374645b9feSJérôme Glisse * 1) pmdp_huge_clear_flush_notify() obviously calls invalidate_range() 22384645b9feSJérôme Glisse * 2) __split_huge_zero_page_pmd() maps only the read-only zero page; any write 22394645b9feSJérôme Glisse * fault will trigger a flush_notify before pointing to a new page 22404645b9feSJérôme Glisse * (it is fine if the secondary mmu keeps pointing to the old zero 22414645b9feSJérôme Glisse * page in the meantime) 22424645b9feSJérôme Glisse * 3) Splitting a huge pmd into ptes pointing to the same page. No need 22434645b9feSJérôme Glisse * to invalidate secondary tlb entries; they are all still valid. 22444645b9feSJérôme Glisse * Any further changes to individual ptes will notify. So no need 22454645b9feSJérôme Glisse * to call mmu_notifier->invalidate_range() 22464645b9feSJérôme Glisse */ 2247ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_only_end(&range); 2248eef1b3baSKirill A. Shutemov } 2249eef1b3baSKirill A. Shutemov 2250fec89c10SKirill A. Shutemov void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, 2251fec89c10SKirill A. Shutemov bool freeze, struct page *page) 225294fcc585SAndrea Arcangeli { 2253f72e7dcdSHugh Dickins pgd_t *pgd; 2254c2febafcSKirill A. Shutemov p4d_t *p4d; 2255f72e7dcdSHugh Dickins pud_t *pud; 225694fcc585SAndrea Arcangeli pmd_t *pmd; 225794fcc585SAndrea Arcangeli 225878ddc534SKirill A. Shutemov pgd = pgd_offset(vma->vm_mm, address); 2259f72e7dcdSHugh Dickins if (!pgd_present(*pgd)) 2260f72e7dcdSHugh Dickins return; 2261f72e7dcdSHugh Dickins 2262c2febafcSKirill A. Shutemov p4d = p4d_offset(pgd, address); 2263c2febafcSKirill A. Shutemov if (!p4d_present(*p4d)) 2264c2febafcSKirill A. Shutemov return; 2265c2febafcSKirill A. Shutemov 2266c2febafcSKirill A. Shutemov pud = pud_offset(p4d, address); 2267f72e7dcdSHugh Dickins if (!pud_present(*pud)) 2268f72e7dcdSHugh Dickins return; 2269f72e7dcdSHugh Dickins 2270f72e7dcdSHugh Dickins pmd = pmd_offset(pud, address); 2271fec89c10SKirill A. Shutemov 227233f4751eSNaoya Horiguchi __split_huge_pmd(vma, pmd, address, freeze, page); 227394fcc585SAndrea Arcangeli } 227494fcc585SAndrea Arcangeli 227571f9e58eSMiaohe Lin static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address) 227671f9e58eSMiaohe Lin { 227771f9e58eSMiaohe Lin /* 227871f9e58eSMiaohe Lin * If the new address isn't hpage aligned and it could previously 227971f9e58eSMiaohe Lin * contain a hugepage: check if we need to split a huge pmd. 228071f9e58eSMiaohe Lin */ 228171f9e58eSMiaohe Lin if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) && 228271f9e58eSMiaohe Lin range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE), 228371f9e58eSMiaohe Lin ALIGN(address, HPAGE_PMD_SIZE))) 228471f9e58eSMiaohe Lin split_huge_pmd_address(vma, address, false, NULL); 228571f9e58eSMiaohe Lin } 228671f9e58eSMiaohe Lin 2287e1b9996bSKirill A. Shutemov void vma_adjust_trans_huge(struct vm_area_struct *vma, 228894fcc585SAndrea Arcangeli unsigned long start, 228994fcc585SAndrea Arcangeli unsigned long end, 229094fcc585SAndrea Arcangeli long adjust_next) 229194fcc585SAndrea Arcangeli { 229271f9e58eSMiaohe Lin /* Check if we need to split start first. */ 229371f9e58eSMiaohe Lin split_huge_pmd_if_needed(vma, start); 229471f9e58eSMiaohe Lin 229571f9e58eSMiaohe Lin /* Check if we need to split end next.
*/ 229671f9e58eSMiaohe Lin split_huge_pmd_if_needed(vma, end); 229794fcc585SAndrea Arcangeli 229894fcc585SAndrea Arcangeli /* 229971f9e58eSMiaohe Lin * If we're also updating the vma->vm_next->vm_start, 230071f9e58eSMiaohe Lin * check if we need to split it. 230194fcc585SAndrea Arcangeli */ 230294fcc585SAndrea Arcangeli if (adjust_next > 0) { 230394fcc585SAndrea Arcangeli struct vm_area_struct *next = vma->vm_next; 230494fcc585SAndrea Arcangeli unsigned long nstart = next->vm_start; 2305f9d86a60SWei Yang nstart += adjust_next; 230671f9e58eSMiaohe Lin split_huge_pmd_if_needed(next, nstart); 230794fcc585SAndrea Arcangeli } 230894fcc585SAndrea Arcangeli } 2309e9b61f19SKirill A. Shutemov 2310906f9cdfSHugh Dickins static void unmap_page(struct page *page) 2311e9b61f19SKirill A. Shutemov { 2312*a98a2f0cSAlistair Popple enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | 2313*a98a2f0cSAlistair Popple TTU_SYNC; 2314e9b61f19SKirill A. Shutemov 2315e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(!PageHead(page), page); 2316e9b61f19SKirill A. Shutemov 2317*a98a2f0cSAlistair Popple /* 2318*a98a2f0cSAlistair Popple * Anon pages need migration entries to preserve them, but file 2319*a98a2f0cSAlistair Popple * pages can simply be left unmapped, then faulted back on demand. 2320*a98a2f0cSAlistair Popple * If that is ever changed (perhaps for mlock), update remap_page(). 2321*a98a2f0cSAlistair Popple */ 2322baa355fdSKirill A. Shutemov if (PageAnon(page)) 2323*a98a2f0cSAlistair Popple try_to_migrate(page, ttu_flags); 2324*a98a2f0cSAlistair Popple else 2325*a98a2f0cSAlistair Popple try_to_unmap(page, ttu_flags | TTU_IGNORE_MLOCK); 2326504e070dSYang Shi 2327504e070dSYang Shi VM_WARN_ON_ONCE_PAGE(page_mapped(page), page); 2328bd56086fSKirill A. Shutemov } 2329bd56086fSKirill A. Shutemov 23308cce5475SKirill A. Shutemov static void remap_page(struct page *page, unsigned int nr) 2331e9b61f19SKirill A. Shutemov { 2332fec89c10SKirill A. Shutemov int i; 2333ab02c252SHugh Dickins 2334ab02c252SHugh Dickins /* If TTU_SPLIT_FREEZE is ever extended to file, remove this check */ 2335ab02c252SHugh Dickins if (!PageAnon(page)) 2336ab02c252SHugh Dickins return; 2337ace71a19SKirill A. Shutemov if (PageTransHuge(page)) { 2338ace71a19SKirill A. Shutemov remove_migration_ptes(page, page, true); 2339ace71a19SKirill A. Shutemov } else { 23408cce5475SKirill A. Shutemov for (i = 0; i < nr; i++) 2341fec89c10SKirill A. Shutemov remove_migration_ptes(page + i, page + i, true); 2342e9b61f19SKirill A. Shutemov } 2343ace71a19SKirill A. Shutemov } 2344e9b61f19SKirill A. 
Shutemov 234594866635SAlex Shi static void lru_add_page_tail(struct page *head, struct page *tail, 234688dcb9a3SAlex Shi struct lruvec *lruvec, struct list_head *list) 234788dcb9a3SAlex Shi { 234894866635SAlex Shi VM_BUG_ON_PAGE(!PageHead(head), head); 234994866635SAlex Shi VM_BUG_ON_PAGE(PageCompound(tail), head); 235094866635SAlex Shi VM_BUG_ON_PAGE(PageLRU(tail), head); 23516168d0daSAlex Shi lockdep_assert_held(&lruvec->lru_lock); 235288dcb9a3SAlex Shi 23536dbb5741SAlex Shi if (list) { 235488dcb9a3SAlex Shi /* page reclaim is reclaiming a huge page */ 23556dbb5741SAlex Shi VM_WARN_ON(PageLRU(head)); 235694866635SAlex Shi get_page(tail); 235794866635SAlex Shi list_add_tail(&tail->lru, list); 235888dcb9a3SAlex Shi } else { 23596dbb5741SAlex Shi /* head is still on lru (and we have it frozen) */ 23606dbb5741SAlex Shi VM_WARN_ON(!PageLRU(head)); 23616dbb5741SAlex Shi SetPageLRU(tail); 23626dbb5741SAlex Shi list_add_tail(&tail->lru, &head->lru); 236388dcb9a3SAlex Shi } 236488dcb9a3SAlex Shi } 236588dcb9a3SAlex Shi 23668df651c7SKirill A. Shutemov static void __split_huge_page_tail(struct page *head, int tail, 2367e9b61f19SKirill A. Shutemov struct lruvec *lruvec, struct list_head *list) 2368e9b61f19SKirill A. Shutemov { 2369e9b61f19SKirill A. Shutemov struct page *page_tail = head + tail; 2370e9b61f19SKirill A. Shutemov 23718df651c7SKirill A. Shutemov VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail); 2372e9b61f19SKirill A. Shutemov 2373e9b61f19SKirill A. Shutemov /* 2374605ca5edSKonstantin Khlebnikov * Clone page flags before unfreezing refcount. 2375605ca5edSKonstantin Khlebnikov * 2376605ca5edSKonstantin Khlebnikov * After successful get_page_unless_zero() might follow flags change, 23778958b249SHaitao Shi * for example lock_page() which set PG_waiters. 2378e9b61f19SKirill A. Shutemov */ 2379e9b61f19SKirill A. Shutemov page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 2380e9b61f19SKirill A. Shutemov page_tail->flags |= (head->flags & 2381e9b61f19SKirill A. Shutemov ((1L << PG_referenced) | 2382e9b61f19SKirill A. Shutemov (1L << PG_swapbacked) | 238338d8b4e6SHuang Ying (1L << PG_swapcache) | 2384e9b61f19SKirill A. Shutemov (1L << PG_mlocked) | 2385e9b61f19SKirill A. Shutemov (1L << PG_uptodate) | 2386e9b61f19SKirill A. Shutemov (1L << PG_active) | 23871899ad18SJohannes Weiner (1L << PG_workingset) | 2388e9b61f19SKirill A. Shutemov (1L << PG_locked) | 2389b8d3c4c3SMinchan Kim (1L << PG_unevictable) | 239072e6afa0SCatalin Marinas #ifdef CONFIG_64BIT 239172e6afa0SCatalin Marinas (1L << PG_arch_2) | 239272e6afa0SCatalin Marinas #endif 2393b8d3c4c3SMinchan Kim (1L << PG_dirty))); 2394e9b61f19SKirill A. Shutemov 2395173d9d9fSHugh Dickins /* ->mapping in first tail page is compound_mapcount */ 2396173d9d9fSHugh Dickins VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING, 2397173d9d9fSHugh Dickins page_tail); 2398173d9d9fSHugh Dickins page_tail->mapping = head->mapping; 2399173d9d9fSHugh Dickins page_tail->index = head->index + tail; 2400173d9d9fSHugh Dickins 2401605ca5edSKonstantin Khlebnikov /* Page flags must be visible before we make the page non-compound. */ 2402e9b61f19SKirill A. Shutemov smp_wmb(); 2403e9b61f19SKirill A. Shutemov 2404605ca5edSKonstantin Khlebnikov /* 2405605ca5edSKonstantin Khlebnikov * Clear PageTail before unfreezing page refcount. 2406605ca5edSKonstantin Khlebnikov * 2407605ca5edSKonstantin Khlebnikov * After successful get_page_unless_zero() might follow put_page() 2408605ca5edSKonstantin Khlebnikov * which needs correct compound_head(). 
2409605ca5edSKonstantin Khlebnikov */ 2410e9b61f19SKirill A. Shutemov clear_compound_head(page_tail); 2411e9b61f19SKirill A. Shutemov 2412605ca5edSKonstantin Khlebnikov /* Finally unfreeze refcount. Additional reference from page cache. */ 2413605ca5edSKonstantin Khlebnikov page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) || 2414605ca5edSKonstantin Khlebnikov PageSwapCache(head))); 2415605ca5edSKonstantin Khlebnikov 2416e9b61f19SKirill A. Shutemov if (page_is_young(head)) 2417e9b61f19SKirill A. Shutemov set_page_young(page_tail); 2418e9b61f19SKirill A. Shutemov if (page_is_idle(head)) 2419e9b61f19SKirill A. Shutemov set_page_idle(page_tail); 2420e9b61f19SKirill A. Shutemov 2421e9b61f19SKirill A. Shutemov page_cpupid_xchg_last(page_tail, page_cpupid_last(head)); 242294723aafSMichal Hocko 242394723aafSMichal Hocko /* 242494723aafSMichal Hocko * always add to the tail because some iterators expect new 242594723aafSMichal Hocko * pages to show after the currently processed elements - e.g. 242694723aafSMichal Hocko * migrate_pages 242794723aafSMichal Hocko */ 2428e9b61f19SKirill A. Shutemov lru_add_page_tail(head, page_tail, lruvec, list); 2429e9b61f19SKirill A. Shutemov } 2430e9b61f19SKirill A. Shutemov 2431baa355fdSKirill A. Shutemov static void __split_huge_page(struct page *page, struct list_head *list, 2432b6769834SAlex Shi pgoff_t end) 2433e9b61f19SKirill A. Shutemov { 2434e9b61f19SKirill A. Shutemov struct page *head = compound_head(page); 2435e9b61f19SKirill A. Shutemov struct lruvec *lruvec; 24364101196bSMatthew Wilcox (Oracle) struct address_space *swap_cache = NULL; 24374101196bSMatthew Wilcox (Oracle) unsigned long offset = 0; 24388cce5475SKirill A. Shutemov unsigned int nr = thp_nr_pages(head); 24398df651c7SKirill A. Shutemov int i; 2440e9b61f19SKirill A. Shutemov 2441e9b61f19SKirill A. Shutemov /* complete memcg works before add pages to LRU */ 2442be6c8982SZhou Guanghui split_page_memcg(head, nr); 2443e9b61f19SKirill A. Shutemov 24444101196bSMatthew Wilcox (Oracle) if (PageAnon(head) && PageSwapCache(head)) { 24454101196bSMatthew Wilcox (Oracle) swp_entry_t entry = { .val = page_private(head) }; 24464101196bSMatthew Wilcox (Oracle) 24474101196bSMatthew Wilcox (Oracle) offset = swp_offset(entry); 24484101196bSMatthew Wilcox (Oracle) swap_cache = swap_address_space(entry); 24494101196bSMatthew Wilcox (Oracle) xa_lock(&swap_cache->i_pages); 24504101196bSMatthew Wilcox (Oracle) } 24514101196bSMatthew Wilcox (Oracle) 2452f0953a1bSIngo Molnar /* lock lru list/PageCompound, ref frozen by page_ref_freeze */ 24536168d0daSAlex Shi lruvec = lock_page_lruvec(head); 2454b6769834SAlex Shi 24558cce5475SKirill A. Shutemov for (i = nr - 1; i >= 1; i--) { 24568df651c7SKirill A. Shutemov __split_huge_page_tail(head, i, lruvec, list); 2457baa355fdSKirill A. Shutemov /* Some pages can be beyond i_size: drop them from page cache */ 2458baa355fdSKirill A. Shutemov if (head[i].index >= end) { 24592d077d4bSHugh Dickins ClearPageDirty(head + i); 2460baa355fdSKirill A. Shutemov __delete_from_page_cache(head + i, NULL); 2461800d8c63SKirill A. Shutemov if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head)) 2462800d8c63SKirill A. Shutemov shmem_uncharge(head->mapping->host, 1); 2463baa355fdSKirill A. 
Shutemov put_page(head + i); 24644101196bSMatthew Wilcox (Oracle) } else if (!PageAnon(page)) { 24654101196bSMatthew Wilcox (Oracle) __xa_store(&head->mapping->i_pages, head[i].index, 24664101196bSMatthew Wilcox (Oracle) head + i, 0); 24674101196bSMatthew Wilcox (Oracle) } else if (swap_cache) { 24684101196bSMatthew Wilcox (Oracle) __xa_store(&swap_cache->i_pages, offset + i, 24694101196bSMatthew Wilcox (Oracle) head + i, 0); 2470baa355fdSKirill A. Shutemov } 2471baa355fdSKirill A. Shutemov } 2472e9b61f19SKirill A. Shutemov 2473e9b61f19SKirill A. Shutemov ClearPageCompound(head); 24746168d0daSAlex Shi unlock_page_lruvec(lruvec); 2475b6769834SAlex Shi /* Caller disabled irqs, so they are still disabled here */ 2476f7da677bSVlastimil Babka 24778cce5475SKirill A. Shutemov split_page_owner(head, nr); 2478f7da677bSVlastimil Babka 2479baa355fdSKirill A. Shutemov /* See comment in __split_huge_page_tail() */ 2480baa355fdSKirill A. Shutemov if (PageAnon(head)) { 2481aa5dc07fSMatthew Wilcox /* Additional pin to swap cache */ 24824101196bSMatthew Wilcox (Oracle) if (PageSwapCache(head)) { 248338d8b4e6SHuang Ying page_ref_add(head, 2); 24844101196bSMatthew Wilcox (Oracle) xa_unlock(&swap_cache->i_pages); 24854101196bSMatthew Wilcox (Oracle) } else { 2486baa355fdSKirill A. Shutemov page_ref_inc(head); 24874101196bSMatthew Wilcox (Oracle) } 2488baa355fdSKirill A. Shutemov } else { 2489aa5dc07fSMatthew Wilcox /* Additional pin to page cache */ 2490baa355fdSKirill A. Shutemov page_ref_add(head, 2); 2491b93b0163SMatthew Wilcox xa_unlock(&head->mapping->i_pages); 2492baa355fdSKirill A. Shutemov } 2493b6769834SAlex Shi local_irq_enable(); 2494e9b61f19SKirill A. Shutemov 24958cce5475SKirill A. Shutemov remap_page(head, nr); 2496e9b61f19SKirill A. Shutemov 2497c4f9c701SHuang Ying if (PageSwapCache(head)) { 2498c4f9c701SHuang Ying swp_entry_t entry = { .val = page_private(head) }; 2499c4f9c701SHuang Ying 2500c4f9c701SHuang Ying split_swap_cluster(entry); 2501c4f9c701SHuang Ying } 2502c4f9c701SHuang Ying 25038cce5475SKirill A. Shutemov for (i = 0; i < nr; i++) { 2504e9b61f19SKirill A. Shutemov struct page *subpage = head + i; 2505e9b61f19SKirill A. Shutemov if (subpage == page) 2506e9b61f19SKirill A. Shutemov continue; 2507e9b61f19SKirill A. Shutemov unlock_page(subpage); 2508e9b61f19SKirill A. Shutemov 2509e9b61f19SKirill A. Shutemov /* 2510e9b61f19SKirill A. Shutemov * Subpages may be freed if there wasn't any mapping 2511e9b61f19SKirill A. Shutemov * like if add_to_swap() is running on a lru page that 2512e9b61f19SKirill A. Shutemov * had its mapping zapped. And freeing these pages 2513e9b61f19SKirill A. Shutemov * requires taking the lru_lock so we do the put_page 2514e9b61f19SKirill A. Shutemov * of the tail pages after the split is complete. 2515e9b61f19SKirill A. Shutemov */ 2516e9b61f19SKirill A. Shutemov put_page(subpage); 2517e9b61f19SKirill A. Shutemov } 2518e9b61f19SKirill A. Shutemov } 2519e9b61f19SKirill A. Shutemov 2520b20ce5e0SKirill A. Shutemov int total_mapcount(struct page *page) 2521b20ce5e0SKirill A. Shutemov { 252286b562b6SKirill A. Shutemov int i, compound, nr, ret; 2523b20ce5e0SKirill A. Shutemov 2524b20ce5e0SKirill A. Shutemov VM_BUG_ON_PAGE(PageTail(page), page); 2525b20ce5e0SKirill A. Shutemov 2526b20ce5e0SKirill A. Shutemov if (likely(!PageCompound(page))) 2527b20ce5e0SKirill A. Shutemov return atomic_read(&page->_mapcount) + 1; 2528b20ce5e0SKirill A. Shutemov 2529dd78feddSKirill A. Shutemov compound = compound_mapcount(page); 253086b562b6SKirill A. 
Shutemov nr = compound_nr(page); 2531b20ce5e0SKirill A. Shutemov if (PageHuge(page)) 2532dd78feddSKirill A. Shutemov return compound; 2533dd78feddSKirill A. Shutemov ret = compound; 253486b562b6SKirill A. Shutemov for (i = 0; i < nr; i++) 2535b20ce5e0SKirill A. Shutemov ret += atomic_read(&page[i]._mapcount) + 1; 2536dd78feddSKirill A. Shutemov /* File pages has compound_mapcount included in _mapcount */ 2537dd78feddSKirill A. Shutemov if (!PageAnon(page)) 253886b562b6SKirill A. Shutemov return ret - compound * nr; 2539b20ce5e0SKirill A. Shutemov if (PageDoubleMap(page)) 254086b562b6SKirill A. Shutemov ret -= nr; 2541b20ce5e0SKirill A. Shutemov return ret; 2542b20ce5e0SKirill A. Shutemov } 2543b20ce5e0SKirill A. Shutemov 2544e9b61f19SKirill A. Shutemov /* 25456d0a07edSAndrea Arcangeli * This calculates accurately how many mappings a transparent hugepage 25466d0a07edSAndrea Arcangeli * has (unlike page_mapcount() which isn't fully accurate). This full 25476d0a07edSAndrea Arcangeli * accuracy is primarily needed to know if copy-on-write faults can 25486d0a07edSAndrea Arcangeli * reuse the page and change the mapping to read-write instead of 25496d0a07edSAndrea Arcangeli * copying them. At the same time this returns the total_mapcount too. 25506d0a07edSAndrea Arcangeli * 25516d0a07edSAndrea Arcangeli * The function returns the highest mapcount any one of the subpages 25526d0a07edSAndrea Arcangeli * has. If the return value is one, even if different processes are 25536d0a07edSAndrea Arcangeli * mapping different subpages of the transparent hugepage, they can 25546d0a07edSAndrea Arcangeli * all reuse it, because each process is reusing a different subpage. 25556d0a07edSAndrea Arcangeli * 25566d0a07edSAndrea Arcangeli * The total_mapcount is instead counting all virtual mappings of the 25576d0a07edSAndrea Arcangeli * subpages. If the total_mapcount is equal to "one", it tells the 25586d0a07edSAndrea Arcangeli * caller all mappings belong to the same "mm" and in turn the 25596d0a07edSAndrea Arcangeli * anon_vma of the transparent hugepage can become the vma->anon_vma 25606d0a07edSAndrea Arcangeli * local one as no other process may be mapping any of the subpages. 25616d0a07edSAndrea Arcangeli * 25626d0a07edSAndrea Arcangeli * It would be more accurate to replace page_mapcount() with 25636d0a07edSAndrea Arcangeli * page_trans_huge_mapcount(), however we only use 25646d0a07edSAndrea Arcangeli * page_trans_huge_mapcount() in the copy-on-write faults where we 25656d0a07edSAndrea Arcangeli * need full accuracy to avoid breaking page pinning, because 25666d0a07edSAndrea Arcangeli * page_trans_huge_mapcount() is slower than page_mapcount(). 
25676d0a07edSAndrea Arcangeli */ 25686d0a07edSAndrea Arcangeli int page_trans_huge_mapcount(struct page *page, int *total_mapcount) 25696d0a07edSAndrea Arcangeli { 25706d0a07edSAndrea Arcangeli int i, ret, _total_mapcount, mapcount; 25716d0a07edSAndrea Arcangeli 25726d0a07edSAndrea Arcangeli /* hugetlbfs shouldn't call it */ 25736d0a07edSAndrea Arcangeli VM_BUG_ON_PAGE(PageHuge(page), page); 25746d0a07edSAndrea Arcangeli 25756d0a07edSAndrea Arcangeli if (likely(!PageTransCompound(page))) { 25766d0a07edSAndrea Arcangeli mapcount = atomic_read(&page->_mapcount) + 1; 25776d0a07edSAndrea Arcangeli if (total_mapcount) 25786d0a07edSAndrea Arcangeli *total_mapcount = mapcount; 25796d0a07edSAndrea Arcangeli return mapcount; 25806d0a07edSAndrea Arcangeli } 25816d0a07edSAndrea Arcangeli 25826d0a07edSAndrea Arcangeli page = compound_head(page); 25836d0a07edSAndrea Arcangeli 25846d0a07edSAndrea Arcangeli _total_mapcount = ret = 0; 258565dfe3c3SMatthew Wilcox (Oracle) for (i = 0; i < thp_nr_pages(page); i++) { 25866d0a07edSAndrea Arcangeli mapcount = atomic_read(&page[i]._mapcount) + 1; 25876d0a07edSAndrea Arcangeli ret = max(ret, mapcount); 25886d0a07edSAndrea Arcangeli _total_mapcount += mapcount; 25896d0a07edSAndrea Arcangeli } 25906d0a07edSAndrea Arcangeli if (PageDoubleMap(page)) { 25916d0a07edSAndrea Arcangeli ret -= 1; 259265dfe3c3SMatthew Wilcox (Oracle) _total_mapcount -= thp_nr_pages(page); 25936d0a07edSAndrea Arcangeli } 25946d0a07edSAndrea Arcangeli mapcount = compound_mapcount(page); 25956d0a07edSAndrea Arcangeli ret += mapcount; 25966d0a07edSAndrea Arcangeli _total_mapcount += mapcount; 25976d0a07edSAndrea Arcangeli if (total_mapcount) 25986d0a07edSAndrea Arcangeli *total_mapcount = _total_mapcount; 25996d0a07edSAndrea Arcangeli return ret; 26006d0a07edSAndrea Arcangeli } 26016d0a07edSAndrea Arcangeli 2602b8f593cdSHuang Ying /* Racy check whether the huge page can be split */ 2603b8f593cdSHuang Ying bool can_split_huge_page(struct page *page, int *pextra_pins) 2604b8f593cdSHuang Ying { 2605b8f593cdSHuang Ying int extra_pins; 2606b8f593cdSHuang Ying 2607aa5dc07fSMatthew Wilcox /* Additional pins from page cache */ 2608b8f593cdSHuang Ying if (PageAnon(page)) 2609e2333dadSMatthew Wilcox (Oracle) extra_pins = PageSwapCache(page) ? thp_nr_pages(page) : 0; 2610b8f593cdSHuang Ying else 2611e2333dadSMatthew Wilcox (Oracle) extra_pins = thp_nr_pages(page); 2612b8f593cdSHuang Ying if (pextra_pins) 2613b8f593cdSHuang Ying *pextra_pins = extra_pins; 2614b8f593cdSHuang Ying return total_mapcount(page) == page_count(page) - extra_pins - 1; 2615b8f593cdSHuang Ying } 2616b8f593cdSHuang Ying 26176d0a07edSAndrea Arcangeli /* 2618e9b61f19SKirill A. Shutemov * This function splits huge page into normal pages. @page can point to any 2619e9b61f19SKirill A. Shutemov * subpage of huge page to split. Split doesn't change the position of @page. 2620e9b61f19SKirill A. Shutemov * 2621e9b61f19SKirill A. Shutemov * Only caller must hold pin on the @page, otherwise split fails with -EBUSY. 2622e9b61f19SKirill A. Shutemov * The huge page must be locked. 2623e9b61f19SKirill A. Shutemov * 2624e9b61f19SKirill A. Shutemov * If @list is null, tail pages will be added to LRU list, otherwise, to @list. 2625e9b61f19SKirill A. Shutemov * 2626e9b61f19SKirill A. Shutemov * Both head page and tail pages will inherit mapping, flags, and so on from 2627e9b61f19SKirill A. Shutemov * the hugepage. 2628e9b61f19SKirill A. Shutemov * 2629e9b61f19SKirill A. Shutemov * GUP pin and PG_locked transferred to @page. 
The remaining subpages can be freed if 2630e9b61f19SKirill A. Shutemov * they are not mapped. 2631e9b61f19SKirill A. Shutemov * 2632e9b61f19SKirill A. Shutemov * Returns 0 if the hugepage is split successfully. 2633e9b61f19SKirill A. Shutemov * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under 2634e9b61f19SKirill A. Shutemov * us. 2635e9b61f19SKirill A. Shutemov */ 2636e9b61f19SKirill A. Shutemov int split_huge_page_to_list(struct page *page, struct list_head *list) 2637e9b61f19SKirill A. Shutemov { 2638e9b61f19SKirill A. Shutemov struct page *head = compound_head(page); 2639a8803e6cSWei Yang struct deferred_split *ds_queue = get_deferred_split_queue(head); 2640baa355fdSKirill A. Shutemov struct anon_vma *anon_vma = NULL; 2641baa355fdSKirill A. Shutemov struct address_space *mapping = NULL; 2642504e070dSYang Shi int extra_pins, ret; 2643006d3ff2SHugh Dickins pgoff_t end; 2644e9b61f19SKirill A. Shutemov 2645cb829624SWei Yang VM_BUG_ON_PAGE(is_huge_zero_page(head), head); 2646a8803e6cSWei Yang VM_BUG_ON_PAGE(!PageLocked(head), head); 2647a8803e6cSWei Yang VM_BUG_ON_PAGE(!PageCompound(head), head); 2648e9b61f19SKirill A. Shutemov 2649a8803e6cSWei Yang if (PageWriteback(head)) 265059807685SHuang Ying return -EBUSY; 265159807685SHuang Ying 2652baa355fdSKirill A. Shutemov if (PageAnon(head)) { 2653e9b61f19SKirill A. Shutemov /* 2654c1e8d7c6SMichel Lespinasse * The caller does not necessarily hold an mmap_lock that would 2655baa355fdSKirill A. Shutemov * prevent the anon_vma disappearing, so we first take a 2656baa355fdSKirill A. Shutemov * reference to it and then lock the anon_vma for write. This 2657baa355fdSKirill A. Shutemov * is similar to page_lock_anon_vma_read except the write lock 2658baa355fdSKirill A. Shutemov * is taken to serialise against parallel split or collapse 2659baa355fdSKirill A. Shutemov * operations. 2660e9b61f19SKirill A. Shutemov */ 2661e9b61f19SKirill A. Shutemov anon_vma = page_get_anon_vma(head); 2662e9b61f19SKirill A. Shutemov if (!anon_vma) { 2663e9b61f19SKirill A. Shutemov ret = -EBUSY; 2664e9b61f19SKirill A. Shutemov goto out; 2665e9b61f19SKirill A. Shutemov } 2666006d3ff2SHugh Dickins end = -1; 2667baa355fdSKirill A. Shutemov mapping = NULL; 2668e9b61f19SKirill A. Shutemov anon_vma_lock_write(anon_vma); 2669baa355fdSKirill A. Shutemov } else { 2670baa355fdSKirill A. Shutemov mapping = head->mapping; 2671baa355fdSKirill A. Shutemov 2672baa355fdSKirill A. Shutemov /* Truncated? */ 2673baa355fdSKirill A. Shutemov if (!mapping) { 2674baa355fdSKirill A. Shutemov ret = -EBUSY; 2675baa355fdSKirill A. Shutemov goto out; 2676baa355fdSKirill A. Shutemov } 2677baa355fdSKirill A. Shutemov 2678baa355fdSKirill A. Shutemov anon_vma = NULL; 2679baa355fdSKirill A. Shutemov i_mmap_lock_read(mapping); 2680006d3ff2SHugh Dickins 2681006d3ff2SHugh Dickins /* 2682006d3ff2SHugh Dickins * __split_huge_page() may need to trim off pages beyond EOF: 2683006d3ff2SHugh Dickins * but on 32-bit, i_size_read() takes an irq-unsafe seqlock, 2684006d3ff2SHugh Dickins * which cannot be nested inside the page tree lock. So note 2685006d3ff2SHugh Dickins * end now: i_size itself may be changed at any moment, but 2686006d3ff2SHugh Dickins * head page lock is good enough to serialize the trimming. 2687006d3ff2SHugh Dickins */ 2688006d3ff2SHugh Dickins end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); 2689baa355fdSKirill A. Shutemov } 2690e9b61f19SKirill A. Shutemov 2691e9b61f19SKirill A.
Shutemov /* 2692906f9cdfSHugh Dickins * Racy check if we can split the page, before unmap_page() will 2693e9b61f19SKirill A. Shutemov * split PMDs 2694e9b61f19SKirill A. Shutemov */ 2695b8f593cdSHuang Ying if (!can_split_huge_page(head, &extra_pins)) { 2696e9b61f19SKirill A. Shutemov ret = -EBUSY; 2697e9b61f19SKirill A. Shutemov goto out_unlock; 2698e9b61f19SKirill A. Shutemov } 2699e9b61f19SKirill A. Shutemov 2700906f9cdfSHugh Dickins unmap_page(head); 2701e9b61f19SKirill A. Shutemov 2702b6769834SAlex Shi /* block interrupt reentry in xa_lock and spinlock */ 2703b6769834SAlex Shi local_irq_disable(); 2704baa355fdSKirill A. Shutemov if (mapping) { 2705aa5dc07fSMatthew Wilcox XA_STATE(xas, &mapping->i_pages, page_index(head)); 2706baa355fdSKirill A. Shutemov 2707baa355fdSKirill A. Shutemov /* 2708aa5dc07fSMatthew Wilcox * Check if the head page is present in page cache. 2709baa355fdSKirill A. Shutemov * We assume all tail are present too, if head is there. 2710baa355fdSKirill A. Shutemov */ 2711aa5dc07fSMatthew Wilcox xa_lock(&mapping->i_pages); 2712aa5dc07fSMatthew Wilcox if (xas_load(&xas) != head) 2713baa355fdSKirill A. Shutemov goto fail; 2714baa355fdSKirill A. Shutemov } 2715baa355fdSKirill A. Shutemov 27160139aa7bSJoonsoo Kim /* Prevent deferred_split_scan() touching ->_refcount */ 2717364c1eebSYang Shi spin_lock(&ds_queue->split_queue_lock); 2718504e070dSYang Shi if (page_ref_freeze(head, 1 + extra_pins)) { 27199a982250SKirill A. Shutemov if (!list_empty(page_deferred_list(head))) { 2720364c1eebSYang Shi ds_queue->split_queue_len--; 27219a982250SKirill A. Shutemov list_del(page_deferred_list(head)); 27229a982250SKirill A. Shutemov } 2723afb97172SWei Yang spin_unlock(&ds_queue->split_queue_lock); 272406d3eff6SKirill A. Shutemov if (mapping) { 2725bf9eceadSMuchun Song int nr = thp_nr_pages(head); 2726bf9eceadSMuchun Song 2727a8803e6cSWei Yang if (PageSwapBacked(head)) 272857b2847dSMuchun Song __mod_lruvec_page_state(head, NR_SHMEM_THPS, 272957b2847dSMuchun Song -nr); 273006d3eff6SKirill A. Shutemov else 2731bf9eceadSMuchun Song __mod_lruvec_page_state(head, NR_FILE_THPS, 2732bf9eceadSMuchun Song -nr); 273306d3eff6SKirill A. Shutemov } 273406d3eff6SKirill A. Shutemov 2735b6769834SAlex Shi __split_huge_page(page, list, end); 2736e9b61f19SKirill A. Shutemov ret = 0; 2737baa355fdSKirill A. Shutemov } else { 2738364c1eebSYang Shi spin_unlock(&ds_queue->split_queue_lock); 2739504e070dSYang Shi fail: 2740504e070dSYang Shi if (mapping) 2741b93b0163SMatthew Wilcox xa_unlock(&mapping->i_pages); 2742b6769834SAlex Shi local_irq_enable(); 27438cce5475SKirill A. Shutemov remap_page(head, thp_nr_pages(head)); 2744e9b61f19SKirill A. Shutemov ret = -EBUSY; 2745e9b61f19SKirill A. Shutemov } 2746e9b61f19SKirill A. Shutemov 2747e9b61f19SKirill A. Shutemov out_unlock: 2748baa355fdSKirill A. Shutemov if (anon_vma) { 2749e9b61f19SKirill A. Shutemov anon_vma_unlock_write(anon_vma); 2750e9b61f19SKirill A. Shutemov put_anon_vma(anon_vma); 2751baa355fdSKirill A. Shutemov } 2752baa355fdSKirill A. Shutemov if (mapping) 2753baa355fdSKirill A. Shutemov i_mmap_unlock_read(mapping); 2754e9b61f19SKirill A. Shutemov out: 2755e9b61f19SKirill A. Shutemov count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); 2756e9b61f19SKirill A. Shutemov return ret; 2757e9b61f19SKirill A. Shutemov } 27589a982250SKirill A. Shutemov 27599a982250SKirill A. Shutemov void free_transhuge_page(struct page *page) 27609a982250SKirill A. 
Shutemov { 276187eaceb3SYang Shi struct deferred_split *ds_queue = get_deferred_split_queue(page); 27629a982250SKirill A. Shutemov unsigned long flags; 27639a982250SKirill A. Shutemov 2764364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 27659a982250SKirill A. Shutemov if (!list_empty(page_deferred_list(page))) { 2766364c1eebSYang Shi ds_queue->split_queue_len--; 27679a982250SKirill A. Shutemov list_del(page_deferred_list(page)); 27689a982250SKirill A. Shutemov } 2769364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 27709a982250SKirill A. Shutemov free_compound_page(page); 27719a982250SKirill A. Shutemov } 27729a982250SKirill A. Shutemov 27739a982250SKirill A. Shutemov void deferred_split_huge_page(struct page *page) 27749a982250SKirill A. Shutemov { 277587eaceb3SYang Shi struct deferred_split *ds_queue = get_deferred_split_queue(page); 277687eaceb3SYang Shi #ifdef CONFIG_MEMCG 2777bcfe06bfSRoman Gushchin struct mem_cgroup *memcg = page_memcg(compound_head(page)); 277887eaceb3SYang Shi #endif 27799a982250SKirill A. Shutemov unsigned long flags; 27809a982250SKirill A. Shutemov 27819a982250SKirill A. Shutemov VM_BUG_ON_PAGE(!PageTransHuge(page), page); 27829a982250SKirill A. Shutemov 278387eaceb3SYang Shi /* 278487eaceb3SYang Shi * The try_to_unmap() in page reclaim path might reach here too, 278587eaceb3SYang Shi * this may cause a race condition to corrupt deferred split queue. 278687eaceb3SYang Shi * And, if page reclaim is already handling the same page, it is 278787eaceb3SYang Shi * unnecessary to handle it again in shrinker. 278887eaceb3SYang Shi * 278987eaceb3SYang Shi * Check PageSwapCache to determine if the page is being 279087eaceb3SYang Shi * handled by page reclaim since THP swap would add the page into 279187eaceb3SYang Shi * swap cache before calling try_to_unmap(). 279287eaceb3SYang Shi */ 279387eaceb3SYang Shi if (PageSwapCache(page)) 279487eaceb3SYang Shi return; 279587eaceb3SYang Shi 2796364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 27979a982250SKirill A. Shutemov if (list_empty(page_deferred_list(page))) { 2798f9719a03SKirill A. Shutemov count_vm_event(THP_DEFERRED_SPLIT_PAGE); 2799364c1eebSYang Shi list_add_tail(page_deferred_list(page), &ds_queue->split_queue); 2800364c1eebSYang Shi ds_queue->split_queue_len++; 280187eaceb3SYang Shi #ifdef CONFIG_MEMCG 280287eaceb3SYang Shi if (memcg) 28032bfd3637SYang Shi set_shrinker_bit(memcg, page_to_nid(page), 280487eaceb3SYang Shi deferred_split_shrinker.id); 280587eaceb3SYang Shi #endif 28069a982250SKirill A. Shutemov } 2807364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 28089a982250SKirill A. Shutemov } 28099a982250SKirill A. Shutemov 28109a982250SKirill A. Shutemov static unsigned long deferred_split_count(struct shrinker *shrink, 28119a982250SKirill A. Shutemov struct shrink_control *sc) 28129a982250SKirill A. Shutemov { 2813a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(sc->nid); 2814364c1eebSYang Shi struct deferred_split *ds_queue = &pgdata->deferred_split_queue; 281587eaceb3SYang Shi 281687eaceb3SYang Shi #ifdef CONFIG_MEMCG 281787eaceb3SYang Shi if (sc->memcg) 281887eaceb3SYang Shi ds_queue = &sc->memcg->deferred_split_queue; 281987eaceb3SYang Shi #endif 2820364c1eebSYang Shi return READ_ONCE(ds_queue->split_queue_len); 28219a982250SKirill A. Shutemov } 28229a982250SKirill A. Shutemov 28239a982250SKirill A. 
Shutemov static unsigned long deferred_split_scan(struct shrinker *shrink, 28249a982250SKirill A. Shutemov struct shrink_control *sc) 28259a982250SKirill A. Shutemov { 2826a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(sc->nid); 2827364c1eebSYang Shi struct deferred_split *ds_queue = &pgdata->deferred_split_queue; 28289a982250SKirill A. Shutemov unsigned long flags; 28299a982250SKirill A. Shutemov LIST_HEAD(list), *pos, *next; 28309a982250SKirill A. Shutemov struct page *page; 28319a982250SKirill A. Shutemov int split = 0; 28329a982250SKirill A. Shutemov 283387eaceb3SYang Shi #ifdef CONFIG_MEMCG 283487eaceb3SYang Shi if (sc->memcg) 283587eaceb3SYang Shi ds_queue = &sc->memcg->deferred_split_queue; 283687eaceb3SYang Shi #endif 283787eaceb3SYang Shi 2838364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 28399a982250SKirill A. Shutemov /* Take pin on all head pages to avoid freeing them under us */ 2840364c1eebSYang Shi list_for_each_safe(pos, next, &ds_queue->split_queue) { 2841dfe5c51cSMiaohe Lin page = list_entry((void *)pos, struct page, deferred_list); 28429a982250SKirill A. Shutemov page = compound_head(page); 2843e3ae1953SKirill A. Shutemov if (get_page_unless_zero(page)) { 2844e3ae1953SKirill A. Shutemov list_move(page_deferred_list(page), &list); 2845e3ae1953SKirill A. Shutemov } else { 2846e3ae1953SKirill A. Shutemov /* We lost race with put_compound_page() */ 28479a982250SKirill A. Shutemov list_del_init(page_deferred_list(page)); 2848364c1eebSYang Shi ds_queue->split_queue_len--; 28499a982250SKirill A. Shutemov } 2850e3ae1953SKirill A. Shutemov if (!--sc->nr_to_scan) 2851e3ae1953SKirill A. Shutemov break; 28529a982250SKirill A. Shutemov } 2853364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 28549a982250SKirill A. Shutemov 28559a982250SKirill A. Shutemov list_for_each_safe(pos, next, &list) { 2856dfe5c51cSMiaohe Lin page = list_entry((void *)pos, struct page, deferred_list); 2857fa41b900SKirill A. Shutemov if (!trylock_page(page)) 2858fa41b900SKirill A. Shutemov goto next; 28599a982250SKirill A. Shutemov /* split_huge_page() removes page from list on success */ 28609a982250SKirill A. Shutemov if (!split_huge_page(page)) 28619a982250SKirill A. Shutemov split++; 28629a982250SKirill A. Shutemov unlock_page(page); 2863fa41b900SKirill A. Shutemov next: 28649a982250SKirill A. Shutemov put_page(page); 28659a982250SKirill A. Shutemov } 28669a982250SKirill A. Shutemov 2867364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 2868364c1eebSYang Shi list_splice_tail(&list, &ds_queue->split_queue); 2869364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 28709a982250SKirill A. Shutemov 2871cb8d68ecSKirill A. Shutemov /* 2872cb8d68ecSKirill A. Shutemov * Stop shrinker if we didn't split any page, but the queue is empty. 2873cb8d68ecSKirill A. Shutemov * This can happen if pages were freed under us. 2874cb8d68ecSKirill A. Shutemov */ 2875364c1eebSYang Shi if (!split && list_empty(&ds_queue->split_queue)) 2876cb8d68ecSKirill A. Shutemov return SHRINK_STOP; 2877cb8d68ecSKirill A. Shutemov return split; 28789a982250SKirill A. Shutemov } 28799a982250SKirill A. Shutemov 28809a982250SKirill A. Shutemov static struct shrinker deferred_split_shrinker = { 28819a982250SKirill A. Shutemov .count_objects = deferred_split_count, 28829a982250SKirill A. Shutemov .scan_objects = deferred_split_scan, 28839a982250SKirill A. 
Shutemov .seeks = DEFAULT_SEEKS, 288487eaceb3SYang Shi .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE | 288587eaceb3SYang Shi SHRINKER_NONSLAB, 28869a982250SKirill A. Shutemov }; 288749071d43SKirill A. Shutemov 288849071d43SKirill A. Shutemov #ifdef CONFIG_DEBUG_FS 2889fa6c0231SZi Yan static void split_huge_pages_all(void) 289049071d43SKirill A. Shutemov { 289149071d43SKirill A. Shutemov struct zone *zone; 289249071d43SKirill A. Shutemov struct page *page; 289349071d43SKirill A. Shutemov unsigned long pfn, max_zone_pfn; 289449071d43SKirill A. Shutemov unsigned long total = 0, split = 0; 289549071d43SKirill A. Shutemov 2896fa6c0231SZi Yan pr_debug("Split all THPs\n"); 289749071d43SKirill A. Shutemov for_each_populated_zone(zone) { 289849071d43SKirill A. Shutemov max_zone_pfn = zone_end_pfn(zone); 289949071d43SKirill A. Shutemov for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { 290049071d43SKirill A. Shutemov if (!pfn_valid(pfn)) 290149071d43SKirill A. Shutemov continue; 290249071d43SKirill A. Shutemov 290349071d43SKirill A. Shutemov page = pfn_to_page(pfn); 290449071d43SKirill A. Shutemov if (!get_page_unless_zero(page)) 290549071d43SKirill A. Shutemov continue; 290649071d43SKirill A. Shutemov 290749071d43SKirill A. Shutemov if (zone != page_zone(page)) 290849071d43SKirill A. Shutemov goto next; 290949071d43SKirill A. Shutemov 2910baa355fdSKirill A. Shutemov if (!PageHead(page) || PageHuge(page) || !PageLRU(page)) 291149071d43SKirill A. Shutemov goto next; 291249071d43SKirill A. Shutemov 291349071d43SKirill A. Shutemov total++; 291449071d43SKirill A. Shutemov lock_page(page); 291549071d43SKirill A. Shutemov if (!split_huge_page(page)) 291649071d43SKirill A. Shutemov split++; 291749071d43SKirill A. Shutemov unlock_page(page); 291849071d43SKirill A. Shutemov next: 291949071d43SKirill A. Shutemov put_page(page); 2920fa6c0231SZi Yan cond_resched(); 292149071d43SKirill A. Shutemov } 292249071d43SKirill A. Shutemov } 292349071d43SKirill A. Shutemov 2924fa6c0231SZi Yan pr_debug("%lu of %lu THP split\n", split, total); 292549071d43SKirill A. 
Shutemov } 2926fa6c0231SZi Yan 2927fa6c0231SZi Yan static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma) 2928fa6c0231SZi Yan { 2929fa6c0231SZi Yan return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) || 2930fa6c0231SZi Yan is_vm_hugetlb_page(vma); 2931fa6c0231SZi Yan } 2932fa6c0231SZi Yan 2933fa6c0231SZi Yan static int split_huge_pages_pid(int pid, unsigned long vaddr_start, 2934fa6c0231SZi Yan unsigned long vaddr_end) 2935fa6c0231SZi Yan { 2936fa6c0231SZi Yan int ret = 0; 2937fa6c0231SZi Yan struct task_struct *task; 2938fa6c0231SZi Yan struct mm_struct *mm; 2939fa6c0231SZi Yan unsigned long total = 0, split = 0; 2940fa6c0231SZi Yan unsigned long addr; 2941fa6c0231SZi Yan 2942fa6c0231SZi Yan vaddr_start &= PAGE_MASK; 2943fa6c0231SZi Yan vaddr_end &= PAGE_MASK; 2944fa6c0231SZi Yan 2945fa6c0231SZi Yan /* Find the task_struct from pid */ 2946fa6c0231SZi Yan rcu_read_lock(); 2947fa6c0231SZi Yan task = find_task_by_vpid(pid); 2948fa6c0231SZi Yan if (!task) { 2949fa6c0231SZi Yan rcu_read_unlock(); 2950fa6c0231SZi Yan ret = -ESRCH; 2951fa6c0231SZi Yan goto out; 2952fa6c0231SZi Yan } 2953fa6c0231SZi Yan get_task_struct(task); 2954fa6c0231SZi Yan rcu_read_unlock(); 2955fa6c0231SZi Yan 2956fa6c0231SZi Yan /* Find the mm_struct */ 2957fa6c0231SZi Yan mm = get_task_mm(task); 2958fa6c0231SZi Yan put_task_struct(task); 2959fa6c0231SZi Yan 2960fa6c0231SZi Yan if (!mm) { 2961fa6c0231SZi Yan ret = -EINVAL; 2962fa6c0231SZi Yan goto out; 2963fa6c0231SZi Yan } 2964fa6c0231SZi Yan 2965fa6c0231SZi Yan pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n", 2966fa6c0231SZi Yan pid, vaddr_start, vaddr_end); 2967fa6c0231SZi Yan 2968fa6c0231SZi Yan mmap_read_lock(mm); 2969fa6c0231SZi Yan /* 2970fa6c0231SZi Yan * always increase addr by PAGE_SIZE, since we could have a PTE page 2971fa6c0231SZi Yan * table filled with PTE-mapped THPs, each of which is distinct. 
2972fa6c0231SZi Yan */ 2973fa6c0231SZi Yan for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) { 2974fa6c0231SZi Yan struct vm_area_struct *vma = find_vma(mm, addr); 2975fa6c0231SZi Yan unsigned int follflags; 2976fa6c0231SZi Yan struct page *page; 2977fa6c0231SZi Yan 2978fa6c0231SZi Yan if (!vma || addr < vma->vm_start) 2979fa6c0231SZi Yan break; 2980fa6c0231SZi Yan 2981fa6c0231SZi Yan /* skip special VMA and hugetlb VMA */ 2982fa6c0231SZi Yan if (vma_not_suitable_for_thp_split(vma)) { 2983fa6c0231SZi Yan addr = vma->vm_end; 2984fa6c0231SZi Yan continue; 2985fa6c0231SZi Yan } 2986fa6c0231SZi Yan 2987fa6c0231SZi Yan /* FOLL_DUMP to ignore special (like zero) pages */ 2988fa6c0231SZi Yan follflags = FOLL_GET | FOLL_DUMP; 2989fa6c0231SZi Yan page = follow_page(vma, addr, follflags); 2990fa6c0231SZi Yan 2991fa6c0231SZi Yan if (IS_ERR(page)) 2992fa6c0231SZi Yan continue; 2993fa6c0231SZi Yan if (!page) 2994fa6c0231SZi Yan continue; 2995fa6c0231SZi Yan 2996fa6c0231SZi Yan if (!is_transparent_hugepage(page)) 2997fa6c0231SZi Yan goto next; 2998fa6c0231SZi Yan 2999fa6c0231SZi Yan total++; 3000fa6c0231SZi Yan if (!can_split_huge_page(compound_head(page), NULL)) 3001fa6c0231SZi Yan goto next; 3002fa6c0231SZi Yan 3003fa6c0231SZi Yan if (!trylock_page(page)) 3004fa6c0231SZi Yan goto next; 3005fa6c0231SZi Yan 3006fa6c0231SZi Yan if (!split_huge_page(page)) 3007fa6c0231SZi Yan split++; 3008fa6c0231SZi Yan 3009fa6c0231SZi Yan unlock_page(page); 3010fa6c0231SZi Yan next: 3011fa6c0231SZi Yan put_page(page); 3012fa6c0231SZi Yan cond_resched(); 3013fa6c0231SZi Yan } 3014fa6c0231SZi Yan mmap_read_unlock(mm); 3015fa6c0231SZi Yan mmput(mm); 3016fa6c0231SZi Yan 3017fa6c0231SZi Yan pr_debug("%lu of %lu THP split\n", split, total); 3018fa6c0231SZi Yan 3019fa6c0231SZi Yan out: 3020fa6c0231SZi Yan return ret; 3021fa6c0231SZi Yan } 3022fa6c0231SZi Yan 3023fbe37501SZi Yan static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start, 3024fbe37501SZi Yan pgoff_t off_end) 3025fbe37501SZi Yan { 3026fbe37501SZi Yan struct filename *file; 3027fbe37501SZi Yan struct file *candidate; 3028fbe37501SZi Yan struct address_space *mapping; 3029fbe37501SZi Yan int ret = -EINVAL; 3030fbe37501SZi Yan pgoff_t index; 3031fbe37501SZi Yan int nr_pages = 1; 3032fbe37501SZi Yan unsigned long total = 0, split = 0; 3033fbe37501SZi Yan 3034fbe37501SZi Yan file = getname_kernel(file_path); 3035fbe37501SZi Yan if (IS_ERR(file)) 3036fbe37501SZi Yan return ret; 3037fbe37501SZi Yan 3038fbe37501SZi Yan candidate = file_open_name(file, O_RDONLY, 0); 3039fbe37501SZi Yan if (IS_ERR(candidate)) 3040fbe37501SZi Yan goto out; 3041fbe37501SZi Yan 3042fbe37501SZi Yan pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n", 3043fbe37501SZi Yan file_path, off_start, off_end); 3044fbe37501SZi Yan 3045fbe37501SZi Yan mapping = candidate->f_mapping; 3046fbe37501SZi Yan 3047fbe37501SZi Yan for (index = off_start; index < off_end; index += nr_pages) { 3048fbe37501SZi Yan struct page *fpage = pagecache_get_page(mapping, index, 3049fbe37501SZi Yan FGP_ENTRY | FGP_HEAD, 0); 3050fbe37501SZi Yan 3051fbe37501SZi Yan nr_pages = 1; 3052fbe37501SZi Yan if (xa_is_value(fpage) || !fpage) 3053fbe37501SZi Yan continue; 3054fbe37501SZi Yan 3055fbe37501SZi Yan if (!is_transparent_hugepage(fpage)) 3056fbe37501SZi Yan goto next; 3057fbe37501SZi Yan 3058fbe37501SZi Yan total++; 3059fbe37501SZi Yan nr_pages = thp_nr_pages(fpage); 3060fbe37501SZi Yan 3061fbe37501SZi Yan if (!trylock_page(fpage)) 3062fbe37501SZi Yan goto next; 
3063fbe37501SZi Yan 3064fbe37501SZi Yan if (!split_huge_page(fpage)) 3065fbe37501SZi Yan split++; 3066fbe37501SZi Yan 3067fbe37501SZi Yan unlock_page(fpage); 3068fbe37501SZi Yan next: 3069fbe37501SZi Yan put_page(fpage); 3070fbe37501SZi Yan cond_resched(); 3071fbe37501SZi Yan } 3072fbe37501SZi Yan 3073fbe37501SZi Yan filp_close(candidate, NULL); 3074fbe37501SZi Yan ret = 0; 3075fbe37501SZi Yan 3076fbe37501SZi Yan pr_debug("%lu of %lu file-backed THP split\n", split, total); 3077fbe37501SZi Yan out: 3078fbe37501SZi Yan putname(file); 3079fbe37501SZi Yan return ret; 3080fbe37501SZi Yan } 3081fbe37501SZi Yan 3082fa6c0231SZi Yan #define MAX_INPUT_BUF_SZ 255 3083fa6c0231SZi Yan 3084fa6c0231SZi Yan static ssize_t split_huge_pages_write(struct file *file, const char __user *buf, 3085fa6c0231SZi Yan size_t count, loff_t *ppops) 3086fa6c0231SZi Yan { 3087fa6c0231SZi Yan static DEFINE_MUTEX(split_debug_mutex); 3088fa6c0231SZi Yan ssize_t ret; 3089fbe37501SZi Yan /* hold pid, start_vaddr, end_vaddr or file_path, off_start, off_end */ 3090fbe37501SZi Yan char input_buf[MAX_INPUT_BUF_SZ]; 3091fa6c0231SZi Yan int pid; 3092fa6c0231SZi Yan unsigned long vaddr_start, vaddr_end; 3093fa6c0231SZi Yan 3094fa6c0231SZi Yan ret = mutex_lock_interruptible(&split_debug_mutex); 3095fa6c0231SZi Yan if (ret) 3096fa6c0231SZi Yan return ret; 3097fa6c0231SZi Yan 3098fa6c0231SZi Yan ret = -EFAULT; 3099fa6c0231SZi Yan 3100fa6c0231SZi Yan memset(input_buf, 0, MAX_INPUT_BUF_SZ); 3101fa6c0231SZi Yan if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ))) 3102fa6c0231SZi Yan goto out; 3103fa6c0231SZi Yan 3104fa6c0231SZi Yan input_buf[MAX_INPUT_BUF_SZ - 1] = '\0'; 3105fbe37501SZi Yan 3106fbe37501SZi Yan if (input_buf[0] == '/') { 3107fbe37501SZi Yan char *tok; 3108fbe37501SZi Yan char *buf = input_buf; 3109fbe37501SZi Yan char file_path[MAX_INPUT_BUF_SZ]; 3110fbe37501SZi Yan pgoff_t off_start = 0, off_end = 0; 3111fbe37501SZi Yan size_t input_len = strlen(input_buf); 3112fbe37501SZi Yan 3113fbe37501SZi Yan tok = strsep(&buf, ","); 3114fbe37501SZi Yan if (tok) { 31151212e00cSMatthew Wilcox (Oracle) strcpy(file_path, tok); 3116fbe37501SZi Yan } else { 3117fbe37501SZi Yan ret = -EINVAL; 3118fbe37501SZi Yan goto out; 3119fbe37501SZi Yan } 3120fbe37501SZi Yan 3121fbe37501SZi Yan ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end); 3122fbe37501SZi Yan if (ret != 2) { 3123fbe37501SZi Yan ret = -EINVAL; 3124fbe37501SZi Yan goto out; 3125fbe37501SZi Yan } 3126fbe37501SZi Yan ret = split_huge_pages_in_file(file_path, off_start, off_end); 3127fbe37501SZi Yan if (!ret) 3128fbe37501SZi Yan ret = input_len; 3129fbe37501SZi Yan 3130fbe37501SZi Yan goto out; 3131fbe37501SZi Yan } 3132fbe37501SZi Yan 3133fa6c0231SZi Yan ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end); 3134fa6c0231SZi Yan if (ret == 1 && pid == 1) { 3135fa6c0231SZi Yan split_huge_pages_all(); 3136fa6c0231SZi Yan ret = strlen(input_buf); 3137fa6c0231SZi Yan goto out; 3138fa6c0231SZi Yan } else if (ret != 3) { 3139fa6c0231SZi Yan ret = -EINVAL; 3140fa6c0231SZi Yan goto out; 3141fa6c0231SZi Yan } 3142fa6c0231SZi Yan 3143fa6c0231SZi Yan ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end); 3144fa6c0231SZi Yan if (!ret) 3145fa6c0231SZi Yan ret = strlen(input_buf); 3146fa6c0231SZi Yan out: 3147fa6c0231SZi Yan mutex_unlock(&split_debug_mutex); 3148fa6c0231SZi Yan return ret; 3149fa6c0231SZi Yan 3150fa6c0231SZi Yan } 3151fa6c0231SZi Yan 3152fa6c0231SZi Yan static const struct file_operations split_huge_pages_fops = { 3153fa6c0231SZi 
Yan .owner = THIS_MODULE, 3154fa6c0231SZi Yan .write = split_huge_pages_write, 3155fa6c0231SZi Yan .llseek = no_llseek, 3156fa6c0231SZi Yan }; 315749071d43SKirill A. Shutemov 315849071d43SKirill A. Shutemov static int __init split_huge_pages_debugfs(void) 315949071d43SKirill A. Shutemov { 3160d9f7979cSGreg Kroah-Hartman debugfs_create_file("split_huge_pages", 0200, NULL, NULL, 316149071d43SKirill A. Shutemov &split_huge_pages_fops); 316249071d43SKirill A. Shutemov return 0; 316349071d43SKirill A. Shutemov } 316449071d43SKirill A. Shutemov late_initcall(split_huge_pages_debugfs); 316549071d43SKirill A. Shutemov #endif 3166616b8371SZi Yan 3167616b8371SZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 3168616b8371SZi Yan void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, 3169616b8371SZi Yan struct page *page) 3170616b8371SZi Yan { 3171616b8371SZi Yan struct vm_area_struct *vma = pvmw->vma; 3172616b8371SZi Yan struct mm_struct *mm = vma->vm_mm; 3173616b8371SZi Yan unsigned long address = pvmw->address; 3174616b8371SZi Yan pmd_t pmdval; 3175616b8371SZi Yan swp_entry_t entry; 3176ab6e3d09SNaoya Horiguchi pmd_t pmdswp; 3177616b8371SZi Yan 3178616b8371SZi Yan if (!(pvmw->pmd && !pvmw->pte)) 3179616b8371SZi Yan return; 3180616b8371SZi Yan 3181616b8371SZi Yan flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); 31828a8683adSHuang Ying pmdval = pmdp_invalidate(vma, address, pvmw->pmd); 3183616b8371SZi Yan if (pmd_dirty(pmdval)) 3184616b8371SZi Yan set_page_dirty(page); 31854dd845b5SAlistair Popple if (pmd_write(pmdval)) 31864dd845b5SAlistair Popple entry = make_writable_migration_entry(page_to_pfn(page)); 31874dd845b5SAlistair Popple else 31884dd845b5SAlistair Popple entry = make_readable_migration_entry(page_to_pfn(page)); 3189ab6e3d09SNaoya Horiguchi pmdswp = swp_entry_to_pmd(entry); 3190ab6e3d09SNaoya Horiguchi if (pmd_soft_dirty(pmdval)) 3191ab6e3d09SNaoya Horiguchi pmdswp = pmd_swp_mksoft_dirty(pmdswp); 3192ab6e3d09SNaoya Horiguchi set_pmd_at(mm, address, pvmw->pmd, pmdswp); 3193616b8371SZi Yan page_remove_rmap(page, true); 3194616b8371SZi Yan put_page(page); 3195616b8371SZi Yan } 3196616b8371SZi Yan 3197616b8371SZi Yan void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) 3198616b8371SZi Yan { 3199616b8371SZi Yan struct vm_area_struct *vma = pvmw->vma; 3200616b8371SZi Yan struct mm_struct *mm = vma->vm_mm; 3201616b8371SZi Yan unsigned long address = pvmw->address; 3202616b8371SZi Yan unsigned long mmun_start = address & HPAGE_PMD_MASK; 3203616b8371SZi Yan pmd_t pmde; 3204616b8371SZi Yan swp_entry_t entry; 3205616b8371SZi Yan 3206616b8371SZi Yan if (!(pvmw->pmd && !pvmw->pte)) 3207616b8371SZi Yan return; 3208616b8371SZi Yan 3209616b8371SZi Yan entry = pmd_to_swp_entry(*pvmw->pmd); 3210616b8371SZi Yan get_page(new); 3211616b8371SZi Yan pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot)); 3212ab6e3d09SNaoya Horiguchi if (pmd_swp_soft_dirty(*pvmw->pmd)) 3213ab6e3d09SNaoya Horiguchi pmde = pmd_mksoft_dirty(pmde); 32144dd845b5SAlistair Popple if (is_writable_migration_entry(entry)) 3215f55e1014SLinus Torvalds pmde = maybe_pmd_mkwrite(pmde, vma); 32168f34f1eaSPeter Xu if (pmd_swp_uffd_wp(*pvmw->pmd)) 32178f34f1eaSPeter Xu pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde)); 3218616b8371SZi Yan 3219616b8371SZi Yan flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE); 3220e71769aeSNaoya Horiguchi if (PageAnon(new)) 3221616b8371SZi Yan page_add_anon_rmap(new, vma, mmun_start, true); 3222e71769aeSNaoya Horiguchi else 3223e71769aeSNaoya Horiguchi 
page_add_file_rmap(new, true); 3224616b8371SZi Yan set_pmd_at(mm, mmun_start, pvmw->pmd, pmde); 3225e125fe40SKirill A. Shutemov if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new)) 3226616b8371SZi Yan mlock_vma_page(new); 3227616b8371SZi Yan update_mmu_cache_pmd(vma, address, pvmw->pmd); 3228616b8371SZi Yan } 3229616b8371SZi Yan #endif 3230
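/*
 * Usage sketch for the debugfs interface wired up above (illustrative only,
 * not part of this file): split_huge_pages_write() accepts either
 * "<pid>,<vaddr_start>,<vaddr_end>" or "<file_path>,<off_start>,<off_end>",
 * and a plain "1" splits all THPs system-wide. The userspace snippet below
 * is a minimal sketch, assuming debugfs is mounted at /sys/kernel/debug;
 * the helper name and the sample pid, addresses and path are made up.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical helper: send one request string to the debugfs knob. */
static int split_huge_pages_request(const char *req)
{
	int fd = open("/sys/kernel/debug/split_huge_pages", O_WRONLY);
	ssize_t ret;

	if (fd < 0) {
		perror("open split_huge_pages");
		return -1;
	}
	ret = write(fd, req, strlen(req));
	close(fd);
	return ret < 0 ? -1 : 0;
}

int main(void)
{
	/*
	 * Split THPs mapped by pid 1234 in the virtual range
	 * [0x700000000000, 0x700000200000); this matches the
	 * "%d,0x%lx,0x%lx" format parsed by split_huge_pages_write().
	 */
	split_huge_pages_request("1234,0x700000000000,0x700000200000");

	/*
	 * Split file-backed THPs of /tmp/datafile in page offsets
	 * [0x0, 0x100); a leading '/' selects the file-path form handled
	 * by split_huge_pages_in_file().
	 */
	split_huge_pages_request("/tmp/datafile,0x0,0x100");

	return 0;
}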