// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2009  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "swap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

static struct shrinker deferred_split_shrinker;

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;

bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
			bool smaps, bool in_pf, bool enforce_sysfs)
{
	if (!vma->vm_mm)		/* vdso */
		return false;

	/*
	 * Explicitly disabled through madvise or prctl, or some
	 * architectures may disable THP for some mappings, for
	 * example, s390 kvm.
	 */
	if ((vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;
	/*
	 * Check if the hardware/firmware has marked hugepage support as
	 * disabled.
	 */
	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
		return false;

	/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
	if (vma_is_dax(vma))
		return in_pf;

	/*
	 * Special VMA and hugetlb VMA.
	 * Must be checked after dax since some dax mappings may have
	 * VM_MIXEDMAP set.
	 */
	if (vm_flags & VM_NO_KHUGEPAGED)
		return false;

	/*
	 * Check alignment for file vma and size for both file and anon vma.
	 *
	 * Skip the check for page fault. Huge fault does the check in fault
	 * handlers. And this check is not suitable for huge PUD fault.
	 */
	if (!in_pf &&
	    !transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE)))
		return false;

	/*
	 * Enabled via shmem mount options or sysfs settings.
	 * Must be done before hugepage flags check since shmem has its
	 * own flags.
	 */
	if (!in_pf && shmem_file(vma->vm_file))
		return shmem_huge_enabled(vma);

	/* Enforce sysfs THP requirements as necessary */
	if (enforce_sysfs &&
	    (!hugepage_flags_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
					   !hugepage_flags_always())))
		return false;

	/* Only regular file is valid */
	if (!in_pf && file_thp_enabled(vma))
		return true;

	if (!vma_is_anonymous(vma))
		return false;

	if (vma_is_temporary_stack(vma))
		return false;

	/*
	 * THPeligible bit of smaps should show 1 for proper VMAs even
	 * though anon_vma is not initialized yet.
	 *
	 * Allow page fault since anon_vma may not be initialized until
	 * the first page fault.
	 */
	if (!vma->anon_vma)
		return (smaps || in_pf);

	return true;
}

static bool get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return true;

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return false;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}
	WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));

	/* We take an additional reference here. It will be put back by the shrinker. */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return true;
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

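/*
 * Per-mm wrappers around the global huge zero page: the first user in an
 * mm takes one reference and records it with MMF_HUGE_ZERO_PAGE, so each
 * mm pins the zero page at most once; mm_put_huge_zero_page() drops that
 * reference again on the mm's final put.
 */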
struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}

void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		WRITE_ONCE(huge_zero_pfn, ~0UL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};

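/*
 * sysfs interface, exposed under /sys/kernel/mm/transparent_hugepage/
 * (the kobject is created below in hugepage_init_sysfs()).
 */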
#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		output = "[always] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always [madvise] never";
	else
		output = "always madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}

static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);

ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag)
{
	return sysfs_emit(buf, "%d\n",
			  !!test_bit(flag, &transparent_hugepage_flags));
}

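/*
 * Generic store helper used by the single-bit THP sysfs knobs such as
 * use_zero_page: accepts "0" or "1" and clears or sets the given flag.
 */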
ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
		     &transparent_hugepage_flags))
		output = "[always] defer defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
			  &transparent_hugepage_flags))
		output = "always [defer] defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer [defer+madvise] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer defer+madvise [madvise] never";
	else
		output = "always defer defer+madvise madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer+madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);

static ssize_t use_zero_page_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#ifdef CONFIG_SHMEM
	&shmem_enabled_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		/*
		 * Hardware doesn't support hugepages, hence disable
		 * DAX PMD support.
		 */
		transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_NEVER_DAX;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = register_shrinker(&huge_zero_page_shrinker, "thp-zero");
	if (err)
		goto err_hzp_shrinker;
	err = register_shrinker(&deferred_split_shrinker, "thp-deferred_split");
	if (err)
		goto err_split_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save.  The admin can still enable it through /sys.
	 */
	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
	unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

#ifdef CONFIG_MEMCG
static inline struct deferred_split *get_deferred_split_queue(struct page *page)
{
	struct mem_cgroup *memcg = page_memcg(compound_head(page));
	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));

	if (memcg)
		return &memcg->deferred_split_queue;
	else
		return &pgdat->deferred_split_queue;
}
#else
static inline struct deferred_split *get_deferred_split_queue(struct page *page)
{
	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));

	return &pgdat->deferred_split_queue;
}
#endif

void prep_transhuge_page(struct page *page)
{
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */

	INIT_LIST_HEAD(page_deferred_list(page));
	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}

static inline bool is_transparent_hugepage(struct page *page)
{
	if (!PageCompound(page))
		return false;

	page = compound_head(page);
	return is_huge_zero_page(page) ||
	       page[1].compound_dtor == TRANSHUGE_PAGE_DTOR;
}

static unsigned long __thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size)
{
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad, ret;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
					      off >> PAGE_SHIFT, flags);

	/*
	 * The failure might be due to length padding. The caller will retry
	 * without the padding.
	 */
	if (IS_ERR_VALUE(ret))
		return 0;

	/*
	 * Do not try to align to THP boundary if allocation at the address
	 * hint succeeds.
	 */
	if (ret == addr)
		return addr;

	ret += (off - ret) & (size - 1);
	return ret;
}

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long ret;
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
	if (ret)
		return ret;

	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);

static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
			struct page *page, gfp_t gfp)
{
	struct vm_area_struct *vma = vmf->vma;
	pgtable_t pgtable;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	vm_fault_t ret = 0;

	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, gfp)) {
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
		return VM_FAULT_FALLBACK;
	}
	cgroup_throttle_swaprate(page, gfp);

	pgtable = pte_alloc_one(vma->vm_mm);
	if (unlikely(!pgtable)) {
		ret = VM_FAULT_OOM;
		goto release;
	}

	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__SetPageUptodate(page);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_none(*vmf->pmd))) {
		goto unlock_release;
	} else {
		pmd_t entry;

		ret = check_stable_address_space(vma->vm_mm);
		if (ret)
			goto unlock_release;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			spin_unlock(vmf->ptl);
			put_page(page);
			pte_free(vma->vm_mm, pgtable);
			ret = handle_userfault(vmf, VM_UFFD_MISSING);
			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			return ret;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr);
		lru_cache_add_inactive_or_unevictable(page, vma);
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(vma->vm_mm);
		spin_unlock(vmf->ptl);
		count_vm_event(THP_FAULT_ALLOC);
		count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
	}

	return 0;
unlock_release:
	spin_unlock(vmf->ptl);
release:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	put_page(page);
	return ret;

}

/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */
gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
{
	const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);

	/* Always do synchronous compaction */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);

	/* Kick kcompactd and fail quickly */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;

	/* Synchronous compaction if madvised, otherwise kick kcompactd */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM :
					__GFP_KSWAPD_RECLAIM);

	/* Only do synchronous compaction if madvised */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM : 0);

	return GFP_TRANSHUGE_LIGHT;
}

/* Caller must hold page table lock. */
static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
}

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	gfp_t gfp;
	struct folio *folio;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

	if (!transhuge_vma_suitable(vma, haddr))
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	khugepaged_enter_vma(vma, vma->vm_flags);

	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
			!mm_forbids_zeropage(vma->vm_mm) &&
			transparent_hugepage_use_zero_page()) {
		pgtable_t pgtable;
		struct page *zero_page;
		vm_fault_t ret;
		pgtable = pte_alloc_one(vma->vm_mm);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = mm_get_huge_zero_page(vma->vm_mm);
		if (unlikely(!zero_page)) {
			pte_free(vma->vm_mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		ret = 0;
		if (pmd_none(*vmf->pmd)) {
			ret = check_stable_address_space(vma->vm_mm);
			if (ret) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
			} else if (userfaultfd_missing(vma)) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
				ret = handle_userfault(vmf, VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_page(pgtable, vma->vm_mm, vma,
						   haddr, vmf->pmd, zero_page);
				update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
				spin_unlock(vmf->ptl);
			}
		} else {
			spin_unlock(vmf->ptl);
			pte_free(vma->vm_mm, pgtable);
		}
		return ret;
	}
	gfp = vma_thp_gfp_mask(vma);
	folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
	if (unlikely(!folio)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
}

static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
		pgtable_t pgtable)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!pmd_none(*pmd)) {
		if (write) {
			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
				goto out_unlock;
			}
			entry = pmd_mkyoung(*pmd);
			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
				update_mmu_cache_pmd(vma, addr, pmd);
		}

		goto out_unlock;
	}

	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pmd_mkdevmap(entry);
	if (write) {
		entry = pmd_mkyoung(pmd_mkdirty(entry));
		entry = maybe_pmd_mkwrite(entry, vma);
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		mm_inc_nr_ptes(mm);
		pgtable = NULL;
	}

	set_pmd_at(mm, addr, pmd, entry);
	update_mmu_cache_pmd(vma, addr, pmd);

out_unlock:
	spin_unlock(ptl);
	if (pgtable)
		pte_free(mm, pgtable);
}

/**
 * vmf_insert_pfn_pmd_prot - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @pgprot: page protection to use
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info and
 * also consult the vmf_insert_mixed_prot() documentation when
 * @pgprot != @vmf->vma->vm_page_prot.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write)
{
	unsigned long addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgtable_t pgtable = NULL;

	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd_prot);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);
	return pud;
}

static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	pud_t entry;
	spinlock_t *ptl;

	ptl = pud_lock(mm, pud);
	if (!pud_none(*pud)) {
		if (write) {
			if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pud(*pud));
				goto out_unlock;
			}
			entry = pud_mkyoung(*pud);
			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
				update_mmu_cache_pud(vma, addr, pud);
		}
		goto out_unlock;
	}

	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pud_mkdevmap(entry);
	if (write) {
		entry = pud_mkyoung(pud_mkdirty(entry));
		entry = maybe_pud_mkwrite(entry, vma);
	}
	set_pud_at(mm, addr, pud, entry);
	update_mmu_cache_pud(vma, addr, pud);

out_unlock:
	spin_unlock(ptl);
}

/**
 * vmf_insert_pfn_pud_prot - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @pgprot: page protection to use
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info and
 * also consult the vmf_insert_mixed_prot() documentation when
 * @pgprot != @vmf->vma->vm_page_prot.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write)
{
	unsigned long addr = vmf->address & PUD_MASK;
	struct vm_area_struct *vma = vmf->vma;

	/*
	 * If we had pud_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud_prot);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, bool write)
{
	pmd_t _pmd;

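	/*
	 * Mark the pmd accessed (and dirty for a write); only call
	 * update_mmu_cache_pmd() if the entry actually changed.
	 */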
	_pmd = pmd_mkyoung(*pmd);
	if (write)
		_pmd = pmd_mkdirty(_pmd);
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				  pmd, _pmd, write))
		update_mmu_cache_pmd(vma, addr, pmd);
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pmd_pfn(*pmd);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return NULL;

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		return NULL;

	if (pmd_present(*pmd) && pmd_devmap(*pmd))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	if (!try_grab_page(page, flags))
		page = ERR_PTR(-ENOMEM);

	return page;
}

int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable = NULL;
	int ret = -ENOMEM;

	/* Skip if it can be refilled on fault */
	if (!vma_is_anonymous(dst_vma))
		return 0;

	pgtable = pte_alloc_one(dst_mm);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
Shutemov src_ptl = pmd_lockptr(src_mm, src_pmd); 1095c4088ebdSKirill A. Shutemov spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 109671e3aac0SAndrea Arcangeli 109771e3aac0SAndrea Arcangeli ret = -EAGAIN; 109871e3aac0SAndrea Arcangeli pmd = *src_pmd; 109984c3fc4eSZi Yan 110084c3fc4eSZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 110184c3fc4eSZi Yan if (unlikely(is_swap_pmd(pmd))) { 110284c3fc4eSZi Yan swp_entry_t entry = pmd_to_swp_entry(pmd); 110384c3fc4eSZi Yan 110484c3fc4eSZi Yan VM_BUG_ON(!is_pmd_migration_entry(pmd)); 11056c287605SDavid Hildenbrand if (!is_readable_migration_entry(entry)) { 11064dd845b5SAlistair Popple entry = make_readable_migration_entry( 11074dd845b5SAlistair Popple swp_offset(entry)); 110884c3fc4eSZi Yan pmd = swp_entry_to_pmd(entry); 1109ab6e3d09SNaoya Horiguchi if (pmd_swp_soft_dirty(*src_pmd)) 1110ab6e3d09SNaoya Horiguchi pmd = pmd_swp_mksoft_dirty(pmd); 11118f34f1eaSPeter Xu if (pmd_swp_uffd_wp(*src_pmd)) 11128f34f1eaSPeter Xu pmd = pmd_swp_mkuffd_wp(pmd); 111384c3fc4eSZi Yan set_pmd_at(src_mm, addr, src_pmd, pmd); 111484c3fc4eSZi Yan } 1115dd8a67f9SZi Yan add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); 1116af5b0f6aSKirill A. Shutemov mm_inc_nr_ptes(dst_mm); 1117dd8a67f9SZi Yan pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); 11188f34f1eaSPeter Xu if (!userfaultfd_wp(dst_vma)) 11198f34f1eaSPeter Xu pmd = pmd_swp_clear_uffd_wp(pmd); 112084c3fc4eSZi Yan set_pmd_at(dst_mm, addr, dst_pmd, pmd); 112184c3fc4eSZi Yan ret = 0; 112284c3fc4eSZi Yan goto out_unlock; 112384c3fc4eSZi Yan } 112484c3fc4eSZi Yan #endif 112584c3fc4eSZi Yan 1126628d47ceSKirill A. Shutemov if (unlikely(!pmd_trans_huge(pmd))) { 112771e3aac0SAndrea Arcangeli pte_free(dst_mm, pgtable); 112871e3aac0SAndrea Arcangeli goto out_unlock; 112971e3aac0SAndrea Arcangeli } 1130fc9fe822SKirill A. Shutemov /* 1131c4088ebdSKirill A. Shutemov * When page table lock is held, the huge zero pmd should not be 1132fc9fe822SKirill A. Shutemov * under splitting since we don't split the page itself, only pmd to 1133fc9fe822SKirill A. Shutemov * a page table. 1134fc9fe822SKirill A. Shutemov */ 1135fc9fe822SKirill A. Shutemov if (is_huge_zero_pmd(pmd)) { 113697ae1749SKirill A. Shutemov /* 113797ae1749SKirill A. Shutemov * get_huge_zero_page() will never allocate a new page here, 113897ae1749SKirill A. Shutemov * since we already have a zero page to copy. It just takes a 113997ae1749SKirill A. Shutemov * reference. 114097ae1749SKirill A. Shutemov */ 11415fc7a5f6SPeter Xu mm_get_huge_zero_page(dst_mm); 11425fc7a5f6SPeter Xu goto out_zero_page; 1143fc9fe822SKirill A. Shutemov } 1144de466bd6SMel Gorman 114571e3aac0SAndrea Arcangeli src_page = pmd_page(pmd); 1146309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(src_page), src_page); 1147d042035eSPeter Xu 1148fb3d824dSDavid Hildenbrand get_page(src_page); 1149fb3d824dSDavid Hildenbrand if (unlikely(page_try_dup_anon_rmap(src_page, true, src_vma))) { 1150fb3d824dSDavid Hildenbrand /* Page maybe pinned: split and retry the fault on PTEs. */ 1151fb3d824dSDavid Hildenbrand put_page(src_page); 1152d042035eSPeter Xu pte_free(dst_mm, pgtable); 1153d042035eSPeter Xu spin_unlock(src_ptl); 1154d042035eSPeter Xu spin_unlock(dst_ptl); 11558f34f1eaSPeter Xu __split_huge_pmd(src_vma, src_pmd, addr, false, NULL); 1156d042035eSPeter Xu return -EAGAIN; 1157d042035eSPeter Xu } 115871e3aac0SAndrea Arcangeli add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); 11595fc7a5f6SPeter Xu out_zero_page: 1160c4812909SKirill A. 
Shutemov mm_inc_nr_ptes(dst_mm); 11615c7fb56eSDan Williams pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); 116271e3aac0SAndrea Arcangeli pmdp_set_wrprotect(src_mm, addr, src_pmd); 11638f34f1eaSPeter Xu if (!userfaultfd_wp(dst_vma)) 11648f34f1eaSPeter Xu pmd = pmd_clear_uffd_wp(pmd); 116571e3aac0SAndrea Arcangeli pmd = pmd_mkold(pmd_wrprotect(pmd)); 116671e3aac0SAndrea Arcangeli set_pmd_at(dst_mm, addr, dst_pmd, pmd); 116771e3aac0SAndrea Arcangeli 116871e3aac0SAndrea Arcangeli ret = 0; 116971e3aac0SAndrea Arcangeli out_unlock: 1170c4088ebdSKirill A. Shutemov spin_unlock(src_ptl); 1171c4088ebdSKirill A. Shutemov spin_unlock(dst_ptl); 117271e3aac0SAndrea Arcangeli out: 117371e3aac0SAndrea Arcangeli return ret; 117471e3aac0SAndrea Arcangeli } 117571e3aac0SAndrea Arcangeli 1176a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 1177a00cc7d9SMatthew Wilcox static void touch_pud(struct vm_area_struct *vma, unsigned long addr, 11785fe653e9SMiaohe Lin pud_t *pud, bool write) 1179a00cc7d9SMatthew Wilcox { 1180a00cc7d9SMatthew Wilcox pud_t _pud; 1181a00cc7d9SMatthew Wilcox 1182a8f97366SKirill A. Shutemov _pud = pud_mkyoung(*pud); 11835fe653e9SMiaohe Lin if (write) 1184a8f97366SKirill A. Shutemov _pud = pud_mkdirty(_pud); 1185a00cc7d9SMatthew Wilcox if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK, 11865fe653e9SMiaohe Lin pud, _pud, write)) 1187a00cc7d9SMatthew Wilcox update_mmu_cache_pud(vma, addr, pud); 1188a00cc7d9SMatthew Wilcox } 1189a00cc7d9SMatthew Wilcox 1190a00cc7d9SMatthew Wilcox struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, 1191df06b37fSKeith Busch pud_t *pud, int flags, struct dev_pagemap **pgmap) 1192a00cc7d9SMatthew Wilcox { 1193a00cc7d9SMatthew Wilcox unsigned long pfn = pud_pfn(*pud); 1194a00cc7d9SMatthew Wilcox struct mm_struct *mm = vma->vm_mm; 1195a00cc7d9SMatthew Wilcox struct page *page; 1196a00cc7d9SMatthew Wilcox 1197a00cc7d9SMatthew Wilcox assert_spin_locked(pud_lockptr(mm, pud)); 1198a00cc7d9SMatthew Wilcox 1199f6f37321SLinus Torvalds if (flags & FOLL_WRITE && !pud_write(*pud)) 1200a00cc7d9SMatthew Wilcox return NULL; 1201a00cc7d9SMatthew Wilcox 12023faa52c0SJohn Hubbard /* FOLL_GET and FOLL_PIN are mutually exclusive. */ 12033faa52c0SJohn Hubbard if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) == 12043faa52c0SJohn Hubbard (FOLL_PIN | FOLL_GET))) 12053faa52c0SJohn Hubbard return NULL; 12063faa52c0SJohn Hubbard 1207a00cc7d9SMatthew Wilcox if (pud_present(*pud) && pud_devmap(*pud)) 1208a00cc7d9SMatthew Wilcox /* pass */; 1209a00cc7d9SMatthew Wilcox else 1210a00cc7d9SMatthew Wilcox return NULL; 1211a00cc7d9SMatthew Wilcox 1212a00cc7d9SMatthew Wilcox if (flags & FOLL_TOUCH) 12135fe653e9SMiaohe Lin touch_pud(vma, addr, pud, flags & FOLL_WRITE); 1214a00cc7d9SMatthew Wilcox 1215a00cc7d9SMatthew Wilcox /* 1216a00cc7d9SMatthew Wilcox * device mapped pages can only be returned if the 1217a00cc7d9SMatthew Wilcox * caller will manage the page reference count. 
12183faa52c0SJohn Hubbard * 12193faa52c0SJohn Hubbard * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here: 1220a00cc7d9SMatthew Wilcox */ 12213faa52c0SJohn Hubbard if (!(flags & (FOLL_GET | FOLL_PIN))) 1222a00cc7d9SMatthew Wilcox return ERR_PTR(-EEXIST); 1223a00cc7d9SMatthew Wilcox 1224a00cc7d9SMatthew Wilcox pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT; 1225df06b37fSKeith Busch *pgmap = get_dev_pagemap(pfn, *pgmap); 1226df06b37fSKeith Busch if (!*pgmap) 1227a00cc7d9SMatthew Wilcox return ERR_PTR(-EFAULT); 1228a00cc7d9SMatthew Wilcox page = pfn_to_page(pfn); 12293faa52c0SJohn Hubbard if (!try_grab_page(page, flags)) 12303faa52c0SJohn Hubbard page = ERR_PTR(-ENOMEM); 1231a00cc7d9SMatthew Wilcox 1232a00cc7d9SMatthew Wilcox return page; 1233a00cc7d9SMatthew Wilcox } 1234a00cc7d9SMatthew Wilcox 1235a00cc7d9SMatthew Wilcox int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, 1236a00cc7d9SMatthew Wilcox pud_t *dst_pud, pud_t *src_pud, unsigned long addr, 1237a00cc7d9SMatthew Wilcox struct vm_area_struct *vma) 1238a00cc7d9SMatthew Wilcox { 1239a00cc7d9SMatthew Wilcox spinlock_t *dst_ptl, *src_ptl; 1240a00cc7d9SMatthew Wilcox pud_t pud; 1241a00cc7d9SMatthew Wilcox int ret; 1242a00cc7d9SMatthew Wilcox 1243a00cc7d9SMatthew Wilcox dst_ptl = pud_lock(dst_mm, dst_pud); 1244a00cc7d9SMatthew Wilcox src_ptl = pud_lockptr(src_mm, src_pud); 1245a00cc7d9SMatthew Wilcox spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 1246a00cc7d9SMatthew Wilcox 1247a00cc7d9SMatthew Wilcox ret = -EAGAIN; 1248a00cc7d9SMatthew Wilcox pud = *src_pud; 1249a00cc7d9SMatthew Wilcox if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud))) 1250a00cc7d9SMatthew Wilcox goto out_unlock; 1251a00cc7d9SMatthew Wilcox 1252a00cc7d9SMatthew Wilcox /* 1253a00cc7d9SMatthew Wilcox * When page table lock is held, the huge zero pud should not be 1254a00cc7d9SMatthew Wilcox * under splitting since we don't split the page itself, only pud to 1255a00cc7d9SMatthew Wilcox * a page table. 1256a00cc7d9SMatthew Wilcox */ 1257a00cc7d9SMatthew Wilcox if (is_huge_zero_pud(pud)) { 1258a00cc7d9SMatthew Wilcox /* No huge zero pud yet */ 1259a00cc7d9SMatthew Wilcox } 1260a00cc7d9SMatthew Wilcox 1261fb3d824dSDavid Hildenbrand /* 1262fb3d824dSDavid Hildenbrand * TODO: once we support anonymous pages, use page_try_dup_anon_rmap() 1263fb3d824dSDavid Hildenbrand * and split if duplicating fails. 
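* Until then only DAX/devmap huge PUDs get copied here, so write-protecting the source entry and installing an old, write-protected copy in the destination is sufficient.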
1264fb3d824dSDavid Hildenbrand */ 1265a00cc7d9SMatthew Wilcox pudp_set_wrprotect(src_mm, addr, src_pud); 1266a00cc7d9SMatthew Wilcox pud = pud_mkold(pud_wrprotect(pud)); 1267a00cc7d9SMatthew Wilcox set_pud_at(dst_mm, addr, dst_pud, pud); 1268a00cc7d9SMatthew Wilcox 1269a00cc7d9SMatthew Wilcox ret = 0; 1270a00cc7d9SMatthew Wilcox out_unlock: 1271a00cc7d9SMatthew Wilcox spin_unlock(src_ptl); 1272a00cc7d9SMatthew Wilcox spin_unlock(dst_ptl); 1273a00cc7d9SMatthew Wilcox return ret; 1274a00cc7d9SMatthew Wilcox } 1275a00cc7d9SMatthew Wilcox 1276a00cc7d9SMatthew Wilcox void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) 1277a00cc7d9SMatthew Wilcox { 1278a00cc7d9SMatthew Wilcox bool write = vmf->flags & FAULT_FLAG_WRITE; 1279a00cc7d9SMatthew Wilcox 1280a00cc7d9SMatthew Wilcox vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud); 1281a00cc7d9SMatthew Wilcox if (unlikely(!pud_same(*vmf->pud, orig_pud))) 1282a00cc7d9SMatthew Wilcox goto unlock; 1283a00cc7d9SMatthew Wilcox 12845fe653e9SMiaohe Lin touch_pud(vmf->vma, vmf->address, vmf->pud, write); 1285a00cc7d9SMatthew Wilcox unlock: 1286a00cc7d9SMatthew Wilcox spin_unlock(vmf->ptl); 1287a00cc7d9SMatthew Wilcox } 1288a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 1289a00cc7d9SMatthew Wilcox 12905db4f15cSYang Shi void huge_pmd_set_accessed(struct vm_fault *vmf) 1291a1dd450bSWill Deacon { 129220f664aaSMinchan Kim bool write = vmf->flags & FAULT_FLAG_WRITE; 1293a1dd450bSWill Deacon 129482b0f8c3SJan Kara vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); 1295a69e4717SMiaohe Lin if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd))) 1296a1dd450bSWill Deacon goto unlock; 1297a1dd450bSWill Deacon 1298a69e4717SMiaohe Lin touch_pmd(vmf->vma, vmf->address, vmf->pmd, write); 1299a1dd450bSWill Deacon 1300a1dd450bSWill Deacon unlock: 130182b0f8c3SJan Kara spin_unlock(vmf->ptl); 1302a1dd450bSWill Deacon } 1303a1dd450bSWill Deacon 13045db4f15cSYang Shi vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf) 130571e3aac0SAndrea Arcangeli { 1306c89357e2SDavid Hildenbrand const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 130782b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 13083917c802SKirill A. Shutemov struct page *page; 130982b0f8c3SJan Kara unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 13105db4f15cSYang Shi pmd_t orig_pmd = vmf->orig_pmd; 131171e3aac0SAndrea Arcangeli 131282b0f8c3SJan Kara vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); 131381d1b09cSSasha Levin VM_BUG_ON_VMA(!vma->anon_vma, vma); 13143917c802SKirill A. Shutemov 1315c89357e2SDavid Hildenbrand VM_BUG_ON(unshare && (vmf->flags & FAULT_FLAG_WRITE)); 1316c89357e2SDavid Hildenbrand VM_BUG_ON(!unshare && !(vmf->flags & FAULT_FLAG_WRITE)); 1317c89357e2SDavid Hildenbrand 131893b4796dSKirill A. Shutemov if (is_huge_zero_pmd(orig_pmd)) 13193917c802SKirill A. Shutemov goto fallback; 13203917c802SKirill A. Shutemov 132182b0f8c3SJan Kara spin_lock(vmf->ptl); 13223917c802SKirill A. Shutemov 13233917c802SKirill A. Shutemov if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { 13243917c802SKirill A. Shutemov spin_unlock(vmf->ptl); 13253917c802SKirill A. Shutemov return 0; 13263917c802SKirill A. Shutemov } 132771e3aac0SAndrea Arcangeli 132871e3aac0SAndrea Arcangeli page = pmd_page(orig_pmd); 1329f6004e73SMiaohe Lin VM_BUG_ON_PAGE(!PageHead(page), page); 13303917c802SKirill A. Shutemov 13316c287605SDavid Hildenbrand /* Early check when only holding the PT lock. 
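* A PageAnonExclusive() page already belongs solely to this mapping, so it can be reused for the write without taking the page lock.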
*/ 13326c287605SDavid Hildenbrand if (PageAnonExclusive(page)) 13336c287605SDavid Hildenbrand goto reuse; 13346c287605SDavid Hildenbrand 1335ba3c4ce6SHuang Ying if (!trylock_page(page)) { 1336ba3c4ce6SHuang Ying get_page(page); 1337ba3c4ce6SHuang Ying spin_unlock(vmf->ptl); 1338ba3c4ce6SHuang Ying lock_page(page); 1339ba3c4ce6SHuang Ying spin_lock(vmf->ptl); 1340ba3c4ce6SHuang Ying if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { 13413917c802SKirill A. Shutemov spin_unlock(vmf->ptl); 1342ba3c4ce6SHuang Ying unlock_page(page); 1343ba3c4ce6SHuang Ying put_page(page); 13443917c802SKirill A. Shutemov return 0; 1345ba3c4ce6SHuang Ying } 1346ba3c4ce6SHuang Ying put_page(page); 1347ba3c4ce6SHuang Ying } 13483917c802SKirill A. Shutemov 13496c287605SDavid Hildenbrand /* Recheck after temporarily dropping the PT lock. */ 13506c287605SDavid Hildenbrand if (PageAnonExclusive(page)) { 13516c287605SDavid Hildenbrand unlock_page(page); 13526c287605SDavid Hildenbrand goto reuse; 13536c287605SDavid Hildenbrand } 13546c287605SDavid Hildenbrand 13553917c802SKirill A. Shutemov /* 1356c89357e2SDavid Hildenbrand * See do_wp_page(): we can only reuse the page exclusively if there are 13573bff7e3fSDavid Hildenbrand * no additional references. Note that we always drain the LRU 13583bff7e3fSDavid Hildenbrand * pagevecs immediately after adding a THP. 13593917c802SKirill A. Shutemov */ 13603bff7e3fSDavid Hildenbrand if (page_count(page) > 1 + PageSwapCache(page) * thp_nr_pages(page)) 13613bff7e3fSDavid Hildenbrand goto unlock_fallback; 13623bff7e3fSDavid Hildenbrand if (PageSwapCache(page)) 13633bff7e3fSDavid Hildenbrand try_to_free_swap(page); 13643bff7e3fSDavid Hildenbrand if (page_count(page) == 1) { 136571e3aac0SAndrea Arcangeli pmd_t entry; 13666c54dc6cSDavid Hildenbrand 13676c54dc6cSDavid Hildenbrand page_move_anon_rmap(page, vma); 13686c287605SDavid Hildenbrand unlock_page(page); 13696c287605SDavid Hildenbrand reuse: 1370c89357e2SDavid Hildenbrand if (unlikely(unshare)) { 1371c89357e2SDavid Hildenbrand spin_unlock(vmf->ptl); 1372c89357e2SDavid Hildenbrand return 0; 1373c89357e2SDavid Hildenbrand } 137471e3aac0SAndrea Arcangeli entry = pmd_mkyoung(orig_pmd); 1375f55e1014SLinus Torvalds entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 137682b0f8c3SJan Kara if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) 137782b0f8c3SJan Kara update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 13783917c802SKirill A. Shutemov spin_unlock(vmf->ptl); 13793917c802SKirill A. Shutemov return VM_FAULT_WRITE; 138071e3aac0SAndrea Arcangeli } 13813917c802SKirill A. Shutemov 13823bff7e3fSDavid Hildenbrand unlock_fallback: 1383ba3c4ce6SHuang Ying unlock_page(page); 138482b0f8c3SJan Kara spin_unlock(vmf->ptl); 13853917c802SKirill A. Shutemov fallback: 13863917c802SKirill A. Shutemov __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); 13873917c802SKirill A. Shutemov return VM_FAULT_FALLBACK; 138871e3aac0SAndrea Arcangeli } 138971e3aac0SAndrea Arcangeli 13905535be30SDavid Hildenbrand /* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */ 13915535be30SDavid Hildenbrand static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page, 13925535be30SDavid Hildenbrand struct vm_area_struct *vma, 13935535be30SDavid Hildenbrand unsigned int flags) 13948310d48bSKeno Fischer { 13955535be30SDavid Hildenbrand /* If the pmd is writable, we can write to the page. 
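* Otherwise FOLL_FORCE may still permit the write, but only for an exclusive anonymous page in a COW mapping where no write fault is required; the checks below spell that out.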
*/ 13965535be30SDavid Hildenbrand if (pmd_write(pmd)) 13975535be30SDavid Hildenbrand return true; 13985535be30SDavid Hildenbrand 13995535be30SDavid Hildenbrand /* Maybe FOLL_FORCE is set to override it? */ 14005535be30SDavid Hildenbrand if (!(flags & FOLL_FORCE)) 14015535be30SDavid Hildenbrand return false; 14025535be30SDavid Hildenbrand 14035535be30SDavid Hildenbrand /* But FOLL_FORCE has no effect on shared mappings */ 14045535be30SDavid Hildenbrand if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) 14055535be30SDavid Hildenbrand return false; 14065535be30SDavid Hildenbrand 14075535be30SDavid Hildenbrand /* ... or read-only private ones */ 14085535be30SDavid Hildenbrand if (!(vma->vm_flags & VM_MAYWRITE)) 14095535be30SDavid Hildenbrand return false; 14105535be30SDavid Hildenbrand 14115535be30SDavid Hildenbrand /* ... or already writable ones that just need to take a write fault */ 14125535be30SDavid Hildenbrand if (vma->vm_flags & VM_WRITE) 14135535be30SDavid Hildenbrand return false; 14145535be30SDavid Hildenbrand 14155535be30SDavid Hildenbrand /* 14165535be30SDavid Hildenbrand * See can_change_pte_writable(): we broke COW and could map the page 14175535be30SDavid Hildenbrand * writable if we have an exclusive anonymous page ... 14185535be30SDavid Hildenbrand */ 14195535be30SDavid Hildenbrand if (!page || !PageAnon(page) || !PageAnonExclusive(page)) 14205535be30SDavid Hildenbrand return false; 14215535be30SDavid Hildenbrand 14225535be30SDavid Hildenbrand /* ... and a write-fault isn't required for other reasons. */ 14235535be30SDavid Hildenbrand if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd)) 14245535be30SDavid Hildenbrand return false; 14255535be30SDavid Hildenbrand return !userfaultfd_huge_pmd_wp(vma, pmd); 14268310d48bSKeno Fischer } 14278310d48bSKeno Fischer 1428b676b293SDavid Rientjes struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, 142971e3aac0SAndrea Arcangeli unsigned long addr, 143071e3aac0SAndrea Arcangeli pmd_t *pmd, 143171e3aac0SAndrea Arcangeli unsigned int flags) 143271e3aac0SAndrea Arcangeli { 1433b676b293SDavid Rientjes struct mm_struct *mm = vma->vm_mm; 14345535be30SDavid Hildenbrand struct page *page; 143571e3aac0SAndrea Arcangeli 1436c4088ebdSKirill A. Shutemov assert_spin_locked(pmd_lockptr(mm, pmd)); 143771e3aac0SAndrea Arcangeli 14385535be30SDavid Hildenbrand page = pmd_page(*pmd); 14395535be30SDavid Hildenbrand VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page); 14405535be30SDavid Hildenbrand 14415535be30SDavid Hildenbrand if ((flags & FOLL_WRITE) && 14425535be30SDavid Hildenbrand !can_follow_write_pmd(*pmd, page, vma, flags)) 14435535be30SDavid Hildenbrand return NULL; 144471e3aac0SAndrea Arcangeli 144585facf25SKirill A. Shutemov /* Avoid dumping huge zero page */ 144685facf25SKirill A. Shutemov if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) 144785facf25SKirill A. Shutemov return ERR_PTR(-EFAULT); 144885facf25SKirill A. 
Shutemov 14492b4847e7SMel Gorman /* Full NUMA hinting faults to serialise migration in fault paths */ 14508a0516edSMel Gorman if ((flags & FOLL_NUMA) && pmd_protnone(*pmd)) 14515535be30SDavid Hildenbrand return NULL; 14523faa52c0SJohn Hubbard 1453a7f22660SDavid Hildenbrand if (!pmd_write(*pmd) && gup_must_unshare(flags, page)) 1454a7f22660SDavid Hildenbrand return ERR_PTR(-EMLINK); 1455a7f22660SDavid Hildenbrand 1456b6a2619cSDavid Hildenbrand VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) && 1457b6a2619cSDavid Hildenbrand !PageAnonExclusive(page), page); 1458b6a2619cSDavid Hildenbrand 14593faa52c0SJohn Hubbard if (!try_grab_page(page, flags)) 14603faa52c0SJohn Hubbard return ERR_PTR(-ENOMEM); 14613faa52c0SJohn Hubbard 14623565fce3SDan Williams if (flags & FOLL_TOUCH) 1463a69e4717SMiaohe Lin touch_pmd(vma, addr, pmd, flags & FOLL_WRITE); 14643faa52c0SJohn Hubbard 146571e3aac0SAndrea Arcangeli page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; 1466ca120cf6SDan Williams VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page); 146771e3aac0SAndrea Arcangeli 146871e3aac0SAndrea Arcangeli return page; 146971e3aac0SAndrea Arcangeli } 147071e3aac0SAndrea Arcangeli 1471d10e63f2SMel Gorman /* NUMA hinting page fault entry point for trans huge pmds */ 14725db4f15cSYang Shi vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) 1473d10e63f2SMel Gorman { 147482b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 1475c5b5a3ddSYang Shi pmd_t oldpmd = vmf->orig_pmd; 1476c5b5a3ddSYang Shi pmd_t pmd; 1477b32967ffSMel Gorman struct page *page; 147882b0f8c3SJan Kara unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 1479c5b5a3ddSYang Shi int page_nid = NUMA_NO_NODE; 148033024536SHuang Ying int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK); 14818191acbdSMel Gorman bool migrated = false; 1482c5b5a3ddSYang Shi bool was_writable = pmd_savedwrite(oldpmd); 14836688cc05SPeter Zijlstra int flags = 0; 1484d10e63f2SMel Gorman 148582b0f8c3SJan Kara vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 1486c5b5a3ddSYang Shi if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) { 148782b0f8c3SJan Kara spin_unlock(vmf->ptl); 1488de466bd6SMel Gorman goto out; 1489de466bd6SMel Gorman } 1490de466bd6SMel Gorman 1491c5b5a3ddSYang Shi pmd = pmd_modify(oldpmd, vma->vm_page_prot); 1492c5b5a3ddSYang Shi page = vm_normal_page_pmd(vma, haddr, pmd); 1493c5b5a3ddSYang Shi if (!page) 1494c5b5a3ddSYang Shi goto out_map; 1495c5b5a3ddSYang Shi 1496c5b5a3ddSYang Shi /* See similar comment in do_numa_page for explanation */ 1497c5b5a3ddSYang Shi if (!was_writable) 1498c5b5a3ddSYang Shi flags |= TNF_NO_GROUP; 1499c5b5a3ddSYang Shi 1500c5b5a3ddSYang Shi page_nid = page_to_nid(page); 150133024536SHuang Ying /* 150233024536SHuang Ying * For memory tiering mode, cpupid of slow memory page is used 150333024536SHuang Ying * to record page access time. So use default value. 
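* Only pages on top-tier nodes still report their stored cpupid below; slow-tier pages keep the default set above.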
150433024536SHuang Ying */ 150533024536SHuang Ying if (node_is_toptier(page_nid)) 1506c5b5a3ddSYang Shi last_cpupid = page_cpupid_last(page); 1507c5b5a3ddSYang Shi target_nid = numa_migrate_prep(page, vma, haddr, page_nid, 1508c5b5a3ddSYang Shi &flags); 1509c5b5a3ddSYang Shi 1510c5b5a3ddSYang Shi if (target_nid == NUMA_NO_NODE) { 1511c5b5a3ddSYang Shi put_page(page); 1512c5b5a3ddSYang Shi goto out_map; 1513c5b5a3ddSYang Shi } 1514c5b5a3ddSYang Shi 151582b0f8c3SJan Kara spin_unlock(vmf->ptl); 15168b1b436dSPeter Zijlstra 1517c5b5a3ddSYang Shi migrated = migrate_misplaced_page(page, vma, target_nid); 15186688cc05SPeter Zijlstra if (migrated) { 15196688cc05SPeter Zijlstra flags |= TNF_MIGRATED; 15208191acbdSMel Gorman page_nid = target_nid; 1521c5b5a3ddSYang Shi } else { 1522074c2381SMel Gorman flags |= TNF_MIGRATE_FAIL; 1523c5b5a3ddSYang Shi vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 1524c5b5a3ddSYang Shi if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) { 152582b0f8c3SJan Kara spin_unlock(vmf->ptl); 1526c5b5a3ddSYang Shi goto out; 1527c5b5a3ddSYang Shi } 1528c5b5a3ddSYang Shi goto out_map; 1529c5b5a3ddSYang Shi } 1530b8916634SMel Gorman 1531b8916634SMel Gorman out: 153298fa15f3SAnshuman Khandual if (page_nid != NUMA_NO_NODE) 153382b0f8c3SJan Kara task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, 15349a8b300fSAneesh Kumar K.V flags); 15358191acbdSMel Gorman 1536d10e63f2SMel Gorman return 0; 1537c5b5a3ddSYang Shi 1538c5b5a3ddSYang Shi out_map: 1539c5b5a3ddSYang Shi /* Restore the PMD */ 1540c5b5a3ddSYang Shi pmd = pmd_modify(oldpmd, vma->vm_page_prot); 1541c5b5a3ddSYang Shi pmd = pmd_mkyoung(pmd); 1542c5b5a3ddSYang Shi if (was_writable) 1543c5b5a3ddSYang Shi pmd = pmd_mkwrite(pmd); 1544c5b5a3ddSYang Shi set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); 1545c5b5a3ddSYang Shi update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 1546c5b5a3ddSYang Shi spin_unlock(vmf->ptl); 1547c5b5a3ddSYang Shi goto out; 1548d10e63f2SMel Gorman } 1549d10e63f2SMel Gorman 1550319904adSHuang Ying /* 1551319904adSHuang Ying * Return true if we do MADV_FREE successfully on entire pmd page. 1552319904adSHuang Ying * Otherwise, return false. 1553319904adSHuang Ying */ 1554319904adSHuang Ying bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1555b8d3c4c3SMinchan Kim pmd_t *pmd, unsigned long addr, unsigned long next) 1556b8d3c4c3SMinchan Kim { 1557b8d3c4c3SMinchan Kim spinlock_t *ptl; 1558b8d3c4c3SMinchan Kim pmd_t orig_pmd; 1559b8d3c4c3SMinchan Kim struct page *page; 1560b8d3c4c3SMinchan Kim struct mm_struct *mm = tlb->mm; 1561319904adSHuang Ying bool ret = false; 1562b8d3c4c3SMinchan Kim 1563ed6a7935SPeter Zijlstra tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 156407e32661SAneesh Kumar K.V 1565b6ec57f4SKirill A. Shutemov ptl = pmd_trans_huge_lock(pmd, vma); 1566b6ec57f4SKirill A. 
Shutemov if (!ptl) 156725eedabeSLinus Torvalds goto out_unlocked; 1568b8d3c4c3SMinchan Kim 1569b8d3c4c3SMinchan Kim orig_pmd = *pmd; 1570319904adSHuang Ying if (is_huge_zero_pmd(orig_pmd)) 1571b8d3c4c3SMinchan Kim goto out; 1572b8d3c4c3SMinchan Kim 157384c3fc4eSZi Yan if (unlikely(!pmd_present(orig_pmd))) { 157484c3fc4eSZi Yan VM_BUG_ON(thp_migration_supported() && 157584c3fc4eSZi Yan !is_pmd_migration_entry(orig_pmd)); 157684c3fc4eSZi Yan goto out; 157784c3fc4eSZi Yan } 157884c3fc4eSZi Yan 1579b8d3c4c3SMinchan Kim page = pmd_page(orig_pmd); 1580b8d3c4c3SMinchan Kim /* 1581b8d3c4c3SMinchan Kim * If other processes are mapping this page, we couldn't discard 1582b8d3c4c3SMinchan Kim * the page unless they all do MADV_FREE so let's skip the page. 1583b8d3c4c3SMinchan Kim */ 1584babbbdd0SMiaohe Lin if (total_mapcount(page) != 1) 1585b8d3c4c3SMinchan Kim goto out; 1586b8d3c4c3SMinchan Kim 1587b8d3c4c3SMinchan Kim if (!trylock_page(page)) 1588b8d3c4c3SMinchan Kim goto out; 1589b8d3c4c3SMinchan Kim 1590b8d3c4c3SMinchan Kim /* 1591b8d3c4c3SMinchan Kim * If user want to discard part-pages of THP, split it so MADV_FREE 1592b8d3c4c3SMinchan Kim * will deactivate only them. 1593b8d3c4c3SMinchan Kim */ 1594b8d3c4c3SMinchan Kim if (next - addr != HPAGE_PMD_SIZE) { 1595b8d3c4c3SMinchan Kim get_page(page); 1596b8d3c4c3SMinchan Kim spin_unlock(ptl); 15979818b8cdSHuang Ying split_huge_page(page); 1598b8d3c4c3SMinchan Kim unlock_page(page); 1599bbf29ffcSKirill A. Shutemov put_page(page); 1600b8d3c4c3SMinchan Kim goto out_unlocked; 1601b8d3c4c3SMinchan Kim } 1602b8d3c4c3SMinchan Kim 1603b8d3c4c3SMinchan Kim if (PageDirty(page)) 1604b8d3c4c3SMinchan Kim ClearPageDirty(page); 1605b8d3c4c3SMinchan Kim unlock_page(page); 1606b8d3c4c3SMinchan Kim 1607b8d3c4c3SMinchan Kim if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) { 160858ceeb6bSKirill A. Shutemov pmdp_invalidate(vma, addr, pmd); 1609b8d3c4c3SMinchan Kim orig_pmd = pmd_mkold(orig_pmd); 1610b8d3c4c3SMinchan Kim orig_pmd = pmd_mkclean(orig_pmd); 1611b8d3c4c3SMinchan Kim 1612b8d3c4c3SMinchan Kim set_pmd_at(mm, addr, pmd, orig_pmd); 1613b8d3c4c3SMinchan Kim tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 1614b8d3c4c3SMinchan Kim } 1615802a3a92SShaohua Li 1616802a3a92SShaohua Li mark_page_lazyfree(page); 1617319904adSHuang Ying ret = true; 1618b8d3c4c3SMinchan Kim out: 1619b8d3c4c3SMinchan Kim spin_unlock(ptl); 1620b8d3c4c3SMinchan Kim out_unlocked: 1621b8d3c4c3SMinchan Kim return ret; 1622b8d3c4c3SMinchan Kim } 1623b8d3c4c3SMinchan Kim 1624953c66c2SAneesh Kumar K.V static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) 1625953c66c2SAneesh Kumar K.V { 1626953c66c2SAneesh Kumar K.V pgtable_t pgtable; 1627953c66c2SAneesh Kumar K.V 1628953c66c2SAneesh Kumar K.V pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1629953c66c2SAneesh Kumar K.V pte_free(mm, pgtable); 1630c4812909SKirill A. Shutemov mm_dec_nr_ptes(mm); 1631953c66c2SAneesh Kumar K.V } 1632953c66c2SAneesh Kumar K.V 163371e3aac0SAndrea Arcangeli int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1634f21760b1SShaohua Li pmd_t *pmd, unsigned long addr) 163571e3aac0SAndrea Arcangeli { 1636f5c8ad47SDavid Miller pmd_t orig_pmd; 1637da146769SKirill A. Shutemov spinlock_t *ptl; 1638da146769SKirill A. Shutemov 1639ed6a7935SPeter Zijlstra tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 164007e32661SAneesh Kumar K.V 1641b6ec57f4SKirill A. Shutemov ptl = __pmd_trans_huge_lock(pmd, vma); 1642b6ec57f4SKirill A. Shutemov if (!ptl) 1643da146769SKirill A. 
Shutemov return 0; 1644a6bf2bb0SAneesh Kumar K.V /* 1645a6bf2bb0SAneesh Kumar K.V * For architectures like ppc64 we look at deposited pgtable 16468809aa2dSAneesh Kumar K.V * when calling pmdp_huge_get_and_clear. So do the 1647a6bf2bb0SAneesh Kumar K.V * pgtable_trans_huge_withdraw after finishing pmdp related 1648a6bf2bb0SAneesh Kumar K.V * operations. 1649a6bf2bb0SAneesh Kumar K.V */ 165093a98695SAneesh Kumar K.V orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd, 1651fcbe08d6SMartin Schwidefsky tlb->fullmm); 1652f21760b1SShaohua Li tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 16532484ca9bSThomas Hellstrom (VMware) if (vma_is_special_huge(vma)) { 16543b6521f5SOliver O'Halloran if (arch_needs_pgtable_deposit()) 16553b6521f5SOliver O'Halloran zap_deposited_table(tlb->mm, pmd); 16564897c765SMatthew Wilcox spin_unlock(ptl); 1657da146769SKirill A. Shutemov } else if (is_huge_zero_pmd(orig_pmd)) { 1658c14a6eb4SOliver O'Halloran zap_deposited_table(tlb->mm, pmd); 1659bf929152SKirill A. Shutemov spin_unlock(ptl); 1660479f0abbSKirill A. Shutemov } else { 1661616b8371SZi Yan struct page *page = NULL; 1662616b8371SZi Yan int flush_needed = 1; 1663616b8371SZi Yan 1664616b8371SZi Yan if (pmd_present(orig_pmd)) { 1665616b8371SZi Yan page = pmd_page(orig_pmd); 1666cea86fe2SHugh Dickins page_remove_rmap(page, vma, true); 1667309381feSSasha Levin VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); 1668309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(page), page); 1669616b8371SZi Yan } else if (thp_migration_supported()) { 1670616b8371SZi Yan swp_entry_t entry; 1671616b8371SZi Yan 1672616b8371SZi Yan VM_BUG_ON(!is_pmd_migration_entry(orig_pmd)); 1673616b8371SZi Yan entry = pmd_to_swp_entry(orig_pmd); 1674af5cdaf8SAlistair Popple page = pfn_swap_entry_to_page(entry); 1675616b8371SZi Yan flush_needed = 0; 1676616b8371SZi Yan } else 1677616b8371SZi Yan WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); 1678616b8371SZi Yan 1679b5072380SKirill A. Shutemov if (PageAnon(page)) { 1680c14a6eb4SOliver O'Halloran zap_deposited_table(tlb->mm, pmd); 1681b5072380SKirill A. Shutemov add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); 1682b5072380SKirill A. Shutemov } else { 1683953c66c2SAneesh Kumar K.V if (arch_needs_pgtable_deposit()) 1684953c66c2SAneesh Kumar K.V zap_deposited_table(tlb->mm, pmd); 1685fadae295SYang Shi add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR); 1686b5072380SKirill A. Shutemov } 1687616b8371SZi Yan 1688bf929152SKirill A. Shutemov spin_unlock(ptl); 1689616b8371SZi Yan if (flush_needed) 1690e77b0852SAneesh Kumar K.V tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE); 1691479f0abbSKirill A. Shutemov } 1692da146769SKirill A. Shutemov return 1; 169371e3aac0SAndrea Arcangeli } 169471e3aac0SAndrea Arcangeli 16951dd38b6cSAneesh Kumar K.V #ifndef pmd_move_must_withdraw 16961dd38b6cSAneesh Kumar K.V static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl, 16971dd38b6cSAneesh Kumar K.V spinlock_t *old_pmd_ptl, 16981dd38b6cSAneesh Kumar K.V struct vm_area_struct *vma) 16991dd38b6cSAneesh Kumar K.V { 17001dd38b6cSAneesh Kumar K.V /* 17011dd38b6cSAneesh Kumar K.V * With split pmd lock we also need to move preallocated 17021dd38b6cSAneesh Kumar K.V * PTE page table if new_pmd is on different PMD page table. 17031dd38b6cSAneesh Kumar K.V * 17041dd38b6cSAneesh Kumar K.V * We also don't deposit and withdraw tables for file pages. 
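* Hence the withdraw/re-deposit is needed only for anonymous VMAs whose new pmd falls under a different page table lock.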
17051dd38b6cSAneesh Kumar K.V */ 17061dd38b6cSAneesh Kumar K.V return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); 17071dd38b6cSAneesh Kumar K.V } 17081dd38b6cSAneesh Kumar K.V #endif 17091dd38b6cSAneesh Kumar K.V 1710ab6e3d09SNaoya Horiguchi static pmd_t move_soft_dirty_pmd(pmd_t pmd) 1711ab6e3d09SNaoya Horiguchi { 1712ab6e3d09SNaoya Horiguchi #ifdef CONFIG_MEM_SOFT_DIRTY 1713ab6e3d09SNaoya Horiguchi if (unlikely(is_pmd_migration_entry(pmd))) 1714ab6e3d09SNaoya Horiguchi pmd = pmd_swp_mksoft_dirty(pmd); 1715ab6e3d09SNaoya Horiguchi else if (pmd_present(pmd)) 1716ab6e3d09SNaoya Horiguchi pmd = pmd_mksoft_dirty(pmd); 1717ab6e3d09SNaoya Horiguchi #endif 1718ab6e3d09SNaoya Horiguchi return pmd; 1719ab6e3d09SNaoya Horiguchi } 1720ab6e3d09SNaoya Horiguchi 1721bf8616d5SHugh Dickins bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, 1722b8aa9d9dSWei Yang unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd) 172337a1c49aSAndrea Arcangeli { 1724bf929152SKirill A. Shutemov spinlock_t *old_ptl, *new_ptl; 172537a1c49aSAndrea Arcangeli pmd_t pmd; 172637a1c49aSAndrea Arcangeli struct mm_struct *mm = vma->vm_mm; 17275d190420SAaron Lu bool force_flush = false; 172837a1c49aSAndrea Arcangeli 172937a1c49aSAndrea Arcangeli /* 173037a1c49aSAndrea Arcangeli * The destination pmd shouldn't be established, free_pgtables() 173137a1c49aSAndrea Arcangeli * should have release it. 173237a1c49aSAndrea Arcangeli */ 173337a1c49aSAndrea Arcangeli if (WARN_ON(!pmd_none(*new_pmd))) { 173437a1c49aSAndrea Arcangeli VM_BUG_ON(pmd_trans_huge(*new_pmd)); 17354b471e88SKirill A. Shutemov return false; 173637a1c49aSAndrea Arcangeli } 173737a1c49aSAndrea Arcangeli 1738bf929152SKirill A. Shutemov /* 1739bf929152SKirill A. Shutemov * We don't have to worry about the ordering of src and dst 1740c1e8d7c6SMichel Lespinasse * ptlocks because exclusive mmap_lock prevents deadlock. 1741bf929152SKirill A. Shutemov */ 1742b6ec57f4SKirill A. Shutemov old_ptl = __pmd_trans_huge_lock(old_pmd, vma); 1743b6ec57f4SKirill A. Shutemov if (old_ptl) { 1744bf929152SKirill A. Shutemov new_ptl = pmd_lockptr(mm, new_pmd); 1745bf929152SKirill A. Shutemov if (new_ptl != old_ptl) 1746bf929152SKirill A. Shutemov spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); 17478809aa2dSAneesh Kumar K.V pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); 1748eb66ae03SLinus Torvalds if (pmd_present(pmd)) 1749a2ce2666SAaron Lu force_flush = true; 175037a1c49aSAndrea Arcangeli VM_BUG_ON(!pmd_none(*new_pmd)); 17513592806cSKirill A. Shutemov 17521dd38b6cSAneesh Kumar K.V if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) { 1753b3084f4dSAneesh Kumar K.V pgtable_t pgtable; 17543592806cSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); 17553592806cSKirill A. Shutemov pgtable_trans_huge_deposit(mm, new_pmd, pgtable); 17563592806cSKirill A. Shutemov } 1757ab6e3d09SNaoya Horiguchi pmd = move_soft_dirty_pmd(pmd); 1758ab6e3d09SNaoya Horiguchi set_pmd_at(mm, new_addr, new_pmd, pmd); 17595d190420SAaron Lu if (force_flush) 17607c38f181SMiaohe Lin flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE); 1761eb66ae03SLinus Torvalds if (new_ptl != old_ptl) 1762eb66ae03SLinus Torvalds spin_unlock(new_ptl); 1763bf929152SKirill A. Shutemov spin_unlock(old_ptl); 17644b471e88SKirill A. Shutemov return true; 176537a1c49aSAndrea Arcangeli } 17664b471e88SKirill A. 
Shutemov return false; 176737a1c49aSAndrea Arcangeli } 176837a1c49aSAndrea Arcangeli 1769f123d74aSMel Gorman /* 1770f123d74aSMel Gorman * Returns 1771f123d74aSMel Gorman * - 0 if PMD could not be locked 1772f0953a1bSIngo Molnar * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary 1773e346e668SYang Shi * or if prot_numa but THP migration is not supported 1774f0953a1bSIngo Molnar * - HPAGE_PMD_NR if protections changed and TLB flush necessary 1775f123d74aSMel Gorman */ 17764a18419fSNadav Amit int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 17774a18419fSNadav Amit pmd_t *pmd, unsigned long addr, pgprot_t newprot, 17784a18419fSNadav Amit unsigned long cp_flags) 1779cd7548abSJohannes Weiner { 1780cd7548abSJohannes Weiner struct mm_struct *mm = vma->vm_mm; 1781bf929152SKirill A. Shutemov spinlock_t *ptl; 1782c9fe6656SNadav Amit pmd_t oldpmd, entry; 17830a85e51dSKirill A. Shutemov bool preserve_write; 17840a85e51dSKirill A. Shutemov int ret; 178558705444SPeter Xu bool prot_numa = cp_flags & MM_CP_PROT_NUMA; 1786292924b2SPeter Xu bool uffd_wp = cp_flags & MM_CP_UFFD_WP; 1787292924b2SPeter Xu bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; 1788cd7548abSJohannes Weiner 17894a18419fSNadav Amit tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 17904a18419fSNadav Amit 1791e346e668SYang Shi if (prot_numa && !thp_migration_supported()) 1792e346e668SYang Shi return 1; 1793e346e668SYang Shi 1794b6ec57f4SKirill A. Shutemov ptl = __pmd_trans_huge_lock(pmd, vma); 17950a85e51dSKirill A. Shutemov if (!ptl) 17960a85e51dSKirill A. Shutemov return 0; 17970a85e51dSKirill A. Shutemov 17980a85e51dSKirill A. Shutemov preserve_write = prot_numa && pmd_write(*pmd); 1799ba68bc01SMel Gorman ret = 1; 1800e944fd67SMel Gorman 180184c3fc4eSZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 180284c3fc4eSZi Yan if (is_swap_pmd(*pmd)) { 180384c3fc4eSZi Yan swp_entry_t entry = pmd_to_swp_entry(*pmd); 18046c287605SDavid Hildenbrand struct page *page = pfn_swap_entry_to_page(entry); 180584c3fc4eSZi Yan 180684c3fc4eSZi Yan VM_BUG_ON(!is_pmd_migration_entry(*pmd)); 18074dd845b5SAlistair Popple if (is_writable_migration_entry(entry)) { 180884c3fc4eSZi Yan pmd_t newpmd; 180984c3fc4eSZi Yan /* 181084c3fc4eSZi Yan * A protection check is difficult so 181184c3fc4eSZi Yan * just be safe and disable write 181284c3fc4eSZi Yan */ 18136c287605SDavid Hildenbrand if (PageAnon(page)) 18146c287605SDavid Hildenbrand entry = make_readable_exclusive_migration_entry(swp_offset(entry)); 18156c287605SDavid Hildenbrand else 18166c287605SDavid Hildenbrand entry = make_readable_migration_entry(swp_offset(entry)); 181784c3fc4eSZi Yan newpmd = swp_entry_to_pmd(entry); 1818ab6e3d09SNaoya Horiguchi if (pmd_swp_soft_dirty(*pmd)) 1819ab6e3d09SNaoya Horiguchi newpmd = pmd_swp_mksoft_dirty(newpmd); 18208f34f1eaSPeter Xu if (pmd_swp_uffd_wp(*pmd)) 18218f34f1eaSPeter Xu newpmd = pmd_swp_mkuffd_wp(newpmd); 182284c3fc4eSZi Yan set_pmd_at(mm, addr, pmd, newpmd); 182384c3fc4eSZi Yan } 182484c3fc4eSZi Yan goto unlock; 182584c3fc4eSZi Yan } 182684c3fc4eSZi Yan #endif 182784c3fc4eSZi Yan 1828a1a3a2fcSHuang Ying if (prot_numa) { 1829a1a3a2fcSHuang Ying struct page *page; 183033024536SHuang Ying bool toptier; 1831e944fd67SMel Gorman /* 1832e944fd67SMel Gorman * Avoid trapping faults against the zero page. The read-only 1833e944fd67SMel Gorman * data is likely to be read-cached on the local CPU and 1834e944fd67SMel Gorman * local/remote hits to the zero page are not interesting. 
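* Skip the huge zero pmd here; a pmd that is already protnone is skipped as well, since there is nothing left to do for it.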
1835e944fd67SMel Gorman */ 1836a1a3a2fcSHuang Ying if (is_huge_zero_pmd(*pmd)) 18370a85e51dSKirill A. Shutemov goto unlock; 1838e944fd67SMel Gorman 1839a1a3a2fcSHuang Ying if (pmd_protnone(*pmd)) 18400a85e51dSKirill A. Shutemov goto unlock; 18410a85e51dSKirill A. Shutemov 1842a1a3a2fcSHuang Ying page = pmd_page(*pmd); 184333024536SHuang Ying toptier = node_is_toptier(page_to_nid(page)); 1844a1a3a2fcSHuang Ying /* 1845a1a3a2fcSHuang Ying * Skip scanning top tier node if normal numa 1846a1a3a2fcSHuang Ying * balancing is disabled 1847a1a3a2fcSHuang Ying */ 1848a1a3a2fcSHuang Ying if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) && 184933024536SHuang Ying toptier) 1850a1a3a2fcSHuang Ying goto unlock; 185133024536SHuang Ying 185233024536SHuang Ying if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING && 185333024536SHuang Ying !toptier) 185433024536SHuang Ying xchg_page_access_time(page, jiffies_to_msecs(jiffies)); 1855a1a3a2fcSHuang Ying } 1856ced10803SKirill A. Shutemov /* 18573e4e28c5SMichel Lespinasse * In case prot_numa, we are under mmap_read_lock(mm). It's critical 1858ced10803SKirill A. Shutemov * to not clear pmd intermittently to avoid race with MADV_DONTNEED 18593e4e28c5SMichel Lespinasse * which is also under mmap_read_lock(mm): 1860ced10803SKirill A. Shutemov * 1861ced10803SKirill A. Shutemov * CPU0: CPU1: 1862ced10803SKirill A. Shutemov * change_huge_pmd(prot_numa=1) 1863ced10803SKirill A. Shutemov * pmdp_huge_get_and_clear_notify() 1864ced10803SKirill A. Shutemov * madvise_dontneed() 1865ced10803SKirill A. Shutemov * zap_pmd_range() 1866ced10803SKirill A. Shutemov * pmd_trans_huge(*pmd) == 0 (without ptl) 1867ced10803SKirill A. Shutemov * // skip the pmd 1868ced10803SKirill A. Shutemov * set_pmd_at(); 1869ced10803SKirill A. Shutemov * // pmd is re-established 1870ced10803SKirill A. Shutemov * 1871ced10803SKirill A. Shutemov * The race makes MADV_DONTNEED miss the huge pmd and don't clear it 1872ced10803SKirill A. Shutemov * which may break userspace. 1873ced10803SKirill A. Shutemov * 18744f831457SNadav Amit * pmdp_invalidate_ad() is required to make sure we don't miss 1875ced10803SKirill A. Shutemov * dirty/young flags set by hardware. 1876ced10803SKirill A. Shutemov */ 18774f831457SNadav Amit oldpmd = pmdp_invalidate_ad(vma, addr, pmd); 1878ced10803SKirill A. Shutemov 1879c9fe6656SNadav Amit entry = pmd_modify(oldpmd, newprot); 1880b191f9b1SMel Gorman if (preserve_write) 1881288bc549SAneesh Kumar K.V entry = pmd_mk_savedwrite(entry); 1882292924b2SPeter Xu if (uffd_wp) { 1883292924b2SPeter Xu entry = pmd_wrprotect(entry); 1884292924b2SPeter Xu entry = pmd_mkuffd_wp(entry); 1885292924b2SPeter Xu } else if (uffd_wp_resolve) { 1886292924b2SPeter Xu /* 1887292924b2SPeter Xu * Leave the write bit to be handled by PF interrupt 1888292924b2SPeter Xu * handler, then things like COW could be properly 1889292924b2SPeter Xu * handled. 1890292924b2SPeter Xu */ 1891292924b2SPeter Xu entry = pmd_clear_uffd_wp(entry); 1892292924b2SPeter Xu } 1893f123d74aSMel Gorman ret = HPAGE_PMD_NR; 189456eecdb9SAneesh Kumar K.V set_pmd_at(mm, addr, pmd, entry); 18954a18419fSNadav Amit 1896c9fe6656SNadav Amit if (huge_pmd_needs_flush(oldpmd, entry)) 18974a18419fSNadav Amit tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE); 18984a18419fSNadav Amit 18990a85e51dSKirill A. Shutemov BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry)); 19000a85e51dSKirill A. Shutemov unlock: 1901bf929152SKirill A. 
Shutemov spin_unlock(ptl); 1902cd7548abSJohannes Weiner return ret; 1903cd7548abSJohannes Weiner } 1904cd7548abSJohannes Weiner 1905025c5b24SNaoya Horiguchi /* 19068f19b0c0SHuang Ying * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise. 1907025c5b24SNaoya Horiguchi * 19088f19b0c0SHuang Ying * Note that if it returns page table lock pointer, this routine returns without 19098f19b0c0SHuang Ying * unlocking page table lock. So callers must unlock it. 1910025c5b24SNaoya Horiguchi */ 1911b6ec57f4SKirill A. Shutemov spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) 1912025c5b24SNaoya Horiguchi { 1913b6ec57f4SKirill A. Shutemov spinlock_t *ptl; 1914b6ec57f4SKirill A. Shutemov ptl = pmd_lock(vma->vm_mm, pmd); 191584c3fc4eSZi Yan if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || 191684c3fc4eSZi Yan pmd_devmap(*pmd))) 1917b6ec57f4SKirill A. Shutemov return ptl; 1918b6ec57f4SKirill A. Shutemov spin_unlock(ptl); 1919b6ec57f4SKirill A. Shutemov return NULL; 1920025c5b24SNaoya Horiguchi } 1921025c5b24SNaoya Horiguchi 1922a00cc7d9SMatthew Wilcox /* 1923d965e390SMiaohe Lin * Returns page table lock pointer if a given pud maps a thp, NULL otherwise. 1924a00cc7d9SMatthew Wilcox * 1925d965e390SMiaohe Lin * Note that if it returns page table lock pointer, this routine returns without 1926d965e390SMiaohe Lin * unlocking page table lock. So callers must unlock it. 1927a00cc7d9SMatthew Wilcox */ 1928a00cc7d9SMatthew Wilcox spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) 1929a00cc7d9SMatthew Wilcox { 1930a00cc7d9SMatthew Wilcox spinlock_t *ptl; 1931a00cc7d9SMatthew Wilcox 1932a00cc7d9SMatthew Wilcox ptl = pud_lock(vma->vm_mm, pud); 1933a00cc7d9SMatthew Wilcox if (likely(pud_trans_huge(*pud) || pud_devmap(*pud))) 1934a00cc7d9SMatthew Wilcox return ptl; 1935a00cc7d9SMatthew Wilcox spin_unlock(ptl); 1936a00cc7d9SMatthew Wilcox return NULL; 1937a00cc7d9SMatthew Wilcox } 1938a00cc7d9SMatthew Wilcox 1939a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 1940a00cc7d9SMatthew Wilcox int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, 1941a00cc7d9SMatthew Wilcox pud_t *pud, unsigned long addr) 1942a00cc7d9SMatthew Wilcox { 1943a00cc7d9SMatthew Wilcox spinlock_t *ptl; 1944a00cc7d9SMatthew Wilcox 1945a00cc7d9SMatthew Wilcox ptl = __pud_trans_huge_lock(pud, vma); 1946a00cc7d9SMatthew Wilcox if (!ptl) 1947a00cc7d9SMatthew Wilcox return 0; 194874929079SMiaohe Lin 194970516b93SQian Cai pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm); 1950a00cc7d9SMatthew Wilcox tlb_remove_pud_tlb_entry(tlb, pud, addr); 19512484ca9bSThomas Hellstrom (VMware) if (vma_is_special_huge(vma)) { 1952a00cc7d9SMatthew Wilcox spin_unlock(ptl); 1953a00cc7d9SMatthew Wilcox /* No zero page support yet */ 1954a00cc7d9SMatthew Wilcox } else { 1955a00cc7d9SMatthew Wilcox /* No support for anonymous PUD pages yet */ 1956a00cc7d9SMatthew Wilcox BUG(); 1957a00cc7d9SMatthew Wilcox } 1958a00cc7d9SMatthew Wilcox return 1; 1959a00cc7d9SMatthew Wilcox } 1960a00cc7d9SMatthew Wilcox 1961a00cc7d9SMatthew Wilcox static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, 1962a00cc7d9SMatthew Wilcox unsigned long haddr) 1963a00cc7d9SMatthew Wilcox { 1964a00cc7d9SMatthew Wilcox VM_BUG_ON(haddr & ~HPAGE_PUD_MASK); 1965a00cc7d9SMatthew Wilcox VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 1966a00cc7d9SMatthew Wilcox VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); 1967a00cc7d9SMatthew Wilcox VM_BUG_ON(!pud_trans_huge(*pud) 
&& !pud_devmap(*pud)); 1968a00cc7d9SMatthew Wilcox 1969ce9311cfSYisheng Xie count_vm_event(THP_SPLIT_PUD); 1970a00cc7d9SMatthew Wilcox 1971a00cc7d9SMatthew Wilcox pudp_huge_clear_flush_notify(vma, haddr, pud); 1972a00cc7d9SMatthew Wilcox } 1973a00cc7d9SMatthew Wilcox 1974a00cc7d9SMatthew Wilcox void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, 1975a00cc7d9SMatthew Wilcox unsigned long address) 1976a00cc7d9SMatthew Wilcox { 1977a00cc7d9SMatthew Wilcox spinlock_t *ptl; 1978ac46d4f3SJérôme Glisse struct mmu_notifier_range range; 1979a00cc7d9SMatthew Wilcox 19807269f999SJérôme Glisse mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, 19816f4f13e8SJérôme Glisse address & HPAGE_PUD_MASK, 1982ac46d4f3SJérôme Glisse (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE); 1983ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 1984ac46d4f3SJérôme Glisse ptl = pud_lock(vma->vm_mm, pud); 1985a00cc7d9SMatthew Wilcox if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud))) 1986a00cc7d9SMatthew Wilcox goto out; 1987ac46d4f3SJérôme Glisse __split_huge_pud_locked(vma, pud, range.start); 1988a00cc7d9SMatthew Wilcox 1989a00cc7d9SMatthew Wilcox out: 1990a00cc7d9SMatthew Wilcox spin_unlock(ptl); 19914645b9feSJérôme Glisse /* 19924645b9feSJérôme Glisse * No need to double call mmu_notifier->invalidate_range() callback as 19934645b9feSJérôme Glisse * the above pudp_huge_clear_flush_notify() did already call it. 19944645b9feSJérôme Glisse */ 1995ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_only_end(&range); 1996a00cc7d9SMatthew Wilcox } 1997a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 1998a00cc7d9SMatthew Wilcox 1999eef1b3baSKirill A. Shutemov static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, 2000eef1b3baSKirill A. Shutemov unsigned long haddr, pmd_t *pmd) 2001eef1b3baSKirill A. Shutemov { 2002eef1b3baSKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 2003eef1b3baSKirill A. Shutemov pgtable_t pgtable; 2004eef1b3baSKirill A. Shutemov pmd_t _pmd; 2005eef1b3baSKirill A. Shutemov int i; 2006eef1b3baSKirill A. Shutemov 20070f10851eSJérôme Glisse /* 20080f10851eSJérôme Glisse * Leave pmd empty until pte is filled note that it is fine to delay 20090f10851eSJérôme Glisse * notification until mmu_notifier_invalidate_range_end() as we are 20100f10851eSJérôme Glisse * replacing a zero pmd write protected page with a zero pte write 20110f10851eSJérôme Glisse * protected page. 20120f10851eSJérôme Glisse * 2013ee65728eSMike Rapoport * See Documentation/mm/mmu_notifier.rst 20140f10851eSJérôme Glisse */ 20150f10851eSJérôme Glisse pmdp_huge_clear_flush(vma, haddr, pmd); 2016eef1b3baSKirill A. Shutemov 2017eef1b3baSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2018eef1b3baSKirill A. Shutemov pmd_populate(mm, &_pmd, pgtable); 2019eef1b3baSKirill A. Shutemov 2020eef1b3baSKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 2021eef1b3baSKirill A. Shutemov pte_t *pte, entry; 2022eef1b3baSKirill A. Shutemov entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); 2023eef1b3baSKirill A. Shutemov entry = pte_mkspecial(entry); 2024eef1b3baSKirill A. Shutemov pte = pte_offset_map(&_pmd, haddr); 2025eef1b3baSKirill A. Shutemov VM_BUG_ON(!pte_none(*pte)); 2026eef1b3baSKirill A. Shutemov set_pte_at(mm, haddr, pte, entry); 2027eef1b3baSKirill A. Shutemov pte_unmap(pte); 2028eef1b3baSKirill A. Shutemov } 2029eef1b3baSKirill A. Shutemov smp_wmb(); /* make pte visible before pmd */ 2030eef1b3baSKirill A. 
Shutemov pmd_populate(mm, pmd, pgtable); 2031eef1b3baSKirill A. Shutemov } 2032eef1b3baSKirill A. Shutemov 2033eef1b3baSKirill A. Shutemov static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, 2034ba988280SKirill A. Shutemov unsigned long haddr, bool freeze) 2035eef1b3baSKirill A. Shutemov { 2036eef1b3baSKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 2037eef1b3baSKirill A. Shutemov struct page *page; 2038eef1b3baSKirill A. Shutemov pgtable_t pgtable; 2039423ac9afSAneesh Kumar K.V pmd_t old_pmd, _pmd; 2040292924b2SPeter Xu bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false; 20410ccf7f16SPeter Xu bool anon_exclusive = false, dirty = false; 20422ac015e2SKirill A. Shutemov unsigned long addr; 2043eef1b3baSKirill A. Shutemov int i; 2044eef1b3baSKirill A. Shutemov 2045eef1b3baSKirill A. Shutemov VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); 2046eef1b3baSKirill A. Shutemov VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 2047eef1b3baSKirill A. Shutemov VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); 204884c3fc4eSZi Yan VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd) 204984c3fc4eSZi Yan && !pmd_devmap(*pmd)); 2050eef1b3baSKirill A. Shutemov 2051eef1b3baSKirill A. Shutemov count_vm_event(THP_SPLIT_PMD); 2052eef1b3baSKirill A. Shutemov 2053d21b9e57SKirill A. Shutemov if (!vma_is_anonymous(vma)) { 205499fa8a48SHugh Dickins old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); 2055953c66c2SAneesh Kumar K.V /* 2056953c66c2SAneesh Kumar K.V * We are going to unmap this huge page. So 2057953c66c2SAneesh Kumar K.V * just go ahead and zap it 2058953c66c2SAneesh Kumar K.V */ 2059953c66c2SAneesh Kumar K.V if (arch_needs_pgtable_deposit()) 2060953c66c2SAneesh Kumar K.V zap_deposited_table(mm, pmd); 20612484ca9bSThomas Hellstrom (VMware) if (vma_is_special_huge(vma)) 2062d21b9e57SKirill A. Shutemov return; 206399fa8a48SHugh Dickins if (unlikely(is_pmd_migration_entry(old_pmd))) { 206499fa8a48SHugh Dickins swp_entry_t entry; 206599fa8a48SHugh Dickins 206699fa8a48SHugh Dickins entry = pmd_to_swp_entry(old_pmd); 2067af5cdaf8SAlistair Popple page = pfn_swap_entry_to_page(entry); 206899fa8a48SHugh Dickins } else { 206999fa8a48SHugh Dickins page = pmd_page(old_pmd); 207099fa8a48SHugh Dickins if (!PageDirty(page) && pmd_dirty(old_pmd)) 2071e1f1b157SHugh Dickins set_page_dirty(page); 207299fa8a48SHugh Dickins if (!PageReferenced(page) && pmd_young(old_pmd)) 2073d21b9e57SKirill A. Shutemov SetPageReferenced(page); 2074cea86fe2SHugh Dickins page_remove_rmap(page, vma, true); 2075d21b9e57SKirill A. Shutemov put_page(page); 207699fa8a48SHugh Dickins } 2077fadae295SYang Shi add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR); 2078eef1b3baSKirill A. Shutemov return; 207999fa8a48SHugh Dickins } 208099fa8a48SHugh Dickins 20813b77e8c8SHugh Dickins if (is_huge_zero_pmd(*pmd)) { 20824645b9feSJérôme Glisse /* 20834645b9feSJérôme Glisse * FIXME: Do we want to invalidate secondary mmu by calling 20844645b9feSJérôme Glisse * mmu_notifier_invalidate_range() see comments below inside 20854645b9feSJérôme Glisse * __split_huge_pmd() ? 20864645b9feSJérôme Glisse * 20874645b9feSJérôme Glisse * We are going from a zero huge page write protected to zero 20884645b9feSJérôme Glisse * small page also write protected so it does not seems useful 20894645b9feSJérôme Glisse * to invalidate secondary mmu at this time. 20904645b9feSJérôme Glisse */ 2091eef1b3baSKirill A. Shutemov return __split_huge_zero_page_pmd(vma, haddr, pmd); 2092eef1b3baSKirill A. 
Shutemov } 2093eef1b3baSKirill A. Shutemov 2094423ac9afSAneesh Kumar K.V /* 2095423ac9afSAneesh Kumar K.V * Up to this point the pmd is present and huge and userland has the 2096423ac9afSAneesh Kumar K.V * whole access to the hugepage during the split (which happens in 2097423ac9afSAneesh Kumar K.V * place). If we overwrite the pmd with the not-huge version pointing 2098423ac9afSAneesh Kumar K.V * to the pte here (which of course we could if all CPUs were bug 2099423ac9afSAneesh Kumar K.V * free), userland could trigger a small page size TLB miss on the 2100423ac9afSAneesh Kumar K.V * small sized TLB while the hugepage TLB entry is still established in 2101423ac9afSAneesh Kumar K.V * the huge TLB. Some CPUs don't like that. 210242742d9bSAlexander A. Klimov * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum 210342742d9bSAlexander A. Klimov * 383 on page 105. Intel should be safe but also warns that it's 2104423ac9afSAneesh Kumar K.V * only safe if the permission and cache attributes of the two entries 2105423ac9afSAneesh Kumar K.V * loaded in the two TLBs are identical (which should be the case here). 2106423ac9afSAneesh Kumar K.V * But it is generally safer to never allow small and huge TLB entries 2107423ac9afSAneesh Kumar K.V * for the same virtual address to be loaded simultaneously. So instead 2108423ac9afSAneesh Kumar K.V * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the 2109423ac9afSAneesh Kumar K.V * current pmd notpresent (atomically because here the pmd_trans_huge 2110423ac9afSAneesh Kumar K.V * must remain set at all times on the pmd until the split is complete 2111423ac9afSAneesh Kumar K.V * for this pmd), then we flush the SMP TLB and finally we write the 2112423ac9afSAneesh Kumar K.V * non-huge version of the pmd entry with pmd_populate.
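* pmdp_invalidate() below does exactly that: it makes the pmd notpresent and flushes, handing back the old value from which the ptes are built.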
2113423ac9afSAneesh Kumar K.V */ 2114423ac9afSAneesh Kumar K.V old_pmd = pmdp_invalidate(vma, haddr, pmd); 2115423ac9afSAneesh Kumar K.V 2116423ac9afSAneesh Kumar K.V pmd_migration = is_pmd_migration_entry(old_pmd); 21172e83ee1dSPeter Xu if (unlikely(pmd_migration)) { 211884c3fc4eSZi Yan swp_entry_t entry; 211984c3fc4eSZi Yan 2120423ac9afSAneesh Kumar K.V entry = pmd_to_swp_entry(old_pmd); 2121af5cdaf8SAlistair Popple page = pfn_swap_entry_to_page(entry); 21224dd845b5SAlistair Popple write = is_writable_migration_entry(entry); 21236c287605SDavid Hildenbrand if (PageAnon(page)) 21246c287605SDavid Hildenbrand anon_exclusive = is_readable_exclusive_migration_entry(entry); 21252e346877SPeter Xu young = is_migration_entry_young(entry); 21262e346877SPeter Xu dirty = is_migration_entry_dirty(entry); 21272e83ee1dSPeter Xu soft_dirty = pmd_swp_soft_dirty(old_pmd); 2128f45ec5ffSPeter Xu uffd_wp = pmd_swp_uffd_wp(old_pmd); 21292e83ee1dSPeter Xu } else { 2130423ac9afSAneesh Kumar K.V page = pmd_page(old_pmd); 21310ccf7f16SPeter Xu if (pmd_dirty(old_pmd)) { 21320ccf7f16SPeter Xu dirty = true; 2133423ac9afSAneesh Kumar K.V SetPageDirty(page); 21340ccf7f16SPeter Xu } 2135423ac9afSAneesh Kumar K.V write = pmd_write(old_pmd); 2136423ac9afSAneesh Kumar K.V young = pmd_young(old_pmd); 2137423ac9afSAneesh Kumar K.V soft_dirty = pmd_soft_dirty(old_pmd); 2138292924b2SPeter Xu uffd_wp = pmd_uffd_wp(old_pmd); 21396c287605SDavid Hildenbrand 21402e83ee1dSPeter Xu VM_BUG_ON_PAGE(!page_count(page), page); 21412e83ee1dSPeter Xu page_ref_add(page, HPAGE_PMD_NR - 1); 21426c287605SDavid Hildenbrand 21436c287605SDavid Hildenbrand /* 21446c287605SDavid Hildenbrand * Without "freeze", we'll simply split the PMD, propagating the 21456c287605SDavid Hildenbrand * PageAnonExclusive() flag for each PTE by setting it for 21466c287605SDavid Hildenbrand * each subpage -- no need to (temporarily) clear. 21476c287605SDavid Hildenbrand * 21486c287605SDavid Hildenbrand * With "freeze" we want to replace mapped pages by 21496c287605SDavid Hildenbrand * migration entries right away. This is only possible if we 21506c287605SDavid Hildenbrand * managed to clear PageAnonExclusive() -- see 21516c287605SDavid Hildenbrand * set_pmd_migration_entry(). 21526c287605SDavid Hildenbrand * 21536c287605SDavid Hildenbrand * In case we cannot clear PageAnonExclusive(), split the PMD 21546c287605SDavid Hildenbrand * only and let try_to_migrate_one() fail later. 2155088b8aa5SDavid Hildenbrand * 2156088b8aa5SDavid Hildenbrand * See page_try_share_anon_rmap(): invalidate PMD first. 21576c287605SDavid Hildenbrand */ 21586c287605SDavid Hildenbrand anon_exclusive = PageAnon(page) && PageAnonExclusive(page); 21596c287605SDavid Hildenbrand if (freeze && anon_exclusive && page_try_share_anon_rmap(page)) 21606c287605SDavid Hildenbrand freeze = false; 21619d84604bSHugh Dickins } 2162eef1b3baSKirill A. Shutemov 2163423ac9afSAneesh Kumar K.V /* 2164423ac9afSAneesh Kumar K.V * Withdraw the table only after we mark the pmd entry invalid. 2165423ac9afSAneesh Kumar K.V * This's critical for some architectures (Power). 2166423ac9afSAneesh Kumar K.V */ 2167eef1b3baSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2168eef1b3baSKirill A. Shutemov pmd_populate(mm, &_pmd, pgtable); 2169eef1b3baSKirill A. Shutemov 21702ac015e2SKirill A. Shutemov for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { 2171eef1b3baSKirill A. Shutemov pte_t entry, *pte; 2172eef1b3baSKirill A. Shutemov /* 2173eef1b3baSKirill A. 
Shutemov * Note that NUMA hinting access restrictions are not 2174eef1b3baSKirill A. Shutemov * transferred to avoid any possibility of altering 2175eef1b3baSKirill A. Shutemov * permissions across VMAs. 2176eef1b3baSKirill A. Shutemov */ 217784c3fc4eSZi Yan if (freeze || pmd_migration) { 2178ba988280SKirill A. Shutemov swp_entry_t swp_entry; 21794dd845b5SAlistair Popple if (write) 21804dd845b5SAlistair Popple swp_entry = make_writable_migration_entry( 21814dd845b5SAlistair Popple page_to_pfn(page + i)); 21826c287605SDavid Hildenbrand else if (anon_exclusive) 21836c287605SDavid Hildenbrand swp_entry = make_readable_exclusive_migration_entry( 21846c287605SDavid Hildenbrand page_to_pfn(page + i)); 21854dd845b5SAlistair Popple else 21864dd845b5SAlistair Popple swp_entry = make_readable_migration_entry( 21874dd845b5SAlistair Popple page_to_pfn(page + i)); 21882e346877SPeter Xu if (young) 21892e346877SPeter Xu swp_entry = make_migration_entry_young(swp_entry); 21902e346877SPeter Xu if (dirty) 21912e346877SPeter Xu swp_entry = make_migration_entry_dirty(swp_entry); 2192ba988280SKirill A. Shutemov entry = swp_entry_to_pte(swp_entry); 2193804dd150SAndrea Arcangeli if (soft_dirty) 2194804dd150SAndrea Arcangeli entry = pte_swp_mksoft_dirty(entry); 2195f45ec5ffSPeter Xu if (uffd_wp) 2196f45ec5ffSPeter Xu entry = pte_swp_mkuffd_wp(entry); 2197ba988280SKirill A. Shutemov } else { 21986d2329f8SAndrea Arcangeli entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); 2199b8d3c4c3SMinchan Kim entry = maybe_mkwrite(entry, vma); 22006c287605SDavid Hildenbrand if (anon_exclusive) 22016c287605SDavid Hildenbrand SetPageAnonExclusive(page + i); 2202eef1b3baSKirill A. Shutemov if (!write) 2203eef1b3baSKirill A. Shutemov entry = pte_wrprotect(entry); 2204eef1b3baSKirill A. Shutemov if (!young) 2205eef1b3baSKirill A. Shutemov entry = pte_mkold(entry); 22060ccf7f16SPeter Xu /* NOTE: this may set soft-dirty too on some archs */ 22070ccf7f16SPeter Xu if (dirty) 22080ccf7f16SPeter Xu entry = pte_mkdirty(entry); 2209804dd150SAndrea Arcangeli if (soft_dirty) 2210804dd150SAndrea Arcangeli entry = pte_mksoft_dirty(entry); 2211292924b2SPeter Xu if (uffd_wp) 2212292924b2SPeter Xu entry = pte_mkuffd_wp(entry); 2213ba988280SKirill A. Shutemov } 22142ac015e2SKirill A. Shutemov pte = pte_offset_map(&_pmd, addr); 2215eef1b3baSKirill A. Shutemov BUG_ON(!pte_none(*pte)); 22162ac015e2SKirill A. Shutemov set_pte_at(mm, addr, pte, entry); 2217ec0abae6SRalph Campbell if (!pmd_migration) 2218eef1b3baSKirill A. Shutemov atomic_inc(&page[i]._mapcount); 2219eef1b3baSKirill A. Shutemov pte_unmap(pte); 2220eef1b3baSKirill A. Shutemov } 2221eef1b3baSKirill A. Shutemov 2222ec0abae6SRalph Campbell if (!pmd_migration) { 2223eef1b3baSKirill A. Shutemov /* 2224eef1b3baSKirill A. Shutemov * Set PG_double_map before dropping compound_mapcount to avoid 2225eef1b3baSKirill A. Shutemov * false-negative page_mapped(). 2226eef1b3baSKirill A. Shutemov */ 2227ec0abae6SRalph Campbell if (compound_mapcount(page) > 1 && 2228ec0abae6SRalph Campbell !TestSetPageDoubleMap(page)) { 2229eef1b3baSKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) 2230eef1b3baSKirill A. Shutemov atomic_inc(&page[i]._mapcount); 2231eef1b3baSKirill A. Shutemov } 2232eef1b3baSKirill A. Shutemov 2233468c3982SJohannes Weiner lock_page_memcg(page); 2234eef1b3baSKirill A. Shutemov if (atomic_add_negative(-1, compound_mapcount_ptr(page))) { 2235eef1b3baSKirill A. Shutemov /* Last compound_mapcount is gone. 
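 * The THP is no longer mapped by any pmd: drop the NR_ANON_THPS
 * accounting and, if the subpages were also pte-mapped, clear the
 * DoubleMap state and the per-subpage _mapcount references it held.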
*/ 223669473e5dSMuchun Song __mod_lruvec_page_state(page, NR_ANON_THPS, 223769473e5dSMuchun Song -HPAGE_PMD_NR); 2238eef1b3baSKirill A. Shutemov if (TestClearPageDoubleMap(page)) { 2239eef1b3baSKirill A. Shutemov /* No need in mapcount reference anymore */ 2240eef1b3baSKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) 2241eef1b3baSKirill A. Shutemov atomic_dec(&page[i]._mapcount); 2242eef1b3baSKirill A. Shutemov } 2243eef1b3baSKirill A. Shutemov } 2244468c3982SJohannes Weiner unlock_page_memcg(page); 2245cea86fe2SHugh Dickins 2246cea86fe2SHugh Dickins /* Above is effectively page_remove_rmap(page, vma, true) */ 2247cea86fe2SHugh Dickins munlock_vma_page(page, vma, true); 2248ec0abae6SRalph Campbell } 2249eef1b3baSKirill A. Shutemov 2250eef1b3baSKirill A. Shutemov smp_wmb(); /* make pte visible before pmd */ 2251eef1b3baSKirill A. Shutemov pmd_populate(mm, pmd, pgtable); 2252e9b61f19SKirill A. Shutemov 2253e9b61f19SKirill A. Shutemov if (freeze) { 22542ac015e2SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) { 2255cea86fe2SHugh Dickins page_remove_rmap(page + i, vma, false); 2256e9b61f19SKirill A. Shutemov put_page(page + i); 2257e9b61f19SKirill A. Shutemov } 2258e9b61f19SKirill A. Shutemov } 2259eef1b3baSKirill A. Shutemov } 2260eef1b3baSKirill A. Shutemov 2261eef1b3baSKirill A. Shutemov void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 2262af28a988SMatthew Wilcox (Oracle) unsigned long address, bool freeze, struct folio *folio) 2263eef1b3baSKirill A. Shutemov { 2264eef1b3baSKirill A. Shutemov spinlock_t *ptl; 2265ac46d4f3SJérôme Glisse struct mmu_notifier_range range; 2266eef1b3baSKirill A. Shutemov 22677269f999SJérôme Glisse mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, 22686f4f13e8SJérôme Glisse address & HPAGE_PMD_MASK, 2269ac46d4f3SJérôme Glisse (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE); 2270ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 2271ac46d4f3SJérôme Glisse ptl = pmd_lock(vma->vm_mm, pmd); 227233f4751eSNaoya Horiguchi 227333f4751eSNaoya Horiguchi /* 2274af28a988SMatthew Wilcox (Oracle) * If caller asks to setup a migration entry, we need a folio to check 2275af28a988SMatthew Wilcox (Oracle) * pmd against. Otherwise we can end up replacing wrong folio. 227633f4751eSNaoya Horiguchi */ 2277af28a988SMatthew Wilcox (Oracle) VM_BUG_ON(freeze && !folio); 227883a8441fSMatthew Wilcox (Oracle) VM_WARN_ON_ONCE(folio && !folio_test_locked(folio)); 227933f4751eSNaoya Horiguchi 22807f760917SDavid Hildenbrand if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) || 228183a8441fSMatthew Wilcox (Oracle) is_pmd_migration_entry(*pmd)) { 2282cea33328SMiaohe Lin /* 2283cea33328SMiaohe Lin * It's safe to call pmd_page when folio is set because it's 2284cea33328SMiaohe Lin * guaranteed that pmd is present. 2285cea33328SMiaohe Lin */ 228683a8441fSMatthew Wilcox (Oracle) if (folio && folio != page_folio(pmd_page(*pmd))) 228783a8441fSMatthew Wilcox (Oracle) goto out; 2288ac46d4f3SJérôme Glisse __split_huge_pmd_locked(vma, pmd, range.start, freeze); 228983a8441fSMatthew Wilcox (Oracle) } 22907f760917SDavid Hildenbrand 2291e90309c9SKirill A. Shutemov out: 2292eef1b3baSKirill A. Shutemov spin_unlock(ptl); 22934645b9feSJérôme Glisse /* 22944645b9feSJérôme Glisse * No need to double call mmu_notifier->invalidate_range() callback. 
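 * (which is why mmu_notifier_invalidate_range_only_end() is used below
 * rather than the plain _end() variant).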
22954645b9feSJérôme Glisse * There are 3 cases to consider inside __split_huge_pmd_locked(): 22964645b9feSJérôme Glisse * 1) pmdp_huge_clear_flush_notify() calls invalidate_range(), obviously 22974645b9feSJérôme Glisse * 2) __split_huge_zero_page_pmd() maps the read-only zero page, and any write 22984645b9feSJérôme Glisse * fault will trigger a flush_notify before pointing to a new page 22994645b9feSJérôme Glisse * (it is fine if the secondary mmu keeps pointing to the old zero 23004645b9feSJérôme Glisse * page in the meantime) 23014645b9feSJérôme Glisse * 3) Split a huge pmd into ptes pointing to the same page. No need 23024645b9feSJérôme Glisse * to invalidate secondary tlb entries, they are all still valid; 23034645b9feSJérôme Glisse * any further changes to individual ptes will notify. So no need 23044645b9feSJérôme Glisse * to call mmu_notifier->invalidate_range() 23054645b9feSJérôme Glisse */ 2306ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_only_end(&range); 2307eef1b3baSKirill A. Shutemov } 2308eef1b3baSKirill A. Shutemov 2309fec89c10SKirill A. Shutemov void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, 2310af28a988SMatthew Wilcox (Oracle) bool freeze, struct folio *folio) 231194fcc585SAndrea Arcangeli { 231250722804SZach O'Keefe pmd_t *pmd = mm_find_pmd(vma->vm_mm, address); 231394fcc585SAndrea Arcangeli 231450722804SZach O'Keefe if (!pmd) 2315f72e7dcdSHugh Dickins return; 2316f72e7dcdSHugh Dickins 2317af28a988SMatthew Wilcox (Oracle) __split_huge_pmd(vma, pmd, address, freeze, folio); 231894fcc585SAndrea Arcangeli } 231994fcc585SAndrea Arcangeli 232071f9e58eSMiaohe Lin static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address) 232171f9e58eSMiaohe Lin { 232271f9e58eSMiaohe Lin /* 232371f9e58eSMiaohe Lin * If the new address isn't hpage aligned and it could previously 232471f9e58eSMiaohe Lin * contain a hugepage: check if we need to split a huge pmd. 232571f9e58eSMiaohe Lin */ 232671f9e58eSMiaohe Lin if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) && 232771f9e58eSMiaohe Lin range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE), 232871f9e58eSMiaohe Lin ALIGN(address, HPAGE_PMD_SIZE))) 232971f9e58eSMiaohe Lin split_huge_pmd_address(vma, address, false, NULL); 233071f9e58eSMiaohe Lin } 233171f9e58eSMiaohe Lin 2332e1b9996bSKirill A. Shutemov void vma_adjust_trans_huge(struct vm_area_struct *vma, 233394fcc585SAndrea Arcangeli unsigned long start, 233494fcc585SAndrea Arcangeli unsigned long end, 233594fcc585SAndrea Arcangeli long adjust_next) 233694fcc585SAndrea Arcangeli { 233771f9e58eSMiaohe Lin /* Check if we need to split start first. */ 233871f9e58eSMiaohe Lin split_huge_pmd_if_needed(vma, start); 233971f9e58eSMiaohe Lin 234071f9e58eSMiaohe Lin /* Check if we need to split end next. */ 234171f9e58eSMiaohe Lin split_huge_pmd_if_needed(vma, end); 234294fcc585SAndrea Arcangeli 234394fcc585SAndrea Arcangeli /* 234468540502SMatthew Wilcox (Oracle) * If we're also updating the next vma vm_start, 234571f9e58eSMiaohe Lin * check if we need to split it. 234694fcc585SAndrea Arcangeli */ 234794fcc585SAndrea Arcangeli if (adjust_next > 0) { 234868540502SMatthew Wilcox (Oracle) struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end); 234994fcc585SAndrea Arcangeli unsigned long nstart = next->vm_start; 2350f9d86a60SWei Yang nstart += adjust_next; 235171f9e58eSMiaohe Lin split_huge_pmd_if_needed(next, nstart); 235294fcc585SAndrea Arcangeli } 235394fcc585SAndrea Arcangeli } 2354e9b61f19SKirill A.
Shutemov 2355906f9cdfSHugh Dickins static void unmap_page(struct page *page) 2356e9b61f19SKirill A. Shutemov { 2357869f7ee6SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 2358a98a2f0cSAlistair Popple enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | 2359a98a2f0cSAlistair Popple TTU_SYNC; 2360e9b61f19SKirill A. Shutemov 2361e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(!PageHead(page), page); 2362e9b61f19SKirill A. Shutemov 2363a98a2f0cSAlistair Popple /* 2364a98a2f0cSAlistair Popple * Anon pages need migration entries to preserve them, but file 2365a98a2f0cSAlistair Popple * pages can simply be left unmapped, then faulted back on demand. 2366a98a2f0cSAlistair Popple * If that is ever changed (perhaps for mlock), update remap_page(). 2367a98a2f0cSAlistair Popple */ 23684b8554c5SMatthew Wilcox (Oracle) if (folio_test_anon(folio)) 23694b8554c5SMatthew Wilcox (Oracle) try_to_migrate(folio, ttu_flags); 2370a98a2f0cSAlistair Popple else 2371869f7ee6SMatthew Wilcox (Oracle) try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK); 2372bd56086fSKirill A. Shutemov } 2373bd56086fSKirill A. Shutemov 23744eecb8b9SMatthew Wilcox (Oracle) static void remap_page(struct folio *folio, unsigned long nr) 2375e9b61f19SKirill A. Shutemov { 23764eecb8b9SMatthew Wilcox (Oracle) int i = 0; 2377ab02c252SHugh Dickins 237864b586d1SHugh Dickins /* If unmap_page() uses try_to_migrate() on file, remove this check */ 23794eecb8b9SMatthew Wilcox (Oracle) if (!folio_test_anon(folio)) 2380ab02c252SHugh Dickins return; 23814eecb8b9SMatthew Wilcox (Oracle) for (;;) { 23824eecb8b9SMatthew Wilcox (Oracle) remove_migration_ptes(folio, folio, true); 23834eecb8b9SMatthew Wilcox (Oracle) i += folio_nr_pages(folio); 23844eecb8b9SMatthew Wilcox (Oracle) if (i >= nr) 23854eecb8b9SMatthew Wilcox (Oracle) break; 23864eecb8b9SMatthew Wilcox (Oracle) folio = folio_next(folio); 2387e9b61f19SKirill A. Shutemov } 2388ace71a19SKirill A. Shutemov } 2389e9b61f19SKirill A. Shutemov 239094866635SAlex Shi static void lru_add_page_tail(struct page *head, struct page *tail, 239188dcb9a3SAlex Shi struct lruvec *lruvec, struct list_head *list) 239288dcb9a3SAlex Shi { 239394866635SAlex Shi VM_BUG_ON_PAGE(!PageHead(head), head); 239494866635SAlex Shi VM_BUG_ON_PAGE(PageCompound(tail), head); 239594866635SAlex Shi VM_BUG_ON_PAGE(PageLRU(tail), head); 23966168d0daSAlex Shi lockdep_assert_held(&lruvec->lru_lock); 239788dcb9a3SAlex Shi 23986dbb5741SAlex Shi if (list) { 239988dcb9a3SAlex Shi /* page reclaim is reclaiming a huge page */ 24006dbb5741SAlex Shi VM_WARN_ON(PageLRU(head)); 240194866635SAlex Shi get_page(tail); 240294866635SAlex Shi list_add_tail(&tail->lru, list); 240388dcb9a3SAlex Shi } else { 24046dbb5741SAlex Shi /* head is still on lru (and we have it frozen) */ 24056dbb5741SAlex Shi VM_WARN_ON(!PageLRU(head)); 240607ca7606SHugh Dickins if (PageUnevictable(tail)) 240707ca7606SHugh Dickins tail->mlock_count = 0; 240807ca7606SHugh Dickins else 24096dbb5741SAlex Shi list_add_tail(&tail->lru, &head->lru); 241007ca7606SHugh Dickins SetPageLRU(tail); 241188dcb9a3SAlex Shi } 241288dcb9a3SAlex Shi } 241388dcb9a3SAlex Shi 24148df651c7SKirill A. Shutemov static void __split_huge_page_tail(struct page *head, int tail, 2415e9b61f19SKirill A. Shutemov struct lruvec *lruvec, struct list_head *list) 2416e9b61f19SKirill A. Shutemov { 2417e9b61f19SKirill A. Shutemov struct page *page_tail = head + tail; 2418e9b61f19SKirill A. Shutemov 24198df651c7SKirill A. 
Shutemov VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail); 2420e9b61f19SKirill A. Shutemov 2421e9b61f19SKirill A. Shutemov /* 2422605ca5edSKonstantin Khlebnikov * Clone page flags before unfreezing refcount. 2423605ca5edSKonstantin Khlebnikov * 2424605ca5edSKonstantin Khlebnikov * After successful get_page_unless_zero() might follow flags change, 24258958b249SHaitao Shi * for example lock_page() which set PG_waiters. 24266c287605SDavid Hildenbrand * 24276c287605SDavid Hildenbrand * Note that for mapped sub-pages of an anonymous THP, 24286c287605SDavid Hildenbrand * PG_anon_exclusive has been cleared in unmap_page() and is stored in 24296c287605SDavid Hildenbrand * the migration entry instead from where remap_page() will restore it. 24306c287605SDavid Hildenbrand * We can still have PG_anon_exclusive set on effectively unmapped and 24316c287605SDavid Hildenbrand * unreferenced sub-pages of an anonymous THP: we can simply drop 24326c287605SDavid Hildenbrand * PG_anon_exclusive (-> PG_mappedtodisk) for these here. 2433e9b61f19SKirill A. Shutemov */ 2434e9b61f19SKirill A. Shutemov page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 2435e9b61f19SKirill A. Shutemov page_tail->flags |= (head->flags & 2436e9b61f19SKirill A. Shutemov ((1L << PG_referenced) | 2437e9b61f19SKirill A. Shutemov (1L << PG_swapbacked) | 243838d8b4e6SHuang Ying (1L << PG_swapcache) | 2439e9b61f19SKirill A. Shutemov (1L << PG_mlocked) | 2440e9b61f19SKirill A. Shutemov (1L << PG_uptodate) | 2441e9b61f19SKirill A. Shutemov (1L << PG_active) | 24421899ad18SJohannes Weiner (1L << PG_workingset) | 2443e9b61f19SKirill A. Shutemov (1L << PG_locked) | 2444b8d3c4c3SMinchan Kim (1L << PG_unevictable) | 244572e6afa0SCatalin Marinas #ifdef CONFIG_64BIT 244672e6afa0SCatalin Marinas (1L << PG_arch_2) | 244772e6afa0SCatalin Marinas #endif 2448ec1c86b2SYu Zhao (1L << PG_dirty) | 2449ec1c86b2SYu Zhao LRU_GEN_MASK | LRU_REFS_MASK)); 2450e9b61f19SKirill A. Shutemov 2451173d9d9fSHugh Dickins /* ->mapping in first tail page is compound_mapcount */ 2452173d9d9fSHugh Dickins VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING, 2453173d9d9fSHugh Dickins page_tail); 2454173d9d9fSHugh Dickins page_tail->mapping = head->mapping; 2455173d9d9fSHugh Dickins page_tail->index = head->index + tail; 2456b653db77SMatthew Wilcox (Oracle) page_tail->private = 0; 2457173d9d9fSHugh Dickins 2458605ca5edSKonstantin Khlebnikov /* Page flags must be visible before we make the page non-compound. */ 2459e9b61f19SKirill A. Shutemov smp_wmb(); 2460e9b61f19SKirill A. Shutemov 2461605ca5edSKonstantin Khlebnikov /* 2462605ca5edSKonstantin Khlebnikov * Clear PageTail before unfreezing page refcount. 2463605ca5edSKonstantin Khlebnikov * 2464605ca5edSKonstantin Khlebnikov * After successful get_page_unless_zero() might follow put_page() 2465605ca5edSKonstantin Khlebnikov * which needs correct compound_head(). 2466605ca5edSKonstantin Khlebnikov */ 2467e9b61f19SKirill A. Shutemov clear_compound_head(page_tail); 2468e9b61f19SKirill A. Shutemov 2469605ca5edSKonstantin Khlebnikov /* Finally unfreeze refcount. Additional reference from page cache. */ 2470605ca5edSKonstantin Khlebnikov page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) || 2471605ca5edSKonstantin Khlebnikov PageSwapCache(head))); 2472605ca5edSKonstantin Khlebnikov 2473e9b61f19SKirill A. Shutemov if (page_is_young(head)) 2474e9b61f19SKirill A. Shutemov set_page_young(page_tail); 2475e9b61f19SKirill A. Shutemov if (page_is_idle(head)) 2476e9b61f19SKirill A. 
Shutemov set_page_idle(page_tail); 2477e9b61f19SKirill A. Shutemov 2478e9b61f19SKirill A. Shutemov page_cpupid_xchg_last(page_tail, page_cpupid_last(head)); 247994723aafSMichal Hocko 248094723aafSMichal Hocko /* 248194723aafSMichal Hocko * always add to the tail because some iterators expect new 248294723aafSMichal Hocko * pages to show after the currently processed elements - e.g. 248394723aafSMichal Hocko * migrate_pages 248494723aafSMichal Hocko */ 2485e9b61f19SKirill A. Shutemov lru_add_page_tail(head, page_tail, lruvec, list); 2486e9b61f19SKirill A. Shutemov } 2487e9b61f19SKirill A. Shutemov 2488baa355fdSKirill A. Shutemov static void __split_huge_page(struct page *page, struct list_head *list, 2489b6769834SAlex Shi pgoff_t end) 2490e9b61f19SKirill A. Shutemov { 2491e809c3feSMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 2492e809c3feSMatthew Wilcox (Oracle) struct page *head = &folio->page; 2493e9b61f19SKirill A. Shutemov struct lruvec *lruvec; 24944101196bSMatthew Wilcox (Oracle) struct address_space *swap_cache = NULL; 24954101196bSMatthew Wilcox (Oracle) unsigned long offset = 0; 24968cce5475SKirill A. Shutemov unsigned int nr = thp_nr_pages(head); 24978df651c7SKirill A. Shutemov int i; 2498e9b61f19SKirill A. Shutemov 2499e9b61f19SKirill A. Shutemov /* complete memcg works before add pages to LRU */ 2500be6c8982SZhou Guanghui split_page_memcg(head, nr); 2501e9b61f19SKirill A. Shutemov 25024101196bSMatthew Wilcox (Oracle) if (PageAnon(head) && PageSwapCache(head)) { 25034101196bSMatthew Wilcox (Oracle) swp_entry_t entry = { .val = page_private(head) }; 25044101196bSMatthew Wilcox (Oracle) 25054101196bSMatthew Wilcox (Oracle) offset = swp_offset(entry); 25064101196bSMatthew Wilcox (Oracle) swap_cache = swap_address_space(entry); 25074101196bSMatthew Wilcox (Oracle) xa_lock(&swap_cache->i_pages); 25084101196bSMatthew Wilcox (Oracle) } 25094101196bSMatthew Wilcox (Oracle) 2510f0953a1bSIngo Molnar /* lock lru list/PageCompound, ref frozen by page_ref_freeze */ 2511e809c3feSMatthew Wilcox (Oracle) lruvec = folio_lruvec_lock(folio); 2512b6769834SAlex Shi 2513eac96c3eSYang Shi ClearPageHasHWPoisoned(head); 2514eac96c3eSYang Shi 25158cce5475SKirill A. Shutemov for (i = nr - 1; i >= 1; i--) { 25168df651c7SKirill A. Shutemov __split_huge_page_tail(head, i, lruvec, list); 2517d144bf62SHugh Dickins /* Some pages can be beyond EOF: drop them from page cache */ 2518baa355fdSKirill A. Shutemov if (head[i].index >= end) { 2519fb5c2029SMatthew Wilcox (Oracle) struct folio *tail = page_folio(head + i); 2520fb5c2029SMatthew Wilcox (Oracle) 2521d144bf62SHugh Dickins if (shmem_mapping(head->mapping)) 2522800d8c63SKirill A. Shutemov shmem_uncharge(head->mapping->host, 1); 2523fb5c2029SMatthew Wilcox (Oracle) else if (folio_test_clear_dirty(tail)) 2524fb5c2029SMatthew Wilcox (Oracle) folio_account_cleaned(tail, 2525fb5c2029SMatthew Wilcox (Oracle) inode_to_wb(folio->mapping->host)); 2526fb5c2029SMatthew Wilcox (Oracle) __filemap_remove_folio(tail, NULL); 2527fb5c2029SMatthew Wilcox (Oracle) folio_put(tail); 25284101196bSMatthew Wilcox (Oracle) } else if (!PageAnon(page)) { 25294101196bSMatthew Wilcox (Oracle) __xa_store(&head->mapping->i_pages, head[i].index, 25304101196bSMatthew Wilcox (Oracle) head + i, 0); 25314101196bSMatthew Wilcox (Oracle) } else if (swap_cache) { 25324101196bSMatthew Wilcox (Oracle) __xa_store(&swap_cache->i_pages, offset + i, 25334101196bSMatthew Wilcox (Oracle) head + i, 0); 2534baa355fdSKirill A. Shutemov } 2535baa355fdSKirill A. 
Shutemov } 2536e9b61f19SKirill A. Shutemov 2537e9b61f19SKirill A. Shutemov ClearPageCompound(head); 25386168d0daSAlex Shi unlock_page_lruvec(lruvec); 2539b6769834SAlex Shi /* Caller disabled irqs, so they are still disabled here */ 2540f7da677bSVlastimil Babka 25418cce5475SKirill A. Shutemov split_page_owner(head, nr); 2542f7da677bSVlastimil Babka 2543baa355fdSKirill A. Shutemov /* See comment in __split_huge_page_tail() */ 2544baa355fdSKirill A. Shutemov if (PageAnon(head)) { 2545aa5dc07fSMatthew Wilcox /* Additional pin to swap cache */ 25464101196bSMatthew Wilcox (Oracle) if (PageSwapCache(head)) { 254738d8b4e6SHuang Ying page_ref_add(head, 2); 25484101196bSMatthew Wilcox (Oracle) xa_unlock(&swap_cache->i_pages); 25494101196bSMatthew Wilcox (Oracle) } else { 2550baa355fdSKirill A. Shutemov page_ref_inc(head); 25514101196bSMatthew Wilcox (Oracle) } 2552baa355fdSKirill A. Shutemov } else { 2553aa5dc07fSMatthew Wilcox /* Additional pin to page cache */ 2554baa355fdSKirill A. Shutemov page_ref_add(head, 2); 2555b93b0163SMatthew Wilcox xa_unlock(&head->mapping->i_pages); 2556baa355fdSKirill A. Shutemov } 2557b6769834SAlex Shi local_irq_enable(); 2558e9b61f19SKirill A. Shutemov 25594eecb8b9SMatthew Wilcox (Oracle) remap_page(folio, nr); 2560e9b61f19SKirill A. Shutemov 2561c4f9c701SHuang Ying if (PageSwapCache(head)) { 2562c4f9c701SHuang Ying swp_entry_t entry = { .val = page_private(head) }; 2563c4f9c701SHuang Ying 2564c4f9c701SHuang Ying split_swap_cluster(entry); 2565c4f9c701SHuang Ying } 2566c4f9c701SHuang Ying 25678cce5475SKirill A. Shutemov for (i = 0; i < nr; i++) { 2568e9b61f19SKirill A. Shutemov struct page *subpage = head + i; 2569e9b61f19SKirill A. Shutemov if (subpage == page) 2570e9b61f19SKirill A. Shutemov continue; 2571e9b61f19SKirill A. Shutemov unlock_page(subpage); 2572e9b61f19SKirill A. Shutemov 2573e9b61f19SKirill A. Shutemov /* 2574e9b61f19SKirill A. Shutemov * Subpages may be freed if there wasn't any mapping 2575e9b61f19SKirill A. Shutemov * like if add_to_swap() is running on a lru page that 2576e9b61f19SKirill A. Shutemov * had its mapping zapped. And freeing these pages 2577e9b61f19SKirill A. Shutemov * requires taking the lru_lock so we do the put_page 2578e9b61f19SKirill A. Shutemov * of the tail pages after the split is complete. 2579e9b61f19SKirill A. Shutemov */ 25800b175468SMiaohe Lin free_page_and_swap_cache(subpage); 2581e9b61f19SKirill A. Shutemov } 2582e9b61f19SKirill A. Shutemov } 2583e9b61f19SKirill A. Shutemov 2584b8f593cdSHuang Ying /* Racy check whether the huge page can be split */ 2585d4b4084aSMatthew Wilcox (Oracle) bool can_split_folio(struct folio *folio, int *pextra_pins) 2586b8f593cdSHuang Ying { 2587b8f593cdSHuang Ying int extra_pins; 2588b8f593cdSHuang Ying 2589aa5dc07fSMatthew Wilcox /* Additional pins from page cache */ 2590d4b4084aSMatthew Wilcox (Oracle) if (folio_test_anon(folio)) 2591d4b4084aSMatthew Wilcox (Oracle) extra_pins = folio_test_swapcache(folio) ? 2592d4b4084aSMatthew Wilcox (Oracle) folio_nr_pages(folio) : 0; 2593b8f593cdSHuang Ying else 2594d4b4084aSMatthew Wilcox (Oracle) extra_pins = folio_nr_pages(folio); 2595b8f593cdSHuang Ying if (pextra_pins) 2596b8f593cdSHuang Ying *pextra_pins = extra_pins; 2597d4b4084aSMatthew Wilcox (Oracle) return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1; 2598b8f593cdSHuang Ying } 2599b8f593cdSHuang Ying 26006d0a07edSAndrea Arcangeli /* 2601e9b61f19SKirill A. Shutemov * This function splits huge page into normal pages. @page can point to any 2602e9b61f19SKirill A. 
Shutemov * subpage of huge page to split. Split doesn't change the position of @page. 2603e9b61f19SKirill A. Shutemov * 2604e9b61f19SKirill A. Shutemov * Only the caller must hold a pin on the @page, otherwise the split fails with -EBUSY. 2605e9b61f19SKirill A. Shutemov * The huge page must be locked. 2606e9b61f19SKirill A. Shutemov * 2607e9b61f19SKirill A. Shutemov * If @list is null, tail pages will be added to the LRU list, otherwise to @list. 2608e9b61f19SKirill A. Shutemov * 2609e9b61f19SKirill A. Shutemov * Both head page and tail pages will inherit mapping, flags, and so on from 2610e9b61f19SKirill A. Shutemov * the hugepage. 2611e9b61f19SKirill A. Shutemov * 2612e9b61f19SKirill A. Shutemov * The GUP pin and PG_locked are transferred to @page. The rest of the subpages can be freed if 2613e9b61f19SKirill A. Shutemov * they are not mapped. 2614e9b61f19SKirill A. Shutemov * 2615e9b61f19SKirill A. Shutemov * Returns 0 if the hugepage is split successfully. 2616e9b61f19SKirill A. Shutemov * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under 2617e9b61f19SKirill A. Shutemov * us. 2618e9b61f19SKirill A. Shutemov */ 2619e9b61f19SKirill A. Shutemov int split_huge_page_to_list(struct page *page, struct list_head *list) 2620e9b61f19SKirill A. Shutemov { 26214eecb8b9SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 26224eecb8b9SMatthew Wilcox (Oracle) struct page *head = &folio->page; 2623a8803e6cSWei Yang struct deferred_split *ds_queue = get_deferred_split_queue(head); 26246b24ca4aSMatthew Wilcox (Oracle) XA_STATE(xas, &head->mapping->i_pages, head->index); 2625baa355fdSKirill A. Shutemov struct anon_vma *anon_vma = NULL; 2626baa355fdSKirill A. Shutemov struct address_space *mapping = NULL; 2627504e070dSYang Shi int extra_pins, ret; 2628006d3ff2SHugh Dickins pgoff_t end; 2629478d134eSXu Yu bool is_hzp; 2630e9b61f19SKirill A. Shutemov 2631a8803e6cSWei Yang VM_BUG_ON_PAGE(!PageLocked(head), head); 2632a8803e6cSWei Yang VM_BUG_ON_PAGE(!PageCompound(head), head); 2633e9b61f19SKirill A. Shutemov 2634478d134eSXu Yu is_hzp = is_huge_zero_page(head); 2635478d134eSXu Yu VM_WARN_ON_ONCE_PAGE(is_hzp, head); 2636478d134eSXu Yu if (is_hzp) 2637478d134eSXu Yu return -EBUSY; 2638478d134eSXu Yu 2639a8803e6cSWei Yang if (PageWriteback(head)) 264059807685SHuang Ying return -EBUSY; 264159807685SHuang Ying 2642baa355fdSKirill A. Shutemov if (PageAnon(head)) { 2643e9b61f19SKirill A. Shutemov /* 2644c1e8d7c6SMichel Lespinasse * The caller does not necessarily hold an mmap_lock that would 2645baa355fdSKirill A. Shutemov * prevent the anon_vma disappearing so we first take a 2646baa355fdSKirill A. Shutemov * reference to it and then lock the anon_vma for write. This 26472f031c6fSMatthew Wilcox (Oracle) * is similar to folio_lock_anon_vma_read except the write lock 2648baa355fdSKirill A. Shutemov * is taken to serialise against parallel split or collapse 2649baa355fdSKirill A. Shutemov * operations. 2650e9b61f19SKirill A. Shutemov */ 2651e9b61f19SKirill A. Shutemov anon_vma = page_get_anon_vma(head); 2652e9b61f19SKirill A. Shutemov if (!anon_vma) { 2653e9b61f19SKirill A. Shutemov ret = -EBUSY; 2654e9b61f19SKirill A. Shutemov goto out; 2655e9b61f19SKirill A. Shutemov } 2656006d3ff2SHugh Dickins end = -1; 2657baa355fdSKirill A. Shutemov mapping = NULL; 2658e9b61f19SKirill A. Shutemov anon_vma_lock_write(anon_vma); 2659baa355fdSKirill A. Shutemov } else { 26606a3edd29SYin Fengwei gfp_t gfp; 26616a3edd29SYin Fengwei 2662baa355fdSKirill A. Shutemov mapping = head->mapping; 2663baa355fdSKirill A.
Shutemov 2664baa355fdSKirill A. Shutemov /* Truncated ? */ 2665baa355fdSKirill A. Shutemov if (!mapping) { 2666baa355fdSKirill A. Shutemov ret = -EBUSY; 2667baa355fdSKirill A. Shutemov goto out; 2668baa355fdSKirill A. Shutemov } 2669baa355fdSKirill A. Shutemov 26706a3edd29SYin Fengwei gfp = current_gfp_context(mapping_gfp_mask(mapping) & 26716a3edd29SYin Fengwei GFP_RECLAIM_MASK); 26726a3edd29SYin Fengwei 26736a3edd29SYin Fengwei if (folio_test_private(folio) && 26746a3edd29SYin Fengwei !filemap_release_folio(folio, gfp)) { 26756a3edd29SYin Fengwei ret = -EBUSY; 26766a3edd29SYin Fengwei goto out; 26776a3edd29SYin Fengwei } 26786a3edd29SYin Fengwei 26796a3edd29SYin Fengwei xas_split_alloc(&xas, head, compound_order(head), gfp); 26806b24ca4aSMatthew Wilcox (Oracle) if (xas_error(&xas)) { 26816b24ca4aSMatthew Wilcox (Oracle) ret = xas_error(&xas); 26826b24ca4aSMatthew Wilcox (Oracle) goto out; 26836b24ca4aSMatthew Wilcox (Oracle) } 26846b24ca4aSMatthew Wilcox (Oracle) 2685baa355fdSKirill A. Shutemov anon_vma = NULL; 2686baa355fdSKirill A. Shutemov i_mmap_lock_read(mapping); 2687006d3ff2SHugh Dickins 2688006d3ff2SHugh Dickins /* 2689006d3ff2SHugh Dickins *__split_huge_page() may need to trim off pages beyond EOF: 2690006d3ff2SHugh Dickins * but on 32-bit, i_size_read() takes an irq-unsafe seqlock, 2691006d3ff2SHugh Dickins * which cannot be nested inside the page tree lock. So note 2692006d3ff2SHugh Dickins * end now: i_size itself may be changed at any moment, but 2693006d3ff2SHugh Dickins * head page lock is good enough to serialize the trimming. 2694006d3ff2SHugh Dickins */ 2695006d3ff2SHugh Dickins end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); 2696d144bf62SHugh Dickins if (shmem_mapping(mapping)) 2697d144bf62SHugh Dickins end = shmem_fallocend(mapping->host, end); 2698baa355fdSKirill A. Shutemov } 2699e9b61f19SKirill A. Shutemov 2700e9b61f19SKirill A. Shutemov /* 2701906f9cdfSHugh Dickins * Racy check if we can split the page, before unmap_page() will 2702e9b61f19SKirill A. Shutemov * split PMDs 2703e9b61f19SKirill A. Shutemov */ 2704d4b4084aSMatthew Wilcox (Oracle) if (!can_split_folio(folio, &extra_pins)) { 2705e9b61f19SKirill A. Shutemov ret = -EBUSY; 2706e9b61f19SKirill A. Shutemov goto out_unlock; 2707e9b61f19SKirill A. Shutemov } 2708e9b61f19SKirill A. Shutemov 2709906f9cdfSHugh Dickins unmap_page(head); 2710e9b61f19SKirill A. Shutemov 2711b6769834SAlex Shi /* block interrupt reentry in xa_lock and spinlock */ 2712b6769834SAlex Shi local_irq_disable(); 2713baa355fdSKirill A. Shutemov if (mapping) { 2714baa355fdSKirill A. Shutemov /* 2715aa5dc07fSMatthew Wilcox * Check if the head page is present in page cache. 2716baa355fdSKirill A. Shutemov * We assume all tail are present too, if head is there. 2717baa355fdSKirill A. Shutemov */ 27186b24ca4aSMatthew Wilcox (Oracle) xas_lock(&xas); 27196b24ca4aSMatthew Wilcox (Oracle) xas_reset(&xas); 2720aa5dc07fSMatthew Wilcox if (xas_load(&xas) != head) 2721baa355fdSKirill A. Shutemov goto fail; 2722baa355fdSKirill A. Shutemov } 2723baa355fdSKirill A. Shutemov 27240139aa7bSJoonsoo Kim /* Prevent deferred_split_scan() touching ->_refcount */ 2725364c1eebSYang Shi spin_lock(&ds_queue->split_queue_lock); 2726504e070dSYang Shi if (page_ref_freeze(head, 1 + extra_pins)) { 27279a982250SKirill A. Shutemov if (!list_empty(page_deferred_list(head))) { 2728364c1eebSYang Shi ds_queue->split_queue_len--; 27299a982250SKirill A. Shutemov list_del(page_deferred_list(head)); 27309a982250SKirill A. 
Shutemov } 2731afb97172SWei Yang spin_unlock(&ds_queue->split_queue_lock); 273206d3eff6SKirill A. Shutemov if (mapping) { 2733bf9eceadSMuchun Song int nr = thp_nr_pages(head); 2734bf9eceadSMuchun Song 27356b24ca4aSMatthew Wilcox (Oracle) xas_split(&xas, head, thp_order(head)); 27361ca7554dSMarek Szyprowski if (PageSwapBacked(head)) { 273757b2847dSMuchun Song __mod_lruvec_page_state(head, NR_SHMEM_THPS, 273857b2847dSMuchun Song -nr); 27391ca7554dSMarek Szyprowski } else { 2740bf9eceadSMuchun Song __mod_lruvec_page_state(head, NR_FILE_THPS, 2741bf9eceadSMuchun Song -nr); 27421ca7554dSMarek Szyprowski filemap_nr_thps_dec(mapping); 27431ca7554dSMarek Szyprowski } 274406d3eff6SKirill A. Shutemov } 274506d3eff6SKirill A. Shutemov 2746b6769834SAlex Shi __split_huge_page(page, list, end); 2747e9b61f19SKirill A. Shutemov ret = 0; 2748baa355fdSKirill A. Shutemov } else { 2749364c1eebSYang Shi spin_unlock(&ds_queue->split_queue_lock); 2750504e070dSYang Shi fail: 2751504e070dSYang Shi if (mapping) 27526b24ca4aSMatthew Wilcox (Oracle) xas_unlock(&xas); 2753b6769834SAlex Shi local_irq_enable(); 27544eecb8b9SMatthew Wilcox (Oracle) remap_page(folio, folio_nr_pages(folio)); 2755e9b61f19SKirill A. Shutemov ret = -EBUSY; 2756e9b61f19SKirill A. Shutemov } 2757e9b61f19SKirill A. Shutemov 2758e9b61f19SKirill A. Shutemov out_unlock: 2759baa355fdSKirill A. Shutemov if (anon_vma) { 2760e9b61f19SKirill A. Shutemov anon_vma_unlock_write(anon_vma); 2761e9b61f19SKirill A. Shutemov put_anon_vma(anon_vma); 2762baa355fdSKirill A. Shutemov } 2763baa355fdSKirill A. Shutemov if (mapping) 2764baa355fdSKirill A. Shutemov i_mmap_unlock_read(mapping); 2765e9b61f19SKirill A. Shutemov out: 276669a37a8bSMatthew Wilcox (Oracle) xas_destroy(&xas); 2767e9b61f19SKirill A. Shutemov count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); 2768e9b61f19SKirill A. Shutemov return ret; 2769e9b61f19SKirill A. Shutemov } 27709a982250SKirill A. Shutemov 27719a982250SKirill A. Shutemov void free_transhuge_page(struct page *page) 27729a982250SKirill A. Shutemov { 277387eaceb3SYang Shi struct deferred_split *ds_queue = get_deferred_split_queue(page); 27749a982250SKirill A. Shutemov unsigned long flags; 27759a982250SKirill A. Shutemov 2776364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 27779a982250SKirill A. Shutemov if (!list_empty(page_deferred_list(page))) { 2778364c1eebSYang Shi ds_queue->split_queue_len--; 27799a982250SKirill A. Shutemov list_del(page_deferred_list(page)); 27809a982250SKirill A. Shutemov } 2781364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 27829a982250SKirill A. Shutemov free_compound_page(page); 27839a982250SKirill A. Shutemov } 27849a982250SKirill A. Shutemov 27859a982250SKirill A. Shutemov void deferred_split_huge_page(struct page *page) 27869a982250SKirill A. Shutemov { 278787eaceb3SYang Shi struct deferred_split *ds_queue = get_deferred_split_queue(page); 278887eaceb3SYang Shi #ifdef CONFIG_MEMCG 2789bcfe06bfSRoman Gushchin struct mem_cgroup *memcg = page_memcg(compound_head(page)); 279087eaceb3SYang Shi #endif 27919a982250SKirill A. Shutemov unsigned long flags; 27929a982250SKirill A. Shutemov 27939a982250SKirill A. Shutemov VM_BUG_ON_PAGE(!PageTransHuge(page), page); 27949a982250SKirill A. Shutemov 279587eaceb3SYang Shi /* 279687eaceb3SYang Shi * The try_to_unmap() in page reclaim path might reach here too, 279787eaceb3SYang Shi * this may cause a race condition to corrupt deferred split queue. 
279887eaceb3SYang Shi * And, if page reclaim is already handling the same page, it is 279987eaceb3SYang Shi * unnecessary to handle it again in shrinker. 280087eaceb3SYang Shi * 280187eaceb3SYang Shi * Check PageSwapCache to determine if the page is being 280287eaceb3SYang Shi * handled by page reclaim since THP swap would add the page into 280387eaceb3SYang Shi * swap cache before calling try_to_unmap(). 280487eaceb3SYang Shi */ 280587eaceb3SYang Shi if (PageSwapCache(page)) 280687eaceb3SYang Shi return; 280787eaceb3SYang Shi 2808364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 28099a982250SKirill A. Shutemov if (list_empty(page_deferred_list(page))) { 2810f9719a03SKirill A. Shutemov count_vm_event(THP_DEFERRED_SPLIT_PAGE); 2811364c1eebSYang Shi list_add_tail(page_deferred_list(page), &ds_queue->split_queue); 2812364c1eebSYang Shi ds_queue->split_queue_len++; 281387eaceb3SYang Shi #ifdef CONFIG_MEMCG 281487eaceb3SYang Shi if (memcg) 28152bfd3637SYang Shi set_shrinker_bit(memcg, page_to_nid(page), 281687eaceb3SYang Shi deferred_split_shrinker.id); 281787eaceb3SYang Shi #endif 28189a982250SKirill A. Shutemov } 2819364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 28209a982250SKirill A. Shutemov } 28219a982250SKirill A. Shutemov 28229a982250SKirill A. Shutemov static unsigned long deferred_split_count(struct shrinker *shrink, 28239a982250SKirill A. Shutemov struct shrink_control *sc) 28249a982250SKirill A. Shutemov { 2825a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(sc->nid); 2826364c1eebSYang Shi struct deferred_split *ds_queue = &pgdata->deferred_split_queue; 282787eaceb3SYang Shi 282887eaceb3SYang Shi #ifdef CONFIG_MEMCG 282987eaceb3SYang Shi if (sc->memcg) 283087eaceb3SYang Shi ds_queue = &sc->memcg->deferred_split_queue; 283187eaceb3SYang Shi #endif 2832364c1eebSYang Shi return READ_ONCE(ds_queue->split_queue_len); 28339a982250SKirill A. Shutemov } 28349a982250SKirill A. Shutemov 28359a982250SKirill A. Shutemov static unsigned long deferred_split_scan(struct shrinker *shrink, 28369a982250SKirill A. Shutemov struct shrink_control *sc) 28379a982250SKirill A. Shutemov { 2838a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(sc->nid); 2839364c1eebSYang Shi struct deferred_split *ds_queue = &pgdata->deferred_split_queue; 28409a982250SKirill A. Shutemov unsigned long flags; 28419a982250SKirill A. Shutemov LIST_HEAD(list), *pos, *next; 28429a982250SKirill A. Shutemov struct page *page; 28439a982250SKirill A. Shutemov int split = 0; 28449a982250SKirill A. Shutemov 284587eaceb3SYang Shi #ifdef CONFIG_MEMCG 284687eaceb3SYang Shi if (sc->memcg) 284787eaceb3SYang Shi ds_queue = &sc->memcg->deferred_split_queue; 284887eaceb3SYang Shi #endif 284987eaceb3SYang Shi 2850364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 28519a982250SKirill A. Shutemov /* Take pin on all head pages to avoid freeing them under us */ 2852364c1eebSYang Shi list_for_each_safe(pos, next, &ds_queue->split_queue) { 2853dfe5c51cSMiaohe Lin page = list_entry((void *)pos, struct page, deferred_list); 28549a982250SKirill A. Shutemov page = compound_head(page); 2855e3ae1953SKirill A. Shutemov if (get_page_unless_zero(page)) { 2856e3ae1953SKirill A. Shutemov list_move(page_deferred_list(page), &list); 2857e3ae1953SKirill A. Shutemov } else { 2858e3ae1953SKirill A. Shutemov /* We lost race with put_compound_page() */ 28599a982250SKirill A. 
Shutemov list_del_init(page_deferred_list(page)); 2860364c1eebSYang Shi ds_queue->split_queue_len--; 28619a982250SKirill A. Shutemov } 2862e3ae1953SKirill A. Shutemov if (!--sc->nr_to_scan) 2863e3ae1953SKirill A. Shutemov break; 28649a982250SKirill A. Shutemov } 2865364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 28669a982250SKirill A. Shutemov 28679a982250SKirill A. Shutemov list_for_each_safe(pos, next, &list) { 2868dfe5c51cSMiaohe Lin page = list_entry((void *)pos, struct page, deferred_list); 2869fa41b900SKirill A. Shutemov if (!trylock_page(page)) 2870fa41b900SKirill A. Shutemov goto next; 28719a982250SKirill A. Shutemov /* split_huge_page() removes page from list on success */ 28729a982250SKirill A. Shutemov if (!split_huge_page(page)) 28739a982250SKirill A. Shutemov split++; 28749a982250SKirill A. Shutemov unlock_page(page); 2875fa41b900SKirill A. Shutemov next: 28769a982250SKirill A. Shutemov put_page(page); 28779a982250SKirill A. Shutemov } 28789a982250SKirill A. Shutemov 2879364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 2880364c1eebSYang Shi list_splice_tail(&list, &ds_queue->split_queue); 2881364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 28829a982250SKirill A. Shutemov 2883cb8d68ecSKirill A. Shutemov /* 2884cb8d68ecSKirill A. Shutemov * Stop shrinker if we didn't split any page, but the queue is empty. 2885cb8d68ecSKirill A. Shutemov * This can happen if pages were freed under us. 2886cb8d68ecSKirill A. Shutemov */ 2887364c1eebSYang Shi if (!split && list_empty(&ds_queue->split_queue)) 2888cb8d68ecSKirill A. Shutemov return SHRINK_STOP; 2889cb8d68ecSKirill A. Shutemov return split; 28909a982250SKirill A. Shutemov } 28919a982250SKirill A. Shutemov 28929a982250SKirill A. Shutemov static struct shrinker deferred_split_shrinker = { 28939a982250SKirill A. Shutemov .count_objects = deferred_split_count, 28949a982250SKirill A. Shutemov .scan_objects = deferred_split_scan, 28959a982250SKirill A. Shutemov .seeks = DEFAULT_SEEKS, 289687eaceb3SYang Shi .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE | 289787eaceb3SYang Shi SHRINKER_NONSLAB, 28989a982250SKirill A. Shutemov }; 289949071d43SKirill A. Shutemov 290049071d43SKirill A. Shutemov #ifdef CONFIG_DEBUG_FS 2901fa6c0231SZi Yan static void split_huge_pages_all(void) 290249071d43SKirill A. Shutemov { 290349071d43SKirill A. Shutemov struct zone *zone; 290449071d43SKirill A. Shutemov struct page *page; 290549071d43SKirill A. Shutemov unsigned long pfn, max_zone_pfn; 290649071d43SKirill A. Shutemov unsigned long total = 0, split = 0; 290749071d43SKirill A. Shutemov 2908fa6c0231SZi Yan pr_debug("Split all THPs\n"); 2909a17206daSMiaohe Lin for_each_zone(zone) { 2910a17206daSMiaohe Lin if (!managed_zone(zone)) 2911a17206daSMiaohe Lin continue; 291249071d43SKirill A. Shutemov max_zone_pfn = zone_end_pfn(zone); 291349071d43SKirill A. Shutemov for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { 2914a17206daSMiaohe Lin int nr_pages; 291549071d43SKirill A. Shutemov 29162b7aa91bSNaoya Horiguchi page = pfn_to_online_page(pfn); 29172b7aa91bSNaoya Horiguchi if (!page || !get_page_unless_zero(page)) 291849071d43SKirill A. Shutemov continue; 291949071d43SKirill A. Shutemov 292049071d43SKirill A. Shutemov if (zone != page_zone(page)) 292149071d43SKirill A. Shutemov goto next; 292249071d43SKirill A. Shutemov 2923baa355fdSKirill A. Shutemov if (!PageHead(page) || PageHuge(page) || !PageLRU(page)) 292449071d43SKirill A. 
Shutemov goto next; 292549071d43SKirill A. Shutemov 292649071d43SKirill A. Shutemov total++; 292749071d43SKirill A. Shutemov lock_page(page); 2928a17206daSMiaohe Lin nr_pages = thp_nr_pages(page); 292949071d43SKirill A. Shutemov if (!split_huge_page(page)) 293049071d43SKirill A. Shutemov split++; 2931a17206daSMiaohe Lin pfn += nr_pages - 1; 293249071d43SKirill A. Shutemov unlock_page(page); 293349071d43SKirill A. Shutemov next: 293449071d43SKirill A. Shutemov put_page(page); 2935fa6c0231SZi Yan cond_resched(); 293649071d43SKirill A. Shutemov } 293749071d43SKirill A. Shutemov } 293849071d43SKirill A. Shutemov 2939fa6c0231SZi Yan pr_debug("%lu of %lu THP split\n", split, total); 294049071d43SKirill A. Shutemov } 2941fa6c0231SZi Yan 2942fa6c0231SZi Yan static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma) 2943fa6c0231SZi Yan { 2944fa6c0231SZi Yan return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) || 2945fa6c0231SZi Yan is_vm_hugetlb_page(vma); 2946fa6c0231SZi Yan } 2947fa6c0231SZi Yan 2948fa6c0231SZi Yan static int split_huge_pages_pid(int pid, unsigned long vaddr_start, 2949fa6c0231SZi Yan unsigned long vaddr_end) 2950fa6c0231SZi Yan { 2951fa6c0231SZi Yan int ret = 0; 2952fa6c0231SZi Yan struct task_struct *task; 2953fa6c0231SZi Yan struct mm_struct *mm; 2954fa6c0231SZi Yan unsigned long total = 0, split = 0; 2955fa6c0231SZi Yan unsigned long addr; 2956fa6c0231SZi Yan 2957fa6c0231SZi Yan vaddr_start &= PAGE_MASK; 2958fa6c0231SZi Yan vaddr_end &= PAGE_MASK; 2959fa6c0231SZi Yan 2960fa6c0231SZi Yan /* Find the task_struct from pid */ 2961fa6c0231SZi Yan rcu_read_lock(); 2962fa6c0231SZi Yan task = find_task_by_vpid(pid); 2963fa6c0231SZi Yan if (!task) { 2964fa6c0231SZi Yan rcu_read_unlock(); 2965fa6c0231SZi Yan ret = -ESRCH; 2966fa6c0231SZi Yan goto out; 2967fa6c0231SZi Yan } 2968fa6c0231SZi Yan get_task_struct(task); 2969fa6c0231SZi Yan rcu_read_unlock(); 2970fa6c0231SZi Yan 2971fa6c0231SZi Yan /* Find the mm_struct */ 2972fa6c0231SZi Yan mm = get_task_mm(task); 2973fa6c0231SZi Yan put_task_struct(task); 2974fa6c0231SZi Yan 2975fa6c0231SZi Yan if (!mm) { 2976fa6c0231SZi Yan ret = -EINVAL; 2977fa6c0231SZi Yan goto out; 2978fa6c0231SZi Yan } 2979fa6c0231SZi Yan 2980fa6c0231SZi Yan pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n", 2981fa6c0231SZi Yan pid, vaddr_start, vaddr_end); 2982fa6c0231SZi Yan 2983fa6c0231SZi Yan mmap_read_lock(mm); 2984fa6c0231SZi Yan /* 2985fa6c0231SZi Yan * always increase addr by PAGE_SIZE, since we could have a PTE page 2986fa6c0231SZi Yan * table filled with PTE-mapped THPs, each of which is distinct. 
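 * split_huge_page() acts on the whole compound page no matter which
 * subpage follow_page() returns, so once a THP has been split the
 * remaining addresses covered by it simply fail the
 * is_transparent_hugepage() check below.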
2987fa6c0231SZi Yan */ 2988fa6c0231SZi Yan for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) { 298974ba2b38SMiaohe Lin struct vm_area_struct *vma = vma_lookup(mm, addr); 2990fa6c0231SZi Yan struct page *page; 2991fa6c0231SZi Yan 299274ba2b38SMiaohe Lin if (!vma) 2993fa6c0231SZi Yan break; 2994fa6c0231SZi Yan 2995fa6c0231SZi Yan /* skip special VMA and hugetlb VMA */ 2996fa6c0231SZi Yan if (vma_not_suitable_for_thp_split(vma)) { 2997fa6c0231SZi Yan addr = vma->vm_end; 2998fa6c0231SZi Yan continue; 2999fa6c0231SZi Yan } 3000fa6c0231SZi Yan 3001fa6c0231SZi Yan /* FOLL_DUMP to ignore special (like zero) pages */ 300287d2762eSMiaohe Lin page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP); 3003fa6c0231SZi Yan 3004*f7091ed6SHaiyue Wang if (IS_ERR_OR_NULL(page)) 3005fa6c0231SZi Yan continue; 3006fa6c0231SZi Yan 3007fa6c0231SZi Yan if (!is_transparent_hugepage(page)) 3008fa6c0231SZi Yan goto next; 3009fa6c0231SZi Yan 3010fa6c0231SZi Yan total++; 3011d4b4084aSMatthew Wilcox (Oracle) if (!can_split_folio(page_folio(page), NULL)) 3012fa6c0231SZi Yan goto next; 3013fa6c0231SZi Yan 3014fa6c0231SZi Yan if (!trylock_page(page)) 3015fa6c0231SZi Yan goto next; 3016fa6c0231SZi Yan 3017fa6c0231SZi Yan if (!split_huge_page(page)) 3018fa6c0231SZi Yan split++; 3019fa6c0231SZi Yan 3020fa6c0231SZi Yan unlock_page(page); 3021fa6c0231SZi Yan next: 3022fa6c0231SZi Yan put_page(page); 3023fa6c0231SZi Yan cond_resched(); 3024fa6c0231SZi Yan } 3025fa6c0231SZi Yan mmap_read_unlock(mm); 3026fa6c0231SZi Yan mmput(mm); 3027fa6c0231SZi Yan 3028fa6c0231SZi Yan pr_debug("%lu of %lu THP split\n", split, total); 3029fa6c0231SZi Yan 3030fa6c0231SZi Yan out: 3031fa6c0231SZi Yan return ret; 3032fa6c0231SZi Yan } 3033fa6c0231SZi Yan 3034fbe37501SZi Yan static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start, 3035fbe37501SZi Yan pgoff_t off_end) 3036fbe37501SZi Yan { 3037fbe37501SZi Yan struct filename *file; 3038fbe37501SZi Yan struct file *candidate; 3039fbe37501SZi Yan struct address_space *mapping; 3040fbe37501SZi Yan int ret = -EINVAL; 3041fbe37501SZi Yan pgoff_t index; 3042fbe37501SZi Yan int nr_pages = 1; 3043fbe37501SZi Yan unsigned long total = 0, split = 0; 3044fbe37501SZi Yan 3045fbe37501SZi Yan file = getname_kernel(file_path); 3046fbe37501SZi Yan if (IS_ERR(file)) 3047fbe37501SZi Yan return ret; 3048fbe37501SZi Yan 3049fbe37501SZi Yan candidate = file_open_name(file, O_RDONLY, 0); 3050fbe37501SZi Yan if (IS_ERR(candidate)) 3051fbe37501SZi Yan goto out; 3052fbe37501SZi Yan 3053fbe37501SZi Yan pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n", 3054fbe37501SZi Yan file_path, off_start, off_end); 3055fbe37501SZi Yan 3056fbe37501SZi Yan mapping = candidate->f_mapping; 3057fbe37501SZi Yan 3058fbe37501SZi Yan for (index = off_start; index < off_end; index += nr_pages) { 3059fbe37501SZi Yan struct page *fpage = pagecache_get_page(mapping, index, 3060fbe37501SZi Yan FGP_ENTRY | FGP_HEAD, 0); 3061fbe37501SZi Yan 3062fbe37501SZi Yan nr_pages = 1; 3063fbe37501SZi Yan if (xa_is_value(fpage) || !fpage) 3064fbe37501SZi Yan continue; 3065fbe37501SZi Yan 3066fbe37501SZi Yan if (!is_transparent_hugepage(fpage)) 3067fbe37501SZi Yan goto next; 3068fbe37501SZi Yan 3069fbe37501SZi Yan total++; 3070fbe37501SZi Yan nr_pages = thp_nr_pages(fpage); 3071fbe37501SZi Yan 3072fbe37501SZi Yan if (!trylock_page(fpage)) 3073fbe37501SZi Yan goto next; 3074fbe37501SZi Yan 3075fbe37501SZi Yan if (!split_huge_page(fpage)) 3076fbe37501SZi Yan split++; 3077fbe37501SZi Yan 3078fbe37501SZi Yan 
unlock_page(fpage); 3079fbe37501SZi Yan next: 3080fbe37501SZi Yan put_page(fpage); 3081fbe37501SZi Yan cond_resched(); 3082fbe37501SZi Yan } 3083fbe37501SZi Yan 3084fbe37501SZi Yan filp_close(candidate, NULL); 3085fbe37501SZi Yan ret = 0; 3086fbe37501SZi Yan 3087fbe37501SZi Yan pr_debug("%lu of %lu file-backed THP split\n", split, total); 3088fbe37501SZi Yan out: 3089fbe37501SZi Yan putname(file); 3090fbe37501SZi Yan return ret; 3091fbe37501SZi Yan } 3092fbe37501SZi Yan 3093fa6c0231SZi Yan #define MAX_INPUT_BUF_SZ 255 3094fa6c0231SZi Yan 3095fa6c0231SZi Yan static ssize_t split_huge_pages_write(struct file *file, const char __user *buf, 3096fa6c0231SZi Yan size_t count, loff_t *ppops) 3097fa6c0231SZi Yan { 3098fa6c0231SZi Yan static DEFINE_MUTEX(split_debug_mutex); 3099fa6c0231SZi Yan ssize_t ret; 3100fbe37501SZi Yan /* hold pid, start_vaddr, end_vaddr or file_path, off_start, off_end */ 3101fbe37501SZi Yan char input_buf[MAX_INPUT_BUF_SZ]; 3102fa6c0231SZi Yan int pid; 3103fa6c0231SZi Yan unsigned long vaddr_start, vaddr_end; 3104fa6c0231SZi Yan 3105fa6c0231SZi Yan ret = mutex_lock_interruptible(&split_debug_mutex); 3106fa6c0231SZi Yan if (ret) 3107fa6c0231SZi Yan return ret; 3108fa6c0231SZi Yan 3109fa6c0231SZi Yan ret = -EFAULT; 3110fa6c0231SZi Yan 3111fa6c0231SZi Yan memset(input_buf, 0, MAX_INPUT_BUF_SZ); 3112fa6c0231SZi Yan if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ))) 3113fa6c0231SZi Yan goto out; 3114fa6c0231SZi Yan 3115fa6c0231SZi Yan input_buf[MAX_INPUT_BUF_SZ - 1] = '\0'; 3116fbe37501SZi Yan 3117fbe37501SZi Yan if (input_buf[0] == '/') { 3118fbe37501SZi Yan char *tok; 3119fbe37501SZi Yan char *buf = input_buf; 3120fbe37501SZi Yan char file_path[MAX_INPUT_BUF_SZ]; 3121fbe37501SZi Yan pgoff_t off_start = 0, off_end = 0; 3122fbe37501SZi Yan size_t input_len = strlen(input_buf); 3123fbe37501SZi Yan 3124fbe37501SZi Yan tok = strsep(&buf, ","); 3125fbe37501SZi Yan if (tok) { 31261212e00cSMatthew Wilcox (Oracle) strcpy(file_path, tok); 3127fbe37501SZi Yan } else { 3128fbe37501SZi Yan ret = -EINVAL; 3129fbe37501SZi Yan goto out; 3130fbe37501SZi Yan } 3131fbe37501SZi Yan 3132fbe37501SZi Yan ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end); 3133fbe37501SZi Yan if (ret != 2) { 3134fbe37501SZi Yan ret = -EINVAL; 3135fbe37501SZi Yan goto out; 3136fbe37501SZi Yan } 3137fbe37501SZi Yan ret = split_huge_pages_in_file(file_path, off_start, off_end); 3138fbe37501SZi Yan if (!ret) 3139fbe37501SZi Yan ret = input_len; 3140fbe37501SZi Yan 3141fbe37501SZi Yan goto out; 3142fbe37501SZi Yan } 3143fbe37501SZi Yan 3144fa6c0231SZi Yan ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end); 3145fa6c0231SZi Yan if (ret == 1 && pid == 1) { 3146fa6c0231SZi Yan split_huge_pages_all(); 3147fa6c0231SZi Yan ret = strlen(input_buf); 3148fa6c0231SZi Yan goto out; 3149fa6c0231SZi Yan } else if (ret != 3) { 3150fa6c0231SZi Yan ret = -EINVAL; 3151fa6c0231SZi Yan goto out; 3152fa6c0231SZi Yan } 3153fa6c0231SZi Yan 3154fa6c0231SZi Yan ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end); 3155fa6c0231SZi Yan if (!ret) 3156fa6c0231SZi Yan ret = strlen(input_buf); 3157fa6c0231SZi Yan out: 3158fa6c0231SZi Yan mutex_unlock(&split_debug_mutex); 3159fa6c0231SZi Yan return ret; 3160fa6c0231SZi Yan 3161fa6c0231SZi Yan } 3162fa6c0231SZi Yan 3163fa6c0231SZi Yan static const struct file_operations split_huge_pages_fops = { 3164fa6c0231SZi Yan .owner = THIS_MODULE, 3165fa6c0231SZi Yan .write = split_huge_pages_write, 3166fa6c0231SZi Yan .llseek = no_llseek, 3167fa6c0231SZi Yan 
}; 316849071d43SKirill A. Shutemov 316949071d43SKirill A. Shutemov static int __init split_huge_pages_debugfs(void) 317049071d43SKirill A. Shutemov { 3171d9f7979cSGreg Kroah-Hartman debugfs_create_file("split_huge_pages", 0200, NULL, NULL, 317249071d43SKirill A. Shutemov &split_huge_pages_fops); 317349071d43SKirill A. Shutemov return 0; 317449071d43SKirill A. Shutemov } 317549071d43SKirill A. Shutemov late_initcall(split_huge_pages_debugfs); 317649071d43SKirill A. Shutemov #endif 3177616b8371SZi Yan 3178616b8371SZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 31797f5abe60SDavid Hildenbrand int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, 3180616b8371SZi Yan struct page *page) 3181616b8371SZi Yan { 3182616b8371SZi Yan struct vm_area_struct *vma = pvmw->vma; 3183616b8371SZi Yan struct mm_struct *mm = vma->vm_mm; 3184616b8371SZi Yan unsigned long address = pvmw->address; 31856c287605SDavid Hildenbrand bool anon_exclusive; 3186616b8371SZi Yan pmd_t pmdval; 3187616b8371SZi Yan swp_entry_t entry; 3188ab6e3d09SNaoya Horiguchi pmd_t pmdswp; 3189616b8371SZi Yan 3190616b8371SZi Yan if (!(pvmw->pmd && !pvmw->pte)) 31917f5abe60SDavid Hildenbrand return 0; 3192616b8371SZi Yan 3193616b8371SZi Yan flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); 31948a8683adSHuang Ying pmdval = pmdp_invalidate(vma, address, pvmw->pmd); 31956c287605SDavid Hildenbrand 3196088b8aa5SDavid Hildenbrand /* See page_try_share_anon_rmap(): invalidate PMD first. */ 31976c287605SDavid Hildenbrand anon_exclusive = PageAnon(page) && PageAnonExclusive(page); 31986c287605SDavid Hildenbrand if (anon_exclusive && page_try_share_anon_rmap(page)) { 31996c287605SDavid Hildenbrand set_pmd_at(mm, address, pvmw->pmd, pmdval); 32007f5abe60SDavid Hildenbrand return -EBUSY; 32016c287605SDavid Hildenbrand } 32026c287605SDavid Hildenbrand 3203616b8371SZi Yan if (pmd_dirty(pmdval)) 3204616b8371SZi Yan set_page_dirty(page); 32054dd845b5SAlistair Popple if (pmd_write(pmdval)) 32064dd845b5SAlistair Popple entry = make_writable_migration_entry(page_to_pfn(page)); 32076c287605SDavid Hildenbrand else if (anon_exclusive) 32086c287605SDavid Hildenbrand entry = make_readable_exclusive_migration_entry(page_to_pfn(page)); 32094dd845b5SAlistair Popple else 32104dd845b5SAlistair Popple entry = make_readable_migration_entry(page_to_pfn(page)); 32112e346877SPeter Xu if (pmd_young(pmdval)) 32122e346877SPeter Xu entry = make_migration_entry_young(entry); 32132e346877SPeter Xu if (pmd_dirty(pmdval)) 32142e346877SPeter Xu entry = make_migration_entry_dirty(entry); 3215ab6e3d09SNaoya Horiguchi pmdswp = swp_entry_to_pmd(entry); 3216ab6e3d09SNaoya Horiguchi if (pmd_soft_dirty(pmdval)) 3217ab6e3d09SNaoya Horiguchi pmdswp = pmd_swp_mksoft_dirty(pmdswp); 3218ab6e3d09SNaoya Horiguchi set_pmd_at(mm, address, pvmw->pmd, pmdswp); 3219cea86fe2SHugh Dickins page_remove_rmap(page, vma, true); 3220616b8371SZi Yan put_page(page); 3221283fd6feSAnshuman Khandual trace_set_migration_pmd(address, pmd_val(pmdswp)); 32227f5abe60SDavid Hildenbrand 32237f5abe60SDavid Hildenbrand return 0; 3224616b8371SZi Yan } 3225616b8371SZi Yan 3226616b8371SZi Yan void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) 3227616b8371SZi Yan { 3228616b8371SZi Yan struct vm_area_struct *vma = pvmw->vma; 3229616b8371SZi Yan struct mm_struct *mm = vma->vm_mm; 3230616b8371SZi Yan unsigned long address = pvmw->address; 32314fba8f2aSMiaohe Lin unsigned long haddr = address & HPAGE_PMD_MASK; 3232616b8371SZi Yan pmd_t pmde; 3233616b8371SZi Yan swp_entry_t entry; 
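/*
 * For reference, the debugfs interface implemented by
 * split_huge_pages_write() above is driven from userspace roughly as
 * follows (assuming debugfs is mounted at /sys/kernel/debug; the pid,
 * addresses and path below are illustrative only):
 *
 *   # split every THP in the system ("1" matches the pid == 1 shortcut)
 *   echo 1 > /sys/kernel/debug/split_huge_pages
 *
 *   # split THPs mapped by a process in a virtual address range
 *   echo "1234,0x7f0000000000,0x7f0000400000" > /sys/kernel/debug/split_huge_pages
 *
 *   # split file-backed THPs of a file in a page offset range
 *   echo "/mnt/data/file,0x0,0x200" > /sys/kernel/debug/split_huge_pages
 *
 * The three forms correspond to the "ret == 1 && pid == 1" case, the
 * "%d,0x%lx,0x%lx" sscanf() format, and the leading-'/' branch in
 * split_huge_pages_write() respectively.
 */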
3234616b8371SZi Yan 3235616b8371SZi Yan if (!(pvmw->pmd && !pvmw->pte)) 3236616b8371SZi Yan return; 3237616b8371SZi Yan 3238616b8371SZi Yan entry = pmd_to_swp_entry(*pvmw->pmd); 3239616b8371SZi Yan get_page(new); 32402e346877SPeter Xu pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot)); 3241ab6e3d09SNaoya Horiguchi if (pmd_swp_soft_dirty(*pvmw->pmd)) 3242ab6e3d09SNaoya Horiguchi pmde = pmd_mksoft_dirty(pmde); 32434dd845b5SAlistair Popple if (is_writable_migration_entry(entry)) 3244f55e1014SLinus Torvalds pmde = maybe_pmd_mkwrite(pmde, vma); 32458f34f1eaSPeter Xu if (pmd_swp_uffd_wp(*pvmw->pmd)) 32468f34f1eaSPeter Xu pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde)); 32472e346877SPeter Xu if (!is_migration_entry_young(entry)) 32482e346877SPeter Xu pmde = pmd_mkold(pmde); 32492e346877SPeter Xu /* NOTE: this may contain setting soft-dirty on some archs */ 32502e346877SPeter Xu if (PageDirty(new) && is_migration_entry_dirty(entry)) 32512e346877SPeter Xu pmde = pmd_mkdirty(pmde); 3252616b8371SZi Yan 32536c287605SDavid Hildenbrand if (PageAnon(new)) { 32546c287605SDavid Hildenbrand rmap_t rmap_flags = RMAP_COMPOUND; 32556c287605SDavid Hildenbrand 32566c287605SDavid Hildenbrand if (!is_readable_migration_entry(entry)) 32576c287605SDavid Hildenbrand rmap_flags |= RMAP_EXCLUSIVE; 32586c287605SDavid Hildenbrand 32594fba8f2aSMiaohe Lin page_add_anon_rmap(new, vma, haddr, rmap_flags); 32606c287605SDavid Hildenbrand } else { 3261cea86fe2SHugh Dickins page_add_file_rmap(new, vma, true); 32626c287605SDavid Hildenbrand } 32636c287605SDavid Hildenbrand VM_BUG_ON(pmd_write(pmde) && PageAnon(new) && !PageAnonExclusive(new)); 32644fba8f2aSMiaohe Lin set_pmd_at(mm, haddr, pvmw->pmd, pmde); 32655cbcf225SMuchun Song 32665cbcf225SMuchun Song /* No need to invalidate - it was non-present before */ 3267616b8371SZi Yan update_mmu_cache_pmd(vma, address, pvmw->pmd); 3268283fd6feSAnshuman Khandual trace_remove_migration_pmd(address, pmd_val(pmde)); 3269616b8371SZi Yan } 3270616b8371SZi Yan #endif 3271