// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2009  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "swap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

static struct shrinker deferred_split_shrinker;

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;

bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
			bool smaps, bool in_pf, bool enforce_sysfs)
{
	if (!vma->vm_mm)		/* vdso */
		return false;

	/*
	 * Explicitly disabled through madvise or prctl, or some
	 * architectures may disable THP for some mappings, for
	 * example, s390 kvm.
	 */
	if ((vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;
	/*
	 * Bail out if the hardware/firmware has marked hugepage
	 * support disabled.
	 */
	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
		return false;

	/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
	if (vma_is_dax(vma))
		return in_pf;

	/*
	 * Special VMA and hugetlb VMA.
	 * Must be checked after dax since some dax mappings may have
	 * VM_MIXEDMAP set.
	 */
	if (vm_flags & VM_NO_KHUGEPAGED)
		return false;

	/*
	 * Check alignment for file vma and size for both file and anon vma.
	 *
	 * Skip the check for page fault. Huge fault does the check in fault
	 * handlers. And this check is not suitable for huge PUD fault.
	 */
	if (!in_pf &&
	    !transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE)))
		return false;

	/*
	 * Enabled via shmem mount options or sysfs settings.
	 * Must be done before hugepage flags check since shmem has its
	 * own flags.
	 */
	if (!in_pf && shmem_file(vma->vm_file))
		return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
				     !enforce_sysfs, vma->vm_mm, vm_flags);

	/* Enforce sysfs THP requirements as necessary */
	if (enforce_sysfs &&
	    (!hugepage_flags_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
					   !hugepage_flags_always())))
		return false;

	/* Only regular file is valid */
	if (!in_pf && file_thp_enabled(vma))
		return true;

	if (!vma_is_anonymous(vma))
		return false;

	if (vma_is_temporary_stack(vma))
		return false;

	/*
	 * THPeligible bit of smaps should show 1 for proper VMAs even
	 * though anon_vma is not initialized yet.
	 *
	 * Allow page fault since anon_vma may not be initialized until
	 * the first page fault.
	 */
	if (!vma->anon_vma)
		return (smaps || in_pf);

	return true;
}

static bool get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return true;

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return false;
	}
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}
	WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));

	/* We take an additional reference here. It will be put back by the shrinker. */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	return true;
}

static void put_huge_zero_page(void)
{
	/*
	 * The counter should never go to zero here. Only the shrinker can
	 * put the last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

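/*
 * Per-mm caching of the global huge zero page: the first user in an mm
 * takes a reference and records it with MMF_HUGE_ZERO_PAGE, so later
 * lookups in the same mm can return the page without touching
 * huge_zero_refcount again.
 */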
struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}

void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free the zero page only if the last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);

		BUG_ON(zero_page == NULL);
		WRITE_ONCE(huge_zero_pfn, ~0UL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};

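/*
 * sysfs interface: the attributes below are registered under
 * /sys/kernel/mm/transparent_hugepage (mm_kobj) and control the
 * "enabled", "defrag" and "use_zero_page" policies at runtime.
 */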
#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		output = "[always] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always [madvise] never";
	else
		output = "always madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();

		if (err)
			ret = err;
	}
	return ret;
}

static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);

ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag)
{
	return sysfs_emit(buf, "%d\n",
			  !!test_bit(flag, &transparent_hugepage_flags));
}

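/*
 * Generic store helper for the single-bit THP flags: accepts "0" or "1"
 * and clears or sets the corresponding bit in transparent_hugepage_flags.
 */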
ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

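/*
 * The defrag knob is a five-way policy: at most one of the four defrag
 * bits is set at any time, and clearing all of them means "never".
 */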
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
		     &transparent_hugepage_flags))
		output = "[always] defer defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
			  &transparent_hugepage_flags))
		output = "always [defer] defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer [defer+madvise] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer defer+madvise [madvise] never";
	else
		output = "always defer defer+madvise madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer+madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);

static ssize_t use_zero_page_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);

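/*
 * Attribute table exported as a group on the transparent_hugepage
 * kobject; shmem_enabled is only included when CONFIG_SHMEM is set.
 */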
static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#ifdef CONFIG_SHMEM
	&shmem_enabled_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

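/*
 * One-time initialisation: check hardware support, create the sysfs
 * interface, start khugepaged and register the huge-zero-page and
 * deferred-split shrinkers. THP is left disabled on machines with less
 * than 512MB of RAM, where the extra footprint is likely to hurt more
 * than the TLB savings help.
 */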
static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_ORDER);
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = register_shrinker(&huge_zero_page_shrinker, "thp-zero");
	if (err)
		goto err_hzp_shrinker;
	err = register_shrinker(&deferred_split_shrinker, "thp-deferred_split");
	if (err)
		goto err_split_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save.  The admin can still enable it through /sys.
	 */
	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
	unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);

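/*
 * Early command-line parsing for "transparent_hugepage=": takes the
 * same "always", "madvise" and "never" keywords as the sysfs knob.
 */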
static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;

	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

#ifdef CONFIG_MEMCG
static inline
struct deferred_split *get_deferred_split_queue(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);
	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));

	if (memcg)
		return &memcg->deferred_split_queue;
	else
		return &pgdat->deferred_split_queue;
}
#else
static inline
struct deferred_split *get_deferred_split_queue(struct folio *folio)
{
	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));

	return &pgdat->deferred_split_queue;
}
#endif

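/*
 * Prepare a freshly allocated compound page for use as a THP:
 * initialise the deferred-split list head (which lives in the second
 * tail page, hence the order >= 2 assertion) and install the THP
 * compound destructor.
 */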
void prep_transhuge_page(struct page *page)
{
	struct folio *folio = (struct folio *)page;

	VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
	INIT_LIST_HEAD(&folio->_deferred_list);
	folio_set_compound_dtor(folio, TRANSHUGE_PAGE_DTOR);
}

static inline bool is_transparent_hugepage(struct page *page)
{
	struct folio *folio;

	if (!PageCompound(page))
		return false;

	folio = page_folio(page);
	return is_huge_zero_page(&folio->page) ||
	       folio->_folio_dtor == TRANSHUGE_PAGE_DTOR;
}

static unsigned long __thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size)
{
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad, ret;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
					     off >> PAGE_SHIFT, flags);

	/*
	 * The failure might be due to length padding. The caller will retry
	 * without the padding.
	 */
	if (IS_ERR_VALUE(ret))
		return 0;

	/*
	 * Do not try to align to THP boundary if allocation at the address
	 * hint succeeds.
	 */
	if (ret == addr)
		return addr;

	ret += (off - ret) & (size - 1);
	return ret;
}

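/*
 * Ask for a PMD_SIZE-aligned placement first, by padding the request
 * and then trimming, so that a later fault can map the range with a
 * huge pmd; fall back to the regular unmapped-area search when the
 * padded request cannot be satisfied.
 */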
unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long ret;
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
	if (ret)
		return ret;

	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);

static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
			struct page *page, gfp_t gfp)
{
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio = page_folio(page);
	pgtable_t pgtable;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	vm_fault_t ret = 0;

	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);

	if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
		folio_put(folio);
		count_vm_event(THP_FAULT_FALLBACK);
		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
		return VM_FAULT_FALLBACK;
	}
	folio_throttle_swaprate(folio, gfp);

	pgtable = pte_alloc_one(vma->vm_mm);
	if (unlikely(!pgtable)) {
		ret = VM_FAULT_OOM;
		goto release;
	}

	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__folio_mark_uptodate(folio);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_none(*vmf->pmd))) {
		goto unlock_release;
	} else {
		pmd_t entry;

		ret = check_stable_address_space(vma->vm_mm);
		if (ret)
			goto unlock_release;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			spin_unlock(vmf->ptl);
			folio_put(folio);
			pte_free(vma->vm_mm, pgtable);
			ret = handle_userfault(vmf, VM_UFFD_MISSING);
			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			return ret;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		folio_add_new_anon_rmap(folio, vma, haddr);
		folio_add_lru_vma(folio, vma);
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(vma->vm_mm);
		spin_unlock(vmf->ptl);
		count_vm_event(THP_FAULT_ALLOC);
		count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
	}

	return 0;
unlock_release:
	spin_unlock(vmf->ptl);
release:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	folio_put(folio);
	return ret;
}

/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */
gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
{
	const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);

	/* Always do synchronous compaction */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);

	/* Kick kcompactd and fail quickly */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;

	/* Synchronous compaction if madvised, otherwise kick kcompactd */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM :
					__GFP_KSWAPD_RECLAIM);

	/* Only do synchronous compaction if madvised */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
		       (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);

	return GFP_TRANSHUGE_LIGHT;
}

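/*
 * Map the shared huge zero page read-only at @haddr and deposit
 * @pgtable so a later split of the pmd has a page table ready.
 */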
/* Caller must hold page table lock. */
static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;

	if (!pmd_none(*pmd))
		return;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
}

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	gfp_t gfp;
	struct folio *folio;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

	if (!transhuge_vma_suitable(vma, haddr))
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	khugepaged_enter_vma(vma, vma->vm_flags);

	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
			!mm_forbids_zeropage(vma->vm_mm) &&
			transparent_hugepage_use_zero_page()) {
		pgtable_t pgtable;
		struct page *zero_page;
		vm_fault_t ret;

		pgtable = pte_alloc_one(vma->vm_mm);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = mm_get_huge_zero_page(vma->vm_mm);
		if (unlikely(!zero_page)) {
			pte_free(vma->vm_mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		ret = 0;
		if (pmd_none(*vmf->pmd)) {
			ret = check_stable_address_space(vma->vm_mm);
			if (ret) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
			} else if (userfaultfd_missing(vma)) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
				ret = handle_userfault(vmf, VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_page(pgtable, vma->vm_mm, vma,
						   haddr, vmf->pmd, zero_page);
				update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
				spin_unlock(vmf->ptl);
			}
		} else {
			spin_unlock(vmf->ptl);
			pte_free(vma->vm_mm, pgtable);
		}
		return ret;
	}
	gfp = vma_thp_gfp_mask(vma);
	folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
	if (unlikely(!folio)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
}

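/*
 * Install a pfn-based huge pmd for vmf_insert_pfn_pmd(). If an entry is
 * already present, only a write to a matching pfn upgrades the entry
 * (young + dirty); a mismatched pfn is left untouched.
 */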
static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
		pgtable_t pgtable)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!pmd_none(*pmd)) {
		if (write) {
			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
				goto out_unlock;
			}
			entry = pmd_mkyoung(*pmd);
			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
				update_mmu_cache_pmd(vma, addr, pmd);
		}

		goto out_unlock;
	}

	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pmd_mkdevmap(entry);
	if (write) {
		entry = pmd_mkyoung(pmd_mkdirty(entry));
		entry = maybe_pmd_mkwrite(entry, vma);
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		mm_inc_nr_ptes(mm);
		pgtable = NULL;
	}

	set_pmd_at(mm, addr, pmd, entry);
	update_mmu_cache_pmd(vma, addr, pmd);

out_unlock:
	spin_unlock(ptl);
	if (pgtable)
		pte_free(mm, pgtable);
}

/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
{
	unsigned long addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t pgprot = vma->vm_page_prot;
	pgtable_t pgtable = NULL;

	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);

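/*
 * PUD-sized variants of the pfn insertion helpers; only built on
 * architectures that select CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD.
 */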
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);
	return pud;
}

static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, pfn_t pfn, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	pgprot_t prot = vma->vm_page_prot;
	pud_t entry;
	spinlock_t *ptl;

	ptl = pud_lock(mm, pud);
	if (!pud_none(*pud)) {
		if (write) {
			if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pud(*pud));
				goto out_unlock;
			}
			entry = pud_mkyoung(*pud);
			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
				update_mmu_cache_pud(vma, addr, pud);
		}
		goto out_unlock;
	}

	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pud_mkdevmap(entry);
	if (write) {
		entry = pud_mkyoung(pud_mkdirty(entry));
		entry = maybe_pud_mkwrite(entry, vma);
	}
	set_pud_at(mm, addr, pud, entry);
	update_mmu_cache_pud(vma, addr, pud);

out_unlock:
	spin_unlock(ptl);
}

/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
{
	unsigned long addr = vmf->address & PUD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t pgprot = vma->vm_page_prot;

	/*
	 * If we had pud_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pud(vma, addr, vmf->pud, pfn, write);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

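/*
 * Mark a huge pmd young (and dirty on a write) for FOLL_TOUCH; the MMU
 * cache is only updated when the access bits actually changed.
 */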
static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		      pmd_t *pmd, bool write)
{
	pmd_t _pmd;

	_pmd = pmd_mkyoung(*pmd);
	if (write)
		_pmd = pmd_mkdirty(_pmd);
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				  pmd, _pmd, write))
		update_mmu_cache_pmd(vma, addr, pmd);
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pmd_pfn(*pmd);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int ret;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		return NULL;

	if (pmd_present(*pmd) && pmd_devmap(*pmd))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	ret = try_grab_page(page, flags);
	if (ret)
		page = ERR_PTR(ret);

	return page;
}

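/*
 * Copy a huge pmd from the parent to the child at fork(). Only
 * anonymous mappings are copied here; file-backed huge pmds are simply
 * refilled by a fault in the child.
 */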
Shutemov spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 108871e3aac0SAndrea Arcangeli 108971e3aac0SAndrea Arcangeli ret = -EAGAIN; 109071e3aac0SAndrea Arcangeli pmd = *src_pmd; 109184c3fc4eSZi Yan 109284c3fc4eSZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 109384c3fc4eSZi Yan if (unlikely(is_swap_pmd(pmd))) { 109484c3fc4eSZi Yan swp_entry_t entry = pmd_to_swp_entry(pmd); 109584c3fc4eSZi Yan 109684c3fc4eSZi Yan VM_BUG_ON(!is_pmd_migration_entry(pmd)); 10976c287605SDavid Hildenbrand if (!is_readable_migration_entry(entry)) { 10984dd845b5SAlistair Popple entry = make_readable_migration_entry( 10994dd845b5SAlistair Popple swp_offset(entry)); 110084c3fc4eSZi Yan pmd = swp_entry_to_pmd(entry); 1101ab6e3d09SNaoya Horiguchi if (pmd_swp_soft_dirty(*src_pmd)) 1102ab6e3d09SNaoya Horiguchi pmd = pmd_swp_mksoft_dirty(pmd); 11038f34f1eaSPeter Xu if (pmd_swp_uffd_wp(*src_pmd)) 11048f34f1eaSPeter Xu pmd = pmd_swp_mkuffd_wp(pmd); 110584c3fc4eSZi Yan set_pmd_at(src_mm, addr, src_pmd, pmd); 110684c3fc4eSZi Yan } 1107dd8a67f9SZi Yan add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); 1108af5b0f6aSKirill A. Shutemov mm_inc_nr_ptes(dst_mm); 1109dd8a67f9SZi Yan pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); 11108f34f1eaSPeter Xu if (!userfaultfd_wp(dst_vma)) 11118f34f1eaSPeter Xu pmd = pmd_swp_clear_uffd_wp(pmd); 111284c3fc4eSZi Yan set_pmd_at(dst_mm, addr, dst_pmd, pmd); 111384c3fc4eSZi Yan ret = 0; 111484c3fc4eSZi Yan goto out_unlock; 111584c3fc4eSZi Yan } 111684c3fc4eSZi Yan #endif 111784c3fc4eSZi Yan 1118628d47ceSKirill A. Shutemov if (unlikely(!pmd_trans_huge(pmd))) { 111971e3aac0SAndrea Arcangeli pte_free(dst_mm, pgtable); 112071e3aac0SAndrea Arcangeli goto out_unlock; 112171e3aac0SAndrea Arcangeli } 1122fc9fe822SKirill A. Shutemov /* 1123c4088ebdSKirill A. Shutemov * When page table lock is held, the huge zero pmd should not be 1124fc9fe822SKirill A. Shutemov * under splitting since we don't split the page itself, only pmd to 1125fc9fe822SKirill A. Shutemov * a page table. 1126fc9fe822SKirill A. Shutemov */ 1127fc9fe822SKirill A. Shutemov if (is_huge_zero_pmd(pmd)) { 112897ae1749SKirill A. Shutemov /* 112997ae1749SKirill A. Shutemov * get_huge_zero_page() will never allocate a new page here, 113097ae1749SKirill A. Shutemov * since we already have a zero page to copy. It just takes a 113197ae1749SKirill A. Shutemov * reference. 113297ae1749SKirill A. Shutemov */ 11335fc7a5f6SPeter Xu mm_get_huge_zero_page(dst_mm); 11345fc7a5f6SPeter Xu goto out_zero_page; 1135fc9fe822SKirill A. Shutemov } 1136de466bd6SMel Gorman 113771e3aac0SAndrea Arcangeli src_page = pmd_page(pmd); 1138309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(src_page), src_page); 1139d042035eSPeter Xu 1140fb3d824dSDavid Hildenbrand get_page(src_page); 1141fb3d824dSDavid Hildenbrand if (unlikely(page_try_dup_anon_rmap(src_page, true, src_vma))) { 1142fb3d824dSDavid Hildenbrand /* Page maybe pinned: split and retry the fault on PTEs. */ 1143fb3d824dSDavid Hildenbrand put_page(src_page); 1144d042035eSPeter Xu pte_free(dst_mm, pgtable); 1145d042035eSPeter Xu spin_unlock(src_ptl); 1146d042035eSPeter Xu spin_unlock(dst_ptl); 11478f34f1eaSPeter Xu __split_huge_pmd(src_vma, src_pmd, addr, false, NULL); 1148d042035eSPeter Xu return -EAGAIN; 1149d042035eSPeter Xu } 115071e3aac0SAndrea Arcangeli add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); 11515fc7a5f6SPeter Xu out_zero_page: 1152c4812909SKirill A. 
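	/*
	 * Shared tail for the huge-zero and regular anonymous paths: account
	 * and deposit the preallocated page table, then enter the pmd old and
	 * write-protected so the next write in either mm faults and breaks
	 * the sharing.
	 */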
Shutemov mm_inc_nr_ptes(dst_mm); 11535c7fb56eSDan Williams pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); 115471e3aac0SAndrea Arcangeli pmdp_set_wrprotect(src_mm, addr, src_pmd); 11558f34f1eaSPeter Xu if (!userfaultfd_wp(dst_vma)) 11568f34f1eaSPeter Xu pmd = pmd_clear_uffd_wp(pmd); 115771e3aac0SAndrea Arcangeli pmd = pmd_mkold(pmd_wrprotect(pmd)); 115871e3aac0SAndrea Arcangeli set_pmd_at(dst_mm, addr, dst_pmd, pmd); 115971e3aac0SAndrea Arcangeli 116071e3aac0SAndrea Arcangeli ret = 0; 116171e3aac0SAndrea Arcangeli out_unlock: 1162c4088ebdSKirill A. Shutemov spin_unlock(src_ptl); 1163c4088ebdSKirill A. Shutemov spin_unlock(dst_ptl); 116471e3aac0SAndrea Arcangeli out: 116571e3aac0SAndrea Arcangeli return ret; 116671e3aac0SAndrea Arcangeli } 116771e3aac0SAndrea Arcangeli 1168a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 1169a00cc7d9SMatthew Wilcox static void touch_pud(struct vm_area_struct *vma, unsigned long addr, 11705fe653e9SMiaohe Lin pud_t *pud, bool write) 1171a00cc7d9SMatthew Wilcox { 1172a00cc7d9SMatthew Wilcox pud_t _pud; 1173a00cc7d9SMatthew Wilcox 1174a8f97366SKirill A. Shutemov _pud = pud_mkyoung(*pud); 11755fe653e9SMiaohe Lin if (write) 1176a8f97366SKirill A. Shutemov _pud = pud_mkdirty(_pud); 1177a00cc7d9SMatthew Wilcox if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK, 11785fe653e9SMiaohe Lin pud, _pud, write)) 1179a00cc7d9SMatthew Wilcox update_mmu_cache_pud(vma, addr, pud); 1180a00cc7d9SMatthew Wilcox } 1181a00cc7d9SMatthew Wilcox 1182a00cc7d9SMatthew Wilcox struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, 1183df06b37fSKeith Busch pud_t *pud, int flags, struct dev_pagemap **pgmap) 1184a00cc7d9SMatthew Wilcox { 1185a00cc7d9SMatthew Wilcox unsigned long pfn = pud_pfn(*pud); 1186a00cc7d9SMatthew Wilcox struct mm_struct *mm = vma->vm_mm; 1187a00cc7d9SMatthew Wilcox struct page *page; 11880f089235SLogan Gunthorpe int ret; 1189a00cc7d9SMatthew Wilcox 1190a00cc7d9SMatthew Wilcox assert_spin_locked(pud_lockptr(mm, pud)); 1191a00cc7d9SMatthew Wilcox 1192f6f37321SLinus Torvalds if (flags & FOLL_WRITE && !pud_write(*pud)) 1193a00cc7d9SMatthew Wilcox return NULL; 1194a00cc7d9SMatthew Wilcox 1195a00cc7d9SMatthew Wilcox if (pud_present(*pud) && pud_devmap(*pud)) 1196a00cc7d9SMatthew Wilcox /* pass */; 1197a00cc7d9SMatthew Wilcox else 1198a00cc7d9SMatthew Wilcox return NULL; 1199a00cc7d9SMatthew Wilcox 1200a00cc7d9SMatthew Wilcox if (flags & FOLL_TOUCH) 12015fe653e9SMiaohe Lin touch_pud(vma, addr, pud, flags & FOLL_WRITE); 1202a00cc7d9SMatthew Wilcox 1203a00cc7d9SMatthew Wilcox /* 1204a00cc7d9SMatthew Wilcox * device mapped pages can only be returned if the 1205a00cc7d9SMatthew Wilcox * caller will manage the page reference count. 
12063faa52c0SJohn Hubbard * 12073faa52c0SJohn Hubbard * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here: 1208a00cc7d9SMatthew Wilcox */ 12093faa52c0SJohn Hubbard if (!(flags & (FOLL_GET | FOLL_PIN))) 1210a00cc7d9SMatthew Wilcox return ERR_PTR(-EEXIST); 1211a00cc7d9SMatthew Wilcox 1212a00cc7d9SMatthew Wilcox pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT; 1213df06b37fSKeith Busch *pgmap = get_dev_pagemap(pfn, *pgmap); 1214df06b37fSKeith Busch if (!*pgmap) 1215a00cc7d9SMatthew Wilcox return ERR_PTR(-EFAULT); 1216a00cc7d9SMatthew Wilcox page = pfn_to_page(pfn); 12170f089235SLogan Gunthorpe 12180f089235SLogan Gunthorpe ret = try_grab_page(page, flags); 12190f089235SLogan Gunthorpe if (ret) 12200f089235SLogan Gunthorpe page = ERR_PTR(ret); 1221a00cc7d9SMatthew Wilcox 1222a00cc7d9SMatthew Wilcox return page; 1223a00cc7d9SMatthew Wilcox } 1224a00cc7d9SMatthew Wilcox 1225a00cc7d9SMatthew Wilcox int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, 1226a00cc7d9SMatthew Wilcox pud_t *dst_pud, pud_t *src_pud, unsigned long addr, 1227a00cc7d9SMatthew Wilcox struct vm_area_struct *vma) 1228a00cc7d9SMatthew Wilcox { 1229a00cc7d9SMatthew Wilcox spinlock_t *dst_ptl, *src_ptl; 1230a00cc7d9SMatthew Wilcox pud_t pud; 1231a00cc7d9SMatthew Wilcox int ret; 1232a00cc7d9SMatthew Wilcox 1233a00cc7d9SMatthew Wilcox dst_ptl = pud_lock(dst_mm, dst_pud); 1234a00cc7d9SMatthew Wilcox src_ptl = pud_lockptr(src_mm, src_pud); 1235a00cc7d9SMatthew Wilcox spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 1236a00cc7d9SMatthew Wilcox 1237a00cc7d9SMatthew Wilcox ret = -EAGAIN; 1238a00cc7d9SMatthew Wilcox pud = *src_pud; 1239a00cc7d9SMatthew Wilcox if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud))) 1240a00cc7d9SMatthew Wilcox goto out_unlock; 1241a00cc7d9SMatthew Wilcox 1242a00cc7d9SMatthew Wilcox /* 1243a00cc7d9SMatthew Wilcox * When page table lock is held, the huge zero pud should not be 1244a00cc7d9SMatthew Wilcox * under splitting since we don't split the page itself, only pud to 1245a00cc7d9SMatthew Wilcox * a page table. 1246a00cc7d9SMatthew Wilcox */ 1247a00cc7d9SMatthew Wilcox if (is_huge_zero_pud(pud)) { 1248a00cc7d9SMatthew Wilcox /* No huge zero pud yet */ 1249a00cc7d9SMatthew Wilcox } 1250a00cc7d9SMatthew Wilcox 1251fb3d824dSDavid Hildenbrand /* 1252fb3d824dSDavid Hildenbrand * TODO: once we support anonymous pages, use page_try_dup_anon_rmap() 1253fb3d824dSDavid Hildenbrand * and split if duplicating fails. 
1254fb3d824dSDavid Hildenbrand */ 1255a00cc7d9SMatthew Wilcox pudp_set_wrprotect(src_mm, addr, src_pud); 1256a00cc7d9SMatthew Wilcox pud = pud_mkold(pud_wrprotect(pud)); 1257a00cc7d9SMatthew Wilcox set_pud_at(dst_mm, addr, dst_pud, pud); 1258a00cc7d9SMatthew Wilcox 1259a00cc7d9SMatthew Wilcox ret = 0; 1260a00cc7d9SMatthew Wilcox out_unlock: 1261a00cc7d9SMatthew Wilcox spin_unlock(src_ptl); 1262a00cc7d9SMatthew Wilcox spin_unlock(dst_ptl); 1263a00cc7d9SMatthew Wilcox return ret; 1264a00cc7d9SMatthew Wilcox } 1265a00cc7d9SMatthew Wilcox 1266a00cc7d9SMatthew Wilcox void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) 1267a00cc7d9SMatthew Wilcox { 1268a00cc7d9SMatthew Wilcox bool write = vmf->flags & FAULT_FLAG_WRITE; 1269a00cc7d9SMatthew Wilcox 1270a00cc7d9SMatthew Wilcox vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud); 1271a00cc7d9SMatthew Wilcox if (unlikely(!pud_same(*vmf->pud, orig_pud))) 1272a00cc7d9SMatthew Wilcox goto unlock; 1273a00cc7d9SMatthew Wilcox 12745fe653e9SMiaohe Lin touch_pud(vmf->vma, vmf->address, vmf->pud, write); 1275a00cc7d9SMatthew Wilcox unlock: 1276a00cc7d9SMatthew Wilcox spin_unlock(vmf->ptl); 1277a00cc7d9SMatthew Wilcox } 1278a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 1279a00cc7d9SMatthew Wilcox 12805db4f15cSYang Shi void huge_pmd_set_accessed(struct vm_fault *vmf) 1281a1dd450bSWill Deacon { 128220f664aaSMinchan Kim bool write = vmf->flags & FAULT_FLAG_WRITE; 1283a1dd450bSWill Deacon 128482b0f8c3SJan Kara vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); 1285a69e4717SMiaohe Lin if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd))) 1286a1dd450bSWill Deacon goto unlock; 1287a1dd450bSWill Deacon 1288a69e4717SMiaohe Lin touch_pmd(vmf->vma, vmf->address, vmf->pmd, write); 1289a1dd450bSWill Deacon 1290a1dd450bSWill Deacon unlock: 129182b0f8c3SJan Kara spin_unlock(vmf->ptl); 1292a1dd450bSWill Deacon } 1293a1dd450bSWill Deacon 12945db4f15cSYang Shi vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf) 129571e3aac0SAndrea Arcangeli { 1296c89357e2SDavid Hildenbrand const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 129782b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 12982fad3d14SMatthew Wilcox (Oracle) struct folio *folio; 12993917c802SKirill A. Shutemov struct page *page; 130082b0f8c3SJan Kara unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 13015db4f15cSYang Shi pmd_t orig_pmd = vmf->orig_pmd; 130271e3aac0SAndrea Arcangeli 130382b0f8c3SJan Kara vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); 130481d1b09cSSasha Levin VM_BUG_ON_VMA(!vma->anon_vma, vma); 13053917c802SKirill A. Shutemov 130693b4796dSKirill A. Shutemov if (is_huge_zero_pmd(orig_pmd)) 13073917c802SKirill A. Shutemov goto fallback; 13083917c802SKirill A. Shutemov 130982b0f8c3SJan Kara spin_lock(vmf->ptl); 13103917c802SKirill A. Shutemov 13113917c802SKirill A. Shutemov if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { 13123917c802SKirill A. Shutemov spin_unlock(vmf->ptl); 13133917c802SKirill A. Shutemov return 0; 13143917c802SKirill A. Shutemov } 131571e3aac0SAndrea Arcangeli 131671e3aac0SAndrea Arcangeli page = pmd_page(orig_pmd); 13172fad3d14SMatthew Wilcox (Oracle) folio = page_folio(page); 1318f6004e73SMiaohe Lin VM_BUG_ON_PAGE(!PageHead(page), page); 13193917c802SKirill A. Shutemov 13206c287605SDavid Hildenbrand /* Early check when only holding the PT lock. 
*/ 13216c287605SDavid Hildenbrand if (PageAnonExclusive(page)) 13226c287605SDavid Hildenbrand goto reuse; 13236c287605SDavid Hildenbrand 13242fad3d14SMatthew Wilcox (Oracle) if (!folio_trylock(folio)) { 13252fad3d14SMatthew Wilcox (Oracle) folio_get(folio); 1326ba3c4ce6SHuang Ying spin_unlock(vmf->ptl); 13272fad3d14SMatthew Wilcox (Oracle) folio_lock(folio); 1328ba3c4ce6SHuang Ying spin_lock(vmf->ptl); 1329ba3c4ce6SHuang Ying if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { 13303917c802SKirill A. Shutemov spin_unlock(vmf->ptl); 13312fad3d14SMatthew Wilcox (Oracle) folio_unlock(folio); 13322fad3d14SMatthew Wilcox (Oracle) folio_put(folio); 13333917c802SKirill A. Shutemov return 0; 1334ba3c4ce6SHuang Ying } 13352fad3d14SMatthew Wilcox (Oracle) folio_put(folio); 1336ba3c4ce6SHuang Ying } 13373917c802SKirill A. Shutemov 13386c287605SDavid Hildenbrand /* Recheck after temporarily dropping the PT lock. */ 13396c287605SDavid Hildenbrand if (PageAnonExclusive(page)) { 13402fad3d14SMatthew Wilcox (Oracle) folio_unlock(folio); 13416c287605SDavid Hildenbrand goto reuse; 13426c287605SDavid Hildenbrand } 13436c287605SDavid Hildenbrand 13443917c802SKirill A. Shutemov /* 13452fad3d14SMatthew Wilcox (Oracle) * See do_wp_page(): we can only reuse the folio exclusively if 13462fad3d14SMatthew Wilcox (Oracle) * there are no additional references. Note that we always drain 13472fad3d14SMatthew Wilcox (Oracle) * the LRU pagevecs immediately after adding a THP. 13483917c802SKirill A. Shutemov */ 13492fad3d14SMatthew Wilcox (Oracle) if (folio_ref_count(folio) > 13502fad3d14SMatthew Wilcox (Oracle) 1 + folio_test_swapcache(folio) * folio_nr_pages(folio)) 13513bff7e3fSDavid Hildenbrand goto unlock_fallback; 13522fad3d14SMatthew Wilcox (Oracle) if (folio_test_swapcache(folio)) 13532fad3d14SMatthew Wilcox (Oracle) folio_free_swap(folio); 13542fad3d14SMatthew Wilcox (Oracle) if (folio_ref_count(folio) == 1) { 135571e3aac0SAndrea Arcangeli pmd_t entry; 13566c54dc6cSDavid Hildenbrand 13576c54dc6cSDavid Hildenbrand page_move_anon_rmap(page, vma); 13582fad3d14SMatthew Wilcox (Oracle) folio_unlock(folio); 13596c287605SDavid Hildenbrand reuse: 1360c89357e2SDavid Hildenbrand if (unlikely(unshare)) { 1361c89357e2SDavid Hildenbrand spin_unlock(vmf->ptl); 1362c89357e2SDavid Hildenbrand return 0; 1363c89357e2SDavid Hildenbrand } 136471e3aac0SAndrea Arcangeli entry = pmd_mkyoung(orig_pmd); 1365f55e1014SLinus Torvalds entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 136682b0f8c3SJan Kara if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) 136782b0f8c3SJan Kara update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 13683917c802SKirill A. Shutemov spin_unlock(vmf->ptl); 1369cb8d8633SDavid Hildenbrand return 0; 137071e3aac0SAndrea Arcangeli } 13713917c802SKirill A. Shutemov 13723bff7e3fSDavid Hildenbrand unlock_fallback: 13732fad3d14SMatthew Wilcox (Oracle) folio_unlock(folio); 137482b0f8c3SJan Kara spin_unlock(vmf->ptl); 13753917c802SKirill A. Shutemov fallback: 13763917c802SKirill A. Shutemov __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); 13773917c802SKirill A. 
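	/*
	 * The pmd has been remapped as ptes; VM_FAULT_FALLBACK tells the
	 * caller to retry the write fault at pte granularity, where
	 * do_wp_page() can copy a single base page instead of a whole THP.
	 */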
Shutemov return VM_FAULT_FALLBACK; 137871e3aac0SAndrea Arcangeli } 137971e3aac0SAndrea Arcangeli 1380c27f479eSDavid Hildenbrand static inline bool can_change_pmd_writable(struct vm_area_struct *vma, 1381c27f479eSDavid Hildenbrand unsigned long addr, pmd_t pmd) 1382c27f479eSDavid Hildenbrand { 1383c27f479eSDavid Hildenbrand struct page *page; 1384c27f479eSDavid Hildenbrand 1385c27f479eSDavid Hildenbrand if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE))) 1386c27f479eSDavid Hildenbrand return false; 1387c27f479eSDavid Hildenbrand 1388c27f479eSDavid Hildenbrand /* Don't touch entries that are not even readable (NUMA hinting). */ 1389c27f479eSDavid Hildenbrand if (pmd_protnone(pmd)) 1390c27f479eSDavid Hildenbrand return false; 1391c27f479eSDavid Hildenbrand 1392c27f479eSDavid Hildenbrand /* Do we need write faults for softdirty tracking? */ 1393c27f479eSDavid Hildenbrand if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd)) 1394c27f479eSDavid Hildenbrand return false; 1395c27f479eSDavid Hildenbrand 1396c27f479eSDavid Hildenbrand /* Do we need write faults for uffd-wp tracking? */ 1397c27f479eSDavid Hildenbrand if (userfaultfd_huge_pmd_wp(vma, pmd)) 1398c27f479eSDavid Hildenbrand return false; 1399c27f479eSDavid Hildenbrand 1400c27f479eSDavid Hildenbrand if (!(vma->vm_flags & VM_SHARED)) { 1401c27f479eSDavid Hildenbrand /* See can_change_pte_writable(). */ 1402c27f479eSDavid Hildenbrand page = vm_normal_page_pmd(vma, addr, pmd); 1403c27f479eSDavid Hildenbrand return page && PageAnon(page) && PageAnonExclusive(page); 1404c27f479eSDavid Hildenbrand } 1405c27f479eSDavid Hildenbrand 1406c27f479eSDavid Hildenbrand /* See can_change_pte_writable(). */ 1407c27f479eSDavid Hildenbrand return pmd_dirty(pmd); 1408c27f479eSDavid Hildenbrand } 1409c27f479eSDavid Hildenbrand 14105535be30SDavid Hildenbrand /* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */ 14115535be30SDavid Hildenbrand static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page, 14125535be30SDavid Hildenbrand struct vm_area_struct *vma, 14135535be30SDavid Hildenbrand unsigned int flags) 14148310d48bSKeno Fischer { 14155535be30SDavid Hildenbrand /* If the pmd is writable, we can write to the page. */ 14165535be30SDavid Hildenbrand if (pmd_write(pmd)) 14175535be30SDavid Hildenbrand return true; 14185535be30SDavid Hildenbrand 14195535be30SDavid Hildenbrand /* Maybe FOLL_FORCE is set to override it? */ 14205535be30SDavid Hildenbrand if (!(flags & FOLL_FORCE)) 14215535be30SDavid Hildenbrand return false; 14225535be30SDavid Hildenbrand 14235535be30SDavid Hildenbrand /* But FOLL_FORCE has no effect on shared mappings */ 14245535be30SDavid Hildenbrand if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) 14255535be30SDavid Hildenbrand return false; 14265535be30SDavid Hildenbrand 14275535be30SDavid Hildenbrand /* ... or read-only private ones */ 14285535be30SDavid Hildenbrand if (!(vma->vm_flags & VM_MAYWRITE)) 14295535be30SDavid Hildenbrand return false; 14305535be30SDavid Hildenbrand 14315535be30SDavid Hildenbrand /* ... or already writable ones that just need to take a write fault */ 14325535be30SDavid Hildenbrand if (vma->vm_flags & VM_WRITE) 14335535be30SDavid Hildenbrand return false; 14345535be30SDavid Hildenbrand 14355535be30SDavid Hildenbrand /* 14365535be30SDavid Hildenbrand * See can_change_pte_writable(): we broke COW and could map the page 14375535be30SDavid Hildenbrand * writable if we have an exclusive anonymous page ... 
14385535be30SDavid Hildenbrand */ 14395535be30SDavid Hildenbrand if (!page || !PageAnon(page) || !PageAnonExclusive(page)) 14405535be30SDavid Hildenbrand return false; 14415535be30SDavid Hildenbrand 14425535be30SDavid Hildenbrand /* ... and a write-fault isn't required for other reasons. */ 14435535be30SDavid Hildenbrand if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd)) 14445535be30SDavid Hildenbrand return false; 14455535be30SDavid Hildenbrand return !userfaultfd_huge_pmd_wp(vma, pmd); 14468310d48bSKeno Fischer } 14478310d48bSKeno Fischer 1448b676b293SDavid Rientjes struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, 144971e3aac0SAndrea Arcangeli unsigned long addr, 145071e3aac0SAndrea Arcangeli pmd_t *pmd, 145171e3aac0SAndrea Arcangeli unsigned int flags) 145271e3aac0SAndrea Arcangeli { 1453b676b293SDavid Rientjes struct mm_struct *mm = vma->vm_mm; 14545535be30SDavid Hildenbrand struct page *page; 14550f089235SLogan Gunthorpe int ret; 145671e3aac0SAndrea Arcangeli 1457c4088ebdSKirill A. Shutemov assert_spin_locked(pmd_lockptr(mm, pmd)); 145871e3aac0SAndrea Arcangeli 14595535be30SDavid Hildenbrand page = pmd_page(*pmd); 14605535be30SDavid Hildenbrand VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page); 14615535be30SDavid Hildenbrand 14625535be30SDavid Hildenbrand if ((flags & FOLL_WRITE) && 14635535be30SDavid Hildenbrand !can_follow_write_pmd(*pmd, page, vma, flags)) 14645535be30SDavid Hildenbrand return NULL; 146571e3aac0SAndrea Arcangeli 146685facf25SKirill A. Shutemov /* Avoid dumping huge zero page */ 146785facf25SKirill A. Shutemov if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) 146885facf25SKirill A. Shutemov return ERR_PTR(-EFAULT); 146985facf25SKirill A. Shutemov 14702b4847e7SMel Gorman /* Full NUMA hinting faults to serialise migration in fault paths */ 1471474098edSDavid Hildenbrand if (pmd_protnone(*pmd) && !gup_can_follow_protnone(flags)) 14725535be30SDavid Hildenbrand return NULL; 14733faa52c0SJohn Hubbard 147484209e87SDavid Hildenbrand if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page)) 1475a7f22660SDavid Hildenbrand return ERR_PTR(-EMLINK); 1476a7f22660SDavid Hildenbrand 1477b6a2619cSDavid Hildenbrand VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) && 1478b6a2619cSDavid Hildenbrand !PageAnonExclusive(page), page); 1479b6a2619cSDavid Hildenbrand 14800f089235SLogan Gunthorpe ret = try_grab_page(page, flags); 14810f089235SLogan Gunthorpe if (ret) 14820f089235SLogan Gunthorpe return ERR_PTR(ret); 14833faa52c0SJohn Hubbard 14843565fce3SDan Williams if (flags & FOLL_TOUCH) 1485a69e4717SMiaohe Lin touch_pmd(vma, addr, pmd, flags & FOLL_WRITE); 14863faa52c0SJohn Hubbard 148771e3aac0SAndrea Arcangeli page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; 1488ca120cf6SDan Williams VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page); 148971e3aac0SAndrea Arcangeli 149071e3aac0SAndrea Arcangeli return page; 149171e3aac0SAndrea Arcangeli } 149271e3aac0SAndrea Arcangeli 1493d10e63f2SMel Gorman /* NUMA hinting page fault entry point for trans huge pmds */ 14945db4f15cSYang Shi vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) 1495d10e63f2SMel Gorman { 149682b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 1497c5b5a3ddSYang Shi pmd_t oldpmd = vmf->orig_pmd; 1498c5b5a3ddSYang Shi pmd_t pmd; 1499b32967ffSMel Gorman struct page *page; 150082b0f8c3SJan Kara unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 1501c5b5a3ddSYang Shi int page_nid = NUMA_NO_NODE; 150233024536SHuang Ying int target_nid, last_cpupid = (-1 & 
LAST_CPUPID_MASK); 15036a56ccbcSDavid Hildenbrand bool migrated = false, writable = false; 15046688cc05SPeter Zijlstra int flags = 0; 1505d10e63f2SMel Gorman 150682b0f8c3SJan Kara vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 1507c5b5a3ddSYang Shi if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) { 150882b0f8c3SJan Kara spin_unlock(vmf->ptl); 1509de466bd6SMel Gorman goto out; 1510de466bd6SMel Gorman } 1511de466bd6SMel Gorman 1512c5b5a3ddSYang Shi pmd = pmd_modify(oldpmd, vma->vm_page_prot); 15136a56ccbcSDavid Hildenbrand 15146a56ccbcSDavid Hildenbrand /* 15156a56ccbcSDavid Hildenbrand * Detect now whether the PMD could be writable; this information 15166a56ccbcSDavid Hildenbrand * is only valid while holding the PT lock. 15176a56ccbcSDavid Hildenbrand */ 15186a56ccbcSDavid Hildenbrand writable = pmd_write(pmd); 15196a56ccbcSDavid Hildenbrand if (!writable && vma_wants_manual_pte_write_upgrade(vma) && 15206a56ccbcSDavid Hildenbrand can_change_pmd_writable(vma, vmf->address, pmd)) 15216a56ccbcSDavid Hildenbrand writable = true; 15226a56ccbcSDavid Hildenbrand 1523c5b5a3ddSYang Shi page = vm_normal_page_pmd(vma, haddr, pmd); 1524c5b5a3ddSYang Shi if (!page) 1525c5b5a3ddSYang Shi goto out_map; 1526c5b5a3ddSYang Shi 1527c5b5a3ddSYang Shi /* See similar comment in do_numa_page for explanation */ 15286a56ccbcSDavid Hildenbrand if (!writable) 1529c5b5a3ddSYang Shi flags |= TNF_NO_GROUP; 1530c5b5a3ddSYang Shi 1531c5b5a3ddSYang Shi page_nid = page_to_nid(page); 153233024536SHuang Ying /* 153333024536SHuang Ying * For memory tiering mode, cpupid of slow memory page is used 153433024536SHuang Ying * to record page access time. So use default value. 153533024536SHuang Ying */ 153633024536SHuang Ying if (node_is_toptier(page_nid)) 1537c5b5a3ddSYang Shi last_cpupid = page_cpupid_last(page); 1538c5b5a3ddSYang Shi target_nid = numa_migrate_prep(page, vma, haddr, page_nid, 1539c5b5a3ddSYang Shi &flags); 1540c5b5a3ddSYang Shi 1541c5b5a3ddSYang Shi if (target_nid == NUMA_NO_NODE) { 1542c5b5a3ddSYang Shi put_page(page); 1543c5b5a3ddSYang Shi goto out_map; 1544c5b5a3ddSYang Shi } 1545c5b5a3ddSYang Shi 154682b0f8c3SJan Kara spin_unlock(vmf->ptl); 15476a56ccbcSDavid Hildenbrand writable = false; 15488b1b436dSPeter Zijlstra 1549c5b5a3ddSYang Shi migrated = migrate_misplaced_page(page, vma, target_nid); 15506688cc05SPeter Zijlstra if (migrated) { 15516688cc05SPeter Zijlstra flags |= TNF_MIGRATED; 15528191acbdSMel Gorman page_nid = target_nid; 1553c5b5a3ddSYang Shi } else { 1554074c2381SMel Gorman flags |= TNF_MIGRATE_FAIL; 1555c5b5a3ddSYang Shi vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 1556c5b5a3ddSYang Shi if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) { 155782b0f8c3SJan Kara spin_unlock(vmf->ptl); 1558c5b5a3ddSYang Shi goto out; 1559c5b5a3ddSYang Shi } 1560c5b5a3ddSYang Shi goto out_map; 1561c5b5a3ddSYang Shi } 1562b8916634SMel Gorman 1563b8916634SMel Gorman out: 156498fa15f3SAnshuman Khandual if (page_nid != NUMA_NO_NODE) 156582b0f8c3SJan Kara task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, 15669a8b300fSAneesh Kumar K.V flags); 15678191acbdSMel Gorman 1568d10e63f2SMel Gorman return 0; 1569c5b5a3ddSYang Shi 1570c5b5a3ddSYang Shi out_map: 1571c5b5a3ddSYang Shi /* Restore the PMD */ 1572c5b5a3ddSYang Shi pmd = pmd_modify(oldpmd, vma->vm_page_prot); 1573c5b5a3ddSYang Shi pmd = pmd_mkyoung(pmd); 15746a56ccbcSDavid Hildenbrand if (writable) 1575c5b5a3ddSYang Shi pmd = pmd_mkwrite(pmd); 1576c5b5a3ddSYang Shi set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); 1577c5b5a3ddSYang Shi update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 
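	/*
	 * The hinting fault is consumed: the pmd is present again, made
	 * writable when that is known to be safe, and the faulting access
	 * can simply be retried.
	 */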
1578c5b5a3ddSYang Shi spin_unlock(vmf->ptl); 1579c5b5a3ddSYang Shi goto out; 1580d10e63f2SMel Gorman } 1581d10e63f2SMel Gorman 1582319904adSHuang Ying /* 1583319904adSHuang Ying * Return true if we do MADV_FREE successfully on entire pmd page. 1584319904adSHuang Ying * Otherwise, return false. 1585319904adSHuang Ying */ 1586319904adSHuang Ying bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1587b8d3c4c3SMinchan Kim pmd_t *pmd, unsigned long addr, unsigned long next) 1588b8d3c4c3SMinchan Kim { 1589b8d3c4c3SMinchan Kim spinlock_t *ptl; 1590b8d3c4c3SMinchan Kim pmd_t orig_pmd; 1591fc986a38SKefeng Wang struct folio *folio; 1592b8d3c4c3SMinchan Kim struct mm_struct *mm = tlb->mm; 1593319904adSHuang Ying bool ret = false; 1594b8d3c4c3SMinchan Kim 1595ed6a7935SPeter Zijlstra tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 159607e32661SAneesh Kumar K.V 1597b6ec57f4SKirill A. Shutemov ptl = pmd_trans_huge_lock(pmd, vma); 1598b6ec57f4SKirill A. Shutemov if (!ptl) 159925eedabeSLinus Torvalds goto out_unlocked; 1600b8d3c4c3SMinchan Kim 1601b8d3c4c3SMinchan Kim orig_pmd = *pmd; 1602319904adSHuang Ying if (is_huge_zero_pmd(orig_pmd)) 1603b8d3c4c3SMinchan Kim goto out; 1604b8d3c4c3SMinchan Kim 160584c3fc4eSZi Yan if (unlikely(!pmd_present(orig_pmd))) { 160684c3fc4eSZi Yan VM_BUG_ON(thp_migration_supported() && 160784c3fc4eSZi Yan !is_pmd_migration_entry(orig_pmd)); 160884c3fc4eSZi Yan goto out; 160984c3fc4eSZi Yan } 161084c3fc4eSZi Yan 1611fc986a38SKefeng Wang folio = pfn_folio(pmd_pfn(orig_pmd)); 1612b8d3c4c3SMinchan Kim /* 1613fc986a38SKefeng Wang * If other processes are mapping this folio, we couldn't discard 1614fc986a38SKefeng Wang * the folio unless they all do MADV_FREE so let's skip the folio. 1615b8d3c4c3SMinchan Kim */ 1616fc986a38SKefeng Wang if (folio_mapcount(folio) != 1) 1617b8d3c4c3SMinchan Kim goto out; 1618b8d3c4c3SMinchan Kim 1619fc986a38SKefeng Wang if (!folio_trylock(folio)) 1620b8d3c4c3SMinchan Kim goto out; 1621b8d3c4c3SMinchan Kim 1622b8d3c4c3SMinchan Kim /* 1623b8d3c4c3SMinchan Kim * If user want to discard part-pages of THP, split it so MADV_FREE 1624b8d3c4c3SMinchan Kim * will deactivate only them. 1625b8d3c4c3SMinchan Kim */ 1626b8d3c4c3SMinchan Kim if (next - addr != HPAGE_PMD_SIZE) { 1627fc986a38SKefeng Wang folio_get(folio); 1628b8d3c4c3SMinchan Kim spin_unlock(ptl); 1629fc986a38SKefeng Wang split_folio(folio); 1630fc986a38SKefeng Wang folio_unlock(folio); 1631fc986a38SKefeng Wang folio_put(folio); 1632b8d3c4c3SMinchan Kim goto out_unlocked; 1633b8d3c4c3SMinchan Kim } 1634b8d3c4c3SMinchan Kim 1635fc986a38SKefeng Wang if (folio_test_dirty(folio)) 1636fc986a38SKefeng Wang folio_clear_dirty(folio); 1637fc986a38SKefeng Wang folio_unlock(folio); 1638b8d3c4c3SMinchan Kim 1639b8d3c4c3SMinchan Kim if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) { 164058ceeb6bSKirill A. 
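	/*
	 * Clear young/dirty via invalidate-and-rewrite so that a racing
	 * hardware A/D-bit update cannot land on the live entry and then be
	 * silently overwritten by set_pmd_at() below.
	 */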
Shutemov pmdp_invalidate(vma, addr, pmd); 1641b8d3c4c3SMinchan Kim orig_pmd = pmd_mkold(orig_pmd); 1642b8d3c4c3SMinchan Kim orig_pmd = pmd_mkclean(orig_pmd); 1643b8d3c4c3SMinchan Kim 1644b8d3c4c3SMinchan Kim set_pmd_at(mm, addr, pmd, orig_pmd); 1645b8d3c4c3SMinchan Kim tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 1646b8d3c4c3SMinchan Kim } 1647802a3a92SShaohua Li 16486a6fe9ebSKefeng Wang folio_mark_lazyfree(folio); 1649319904adSHuang Ying ret = true; 1650b8d3c4c3SMinchan Kim out: 1651b8d3c4c3SMinchan Kim spin_unlock(ptl); 1652b8d3c4c3SMinchan Kim out_unlocked: 1653b8d3c4c3SMinchan Kim return ret; 1654b8d3c4c3SMinchan Kim } 1655b8d3c4c3SMinchan Kim 1656953c66c2SAneesh Kumar K.V static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) 1657953c66c2SAneesh Kumar K.V { 1658953c66c2SAneesh Kumar K.V pgtable_t pgtable; 1659953c66c2SAneesh Kumar K.V 1660953c66c2SAneesh Kumar K.V pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1661953c66c2SAneesh Kumar K.V pte_free(mm, pgtable); 1662c4812909SKirill A. Shutemov mm_dec_nr_ptes(mm); 1663953c66c2SAneesh Kumar K.V } 1664953c66c2SAneesh Kumar K.V 166571e3aac0SAndrea Arcangeli int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1666f21760b1SShaohua Li pmd_t *pmd, unsigned long addr) 166771e3aac0SAndrea Arcangeli { 1668f5c8ad47SDavid Miller pmd_t orig_pmd; 1669da146769SKirill A. Shutemov spinlock_t *ptl; 1670da146769SKirill A. Shutemov 1671ed6a7935SPeter Zijlstra tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 167207e32661SAneesh Kumar K.V 1673b6ec57f4SKirill A. Shutemov ptl = __pmd_trans_huge_lock(pmd, vma); 1674b6ec57f4SKirill A. Shutemov if (!ptl) 1675da146769SKirill A. Shutemov return 0; 1676a6bf2bb0SAneesh Kumar K.V /* 1677a6bf2bb0SAneesh Kumar K.V * For architectures like ppc64 we look at deposited pgtable 16788809aa2dSAneesh Kumar K.V * when calling pmdp_huge_get_and_clear. So do the 1679a6bf2bb0SAneesh Kumar K.V * pgtable_trans_huge_withdraw after finishing pmdp related 1680a6bf2bb0SAneesh Kumar K.V * operations. 1681a6bf2bb0SAneesh Kumar K.V */ 168293a98695SAneesh Kumar K.V orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd, 1683fcbe08d6SMartin Schwidefsky tlb->fullmm); 1684f21760b1SShaohua Li tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 16852484ca9bSThomas Hellstrom (VMware) if (vma_is_special_huge(vma)) { 16863b6521f5SOliver O'Halloran if (arch_needs_pgtable_deposit()) 16873b6521f5SOliver O'Halloran zap_deposited_table(tlb->mm, pmd); 16884897c765SMatthew Wilcox spin_unlock(ptl); 1689da146769SKirill A. Shutemov } else if (is_huge_zero_pmd(orig_pmd)) { 1690c14a6eb4SOliver O'Halloran zap_deposited_table(tlb->mm, pmd); 1691bf929152SKirill A. Shutemov spin_unlock(ptl); 1692479f0abbSKirill A. 
Shutemov } else { 1693616b8371SZi Yan struct page *page = NULL; 1694616b8371SZi Yan int flush_needed = 1; 1695616b8371SZi Yan 1696616b8371SZi Yan if (pmd_present(orig_pmd)) { 1697616b8371SZi Yan page = pmd_page(orig_pmd); 1698cea86fe2SHugh Dickins page_remove_rmap(page, vma, true); 1699309381feSSasha Levin VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); 1700309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(page), page); 1701616b8371SZi Yan } else if (thp_migration_supported()) { 1702616b8371SZi Yan swp_entry_t entry; 1703616b8371SZi Yan 1704616b8371SZi Yan VM_BUG_ON(!is_pmd_migration_entry(orig_pmd)); 1705616b8371SZi Yan entry = pmd_to_swp_entry(orig_pmd); 1706af5cdaf8SAlistair Popple page = pfn_swap_entry_to_page(entry); 1707616b8371SZi Yan flush_needed = 0; 1708616b8371SZi Yan } else 1709616b8371SZi Yan WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); 1710616b8371SZi Yan 1711b5072380SKirill A. Shutemov if (PageAnon(page)) { 1712c14a6eb4SOliver O'Halloran zap_deposited_table(tlb->mm, pmd); 1713b5072380SKirill A. Shutemov add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); 1714b5072380SKirill A. Shutemov } else { 1715953c66c2SAneesh Kumar K.V if (arch_needs_pgtable_deposit()) 1716953c66c2SAneesh Kumar K.V zap_deposited_table(tlb->mm, pmd); 1717fadae295SYang Shi add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR); 1718b5072380SKirill A. Shutemov } 1719616b8371SZi Yan 1720bf929152SKirill A. Shutemov spin_unlock(ptl); 1721616b8371SZi Yan if (flush_needed) 1722e77b0852SAneesh Kumar K.V tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE); 1723479f0abbSKirill A. Shutemov } 1724da146769SKirill A. Shutemov return 1; 172571e3aac0SAndrea Arcangeli } 172671e3aac0SAndrea Arcangeli 17271dd38b6cSAneesh Kumar K.V #ifndef pmd_move_must_withdraw 17281dd38b6cSAneesh Kumar K.V static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl, 17291dd38b6cSAneesh Kumar K.V spinlock_t *old_pmd_ptl, 17301dd38b6cSAneesh Kumar K.V struct vm_area_struct *vma) 17311dd38b6cSAneesh Kumar K.V { 17321dd38b6cSAneesh Kumar K.V /* 17331dd38b6cSAneesh Kumar K.V * With split pmd lock we also need to move preallocated 17341dd38b6cSAneesh Kumar K.V * PTE page table if new_pmd is on different PMD page table. 17351dd38b6cSAneesh Kumar K.V * 17361dd38b6cSAneesh Kumar K.V * We also don't deposit and withdraw tables for file pages. 17371dd38b6cSAneesh Kumar K.V */ 17381dd38b6cSAneesh Kumar K.V return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); 17391dd38b6cSAneesh Kumar K.V } 17401dd38b6cSAneesh Kumar K.V #endif 17411dd38b6cSAneesh Kumar K.V 1742ab6e3d09SNaoya Horiguchi static pmd_t move_soft_dirty_pmd(pmd_t pmd) 1743ab6e3d09SNaoya Horiguchi { 1744ab6e3d09SNaoya Horiguchi #ifdef CONFIG_MEM_SOFT_DIRTY 1745ab6e3d09SNaoya Horiguchi if (unlikely(is_pmd_migration_entry(pmd))) 1746ab6e3d09SNaoya Horiguchi pmd = pmd_swp_mksoft_dirty(pmd); 1747ab6e3d09SNaoya Horiguchi else if (pmd_present(pmd)) 1748ab6e3d09SNaoya Horiguchi pmd = pmd_mksoft_dirty(pmd); 1749ab6e3d09SNaoya Horiguchi #endif 1750ab6e3d09SNaoya Horiguchi return pmd; 1751ab6e3d09SNaoya Horiguchi } 1752ab6e3d09SNaoya Horiguchi 1753bf8616d5SHugh Dickins bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, 1754b8aa9d9dSWei Yang unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd) 175537a1c49aSAndrea Arcangeli { 1756bf929152SKirill A. 
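	/*
	 * mremap() path: move an entire huge pmd from old_addr to new_addr
	 * without splitting it. Returns false when the caller must fall back
	 * to splitting and moving individual ptes.
	 */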
Shutemov spinlock_t *old_ptl, *new_ptl; 175737a1c49aSAndrea Arcangeli pmd_t pmd; 175837a1c49aSAndrea Arcangeli struct mm_struct *mm = vma->vm_mm; 17595d190420SAaron Lu bool force_flush = false; 176037a1c49aSAndrea Arcangeli 176137a1c49aSAndrea Arcangeli /* 176237a1c49aSAndrea Arcangeli * The destination pmd shouldn't be established, free_pgtables() 1763a5be621eSHugh Dickins * should have released it; but move_page_tables() might have already 1764a5be621eSHugh Dickins * inserted a page table, if racing against shmem/file collapse. 176537a1c49aSAndrea Arcangeli */ 1766a5be621eSHugh Dickins if (!pmd_none(*new_pmd)) { 176737a1c49aSAndrea Arcangeli VM_BUG_ON(pmd_trans_huge(*new_pmd)); 17684b471e88SKirill A. Shutemov return false; 176937a1c49aSAndrea Arcangeli } 177037a1c49aSAndrea Arcangeli 1771bf929152SKirill A. Shutemov /* 1772bf929152SKirill A. Shutemov * We don't have to worry about the ordering of src and dst 1773c1e8d7c6SMichel Lespinasse * ptlocks because exclusive mmap_lock prevents deadlock. 1774bf929152SKirill A. Shutemov */ 1775b6ec57f4SKirill A. Shutemov old_ptl = __pmd_trans_huge_lock(old_pmd, vma); 1776b6ec57f4SKirill A. Shutemov if (old_ptl) { 1777bf929152SKirill A. Shutemov new_ptl = pmd_lockptr(mm, new_pmd); 1778bf929152SKirill A. Shutemov if (new_ptl != old_ptl) 1779bf929152SKirill A. Shutemov spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); 17808809aa2dSAneesh Kumar K.V pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); 1781eb66ae03SLinus Torvalds if (pmd_present(pmd)) 1782a2ce2666SAaron Lu force_flush = true; 178337a1c49aSAndrea Arcangeli VM_BUG_ON(!pmd_none(*new_pmd)); 17843592806cSKirill A. Shutemov 17851dd38b6cSAneesh Kumar K.V if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) { 1786b3084f4dSAneesh Kumar K.V pgtable_t pgtable; 17873592806cSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); 17883592806cSKirill A. Shutemov pgtable_trans_huge_deposit(mm, new_pmd, pgtable); 17893592806cSKirill A. Shutemov } 1790ab6e3d09SNaoya Horiguchi pmd = move_soft_dirty_pmd(pmd); 1791ab6e3d09SNaoya Horiguchi set_pmd_at(mm, new_addr, new_pmd, pmd); 17925d190420SAaron Lu if (force_flush) 17937c38f181SMiaohe Lin flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE); 1794eb66ae03SLinus Torvalds if (new_ptl != old_ptl) 1795eb66ae03SLinus Torvalds spin_unlock(new_ptl); 1796bf929152SKirill A. Shutemov spin_unlock(old_ptl); 17974b471e88SKirill A. Shutemov return true; 179837a1c49aSAndrea Arcangeli } 17994b471e88SKirill A. Shutemov return false; 180037a1c49aSAndrea Arcangeli } 180137a1c49aSAndrea Arcangeli 1802f123d74aSMel Gorman /* 1803f123d74aSMel Gorman * Returns 1804f123d74aSMel Gorman * - 0 if PMD could not be locked 1805f0953a1bSIngo Molnar * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary 1806e346e668SYang Shi * or if prot_numa but THP migration is not supported 1807f0953a1bSIngo Molnar * - HPAGE_PMD_NR if protections changed and TLB flush necessary 1808f123d74aSMel Gorman */ 18094a18419fSNadav Amit int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 18104a18419fSNadav Amit pmd_t *pmd, unsigned long addr, pgprot_t newprot, 18114a18419fSNadav Amit unsigned long cp_flags) 1812cd7548abSJohannes Weiner { 1813cd7548abSJohannes Weiner struct mm_struct *mm = vma->vm_mm; 1814bf929152SKirill A. 
Shutemov spinlock_t *ptl; 1815c9fe6656SNadav Amit pmd_t oldpmd, entry; 181658705444SPeter Xu bool prot_numa = cp_flags & MM_CP_PROT_NUMA; 1817292924b2SPeter Xu bool uffd_wp = cp_flags & MM_CP_UFFD_WP; 1818292924b2SPeter Xu bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; 18196a56ccbcSDavid Hildenbrand int ret = 1; 1820cd7548abSJohannes Weiner 18214a18419fSNadav Amit tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 18224a18419fSNadav Amit 1823e346e668SYang Shi if (prot_numa && !thp_migration_supported()) 1824e346e668SYang Shi return 1; 1825e346e668SYang Shi 1826b6ec57f4SKirill A. Shutemov ptl = __pmd_trans_huge_lock(pmd, vma); 18270a85e51dSKirill A. Shutemov if (!ptl) 18280a85e51dSKirill A. Shutemov return 0; 18290a85e51dSKirill A. Shutemov 183084c3fc4eSZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 183184c3fc4eSZi Yan if (is_swap_pmd(*pmd)) { 183284c3fc4eSZi Yan swp_entry_t entry = pmd_to_swp_entry(*pmd); 18336c287605SDavid Hildenbrand struct page *page = pfn_swap_entry_to_page(entry); 183424bf08c4SDavid Hildenbrand pmd_t newpmd; 183584c3fc4eSZi Yan 183684c3fc4eSZi Yan VM_BUG_ON(!is_pmd_migration_entry(*pmd)); 18374dd845b5SAlistair Popple if (is_writable_migration_entry(entry)) { 183884c3fc4eSZi Yan /* 183984c3fc4eSZi Yan * A protection check is difficult so 184084c3fc4eSZi Yan * just be safe and disable write 184184c3fc4eSZi Yan */ 18426c287605SDavid Hildenbrand if (PageAnon(page)) 18436c287605SDavid Hildenbrand entry = make_readable_exclusive_migration_entry(swp_offset(entry)); 18446c287605SDavid Hildenbrand else 18456c287605SDavid Hildenbrand entry = make_readable_migration_entry(swp_offset(entry)); 184684c3fc4eSZi Yan newpmd = swp_entry_to_pmd(entry); 1847ab6e3d09SNaoya Horiguchi if (pmd_swp_soft_dirty(*pmd)) 1848ab6e3d09SNaoya Horiguchi newpmd = pmd_swp_mksoft_dirty(newpmd); 184924bf08c4SDavid Hildenbrand } else { 185024bf08c4SDavid Hildenbrand newpmd = *pmd; 185184c3fc4eSZi Yan } 185224bf08c4SDavid Hildenbrand 185324bf08c4SDavid Hildenbrand if (uffd_wp) 185424bf08c4SDavid Hildenbrand newpmd = pmd_swp_mkuffd_wp(newpmd); 185524bf08c4SDavid Hildenbrand else if (uffd_wp_resolve) 185624bf08c4SDavid Hildenbrand newpmd = pmd_swp_clear_uffd_wp(newpmd); 185724bf08c4SDavid Hildenbrand if (!pmd_same(*pmd, newpmd)) 185824bf08c4SDavid Hildenbrand set_pmd_at(mm, addr, pmd, newpmd); 185984c3fc4eSZi Yan goto unlock; 186084c3fc4eSZi Yan } 186184c3fc4eSZi Yan #endif 186284c3fc4eSZi Yan 1863a1a3a2fcSHuang Ying if (prot_numa) { 1864a1a3a2fcSHuang Ying struct page *page; 186533024536SHuang Ying bool toptier; 1866e944fd67SMel Gorman /* 1867e944fd67SMel Gorman * Avoid trapping faults against the zero page. The read-only 1868e944fd67SMel Gorman * data is likely to be read-cached on the local CPU and 1869e944fd67SMel Gorman * local/remote hits to the zero page are not interesting. 1870e944fd67SMel Gorman */ 1871a1a3a2fcSHuang Ying if (is_huge_zero_pmd(*pmd)) 18720a85e51dSKirill A. Shutemov goto unlock; 1873e944fd67SMel Gorman 1874a1a3a2fcSHuang Ying if (pmd_protnone(*pmd)) 18750a85e51dSKirill A. Shutemov goto unlock; 18760a85e51dSKirill A. 
Shutemov 1877a1a3a2fcSHuang Ying page = pmd_page(*pmd); 187833024536SHuang Ying toptier = node_is_toptier(page_to_nid(page)); 1879a1a3a2fcSHuang Ying /* 1880a1a3a2fcSHuang Ying * Skip scanning top tier node if normal numa 1881a1a3a2fcSHuang Ying * balancing is disabled 1882a1a3a2fcSHuang Ying */ 1883a1a3a2fcSHuang Ying if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) && 188433024536SHuang Ying toptier) 1885a1a3a2fcSHuang Ying goto unlock; 188633024536SHuang Ying 188733024536SHuang Ying if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING && 188833024536SHuang Ying !toptier) 188933024536SHuang Ying xchg_page_access_time(page, jiffies_to_msecs(jiffies)); 1890a1a3a2fcSHuang Ying } 1891ced10803SKirill A. Shutemov /* 18923e4e28c5SMichel Lespinasse * In case prot_numa, we are under mmap_read_lock(mm). It's critical 1893ced10803SKirill A. Shutemov * to not clear pmd intermittently to avoid race with MADV_DONTNEED 18943e4e28c5SMichel Lespinasse * which is also under mmap_read_lock(mm): 1895ced10803SKirill A. Shutemov * 1896ced10803SKirill A. Shutemov * CPU0: CPU1: 1897ced10803SKirill A. Shutemov * change_huge_pmd(prot_numa=1) 1898ced10803SKirill A. Shutemov * pmdp_huge_get_and_clear_notify() 1899ced10803SKirill A. Shutemov * madvise_dontneed() 1900ced10803SKirill A. Shutemov * zap_pmd_range() 1901ced10803SKirill A. Shutemov * pmd_trans_huge(*pmd) == 0 (without ptl) 1902ced10803SKirill A. Shutemov * // skip the pmd 1903ced10803SKirill A. Shutemov * set_pmd_at(); 1904ced10803SKirill A. Shutemov * // pmd is re-established 1905ced10803SKirill A. Shutemov * 1906ced10803SKirill A. Shutemov * The race makes MADV_DONTNEED miss the huge pmd and don't clear it 1907ced10803SKirill A. Shutemov * which may break userspace. 1908ced10803SKirill A. Shutemov * 19094f831457SNadav Amit * pmdp_invalidate_ad() is required to make sure we don't miss 1910ced10803SKirill A. Shutemov * dirty/young flags set by hardware. 1911ced10803SKirill A. Shutemov */ 19124f831457SNadav Amit oldpmd = pmdp_invalidate_ad(vma, addr, pmd); 1913ced10803SKirill A. Shutemov 1914c9fe6656SNadav Amit entry = pmd_modify(oldpmd, newprot); 1915f1eb1bacSPeter Xu if (uffd_wp) 1916292924b2SPeter Xu entry = pmd_mkuffd_wp(entry); 1917f1eb1bacSPeter Xu else if (uffd_wp_resolve) 1918292924b2SPeter Xu /* 1919292924b2SPeter Xu * Leave the write bit to be handled by PF interrupt 1920292924b2SPeter Xu * handler, then things like COW could be properly 1921292924b2SPeter Xu * handled. 1922292924b2SPeter Xu */ 1923292924b2SPeter Xu entry = pmd_clear_uffd_wp(entry); 1924c27f479eSDavid Hildenbrand 1925c27f479eSDavid Hildenbrand /* See change_pte_range(). */ 1926c27f479eSDavid Hildenbrand if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) && 1927c27f479eSDavid Hildenbrand can_change_pmd_writable(vma, addr, entry)) 1928c27f479eSDavid Hildenbrand entry = pmd_mkwrite(entry); 1929c27f479eSDavid Hildenbrand 1930f123d74aSMel Gorman ret = HPAGE_PMD_NR; 193156eecdb9SAneesh Kumar K.V set_pmd_at(mm, addr, pmd, entry); 19324a18419fSNadav Amit 1933c9fe6656SNadav Amit if (huge_pmd_needs_flush(oldpmd, entry)) 19344a18419fSNadav Amit tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE); 19350a85e51dSKirill A. Shutemov unlock: 1936bf929152SKirill A. Shutemov spin_unlock(ptl); 1937cd7548abSJohannes Weiner return ret; 1938cd7548abSJohannes Weiner } 1939cd7548abSJohannes Weiner 1940025c5b24SNaoya Horiguchi /* 19418f19b0c0SHuang Ying * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise. 
1942025c5b24SNaoya Horiguchi * 19438f19b0c0SHuang Ying * Note that if it returns page table lock pointer, this routine returns without 19448f19b0c0SHuang Ying * unlocking page table lock. So callers must unlock it. 1945025c5b24SNaoya Horiguchi */ 1946b6ec57f4SKirill A. Shutemov spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) 1947025c5b24SNaoya Horiguchi { 1948b6ec57f4SKirill A. Shutemov spinlock_t *ptl; 1949b6ec57f4SKirill A. Shutemov ptl = pmd_lock(vma->vm_mm, pmd); 195084c3fc4eSZi Yan if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || 195184c3fc4eSZi Yan pmd_devmap(*pmd))) 1952b6ec57f4SKirill A. Shutemov return ptl; 1953b6ec57f4SKirill A. Shutemov spin_unlock(ptl); 1954b6ec57f4SKirill A. Shutemov return NULL; 1955025c5b24SNaoya Horiguchi } 1956025c5b24SNaoya Horiguchi 1957a00cc7d9SMatthew Wilcox /* 1958d965e390SMiaohe Lin * Returns page table lock pointer if a given pud maps a thp, NULL otherwise. 1959a00cc7d9SMatthew Wilcox * 1960d965e390SMiaohe Lin * Note that if it returns page table lock pointer, this routine returns without 1961d965e390SMiaohe Lin * unlocking page table lock. So callers must unlock it. 1962a00cc7d9SMatthew Wilcox */ 1963a00cc7d9SMatthew Wilcox spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) 1964a00cc7d9SMatthew Wilcox { 1965a00cc7d9SMatthew Wilcox spinlock_t *ptl; 1966a00cc7d9SMatthew Wilcox 1967a00cc7d9SMatthew Wilcox ptl = pud_lock(vma->vm_mm, pud); 1968a00cc7d9SMatthew Wilcox if (likely(pud_trans_huge(*pud) || pud_devmap(*pud))) 1969a00cc7d9SMatthew Wilcox return ptl; 1970a00cc7d9SMatthew Wilcox spin_unlock(ptl); 1971a00cc7d9SMatthew Wilcox return NULL; 1972a00cc7d9SMatthew Wilcox } 1973a00cc7d9SMatthew Wilcox 1974a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 1975a00cc7d9SMatthew Wilcox int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, 1976a00cc7d9SMatthew Wilcox pud_t *pud, unsigned long addr) 1977a00cc7d9SMatthew Wilcox { 1978a00cc7d9SMatthew Wilcox spinlock_t *ptl; 1979a00cc7d9SMatthew Wilcox 1980a00cc7d9SMatthew Wilcox ptl = __pud_trans_huge_lock(pud, vma); 1981a00cc7d9SMatthew Wilcox if (!ptl) 1982a00cc7d9SMatthew Wilcox return 0; 198374929079SMiaohe Lin 198470516b93SQian Cai pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm); 1985a00cc7d9SMatthew Wilcox tlb_remove_pud_tlb_entry(tlb, pud, addr); 19862484ca9bSThomas Hellstrom (VMware) if (vma_is_special_huge(vma)) { 1987a00cc7d9SMatthew Wilcox spin_unlock(ptl); 1988a00cc7d9SMatthew Wilcox /* No zero page support yet */ 1989a00cc7d9SMatthew Wilcox } else { 1990a00cc7d9SMatthew Wilcox /* No support for anonymous PUD pages yet */ 1991a00cc7d9SMatthew Wilcox BUG(); 1992a00cc7d9SMatthew Wilcox } 1993a00cc7d9SMatthew Wilcox return 1; 1994a00cc7d9SMatthew Wilcox } 1995a00cc7d9SMatthew Wilcox 1996a00cc7d9SMatthew Wilcox static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, 1997a00cc7d9SMatthew Wilcox unsigned long haddr) 1998a00cc7d9SMatthew Wilcox { 1999a00cc7d9SMatthew Wilcox VM_BUG_ON(haddr & ~HPAGE_PUD_MASK); 2000a00cc7d9SMatthew Wilcox VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 2001a00cc7d9SMatthew Wilcox VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); 2002a00cc7d9SMatthew Wilcox VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud)); 2003a00cc7d9SMatthew Wilcox 2004ce9311cfSYisheng Xie count_vm_event(THP_SPLIT_PUD); 2005a00cc7d9SMatthew Wilcox 2006a00cc7d9SMatthew Wilcox pudp_huge_clear_flush_notify(vma, haddr, pud); 2007a00cc7d9SMatthew Wilcox } 2008a00cc7d9SMatthew 
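/*
 * Illustrative sketch (not part of the original file): the calling pattern
 * for __pud_trans_huge_lock(), mirroring what zap_huge_pud() above does.
 * my_inspect_pud() is a hypothetical caller used only for illustration.
 */
static int my_inspect_pud(struct vm_area_struct *vma, pud_t *pud)
{
	spinlock_t *ptl = __pud_trans_huge_lock(pud, vma);

	if (!ptl)
		return 0;	/* not huge: fall back to a pmd walk */
	/* *pud is a stable huge/devmap entry here, guarded by ptl */
	spin_unlock(ptl);
	return 1;
}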
Wilcox 2009a00cc7d9SMatthew Wilcox void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, 2010a00cc7d9SMatthew Wilcox unsigned long address) 2011a00cc7d9SMatthew Wilcox { 2012a00cc7d9SMatthew Wilcox spinlock_t *ptl; 2013ac46d4f3SJérôme Glisse struct mmu_notifier_range range; 2014a00cc7d9SMatthew Wilcox 20157d4a8be0SAlistair Popple mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 20166f4f13e8SJérôme Glisse address & HPAGE_PUD_MASK, 2017ac46d4f3SJérôme Glisse (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE); 2018ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 2019ac46d4f3SJérôme Glisse ptl = pud_lock(vma->vm_mm, pud); 2020a00cc7d9SMatthew Wilcox if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud))) 2021a00cc7d9SMatthew Wilcox goto out; 2022ac46d4f3SJérôme Glisse __split_huge_pud_locked(vma, pud, range.start); 2023a00cc7d9SMatthew Wilcox 2024a00cc7d9SMatthew Wilcox out: 2025a00cc7d9SMatthew Wilcox spin_unlock(ptl); 20264645b9feSJérôme Glisse /* 20274645b9feSJérôme Glisse * No need to double call mmu_notifier->invalidate_range() callback as 20284645b9feSJérôme Glisse * the above pudp_huge_clear_flush_notify() did already call it. 20294645b9feSJérôme Glisse */ 2030ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_only_end(&range); 2031a00cc7d9SMatthew Wilcox } 2032a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 2033a00cc7d9SMatthew Wilcox 2034eef1b3baSKirill A. Shutemov static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, 2035eef1b3baSKirill A. Shutemov unsigned long haddr, pmd_t *pmd) 2036eef1b3baSKirill A. Shutemov { 2037eef1b3baSKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 2038eef1b3baSKirill A. Shutemov pgtable_t pgtable; 203942b2af2cSDavid Hildenbrand pmd_t _pmd, old_pmd; 2040c9c1ee20SHugh Dickins unsigned long addr; 2041c9c1ee20SHugh Dickins pte_t *pte; 2042eef1b3baSKirill A. Shutemov int i; 2043eef1b3baSKirill A. Shutemov 20440f10851eSJérôme Glisse /* 20450f10851eSJérôme Glisse * Leave pmd empty until pte is filled note that it is fine to delay 20460f10851eSJérôme Glisse * notification until mmu_notifier_invalidate_range_end() as we are 20470f10851eSJérôme Glisse * replacing a zero pmd write protected page with a zero pte write 20480f10851eSJérôme Glisse * protected page. 20490f10851eSJérôme Glisse * 2050ee65728eSMike Rapoport * See Documentation/mm/mmu_notifier.rst 20510f10851eSJérôme Glisse */ 205242b2af2cSDavid Hildenbrand old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd); 2053eef1b3baSKirill A. Shutemov 2054eef1b3baSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2055eef1b3baSKirill A. Shutemov pmd_populate(mm, &_pmd, pgtable); 2056eef1b3baSKirill A. Shutemov 2057c9c1ee20SHugh Dickins pte = pte_offset_map(&_pmd, haddr); 2058c9c1ee20SHugh Dickins VM_BUG_ON(!pte); 2059c9c1ee20SHugh Dickins for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { 2060c9c1ee20SHugh Dickins pte_t entry; 2061c9c1ee20SHugh Dickins 2062c9c1ee20SHugh Dickins entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot); 2063eef1b3baSKirill A. Shutemov entry = pte_mkspecial(entry); 206442b2af2cSDavid Hildenbrand if (pmd_uffd_wp(old_pmd)) 206542b2af2cSDavid Hildenbrand entry = pte_mkuffd_wp(entry); 2066*c33c7948SRyan Roberts VM_BUG_ON(!pte_none(ptep_get(pte))); 2067c9c1ee20SHugh Dickins set_pte_at(mm, addr, pte, entry); 2068c9c1ee20SHugh Dickins pte++; 2069eef1b3baSKirill A. Shutemov } 2070c9c1ee20SHugh Dickins pte_unmap(pte - 1); 2071eef1b3baSKirill A. 
Shutemov smp_wmb(); /* make pte visible before pmd */ 2072eef1b3baSKirill A. Shutemov pmd_populate(mm, pmd, pgtable); 2073eef1b3baSKirill A. Shutemov } 2074eef1b3baSKirill A. Shutemov 2075eef1b3baSKirill A. Shutemov static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, 2076ba988280SKirill A. Shutemov unsigned long haddr, bool freeze) 2077eef1b3baSKirill A. Shutemov { 2078eef1b3baSKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 2079eef1b3baSKirill A. Shutemov struct page *page; 2080eef1b3baSKirill A. Shutemov pgtable_t pgtable; 2081423ac9afSAneesh Kumar K.V pmd_t old_pmd, _pmd; 2082292924b2SPeter Xu bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false; 20830ccf7f16SPeter Xu bool anon_exclusive = false, dirty = false; 20842ac015e2SKirill A. Shutemov unsigned long addr; 2085c9c1ee20SHugh Dickins pte_t *pte; 2086eef1b3baSKirill A. Shutemov int i; 2087eef1b3baSKirill A. Shutemov 2088eef1b3baSKirill A. Shutemov VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); 2089eef1b3baSKirill A. Shutemov VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 2090eef1b3baSKirill A. Shutemov VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); 209184c3fc4eSZi Yan VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd) 209284c3fc4eSZi Yan && !pmd_devmap(*pmd)); 2093eef1b3baSKirill A. Shutemov 2094eef1b3baSKirill A. Shutemov count_vm_event(THP_SPLIT_PMD); 2095eef1b3baSKirill A. Shutemov 2096d21b9e57SKirill A. Shutemov if (!vma_is_anonymous(vma)) { 209799fa8a48SHugh Dickins old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); 2098953c66c2SAneesh Kumar K.V /* 2099953c66c2SAneesh Kumar K.V * We are going to unmap this huge page. So 2100953c66c2SAneesh Kumar K.V * just go ahead and zap it 2101953c66c2SAneesh Kumar K.V */ 2102953c66c2SAneesh Kumar K.V if (arch_needs_pgtable_deposit()) 2103953c66c2SAneesh Kumar K.V zap_deposited_table(mm, pmd); 21042484ca9bSThomas Hellstrom (VMware) if (vma_is_special_huge(vma)) 2105d21b9e57SKirill A. Shutemov return; 210699fa8a48SHugh Dickins if (unlikely(is_pmd_migration_entry(old_pmd))) { 210799fa8a48SHugh Dickins swp_entry_t entry; 210899fa8a48SHugh Dickins 210999fa8a48SHugh Dickins entry = pmd_to_swp_entry(old_pmd); 2110af5cdaf8SAlistair Popple page = pfn_swap_entry_to_page(entry); 211199fa8a48SHugh Dickins } else { 211299fa8a48SHugh Dickins page = pmd_page(old_pmd); 211399fa8a48SHugh Dickins if (!PageDirty(page) && pmd_dirty(old_pmd)) 2114e1f1b157SHugh Dickins set_page_dirty(page); 211599fa8a48SHugh Dickins if (!PageReferenced(page) && pmd_young(old_pmd)) 2116d21b9e57SKirill A. Shutemov SetPageReferenced(page); 2117cea86fe2SHugh Dickins page_remove_rmap(page, vma, true); 2118d21b9e57SKirill A. Shutemov put_page(page); 211999fa8a48SHugh Dickins } 2120fadae295SYang Shi add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR); 2121eef1b3baSKirill A. Shutemov return; 212299fa8a48SHugh Dickins } 212399fa8a48SHugh Dickins 21243b77e8c8SHugh Dickins if (is_huge_zero_pmd(*pmd)) { 21254645b9feSJérôme Glisse /* 21264645b9feSJérôme Glisse * FIXME: Do we want to invalidate secondary mmu by calling 21274645b9feSJérôme Glisse * mmu_notifier_invalidate_range() see comments below inside 21284645b9feSJérôme Glisse * __split_huge_pmd() ? 21294645b9feSJérôme Glisse * 21304645b9feSJérôme Glisse * We are going from a zero huge page write protected to zero 21314645b9feSJérôme Glisse * small page also write protected so it does not seems useful 21324645b9feSJérôme Glisse * to invalidate secondary mmu at this time. 21334645b9feSJérôme Glisse */ 2134eef1b3baSKirill A. 
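	/*
	 * A huge-zero pmd is rebuilt in place as a table of special
	 * zero-page ptes (preserving uffd-wp); no rmap or reference-count
	 * work is needed for the shared zero page.
	 */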
Shutemov return __split_huge_zero_page_pmd(vma, haddr, pmd); 2135eef1b3baSKirill A. Shutemov } 2136eef1b3baSKirill A. Shutemov 2137423ac9afSAneesh Kumar K.V /* 2138423ac9afSAneesh Kumar K.V * Up to this point the pmd is present and huge and userland has the 2139423ac9afSAneesh Kumar K.V * whole access to the hugepage during the split (which happens in 2140423ac9afSAneesh Kumar K.V * place). If we overwrite the pmd with the not-huge version pointing 2141423ac9afSAneesh Kumar K.V * to the pte here (which of course we could if all CPUs were bug 2142423ac9afSAneesh Kumar K.V * free), userland could trigger a small page size TLB miss on the 2143423ac9afSAneesh Kumar K.V * small sized TLB while the hugepage TLB entry is still established in 2144423ac9afSAneesh Kumar K.V * the huge TLB. Some CPUs don't like that. 214542742d9bSAlexander A. Klimov * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum 214642742d9bSAlexander A. Klimov * 383 on page 105. Intel should be safe but also warns that it's 2147423ac9afSAneesh Kumar K.V * only safe if the permission and cache attributes of the two entries 2148423ac9afSAneesh Kumar K.V * loaded in the two TLBs are identical (which should be the case here). 2149423ac9afSAneesh Kumar K.V * But it is generally safer to never allow small and huge TLB entries 2150423ac9afSAneesh Kumar K.V * for the same virtual address to be loaded simultaneously. So instead 2151423ac9afSAneesh Kumar K.V * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the 2152423ac9afSAneesh Kumar K.V * current pmd notpresent (atomically because here the pmd_trans_huge 2153423ac9afSAneesh Kumar K.V * must remain set at all times on the pmd until the split is complete 2154423ac9afSAneesh Kumar K.V * for this pmd), then we flush the SMP TLB and finally we write the 2155423ac9afSAneesh Kumar K.V * non-huge version of the pmd entry with pmd_populate.
2156423ac9afSAneesh Kumar K.V */ 2157423ac9afSAneesh Kumar K.V old_pmd = pmdp_invalidate(vma, haddr, pmd); 2158423ac9afSAneesh Kumar K.V 2159423ac9afSAneesh Kumar K.V pmd_migration = is_pmd_migration_entry(old_pmd); 21602e83ee1dSPeter Xu if (unlikely(pmd_migration)) { 216184c3fc4eSZi Yan swp_entry_t entry; 216284c3fc4eSZi Yan 2163423ac9afSAneesh Kumar K.V entry = pmd_to_swp_entry(old_pmd); 2164af5cdaf8SAlistair Popple page = pfn_swap_entry_to_page(entry); 21654dd845b5SAlistair Popple write = is_writable_migration_entry(entry); 21666c287605SDavid Hildenbrand if (PageAnon(page)) 21676c287605SDavid Hildenbrand anon_exclusive = is_readable_exclusive_migration_entry(entry); 21682e346877SPeter Xu young = is_migration_entry_young(entry); 21692e346877SPeter Xu dirty = is_migration_entry_dirty(entry); 21702e83ee1dSPeter Xu soft_dirty = pmd_swp_soft_dirty(old_pmd); 2171f45ec5ffSPeter Xu uffd_wp = pmd_swp_uffd_wp(old_pmd); 21722e83ee1dSPeter Xu } else { 2173423ac9afSAneesh Kumar K.V page = pmd_page(old_pmd); 21740ccf7f16SPeter Xu if (pmd_dirty(old_pmd)) { 21750ccf7f16SPeter Xu dirty = true; 2176423ac9afSAneesh Kumar K.V SetPageDirty(page); 21770ccf7f16SPeter Xu } 2178423ac9afSAneesh Kumar K.V write = pmd_write(old_pmd); 2179423ac9afSAneesh Kumar K.V young = pmd_young(old_pmd); 2180423ac9afSAneesh Kumar K.V soft_dirty = pmd_soft_dirty(old_pmd); 2181292924b2SPeter Xu uffd_wp = pmd_uffd_wp(old_pmd); 21826c287605SDavid Hildenbrand 21832e83ee1dSPeter Xu VM_BUG_ON_PAGE(!page_count(page), page); 21846c287605SDavid Hildenbrand 21856c287605SDavid Hildenbrand /* 21866c287605SDavid Hildenbrand * Without "freeze", we'll simply split the PMD, propagating the 21876c287605SDavid Hildenbrand * PageAnonExclusive() flag for each PTE by setting it for 21886c287605SDavid Hildenbrand * each subpage -- no need to (temporarily) clear. 21896c287605SDavid Hildenbrand * 21906c287605SDavid Hildenbrand * With "freeze" we want to replace mapped pages by 21916c287605SDavid Hildenbrand * migration entries right away. This is only possible if we 21926c287605SDavid Hildenbrand * managed to clear PageAnonExclusive() -- see 21936c287605SDavid Hildenbrand * set_pmd_migration_entry(). 21946c287605SDavid Hildenbrand * 21956c287605SDavid Hildenbrand * In case we cannot clear PageAnonExclusive(), split the PMD 21966c287605SDavid Hildenbrand * only and let try_to_migrate_one() fail later. 2197088b8aa5SDavid Hildenbrand * 2198088b8aa5SDavid Hildenbrand * See page_try_share_anon_rmap(): invalidate PMD first. 21996c287605SDavid Hildenbrand */ 22006c287605SDavid Hildenbrand anon_exclusive = PageAnon(page) && PageAnonExclusive(page); 22016c287605SDavid Hildenbrand if (freeze && anon_exclusive && page_try_share_anon_rmap(page)) 22026c287605SDavid Hildenbrand freeze = false; 220396d82debSHugh Dickins if (!freeze) 220496d82debSHugh Dickins page_ref_add(page, HPAGE_PMD_NR - 1); 22059d84604bSHugh Dickins } 2206eef1b3baSKirill A. Shutemov 2207423ac9afSAneesh Kumar K.V /* 2208423ac9afSAneesh Kumar K.V * Withdraw the table only after we mark the pmd entry invalid. 2209423ac9afSAneesh Kumar K.V * This is critical for some architectures (Power). 2210423ac9afSAneesh Kumar K.V */ 2211eef1b3baSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2212eef1b3baSKirill A. Shutemov pmd_populate(mm, &_pmd, pgtable); 2213eef1b3baSKirill A. Shutemov 2214c9c1ee20SHugh Dickins pte = pte_offset_map(&_pmd, haddr); 2215c9c1ee20SHugh Dickins VM_BUG_ON(!pte); 22162ac015e2SKirill A.
Shutemov for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { 2217c9c1ee20SHugh Dickins pte_t entry; 2218eef1b3baSKirill A. Shutemov /* 2219eef1b3baSKirill A. Shutemov * Note that NUMA hinting access restrictions are not 2220eef1b3baSKirill A. Shutemov * transferred to avoid any possibility of altering 2221eef1b3baSKirill A. Shutemov * permissions across VMAs. 2222eef1b3baSKirill A. Shutemov */ 222384c3fc4eSZi Yan if (freeze || pmd_migration) { 2224ba988280SKirill A. Shutemov swp_entry_t swp_entry; 22254dd845b5SAlistair Popple if (write) 22264dd845b5SAlistair Popple swp_entry = make_writable_migration_entry( 22274dd845b5SAlistair Popple page_to_pfn(page + i)); 22286c287605SDavid Hildenbrand else if (anon_exclusive) 22296c287605SDavid Hildenbrand swp_entry = make_readable_exclusive_migration_entry( 22306c287605SDavid Hildenbrand page_to_pfn(page + i)); 22314dd845b5SAlistair Popple else 22324dd845b5SAlistair Popple swp_entry = make_readable_migration_entry( 22334dd845b5SAlistair Popple page_to_pfn(page + i)); 22342e346877SPeter Xu if (young) 22352e346877SPeter Xu swp_entry = make_migration_entry_young(swp_entry); 22362e346877SPeter Xu if (dirty) 22372e346877SPeter Xu swp_entry = make_migration_entry_dirty(swp_entry); 2238ba988280SKirill A. Shutemov entry = swp_entry_to_pte(swp_entry); 2239804dd150SAndrea Arcangeli if (soft_dirty) 2240804dd150SAndrea Arcangeli entry = pte_swp_mksoft_dirty(entry); 2241f45ec5ffSPeter Xu if (uffd_wp) 2242f45ec5ffSPeter Xu entry = pte_swp_mkuffd_wp(entry); 2243ba988280SKirill A. Shutemov } else { 22446d2329f8SAndrea Arcangeli entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); 22451462c52eSDavid Hildenbrand if (write) 2246f3ebdf04SDavid Hildenbrand entry = pte_mkwrite(entry); 22476c287605SDavid Hildenbrand if (anon_exclusive) 22486c287605SDavid Hildenbrand SetPageAnonExclusive(page + i); 2249eef1b3baSKirill A. Shutemov if (!young) 2250eef1b3baSKirill A. Shutemov entry = pte_mkold(entry); 2251e833bc50SPeter Xu /* NOTE: this may set soft-dirty too on some archs */ 2252e833bc50SPeter Xu if (dirty) 2253e833bc50SPeter Xu entry = pte_mkdirty(entry); 2254804dd150SAndrea Arcangeli if (soft_dirty) 2255804dd150SAndrea Arcangeli entry = pte_mksoft_dirty(entry); 2256292924b2SPeter Xu if (uffd_wp) 2257292924b2SPeter Xu entry = pte_mkuffd_wp(entry); 225896d82debSHugh Dickins page_add_anon_rmap(page + i, vma, addr, false); 2259ba988280SKirill A. Shutemov } 2260*c33c7948SRyan Roberts VM_BUG_ON(!pte_none(ptep_get(pte))); 22612ac015e2SKirill A. Shutemov set_pte_at(mm, addr, pte, entry); 2262c9c1ee20SHugh Dickins pte++; 2263eef1b3baSKirill A. Shutemov } 2264c9c1ee20SHugh Dickins pte_unmap(pte - 1); 2265eef1b3baSKirill A. Shutemov 2266cb67f428SHugh Dickins if (!pmd_migration) 2267cb67f428SHugh Dickins page_remove_rmap(page, vma, true); 226896d82debSHugh Dickins if (freeze) 226996d82debSHugh Dickins put_page(page); 2270eef1b3baSKirill A. Shutemov 2271eef1b3baSKirill A. Shutemov smp_wmb(); /* make pte visible before pmd */ 2272eef1b3baSKirill A. Shutemov pmd_populate(mm, pmd, pgtable); 2273eef1b3baSKirill A. Shutemov } 2274eef1b3baSKirill A. Shutemov 2275eef1b3baSKirill A. Shutemov void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 2276af28a988SMatthew Wilcox (Oracle) unsigned long address, bool freeze, struct folio *folio) 2277eef1b3baSKirill A. Shutemov { 2278eef1b3baSKirill A. Shutemov spinlock_t *ptl; 2279ac46d4f3SJérôme Glisse struct mmu_notifier_range range; 2280eef1b3baSKirill A. 
Shutemov 22817d4a8be0SAlistair Popple mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 22826f4f13e8SJérôme Glisse address & HPAGE_PMD_MASK, 2283ac46d4f3SJérôme Glisse (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE); 2284ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 2285ac46d4f3SJérôme Glisse ptl = pmd_lock(vma->vm_mm, pmd); 228633f4751eSNaoya Horiguchi 228733f4751eSNaoya Horiguchi /* 2288af28a988SMatthew Wilcox (Oracle) * If the caller asks to set up a migration entry, we need a folio to 2289af28a988SMatthew Wilcox (Oracle) * check the pmd against. Otherwise we can end up replacing the wrong folio. 229033f4751eSNaoya Horiguchi */ 2291af28a988SMatthew Wilcox (Oracle) VM_BUG_ON(freeze && !folio); 229283a8441fSMatthew Wilcox (Oracle) VM_WARN_ON_ONCE(folio && !folio_test_locked(folio)); 229333f4751eSNaoya Horiguchi 22947f760917SDavid Hildenbrand if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) || 229583a8441fSMatthew Wilcox (Oracle) is_pmd_migration_entry(*pmd)) { 2296cea33328SMiaohe Lin /* 2297cea33328SMiaohe Lin * It's safe to call pmd_page when folio is set because it's 2298cea33328SMiaohe Lin * guaranteed that pmd is present. 2299cea33328SMiaohe Lin */ 230083a8441fSMatthew Wilcox (Oracle) if (folio && folio != page_folio(pmd_page(*pmd))) 230183a8441fSMatthew Wilcox (Oracle) goto out; 2302ac46d4f3SJérôme Glisse __split_huge_pmd_locked(vma, pmd, range.start, freeze); 230383a8441fSMatthew Wilcox (Oracle) } 23047f760917SDavid Hildenbrand 2305e90309c9SKirill A. Shutemov out: 2306eef1b3baSKirill A. Shutemov spin_unlock(ptl); 23074645b9feSJérôme Glisse /* 23084645b9feSJérôme Glisse * No need to double call mmu_notifier->invalidate_range() callback. 23094645b9feSJérôme Glisse * There are 3 cases to consider inside __split_huge_pmd_locked(): 23104645b9feSJérôme Glisse * 1) pmdp_huge_clear_flush_notify() calls invalidate_range(), obvious 23114645b9feSJérôme Glisse * 2) __split_huge_zero_page_pmd(): the zero page is read-only, and any 23124645b9feSJérôme Glisse * write fault will trigger a flush_notify before pointing to a new page 23134645b9feSJérôme Glisse * (it is fine if the secondary mmu keeps pointing to the old zero 23144645b9feSJérôme Glisse * page in the meantime) 23154645b9feSJérôme Glisse * 3) Split a huge pmd into ptes pointing to the same page. No need 23164645b9feSJérôme Glisse * to invalidate secondary tlb entries; they are all still valid. 23174645b9feSJérôme Glisse * Any further changes to individual ptes will notify. So no need 23184645b9feSJérôme Glisse * to call mmu_notifier->invalidate_range() 23194645b9feSJérôme Glisse */ 2320ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_only_end(&range); 2321eef1b3baSKirill A. Shutemov } 2322eef1b3baSKirill A. Shutemov 2323fec89c10SKirill A.
Shutemov void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, 2324af28a988SMatthew Wilcox (Oracle) bool freeze, struct folio *folio) 232594fcc585SAndrea Arcangeli { 232650722804SZach O'Keefe pmd_t *pmd = mm_find_pmd(vma->vm_mm, address); 232794fcc585SAndrea Arcangeli 232850722804SZach O'Keefe if (!pmd) 2329f72e7dcdSHugh Dickins return; 2330f72e7dcdSHugh Dickins 2331af28a988SMatthew Wilcox (Oracle) __split_huge_pmd(vma, pmd, address, freeze, folio); 233294fcc585SAndrea Arcangeli } 233394fcc585SAndrea Arcangeli 233471f9e58eSMiaohe Lin static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address) 233571f9e58eSMiaohe Lin { 233671f9e58eSMiaohe Lin /* 233671f9e58eSMiaohe Lin * If the new address isn't hpage aligned and it could previously 233871f9e58eSMiaohe Lin * contain a hugepage: check if we need to split a huge pmd. 233971f9e58eSMiaohe Lin */ 234071f9e58eSMiaohe Lin if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) && 234171f9e58eSMiaohe Lin range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE), 234271f9e58eSMiaohe Lin ALIGN(address, HPAGE_PMD_SIZE))) 234371f9e58eSMiaohe Lin split_huge_pmd_address(vma, address, false, NULL); 234471f9e58eSMiaohe Lin } 234571f9e58eSMiaohe Lin 2346e1b9996bSKirill A. Shutemov void vma_adjust_trans_huge(struct vm_area_struct *vma, 234794fcc585SAndrea Arcangeli unsigned long start, 234894fcc585SAndrea Arcangeli unsigned long end, 234994fcc585SAndrea Arcangeli long adjust_next) 235094fcc585SAndrea Arcangeli { 235171f9e58eSMiaohe Lin /* Check if we need to split start first. */ 235271f9e58eSMiaohe Lin split_huge_pmd_if_needed(vma, start); 235371f9e58eSMiaohe Lin 235471f9e58eSMiaohe Lin /* Check if we need to split end next. */ 235571f9e58eSMiaohe Lin split_huge_pmd_if_needed(vma, end); 235694fcc585SAndrea Arcangeli 235794fcc585SAndrea Arcangeli /* 235868540502SMatthew Wilcox (Oracle) * If we're also updating the next vma vm_start, 235971f9e58eSMiaohe Lin * check if we need to split it. 236094fcc585SAndrea Arcangeli */ 236194fcc585SAndrea Arcangeli if (adjust_next > 0) { 236268540502SMatthew Wilcox (Oracle) struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end); 236394fcc585SAndrea Arcangeli unsigned long nstart = next->vm_start; 2364f9d86a60SWei Yang nstart += adjust_next; 236571f9e58eSMiaohe Lin split_huge_pmd_if_needed(next, nstart); 236694fcc585SAndrea Arcangeli } 236794fcc585SAndrea Arcangeli } 2368e9b61f19SKirill A. Shutemov 2369684555aaSMatthew Wilcox (Oracle) static void unmap_folio(struct folio *folio) 2370e9b61f19SKirill A. Shutemov { 2371a98a2f0cSAlistair Popple enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | 2372a98a2f0cSAlistair Popple TTU_SYNC; 2373e9b61f19SKirill A. Shutemov 2374684555aaSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); 2375e9b61f19SKirill A. Shutemov 2376a98a2f0cSAlistair Popple /* 2377a98a2f0cSAlistair Popple * Anon pages need migration entries to preserve them, but file 2378a98a2f0cSAlistair Popple * pages can simply be left unmapped, then faulted back on demand. 2379a98a2f0cSAlistair Popple * If that is ever changed (perhaps for mlock), update remap_page(). 2380a98a2f0cSAlistair Popple */ 23814b8554c5SMatthew Wilcox (Oracle) if (folio_test_anon(folio)) 23824b8554c5SMatthew Wilcox (Oracle) try_to_migrate(folio, ttu_flags); 2383a98a2f0cSAlistair Popple else 2384869f7ee6SMatthew Wilcox (Oracle) try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK); 2385bd56086fSKirill A. Shutemov } 2386bd56086fSKirill A.
Shutemov 23874eecb8b9SMatthew Wilcox (Oracle) static void remap_page(struct folio *folio, unsigned long nr) 2388e9b61f19SKirill A. Shutemov { 23894eecb8b9SMatthew Wilcox (Oracle) int i = 0; 2390ab02c252SHugh Dickins 2391684555aaSMatthew Wilcox (Oracle) /* If unmap_folio() uses try_to_migrate() on file, remove this check */ 23924eecb8b9SMatthew Wilcox (Oracle) if (!folio_test_anon(folio)) 2393ab02c252SHugh Dickins return; 23944eecb8b9SMatthew Wilcox (Oracle) for (;;) { 23954eecb8b9SMatthew Wilcox (Oracle) remove_migration_ptes(folio, folio, true); 23964eecb8b9SMatthew Wilcox (Oracle) i += folio_nr_pages(folio); 23974eecb8b9SMatthew Wilcox (Oracle) if (i >= nr) 23984eecb8b9SMatthew Wilcox (Oracle) break; 23994eecb8b9SMatthew Wilcox (Oracle) folio = folio_next(folio); 2400e9b61f19SKirill A. Shutemov } 2401ace71a19SKirill A. Shutemov } 2402e9b61f19SKirill A. Shutemov 240394866635SAlex Shi static void lru_add_page_tail(struct page *head, struct page *tail, 240488dcb9a3SAlex Shi struct lruvec *lruvec, struct list_head *list) 240588dcb9a3SAlex Shi { 240694866635SAlex Shi VM_BUG_ON_PAGE(!PageHead(head), head); 240794866635SAlex Shi VM_BUG_ON_PAGE(PageCompound(tail), head); 240894866635SAlex Shi VM_BUG_ON_PAGE(PageLRU(tail), head); 24096168d0daSAlex Shi lockdep_assert_held(&lruvec->lru_lock); 241088dcb9a3SAlex Shi 24116dbb5741SAlex Shi if (list) { 241288dcb9a3SAlex Shi /* page reclaim is reclaiming a huge page */ 24136dbb5741SAlex Shi VM_WARN_ON(PageLRU(head)); 241494866635SAlex Shi get_page(tail); 241594866635SAlex Shi list_add_tail(&tail->lru, list); 241688dcb9a3SAlex Shi } else { 24176dbb5741SAlex Shi /* head is still on lru (and we have it frozen) */ 24186dbb5741SAlex Shi VM_WARN_ON(!PageLRU(head)); 241907ca7606SHugh Dickins if (PageUnevictable(tail)) 242007ca7606SHugh Dickins tail->mlock_count = 0; 242107ca7606SHugh Dickins else 24226dbb5741SAlex Shi list_add_tail(&tail->lru, &head->lru); 242307ca7606SHugh Dickins SetPageLRU(tail); 242488dcb9a3SAlex Shi } 242588dcb9a3SAlex Shi } 242688dcb9a3SAlex Shi 24278df651c7SKirill A. Shutemov static void __split_huge_page_tail(struct page *head, int tail, 2428e9b61f19SKirill A. Shutemov struct lruvec *lruvec, struct list_head *list) 2429e9b61f19SKirill A. Shutemov { 2430e9b61f19SKirill A. Shutemov struct page *page_tail = head + tail; 2431e9b61f19SKirill A. Shutemov 24328df651c7SKirill A. Shutemov VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail); 2433e9b61f19SKirill A. Shutemov 2434e9b61f19SKirill A. Shutemov /* 2435605ca5edSKonstantin Khlebnikov * Clone page flags before unfreezing refcount. 2436605ca5edSKonstantin Khlebnikov * 2437605ca5edSKonstantin Khlebnikov * After a successful get_page_unless_zero(), flag changes might follow, 24388958b249SHaitao Shi * for example lock_page() which sets PG_waiters. 24396c287605SDavid Hildenbrand * 24406c287605SDavid Hildenbrand * Note that for mapped sub-pages of an anonymous THP, 2441684555aaSMatthew Wilcox (Oracle) * PG_anon_exclusive has been cleared in unmap_folio() and is stored in 24426c287605SDavid Hildenbrand * the migration entry instead, from where remap_page() will restore it. 24436c287605SDavid Hildenbrand * We can still have PG_anon_exclusive set on effectively unmapped and 24446c287605SDavid Hildenbrand * unreferenced sub-pages of an anonymous THP: we can simply drop 24456c287605SDavid Hildenbrand * PG_anon_exclusive (-> PG_mappedtodisk) for these here. 2446e9b61f19SKirill A. Shutemov */ 2447e9b61f19SKirill A.
Shutemov page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 2448e9b61f19SKirill A. Shutemov page_tail->flags |= (head->flags & 2449e9b61f19SKirill A. Shutemov ((1L << PG_referenced) | 2450e9b61f19SKirill A. Shutemov (1L << PG_swapbacked) | 245138d8b4e6SHuang Ying (1L << PG_swapcache) | 2452e9b61f19SKirill A. Shutemov (1L << PG_mlocked) | 2453e9b61f19SKirill A. Shutemov (1L << PG_uptodate) | 2454e9b61f19SKirill A. Shutemov (1L << PG_active) | 24551899ad18SJohannes Weiner (1L << PG_workingset) | 2456e9b61f19SKirill A. Shutemov (1L << PG_locked) | 2457b8d3c4c3SMinchan Kim (1L << PG_unevictable) | 2458b0284cd2SCatalin Marinas #ifdef CONFIG_ARCH_USES_PG_ARCH_X 245972e6afa0SCatalin Marinas (1L << PG_arch_2) | 2460ef6458b1SPeter Collingbourne (1L << PG_arch_3) | 246172e6afa0SCatalin Marinas #endif 2462ec1c86b2SYu Zhao (1L << PG_dirty) | 2463ec1c86b2SYu Zhao LRU_GEN_MASK | LRU_REFS_MASK)); 2464e9b61f19SKirill A. Shutemov 2465cb67f428SHugh Dickins /* ->mapping in first and second tail page is replaced by other uses */ 2466173d9d9fSHugh Dickins VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING, 2467173d9d9fSHugh Dickins page_tail); 2468173d9d9fSHugh Dickins page_tail->mapping = head->mapping; 2469173d9d9fSHugh Dickins page_tail->index = head->index + tail; 247071e2d666SMel Gorman 247171e2d666SMel Gorman /* 247271e2d666SMel Gorman * page->private should not be set in tail pages with the exception 247371e2d666SMel Gorman * of swap cache pages that store the swp_entry_t in tail pages. 247471e2d666SMel Gorman * Fix up and warn once if private is unexpectedly set. 2475cb67f428SHugh Dickins * 247694688e8eSMatthew Wilcox (Oracle) * What of 32-bit systems, on which folio->_pincount overlays 2477cb67f428SHugh Dickins * head[1].private? No problem: THP_SWAP is not enabled on 32-bit, and 247894688e8eSMatthew Wilcox (Oracle) * pincount must be 0 for folio_ref_freeze() to have succeeded. 247971e2d666SMel Gorman */ 248071e2d666SMel Gorman if (!folio_test_swapcache(page_folio(head))) { 24815aae9265SHugh Dickins VM_WARN_ON_ONCE_PAGE(page_tail->private != 0, page_tail); 2482b653db77SMatthew Wilcox (Oracle) page_tail->private = 0; 248371e2d666SMel Gorman } 2484173d9d9fSHugh Dickins 2485605ca5edSKonstantin Khlebnikov /* Page flags must be visible before we make the page non-compound. */ 2486e9b61f19SKirill A. Shutemov smp_wmb(); 2487e9b61f19SKirill A. Shutemov 2488605ca5edSKonstantin Khlebnikov /* 2489605ca5edSKonstantin Khlebnikov * Clear PageTail before unfreezing page refcount. 2490605ca5edSKonstantin Khlebnikov * 2491605ca5edSKonstantin Khlebnikov * After successful get_page_unless_zero() might follow put_page() 2492605ca5edSKonstantin Khlebnikov * which needs correct compound_head(). 2493605ca5edSKonstantin Khlebnikov */ 2494e9b61f19SKirill A. Shutemov clear_compound_head(page_tail); 2495e9b61f19SKirill A. Shutemov 2496605ca5edSKonstantin Khlebnikov /* Finally unfreeze refcount. Additional reference from page cache. */ 2497605ca5edSKonstantin Khlebnikov page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) || 2498605ca5edSKonstantin Khlebnikov PageSwapCache(head))); 2499605ca5edSKonstantin Khlebnikov 2500e9b61f19SKirill A. Shutemov if (page_is_young(head)) 2501e9b61f19SKirill A. Shutemov set_page_young(page_tail); 2502e9b61f19SKirill A. Shutemov if (page_is_idle(head)) 2503e9b61f19SKirill A. Shutemov set_page_idle(page_tail); 2504e9b61f19SKirill A. Shutemov 2505e9b61f19SKirill A. 
Shutemov page_cpupid_xchg_last(page_tail, page_cpupid_last(head)); 250694723aafSMichal Hocko 250794723aafSMichal Hocko /* 250894723aafSMichal Hocko * always add to the tail because some iterators expect new 250994723aafSMichal Hocko * pages to show after the currently processed elements - e.g. 251094723aafSMichal Hocko * migrate_pages 251194723aafSMichal Hocko */ 2512e9b61f19SKirill A. Shutemov lru_add_page_tail(head, page_tail, lruvec, list); 2513e9b61f19SKirill A. Shutemov } 2514e9b61f19SKirill A. Shutemov 2515baa355fdSKirill A. Shutemov static void __split_huge_page(struct page *page, struct list_head *list, 2516b6769834SAlex Shi pgoff_t end) 2517e9b61f19SKirill A. Shutemov { 2518e809c3feSMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 2519e809c3feSMatthew Wilcox (Oracle) struct page *head = &folio->page; 2520e9b61f19SKirill A. Shutemov struct lruvec *lruvec; 25214101196bSMatthew Wilcox (Oracle) struct address_space *swap_cache = NULL; 25224101196bSMatthew Wilcox (Oracle) unsigned long offset = 0; 25238cce5475SKirill A. Shutemov unsigned int nr = thp_nr_pages(head); 25248df651c7SKirill A. Shutemov int i; 2525e9b61f19SKirill A. Shutemov 2526e9b61f19SKirill A. Shutemov /* complete memcg works before add pages to LRU */ 2527be6c8982SZhou Guanghui split_page_memcg(head, nr); 2528e9b61f19SKirill A. Shutemov 25294101196bSMatthew Wilcox (Oracle) if (PageAnon(head) && PageSwapCache(head)) { 25304101196bSMatthew Wilcox (Oracle) swp_entry_t entry = { .val = page_private(head) }; 25314101196bSMatthew Wilcox (Oracle) 25324101196bSMatthew Wilcox (Oracle) offset = swp_offset(entry); 25334101196bSMatthew Wilcox (Oracle) swap_cache = swap_address_space(entry); 25344101196bSMatthew Wilcox (Oracle) xa_lock(&swap_cache->i_pages); 25354101196bSMatthew Wilcox (Oracle) } 25364101196bSMatthew Wilcox (Oracle) 2537f0953a1bSIngo Molnar /* lock lru list/PageCompound, ref frozen by page_ref_freeze */ 2538e809c3feSMatthew Wilcox (Oracle) lruvec = folio_lruvec_lock(folio); 2539b6769834SAlex Shi 2540eac96c3eSYang Shi ClearPageHasHWPoisoned(head); 2541eac96c3eSYang Shi 25428cce5475SKirill A. Shutemov for (i = nr - 1; i >= 1; i--) { 25438df651c7SKirill A. Shutemov __split_huge_page_tail(head, i, lruvec, list); 2544d144bf62SHugh Dickins /* Some pages can be beyond EOF: drop them from page cache */ 2545baa355fdSKirill A. Shutemov if (head[i].index >= end) { 2546fb5c2029SMatthew Wilcox (Oracle) struct folio *tail = page_folio(head + i); 2547fb5c2029SMatthew Wilcox (Oracle) 2548d144bf62SHugh Dickins if (shmem_mapping(head->mapping)) 2549800d8c63SKirill A. Shutemov shmem_uncharge(head->mapping->host, 1); 2550fb5c2029SMatthew Wilcox (Oracle) else if (folio_test_clear_dirty(tail)) 2551fb5c2029SMatthew Wilcox (Oracle) folio_account_cleaned(tail, 2552fb5c2029SMatthew Wilcox (Oracle) inode_to_wb(folio->mapping->host)); 2553fb5c2029SMatthew Wilcox (Oracle) __filemap_remove_folio(tail, NULL); 2554fb5c2029SMatthew Wilcox (Oracle) folio_put(tail); 25554101196bSMatthew Wilcox (Oracle) } else if (!PageAnon(page)) { 25564101196bSMatthew Wilcox (Oracle) __xa_store(&head->mapping->i_pages, head[i].index, 25574101196bSMatthew Wilcox (Oracle) head + i, 0); 25584101196bSMatthew Wilcox (Oracle) } else if (swap_cache) { 25594101196bSMatthew Wilcox (Oracle) __xa_store(&swap_cache->i_pages, offset + i, 25604101196bSMatthew Wilcox (Oracle) head + i, 0); 2561baa355fdSKirill A. Shutemov } 2562baa355fdSKirill A. Shutemov } 2563e9b61f19SKirill A. Shutemov 2564e9b61f19SKirill A. 
Shutemov ClearPageCompound(head); 25656168d0daSAlex Shi unlock_page_lruvec(lruvec); 2566b6769834SAlex Shi /* Caller disabled irqs, so they are still disabled here */ 2567f7da677bSVlastimil Babka 25688cce5475SKirill A. Shutemov split_page_owner(head, nr); 2569f7da677bSVlastimil Babka 2570baa355fdSKirill A. Shutemov /* See comment in __split_huge_page_tail() */ 2571baa355fdSKirill A. Shutemov if (PageAnon(head)) { 2572aa5dc07fSMatthew Wilcox /* Additional pin to swap cache */ 25734101196bSMatthew Wilcox (Oracle) if (PageSwapCache(head)) { 257438d8b4e6SHuang Ying page_ref_add(head, 2); 25754101196bSMatthew Wilcox (Oracle) xa_unlock(&swap_cache->i_pages); 25764101196bSMatthew Wilcox (Oracle) } else { 2577baa355fdSKirill A. Shutemov page_ref_inc(head); 25784101196bSMatthew Wilcox (Oracle) } 2579baa355fdSKirill A. Shutemov } else { 2580aa5dc07fSMatthew Wilcox /* Additional pin to page cache */ 2581baa355fdSKirill A. Shutemov page_ref_add(head, 2); 2582b93b0163SMatthew Wilcox xa_unlock(&head->mapping->i_pages); 2583baa355fdSKirill A. Shutemov } 2584b6769834SAlex Shi local_irq_enable(); 2585e9b61f19SKirill A. Shutemov 25864eecb8b9SMatthew Wilcox (Oracle) remap_page(folio, nr); 2587e9b61f19SKirill A. Shutemov 2588c4f9c701SHuang Ying if (PageSwapCache(head)) { 2589c4f9c701SHuang Ying swp_entry_t entry = { .val = page_private(head) }; 2590c4f9c701SHuang Ying 2591c4f9c701SHuang Ying split_swap_cluster(entry); 2592c4f9c701SHuang Ying } 2593c4f9c701SHuang Ying 25948cce5475SKirill A. Shutemov for (i = 0; i < nr; i++) { 2595e9b61f19SKirill A. Shutemov struct page *subpage = head + i; 2596e9b61f19SKirill A. Shutemov if (subpage == page) 2597e9b61f19SKirill A. Shutemov continue; 2598e9b61f19SKirill A. Shutemov unlock_page(subpage); 2599e9b61f19SKirill A. Shutemov 2600e9b61f19SKirill A. Shutemov /* 2601e9b61f19SKirill A. Shutemov * Subpages may be freed if there wasn't any mapping, 2602e9b61f19SKirill A. Shutemov * e.g. if add_to_swap() is running on an lru page that 2603e9b61f19SKirill A. Shutemov * had its mapping zapped. And freeing these pages 2604e9b61f19SKirill A. Shutemov * requires taking the lru_lock, so we do the put_page 2605e9b61f19SKirill A. Shutemov * of the tail pages after the split is complete. 2606e9b61f19SKirill A. Shutemov */ 26070b175468SMiaohe Lin free_page_and_swap_cache(subpage); 2608e9b61f19SKirill A. Shutemov } 2609e9b61f19SKirill A. Shutemov } 2610e9b61f19SKirill A. Shutemov 2611b8f593cdSHuang Ying /* Racy check whether the huge page can be split */ 2612d4b4084aSMatthew Wilcox (Oracle) bool can_split_folio(struct folio *folio, int *pextra_pins) 2613b8f593cdSHuang Ying { 2614b8f593cdSHuang Ying int extra_pins; 2615b8f593cdSHuang Ying 2616aa5dc07fSMatthew Wilcox /* Additional pins from page cache */ 2617d4b4084aSMatthew Wilcox (Oracle) if (folio_test_anon(folio)) 2618d4b4084aSMatthew Wilcox (Oracle) extra_pins = folio_test_swapcache(folio) ? 2619d4b4084aSMatthew Wilcox (Oracle) folio_nr_pages(folio) : 0; 2620b8f593cdSHuang Ying else 2621d4b4084aSMatthew Wilcox (Oracle) extra_pins = folio_nr_pages(folio); 2622b8f593cdSHuang Ying if (pextra_pins) 2623b8f593cdSHuang Ying *pextra_pins = extra_pins; 2624d4b4084aSMatthew Wilcox (Oracle) return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1; 2625b8f593cdSHuang Ying } 2626b8f593cdSHuang Ying 26276d0a07edSAndrea Arcangeli /* 2628e9b61f19SKirill A. Shutemov * This function splits a huge page into normal pages. @page can point to any 2629e9b61f19SKirill A. Shutemov * subpage of the huge page to split.
Split doesn't change the position of @page. 2630e9b61f19SKirill A. Shutemov * 2631e9b61f19SKirill A. Shutemov * Only the caller may hold a pin on the @page; otherwise the split fails with -EAGAIN. 2632e9b61f19SKirill A. Shutemov * The huge page must be locked. 2633e9b61f19SKirill A. Shutemov * 2634e9b61f19SKirill A. Shutemov * If @list is null, tail pages will be added to LRU list, otherwise, to @list. 2635e9b61f19SKirill A. Shutemov * 2636e9b61f19SKirill A. Shutemov * Both head page and tail pages will inherit mapping, flags, and so on from 2637e9b61f19SKirill A. Shutemov * the hugepage. 2638e9b61f19SKirill A. Shutemov * 2639e9b61f19SKirill A. Shutemov * The GUP pin and PG_locked are transferred to @page. The rest of the subpages 2640e9b61f19SKirill A. Shutemov * can be freed if they are not mapped. 2641e9b61f19SKirill A. Shutemov * 2642e9b61f19SKirill A. Shutemov * Returns 0 if the hugepage is split successfully. 2643e9b61f19SKirill A. Shutemov * Returns -EBUSY if the anon_vma disappeared from under us, and -EAGAIN if 2644e9b61f19SKirill A. Shutemov * the page is pinned. 2645e9b61f19SKirill A. Shutemov */ 2646e9b61f19SKirill A. Shutemov int split_huge_page_to_list(struct page *page, struct list_head *list) 2647e9b61f19SKirill A. Shutemov { 26484eecb8b9SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 2649f8baa6beSMatthew Wilcox (Oracle) struct deferred_split *ds_queue = get_deferred_split_queue(folio); 26503e9a13daSMatthew Wilcox (Oracle) XA_STATE(xas, &folio->mapping->i_pages, folio->index); 2651baa355fdSKirill A. Shutemov struct anon_vma *anon_vma = NULL; 2652baa355fdSKirill A. Shutemov struct address_space *mapping = NULL; 2653504e070dSYang Shi int extra_pins, ret; 2654006d3ff2SHugh Dickins pgoff_t end; 2655478d134eSXu Yu bool is_hzp; 2656e9b61f19SKirill A. Shutemov 26573e9a13daSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 26583e9a13daSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); 2659e9b61f19SKirill A. Shutemov 26603e9a13daSMatthew Wilcox (Oracle) is_hzp = is_huge_zero_page(&folio->page); 26614737edbbSNaoya Horiguchi if (is_hzp) { 26624737edbbSNaoya Horiguchi pr_warn_ratelimited("Called split_huge_page for huge zero page\n"); 2663478d134eSXu Yu return -EBUSY; 26644737edbbSNaoya Horiguchi } 2665478d134eSXu Yu 26663e9a13daSMatthew Wilcox (Oracle) if (folio_test_writeback(folio)) 266759807685SHuang Ying return -EBUSY; 266859807685SHuang Ying 26693e9a13daSMatthew Wilcox (Oracle) if (folio_test_anon(folio)) { 2670e9b61f19SKirill A. Shutemov /* 2671c1e8d7c6SMichel Lespinasse * The caller does not necessarily hold an mmap_lock that would 2672baa355fdSKirill A. Shutemov * prevent the anon_vma disappearing, so we first take a 2673baa355fdSKirill A. Shutemov * reference to it and then lock the anon_vma for write. This 26742f031c6fSMatthew Wilcox (Oracle) * is similar to folio_lock_anon_vma_read except the write lock 2675baa355fdSKirill A. Shutemov * is taken to serialise against parallel split or collapse 2676baa355fdSKirill A. Shutemov * operations. 2677e9b61f19SKirill A. Shutemov */ 267829eea9b5SMatthew Wilcox (Oracle) anon_vma = folio_get_anon_vma(folio); 2679e9b61f19SKirill A. Shutemov if (!anon_vma) { 2680e9b61f19SKirill A. Shutemov ret = -EBUSY; 2681e9b61f19SKirill A. Shutemov goto out; 2682e9b61f19SKirill A. Shutemov } 2683006d3ff2SHugh Dickins end = -1; 2684baa355fdSKirill A. Shutemov mapping = NULL; 2685e9b61f19SKirill A. Shutemov anon_vma_lock_write(anon_vma); 2686baa355fdSKirill A.
Shutemov } else { 26876a3edd29SYin Fengwei gfp_t gfp; 26886a3edd29SYin Fengwei 26893e9a13daSMatthew Wilcox (Oracle) mapping = folio->mapping; 2690baa355fdSKirill A. Shutemov 2691baa355fdSKirill A. Shutemov /* Truncated ? */ 2692baa355fdSKirill A. Shutemov if (!mapping) { 2693baa355fdSKirill A. Shutemov ret = -EBUSY; 2694baa355fdSKirill A. Shutemov goto out; 2695baa355fdSKirill A. Shutemov } 2696baa355fdSKirill A. Shutemov 26976a3edd29SYin Fengwei gfp = current_gfp_context(mapping_gfp_mask(mapping) & 26986a3edd29SYin Fengwei GFP_RECLAIM_MASK); 26996a3edd29SYin Fengwei 27006a3edd29SYin Fengwei if (folio_test_private(folio) && 27016a3edd29SYin Fengwei !filemap_release_folio(folio, gfp)) { 27026a3edd29SYin Fengwei ret = -EBUSY; 27036a3edd29SYin Fengwei goto out; 27046a3edd29SYin Fengwei } 27056a3edd29SYin Fengwei 27063e9a13daSMatthew Wilcox (Oracle) xas_split_alloc(&xas, folio, folio_order(folio), gfp); 27076b24ca4aSMatthew Wilcox (Oracle) if (xas_error(&xas)) { 27086b24ca4aSMatthew Wilcox (Oracle) ret = xas_error(&xas); 27096b24ca4aSMatthew Wilcox (Oracle) goto out; 27106b24ca4aSMatthew Wilcox (Oracle) } 27116b24ca4aSMatthew Wilcox (Oracle) 2712baa355fdSKirill A. Shutemov anon_vma = NULL; 2713baa355fdSKirill A. Shutemov i_mmap_lock_read(mapping); 2714006d3ff2SHugh Dickins 2715006d3ff2SHugh Dickins /* 2716006d3ff2SHugh Dickins *__split_huge_page() may need to trim off pages beyond EOF: 2717006d3ff2SHugh Dickins * but on 32-bit, i_size_read() takes an irq-unsafe seqlock, 2718006d3ff2SHugh Dickins * which cannot be nested inside the page tree lock. So note 2719006d3ff2SHugh Dickins * end now: i_size itself may be changed at any moment, but 27203e9a13daSMatthew Wilcox (Oracle) * folio lock is good enough to serialize the trimming. 2721006d3ff2SHugh Dickins */ 2722006d3ff2SHugh Dickins end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); 2723d144bf62SHugh Dickins if (shmem_mapping(mapping)) 2724d144bf62SHugh Dickins end = shmem_fallocend(mapping->host, end); 2725baa355fdSKirill A. Shutemov } 2726e9b61f19SKirill A. Shutemov 2727e9b61f19SKirill A. Shutemov /* 2728684555aaSMatthew Wilcox (Oracle) * Racy check if we can split the page, before unmap_folio() will 2729e9b61f19SKirill A. Shutemov * split PMDs 2730e9b61f19SKirill A. Shutemov */ 2731d4b4084aSMatthew Wilcox (Oracle) if (!can_split_folio(folio, &extra_pins)) { 2732fd4a7ac3SBaolin Wang ret = -EAGAIN; 2733e9b61f19SKirill A. Shutemov goto out_unlock; 2734e9b61f19SKirill A. Shutemov } 2735e9b61f19SKirill A. Shutemov 2736684555aaSMatthew Wilcox (Oracle) unmap_folio(folio); 2737e9b61f19SKirill A. Shutemov 2738b6769834SAlex Shi /* block interrupt reentry in xa_lock and spinlock */ 2739b6769834SAlex Shi local_irq_disable(); 2740baa355fdSKirill A. Shutemov if (mapping) { 2741baa355fdSKirill A. Shutemov /* 27423e9a13daSMatthew Wilcox (Oracle) * Check if the folio is present in page cache. 27433e9a13daSMatthew Wilcox (Oracle) * We assume all tail are present too, if folio is there. 2744baa355fdSKirill A. Shutemov */ 27456b24ca4aSMatthew Wilcox (Oracle) xas_lock(&xas); 27466b24ca4aSMatthew Wilcox (Oracle) xas_reset(&xas); 27473e9a13daSMatthew Wilcox (Oracle) if (xas_load(&xas) != folio) 2748baa355fdSKirill A. Shutemov goto fail; 2749baa355fdSKirill A. Shutemov } 2750baa355fdSKirill A. 
Shutemov 27510139aa7bSJoonsoo Kim /* Prevent deferred_split_scan() touching ->_refcount */ 2752364c1eebSYang Shi spin_lock(&ds_queue->split_queue_lock); 27533e9a13daSMatthew Wilcox (Oracle) if (folio_ref_freeze(folio, 1 + extra_pins)) { 27544375a553SMatthew Wilcox (Oracle) if (!list_empty(&folio->_deferred_list)) { 2755364c1eebSYang Shi ds_queue->split_queue_len--; 27564375a553SMatthew Wilcox (Oracle) list_del(&folio->_deferred_list); 27579a982250SKirill A. Shutemov } 2758afb97172SWei Yang spin_unlock(&ds_queue->split_queue_lock); 275906d3eff6SKirill A. Shutemov if (mapping) { 27603e9a13daSMatthew Wilcox (Oracle) int nr = folio_nr_pages(folio); 2761bf9eceadSMuchun Song 27623e9a13daSMatthew Wilcox (Oracle) xas_split(&xas, folio, folio_order(folio)); 27633e9a13daSMatthew Wilcox (Oracle) if (folio_test_swapbacked(folio)) { 27643e9a13daSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, 276557b2847dSMuchun Song -nr); 27661ca7554dSMarek Szyprowski } else { 27673e9a13daSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(folio, NR_FILE_THPS, 2768bf9eceadSMuchun Song -nr); 27691ca7554dSMarek Szyprowski filemap_nr_thps_dec(mapping); 27701ca7554dSMarek Szyprowski } 277106d3eff6SKirill A. Shutemov } 277206d3eff6SKirill A. Shutemov 2773b6769834SAlex Shi __split_huge_page(page, list, end); 2774e9b61f19SKirill A. Shutemov ret = 0; 2775baa355fdSKirill A. Shutemov } else { 2776364c1eebSYang Shi spin_unlock(&ds_queue->split_queue_lock); 2777504e070dSYang Shi fail: 2778504e070dSYang Shi if (mapping) 27796b24ca4aSMatthew Wilcox (Oracle) xas_unlock(&xas); 2780b6769834SAlex Shi local_irq_enable(); 27814eecb8b9SMatthew Wilcox (Oracle) remap_page(folio, folio_nr_pages(folio)); 2782fd4a7ac3SBaolin Wang ret = -EAGAIN; 2783e9b61f19SKirill A. Shutemov } 2784e9b61f19SKirill A. Shutemov 2785e9b61f19SKirill A. Shutemov out_unlock: 2786baa355fdSKirill A. Shutemov if (anon_vma) { 2787e9b61f19SKirill A. Shutemov anon_vma_unlock_write(anon_vma); 2788e9b61f19SKirill A. Shutemov put_anon_vma(anon_vma); 2789baa355fdSKirill A. Shutemov } 2790baa355fdSKirill A. Shutemov if (mapping) 2791baa355fdSKirill A. Shutemov i_mmap_unlock_read(mapping); 2792e9b61f19SKirill A. Shutemov out: 279369a37a8bSMatthew Wilcox (Oracle) xas_destroy(&xas); 2794e9b61f19SKirill A. Shutemov count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); 2795e9b61f19SKirill A. Shutemov return ret; 2796e9b61f19SKirill A. Shutemov } 27979a982250SKirill A. Shutemov 27989a982250SKirill A. Shutemov void free_transhuge_page(struct page *page) 27999a982250SKirill A. Shutemov { 28008991de90SMatthew Wilcox (Oracle) struct folio *folio = (struct folio *)page; 2801f8baa6beSMatthew Wilcox (Oracle) struct deferred_split *ds_queue = get_deferred_split_queue(folio); 28029a982250SKirill A. Shutemov unsigned long flags; 28039a982250SKirill A. Shutemov 2804deedad80SYin Fengwei /* 2805deedad80SYin Fengwei * At this point, there is no one trying to add the folio to 2806deedad80SYin Fengwei * deferred_list. If folio is not in deferred_list, it's safe 2807deedad80SYin Fengwei * to check without acquiring the split_queue_lock. 2808deedad80SYin Fengwei */ 2809deedad80SYin Fengwei if (data_race(!list_empty(&folio->_deferred_list))) { 2810364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 28118991de90SMatthew Wilcox (Oracle) if (!list_empty(&folio->_deferred_list)) { 2812364c1eebSYang Shi ds_queue->split_queue_len--; 28138991de90SMatthew Wilcox (Oracle) list_del(&folio->_deferred_list); 28149a982250SKirill A. 
Shutemov } 2815364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 2816deedad80SYin Fengwei } 28179a982250SKirill A. Shutemov free_compound_page(page); 28189a982250SKirill A. Shutemov } 28199a982250SKirill A. Shutemov 2820f158ed61SMatthew Wilcox (Oracle) void deferred_split_folio(struct folio *folio) 28219a982250SKirill A. Shutemov { 2822f8baa6beSMatthew Wilcox (Oracle) struct deferred_split *ds_queue = get_deferred_split_queue(folio); 282387eaceb3SYang Shi #ifdef CONFIG_MEMCG 28248991de90SMatthew Wilcox (Oracle) struct mem_cgroup *memcg = folio_memcg(folio); 282587eaceb3SYang Shi #endif 28269a982250SKirill A. Shutemov unsigned long flags; 28279a982250SKirill A. Shutemov 28288991de90SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio); 28299a982250SKirill A. Shutemov 283087eaceb3SYang Shi /* 283187eaceb3SYang Shi * The try_to_unmap() in page reclaim path might reach here too, 283287eaceb3SYang Shi * this may cause a race condition to corrupt deferred split queue. 28338991de90SMatthew Wilcox (Oracle) * And, if page reclaim is already handling the same folio, it is 283487eaceb3SYang Shi * unnecessary to handle it again in shrinker. 283587eaceb3SYang Shi * 28368991de90SMatthew Wilcox (Oracle) * Check the swapcache flag to determine if the folio is being 28378991de90SMatthew Wilcox (Oracle) * handled by page reclaim since THP swap would add the folio into 283887eaceb3SYang Shi * swap cache before calling try_to_unmap(). 283987eaceb3SYang Shi */ 28408991de90SMatthew Wilcox (Oracle) if (folio_test_swapcache(folio)) 284187eaceb3SYang Shi return; 284287eaceb3SYang Shi 28438991de90SMatthew Wilcox (Oracle) if (!list_empty(&folio->_deferred_list)) 28449a982250SKirill A. Shutemov return; 28459a982250SKirill A. Shutemov 2846364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 28478991de90SMatthew Wilcox (Oracle) if (list_empty(&folio->_deferred_list)) { 2848f9719a03SKirill A. Shutemov count_vm_event(THP_DEFERRED_SPLIT_PAGE); 28498991de90SMatthew Wilcox (Oracle) list_add_tail(&folio->_deferred_list, &ds_queue->split_queue); 2850364c1eebSYang Shi ds_queue->split_queue_len++; 285187eaceb3SYang Shi #ifdef CONFIG_MEMCG 285287eaceb3SYang Shi if (memcg) 28538991de90SMatthew Wilcox (Oracle) set_shrinker_bit(memcg, folio_nid(folio), 285487eaceb3SYang Shi deferred_split_shrinker.id); 285587eaceb3SYang Shi #endif 28569a982250SKirill A. Shutemov } 2857364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 28589a982250SKirill A. Shutemov } 28599a982250SKirill A. Shutemov 28609a982250SKirill A. Shutemov static unsigned long deferred_split_count(struct shrinker *shrink, 28619a982250SKirill A. Shutemov struct shrink_control *sc) 28629a982250SKirill A. Shutemov { 2863a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(sc->nid); 2864364c1eebSYang Shi struct deferred_split *ds_queue = &pgdata->deferred_split_queue; 286587eaceb3SYang Shi 286687eaceb3SYang Shi #ifdef CONFIG_MEMCG 286787eaceb3SYang Shi if (sc->memcg) 286887eaceb3SYang Shi ds_queue = &sc->memcg->deferred_split_queue; 286987eaceb3SYang Shi #endif 2870364c1eebSYang Shi return READ_ONCE(ds_queue->split_queue_len); 28719a982250SKirill A. Shutemov } 28729a982250SKirill A. Shutemov 28739a982250SKirill A. Shutemov static unsigned long deferred_split_scan(struct shrinker *shrink, 28749a982250SKirill A. Shutemov struct shrink_control *sc) 28759a982250SKirill A. Shutemov { 2876a3d0a918SKirill A. 
Shutemov struct pglist_data *pgdata = NODE_DATA(sc->nid); 2877364c1eebSYang Shi struct deferred_split *ds_queue = &pgdata->deferred_split_queue; 28789a982250SKirill A. Shutemov unsigned long flags; 28794375a553SMatthew Wilcox (Oracle) LIST_HEAD(list); 28804375a553SMatthew Wilcox (Oracle) struct folio *folio, *next; 28819a982250SKirill A. Shutemov int split = 0; 28829a982250SKirill A. Shutemov 288387eaceb3SYang Shi #ifdef CONFIG_MEMCG 288487eaceb3SYang Shi if (sc->memcg) 288587eaceb3SYang Shi ds_queue = &sc->memcg->deferred_split_queue; 288687eaceb3SYang Shi #endif 288787eaceb3SYang Shi 2888364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 28899a982250SKirill A. Shutemov /* Take pin on all head pages to avoid freeing them under us */ 28904375a553SMatthew Wilcox (Oracle) list_for_each_entry_safe(folio, next, &ds_queue->split_queue, 28914375a553SMatthew Wilcox (Oracle) _deferred_list) { 28924375a553SMatthew Wilcox (Oracle) if (folio_try_get(folio)) { 28934375a553SMatthew Wilcox (Oracle) list_move(&folio->_deferred_list, &list); 2894e3ae1953SKirill A. Shutemov } else { 28954375a553SMatthew Wilcox (Oracle) /* We lost race with folio_put() */ 28964375a553SMatthew Wilcox (Oracle) list_del_init(&folio->_deferred_list); 2897364c1eebSYang Shi ds_queue->split_queue_len--; 28989a982250SKirill A. Shutemov } 2899e3ae1953SKirill A. Shutemov if (!--sc->nr_to_scan) 2900e3ae1953SKirill A. Shutemov break; 29019a982250SKirill A. Shutemov } 2902364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 29039a982250SKirill A. Shutemov 29044375a553SMatthew Wilcox (Oracle) list_for_each_entry_safe(folio, next, &list, _deferred_list) { 29054375a553SMatthew Wilcox (Oracle) if (!folio_trylock(folio)) 2906fa41b900SKirill A. Shutemov goto next; 29079a982250SKirill A. Shutemov /* split_huge_page() removes page from list on success */ 29084375a553SMatthew Wilcox (Oracle) if (!split_folio(folio)) 29099a982250SKirill A. Shutemov split++; 29104375a553SMatthew Wilcox (Oracle) folio_unlock(folio); 2911fa41b900SKirill A. Shutemov next: 29124375a553SMatthew Wilcox (Oracle) folio_put(folio); 29139a982250SKirill A. Shutemov } 29149a982250SKirill A. Shutemov 2915364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 2916364c1eebSYang Shi list_splice_tail(&list, &ds_queue->split_queue); 2917364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 29189a982250SKirill A. Shutemov 2919cb8d68ecSKirill A. Shutemov /* 2920cb8d68ecSKirill A. Shutemov * Stop shrinker if we didn't split any page, but the queue is empty. 2921cb8d68ecSKirill A. Shutemov * This can happen if pages were freed under us. 2922cb8d68ecSKirill A. Shutemov */ 2923364c1eebSYang Shi if (!split && list_empty(&ds_queue->split_queue)) 2924cb8d68ecSKirill A. Shutemov return SHRINK_STOP; 2925cb8d68ecSKirill A. Shutemov return split; 29269a982250SKirill A. Shutemov } 29279a982250SKirill A. Shutemov 29289a982250SKirill A. Shutemov static struct shrinker deferred_split_shrinker = { 29299a982250SKirill A. Shutemov .count_objects = deferred_split_count, 29309a982250SKirill A. Shutemov .scan_objects = deferred_split_scan, 29319a982250SKirill A. Shutemov .seeks = DEFAULT_SEEKS, 293287eaceb3SYang Shi .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE | 293387eaceb3SYang Shi SHRINKER_NONSLAB, 29349a982250SKirill A. Shutemov }; 293549071d43SKirill A. Shutemov 293649071d43SKirill A. Shutemov #ifdef CONFIG_DEBUG_FS 2937fa6c0231SZi Yan static void split_huge_pages_all(void) 293849071d43SKirill A. 
Shutemov { 293949071d43SKirill A. Shutemov struct zone *zone; 294049071d43SKirill A. Shutemov struct page *page; 2941630e7c5eSKefeng Wang struct folio *folio; 294249071d43SKirill A. Shutemov unsigned long pfn, max_zone_pfn; 294349071d43SKirill A. Shutemov unsigned long total = 0, split = 0; 294449071d43SKirill A. Shutemov 2945fa6c0231SZi Yan pr_debug("Split all THPs\n"); 2946a17206daSMiaohe Lin for_each_zone(zone) { 2947a17206daSMiaohe Lin if (!managed_zone(zone)) 2948a17206daSMiaohe Lin continue; 294949071d43SKirill A. Shutemov max_zone_pfn = zone_end_pfn(zone); 295049071d43SKirill A. Shutemov for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { 2951a17206daSMiaohe Lin int nr_pages; 295249071d43SKirill A. Shutemov 29532b7aa91bSNaoya Horiguchi page = pfn_to_online_page(pfn); 2954630e7c5eSKefeng Wang if (!page || PageTail(page)) 2955630e7c5eSKefeng Wang continue; 2956630e7c5eSKefeng Wang folio = page_folio(page); 2957630e7c5eSKefeng Wang if (!folio_try_get(folio)) 295849071d43SKirill A. Shutemov continue; 295949071d43SKirill A. Shutemov 2960630e7c5eSKefeng Wang if (unlikely(page_folio(page) != folio)) 296149071d43SKirill A. Shutemov goto next; 296249071d43SKirill A. Shutemov 2963630e7c5eSKefeng Wang if (zone != folio_zone(folio)) 2964630e7c5eSKefeng Wang goto next; 2965630e7c5eSKefeng Wang 2966630e7c5eSKefeng Wang if (!folio_test_large(folio) 2967630e7c5eSKefeng Wang || folio_test_hugetlb(folio) 2968630e7c5eSKefeng Wang || !folio_test_lru(folio)) 296949071d43SKirill A. Shutemov goto next; 297049071d43SKirill A. Shutemov 297149071d43SKirill A. Shutemov total++; 2972630e7c5eSKefeng Wang folio_lock(folio); 2973630e7c5eSKefeng Wang nr_pages = folio_nr_pages(folio); 2974630e7c5eSKefeng Wang if (!split_folio(folio)) 297549071d43SKirill A. Shutemov split++; 2976a17206daSMiaohe Lin pfn += nr_pages - 1; 2977630e7c5eSKefeng Wang folio_unlock(folio); 297849071d43SKirill A. Shutemov next: 2979630e7c5eSKefeng Wang folio_put(folio); 2980fa6c0231SZi Yan cond_resched(); 298149071d43SKirill A. Shutemov } 298249071d43SKirill A. Shutemov } 298349071d43SKirill A. Shutemov 2984fa6c0231SZi Yan pr_debug("%lu of %lu THP split\n", split, total); 298549071d43SKirill A. 
Shutemov } 2986fa6c0231SZi Yan 2987fa6c0231SZi Yan static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma) 2988fa6c0231SZi Yan { 2989fa6c0231SZi Yan return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) || 2990fa6c0231SZi Yan is_vm_hugetlb_page(vma); 2991fa6c0231SZi Yan } 2992fa6c0231SZi Yan 2993fa6c0231SZi Yan static int split_huge_pages_pid(int pid, unsigned long vaddr_start, 2994fa6c0231SZi Yan unsigned long vaddr_end) 2995fa6c0231SZi Yan { 2996fa6c0231SZi Yan int ret = 0; 2997fa6c0231SZi Yan struct task_struct *task; 2998fa6c0231SZi Yan struct mm_struct *mm; 2999fa6c0231SZi Yan unsigned long total = 0, split = 0; 3000fa6c0231SZi Yan unsigned long addr; 3001fa6c0231SZi Yan 3002fa6c0231SZi Yan vaddr_start &= PAGE_MASK; 3003fa6c0231SZi Yan vaddr_end &= PAGE_MASK; 3004fa6c0231SZi Yan 3005fa6c0231SZi Yan /* Find the task_struct from pid */ 3006fa6c0231SZi Yan rcu_read_lock(); 3007fa6c0231SZi Yan task = find_task_by_vpid(pid); 3008fa6c0231SZi Yan if (!task) { 3009fa6c0231SZi Yan rcu_read_unlock(); 3010fa6c0231SZi Yan ret = -ESRCH; 3011fa6c0231SZi Yan goto out; 3012fa6c0231SZi Yan } 3013fa6c0231SZi Yan get_task_struct(task); 3014fa6c0231SZi Yan rcu_read_unlock(); 3015fa6c0231SZi Yan 3016fa6c0231SZi Yan /* Find the mm_struct */ 3017fa6c0231SZi Yan mm = get_task_mm(task); 3018fa6c0231SZi Yan put_task_struct(task); 3019fa6c0231SZi Yan 3020fa6c0231SZi Yan if (!mm) { 3021fa6c0231SZi Yan ret = -EINVAL; 3022fa6c0231SZi Yan goto out; 3023fa6c0231SZi Yan } 3024fa6c0231SZi Yan 3025fa6c0231SZi Yan pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n", 3026fa6c0231SZi Yan pid, vaddr_start, vaddr_end); 3027fa6c0231SZi Yan 3028fa6c0231SZi Yan mmap_read_lock(mm); 3029fa6c0231SZi Yan /* 3030fa6c0231SZi Yan * always increase addr by PAGE_SIZE, since we could have a PTE page 3031fa6c0231SZi Yan * table filled with PTE-mapped THPs, each of which is distinct. 
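 *
 * (An illustrative note: stepping by HPAGE_PMD_SIZE instead could miss
 * some of them, since e.g. after mremap() or a partial split,
 * consecutive ptes in one page table may map subpages of different
 * compound pages.)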
3032fa6c0231SZi Yan */ 3033fa6c0231SZi Yan for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) { 303474ba2b38SMiaohe Lin struct vm_area_struct *vma = vma_lookup(mm, addr); 3035fa6c0231SZi Yan struct page *page; 3036fa6c0231SZi Yan 303774ba2b38SMiaohe Lin if (!vma) 3038fa6c0231SZi Yan break; 3039fa6c0231SZi Yan 3040fa6c0231SZi Yan /* skip special VMA and hugetlb VMA */ 3041fa6c0231SZi Yan if (vma_not_suitable_for_thp_split(vma)) { 3042fa6c0231SZi Yan addr = vma->vm_end; 3043fa6c0231SZi Yan continue; 3044fa6c0231SZi Yan } 3045fa6c0231SZi Yan 3046fa6c0231SZi Yan /* FOLL_DUMP to ignore special (like zero) pages */ 304787d2762eSMiaohe Lin page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP); 3048fa6c0231SZi Yan 3049f7091ed6SHaiyue Wang if (IS_ERR_OR_NULL(page)) 3050fa6c0231SZi Yan continue; 3051fa6c0231SZi Yan 3052fa6c0231SZi Yan if (!is_transparent_hugepage(page)) 3053fa6c0231SZi Yan goto next; 3054fa6c0231SZi Yan 3055fa6c0231SZi Yan total++; 3056d4b4084aSMatthew Wilcox (Oracle) if (!can_split_folio(page_folio(page), NULL)) 3057fa6c0231SZi Yan goto next; 3058fa6c0231SZi Yan 3059fa6c0231SZi Yan if (!trylock_page(page)) 3060fa6c0231SZi Yan goto next; 3061fa6c0231SZi Yan 3062fa6c0231SZi Yan if (!split_huge_page(page)) 3063fa6c0231SZi Yan split++; 3064fa6c0231SZi Yan 3065fa6c0231SZi Yan unlock_page(page); 3066fa6c0231SZi Yan next: 3067fa6c0231SZi Yan put_page(page); 3068fa6c0231SZi Yan cond_resched(); 3069fa6c0231SZi Yan } 3070fa6c0231SZi Yan mmap_read_unlock(mm); 3071fa6c0231SZi Yan mmput(mm); 3072fa6c0231SZi Yan 3073fa6c0231SZi Yan pr_debug("%lu of %lu THP split\n", split, total); 3074fa6c0231SZi Yan 3075fa6c0231SZi Yan out: 3076fa6c0231SZi Yan return ret; 3077fa6c0231SZi Yan } 3078fa6c0231SZi Yan 3079fbe37501SZi Yan static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start, 3080fbe37501SZi Yan pgoff_t off_end) 3081fbe37501SZi Yan { 3082fbe37501SZi Yan struct filename *file; 3083fbe37501SZi Yan struct file *candidate; 3084fbe37501SZi Yan struct address_space *mapping; 3085fbe37501SZi Yan int ret = -EINVAL; 3086fbe37501SZi Yan pgoff_t index; 3087fbe37501SZi Yan int nr_pages = 1; 3088fbe37501SZi Yan unsigned long total = 0, split = 0; 3089fbe37501SZi Yan 3090fbe37501SZi Yan file = getname_kernel(file_path); 3091fbe37501SZi Yan if (IS_ERR(file)) 3092fbe37501SZi Yan return ret; 3093fbe37501SZi Yan 3094fbe37501SZi Yan candidate = file_open_name(file, O_RDONLY, 0); 3095fbe37501SZi Yan if (IS_ERR(candidate)) 3096fbe37501SZi Yan goto out; 3097fbe37501SZi Yan 3098fbe37501SZi Yan pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n", 3099fbe37501SZi Yan file_path, off_start, off_end); 3100fbe37501SZi Yan 3101fbe37501SZi Yan mapping = candidate->f_mapping; 3102fbe37501SZi Yan 3103fbe37501SZi Yan for (index = off_start; index < off_end; index += nr_pages) { 31041fb130b2SChristoph Hellwig struct folio *folio = filemap_get_folio(mapping, index); 3105fbe37501SZi Yan 3106fbe37501SZi Yan nr_pages = 1; 310766dabbb6SChristoph Hellwig if (IS_ERR(folio)) 3108fbe37501SZi Yan continue; 3109fbe37501SZi Yan 31109ee2c086SMatthew Wilcox (Oracle) if (!folio_test_large(folio)) 3111fbe37501SZi Yan goto next; 3112fbe37501SZi Yan 3113fbe37501SZi Yan total++; 31149ee2c086SMatthew Wilcox (Oracle) nr_pages = folio_nr_pages(folio); 3115fbe37501SZi Yan 31169ee2c086SMatthew Wilcox (Oracle) if (!folio_trylock(folio)) 3117fbe37501SZi Yan goto next; 3118fbe37501SZi Yan 31199ee2c086SMatthew Wilcox (Oracle) if (!split_folio(folio)) 3120fbe37501SZi Yan split++; 3121fbe37501SZi Yan 
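		/* split or not, unlock; the ref from filemap_get_folio() is dropped at next: */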
31229ee2c086SMatthew Wilcox (Oracle) folio_unlock(folio); 3123fbe37501SZi Yan next: 31249ee2c086SMatthew Wilcox (Oracle) folio_put(folio); 3125fbe37501SZi Yan cond_resched(); 3126fbe37501SZi Yan } 3127fbe37501SZi Yan 3128fbe37501SZi Yan filp_close(candidate, NULL); 3129fbe37501SZi Yan ret = 0; 3130fbe37501SZi Yan 3131fbe37501SZi Yan pr_debug("%lu of %lu file-backed THP split\n", split, total); 3132fbe37501SZi Yan out: 3133fbe37501SZi Yan putname(file); 3134fbe37501SZi Yan return ret; 3135fbe37501SZi Yan } 3136fbe37501SZi Yan 3137fa6c0231SZi Yan #define MAX_INPUT_BUF_SZ 255 3138fa6c0231SZi Yan 3139fa6c0231SZi Yan static ssize_t split_huge_pages_write(struct file *file, const char __user *buf, 3140fa6c0231SZi Yan size_t count, loff_t *ppops) 3141fa6c0231SZi Yan { 3142fa6c0231SZi Yan static DEFINE_MUTEX(split_debug_mutex); 3143fa6c0231SZi Yan ssize_t ret; 3144fbe37501SZi Yan /* hold pid, start_vaddr, end_vaddr or file_path, off_start, off_end */ 3145fbe37501SZi Yan char input_buf[MAX_INPUT_BUF_SZ]; 3146fa6c0231SZi Yan int pid; 3147fa6c0231SZi Yan unsigned long vaddr_start, vaddr_end; 3148fa6c0231SZi Yan 3149fa6c0231SZi Yan ret = mutex_lock_interruptible(&split_debug_mutex); 3150fa6c0231SZi Yan if (ret) 3151fa6c0231SZi Yan return ret; 3152fa6c0231SZi Yan 3153fa6c0231SZi Yan ret = -EFAULT; 3154fa6c0231SZi Yan 3155fa6c0231SZi Yan memset(input_buf, 0, MAX_INPUT_BUF_SZ); 3156fa6c0231SZi Yan if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ))) 3157fa6c0231SZi Yan goto out; 3158fa6c0231SZi Yan 3159fa6c0231SZi Yan input_buf[MAX_INPUT_BUF_SZ - 1] = '\0'; 3160fbe37501SZi Yan 3161fbe37501SZi Yan if (input_buf[0] == '/') { 3162fbe37501SZi Yan char *tok; 3163fbe37501SZi Yan char *buf = input_buf; 3164fbe37501SZi Yan char file_path[MAX_INPUT_BUF_SZ]; 3165fbe37501SZi Yan pgoff_t off_start = 0, off_end = 0; 3166fbe37501SZi Yan size_t input_len = strlen(input_buf); 3167fbe37501SZi Yan 3168fbe37501SZi Yan tok = strsep(&buf, ","); 3169fbe37501SZi Yan if (tok) { 31701212e00cSMatthew Wilcox (Oracle) strcpy(file_path, tok); 3171fbe37501SZi Yan } else { 3172fbe37501SZi Yan ret = -EINVAL; 3173fbe37501SZi Yan goto out; 3174fbe37501SZi Yan } 3175fbe37501SZi Yan 3176fbe37501SZi Yan ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end); 3177fbe37501SZi Yan if (ret != 2) { 3178fbe37501SZi Yan ret = -EINVAL; 3179fbe37501SZi Yan goto out; 3180fbe37501SZi Yan } 3181fbe37501SZi Yan ret = split_huge_pages_in_file(file_path, off_start, off_end); 3182fbe37501SZi Yan if (!ret) 3183fbe37501SZi Yan ret = input_len; 3184fbe37501SZi Yan 3185fbe37501SZi Yan goto out; 3186fbe37501SZi Yan } 3187fbe37501SZi Yan 3188fa6c0231SZi Yan ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end); 3189fa6c0231SZi Yan if (ret == 1 && pid == 1) { 3190fa6c0231SZi Yan split_huge_pages_all(); 3191fa6c0231SZi Yan ret = strlen(input_buf); 3192fa6c0231SZi Yan goto out; 3193fa6c0231SZi Yan } else if (ret != 3) { 3194fa6c0231SZi Yan ret = -EINVAL; 3195fa6c0231SZi Yan goto out; 3196fa6c0231SZi Yan } 3197fa6c0231SZi Yan 3198fa6c0231SZi Yan ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end); 3199fa6c0231SZi Yan if (!ret) 3200fa6c0231SZi Yan ret = strlen(input_buf); 3201fa6c0231SZi Yan out: 3202fa6c0231SZi Yan mutex_unlock(&split_debug_mutex); 3203fa6c0231SZi Yan return ret; 3204fa6c0231SZi Yan 3205fa6c0231SZi Yan } 3206fa6c0231SZi Yan 3207fa6c0231SZi Yan static const struct file_operations split_huge_pages_fops = { 3208fa6c0231SZi Yan .owner = THIS_MODULE, 3209fa6c0231SZi Yan .write = split_huge_pages_write, 
3210fa6c0231SZi Yan .llseek = no_llseek, 3211fa6c0231SZi Yan }; 321249071d43SKirill A. Shutemov 321349071d43SKirill A. Shutemov static int __init split_huge_pages_debugfs(void) 321449071d43SKirill A. Shutemov { 3215d9f7979cSGreg Kroah-Hartman debugfs_create_file("split_huge_pages", 0200, NULL, NULL, 321649071d43SKirill A. Shutemov &split_huge_pages_fops); 321749071d43SKirill A. Shutemov return 0; 321849071d43SKirill A. Shutemov } 321949071d43SKirill A. Shutemov late_initcall(split_huge_pages_debugfs); 322049071d43SKirill A. Shutemov #endif 3221616b8371SZi Yan 3222616b8371SZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 32237f5abe60SDavid Hildenbrand int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, 3224616b8371SZi Yan struct page *page) 3225616b8371SZi Yan { 3226616b8371SZi Yan struct vm_area_struct *vma = pvmw->vma; 3227616b8371SZi Yan struct mm_struct *mm = vma->vm_mm; 3228616b8371SZi Yan unsigned long address = pvmw->address; 32296c287605SDavid Hildenbrand bool anon_exclusive; 3230616b8371SZi Yan pmd_t pmdval; 3231616b8371SZi Yan swp_entry_t entry; 3232ab6e3d09SNaoya Horiguchi pmd_t pmdswp; 3233616b8371SZi Yan 3234616b8371SZi Yan if (!(pvmw->pmd && !pvmw->pte)) 32357f5abe60SDavid Hildenbrand return 0; 3236616b8371SZi Yan 3237616b8371SZi Yan flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); 32388a8683adSHuang Ying pmdval = pmdp_invalidate(vma, address, pvmw->pmd); 32396c287605SDavid Hildenbrand 3240088b8aa5SDavid Hildenbrand /* See page_try_share_anon_rmap(): invalidate PMD first. */ 32416c287605SDavid Hildenbrand anon_exclusive = PageAnon(page) && PageAnonExclusive(page); 32426c287605SDavid Hildenbrand if (anon_exclusive && page_try_share_anon_rmap(page)) { 32436c287605SDavid Hildenbrand set_pmd_at(mm, address, pvmw->pmd, pmdval); 32447f5abe60SDavid Hildenbrand return -EBUSY; 32456c287605SDavid Hildenbrand } 32466c287605SDavid Hildenbrand 3247616b8371SZi Yan if (pmd_dirty(pmdval)) 3248616b8371SZi Yan set_page_dirty(page); 32494dd845b5SAlistair Popple if (pmd_write(pmdval)) 32504dd845b5SAlistair Popple entry = make_writable_migration_entry(page_to_pfn(page)); 32516c287605SDavid Hildenbrand else if (anon_exclusive) 32526c287605SDavid Hildenbrand entry = make_readable_exclusive_migration_entry(page_to_pfn(page)); 32534dd845b5SAlistair Popple else 32544dd845b5SAlistair Popple entry = make_readable_migration_entry(page_to_pfn(page)); 32552e346877SPeter Xu if (pmd_young(pmdval)) 32562e346877SPeter Xu entry = make_migration_entry_young(entry); 32572e346877SPeter Xu if (pmd_dirty(pmdval)) 32582e346877SPeter Xu entry = make_migration_entry_dirty(entry); 3259ab6e3d09SNaoya Horiguchi pmdswp = swp_entry_to_pmd(entry); 3260ab6e3d09SNaoya Horiguchi if (pmd_soft_dirty(pmdval)) 3261ab6e3d09SNaoya Horiguchi pmdswp = pmd_swp_mksoft_dirty(pmdswp); 326224bf08c4SDavid Hildenbrand if (pmd_uffd_wp(pmdval)) 326324bf08c4SDavid Hildenbrand pmdswp = pmd_swp_mkuffd_wp(pmdswp); 3264ab6e3d09SNaoya Horiguchi set_pmd_at(mm, address, pvmw->pmd, pmdswp); 3265cea86fe2SHugh Dickins page_remove_rmap(page, vma, true); 3266616b8371SZi Yan put_page(page); 3267283fd6feSAnshuman Khandual trace_set_migration_pmd(address, pmd_val(pmdswp)); 32687f5abe60SDavid Hildenbrand 32697f5abe60SDavid Hildenbrand return 0; 3270616b8371SZi Yan } 3271616b8371SZi Yan 3272616b8371SZi Yan void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) 3273616b8371SZi Yan { 3274616b8371SZi Yan struct vm_area_struct *vma = pvmw->vma; 3275616b8371SZi Yan struct mm_struct *mm = vma->vm_mm; 3276616b8371SZi Yan 
unsigned long address = pvmw->address; 32774fba8f2aSMiaohe Lin unsigned long haddr = address & HPAGE_PMD_MASK; 3278616b8371SZi Yan pmd_t pmde; 3279616b8371SZi Yan swp_entry_t entry; 3280616b8371SZi Yan 3281616b8371SZi Yan if (!(pvmw->pmd && !pvmw->pte)) 3282616b8371SZi Yan return; 3283616b8371SZi Yan 3284616b8371SZi Yan entry = pmd_to_swp_entry(*pvmw->pmd); 3285616b8371SZi Yan get_page(new); 32862e346877SPeter Xu pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot)); 3287ab6e3d09SNaoya Horiguchi if (pmd_swp_soft_dirty(*pvmw->pmd)) 3288ab6e3d09SNaoya Horiguchi pmde = pmd_mksoft_dirty(pmde); 32893c811f78SDavid Hildenbrand if (is_writable_migration_entry(entry)) 3290f3ebdf04SDavid Hildenbrand pmde = pmd_mkwrite(pmde); 32918f34f1eaSPeter Xu if (pmd_swp_uffd_wp(*pvmw->pmd)) 3292f1eb1bacSPeter Xu pmde = pmd_mkuffd_wp(pmde); 32932e346877SPeter Xu if (!is_migration_entry_young(entry)) 32942e346877SPeter Xu pmde = pmd_mkold(pmde); 32952e346877SPeter Xu /* NOTE: this may contain setting soft-dirty on some archs */ 32962e346877SPeter Xu if (PageDirty(new) && is_migration_entry_dirty(entry)) 32972e346877SPeter Xu pmde = pmd_mkdirty(pmde); 3298616b8371SZi Yan 32996c287605SDavid Hildenbrand if (PageAnon(new)) { 33006c287605SDavid Hildenbrand rmap_t rmap_flags = RMAP_COMPOUND; 33016c287605SDavid Hildenbrand 33026c287605SDavid Hildenbrand if (!is_readable_migration_entry(entry)) 33036c287605SDavid Hildenbrand rmap_flags |= RMAP_EXCLUSIVE; 33046c287605SDavid Hildenbrand 33054fba8f2aSMiaohe Lin page_add_anon_rmap(new, vma, haddr, rmap_flags); 33066c287605SDavid Hildenbrand } else { 3307cea86fe2SHugh Dickins page_add_file_rmap(new, vma, true); 33086c287605SDavid Hildenbrand } 33096c287605SDavid Hildenbrand VM_BUG_ON(pmd_write(pmde) && PageAnon(new) && !PageAnonExclusive(new)); 33104fba8f2aSMiaohe Lin set_pmd_at(mm, haddr, pvmw->pmd, pmde); 33115cbcf225SMuchun Song 33125cbcf225SMuchun Song /* No need to invalidate - it was non-present before */ 3313616b8371SZi Yan update_mmu_cache_pmd(vma, address, pvmw->pmd); 3314283fd6feSAnshuman Khandual trace_remove_migration_pmd(address, pmd_val(pmde)); 3315616b8371SZi Yan } 3316616b8371SZi Yan #endif 3317
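/*
 * Usage sketch for the split_huge_pages debugfs interface defined above
 * (CONFIG_DEBUG_FS). The three accepted input formats follow directly from
 * split_huge_pages_write(); the pid, addresses, file path and offsets below
 * are illustrative values only:
 *
 *	# split all THPs in the system
 *	echo 1 > /sys/kernel/debug/split_huge_pages
 *
 *	# split THPs mapped by pid 1234 in the virtual range [0x400000, 0x800000)
 *	echo "1234,0x400000,0x800000" > /sys/kernel/debug/split_huge_pages
 *
 *	# split file-backed THPs of /mnt/thp_file at page offsets [0x10, 0x20)
 *	echo "/mnt/thp_file,0x10,0x20" > /sys/kernel/debug/split_huge_pages
 *
 * Addresses and offsets must be 0x-prefixed hex to match the sscanf()
 * formats above; a file path is recognised by its leading '/'.
 */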