// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2009  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "swap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

static struct shrinker deferred_split_shrinker;

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;

bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
			bool smaps, bool in_pf, bool enforce_sysfs)
{
	if (!vma->vm_mm)		/* vdso */
		return false;

	/*
	 * Explicitly disabled through madvise or prctl, or some
	 * architectures may disable THP for some mappings, for
	 * example, s390 kvm.
	 */
	if ((vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;
	/*
	 * If the hardware/firmware has marked hugepage support disabled.
	 */
	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
		return false;

	/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
	if (vma_is_dax(vma))
		return in_pf;

	/*
	 * Special VMA and hugetlb VMA.
	 * Must be checked after dax since some dax mappings may have
	 * VM_MIXEDMAP set.
	 */
	if (vm_flags & VM_NO_KHUGEPAGED)
		return false;

	/*
	 * Check alignment for file vma and size for both file and anon vma.
	 *
	 * Skip the check for page fault. Huge fault does the check in fault
	 * handlers. And this check is not suitable for huge PUD fault.
	 */
	if (!in_pf &&
	    !transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE)))
		return false;

	/*
	 * Enabled via shmem mount options or sysfs settings.
	 * Must be done before hugepage flags check since shmem has its
	 * own flags.
	 */
	if (!in_pf && shmem_file(vma->vm_file))
		return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
				     !enforce_sysfs, vma->vm_mm, vm_flags);

	/* Enforce sysfs THP requirements as necessary */
	if (enforce_sysfs &&
	    (!hugepage_flags_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
					   !hugepage_flags_always())))
		return false;

	/* Only regular file is valid */
	if (!in_pf && file_thp_enabled(vma))
		return true;

	if (!vma_is_anonymous(vma))
		return false;

	if (vma_is_temporary_stack(vma))
		return false;

	/*
	 * THPeligible bit of smaps should show 1 for proper VMAs even
	 * though anon_vma is not initialized yet.
	 *
	 * Allow page fault since anon_vma may not be initialized until
	 * the first page fault.
	 */
	if (!vma->anon_vma)
		return (smaps || in_pf);

	return true;
}

static bool get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return true;

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return false;
	}
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}
	WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	return true;
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}
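
/*
 * Editor's sketch (not part of the original file): the refcount above is
 * biased so that the shrinker owns the final reference.  The first
 * successful get_huge_zero_page() leaves the count at 2 (one for the
 * caller, one reserved for the shrinker); every further get/put pair
 * nests inside that:
 *
 *	if (get_huge_zero_page()) {		// count is now >= 2
 *		use_zero_pfn(huge_zero_pfn);	// hypothetical user
 *		put_huge_zero_page();		// can never reach 0 here
 *	}
 *
 * Only shrink_huge_zero_page_scan() below may take the count from 1 to 0
 * and actually free the page.
 */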

struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}

void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* We can free the zero page only if the last reference remains. */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		WRITE_ONCE(huge_zero_pfn, ~0UL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};
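
/*
 * Editor's note (sketch, not in the original file): a shrinker reports
 * reclaimable objects via ->count_objects and frees them in
 * ->scan_objects.  The unit of account here is the whole PMD page, so
 * both callbacks speak in HPAGE_PMD_NR base pages:
 *
 *	count: refcount == 1 (only the shrinker's bias is left)
 *	       ? HPAGE_PMD_NR : 0
 *	scan:  cmpxchg the refcount from 1 to 0, clear huge_zero_page
 *	       and huge_zero_pfn, free the page, report HPAGE_PMD_NR
 *
 * Reporting in base pages keeps the zero page comparable with the
 * per-page objects other shrinkers expose to memory reclaim.
 */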

#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		output = "[always] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always [madvise] never";
	else
		output = "always madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}

static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);

ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag)
{
	return sysfs_emit(buf, "%d\n",
			  !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
		     &transparent_hugepage_flags))
		output = "[always] defer defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
			  &transparent_hugepage_flags))
		output = "always [defer] defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer [defer+madvise] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer defer+madvise [madvise] never";
	else
		output = "always defer defer+madvise madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer+madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);

static ssize_t use_zero_page_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);
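
/*
 * Editor's usage sketch (not in the original file): once
 * hugepage_init_sysfs() below registers the attributes defined above,
 * they surface under /sys/kernel/mm/transparent_hugepage, e.g.:
 *
 *	# cat /sys/kernel/mm/transparent_hugepage/enabled
 *	always [madvise] never
 *	# echo defer+madvise > /sys/kernel/mm/transparent_hugepage/defrag
 *	# cat /sys/kernel/mm/transparent_hugepage/hpage_pmd_size
 *	2097152
 *
 * (2097152 assumes a 2MiB PMD, as on x86_64 with 4KiB base pages.)
 */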

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#ifdef CONFIG_SHMEM
	&shmem_enabled_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		/*
		 * Hardware doesn't support hugepages, hence disable
		 * DAX PMD support.
		 */
		transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_NEVER_DAX;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = register_shrinker(&huge_zero_page_shrinker, "thp-zero");
	if (err)
		goto err_hzp_shrinker;
	err = register_shrinker(&deferred_split_shrinker, "thp-deferred_split");
	if (err)
		goto err_split_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save.  The admin can still enable it through /sys.
	 */
	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
	unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

#ifdef CONFIG_MEMCG
static inline
struct deferred_split *get_deferred_split_queue(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);
	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));

	if (memcg)
		return &memcg->deferred_split_queue;
	else
		return &pgdat->deferred_split_queue;
}
#else
static inline
struct deferred_split *get_deferred_split_queue(struct folio *folio)
{
	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));

	return &pgdat->deferred_split_queue;
}
#endif
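
/*
 * Editor's note (sketch, not in the original file): the helper above
 * means a partially-unmapped THP is queued on its memcg's own
 * deferred_split_queue when it is charged to one, so memcg reclaim only
 * shrinks its own backlog; uncharged folios fall back to the per-node
 * queue.  A queueing site would look roughly like:
 *
 *	struct deferred_split *q = get_deferred_split_queue(folio);
 *
 *	spin_lock(&q->split_queue_lock);
 *	list_add_tail(&folio->_deferred_list, &q->split_queue);
 *	q->split_queue_len++;
 *	spin_unlock(&q->split_queue_lock);
 *
 * (Simplified; the real queueing path carries extra checks not shown
 * in this section.)
 */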

void prep_transhuge_page(struct page *page)
{
	struct folio *folio = (struct folio *)page;

	VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
	INIT_LIST_HEAD(&folio->_deferred_list);
	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}

static inline bool is_transparent_hugepage(struct page *page)
{
	struct folio *folio;

	if (!PageCompound(page))
		return false;

	folio = page_folio(page);
	return is_huge_zero_page(&folio->page) ||
	       folio->_folio_dtor == TRANSHUGE_PAGE_DTOR;
}

static unsigned long __thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size)
{
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad, ret;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
					     off >> PAGE_SHIFT, flags);

	/*
	 * The failure might be due to length padding. The caller will retry
	 * without the padding.
	 */
	if (IS_ERR_VALUE(ret))
		return 0;

	/*
	 * Do not try to align to THP boundary if allocation at the address
	 * hint succeeds.
	 */
	if (ret == addr)
		return addr;

	ret += (off - ret) & (size - 1);
	return ret;
}
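
/*
 * Editor's worked example (not in the original file) for the arithmetic
 * above, with size = PMD_SIZE = 2MiB: suppose the file offset is
 * off = 0x110000 and get_unmapped_area() returns a padded area at
 * ret = 0x7f0000000000.  Then
 *
 *	ret += (off - ret) & (size - 1);
 *
 * adds 0x110000, giving ret = 0x7f0000110000, so (ret - off) is 2MiB
 * aligned: the virtual address and the file offset now share their low
 * bits within the huge page, which is what allows a PMD-sized page
 * cache folio to be mapped with a single PMD entry.  The padding
 * (len_pad = len + size) guarantees such an address exists inside the
 * returned area.
 */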

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long ret;
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
	if (ret)
		return ret;

	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
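
/*
 * Editor's sketch (not in the original file): the export above lets a
 * filesystem hand out PMD-aligned mappings simply by wiring its
 * file_operations to this helper, roughly:
 *
 *	const struct file_operations myfs_file_operations = {
 *		.mmap			= myfs_mmap,
 *		.get_unmapped_area	= thp_get_unmapped_area,
 *	};
 *
 * ("myfs" and its ops are placeholders; DAX-capable filesystems are
 * among the in-tree users of this hook.)
 */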

static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
			struct page *page, gfp_t gfp)
{
	struct vm_area_struct *vma = vmf->vma;
	pgtable_t pgtable;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	vm_fault_t ret = 0;

	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, gfp)) {
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
		return VM_FAULT_FALLBACK;
	}
	cgroup_throttle_swaprate(page, gfp);

	pgtable = pte_alloc_one(vma->vm_mm);
	if (unlikely(!pgtable)) {
		ret = VM_FAULT_OOM;
		goto release;
	}

	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__SetPageUptodate(page);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_none(*vmf->pmd))) {
		goto unlock_release;
	} else {
		pmd_t entry;

		ret = check_stable_address_space(vma->vm_mm);
		if (ret)
			goto unlock_release;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			spin_unlock(vmf->ptl);
			put_page(page);
			pte_free(vma->vm_mm, pgtable);
			ret = handle_userfault(vmf, VM_UFFD_MISSING);
			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			return ret;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr);
		lru_cache_add_inactive_or_unevictable(page, vma);
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(vma->vm_mm);
		spin_unlock(vmf->ptl);
		count_vm_event(THP_FAULT_ALLOC);
		count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
	}

	return 0;
unlock_release:
	spin_unlock(vmf->ptl);
release:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	put_page(page);
	return ret;
}

/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */
gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
{
	const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);

	/* Always do synchronous compaction */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);

	/* Kick kcompactd and fail quickly */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;

	/* Synchronous compaction if madvised, otherwise kick kcompactd */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM :
					__GFP_KSWAPD_RECLAIM);

	/* Only do synchronous compaction if madvised */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
		       (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);

	return GFP_TRANSHUGE_LIGHT;
}
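
/*
 * Editor's summary (sketch, not in the original file) of what the
 * function above returns, assuming exactly one defrag flag is set:
 *
 *	always		GFP_TRANSHUGE (| __GFP_NORETRY if !madvised)
 *	defer		GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM
 *	defer+madvise	GFP_TRANSHUGE_LIGHT | (madvised ?
 *			__GFP_DIRECT_RECLAIM : __GFP_KSWAPD_RECLAIM)
 *	madvise		GFP_TRANSHUGE_LIGHT | (madvised ?
 *			__GFP_DIRECT_RECLAIM : 0)
 *	never		GFP_TRANSHUGE_LIGHT
 *
 * GFP_TRANSHUGE includes __GFP_DIRECT_RECLAIM while GFP_TRANSHUGE_LIGHT
 * does not, which is what makes the "light" variants fail fast instead
 * of stalling in compaction.
 */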

/* Caller must hold page table lock. */
static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
}

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	gfp_t gfp;
	struct folio *folio;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

	if (!transhuge_vma_suitable(vma, haddr))
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	khugepaged_enter_vma(vma, vma->vm_flags);

	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
			!mm_forbids_zeropage(vma->vm_mm) &&
			transparent_hugepage_use_zero_page()) {
		pgtable_t pgtable;
		struct page *zero_page;
		vm_fault_t ret;
		pgtable = pte_alloc_one(vma->vm_mm);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = mm_get_huge_zero_page(vma->vm_mm);
		if (unlikely(!zero_page)) {
			pte_free(vma->vm_mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		ret = 0;
		if (pmd_none(*vmf->pmd)) {
			ret = check_stable_address_space(vma->vm_mm);
			if (ret) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
			} else if (userfaultfd_missing(vma)) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
				ret = handle_userfault(vmf, VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_page(pgtable, vma->vm_mm, vma,
						   haddr, vmf->pmd, zero_page);
				update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
				spin_unlock(vmf->ptl);
			}
		} else {
			spin_unlock(vmf->ptl);
			pte_free(vma->vm_mm, pgtable);
		}
		return ret;
	}
	gfp = vma_thp_gfp_mask(vma);
	folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
	if (unlikely(!folio)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
}

static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
		pgtable_t pgtable)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!pmd_none(*pmd)) {
		if (write) {
			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
				goto out_unlock;
			}
			entry = pmd_mkyoung(*pmd);
			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
				update_mmu_cache_pmd(vma, addr, pmd);
		}

		goto out_unlock;
	}

	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pmd_mkdevmap(entry);
	if (write) {
		entry = pmd_mkyoung(pmd_mkdirty(entry));
		entry = maybe_pmd_mkwrite(entry, vma);
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		mm_inc_nr_ptes(mm);
		pgtable = NULL;
	}

	set_pmd_at(mm, addr, pmd, entry);
	update_mmu_cache_pmd(vma, addr, pmd);

out_unlock:
	spin_unlock(ptl);
	if (pgtable)
		pte_free(mm, pgtable);
}

/**
 * vmf_insert_pfn_pmd_prot - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @pgprot: page protection to use
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info and
 * also consult the vmf_insert_mixed_prot() documentation when
 * @pgprot != @vmf->vma->vm_page_prot.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write)
{
	unsigned long addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgtable_t pgtable = NULL;

	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd_prot);
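
/*
 * Editor's usage sketch (not in the original file): a device driver's
 * huge_fault handler would typically resolve the faulting address to a
 * pfn it owns and call the helper above, falling back when the fault
 * cannot be served at PMD granularity:
 *
 *	static vm_fault_t mydev_huge_fault(struct vm_fault *vmf,
 *					   enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn;
 *
 *		if (pe_size != PE_SIZE_PMD ||
 *		    mydev_lookup_pfn(vmf, &pfn))	// hypothetical helper
 *			return VM_FAULT_FALLBACK;
 *		return vmf_insert_pfn_pmd_prot(vmf, pfn,
 *					vmf->vma->vm_page_prot,
 *					vmf->flags & FAULT_FLAG_WRITE);
 *	}
 *
 * The fault core retries at a smaller granularity on VM_FAULT_FALLBACK.
 */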

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);
	return pud;
}

static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	pud_t entry;
	spinlock_t *ptl;

	ptl = pud_lock(mm, pud);
	if (!pud_none(*pud)) {
		if (write) {
			if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pud(*pud));
				goto out_unlock;
			}
			entry = pud_mkyoung(*pud);
			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
				update_mmu_cache_pud(vma, addr, pud);
		}
		goto out_unlock;
	}

	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pud_mkdevmap(entry);
	if (write) {
		entry = pud_mkyoung(pud_mkdirty(entry));
		entry = maybe_pud_mkwrite(entry, vma);
	}
	set_pud_at(mm, addr, pud, entry);
	update_mmu_cache_pud(vma, addr, pud);

out_unlock:
	spin_unlock(ptl);
}

/**
 * vmf_insert_pfn_pud_prot - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @pgprot: page protection to use
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info and
 * also consult the vmf_insert_mixed_prot() documentation when
 * @pgprot != @vmf->vma->vm_page_prot.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write)
{
	unsigned long addr = vmf->address & PUD_MASK;
	struct vm_area_struct *vma = vmf->vma;

	/*
	 * If we had pud_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud_prot);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		      pmd_t *pmd, bool write)
{
	pmd_t _pmd;

	_pmd = pmd_mkyoung(*pmd);
	if (write)
		_pmd = pmd_mkdirty(_pmd);
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				  pmd, _pmd, write))
		update_mmu_cache_pmd(vma, addr, pmd);
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pmd_pfn(*pmd);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int ret;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		return NULL;

	if (pmd_present(*pmd) && pmd_devmap(*pmd))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	ret = try_grab_page(page, flags);
	if (ret)
		page = ERR_PTR(ret);

	return page;
}
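
/*
 * Editor's note (sketch, not in the original file): the ERR_PTR(-EEXIST)
 * above is a signal rather than a failure.  The PMD is mapped, but the
 * caller passed neither FOLL_GET nor FOLL_PIN, so no reference can be
 * taken and no page pointer may be handed out.  A GUP-style caller
 * treats it roughly as:
 *
 *	page = follow_devmap_pmd(vma, addr, pmd, flags, &pgmap);
 *	if (page == ERR_PTR(-EEXIST)) {
 *		// mapped, but not recorded: proceed to the next page
 *	}
 *
 * With FOLL_GET or FOLL_PIN the reference is taken under the PMD lock
 * via try_grab_page(), and *pgmap keeps the device pagemap alive.
 */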
1095c4088ebdSKirill A. Shutemov spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 109671e3aac0SAndrea Arcangeli 109771e3aac0SAndrea Arcangeli ret = -EAGAIN; 109871e3aac0SAndrea Arcangeli pmd = *src_pmd; 109984c3fc4eSZi Yan 110084c3fc4eSZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 110184c3fc4eSZi Yan if (unlikely(is_swap_pmd(pmd))) { 110284c3fc4eSZi Yan swp_entry_t entry = pmd_to_swp_entry(pmd); 110384c3fc4eSZi Yan 110484c3fc4eSZi Yan VM_BUG_ON(!is_pmd_migration_entry(pmd)); 11056c287605SDavid Hildenbrand if (!is_readable_migration_entry(entry)) { 11064dd845b5SAlistair Popple entry = make_readable_migration_entry( 11074dd845b5SAlistair Popple swp_offset(entry)); 110884c3fc4eSZi Yan pmd = swp_entry_to_pmd(entry); 1109ab6e3d09SNaoya Horiguchi if (pmd_swp_soft_dirty(*src_pmd)) 1110ab6e3d09SNaoya Horiguchi pmd = pmd_swp_mksoft_dirty(pmd); 11118f34f1eaSPeter Xu if (pmd_swp_uffd_wp(*src_pmd)) 11128f34f1eaSPeter Xu pmd = pmd_swp_mkuffd_wp(pmd); 111384c3fc4eSZi Yan set_pmd_at(src_mm, addr, src_pmd, pmd); 111484c3fc4eSZi Yan } 1115dd8a67f9SZi Yan add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); 1116af5b0f6aSKirill A. Shutemov mm_inc_nr_ptes(dst_mm); 1117dd8a67f9SZi Yan pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); 11188f34f1eaSPeter Xu if (!userfaultfd_wp(dst_vma)) 11198f34f1eaSPeter Xu pmd = pmd_swp_clear_uffd_wp(pmd); 112084c3fc4eSZi Yan set_pmd_at(dst_mm, addr, dst_pmd, pmd); 112184c3fc4eSZi Yan ret = 0; 112284c3fc4eSZi Yan goto out_unlock; 112384c3fc4eSZi Yan } 112484c3fc4eSZi Yan #endif 112584c3fc4eSZi Yan 1126628d47ceSKirill A. Shutemov if (unlikely(!pmd_trans_huge(pmd))) { 112771e3aac0SAndrea Arcangeli pte_free(dst_mm, pgtable); 112871e3aac0SAndrea Arcangeli goto out_unlock; 112971e3aac0SAndrea Arcangeli } 1130fc9fe822SKirill A. Shutemov /* 1131c4088ebdSKirill A. Shutemov * When page table lock is held, the huge zero pmd should not be 1132fc9fe822SKirill A. Shutemov * under splitting since we don't split the page itself, only pmd to 1133fc9fe822SKirill A. Shutemov * a page table. 1134fc9fe822SKirill A. Shutemov */ 1135fc9fe822SKirill A. Shutemov if (is_huge_zero_pmd(pmd)) { 113697ae1749SKirill A. Shutemov /* 113797ae1749SKirill A. Shutemov * get_huge_zero_page() will never allocate a new page here, 113897ae1749SKirill A. Shutemov * since we already have a zero page to copy. It just takes a 113997ae1749SKirill A. Shutemov * reference. 114097ae1749SKirill A. Shutemov */ 11415fc7a5f6SPeter Xu mm_get_huge_zero_page(dst_mm); 11425fc7a5f6SPeter Xu goto out_zero_page; 1143fc9fe822SKirill A. Shutemov } 1144de466bd6SMel Gorman 114571e3aac0SAndrea Arcangeli src_page = pmd_page(pmd); 1146309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(src_page), src_page); 1147d042035eSPeter Xu 1148fb3d824dSDavid Hildenbrand get_page(src_page); 1149fb3d824dSDavid Hildenbrand if (unlikely(page_try_dup_anon_rmap(src_page, true, src_vma))) { 1150fb3d824dSDavid Hildenbrand /* Page may be pinned: split and retry the fault on PTEs. */ 1151fb3d824dSDavid Hildenbrand put_page(src_page); 1152d042035eSPeter Xu pte_free(dst_mm, pgtable); 1153d042035eSPeter Xu spin_unlock(src_ptl); 1154d042035eSPeter Xu spin_unlock(dst_ptl); 11558f34f1eaSPeter Xu __split_huge_pmd(src_vma, src_pmd, addr, false, NULL); 1156d042035eSPeter Xu return -EAGAIN; 1157d042035eSPeter Xu } 115871e3aac0SAndrea Arcangeli add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); 11595fc7a5f6SPeter Xu out_zero_page: 1160c4812909SKirill A.
Shutemov mm_inc_nr_ptes(dst_mm); 11615c7fb56eSDan Williams pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); 116271e3aac0SAndrea Arcangeli pmdp_set_wrprotect(src_mm, addr, src_pmd); 11638f34f1eaSPeter Xu if (!userfaultfd_wp(dst_vma)) 11648f34f1eaSPeter Xu pmd = pmd_clear_uffd_wp(pmd); 116571e3aac0SAndrea Arcangeli pmd = pmd_mkold(pmd_wrprotect(pmd)); 116671e3aac0SAndrea Arcangeli set_pmd_at(dst_mm, addr, dst_pmd, pmd); 116771e3aac0SAndrea Arcangeli 116871e3aac0SAndrea Arcangeli ret = 0; 116971e3aac0SAndrea Arcangeli out_unlock: 1170c4088ebdSKirill A. Shutemov spin_unlock(src_ptl); 1171c4088ebdSKirill A. Shutemov spin_unlock(dst_ptl); 117271e3aac0SAndrea Arcangeli out: 117371e3aac0SAndrea Arcangeli return ret; 117471e3aac0SAndrea Arcangeli } 117571e3aac0SAndrea Arcangeli 1176a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 1177a00cc7d9SMatthew Wilcox static void touch_pud(struct vm_area_struct *vma, unsigned long addr, 11785fe653e9SMiaohe Lin pud_t *pud, bool write) 1179a00cc7d9SMatthew Wilcox { 1180a00cc7d9SMatthew Wilcox pud_t _pud; 1181a00cc7d9SMatthew Wilcox 1182a8f97366SKirill A. Shutemov _pud = pud_mkyoung(*pud); 11835fe653e9SMiaohe Lin if (write) 1184a8f97366SKirill A. Shutemov _pud = pud_mkdirty(_pud); 1185a00cc7d9SMatthew Wilcox if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK, 11865fe653e9SMiaohe Lin pud, _pud, write)) 1187a00cc7d9SMatthew Wilcox update_mmu_cache_pud(vma, addr, pud); 1188a00cc7d9SMatthew Wilcox } 1189a00cc7d9SMatthew Wilcox 1190a00cc7d9SMatthew Wilcox struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, 1191df06b37fSKeith Busch pud_t *pud, int flags, struct dev_pagemap **pgmap) 1192a00cc7d9SMatthew Wilcox { 1193a00cc7d9SMatthew Wilcox unsigned long pfn = pud_pfn(*pud); 1194a00cc7d9SMatthew Wilcox struct mm_struct *mm = vma->vm_mm; 1195a00cc7d9SMatthew Wilcox struct page *page; 11960f089235SLogan Gunthorpe int ret; 1197a00cc7d9SMatthew Wilcox 1198a00cc7d9SMatthew Wilcox assert_spin_locked(pud_lockptr(mm, pud)); 1199a00cc7d9SMatthew Wilcox 1200f6f37321SLinus Torvalds if (flags & FOLL_WRITE && !pud_write(*pud)) 1201a00cc7d9SMatthew Wilcox return NULL; 1202a00cc7d9SMatthew Wilcox 1203a00cc7d9SMatthew Wilcox if (pud_present(*pud) && pud_devmap(*pud)) 1204a00cc7d9SMatthew Wilcox /* pass */; 1205a00cc7d9SMatthew Wilcox else 1206a00cc7d9SMatthew Wilcox return NULL; 1207a00cc7d9SMatthew Wilcox 1208a00cc7d9SMatthew Wilcox if (flags & FOLL_TOUCH) 12095fe653e9SMiaohe Lin touch_pud(vma, addr, pud, flags & FOLL_WRITE); 1210a00cc7d9SMatthew Wilcox 1211a00cc7d9SMatthew Wilcox /* 1212a00cc7d9SMatthew Wilcox * device mapped pages can only be returned if the 1213a00cc7d9SMatthew Wilcox * caller will manage the page reference count. 
12143faa52c0SJohn Hubbard * 12153faa52c0SJohn Hubbard * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here: 1216a00cc7d9SMatthew Wilcox */ 12173faa52c0SJohn Hubbard if (!(flags & (FOLL_GET | FOLL_PIN))) 1218a00cc7d9SMatthew Wilcox return ERR_PTR(-EEXIST); 1219a00cc7d9SMatthew Wilcox 1220a00cc7d9SMatthew Wilcox pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT; 1221df06b37fSKeith Busch *pgmap = get_dev_pagemap(pfn, *pgmap); 1222df06b37fSKeith Busch if (!*pgmap) 1223a00cc7d9SMatthew Wilcox return ERR_PTR(-EFAULT); 1224a00cc7d9SMatthew Wilcox page = pfn_to_page(pfn); 12250f089235SLogan Gunthorpe 12260f089235SLogan Gunthorpe ret = try_grab_page(page, flags); 12270f089235SLogan Gunthorpe if (ret) 12280f089235SLogan Gunthorpe page = ERR_PTR(ret); 1229a00cc7d9SMatthew Wilcox 1230a00cc7d9SMatthew Wilcox return page; 1231a00cc7d9SMatthew Wilcox } 1232a00cc7d9SMatthew Wilcox 1233a00cc7d9SMatthew Wilcox int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, 1234a00cc7d9SMatthew Wilcox pud_t *dst_pud, pud_t *src_pud, unsigned long addr, 1235a00cc7d9SMatthew Wilcox struct vm_area_struct *vma) 1236a00cc7d9SMatthew Wilcox { 1237a00cc7d9SMatthew Wilcox spinlock_t *dst_ptl, *src_ptl; 1238a00cc7d9SMatthew Wilcox pud_t pud; 1239a00cc7d9SMatthew Wilcox int ret; 1240a00cc7d9SMatthew Wilcox 1241a00cc7d9SMatthew Wilcox dst_ptl = pud_lock(dst_mm, dst_pud); 1242a00cc7d9SMatthew Wilcox src_ptl = pud_lockptr(src_mm, src_pud); 1243a00cc7d9SMatthew Wilcox spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 1244a00cc7d9SMatthew Wilcox 1245a00cc7d9SMatthew Wilcox ret = -EAGAIN; 1246a00cc7d9SMatthew Wilcox pud = *src_pud; 1247a00cc7d9SMatthew Wilcox if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud))) 1248a00cc7d9SMatthew Wilcox goto out_unlock; 1249a00cc7d9SMatthew Wilcox 1250a00cc7d9SMatthew Wilcox /* 1251a00cc7d9SMatthew Wilcox * When page table lock is held, the huge zero pud should not be 1252a00cc7d9SMatthew Wilcox * under splitting since we don't split the page itself, only pud to 1253a00cc7d9SMatthew Wilcox * a page table. 1254a00cc7d9SMatthew Wilcox */ 1255a00cc7d9SMatthew Wilcox if (is_huge_zero_pud(pud)) { 1256a00cc7d9SMatthew Wilcox /* No huge zero pud yet */ 1257a00cc7d9SMatthew Wilcox } 1258a00cc7d9SMatthew Wilcox 1259fb3d824dSDavid Hildenbrand /* 1260fb3d824dSDavid Hildenbrand * TODO: once we support anonymous pages, use page_try_dup_anon_rmap() 1261fb3d824dSDavid Hildenbrand * and split if duplicating fails. 
1262fb3d824dSDavid Hildenbrand */ 1263a00cc7d9SMatthew Wilcox pudp_set_wrprotect(src_mm, addr, src_pud); 1264a00cc7d9SMatthew Wilcox pud = pud_mkold(pud_wrprotect(pud)); 1265a00cc7d9SMatthew Wilcox set_pud_at(dst_mm, addr, dst_pud, pud); 1266a00cc7d9SMatthew Wilcox 1267a00cc7d9SMatthew Wilcox ret = 0; 1268a00cc7d9SMatthew Wilcox out_unlock: 1269a00cc7d9SMatthew Wilcox spin_unlock(src_ptl); 1270a00cc7d9SMatthew Wilcox spin_unlock(dst_ptl); 1271a00cc7d9SMatthew Wilcox return ret; 1272a00cc7d9SMatthew Wilcox } 1273a00cc7d9SMatthew Wilcox 1274a00cc7d9SMatthew Wilcox void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) 1275a00cc7d9SMatthew Wilcox { 1276a00cc7d9SMatthew Wilcox bool write = vmf->flags & FAULT_FLAG_WRITE; 1277a00cc7d9SMatthew Wilcox 1278a00cc7d9SMatthew Wilcox vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud); 1279a00cc7d9SMatthew Wilcox if (unlikely(!pud_same(*vmf->pud, orig_pud))) 1280a00cc7d9SMatthew Wilcox goto unlock; 1281a00cc7d9SMatthew Wilcox 12825fe653e9SMiaohe Lin touch_pud(vmf->vma, vmf->address, vmf->pud, write); 1283a00cc7d9SMatthew Wilcox unlock: 1284a00cc7d9SMatthew Wilcox spin_unlock(vmf->ptl); 1285a00cc7d9SMatthew Wilcox } 1286a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 1287a00cc7d9SMatthew Wilcox 12885db4f15cSYang Shi void huge_pmd_set_accessed(struct vm_fault *vmf) 1289a1dd450bSWill Deacon { 129020f664aaSMinchan Kim bool write = vmf->flags & FAULT_FLAG_WRITE; 1291a1dd450bSWill Deacon 129282b0f8c3SJan Kara vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); 1293a69e4717SMiaohe Lin if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd))) 1294a1dd450bSWill Deacon goto unlock; 1295a1dd450bSWill Deacon 1296a69e4717SMiaohe Lin touch_pmd(vmf->vma, vmf->address, vmf->pmd, write); 1297a1dd450bSWill Deacon 1298a1dd450bSWill Deacon unlock: 129982b0f8c3SJan Kara spin_unlock(vmf->ptl); 1300a1dd450bSWill Deacon } 1301a1dd450bSWill Deacon 13025db4f15cSYang Shi vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf) 130371e3aac0SAndrea Arcangeli { 1304c89357e2SDavid Hildenbrand const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 130582b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 13062fad3d14SMatthew Wilcox (Oracle) struct folio *folio; 13073917c802SKirill A. Shutemov struct page *page; 130882b0f8c3SJan Kara unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 13095db4f15cSYang Shi pmd_t orig_pmd = vmf->orig_pmd; 131071e3aac0SAndrea Arcangeli 131182b0f8c3SJan Kara vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); 131281d1b09cSSasha Levin VM_BUG_ON_VMA(!vma->anon_vma, vma); 13133917c802SKirill A. Shutemov 131493b4796dSKirill A. Shutemov if (is_huge_zero_pmd(orig_pmd)) 13153917c802SKirill A. Shutemov goto fallback; 13163917c802SKirill A. Shutemov 131782b0f8c3SJan Kara spin_lock(vmf->ptl); 13183917c802SKirill A. Shutemov 13193917c802SKirill A. Shutemov if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { 13203917c802SKirill A. Shutemov spin_unlock(vmf->ptl); 13213917c802SKirill A. Shutemov return 0; 13223917c802SKirill A. Shutemov } 132371e3aac0SAndrea Arcangeli 132471e3aac0SAndrea Arcangeli page = pmd_page(orig_pmd); 13252fad3d14SMatthew Wilcox (Oracle) folio = page_folio(page); 1326f6004e73SMiaohe Lin VM_BUG_ON_PAGE(!PageHead(page), page); 13273917c802SKirill A. Shutemov 13286c287605SDavid Hildenbrand /* Early check when only holding the PT lock. 
*/ 13296c287605SDavid Hildenbrand if (PageAnonExclusive(page)) 13306c287605SDavid Hildenbrand goto reuse; 13316c287605SDavid Hildenbrand 13322fad3d14SMatthew Wilcox (Oracle) if (!folio_trylock(folio)) { 13332fad3d14SMatthew Wilcox (Oracle) folio_get(folio); 1334ba3c4ce6SHuang Ying spin_unlock(vmf->ptl); 13352fad3d14SMatthew Wilcox (Oracle) folio_lock(folio); 1336ba3c4ce6SHuang Ying spin_lock(vmf->ptl); 1337ba3c4ce6SHuang Ying if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { 13383917c802SKirill A. Shutemov spin_unlock(vmf->ptl); 13392fad3d14SMatthew Wilcox (Oracle) folio_unlock(folio); 13402fad3d14SMatthew Wilcox (Oracle) folio_put(folio); 13413917c802SKirill A. Shutemov return 0; 1342ba3c4ce6SHuang Ying } 13432fad3d14SMatthew Wilcox (Oracle) folio_put(folio); 1344ba3c4ce6SHuang Ying } 13453917c802SKirill A. Shutemov 13466c287605SDavid Hildenbrand /* Recheck after temporarily dropping the PT lock. */ 13476c287605SDavid Hildenbrand if (PageAnonExclusive(page)) { 13482fad3d14SMatthew Wilcox (Oracle) folio_unlock(folio); 13496c287605SDavid Hildenbrand goto reuse; 13506c287605SDavid Hildenbrand } 13516c287605SDavid Hildenbrand 13523917c802SKirill A. Shutemov /* 13532fad3d14SMatthew Wilcox (Oracle) * See do_wp_page(): we can only reuse the folio exclusively if 13542fad3d14SMatthew Wilcox (Oracle) * there are no additional references. Note that we always drain 13552fad3d14SMatthew Wilcox (Oracle) * the LRU pagevecs immediately after adding a THP. 13563917c802SKirill A. Shutemov */ 13572fad3d14SMatthew Wilcox (Oracle) if (folio_ref_count(folio) > 13582fad3d14SMatthew Wilcox (Oracle) 1 + folio_test_swapcache(folio) * folio_nr_pages(folio)) 13593bff7e3fSDavid Hildenbrand goto unlock_fallback; 13602fad3d14SMatthew Wilcox (Oracle) if (folio_test_swapcache(folio)) 13612fad3d14SMatthew Wilcox (Oracle) folio_free_swap(folio); 13622fad3d14SMatthew Wilcox (Oracle) if (folio_ref_count(folio) == 1) { 136371e3aac0SAndrea Arcangeli pmd_t entry; 13646c54dc6cSDavid Hildenbrand 13656c54dc6cSDavid Hildenbrand page_move_anon_rmap(page, vma); 13662fad3d14SMatthew Wilcox (Oracle) folio_unlock(folio); 13676c287605SDavid Hildenbrand reuse: 1368c89357e2SDavid Hildenbrand if (unlikely(unshare)) { 1369c89357e2SDavid Hildenbrand spin_unlock(vmf->ptl); 1370c89357e2SDavid Hildenbrand return 0; 1371c89357e2SDavid Hildenbrand } 137271e3aac0SAndrea Arcangeli entry = pmd_mkyoung(orig_pmd); 1373f55e1014SLinus Torvalds entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 137482b0f8c3SJan Kara if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) 137582b0f8c3SJan Kara update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 13763917c802SKirill A. Shutemov spin_unlock(vmf->ptl); 1377cb8d8633SDavid Hildenbrand return 0; 137871e3aac0SAndrea Arcangeli } 13793917c802SKirill A. Shutemov 13803bff7e3fSDavid Hildenbrand unlock_fallback: 13812fad3d14SMatthew Wilcox (Oracle) folio_unlock(folio); 138282b0f8c3SJan Kara spin_unlock(vmf->ptl); 13833917c802SKirill A. Shutemov fallback: 13843917c802SKirill A. Shutemov __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); 13853917c802SKirill A. 
Shutemov return VM_FAULT_FALLBACK; 138671e3aac0SAndrea Arcangeli } 138771e3aac0SAndrea Arcangeli 1388c27f479eSDavid Hildenbrand static inline bool can_change_pmd_writable(struct vm_area_struct *vma, 1389c27f479eSDavid Hildenbrand unsigned long addr, pmd_t pmd) 1390c27f479eSDavid Hildenbrand { 1391c27f479eSDavid Hildenbrand struct page *page; 1392c27f479eSDavid Hildenbrand 1393c27f479eSDavid Hildenbrand if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE))) 1394c27f479eSDavid Hildenbrand return false; 1395c27f479eSDavid Hildenbrand 1396c27f479eSDavid Hildenbrand /* Don't touch entries that are not even readable (NUMA hinting). */ 1397c27f479eSDavid Hildenbrand if (pmd_protnone(pmd)) 1398c27f479eSDavid Hildenbrand return false; 1399c27f479eSDavid Hildenbrand 1400c27f479eSDavid Hildenbrand /* Do we need write faults for softdirty tracking? */ 1401c27f479eSDavid Hildenbrand if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd)) 1402c27f479eSDavid Hildenbrand return false; 1403c27f479eSDavid Hildenbrand 1404c27f479eSDavid Hildenbrand /* Do we need write faults for uffd-wp tracking? */ 1405c27f479eSDavid Hildenbrand if (userfaultfd_huge_pmd_wp(vma, pmd)) 1406c27f479eSDavid Hildenbrand return false; 1407c27f479eSDavid Hildenbrand 1408c27f479eSDavid Hildenbrand if (!(vma->vm_flags & VM_SHARED)) { 1409c27f479eSDavid Hildenbrand /* See can_change_pte_writable(). */ 1410c27f479eSDavid Hildenbrand page = vm_normal_page_pmd(vma, addr, pmd); 1411c27f479eSDavid Hildenbrand return page && PageAnon(page) && PageAnonExclusive(page); 1412c27f479eSDavid Hildenbrand } 1413c27f479eSDavid Hildenbrand 1414c27f479eSDavid Hildenbrand /* See can_change_pte_writable(). */ 1415c27f479eSDavid Hildenbrand return pmd_dirty(pmd); 1416c27f479eSDavid Hildenbrand } 1417c27f479eSDavid Hildenbrand 14185535be30SDavid Hildenbrand /* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */ 14195535be30SDavid Hildenbrand static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page, 14205535be30SDavid Hildenbrand struct vm_area_struct *vma, 14215535be30SDavid Hildenbrand unsigned int flags) 14228310d48bSKeno Fischer { 14235535be30SDavid Hildenbrand /* If the pmd is writable, we can write to the page. */ 14245535be30SDavid Hildenbrand if (pmd_write(pmd)) 14255535be30SDavid Hildenbrand return true; 14265535be30SDavid Hildenbrand 14275535be30SDavid Hildenbrand /* Maybe FOLL_FORCE is set to override it? */ 14285535be30SDavid Hildenbrand if (!(flags & FOLL_FORCE)) 14295535be30SDavid Hildenbrand return false; 14305535be30SDavid Hildenbrand 14315535be30SDavid Hildenbrand /* But FOLL_FORCE has no effect on shared mappings */ 14325535be30SDavid Hildenbrand if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) 14335535be30SDavid Hildenbrand return false; 14345535be30SDavid Hildenbrand 14355535be30SDavid Hildenbrand /* ... or read-only private ones */ 14365535be30SDavid Hildenbrand if (!(vma->vm_flags & VM_MAYWRITE)) 14375535be30SDavid Hildenbrand return false; 14385535be30SDavid Hildenbrand 14395535be30SDavid Hildenbrand /* ... or already writable ones that just need to take a write fault */ 14405535be30SDavid Hildenbrand if (vma->vm_flags & VM_WRITE) 14415535be30SDavid Hildenbrand return false; 14425535be30SDavid Hildenbrand 14435535be30SDavid Hildenbrand /* 14445535be30SDavid Hildenbrand * See can_change_pte_writable(): we broke COW and could map the page 14455535be30SDavid Hildenbrand * writable if we have an exclusive anonymous page ... 
14465535be30SDavid Hildenbrand */ 14475535be30SDavid Hildenbrand if (!page || !PageAnon(page) || !PageAnonExclusive(page)) 14485535be30SDavid Hildenbrand return false; 14495535be30SDavid Hildenbrand 14505535be30SDavid Hildenbrand /* ... and a write-fault isn't required for other reasons. */ 14515535be30SDavid Hildenbrand if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd)) 14525535be30SDavid Hildenbrand return false; 14535535be30SDavid Hildenbrand return !userfaultfd_huge_pmd_wp(vma, pmd); 14548310d48bSKeno Fischer } 14558310d48bSKeno Fischer 1456b676b293SDavid Rientjes struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, 145771e3aac0SAndrea Arcangeli unsigned long addr, 145871e3aac0SAndrea Arcangeli pmd_t *pmd, 145971e3aac0SAndrea Arcangeli unsigned int flags) 146071e3aac0SAndrea Arcangeli { 1461b676b293SDavid Rientjes struct mm_struct *mm = vma->vm_mm; 14625535be30SDavid Hildenbrand struct page *page; 14630f089235SLogan Gunthorpe int ret; 146471e3aac0SAndrea Arcangeli 1465c4088ebdSKirill A. Shutemov assert_spin_locked(pmd_lockptr(mm, pmd)); 146671e3aac0SAndrea Arcangeli 14675535be30SDavid Hildenbrand page = pmd_page(*pmd); 14685535be30SDavid Hildenbrand VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page); 14695535be30SDavid Hildenbrand 14705535be30SDavid Hildenbrand if ((flags & FOLL_WRITE) && 14715535be30SDavid Hildenbrand !can_follow_write_pmd(*pmd, page, vma, flags)) 14725535be30SDavid Hildenbrand return NULL; 147371e3aac0SAndrea Arcangeli 147485facf25SKirill A. Shutemov /* Avoid dumping huge zero page */ 147585facf25SKirill A. Shutemov if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) 147685facf25SKirill A. Shutemov return ERR_PTR(-EFAULT); 147785facf25SKirill A. Shutemov 14782b4847e7SMel Gorman /* Full NUMA hinting faults to serialise migration in fault paths */ 1479474098edSDavid Hildenbrand if (pmd_protnone(*pmd) && !gup_can_follow_protnone(flags)) 14805535be30SDavid Hildenbrand return NULL; 14813faa52c0SJohn Hubbard 148284209e87SDavid Hildenbrand if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page)) 1483a7f22660SDavid Hildenbrand return ERR_PTR(-EMLINK); 1484a7f22660SDavid Hildenbrand 1485b6a2619cSDavid Hildenbrand VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) && 1486b6a2619cSDavid Hildenbrand !PageAnonExclusive(page), page); 1487b6a2619cSDavid Hildenbrand 14880f089235SLogan Gunthorpe ret = try_grab_page(page, flags); 14890f089235SLogan Gunthorpe if (ret) 14900f089235SLogan Gunthorpe return ERR_PTR(ret); 14913faa52c0SJohn Hubbard 14923565fce3SDan Williams if (flags & FOLL_TOUCH) 1493a69e4717SMiaohe Lin touch_pmd(vma, addr, pmd, flags & FOLL_WRITE); 14943faa52c0SJohn Hubbard 149571e3aac0SAndrea Arcangeli page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; 1496ca120cf6SDan Williams VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page); 149771e3aac0SAndrea Arcangeli 149871e3aac0SAndrea Arcangeli return page; 149971e3aac0SAndrea Arcangeli } 150071e3aac0SAndrea Arcangeli 1501d10e63f2SMel Gorman /* NUMA hinting page fault entry point for trans huge pmds */ 15025db4f15cSYang Shi vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) 1503d10e63f2SMel Gorman { 150482b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 1505c5b5a3ddSYang Shi pmd_t oldpmd = vmf->orig_pmd; 1506c5b5a3ddSYang Shi pmd_t pmd; 1507b32967ffSMel Gorman struct page *page; 150882b0f8c3SJan Kara unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 1509c5b5a3ddSYang Shi int page_nid = NUMA_NO_NODE; 151033024536SHuang Ying int target_nid, last_cpupid = (-1 & 
LAST_CPUPID_MASK); 15116a56ccbcSDavid Hildenbrand bool migrated = false, writable = false; 15126688cc05SPeter Zijlstra int flags = 0; 1513d10e63f2SMel Gorman 151482b0f8c3SJan Kara vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 1515c5b5a3ddSYang Shi if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) { 151682b0f8c3SJan Kara spin_unlock(vmf->ptl); 1517de466bd6SMel Gorman goto out; 1518de466bd6SMel Gorman } 1519de466bd6SMel Gorman 1520c5b5a3ddSYang Shi pmd = pmd_modify(oldpmd, vma->vm_page_prot); 15216a56ccbcSDavid Hildenbrand 15226a56ccbcSDavid Hildenbrand /* 15236a56ccbcSDavid Hildenbrand * Detect now whether the PMD could be writable; this information 15246a56ccbcSDavid Hildenbrand * is only valid while holding the PT lock. 15256a56ccbcSDavid Hildenbrand */ 15266a56ccbcSDavid Hildenbrand writable = pmd_write(pmd); 15276a56ccbcSDavid Hildenbrand if (!writable && vma_wants_manual_pte_write_upgrade(vma) && 15286a56ccbcSDavid Hildenbrand can_change_pmd_writable(vma, vmf->address, pmd)) 15296a56ccbcSDavid Hildenbrand writable = true; 15306a56ccbcSDavid Hildenbrand 1531c5b5a3ddSYang Shi page = vm_normal_page_pmd(vma, haddr, pmd); 1532c5b5a3ddSYang Shi if (!page) 1533c5b5a3ddSYang Shi goto out_map; 1534c5b5a3ddSYang Shi 1535c5b5a3ddSYang Shi /* See similar comment in do_numa_page for explanation */ 15366a56ccbcSDavid Hildenbrand if (!writable) 1537c5b5a3ddSYang Shi flags |= TNF_NO_GROUP; 1538c5b5a3ddSYang Shi 1539c5b5a3ddSYang Shi page_nid = page_to_nid(page); 154033024536SHuang Ying /* 154133024536SHuang Ying * For memory tiering mode, cpupid of slow memory page is used 154233024536SHuang Ying * to record page access time. So use default value. 154333024536SHuang Ying */ 154433024536SHuang Ying if (node_is_toptier(page_nid)) 1545c5b5a3ddSYang Shi last_cpupid = page_cpupid_last(page); 1546c5b5a3ddSYang Shi target_nid = numa_migrate_prep(page, vma, haddr, page_nid, 1547c5b5a3ddSYang Shi &flags); 1548c5b5a3ddSYang Shi 1549c5b5a3ddSYang Shi if (target_nid == NUMA_NO_NODE) { 1550c5b5a3ddSYang Shi put_page(page); 1551c5b5a3ddSYang Shi goto out_map; 1552c5b5a3ddSYang Shi } 1553c5b5a3ddSYang Shi 155482b0f8c3SJan Kara spin_unlock(vmf->ptl); 15556a56ccbcSDavid Hildenbrand writable = false; 15568b1b436dSPeter Zijlstra 1557c5b5a3ddSYang Shi migrated = migrate_misplaced_page(page, vma, target_nid); 15586688cc05SPeter Zijlstra if (migrated) { 15596688cc05SPeter Zijlstra flags |= TNF_MIGRATED; 15608191acbdSMel Gorman page_nid = target_nid; 1561c5b5a3ddSYang Shi } else { 1562074c2381SMel Gorman flags |= TNF_MIGRATE_FAIL; 1563c5b5a3ddSYang Shi vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 1564c5b5a3ddSYang Shi if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) { 156582b0f8c3SJan Kara spin_unlock(vmf->ptl); 1566c5b5a3ddSYang Shi goto out; 1567c5b5a3ddSYang Shi } 1568c5b5a3ddSYang Shi goto out_map; 1569c5b5a3ddSYang Shi } 1570b8916634SMel Gorman 1571b8916634SMel Gorman out: 157298fa15f3SAnshuman Khandual if (page_nid != NUMA_NO_NODE) 157382b0f8c3SJan Kara task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, 15749a8b300fSAneesh Kumar K.V flags); 15758191acbdSMel Gorman 1576d10e63f2SMel Gorman return 0; 1577c5b5a3ddSYang Shi 1578c5b5a3ddSYang Shi out_map: 1579c5b5a3ddSYang Shi /* Restore the PMD */ 1580c5b5a3ddSYang Shi pmd = pmd_modify(oldpmd, vma->vm_page_prot); 1581c5b5a3ddSYang Shi pmd = pmd_mkyoung(pmd); 15826a56ccbcSDavid Hildenbrand if (writable) 1583c5b5a3ddSYang Shi pmd = pmd_mkwrite(pmd); 1584c5b5a3ddSYang Shi set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); 1585c5b5a3ddSYang Shi update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 
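/* The huge pmd has been re-established in place (young and, where permitted, writable), so the faulting task can make progress even when migration fails or is skipped. */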
1586c5b5a3ddSYang Shi spin_unlock(vmf->ptl); 1587c5b5a3ddSYang Shi goto out; 1588d10e63f2SMel Gorman } 1589d10e63f2SMel Gorman 1590319904adSHuang Ying /* 1591319904adSHuang Ying * Return true if we do MADV_FREE successfully on the entire pmd page. 1592319904adSHuang Ying * Otherwise, return false. 1593319904adSHuang Ying */ 1594319904adSHuang Ying bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1595b8d3c4c3SMinchan Kim pmd_t *pmd, unsigned long addr, unsigned long next) 1596b8d3c4c3SMinchan Kim { 1597b8d3c4c3SMinchan Kim spinlock_t *ptl; 1598b8d3c4c3SMinchan Kim pmd_t orig_pmd; 1599fc986a38SKefeng Wang struct folio *folio; 1600b8d3c4c3SMinchan Kim struct mm_struct *mm = tlb->mm; 1601319904adSHuang Ying bool ret = false; 1602b8d3c4c3SMinchan Kim 1603ed6a7935SPeter Zijlstra tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 160407e32661SAneesh Kumar K.V 1605b6ec57f4SKirill A. Shutemov ptl = pmd_trans_huge_lock(pmd, vma); 1606b6ec57f4SKirill A. Shutemov if (!ptl) 160725eedabeSLinus Torvalds goto out_unlocked; 1608b8d3c4c3SMinchan Kim 1609b8d3c4c3SMinchan Kim orig_pmd = *pmd; 1610319904adSHuang Ying if (is_huge_zero_pmd(orig_pmd)) 1611b8d3c4c3SMinchan Kim goto out; 1612b8d3c4c3SMinchan Kim 161384c3fc4eSZi Yan if (unlikely(!pmd_present(orig_pmd))) { 161484c3fc4eSZi Yan VM_BUG_ON(thp_migration_supported() && 161584c3fc4eSZi Yan !is_pmd_migration_entry(orig_pmd)); 161684c3fc4eSZi Yan goto out; 161784c3fc4eSZi Yan } 161884c3fc4eSZi Yan 1619fc986a38SKefeng Wang folio = pfn_folio(pmd_pfn(orig_pmd)); 1620b8d3c4c3SMinchan Kim /* 1621fc986a38SKefeng Wang * If other processes are mapping this folio, we cannot discard 1622fc986a38SKefeng Wang * it unless they all do MADV_FREE, so let's skip the folio. 1623b8d3c4c3SMinchan Kim */ 1624fc986a38SKefeng Wang if (folio_mapcount(folio) != 1) 1625b8d3c4c3SMinchan Kim goto out; 1626b8d3c4c3SMinchan Kim 1627fc986a38SKefeng Wang if (!folio_trylock(folio)) 1628b8d3c4c3SMinchan Kim goto out; 1629b8d3c4c3SMinchan Kim 1630b8d3c4c3SMinchan Kim /* 1631b8d3c4c3SMinchan Kim * If the user wants to discard part of the THP, split it so MADV_FREE 1632b8d3c4c3SMinchan Kim * will deactivate only those pages. 1633b8d3c4c3SMinchan Kim */ 1634b8d3c4c3SMinchan Kim if (next - addr != HPAGE_PMD_SIZE) { 1635fc986a38SKefeng Wang folio_get(folio); 1636b8d3c4c3SMinchan Kim spin_unlock(ptl); 1637fc986a38SKefeng Wang split_folio(folio); 1638fc986a38SKefeng Wang folio_unlock(folio); 1639fc986a38SKefeng Wang folio_put(folio); 1640b8d3c4c3SMinchan Kim goto out_unlocked; 1641b8d3c4c3SMinchan Kim } 1642b8d3c4c3SMinchan Kim 1643fc986a38SKefeng Wang if (folio_test_dirty(folio)) 1644fc986a38SKefeng Wang folio_clear_dirty(folio); 1645fc986a38SKefeng Wang folio_unlock(folio); 1646b8d3c4c3SMinchan Kim 1647b8d3c4c3SMinchan Kim if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
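/* Invalidate first so hardware-set young/dirty bits cannot race with us, then re-install the entry as old and clean. */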
164858ceeb6bSKirill A. Shutemov pmdp_invalidate(vma, addr, pmd); 1649b8d3c4c3SMinchan Kim orig_pmd = pmd_mkold(orig_pmd); 1650b8d3c4c3SMinchan Kim orig_pmd = pmd_mkclean(orig_pmd); 1651b8d3c4c3SMinchan Kim 1652b8d3c4c3SMinchan Kim set_pmd_at(mm, addr, pmd, orig_pmd); 1653b8d3c4c3SMinchan Kim tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 1654b8d3c4c3SMinchan Kim } 1655802a3a92SShaohua Li 16566a6fe9ebSKefeng Wang folio_mark_lazyfree(folio); 1657319904adSHuang Ying ret = true; 1658b8d3c4c3SMinchan Kim out: 1659b8d3c4c3SMinchan Kim spin_unlock(ptl); 1660b8d3c4c3SMinchan Kim out_unlocked: 1661b8d3c4c3SMinchan Kim return ret; 1662b8d3c4c3SMinchan Kim } 1663b8d3c4c3SMinchan Kim 1664953c66c2SAneesh Kumar K.V static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) 1665953c66c2SAneesh Kumar K.V { 1666953c66c2SAneesh Kumar K.V pgtable_t pgtable; 1667953c66c2SAneesh Kumar K.V 1668953c66c2SAneesh Kumar K.V pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1669953c66c2SAneesh Kumar K.V pte_free(mm, pgtable); 1670c4812909SKirill A. Shutemov mm_dec_nr_ptes(mm); 1671953c66c2SAneesh Kumar K.V } 1672953c66c2SAneesh Kumar K.V 167371e3aac0SAndrea Arcangeli int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1674f21760b1SShaohua Li pmd_t *pmd, unsigned long addr) 167571e3aac0SAndrea Arcangeli { 1676f5c8ad47SDavid Miller pmd_t orig_pmd; 1677da146769SKirill A. Shutemov spinlock_t *ptl; 1678da146769SKirill A. Shutemov 1679ed6a7935SPeter Zijlstra tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 168007e32661SAneesh Kumar K.V 1681b6ec57f4SKirill A. Shutemov ptl = __pmd_trans_huge_lock(pmd, vma); 1682b6ec57f4SKirill A. Shutemov if (!ptl) 1683da146769SKirill A. Shutemov return 0; 1684a6bf2bb0SAneesh Kumar K.V /* 1685a6bf2bb0SAneesh Kumar K.V * For architectures like ppc64 we look at the deposited pgtable 16868809aa2dSAneesh Kumar K.V * when calling pmdp_huge_get_and_clear. So do the 1687a6bf2bb0SAneesh Kumar K.V * pgtable_trans_huge_withdraw after finishing the pmdp-related 1688a6bf2bb0SAneesh Kumar K.V * operations. 1689a6bf2bb0SAneesh Kumar K.V */ 169093a98695SAneesh Kumar K.V orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd, 1691fcbe08d6SMartin Schwidefsky tlb->fullmm); 1692f21760b1SShaohua Li tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 16932484ca9bSThomas Hellstrom (VMware) if (vma_is_special_huge(vma)) { 16943b6521f5SOliver O'Halloran if (arch_needs_pgtable_deposit()) 16953b6521f5SOliver O'Halloran zap_deposited_table(tlb->mm, pmd); 16964897c765SMatthew Wilcox spin_unlock(ptl); 1697da146769SKirill A. Shutemov } else if (is_huge_zero_pmd(orig_pmd)) { 1698c14a6eb4SOliver O'Halloran zap_deposited_table(tlb->mm, pmd); 1699bf929152SKirill A. Shutemov spin_unlock(ptl); 1700479f0abbSKirill A.
Shutemov } else { 1701616b8371SZi Yan struct page *page = NULL; 1702616b8371SZi Yan int flush_needed = 1; 1703616b8371SZi Yan 1704616b8371SZi Yan if (pmd_present(orig_pmd)) { 1705616b8371SZi Yan page = pmd_page(orig_pmd); 1706cea86fe2SHugh Dickins page_remove_rmap(page, vma, true); 1707309381feSSasha Levin VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); 1708309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(page), page); 1709616b8371SZi Yan } else if (thp_migration_supported()) { 1710616b8371SZi Yan swp_entry_t entry; 1711616b8371SZi Yan 1712616b8371SZi Yan VM_BUG_ON(!is_pmd_migration_entry(orig_pmd)); 1713616b8371SZi Yan entry = pmd_to_swp_entry(orig_pmd); 1714af5cdaf8SAlistair Popple page = pfn_swap_entry_to_page(entry); 1715616b8371SZi Yan flush_needed = 0; 1716616b8371SZi Yan } else 1717616b8371SZi Yan WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); 1718616b8371SZi Yan 1719b5072380SKirill A. Shutemov if (PageAnon(page)) { 1720c14a6eb4SOliver O'Halloran zap_deposited_table(tlb->mm, pmd); 1721b5072380SKirill A. Shutemov add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); 1722b5072380SKirill A. Shutemov } else { 1723953c66c2SAneesh Kumar K.V if (arch_needs_pgtable_deposit()) 1724953c66c2SAneesh Kumar K.V zap_deposited_table(tlb->mm, pmd); 1725fadae295SYang Shi add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR); 1726b5072380SKirill A. Shutemov } 1727616b8371SZi Yan 1728bf929152SKirill A. Shutemov spin_unlock(ptl); 1729616b8371SZi Yan if (flush_needed) 1730e77b0852SAneesh Kumar K.V tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE); 1731479f0abbSKirill A. Shutemov } 1732da146769SKirill A. Shutemov return 1; 173371e3aac0SAndrea Arcangeli } 173471e3aac0SAndrea Arcangeli 17351dd38b6cSAneesh Kumar K.V #ifndef pmd_move_must_withdraw 17361dd38b6cSAneesh Kumar K.V static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl, 17371dd38b6cSAneesh Kumar K.V spinlock_t *old_pmd_ptl, 17381dd38b6cSAneesh Kumar K.V struct vm_area_struct *vma) 17391dd38b6cSAneesh Kumar K.V { 17401dd38b6cSAneesh Kumar K.V /* 17411dd38b6cSAneesh Kumar K.V * With split pmd lock we also need to move preallocated 17421dd38b6cSAneesh Kumar K.V * PTE page table if new_pmd is on different PMD page table. 17431dd38b6cSAneesh Kumar K.V * 17441dd38b6cSAneesh Kumar K.V * We also don't deposit and withdraw tables for file pages. 17451dd38b6cSAneesh Kumar K.V */ 17461dd38b6cSAneesh Kumar K.V return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); 17471dd38b6cSAneesh Kumar K.V } 17481dd38b6cSAneesh Kumar K.V #endif 17491dd38b6cSAneesh Kumar K.V 1750ab6e3d09SNaoya Horiguchi static pmd_t move_soft_dirty_pmd(pmd_t pmd) 1751ab6e3d09SNaoya Horiguchi { 1752ab6e3d09SNaoya Horiguchi #ifdef CONFIG_MEM_SOFT_DIRTY 1753ab6e3d09SNaoya Horiguchi if (unlikely(is_pmd_migration_entry(pmd))) 1754ab6e3d09SNaoya Horiguchi pmd = pmd_swp_mksoft_dirty(pmd); 1755ab6e3d09SNaoya Horiguchi else if (pmd_present(pmd)) 1756ab6e3d09SNaoya Horiguchi pmd = pmd_mksoft_dirty(pmd); 1757ab6e3d09SNaoya Horiguchi #endif 1758ab6e3d09SNaoya Horiguchi return pmd; 1759ab6e3d09SNaoya Horiguchi } 1760ab6e3d09SNaoya Horiguchi 1761bf8616d5SHugh Dickins bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, 1762b8aa9d9dSWei Yang unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd) 176337a1c49aSAndrea Arcangeli { 1764bf929152SKirill A. 
Shutemov spinlock_t *old_ptl, *new_ptl; 176537a1c49aSAndrea Arcangeli pmd_t pmd; 176637a1c49aSAndrea Arcangeli struct mm_struct *mm = vma->vm_mm; 17675d190420SAaron Lu bool force_flush = false; 176837a1c49aSAndrea Arcangeli 176937a1c49aSAndrea Arcangeli /* 177037a1c49aSAndrea Arcangeli * The destination pmd shouldn't be established, free_pgtables() 177137a1c49aSAndrea Arcangeli * should have released it. 177237a1c49aSAndrea Arcangeli */ 177337a1c49aSAndrea Arcangeli if (WARN_ON(!pmd_none(*new_pmd))) { 177437a1c49aSAndrea Arcangeli VM_BUG_ON(pmd_trans_huge(*new_pmd)); 17754b471e88SKirill A. Shutemov return false; 177637a1c49aSAndrea Arcangeli } 177737a1c49aSAndrea Arcangeli 1778bf929152SKirill A. Shutemov /* 1779bf929152SKirill A. Shutemov * We don't have to worry about the ordering of src and dst 1780c1e8d7c6SMichel Lespinasse * ptlocks because exclusive mmap_lock prevents deadlock. 1781bf929152SKirill A. Shutemov */ 1782b6ec57f4SKirill A. Shutemov old_ptl = __pmd_trans_huge_lock(old_pmd, vma); 1783b6ec57f4SKirill A. Shutemov if (old_ptl) { 1784bf929152SKirill A. Shutemov new_ptl = pmd_lockptr(mm, new_pmd); 1785bf929152SKirill A. Shutemov if (new_ptl != old_ptl) 1786bf929152SKirill A. Shutemov spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); 17878809aa2dSAneesh Kumar K.V pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); 1788eb66ae03SLinus Torvalds if (pmd_present(pmd)) 1789a2ce2666SAaron Lu force_flush = true; 179037a1c49aSAndrea Arcangeli VM_BUG_ON(!pmd_none(*new_pmd)); 17913592806cSKirill A. Shutemov 17921dd38b6cSAneesh Kumar K.V if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) { 1793b3084f4dSAneesh Kumar K.V pgtable_t pgtable; 17943592806cSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); 17953592806cSKirill A. Shutemov pgtable_trans_huge_deposit(mm, new_pmd, pgtable); 17963592806cSKirill A. Shutemov } 1797ab6e3d09SNaoya Horiguchi pmd = move_soft_dirty_pmd(pmd); 1798ab6e3d09SNaoya Horiguchi set_pmd_at(mm, new_addr, new_pmd, pmd); 17995d190420SAaron Lu if (force_flush) 18007c38f181SMiaohe Lin flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE); 1801eb66ae03SLinus Torvalds if (new_ptl != old_ptl) 1802eb66ae03SLinus Torvalds spin_unlock(new_ptl); 1803bf929152SKirill A. Shutemov spin_unlock(old_ptl); 18044b471e88SKirill A. Shutemov return true; 180537a1c49aSAndrea Arcangeli } 18064b471e88SKirill A. Shutemov return false; 180737a1c49aSAndrea Arcangeli } 180837a1c49aSAndrea Arcangeli 1809f123d74aSMel Gorman /* 1810f123d74aSMel Gorman * Returns 1811f123d74aSMel Gorman * - 0 if PMD could not be locked 1812f0953a1bSIngo Molnar * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary 1813e346e668SYang Shi * or if prot_numa but THP migration is not supported 1814f0953a1bSIngo Molnar * - HPAGE_PMD_NR if protections changed and TLB flush necessary 1815f123d74aSMel Gorman */ 18164a18419fSNadav Amit int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 18174a18419fSNadav Amit pmd_t *pmd, unsigned long addr, pgprot_t newprot, 18184a18419fSNadav Amit unsigned long cp_flags) 1819cd7548abSJohannes Weiner { 1820cd7548abSJohannes Weiner struct mm_struct *mm = vma->vm_mm; 1821bf929152SKirill A.
Shutemov spinlock_t *ptl; 1822c9fe6656SNadav Amit pmd_t oldpmd, entry; 182358705444SPeter Xu bool prot_numa = cp_flags & MM_CP_PROT_NUMA; 1824292924b2SPeter Xu bool uffd_wp = cp_flags & MM_CP_UFFD_WP; 1825292924b2SPeter Xu bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; 18266a56ccbcSDavid Hildenbrand int ret = 1; 1827cd7548abSJohannes Weiner 18284a18419fSNadav Amit tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 18294a18419fSNadav Amit 1830e346e668SYang Shi if (prot_numa && !thp_migration_supported()) 1831e346e668SYang Shi return 1; 1832e346e668SYang Shi 1833b6ec57f4SKirill A. Shutemov ptl = __pmd_trans_huge_lock(pmd, vma); 18340a85e51dSKirill A. Shutemov if (!ptl) 18350a85e51dSKirill A. Shutemov return 0; 18360a85e51dSKirill A. Shutemov 183784c3fc4eSZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 183884c3fc4eSZi Yan if (is_swap_pmd(*pmd)) { 183984c3fc4eSZi Yan swp_entry_t entry = pmd_to_swp_entry(*pmd); 18406c287605SDavid Hildenbrand struct page *page = pfn_swap_entry_to_page(entry); 1841*24bf08c4SDavid Hildenbrand pmd_t newpmd; 184284c3fc4eSZi Yan 184384c3fc4eSZi Yan VM_BUG_ON(!is_pmd_migration_entry(*pmd)); 18444dd845b5SAlistair Popple if (is_writable_migration_entry(entry)) { 184584c3fc4eSZi Yan /* 184684c3fc4eSZi Yan * A protection check is difficult so 184784c3fc4eSZi Yan * just be safe and disable write 184884c3fc4eSZi Yan */ 18496c287605SDavid Hildenbrand if (PageAnon(page)) 18506c287605SDavid Hildenbrand entry = make_readable_exclusive_migration_entry(swp_offset(entry)); 18516c287605SDavid Hildenbrand else 18526c287605SDavid Hildenbrand entry = make_readable_migration_entry(swp_offset(entry)); 185384c3fc4eSZi Yan newpmd = swp_entry_to_pmd(entry); 1854ab6e3d09SNaoya Horiguchi if (pmd_swp_soft_dirty(*pmd)) 1855ab6e3d09SNaoya Horiguchi newpmd = pmd_swp_mksoft_dirty(newpmd); 18568f34f1eaSPeter Xu if (pmd_swp_uffd_wp(*pmd)) 18578f34f1eaSPeter Xu newpmd = pmd_swp_mkuffd_wp(newpmd); 1858*24bf08c4SDavid Hildenbrand } else { 1859*24bf08c4SDavid Hildenbrand newpmd = *pmd; 186084c3fc4eSZi Yan } 1861*24bf08c4SDavid Hildenbrand 1862*24bf08c4SDavid Hildenbrand if (uffd_wp) 1863*24bf08c4SDavid Hildenbrand newpmd = pmd_swp_mkuffd_wp(newpmd); 1864*24bf08c4SDavid Hildenbrand else if (uffd_wp_resolve) 1865*24bf08c4SDavid Hildenbrand newpmd = pmd_swp_clear_uffd_wp(newpmd); 1866*24bf08c4SDavid Hildenbrand if (!pmd_same(*pmd, newpmd)) 1867*24bf08c4SDavid Hildenbrand set_pmd_at(mm, addr, pmd, newpmd); 186884c3fc4eSZi Yan goto unlock; 186984c3fc4eSZi Yan } 187084c3fc4eSZi Yan #endif 187184c3fc4eSZi Yan 1872a1a3a2fcSHuang Ying if (prot_numa) { 1873a1a3a2fcSHuang Ying struct page *page; 187433024536SHuang Ying bool toptier; 1875e944fd67SMel Gorman /* 1876e944fd67SMel Gorman * Avoid trapping faults against the zero page. The read-only 1877e944fd67SMel Gorman * data is likely to be read-cached on the local CPU and 1878e944fd67SMel Gorman * local/remote hits to the zero page are not interesting. 1879e944fd67SMel Gorman */ 1880a1a3a2fcSHuang Ying if (is_huge_zero_pmd(*pmd)) 18810a85e51dSKirill A. Shutemov goto unlock; 1882e944fd67SMel Gorman 1883a1a3a2fcSHuang Ying if (pmd_protnone(*pmd)) 18840a85e51dSKirill A. Shutemov goto unlock; 18850a85e51dSKirill A. 
Shutemov 1886a1a3a2fcSHuang Ying page = pmd_page(*pmd); 188733024536SHuang Ying toptier = node_is_toptier(page_to_nid(page)); 1888a1a3a2fcSHuang Ying /* 1889a1a3a2fcSHuang Ying * Skip scanning top tier node if normal numa 1890a1a3a2fcSHuang Ying * balancing is disabled 1891a1a3a2fcSHuang Ying */ 1892a1a3a2fcSHuang Ying if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) && 189333024536SHuang Ying toptier) 1894a1a3a2fcSHuang Ying goto unlock; 189533024536SHuang Ying 189633024536SHuang Ying if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING && 189733024536SHuang Ying !toptier) 189833024536SHuang Ying xchg_page_access_time(page, jiffies_to_msecs(jiffies)); 1899a1a3a2fcSHuang Ying } 1900ced10803SKirill A. Shutemov /* 19013e4e28c5SMichel Lespinasse * In case prot_numa, we are under mmap_read_lock(mm). It's critical 1902ced10803SKirill A. Shutemov * to not clear pmd intermittently to avoid race with MADV_DONTNEED 19033e4e28c5SMichel Lespinasse * which is also under mmap_read_lock(mm): 1904ced10803SKirill A. Shutemov * 1905ced10803SKirill A. Shutemov * CPU0: CPU1: 1906ced10803SKirill A. Shutemov * change_huge_pmd(prot_numa=1) 1907ced10803SKirill A. Shutemov * pmdp_huge_get_and_clear_notify() 1908ced10803SKirill A. Shutemov * madvise_dontneed() 1909ced10803SKirill A. Shutemov * zap_pmd_range() 1910ced10803SKirill A. Shutemov * pmd_trans_huge(*pmd) == 0 (without ptl) 1911ced10803SKirill A. Shutemov * // skip the pmd 1912ced10803SKirill A. Shutemov * set_pmd_at(); 1913ced10803SKirill A. Shutemov * // pmd is re-established 1914ced10803SKirill A. Shutemov * 1915ced10803SKirill A. Shutemov * The race makes MADV_DONTNEED miss the huge pmd and don't clear it 1916ced10803SKirill A. Shutemov * which may break userspace. 1917ced10803SKirill A. Shutemov * 19184f831457SNadav Amit * pmdp_invalidate_ad() is required to make sure we don't miss 1919ced10803SKirill A. Shutemov * dirty/young flags set by hardware. 1920ced10803SKirill A. Shutemov */ 19214f831457SNadav Amit oldpmd = pmdp_invalidate_ad(vma, addr, pmd); 1922ced10803SKirill A. Shutemov 1923c9fe6656SNadav Amit entry = pmd_modify(oldpmd, newprot); 1924f1eb1bacSPeter Xu if (uffd_wp) 1925292924b2SPeter Xu entry = pmd_mkuffd_wp(entry); 1926f1eb1bacSPeter Xu else if (uffd_wp_resolve) 1927292924b2SPeter Xu /* 1928292924b2SPeter Xu * Leave the write bit to be handled by PF interrupt 1929292924b2SPeter Xu * handler, then things like COW could be properly 1930292924b2SPeter Xu * handled. 1931292924b2SPeter Xu */ 1932292924b2SPeter Xu entry = pmd_clear_uffd_wp(entry); 1933c27f479eSDavid Hildenbrand 1934c27f479eSDavid Hildenbrand /* See change_pte_range(). */ 1935c27f479eSDavid Hildenbrand if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) && 1936c27f479eSDavid Hildenbrand can_change_pmd_writable(vma, addr, entry)) 1937c27f479eSDavid Hildenbrand entry = pmd_mkwrite(entry); 1938c27f479eSDavid Hildenbrand 1939f123d74aSMel Gorman ret = HPAGE_PMD_NR; 194056eecdb9SAneesh Kumar K.V set_pmd_at(mm, addr, pmd, entry); 19414a18419fSNadav Amit 1942c9fe6656SNadav Amit if (huge_pmd_needs_flush(oldpmd, entry)) 19434a18419fSNadav Amit tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE); 19440a85e51dSKirill A. Shutemov unlock: 1945bf929152SKirill A. Shutemov spin_unlock(ptl); 1946cd7548abSJohannes Weiner return ret; 1947cd7548abSJohannes Weiner } 1948cd7548abSJohannes Weiner 1949025c5b24SNaoya Horiguchi /* 19508f19b0c0SHuang Ying * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise. 
1951025c5b24SNaoya Horiguchi * 19528f19b0c0SHuang Ying * Note that if it returns a page table lock pointer, this routine returns 19538f19b0c0SHuang Ying * without unlocking the page table lock, so callers must unlock it. 1954025c5b24SNaoya Horiguchi */ 1955b6ec57f4SKirill A. Shutemov spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) 1956025c5b24SNaoya Horiguchi { 1957b6ec57f4SKirill A. Shutemov spinlock_t *ptl; 1958b6ec57f4SKirill A. Shutemov ptl = pmd_lock(vma->vm_mm, pmd); 195984c3fc4eSZi Yan if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || 196084c3fc4eSZi Yan pmd_devmap(*pmd))) 1961b6ec57f4SKirill A. Shutemov return ptl; 1962b6ec57f4SKirill A. Shutemov spin_unlock(ptl); 1963b6ec57f4SKirill A. Shutemov return NULL; 1964025c5b24SNaoya Horiguchi } 1965025c5b24SNaoya Horiguchi 1966a00cc7d9SMatthew Wilcox /* 1967d965e390SMiaohe Lin * Returns page table lock pointer if a given pud maps a thp, NULL otherwise. 1968a00cc7d9SMatthew Wilcox * 1969d965e390SMiaohe Lin * Note that if it returns a page table lock pointer, this routine returns 1970d965e390SMiaohe Lin * without unlocking the page table lock, so callers must unlock it. 1971a00cc7d9SMatthew Wilcox */ 1972a00cc7d9SMatthew Wilcox spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) 1973a00cc7d9SMatthew Wilcox { 1974a00cc7d9SMatthew Wilcox spinlock_t *ptl; 1975a00cc7d9SMatthew Wilcox 1976a00cc7d9SMatthew Wilcox ptl = pud_lock(vma->vm_mm, pud); 1977a00cc7d9SMatthew Wilcox if (likely(pud_trans_huge(*pud) || pud_devmap(*pud))) 1978a00cc7d9SMatthew Wilcox return ptl; 1979a00cc7d9SMatthew Wilcox spin_unlock(ptl); 1980a00cc7d9SMatthew Wilcox return NULL; 1981a00cc7d9SMatthew Wilcox } 1982a00cc7d9SMatthew Wilcox 1983a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 1984a00cc7d9SMatthew Wilcox int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, 1985a00cc7d9SMatthew Wilcox pud_t *pud, unsigned long addr) 1986a00cc7d9SMatthew Wilcox { 1987a00cc7d9SMatthew Wilcox spinlock_t *ptl; 1988a00cc7d9SMatthew Wilcox 1989a00cc7d9SMatthew Wilcox ptl = __pud_trans_huge_lock(pud, vma); 1990a00cc7d9SMatthew Wilcox if (!ptl) 1991a00cc7d9SMatthew Wilcox return 0; 199274929079SMiaohe Lin 199370516b93SQian Cai pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm); 1994a00cc7d9SMatthew Wilcox tlb_remove_pud_tlb_entry(tlb, pud, addr); 19952484ca9bSThomas Hellstrom (VMware) if (vma_is_special_huge(vma)) { 1996a00cc7d9SMatthew Wilcox spin_unlock(ptl); 1997a00cc7d9SMatthew Wilcox /* No zero page support yet */ 1998a00cc7d9SMatthew Wilcox } else { 1999a00cc7d9SMatthew Wilcox /* No support for anonymous PUD pages yet */ 2000a00cc7d9SMatthew Wilcox BUG(); 2001a00cc7d9SMatthew Wilcox } 2002a00cc7d9SMatthew Wilcox return 1; 2003a00cc7d9SMatthew Wilcox } 2004a00cc7d9SMatthew Wilcox 2005a00cc7d9SMatthew Wilcox static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, 2006a00cc7d9SMatthew Wilcox unsigned long haddr) 2007a00cc7d9SMatthew Wilcox { 2008a00cc7d9SMatthew Wilcox VM_BUG_ON(haddr & ~HPAGE_PUD_MASK); 2009a00cc7d9SMatthew Wilcox VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 2010a00cc7d9SMatthew Wilcox VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); 2011a00cc7d9SMatthew Wilcox VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud)); 2012a00cc7d9SMatthew Wilcox 2013ce9311cfSYisheng Xie count_vm_event(THP_SPLIT_PUD); 2014a00cc7d9SMatthew Wilcox 2015a00cc7d9SMatthew Wilcox pudp_huge_clear_flush_notify(vma, haddr, pud); 2016a00cc7d9SMatthew Wilcox }
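/* Split a huge pud in place. There is no anonymous PUD THP yet, so splitting only needs to clear the entry and let subsequent faults repopulate the range at a smaller granularity. */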
2017a00cc7d9SMatthew Wilcox 2018a00cc7d9SMatthew Wilcox void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, 2019a00cc7d9SMatthew Wilcox unsigned long address) 2020a00cc7d9SMatthew Wilcox { 2021a00cc7d9SMatthew Wilcox spinlock_t *ptl; 2022ac46d4f3SJérôme Glisse struct mmu_notifier_range range; 2023a00cc7d9SMatthew Wilcox 20247d4a8be0SAlistair Popple mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 20256f4f13e8SJérôme Glisse address & HPAGE_PUD_MASK, 2026ac46d4f3SJérôme Glisse (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE); 2027ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 2028ac46d4f3SJérôme Glisse ptl = pud_lock(vma->vm_mm, pud); 2029a00cc7d9SMatthew Wilcox if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud))) 2030a00cc7d9SMatthew Wilcox goto out; 2031ac46d4f3SJérôme Glisse __split_huge_pud_locked(vma, pud, range.start); 2032a00cc7d9SMatthew Wilcox 2033a00cc7d9SMatthew Wilcox out: 2034a00cc7d9SMatthew Wilcox spin_unlock(ptl); 20354645b9feSJérôme Glisse /* 20364645b9feSJérôme Glisse * No need to double call mmu_notifier->invalidate_range() callback as 20374645b9feSJérôme Glisse * the above pudp_huge_clear_flush_notify() did already call it. 20384645b9feSJérôme Glisse */ 2039ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_only_end(&range); 2040a00cc7d9SMatthew Wilcox } 2041a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 2042a00cc7d9SMatthew Wilcox 2043eef1b3baSKirill A. Shutemov static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, 2044eef1b3baSKirill A. Shutemov unsigned long haddr, pmd_t *pmd) 2045eef1b3baSKirill A. Shutemov { 2046eef1b3baSKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 2047eef1b3baSKirill A. Shutemov pgtable_t pgtable; 204842b2af2cSDavid Hildenbrand pmd_t _pmd, old_pmd; 2049eef1b3baSKirill A. Shutemov int i; 2050eef1b3baSKirill A. Shutemov 20510f10851eSJérôme Glisse /* 20520f10851eSJérôme Glisse * Leave pmd empty until pte is filled. Note that it is fine to delay 20530f10851eSJérôme Glisse * notification until mmu_notifier_invalidate_range_end() as we are 20540f10851eSJérôme Glisse * replacing a zero pmd write protected page with a zero pte write 20550f10851eSJérôme Glisse * protected page. 20560f10851eSJérôme Glisse * 2057ee65728eSMike Rapoport * See Documentation/mm/mmu_notifier.rst 20580f10851eSJérôme Glisse */ 205942b2af2cSDavid Hildenbrand old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd); 2060eef1b3baSKirill A. Shutemov 2061eef1b3baSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2062eef1b3baSKirill A. Shutemov pmd_populate(mm, &_pmd, pgtable); 2063eef1b3baSKirill A. Shutemov 2064eef1b3baSKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 2065eef1b3baSKirill A. Shutemov pte_t *pte, entry; 2066eef1b3baSKirill A. Shutemov entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); 2067eef1b3baSKirill A. Shutemov entry = pte_mkspecial(entry); 206842b2af2cSDavid Hildenbrand if (pmd_uffd_wp(old_pmd)) 206942b2af2cSDavid Hildenbrand entry = pte_mkuffd_wp(entry); 2070eef1b3baSKirill A. Shutemov pte = pte_offset_map(&_pmd, haddr); 2071eef1b3baSKirill A. Shutemov VM_BUG_ON(!pte_none(*pte)); 2072eef1b3baSKirill A. Shutemov set_pte_at(mm, haddr, pte, entry); 2073eef1b3baSKirill A. Shutemov pte_unmap(pte); 2074eef1b3baSKirill A. Shutemov } 2075eef1b3baSKirill A. Shutemov smp_wmb(); /* make pte visible before pmd */ 2076eef1b3baSKirill A. Shutemov pmd_populate(mm, pmd, pgtable); 2077eef1b3baSKirill A. Shutemov }
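/* Core of the pmd split: replace one huge pmd with a page table of PAGE_SIZE ptes carrying the same young/dirty/soft-dirty/uffd-wp state; with "freeze" the new ptes are written as migration entries instead. */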
2078eef1b3baSKirill A. Shutemov 2079eef1b3baSKirill A. Shutemov static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, 2080ba988280SKirill A. Shutemov unsigned long haddr, bool freeze) 2081eef1b3baSKirill A. Shutemov { 2082eef1b3baSKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 2083eef1b3baSKirill A. Shutemov struct page *page; 2084eef1b3baSKirill A. Shutemov pgtable_t pgtable; 2085423ac9afSAneesh Kumar K.V pmd_t old_pmd, _pmd; 2086292924b2SPeter Xu bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false; 20870ccf7f16SPeter Xu bool anon_exclusive = false, dirty = false; 20882ac015e2SKirill A. Shutemov unsigned long addr; 2089eef1b3baSKirill A. Shutemov int i; 2090eef1b3baSKirill A. Shutemov 2091eef1b3baSKirill A. Shutemov VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); 2092eef1b3baSKirill A. Shutemov VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 2093eef1b3baSKirill A. Shutemov VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); 209484c3fc4eSZi Yan VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd) 209584c3fc4eSZi Yan && !pmd_devmap(*pmd)); 2096eef1b3baSKirill A. Shutemov 2097eef1b3baSKirill A. Shutemov count_vm_event(THP_SPLIT_PMD); 2098eef1b3baSKirill A. Shutemov 2099d21b9e57SKirill A. Shutemov if (!vma_is_anonymous(vma)) { 210099fa8a48SHugh Dickins old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); 2101953c66c2SAneesh Kumar K.V /* 2102953c66c2SAneesh Kumar K.V * We are going to unmap this huge page. So 2103953c66c2SAneesh Kumar K.V * just go ahead and zap it 2104953c66c2SAneesh Kumar K.V */ 2105953c66c2SAneesh Kumar K.V if (arch_needs_pgtable_deposit()) 2106953c66c2SAneesh Kumar K.V zap_deposited_table(mm, pmd); 21072484ca9bSThomas Hellstrom (VMware) if (vma_is_special_huge(vma)) 2108d21b9e57SKirill A. Shutemov return; 210999fa8a48SHugh Dickins if (unlikely(is_pmd_migration_entry(old_pmd))) { 211099fa8a48SHugh Dickins swp_entry_t entry; 211199fa8a48SHugh Dickins 211299fa8a48SHugh Dickins entry = pmd_to_swp_entry(old_pmd); 2113af5cdaf8SAlistair Popple page = pfn_swap_entry_to_page(entry); 211499fa8a48SHugh Dickins } else { 211599fa8a48SHugh Dickins page = pmd_page(old_pmd); 211699fa8a48SHugh Dickins if (!PageDirty(page) && pmd_dirty(old_pmd)) 2117e1f1b157SHugh Dickins set_page_dirty(page); 211899fa8a48SHugh Dickins if (!PageReferenced(page) && pmd_young(old_pmd)) 2119d21b9e57SKirill A. Shutemov SetPageReferenced(page); 2120cea86fe2SHugh Dickins page_remove_rmap(page, vma, true); 2121d21b9e57SKirill A. Shutemov put_page(page); 212299fa8a48SHugh Dickins } 2123fadae295SYang Shi add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR); 2124eef1b3baSKirill A. Shutemov return; 212599fa8a48SHugh Dickins } 212699fa8a48SHugh Dickins 21273b77e8c8SHugh Dickins if (is_huge_zero_pmd(*pmd)) { 21284645b9feSJérôme Glisse /* 21294645b9feSJérôme Glisse * FIXME: Do we want to invalidate secondary mmu by calling 21304645b9feSJérôme Glisse * mmu_notifier_invalidate_range()? See comments below inside 21314645b9feSJérôme Glisse * __split_huge_pmd(). 21324645b9feSJérôme Glisse * 21334645b9feSJérôme Glisse * We are going from a zero huge page write protected to zero 21344645b9feSJérôme Glisse * small page also write protected so it does not seem useful 21354645b9feSJérôme Glisse * to invalidate secondary mmu at this time. 21364645b9feSJérôme Glisse */ 2137eef1b3baSKirill A. Shutemov return __split_huge_zero_page_pmd(vma, haddr, pmd); 2138eef1b3baSKirill A. Shutemov }
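/* From here on the pmd maps (or is a migration entry for) an anonymous THP that is split in place. */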
2139eef1b3baSKirill A. Shutemov 2140423ac9afSAneesh Kumar K.V /* 2141423ac9afSAneesh Kumar K.V * Up to this point the pmd is present and huge and userland has full 2142423ac9afSAneesh Kumar K.V * access to the hugepage during the split (which happens in 2143423ac9afSAneesh Kumar K.V * place). If we overwrite the pmd with the not-huge version pointing 2144423ac9afSAneesh Kumar K.V * to the pte here (which of course we could if all CPUs were bug 2145423ac9afSAneesh Kumar K.V * free), userland could trigger a small page size TLB miss on the 2146423ac9afSAneesh Kumar K.V * small sized TLB while the hugepage TLB entry is still established in 2147423ac9afSAneesh Kumar K.V * the huge TLB. Some CPUs don't like that. 214842742d9bSAlexander A. Klimov * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum 214942742d9bSAlexander A. Klimov * 383 on page 105. Intel should be safe but also warns that it's 2150423ac9afSAneesh Kumar K.V * only safe if the permission and cache attributes of the two entries 2151423ac9afSAneesh Kumar K.V * loaded in the two TLBs are identical (which should be the case here). 2152423ac9afSAneesh Kumar K.V * But it is generally safer to never allow small and huge TLB entries 2153423ac9afSAneesh Kumar K.V * for the same virtual address to be loaded simultaneously. So instead 2154423ac9afSAneesh Kumar K.V * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the 2155423ac9afSAneesh Kumar K.V * current pmd notpresent (atomically because here the pmd_trans_huge 2156423ac9afSAneesh Kumar K.V * must remain set at all times on the pmd until the split is complete 2157423ac9afSAneesh Kumar K.V * for this pmd), then we flush the SMP TLB and finally we write the 2158423ac9afSAneesh Kumar K.V * non-huge version of the pmd entry with pmd_populate.
2159423ac9afSAneesh Kumar K.V */ 2160423ac9afSAneesh Kumar K.V old_pmd = pmdp_invalidate(vma, haddr, pmd); 2161423ac9afSAneesh Kumar K.V 2162423ac9afSAneesh Kumar K.V pmd_migration = is_pmd_migration_entry(old_pmd); 21632e83ee1dSPeter Xu if (unlikely(pmd_migration)) { 216484c3fc4eSZi Yan swp_entry_t entry; 216584c3fc4eSZi Yan 2166423ac9afSAneesh Kumar K.V entry = pmd_to_swp_entry(old_pmd); 2167af5cdaf8SAlistair Popple page = pfn_swap_entry_to_page(entry); 21684dd845b5SAlistair Popple write = is_writable_migration_entry(entry); 21696c287605SDavid Hildenbrand if (PageAnon(page)) 21706c287605SDavid Hildenbrand anon_exclusive = is_readable_exclusive_migration_entry(entry); 21712e346877SPeter Xu young = is_migration_entry_young(entry); 21722e346877SPeter Xu dirty = is_migration_entry_dirty(entry); 21732e83ee1dSPeter Xu soft_dirty = pmd_swp_soft_dirty(old_pmd); 2174f45ec5ffSPeter Xu uffd_wp = pmd_swp_uffd_wp(old_pmd); 21752e83ee1dSPeter Xu } else { 2176423ac9afSAneesh Kumar K.V page = pmd_page(old_pmd); 21770ccf7f16SPeter Xu if (pmd_dirty(old_pmd)) { 21780ccf7f16SPeter Xu dirty = true; 2179423ac9afSAneesh Kumar K.V SetPageDirty(page); 21800ccf7f16SPeter Xu } 2181423ac9afSAneesh Kumar K.V write = pmd_write(old_pmd); 2182423ac9afSAneesh Kumar K.V young = pmd_young(old_pmd); 2183423ac9afSAneesh Kumar K.V soft_dirty = pmd_soft_dirty(old_pmd); 2184292924b2SPeter Xu uffd_wp = pmd_uffd_wp(old_pmd); 21856c287605SDavid Hildenbrand 21862e83ee1dSPeter Xu VM_BUG_ON_PAGE(!page_count(page), page); 21876c287605SDavid Hildenbrand 21886c287605SDavid Hildenbrand /* 21896c287605SDavid Hildenbrand * Without "freeze", we'll simply split the PMD, propagating the 21906c287605SDavid Hildenbrand * PageAnonExclusive() flag for each PTE by setting it for 21916c287605SDavid Hildenbrand * each subpage -- no need to (temporarily) clear. 21926c287605SDavid Hildenbrand * 21936c287605SDavid Hildenbrand * With "freeze" we want to replace mapped pages by 21946c287605SDavid Hildenbrand * migration entries right away. This is only possible if we 21956c287605SDavid Hildenbrand * managed to clear PageAnonExclusive() -- see 21966c287605SDavid Hildenbrand * set_pmd_migration_entry(). 21976c287605SDavid Hildenbrand * 21986c287605SDavid Hildenbrand * In case we cannot clear PageAnonExclusive(), split the PMD 21996c287605SDavid Hildenbrand * only and let try_to_migrate_one() fail later. 2200088b8aa5SDavid Hildenbrand * 2201088b8aa5SDavid Hildenbrand * See page_try_share_anon_rmap(): invalidate PMD first. 22026c287605SDavid Hildenbrand */ 22036c287605SDavid Hildenbrand anon_exclusive = PageAnon(page) && PageAnonExclusive(page); 22046c287605SDavid Hildenbrand if (freeze && anon_exclusive && page_try_share_anon_rmap(page)) 22056c287605SDavid Hildenbrand freeze = false; 220696d82debSHugh Dickins if (!freeze) 220796d82debSHugh Dickins page_ref_add(page, HPAGE_PMD_NR - 1); 22089d84604bSHugh Dickins } 2209eef1b3baSKirill A. Shutemov 2210423ac9afSAneesh Kumar K.V /* 2211423ac9afSAneesh Kumar K.V * Withdraw the table only after we mark the pmd entry invalid. 2212423ac9afSAneesh Kumar K.V * This is critical for some architectures (Power). 2213423ac9afSAneesh Kumar K.V */ 2214eef1b3baSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2215eef1b3baSKirill A. Shutemov pmd_populate(mm, &_pmd, pgtable); 2216eef1b3baSKirill A. Shutemov 22172ac015e2SKirill A. Shutemov for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { 2218eef1b3baSKirill A. Shutemov pte_t entry, *pte; 2219eef1b3baSKirill A.
Shutemov /* 2220eef1b3baSKirill A. Shutemov * Note that NUMA hinting access restrictions are not 2221eef1b3baSKirill A. Shutemov * transferred to avoid any possibility of altering 2222eef1b3baSKirill A. Shutemov * permissions across VMAs. 2223eef1b3baSKirill A. Shutemov */ 222484c3fc4eSZi Yan if (freeze || pmd_migration) { 2225ba988280SKirill A. Shutemov swp_entry_t swp_entry; 22264dd845b5SAlistair Popple if (write) 22274dd845b5SAlistair Popple swp_entry = make_writable_migration_entry( 22284dd845b5SAlistair Popple page_to_pfn(page + i)); 22296c287605SDavid Hildenbrand else if (anon_exclusive) 22306c287605SDavid Hildenbrand swp_entry = make_readable_exclusive_migration_entry( 22316c287605SDavid Hildenbrand page_to_pfn(page + i)); 22324dd845b5SAlistair Popple else 22334dd845b5SAlistair Popple swp_entry = make_readable_migration_entry( 22344dd845b5SAlistair Popple page_to_pfn(page + i)); 22352e346877SPeter Xu if (young) 22362e346877SPeter Xu swp_entry = make_migration_entry_young(swp_entry); 22372e346877SPeter Xu if (dirty) 22382e346877SPeter Xu swp_entry = make_migration_entry_dirty(swp_entry); 2239ba988280SKirill A. Shutemov entry = swp_entry_to_pte(swp_entry); 2240804dd150SAndrea Arcangeli if (soft_dirty) 2241804dd150SAndrea Arcangeli entry = pte_swp_mksoft_dirty(entry); 2242f45ec5ffSPeter Xu if (uffd_wp) 2243f45ec5ffSPeter Xu entry = pte_swp_mkuffd_wp(entry); 2244ba988280SKirill A. Shutemov } else { 22456d2329f8SAndrea Arcangeli entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); 2246b8d3c4c3SMinchan Kim entry = maybe_mkwrite(entry, vma); 22476c287605SDavid Hildenbrand if (anon_exclusive) 22486c287605SDavid Hildenbrand SetPageAnonExclusive(page + i); 2249eef1b3baSKirill A. Shutemov if (!young) 2250eef1b3baSKirill A. Shutemov entry = pte_mkold(entry); 2251e833bc50SPeter Xu /* NOTE: this may set soft-dirty too on some archs */ 2252e833bc50SPeter Xu if (dirty) 2253e833bc50SPeter Xu entry = pte_mkdirty(entry); 2254624a2c94SPeter Xu /* 2255e833bc50SPeter Xu * NOTE: this needs to happen after pte_mkdirty, 2256e833bc50SPeter Xu * because some archs (sparc64, loongarch) could 2257e833bc50SPeter Xu * set hw write bit when mkdirty. 2258624a2c94SPeter Xu */ 2259e833bc50SPeter Xu if (!write) 2260e833bc50SPeter Xu entry = pte_wrprotect(entry); 2261804dd150SAndrea Arcangeli if (soft_dirty) 2262804dd150SAndrea Arcangeli entry = pte_mksoft_dirty(entry); 2263292924b2SPeter Xu if (uffd_wp) 2264292924b2SPeter Xu entry = pte_mkuffd_wp(entry); 226596d82debSHugh Dickins page_add_anon_rmap(page + i, vma, addr, false); 2266ba988280SKirill A. Shutemov } 22672ac015e2SKirill A. Shutemov pte = pte_offset_map(&_pmd, addr); 2268eef1b3baSKirill A. Shutemov BUG_ON(!pte_none(*pte)); 22692ac015e2SKirill A. Shutemov set_pte_at(mm, addr, pte, entry); 2270eef1b3baSKirill A. Shutemov pte_unmap(pte); 2271eef1b3baSKirill A. Shutemov } 2272eef1b3baSKirill A. Shutemov 2273cb67f428SHugh Dickins if (!pmd_migration) 2274cb67f428SHugh Dickins page_remove_rmap(page, vma, true); 227596d82debSHugh Dickins if (freeze) 227696d82debSHugh Dickins put_page(page); 2277eef1b3baSKirill A. Shutemov 2278eef1b3baSKirill A. Shutemov smp_wmb(); /* make pte visible before pmd */ 2279eef1b3baSKirill A. Shutemov pmd_populate(mm, pmd, pgtable); 2280eef1b3baSKirill A. Shutemov } 2281eef1b3baSKirill A. Shutemov 2282eef1b3baSKirill A. Shutemov void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 2283af28a988SMatthew Wilcox (Oracle) unsigned long address, bool freeze, struct folio *folio) 2284eef1b3baSKirill A. 
Shutemov { 2285eef1b3baSKirill A. Shutemov spinlock_t *ptl; 2286ac46d4f3SJérôme Glisse struct mmu_notifier_range range; 2287eef1b3baSKirill A. Shutemov 22887d4a8be0SAlistair Popple mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 22896f4f13e8SJérôme Glisse address & HPAGE_PMD_MASK, 2290ac46d4f3SJérôme Glisse (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE); 2291ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 2292ac46d4f3SJérôme Glisse ptl = pmd_lock(vma->vm_mm, pmd); 229333f4751eSNaoya Horiguchi 229333f4751eSNaoya Horiguchi /* 2295af28a988SMatthew Wilcox (Oracle) * If the caller asks to set up a migration entry, we need a folio to 2296af28a988SMatthew Wilcox (Oracle) * check the pmd against. Otherwise we can end up replacing the wrong folio. 229733f4751eSNaoya Horiguchi */ 2298af28a988SMatthew Wilcox (Oracle) VM_BUG_ON(freeze && !folio); 229983a8441fSMatthew Wilcox (Oracle) VM_WARN_ON_ONCE(folio && !folio_test_locked(folio)); 230033f4751eSNaoya Horiguchi 23017f760917SDavid Hildenbrand if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) || 230283a8441fSMatthew Wilcox (Oracle) is_pmd_migration_entry(*pmd)) { 2303cea33328SMiaohe Lin /* 2304cea33328SMiaohe Lin * It's safe to call pmd_page() when the folio is set because the 2305cea33328SMiaohe Lin * pmd is guaranteed to be present. 2306cea33328SMiaohe Lin */ 230783a8441fSMatthew Wilcox (Oracle) if (folio && folio != page_folio(pmd_page(*pmd))) 230883a8441fSMatthew Wilcox (Oracle) goto out; 2309ac46d4f3SJérôme Glisse __split_huge_pmd_locked(vma, pmd, range.start, freeze); 231083a8441fSMatthew Wilcox (Oracle) } 23117f760917SDavid Hildenbrand 2312e90309c9SKirill A. Shutemov out: 2313eef1b3baSKirill A. Shutemov spin_unlock(ptl); 23144645b9feSJérôme Glisse /* 23154645b9feSJérôme Glisse * No need to double-call the mmu_notifier->invalidate_range() callback. 23164645b9feSJérôme Glisse * There are 3 cases to consider inside __split_huge_pmd_locked(): 23174645b9feSJérôme Glisse * 1) pmdp_huge_clear_flush_notify() calls invalidate_range(); obvious. 23184645b9feSJérôme Glisse * 2) __split_huge_zero_page_pmd() reads only the zero page, and any write 23194645b9feSJérôme Glisse * fault will trigger a flush_notify before pointing to a new page 23204645b9feSJérôme Glisse * (it is fine if the secondary mmu keeps pointing to the old zero 23214645b9feSJérôme Glisse * page in the meantime). 23224645b9feSJérôme Glisse * 3) Splitting a huge pmd into ptes pointing to the same page: no need 23234645b9feSJérôme Glisse * to invalidate secondary tlb entries, they are all still valid; 23244645b9feSJérôme Glisse * any further changes to individual ptes will notify, so no need 23254645b9feSJérôme Glisse * to call mmu_notifier->invalidate_range(). 23264645b9feSJérôme Glisse */ 2327ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_only_end(&range); 2328eef1b3baSKirill A. Shutemov } 2329eef1b3baSKirill A. Shutemov 2330fec89c10SKirill A. Shutemov
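/*
 * Split the huge pmd, if any, that maps @address. This is a
 * convenience wrapper for callers that have an address rather
 * than the pmd itself at hand.
 */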
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, 2331af28a988SMatthew Wilcox (Oracle) bool freeze, struct folio *folio) 233294fcc585SAndrea Arcangeli { 233350722804SZach O'Keefe pmd_t *pmd = mm_find_pmd(vma->vm_mm, address); 233494fcc585SAndrea Arcangeli 233550722804SZach O'Keefe if (!pmd) 2336f72e7dcdSHugh Dickins return; 2337f72e7dcdSHugh Dickins 2338af28a988SMatthew Wilcox (Oracle) __split_huge_pmd(vma, pmd, address, freeze, folio); 233994fcc585SAndrea Arcangeli } 234094fcc585SAndrea Arcangeli 234171f9e58eSMiaohe Lin static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address) 234271f9e58eSMiaohe Lin { 234371f9e58eSMiaohe Lin /* 234471f9e58eSMiaohe Lin * If the new address isn't hpage aligned and it could previously 234571f9e58eSMiaohe Lin * contain a hugepage: check if we need to split a huge pmd. 234671f9e58eSMiaohe Lin */ 234771f9e58eSMiaohe Lin if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) && 234871f9e58eSMiaohe Lin range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE), 234971f9e58eSMiaohe Lin ALIGN(address, HPAGE_PMD_SIZE))) 235071f9e58eSMiaohe Lin split_huge_pmd_address(vma, address, false, NULL); 235171f9e58eSMiaohe Lin } 235271f9e58eSMiaohe Lin 2353e1b9996bSKirill A. Shutemov void vma_adjust_trans_huge(struct vm_area_struct *vma, 235494fcc585SAndrea Arcangeli unsigned long start, 235594fcc585SAndrea Arcangeli unsigned long end, 235694fcc585SAndrea Arcangeli long adjust_next) 235794fcc585SAndrea Arcangeli { 235871f9e58eSMiaohe Lin /* Check if we need to split start first. */ 235971f9e58eSMiaohe Lin split_huge_pmd_if_needed(vma, start); 236071f9e58eSMiaohe Lin 236171f9e58eSMiaohe Lin /* Check if we need to split end next. */ 236271f9e58eSMiaohe Lin split_huge_pmd_if_needed(vma, end); 236394fcc585SAndrea Arcangeli 236494fcc585SAndrea Arcangeli /* 236568540502SMatthew Wilcox (Oracle) * If we're also updating the next vma vm_start, 236671f9e58eSMiaohe Lin * check if we need to split it. 236794fcc585SAndrea Arcangeli */ 236894fcc585SAndrea Arcangeli if (adjust_next > 0) { 236968540502SMatthew Wilcox (Oracle) struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end); 237094fcc585SAndrea Arcangeli unsigned long nstart = next->vm_start; 2371f9d86a60SWei Yang nstart += adjust_next; 237271f9e58eSMiaohe Lin split_huge_pmd_if_needed(next, nstart); 237394fcc585SAndrea Arcangeli } 237494fcc585SAndrea Arcangeli } 2375e9b61f19SKirill A. Shutemov 2376684555aaSMatthew Wilcox (Oracle) static void unmap_folio(struct folio *folio) 2377e9b61f19SKirill A. Shutemov { 2378a98a2f0cSAlistair Popple enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | 2379a98a2f0cSAlistair Popple TTU_SYNC; 2380e9b61f19SKirill A. Shutemov 2381684555aaSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); 2382e9b61f19SKirill A. Shutemov 2383a98a2f0cSAlistair Popple /* 2384a98a2f0cSAlistair Popple * Anon pages need migration entries to preserve them, but file 2385a98a2f0cSAlistair Popple * pages can simply be left unmapped, then faulted back on demand. 2386a98a2f0cSAlistair Popple * If that is ever changed (perhaps for mlock), update remap_page(). 2387a98a2f0cSAlistair Popple */ 23884b8554c5SMatthew Wilcox (Oracle) if (folio_test_anon(folio)) 23894b8554c5SMatthew Wilcox (Oracle) try_to_migrate(folio, ttu_flags); 2390a98a2f0cSAlistair Popple else 2391869f7ee6SMatthew Wilcox (Oracle) try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK); 2392bd56086fSKirill A. Shutemov } 2393bd56086fSKirill A. Shutemov
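/*
 * remap_page() undoes unmap_folio() once the split is complete: for
 * anon folios it converts the migration entries installed above back
 * into ordinary ptes; file folios were simply left unmapped and will
 * be faulted back on demand.
 */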
23944eecb8b9SMatthew Wilcox (Oracle) static void remap_page(struct folio *folio, unsigned long nr) 2395e9b61f19SKirill A. Shutemov { 23964eecb8b9SMatthew Wilcox (Oracle) int i = 0; 2397ab02c252SHugh Dickins 2398684555aaSMatthew Wilcox (Oracle) /* If unmap_folio() uses try_to_migrate() on file, remove this check */ 23994eecb8b9SMatthew Wilcox (Oracle) if (!folio_test_anon(folio)) 2400ab02c252SHugh Dickins return; 24014eecb8b9SMatthew Wilcox (Oracle) for (;;) { 24024eecb8b9SMatthew Wilcox (Oracle) remove_migration_ptes(folio, folio, true); 24034eecb8b9SMatthew Wilcox (Oracle) i += folio_nr_pages(folio); 24044eecb8b9SMatthew Wilcox (Oracle) if (i >= nr) 24054eecb8b9SMatthew Wilcox (Oracle) break; 24064eecb8b9SMatthew Wilcox (Oracle) folio = folio_next(folio); 2407e9b61f19SKirill A. Shutemov } 2408ace71a19SKirill A. Shutemov } 2409e9b61f19SKirill A. Shutemov 241094866635SAlex Shi static void lru_add_page_tail(struct page *head, struct page *tail, 241188dcb9a3SAlex Shi struct lruvec *lruvec, struct list_head *list) 241288dcb9a3SAlex Shi { 241394866635SAlex Shi VM_BUG_ON_PAGE(!PageHead(head), head); 241494866635SAlex Shi VM_BUG_ON_PAGE(PageCompound(tail), head); 241594866635SAlex Shi VM_BUG_ON_PAGE(PageLRU(tail), head); 24166168d0daSAlex Shi lockdep_assert_held(&lruvec->lru_lock); 241788dcb9a3SAlex Shi 24186dbb5741SAlex Shi if (list) { 241988dcb9a3SAlex Shi /* page reclaim is reclaiming a huge page */ 24206dbb5741SAlex Shi VM_WARN_ON(PageLRU(head)); 242194866635SAlex Shi get_page(tail); 242294866635SAlex Shi list_add_tail(&tail->lru, list); 242388dcb9a3SAlex Shi } else { 24246dbb5741SAlex Shi /* head is still on lru (and we have it frozen) */ 24256dbb5741SAlex Shi VM_WARN_ON(!PageLRU(head)); 242607ca7606SHugh Dickins if (PageUnevictable(tail)) 242707ca7606SHugh Dickins tail->mlock_count = 0; 242807ca7606SHugh Dickins else 24296dbb5741SAlex Shi list_add_tail(&tail->lru, &head->lru); 243007ca7606SHugh Dickins SetPageLRU(tail); 243188dcb9a3SAlex Shi } 243288dcb9a3SAlex Shi } 243388dcb9a3SAlex Shi 24348df651c7SKirill A. Shutemov static void __split_huge_page_tail(struct page *head, int tail, 2435e9b61f19SKirill A. Shutemov struct lruvec *lruvec, struct list_head *list) 2436e9b61f19SKirill A. Shutemov { 2437e9b61f19SKirill A. Shutemov struct page *page_tail = head + tail; 2438e9b61f19SKirill A. Shutemov 24398df651c7SKirill A. Shutemov VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail); 2440e9b61f19SKirill A. Shutemov 2441e9b61f19SKirill A. Shutemov /* 2442605ca5edSKonstantin Khlebnikov * Clone page flags before unfreezing refcount. 2443605ca5edSKonstantin Khlebnikov * 2444605ca5edSKonstantin Khlebnikov * A flags change might follow a successful get_page_unless_zero(), 24458958b249SHaitao Shi * for example lock_page() setting PG_waiters. 24466c287605SDavid Hildenbrand * 24476c287605SDavid Hildenbrand * Note that for mapped sub-pages of an anonymous THP, 2448684555aaSMatthew Wilcox (Oracle) * PG_anon_exclusive has been cleared in unmap_folio() and is stored in 24496c287605SDavid Hildenbrand * the migration entry instead, from where remap_page() will restore it. 24506c287605SDavid Hildenbrand * We can still have PG_anon_exclusive set on effectively unmapped and 24516c287605SDavid Hildenbrand * unreferenced sub-pages of an anonymous THP: we can simply drop 24526c287605SDavid Hildenbrand * PG_anon_exclusive (-> PG_mappedtodisk) for these here. 2453e9b61f19SKirill A. Shutemov */ 2454e9b61f19SKirill A.
Shutemov page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 2455e9b61f19SKirill A. Shutemov page_tail->flags |= (head->flags & 2456e9b61f19SKirill A. Shutemov ((1L << PG_referenced) | 2457e9b61f19SKirill A. Shutemov (1L << PG_swapbacked) | 245838d8b4e6SHuang Ying (1L << PG_swapcache) | 2459e9b61f19SKirill A. Shutemov (1L << PG_mlocked) | 2460e9b61f19SKirill A. Shutemov (1L << PG_uptodate) | 2461e9b61f19SKirill A. Shutemov (1L << PG_active) | 24621899ad18SJohannes Weiner (1L << PG_workingset) | 2463e9b61f19SKirill A. Shutemov (1L << PG_locked) | 2464b8d3c4c3SMinchan Kim (1L << PG_unevictable) | 2465b0284cd2SCatalin Marinas #ifdef CONFIG_ARCH_USES_PG_ARCH_X 246672e6afa0SCatalin Marinas (1L << PG_arch_2) | 2467ef6458b1SPeter Collingbourne (1L << PG_arch_3) | 246872e6afa0SCatalin Marinas #endif 2469ec1c86b2SYu Zhao (1L << PG_dirty) | 2470ec1c86b2SYu Zhao LRU_GEN_MASK | LRU_REFS_MASK)); 2471e9b61f19SKirill A. Shutemov 2472cb67f428SHugh Dickins /* ->mapping in first and second tail page is replaced by other uses */ 2473173d9d9fSHugh Dickins VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING, 2474173d9d9fSHugh Dickins page_tail); 2475173d9d9fSHugh Dickins page_tail->mapping = head->mapping; 2476173d9d9fSHugh Dickins page_tail->index = head->index + tail; 247771e2d666SMel Gorman 247871e2d666SMel Gorman /* 247971e2d666SMel Gorman * page->private should not be set in tail pages with the exception 248071e2d666SMel Gorman * of swap cache pages that store the swp_entry_t in tail pages. 248171e2d666SMel Gorman * Fix up and warn once if private is unexpectedly set. 2482cb67f428SHugh Dickins * 248394688e8eSMatthew Wilcox (Oracle) * What of 32-bit systems, on which folio->_pincount overlays 2484cb67f428SHugh Dickins * head[1].private? No problem: THP_SWAP is not enabled on 32-bit, and 248594688e8eSMatthew Wilcox (Oracle) * pincount must be 0 for folio_ref_freeze() to have succeeded. 248671e2d666SMel Gorman */ 248771e2d666SMel Gorman if (!folio_test_swapcache(page_folio(head))) { 24885aae9265SHugh Dickins VM_WARN_ON_ONCE_PAGE(page_tail->private != 0, page_tail); 2489b653db77SMatthew Wilcox (Oracle) page_tail->private = 0; 249071e2d666SMel Gorman } 2491173d9d9fSHugh Dickins 2492605ca5edSKonstantin Khlebnikov /* Page flags must be visible before we make the page non-compound. */ 2493e9b61f19SKirill A. Shutemov smp_wmb(); 2494e9b61f19SKirill A. Shutemov 2495605ca5edSKonstantin Khlebnikov /* 2496605ca5edSKonstantin Khlebnikov * Clear PageTail before unfreezing page refcount. 2497605ca5edSKonstantin Khlebnikov * 2498605ca5edSKonstantin Khlebnikov * After successful get_page_unless_zero() might follow put_page() 2499605ca5edSKonstantin Khlebnikov * which needs correct compound_head(). 2500605ca5edSKonstantin Khlebnikov */ 2501e9b61f19SKirill A. Shutemov clear_compound_head(page_tail); 2502e9b61f19SKirill A. Shutemov 2503605ca5edSKonstantin Khlebnikov /* Finally unfreeze refcount. Additional reference from page cache. */ 2504605ca5edSKonstantin Khlebnikov page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) || 2505605ca5edSKonstantin Khlebnikov PageSwapCache(head))); 2506605ca5edSKonstantin Khlebnikov 2507e9b61f19SKirill A. Shutemov if (page_is_young(head)) 2508e9b61f19SKirill A. Shutemov set_page_young(page_tail); 2509e9b61f19SKirill A. Shutemov if (page_is_idle(head)) 2510e9b61f19SKirill A. Shutemov set_page_idle(page_tail); 2511e9b61f19SKirill A. Shutemov 2512e9b61f19SKirill A. 
Shutemov page_cpupid_xchg_last(page_tail, page_cpupid_last(head)); 251394723aafSMichal Hocko 251494723aafSMichal Hocko /* 251594723aafSMichal Hocko * always add to the tail because some iterators expect new 251694723aafSMichal Hocko * pages to show up after the currently processed elements - e.g. 251794723aafSMichal Hocko * migrate_pages 251894723aafSMichal Hocko */ 2519e9b61f19SKirill A. Shutemov lru_add_page_tail(head, page_tail, lruvec, list); 2520e9b61f19SKirill A. Shutemov } 2521e9b61f19SKirill A. Shutemov 2522baa355fdSKirill A. Shutemov static void __split_huge_page(struct page *page, struct list_head *list, 2523b6769834SAlex Shi pgoff_t end) 2524e9b61f19SKirill A. Shutemov { 2525e809c3feSMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 2526e809c3feSMatthew Wilcox (Oracle) struct page *head = &folio->page; 2527e9b61f19SKirill A. Shutemov struct lruvec *lruvec; 25284101196bSMatthew Wilcox (Oracle) struct address_space *swap_cache = NULL; 25294101196bSMatthew Wilcox (Oracle) unsigned long offset = 0; 25308cce5475SKirill A. Shutemov unsigned int nr = thp_nr_pages(head); 25318df651c7SKirill A. Shutemov int i; 2532e9b61f19SKirill A. Shutemov 2533e9b61f19SKirill A. Shutemov /* complete memcg works before adding pages to LRU */ 2534be6c8982SZhou Guanghui split_page_memcg(head, nr); 2535e9b61f19SKirill A. Shutemov 25364101196bSMatthew Wilcox (Oracle) if (PageAnon(head) && PageSwapCache(head)) { 25374101196bSMatthew Wilcox (Oracle) swp_entry_t entry = { .val = page_private(head) }; 25384101196bSMatthew Wilcox (Oracle) 25394101196bSMatthew Wilcox (Oracle) offset = swp_offset(entry); 25404101196bSMatthew Wilcox (Oracle) swap_cache = swap_address_space(entry); 25414101196bSMatthew Wilcox (Oracle) xa_lock(&swap_cache->i_pages); 25424101196bSMatthew Wilcox (Oracle) } 25434101196bSMatthew Wilcox (Oracle) 2544f0953a1bSIngo Molnar /* lock lru list/PageCompound, ref frozen by page_ref_freeze */ 2545e809c3feSMatthew Wilcox (Oracle) lruvec = folio_lruvec_lock(folio); 2546b6769834SAlex Shi 2547eac96c3eSYang Shi ClearPageHasHWPoisoned(head); 2548eac96c3eSYang Shi 25498cce5475SKirill A. Shutemov for (i = nr - 1; i >= 1; i--) { 25508df651c7SKirill A. Shutemov __split_huge_page_tail(head, i, lruvec, list); 2551d144bf62SHugh Dickins /* Some pages can be beyond EOF: drop them from page cache */ 2552baa355fdSKirill A. Shutemov if (head[i].index >= end) { 2553fb5c2029SMatthew Wilcox (Oracle) struct folio *tail = page_folio(head + i); 2554fb5c2029SMatthew Wilcox (Oracle) 2555d144bf62SHugh Dickins if (shmem_mapping(head->mapping)) 2556800d8c63SKirill A. Shutemov shmem_uncharge(head->mapping->host, 1); 2557fb5c2029SMatthew Wilcox (Oracle) else if (folio_test_clear_dirty(tail)) 2558fb5c2029SMatthew Wilcox (Oracle) folio_account_cleaned(tail, 2559fb5c2029SMatthew Wilcox (Oracle) inode_to_wb(folio->mapping->host)); 2560fb5c2029SMatthew Wilcox (Oracle) __filemap_remove_folio(tail, NULL); 2561fb5c2029SMatthew Wilcox (Oracle) folio_put(tail); 25624101196bSMatthew Wilcox (Oracle) } else if (!PageAnon(page)) { 25634101196bSMatthew Wilcox (Oracle) __xa_store(&head->mapping->i_pages, head[i].index, 25644101196bSMatthew Wilcox (Oracle) head + i, 0); 25654101196bSMatthew Wilcox (Oracle) } else if (swap_cache) { 25664101196bSMatthew Wilcox (Oracle) __xa_store(&swap_cache->i_pages, offset + i, 25674101196bSMatthew Wilcox (Oracle) head + i, 0); 2568baa355fdSKirill A. Shutemov } 2569baa355fdSKirill A. Shutemov } 2570e9b61f19SKirill A. Shutemov 2571e9b61f19SKirill A. Shutemov
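/*
 * All tail pages now carry their own state and refcount (unfrozen in
 * __split_huge_page_tail()). Turn the head back into an order-0 page;
 * the page_ref_add()/page_ref_inc() below re-establish the page cache
 * or swap cache references on it.
 */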
ClearPageCompound(head); 25726168d0daSAlex Shi unlock_page_lruvec(lruvec); 2573b6769834SAlex Shi /* Caller disabled irqs, so they are still disabled here */ 2574f7da677bSVlastimil Babka 25758cce5475SKirill A. Shutemov split_page_owner(head, nr); 2576f7da677bSVlastimil Babka 2577baa355fdSKirill A. Shutemov /* See comment in __split_huge_page_tail() */ 2578baa355fdSKirill A. Shutemov if (PageAnon(head)) { 2579aa5dc07fSMatthew Wilcox /* Additional pin to swap cache */ 25804101196bSMatthew Wilcox (Oracle) if (PageSwapCache(head)) { 258138d8b4e6SHuang Ying page_ref_add(head, 2); 25824101196bSMatthew Wilcox (Oracle) xa_unlock(&swap_cache->i_pages); 25834101196bSMatthew Wilcox (Oracle) } else { 2584baa355fdSKirill A. Shutemov page_ref_inc(head); 25854101196bSMatthew Wilcox (Oracle) } 2586baa355fdSKirill A. Shutemov } else { 2587aa5dc07fSMatthew Wilcox /* Additional pin to page cache */ 2588baa355fdSKirill A. Shutemov page_ref_add(head, 2); 2589b93b0163SMatthew Wilcox xa_unlock(&head->mapping->i_pages); 2590baa355fdSKirill A. Shutemov } 2591b6769834SAlex Shi local_irq_enable(); 2592e9b61f19SKirill A. Shutemov 25934eecb8b9SMatthew Wilcox (Oracle) remap_page(folio, nr); 2594e9b61f19SKirill A. Shutemov 2595c4f9c701SHuang Ying if (PageSwapCache(head)) { 2596c4f9c701SHuang Ying swp_entry_t entry = { .val = page_private(head) }; 2597c4f9c701SHuang Ying 2598c4f9c701SHuang Ying split_swap_cluster(entry); 2599c4f9c701SHuang Ying } 2600c4f9c701SHuang Ying 26018cce5475SKirill A. Shutemov for (i = 0; i < nr; i++) { 2602e9b61f19SKirill A. Shutemov struct page *subpage = head + i; 2603e9b61f19SKirill A. Shutemov if (subpage == page) 2604e9b61f19SKirill A. Shutemov continue; 2605e9b61f19SKirill A. Shutemov unlock_page(subpage); 2606e9b61f19SKirill A. Shutemov 2607e9b61f19SKirill A. Shutemov /* 2608e9b61f19SKirill A. Shutemov * Subpages may be freed if there wasn't any mapping, 2609e9b61f19SKirill A. Shutemov * e.g. if add_to_swap() is running on an lru page that 2610e9b61f19SKirill A. Shutemov * had its mapping zapped. And freeing these pages 2611e9b61f19SKirill A. Shutemov * requires taking the lru_lock so we do the put_page 2612e9b61f19SKirill A. Shutemov * of the tail pages after the split is complete. 2613e9b61f19SKirill A. Shutemov */ 26140b175468SMiaohe Lin free_page_and_swap_cache(subpage); 2615e9b61f19SKirill A. Shutemov } 2616e9b61f19SKirill A. Shutemov } 2617e9b61f19SKirill A. Shutemov 2618b8f593cdSHuang Ying /* Racy check whether the huge page can be split */ 2619d4b4084aSMatthew Wilcox (Oracle) bool can_split_folio(struct folio *folio, int *pextra_pins) 2620b8f593cdSHuang Ying { 2621b8f593cdSHuang Ying int extra_pins; 2622b8f593cdSHuang Ying 2623aa5dc07fSMatthew Wilcox /* Additional pins from page cache */ 2624d4b4084aSMatthew Wilcox (Oracle) if (folio_test_anon(folio)) 2625d4b4084aSMatthew Wilcox (Oracle) extra_pins = folio_test_swapcache(folio) ? 2626d4b4084aSMatthew Wilcox (Oracle) folio_nr_pages(folio) : 0; 2627b8f593cdSHuang Ying else 2628d4b4084aSMatthew Wilcox (Oracle) extra_pins = folio_nr_pages(folio); 2629b8f593cdSHuang Ying if (pextra_pins) 2630b8f593cdSHuang Ying *pextra_pins = extra_pins; 2631d4b4084aSMatthew Wilcox (Oracle) return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1; 2632b8f593cdSHuang Ying } 2633b8f593cdSHuang Ying 26346d0a07edSAndrea Arcangeli /* 2635e9b61f19SKirill A. Shutemov * This function splits a huge page into normal pages. @page can point to any 2636e9b61f19SKirill A. Shutemov * subpage of the huge page to split.
Split doesn't change the position of @page. 2637e9b61f19SKirill A. Shutemov * 2638e9b61f19SKirill A. Shutemov * The caller must hold the only pin on the @page, otherwise the split fails 2639e9b61f19SKirill A. Shutemov * with -EBUSY. The huge page must be locked. 2640e9b61f19SKirill A. Shutemov * 2641e9b61f19SKirill A. Shutemov * If @list is null, tail pages will be added to LRU list, otherwise, to @list. 2642e9b61f19SKirill A. Shutemov * 2643e9b61f19SKirill A. Shutemov * Both head page and tail pages will inherit mapping, flags, and so on from 2644e9b61f19SKirill A. Shutemov * the hugepage. 2645e9b61f19SKirill A. Shutemov * 2646e9b61f19SKirill A. Shutemov * The GUP pin and PG_locked are transferred to @page. The rest of the 2647e9b61f19SKirill A. Shutemov * subpages can be freed if they are not mapped. 2648e9b61f19SKirill A. Shutemov * 2649e9b61f19SKirill A. Shutemov * Returns 0 if the hugepage is split successfully. 2650e9b61f19SKirill A. Shutemov * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under 2651e9b61f19SKirill A. Shutemov * us. 2652e9b61f19SKirill A. Shutemov */ 2653e9b61f19SKirill A. Shutemov int split_huge_page_to_list(struct page *page, struct list_head *list) 2654e9b61f19SKirill A. Shutemov { 26554eecb8b9SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 2656f8baa6beSMatthew Wilcox (Oracle) struct deferred_split *ds_queue = get_deferred_split_queue(folio); 26573e9a13daSMatthew Wilcox (Oracle) XA_STATE(xas, &folio->mapping->i_pages, folio->index); 2658baa355fdSKirill A. Shutemov struct anon_vma *anon_vma = NULL; 2659baa355fdSKirill A. Shutemov struct address_space *mapping = NULL; 2660504e070dSYang Shi int extra_pins, ret; 2661006d3ff2SHugh Dickins pgoff_t end; 2662478d134eSXu Yu bool is_hzp; 2663e9b61f19SKirill A. Shutemov 26643e9a13daSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 26653e9a13daSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); 2666e9b61f19SKirill A. Shutemov 26673e9a13daSMatthew Wilcox (Oracle) is_hzp = is_huge_zero_page(&folio->page); 26683e9a13daSMatthew Wilcox (Oracle) VM_WARN_ON_ONCE_FOLIO(is_hzp, folio); 2669478d134eSXu Yu if (is_hzp) 2670478d134eSXu Yu return -EBUSY; 2671478d134eSXu Yu 26723e9a13daSMatthew Wilcox (Oracle) if (folio_test_writeback(folio)) 267359807685SHuang Ying return -EBUSY; 267459807685SHuang Ying 26753e9a13daSMatthew Wilcox (Oracle) if (folio_test_anon(folio)) { 2676e9b61f19SKirill A. Shutemov /* 2677c1e8d7c6SMichel Lespinasse * The caller does not necessarily hold an mmap_lock that would 2678baa355fdSKirill A. Shutemov * prevent the anon_vma disappearing, so we first take a 2679baa355fdSKirill A. Shutemov * reference to it and then lock the anon_vma for write. This 26802f031c6fSMatthew Wilcox (Oracle) * is similar to folio_lock_anon_vma_read except the write lock 2681baa355fdSKirill A. Shutemov * is taken to serialise against parallel split or collapse 2682baa355fdSKirill A. Shutemov * operations. 2683e9b61f19SKirill A. Shutemov */ 268429eea9b5SMatthew Wilcox (Oracle) anon_vma = folio_get_anon_vma(folio); 2685e9b61f19SKirill A. Shutemov if (!anon_vma) { 2686e9b61f19SKirill A. Shutemov ret = -EBUSY; 2687e9b61f19SKirill A. Shutemov goto out; 2688e9b61f19SKirill A. Shutemov } 2689006d3ff2SHugh Dickins end = -1; 2690baa355fdSKirill A. Shutemov mapping = NULL; 2691e9b61f19SKirill A. Shutemov anon_vma_lock_write(anon_vma); 2692baa355fdSKirill A.
Shutemov } else { 26936a3edd29SYin Fengwei gfp_t gfp; 26946a3edd29SYin Fengwei 26953e9a13daSMatthew Wilcox (Oracle) mapping = folio->mapping; 2696baa355fdSKirill A. Shutemov 2697baa355fdSKirill A. Shutemov /* Truncated ? */ 2698baa355fdSKirill A. Shutemov if (!mapping) { 2699baa355fdSKirill A. Shutemov ret = -EBUSY; 2700baa355fdSKirill A. Shutemov goto out; 2701baa355fdSKirill A. Shutemov } 2702baa355fdSKirill A. Shutemov 27036a3edd29SYin Fengwei gfp = current_gfp_context(mapping_gfp_mask(mapping) & 27046a3edd29SYin Fengwei GFP_RECLAIM_MASK); 27056a3edd29SYin Fengwei 27066a3edd29SYin Fengwei if (folio_test_private(folio) && 27076a3edd29SYin Fengwei !filemap_release_folio(folio, gfp)) { 27086a3edd29SYin Fengwei ret = -EBUSY; 27096a3edd29SYin Fengwei goto out; 27106a3edd29SYin Fengwei } 27116a3edd29SYin Fengwei 27123e9a13daSMatthew Wilcox (Oracle) xas_split_alloc(&xas, folio, folio_order(folio), gfp); 27136b24ca4aSMatthew Wilcox (Oracle) if (xas_error(&xas)) { 27146b24ca4aSMatthew Wilcox (Oracle) ret = xas_error(&xas); 27156b24ca4aSMatthew Wilcox (Oracle) goto out; 27166b24ca4aSMatthew Wilcox (Oracle) } 27176b24ca4aSMatthew Wilcox (Oracle) 2718baa355fdSKirill A. Shutemov anon_vma = NULL; 2719baa355fdSKirill A. Shutemov i_mmap_lock_read(mapping); 2720006d3ff2SHugh Dickins 2721006d3ff2SHugh Dickins /* 2722006d3ff2SHugh Dickins *__split_huge_page() may need to trim off pages beyond EOF: 2723006d3ff2SHugh Dickins * but on 32-bit, i_size_read() takes an irq-unsafe seqlock, 2724006d3ff2SHugh Dickins * which cannot be nested inside the page tree lock. So note 2725006d3ff2SHugh Dickins * end now: i_size itself may be changed at any moment, but 27263e9a13daSMatthew Wilcox (Oracle) * folio lock is good enough to serialize the trimming. 2727006d3ff2SHugh Dickins */ 2728006d3ff2SHugh Dickins end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); 2729d144bf62SHugh Dickins if (shmem_mapping(mapping)) 2730d144bf62SHugh Dickins end = shmem_fallocend(mapping->host, end); 2731baa355fdSKirill A. Shutemov } 2732e9b61f19SKirill A. Shutemov 2733e9b61f19SKirill A. Shutemov /* 2734684555aaSMatthew Wilcox (Oracle) * Racy check if we can split the page, before unmap_folio() 2735e9b61f19SKirill A. Shutemov * splits the PMDs 2736e9b61f19SKirill A. Shutemov */ 2737d4b4084aSMatthew Wilcox (Oracle) if (!can_split_folio(folio, &extra_pins)) { 2738fd4a7ac3SBaolin Wang ret = -EAGAIN; 2739e9b61f19SKirill A. Shutemov goto out_unlock; 2740e9b61f19SKirill A. Shutemov } 2741e9b61f19SKirill A. Shutemov 2742684555aaSMatthew Wilcox (Oracle) unmap_folio(folio); 2743e9b61f19SKirill A. Shutemov 2744b6769834SAlex Shi /* block interrupt reentry in xa_lock and spinlock */ 2745b6769834SAlex Shi local_irq_disable(); 2746baa355fdSKirill A. Shutemov if (mapping) { 2747baa355fdSKirill A. Shutemov /* 27483e9a13daSMatthew Wilcox (Oracle) * Check if the folio is present in page cache. 27493e9a13daSMatthew Wilcox (Oracle) * We assume all tails are present too, if the folio is there. 2750baa355fdSKirill A. Shutemov */ 27516b24ca4aSMatthew Wilcox (Oracle) xas_lock(&xas); 27526b24ca4aSMatthew Wilcox (Oracle) xas_reset(&xas); 27533e9a13daSMatthew Wilcox (Oracle) if (xas_load(&xas) != folio) 2754baa355fdSKirill A. Shutemov goto fail; 2755baa355fdSKirill A. Shutemov } 2756baa355fdSKirill A.
Shutemov 27570139aa7bSJoonsoo Kim /* Prevent deferred_split_scan() touching ->_refcount */ 2758364c1eebSYang Shi spin_lock(&ds_queue->split_queue_lock); 27593e9a13daSMatthew Wilcox (Oracle) if (folio_ref_freeze(folio, 1 + extra_pins)) { 27604375a553SMatthew Wilcox (Oracle) if (!list_empty(&folio->_deferred_list)) { 2761364c1eebSYang Shi ds_queue->split_queue_len--; 27624375a553SMatthew Wilcox (Oracle) list_del(&folio->_deferred_list); 27639a982250SKirill A. Shutemov } 2764afb97172SWei Yang spin_unlock(&ds_queue->split_queue_lock); 276506d3eff6SKirill A. Shutemov if (mapping) { 27663e9a13daSMatthew Wilcox (Oracle) int nr = folio_nr_pages(folio); 2767bf9eceadSMuchun Song 27683e9a13daSMatthew Wilcox (Oracle) xas_split(&xas, folio, folio_order(folio)); 27693e9a13daSMatthew Wilcox (Oracle) if (folio_test_swapbacked(folio)) { 27703e9a13daSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, 277157b2847dSMuchun Song -nr); 27721ca7554dSMarek Szyprowski } else { 27733e9a13daSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(folio, NR_FILE_THPS, 2774bf9eceadSMuchun Song -nr); 27751ca7554dSMarek Szyprowski filemap_nr_thps_dec(mapping); 27761ca7554dSMarek Szyprowski } 277706d3eff6SKirill A. Shutemov } 277806d3eff6SKirill A. Shutemov 2779b6769834SAlex Shi __split_huge_page(page, list, end); 2780e9b61f19SKirill A. Shutemov ret = 0; 2781baa355fdSKirill A. Shutemov } else { 2782364c1eebSYang Shi spin_unlock(&ds_queue->split_queue_lock); 2783504e070dSYang Shi fail: 2784504e070dSYang Shi if (mapping) 27856b24ca4aSMatthew Wilcox (Oracle) xas_unlock(&xas); 2786b6769834SAlex Shi local_irq_enable(); 27874eecb8b9SMatthew Wilcox (Oracle) remap_page(folio, folio_nr_pages(folio)); 2788fd4a7ac3SBaolin Wang ret = -EAGAIN; 2789e9b61f19SKirill A. Shutemov } 2790e9b61f19SKirill A. Shutemov 2791e9b61f19SKirill A. Shutemov out_unlock: 2792baa355fdSKirill A. Shutemov if (anon_vma) { 2793e9b61f19SKirill A. Shutemov anon_vma_unlock_write(anon_vma); 2794e9b61f19SKirill A. Shutemov put_anon_vma(anon_vma); 2795baa355fdSKirill A. Shutemov } 2796baa355fdSKirill A. Shutemov if (mapping) 2797baa355fdSKirill A. Shutemov i_mmap_unlock_read(mapping); 2798e9b61f19SKirill A. Shutemov out: 279969a37a8bSMatthew Wilcox (Oracle) xas_destroy(&xas); 2800e9b61f19SKirill A. Shutemov count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); 2801e9b61f19SKirill A. Shutemov return ret; 2802e9b61f19SKirill A. Shutemov } 28039a982250SKirill A. Shutemov 28049a982250SKirill A. Shutemov void free_transhuge_page(struct page *page) 28059a982250SKirill A. Shutemov { 28068991de90SMatthew Wilcox (Oracle) struct folio *folio = (struct folio *)page; 2807f8baa6beSMatthew Wilcox (Oracle) struct deferred_split *ds_queue = get_deferred_split_queue(folio); 28089a982250SKirill A. Shutemov unsigned long flags; 28099a982250SKirill A. Shutemov 2810364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 28118991de90SMatthew Wilcox (Oracle) if (!list_empty(&folio->_deferred_list)) { 2812364c1eebSYang Shi ds_queue->split_queue_len--; 28138991de90SMatthew Wilcox (Oracle) list_del(&folio->_deferred_list); 28149a982250SKirill A. Shutemov } 2815364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 28169a982250SKirill A. Shutemov free_compound_page(page); 28179a982250SKirill A. Shutemov } 28189a982250SKirill A. Shutemov 2819f158ed61SMatthew Wilcox (Oracle) void deferred_split_folio(struct folio *folio) 28209a982250SKirill A. 
Shutemov { 2821f8baa6beSMatthew Wilcox (Oracle) struct deferred_split *ds_queue = get_deferred_split_queue(folio); 282287eaceb3SYang Shi #ifdef CONFIG_MEMCG 28238991de90SMatthew Wilcox (Oracle) struct mem_cgroup *memcg = folio_memcg(folio); 282487eaceb3SYang Shi #endif 28259a982250SKirill A. Shutemov unsigned long flags; 28269a982250SKirill A. Shutemov 28278991de90SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio); 28289a982250SKirill A. Shutemov 282987eaceb3SYang Shi /* 283087eaceb3SYang Shi * The try_to_unmap() in the page reclaim path might reach here too; 283187eaceb3SYang Shi * this may cause a race condition that corrupts the deferred split 28328991de90SMatthew Wilcox (Oracle) * queue. And, if page reclaim is already handling the same folio, it 283387eaceb3SYang Shi * is unnecessary to handle it again in the shrinker. 283487eaceb3SYang Shi * 28358991de90SMatthew Wilcox (Oracle) * Check the swapcache flag to determine if the folio is being 28368991de90SMatthew Wilcox (Oracle) * handled by page reclaim since THP swap would add the folio into 283787eaceb3SYang Shi * swap cache before calling try_to_unmap(). 283887eaceb3SYang Shi */ 28398991de90SMatthew Wilcox (Oracle) if (folio_test_swapcache(folio)) 284087eaceb3SYang Shi return; 284187eaceb3SYang Shi 28428991de90SMatthew Wilcox (Oracle) if (!list_empty(&folio->_deferred_list)) 28439a982250SKirill A. Shutemov return; 28449a982250SKirill A. Shutemov 2845364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 28468991de90SMatthew Wilcox (Oracle) if (list_empty(&folio->_deferred_list)) { 2847f9719a03SKirill A. Shutemov count_vm_event(THP_DEFERRED_SPLIT_PAGE); 28488991de90SMatthew Wilcox (Oracle) list_add_tail(&folio->_deferred_list, &ds_queue->split_queue); 2849364c1eebSYang Shi ds_queue->split_queue_len++; 285087eaceb3SYang Shi #ifdef CONFIG_MEMCG 285187eaceb3SYang Shi if (memcg) 28528991de90SMatthew Wilcox (Oracle) set_shrinker_bit(memcg, folio_nid(folio), 285387eaceb3SYang Shi deferred_split_shrinker.id); 285487eaceb3SYang Shi #endif 28559a982250SKirill A. Shutemov } 2856364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 28579a982250SKirill A. Shutemov } 28589a982250SKirill A. Shutemov 28599a982250SKirill A. Shutemov static unsigned long deferred_split_count(struct shrinker *shrink, 28609a982250SKirill A. Shutemov struct shrink_control *sc) 28619a982250SKirill A. Shutemov { 2862a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(sc->nid); 2863364c1eebSYang Shi struct deferred_split *ds_queue = &pgdata->deferred_split_queue; 286487eaceb3SYang Shi 286587eaceb3SYang Shi #ifdef CONFIG_MEMCG 286687eaceb3SYang Shi if (sc->memcg) 286787eaceb3SYang Shi ds_queue = &sc->memcg->deferred_split_queue; 286887eaceb3SYang Shi #endif 2869364c1eebSYang Shi return READ_ONCE(ds_queue->split_queue_len); 28709a982250SKirill A. Shutemov } 28719a982250SKirill A. Shutemov 28729a982250SKirill A. Shutemov static unsigned long deferred_split_scan(struct shrinker *shrink, 28739a982250SKirill A. Shutemov struct shrink_control *sc) 28749a982250SKirill A. Shutemov { 2875a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(sc->nid); 2876364c1eebSYang Shi struct deferred_split *ds_queue = &pgdata->deferred_split_queue; 28779a982250SKirill A. Shutemov unsigned long flags; 28784375a553SMatthew Wilcox (Oracle) LIST_HEAD(list); 28794375a553SMatthew Wilcox (Oracle) struct folio *folio, *next; 28809a982250SKirill A. Shutemov int split = 0; 28819a982250SKirill A.
Shutemov 288287eaceb3SYang Shi #ifdef CONFIG_MEMCG 288387eaceb3SYang Shi if (sc->memcg) 288487eaceb3SYang Shi ds_queue = &sc->memcg->deferred_split_queue; 288587eaceb3SYang Shi #endif 288687eaceb3SYang Shi 2887364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 28889a982250SKirill A. Shutemov /* Take pin on all head pages to avoid freeing them under us */ 28894375a553SMatthew Wilcox (Oracle) list_for_each_entry_safe(folio, next, &ds_queue->split_queue, 28904375a553SMatthew Wilcox (Oracle) _deferred_list) { 28914375a553SMatthew Wilcox (Oracle) if (folio_try_get(folio)) { 28924375a553SMatthew Wilcox (Oracle) list_move(&folio->_deferred_list, &list); 2893e3ae1953SKirill A. Shutemov } else { 28944375a553SMatthew Wilcox (Oracle) /* We lost race with folio_put() */ 28954375a553SMatthew Wilcox (Oracle) list_del_init(&folio->_deferred_list); 2896364c1eebSYang Shi ds_queue->split_queue_len--; 28979a982250SKirill A. Shutemov } 2898e3ae1953SKirill A. Shutemov if (!--sc->nr_to_scan) 2899e3ae1953SKirill A. Shutemov break; 29009a982250SKirill A. Shutemov } 2901364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 29029a982250SKirill A. Shutemov 29034375a553SMatthew Wilcox (Oracle) list_for_each_entry_safe(folio, next, &list, _deferred_list) { 29044375a553SMatthew Wilcox (Oracle) if (!folio_trylock(folio)) 2905fa41b900SKirill A. Shutemov goto next; 29069a982250SKirill A. Shutemov /* split_huge_page() removes page from list on success */ 29074375a553SMatthew Wilcox (Oracle) if (!split_folio(folio)) 29089a982250SKirill A. Shutemov split++; 29094375a553SMatthew Wilcox (Oracle) folio_unlock(folio); 2910fa41b900SKirill A. Shutemov next: 29114375a553SMatthew Wilcox (Oracle) folio_put(folio); 29129a982250SKirill A. Shutemov } 29139a982250SKirill A. Shutemov 2914364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 2915364c1eebSYang Shi list_splice_tail(&list, &ds_queue->split_queue); 2916364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 29179a982250SKirill A. Shutemov 2918cb8d68ecSKirill A. Shutemov /* 2919cb8d68ecSKirill A. Shutemov * Stop shrinker if we didn't split any page, but the queue is empty. 2920cb8d68ecSKirill A. Shutemov * This can happen if pages were freed under us. 2921cb8d68ecSKirill A. Shutemov */ 2922364c1eebSYang Shi if (!split && list_empty(&ds_queue->split_queue)) 2923cb8d68ecSKirill A. Shutemov return SHRINK_STOP; 2924cb8d68ecSKirill A. Shutemov return split; 29259a982250SKirill A. Shutemov } 29269a982250SKirill A. Shutemov 29279a982250SKirill A. Shutemov static struct shrinker deferred_split_shrinker = { 29289a982250SKirill A. Shutemov .count_objects = deferred_split_count, 29299a982250SKirill A. Shutemov .scan_objects = deferred_split_scan, 29309a982250SKirill A. Shutemov .seeks = DEFAULT_SEEKS, 293187eaceb3SYang Shi .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE | 293287eaceb3SYang Shi SHRINKER_NONSLAB, 29339a982250SKirill A. Shutemov }; 293449071d43SKirill A. Shutemov 293549071d43SKirill A. Shutemov #ifdef CONFIG_DEBUG_FS 2936fa6c0231SZi Yan static void split_huge_pages_all(void) 293749071d43SKirill A. Shutemov { 293849071d43SKirill A. Shutemov struct zone *zone; 293949071d43SKirill A. Shutemov struct page *page; 2940630e7c5eSKefeng Wang struct folio *folio; 294149071d43SKirill A. Shutemov unsigned long pfn, max_zone_pfn; 294249071d43SKirill A. Shutemov unsigned long total = 0, split = 0; 294349071d43SKirill A. 
Shutemov 2944fa6c0231SZi Yan pr_debug("Split all THPs\n"); 2945a17206daSMiaohe Lin for_each_zone(zone) { 2946a17206daSMiaohe Lin if (!managed_zone(zone)) 2947a17206daSMiaohe Lin continue; 294849071d43SKirill A. Shutemov max_zone_pfn = zone_end_pfn(zone); 294949071d43SKirill A. Shutemov for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { 2950a17206daSMiaohe Lin int nr_pages; 295149071d43SKirill A. Shutemov 29522b7aa91bSNaoya Horiguchi page = pfn_to_online_page(pfn); 2953630e7c5eSKefeng Wang if (!page || PageTail(page)) 2954630e7c5eSKefeng Wang continue; 2955630e7c5eSKefeng Wang folio = page_folio(page); 2956630e7c5eSKefeng Wang if (!folio_try_get(folio)) 295749071d43SKirill A. Shutemov continue; 295849071d43SKirill A. Shutemov 2959630e7c5eSKefeng Wang if (unlikely(page_folio(page) != folio)) 296049071d43SKirill A. Shutemov goto next; 296149071d43SKirill A. Shutemov 2962630e7c5eSKefeng Wang if (zone != folio_zone(folio)) 2963630e7c5eSKefeng Wang goto next; 2964630e7c5eSKefeng Wang 2965630e7c5eSKefeng Wang if (!folio_test_large(folio) 2966630e7c5eSKefeng Wang || folio_test_hugetlb(folio) 2967630e7c5eSKefeng Wang || !folio_test_lru(folio)) 296849071d43SKirill A. Shutemov goto next; 296949071d43SKirill A. Shutemov 297049071d43SKirill A. Shutemov total++; 2971630e7c5eSKefeng Wang folio_lock(folio); 2972630e7c5eSKefeng Wang nr_pages = folio_nr_pages(folio); 2973630e7c5eSKefeng Wang if (!split_folio(folio)) 297449071d43SKirill A. Shutemov split++; 2975a17206daSMiaohe Lin pfn += nr_pages - 1; 2976630e7c5eSKefeng Wang folio_unlock(folio); 297749071d43SKirill A. Shutemov next: 2978630e7c5eSKefeng Wang folio_put(folio); 2979fa6c0231SZi Yan cond_resched(); 298049071d43SKirill A. Shutemov } 298149071d43SKirill A. Shutemov } 298249071d43SKirill A. Shutemov 2983fa6c0231SZi Yan pr_debug("%lu of %lu THP split\n", split, total); 298449071d43SKirill A. 
Shutemov } 2985fa6c0231SZi Yan 2986fa6c0231SZi Yan static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma) 2987fa6c0231SZi Yan { 2988fa6c0231SZi Yan return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) || 2989fa6c0231SZi Yan is_vm_hugetlb_page(vma); 2990fa6c0231SZi Yan } 2991fa6c0231SZi Yan 2992fa6c0231SZi Yan static int split_huge_pages_pid(int pid, unsigned long vaddr_start, 2993fa6c0231SZi Yan unsigned long vaddr_end) 2994fa6c0231SZi Yan { 2995fa6c0231SZi Yan int ret = 0; 2996fa6c0231SZi Yan struct task_struct *task; 2997fa6c0231SZi Yan struct mm_struct *mm; 2998fa6c0231SZi Yan unsigned long total = 0, split = 0; 2999fa6c0231SZi Yan unsigned long addr; 3000fa6c0231SZi Yan 3001fa6c0231SZi Yan vaddr_start &= PAGE_MASK; 3002fa6c0231SZi Yan vaddr_end &= PAGE_MASK; 3003fa6c0231SZi Yan 3004fa6c0231SZi Yan /* Find the task_struct from pid */ 3005fa6c0231SZi Yan rcu_read_lock(); 3006fa6c0231SZi Yan task = find_task_by_vpid(pid); 3007fa6c0231SZi Yan if (!task) { 3008fa6c0231SZi Yan rcu_read_unlock(); 3009fa6c0231SZi Yan ret = -ESRCH; 3010fa6c0231SZi Yan goto out; 3011fa6c0231SZi Yan } 3012fa6c0231SZi Yan get_task_struct(task); 3013fa6c0231SZi Yan rcu_read_unlock(); 3014fa6c0231SZi Yan 3015fa6c0231SZi Yan /* Find the mm_struct */ 3016fa6c0231SZi Yan mm = get_task_mm(task); 3017fa6c0231SZi Yan put_task_struct(task); 3018fa6c0231SZi Yan 3019fa6c0231SZi Yan if (!mm) { 3020fa6c0231SZi Yan ret = -EINVAL; 3021fa6c0231SZi Yan goto out; 3022fa6c0231SZi Yan } 3023fa6c0231SZi Yan 3024fa6c0231SZi Yan pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n", 3025fa6c0231SZi Yan pid, vaddr_start, vaddr_end); 3026fa6c0231SZi Yan 3027fa6c0231SZi Yan mmap_read_lock(mm); 3028fa6c0231SZi Yan /* 3029fa6c0231SZi Yan * always increase addr by PAGE_SIZE, since we could have a PTE page 3030fa6c0231SZi Yan * table filled with PTE-mapped THPs, each of which is distinct. 
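 * (On x86-64, for example, one page table holds 512 ptes, and each of
 * them may map a subpage of a different, already PTE-mapped THP, so
 * stepping by anything larger than PAGE_SIZE could skip candidates.)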
3031fa6c0231SZi Yan */ 3032fa6c0231SZi Yan for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) { 303374ba2b38SMiaohe Lin struct vm_area_struct *vma = vma_lookup(mm, addr); 3034fa6c0231SZi Yan struct page *page; 3035fa6c0231SZi Yan 303674ba2b38SMiaohe Lin if (!vma) 3037fa6c0231SZi Yan break; 3038fa6c0231SZi Yan 3039fa6c0231SZi Yan /* skip special VMA and hugetlb VMA */ 3040fa6c0231SZi Yan if (vma_not_suitable_for_thp_split(vma)) { 3041fa6c0231SZi Yan addr = vma->vm_end; 3042fa6c0231SZi Yan continue; 3043fa6c0231SZi Yan } 3044fa6c0231SZi Yan 3045fa6c0231SZi Yan /* FOLL_DUMP to ignore special (like zero) pages */ 304687d2762eSMiaohe Lin page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP); 3047fa6c0231SZi Yan 3048f7091ed6SHaiyue Wang if (IS_ERR_OR_NULL(page)) 3049fa6c0231SZi Yan continue; 3050fa6c0231SZi Yan 3051fa6c0231SZi Yan if (!is_transparent_hugepage(page)) 3052fa6c0231SZi Yan goto next; 3053fa6c0231SZi Yan 3054fa6c0231SZi Yan total++; 3055d4b4084aSMatthew Wilcox (Oracle) if (!can_split_folio(page_folio(page), NULL)) 3056fa6c0231SZi Yan goto next; 3057fa6c0231SZi Yan 3058fa6c0231SZi Yan if (!trylock_page(page)) 3059fa6c0231SZi Yan goto next; 3060fa6c0231SZi Yan 3061fa6c0231SZi Yan if (!split_huge_page(page)) 3062fa6c0231SZi Yan split++; 3063fa6c0231SZi Yan 3064fa6c0231SZi Yan unlock_page(page); 3065fa6c0231SZi Yan next: 3066fa6c0231SZi Yan put_page(page); 3067fa6c0231SZi Yan cond_resched(); 3068fa6c0231SZi Yan } 3069fa6c0231SZi Yan mmap_read_unlock(mm); 3070fa6c0231SZi Yan mmput(mm); 3071fa6c0231SZi Yan 3072fa6c0231SZi Yan pr_debug("%lu of %lu THP split\n", split, total); 3073fa6c0231SZi Yan 3074fa6c0231SZi Yan out: 3075fa6c0231SZi Yan return ret; 3076fa6c0231SZi Yan } 3077fa6c0231SZi Yan 3078fbe37501SZi Yan static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start, 3079fbe37501SZi Yan pgoff_t off_end) 3080fbe37501SZi Yan { 3081fbe37501SZi Yan struct filename *file; 3082fbe37501SZi Yan struct file *candidate; 3083fbe37501SZi Yan struct address_space *mapping; 3084fbe37501SZi Yan int ret = -EINVAL; 3085fbe37501SZi Yan pgoff_t index; 3086fbe37501SZi Yan int nr_pages = 1; 3087fbe37501SZi Yan unsigned long total = 0, split = 0; 3088fbe37501SZi Yan 3089fbe37501SZi Yan file = getname_kernel(file_path); 3090fbe37501SZi Yan if (IS_ERR(file)) 3091fbe37501SZi Yan return ret; 3092fbe37501SZi Yan 3093fbe37501SZi Yan candidate = file_open_name(file, O_RDONLY, 0); 3094fbe37501SZi Yan if (IS_ERR(candidate)) 3095fbe37501SZi Yan goto out; 3096fbe37501SZi Yan 3097fbe37501SZi Yan pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n", 3098fbe37501SZi Yan file_path, off_start, off_end); 3099fbe37501SZi Yan 3100fbe37501SZi Yan mapping = candidate->f_mapping; 3101fbe37501SZi Yan 3102fbe37501SZi Yan for (index = off_start; index < off_end; index += nr_pages) { 31039ee2c086SMatthew Wilcox (Oracle) struct folio *folio = __filemap_get_folio(mapping, index, 31049ee2c086SMatthew Wilcox (Oracle) FGP_ENTRY, 0); 3105fbe37501SZi Yan 3106fbe37501SZi Yan nr_pages = 1; 31079ee2c086SMatthew Wilcox (Oracle) if (xa_is_value(folio) || !folio) 3108fbe37501SZi Yan continue; 3109fbe37501SZi Yan 31109ee2c086SMatthew Wilcox (Oracle) if (!folio_test_large(folio)) 3111fbe37501SZi Yan goto next; 3112fbe37501SZi Yan 3113fbe37501SZi Yan total++; 31149ee2c086SMatthew Wilcox (Oracle) nr_pages = folio_nr_pages(folio); 3115fbe37501SZi Yan 31169ee2c086SMatthew Wilcox (Oracle) if (!folio_trylock(folio)) 3117fbe37501SZi Yan goto next; 3118fbe37501SZi Yan 31199ee2c086SMatthew Wilcox (Oracle) 
if (!split_folio(folio)) 3120fbe37501SZi Yan split++; 3121fbe37501SZi Yan 31229ee2c086SMatthew Wilcox (Oracle) folio_unlock(folio); 3123fbe37501SZi Yan next: 31249ee2c086SMatthew Wilcox (Oracle) folio_put(folio); 3125fbe37501SZi Yan cond_resched(); 3126fbe37501SZi Yan } 3127fbe37501SZi Yan 3128fbe37501SZi Yan filp_close(candidate, NULL); 3129fbe37501SZi Yan ret = 0; 3130fbe37501SZi Yan 3131fbe37501SZi Yan pr_debug("%lu of %lu file-backed THP split\n", split, total); 3132fbe37501SZi Yan out: 3133fbe37501SZi Yan putname(file); 3134fbe37501SZi Yan return ret; 3135fbe37501SZi Yan } 3136fbe37501SZi Yan 3137fa6c0231SZi Yan #define MAX_INPUT_BUF_SZ 255 3138fa6c0231SZi Yan 3139fa6c0231SZi Yan static ssize_t split_huge_pages_write(struct file *file, const char __user *buf, 3140fa6c0231SZi Yan size_t count, loff_t *ppops) 3141fa6c0231SZi Yan { 3142fa6c0231SZi Yan static DEFINE_MUTEX(split_debug_mutex); 3143fa6c0231SZi Yan ssize_t ret; 3144fbe37501SZi Yan /* hold pid, start_vaddr, end_vaddr or file_path, off_start, off_end */ 3145fbe37501SZi Yan char input_buf[MAX_INPUT_BUF_SZ]; 3146fa6c0231SZi Yan int pid; 3147fa6c0231SZi Yan unsigned long vaddr_start, vaddr_end; 3148fa6c0231SZi Yan 3149fa6c0231SZi Yan ret = mutex_lock_interruptible(&split_debug_mutex); 3150fa6c0231SZi Yan if (ret) 3151fa6c0231SZi Yan return ret; 3152fa6c0231SZi Yan 3153fa6c0231SZi Yan ret = -EFAULT; 3154fa6c0231SZi Yan 3155fa6c0231SZi Yan memset(input_buf, 0, MAX_INPUT_BUF_SZ); 3156fa6c0231SZi Yan if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ))) 3157fa6c0231SZi Yan goto out; 3158fa6c0231SZi Yan 3159fa6c0231SZi Yan input_buf[MAX_INPUT_BUF_SZ - 1] = '\0'; 3160fbe37501SZi Yan 3161fbe37501SZi Yan if (input_buf[0] == '/') { 3162fbe37501SZi Yan char *tok; 3163fbe37501SZi Yan char *buf = input_buf; 3164fbe37501SZi Yan char file_path[MAX_INPUT_BUF_SZ]; 3165fbe37501SZi Yan pgoff_t off_start = 0, off_end = 0; 3166fbe37501SZi Yan size_t input_len = strlen(input_buf); 3167fbe37501SZi Yan 3168fbe37501SZi Yan tok = strsep(&buf, ","); 3169fbe37501SZi Yan if (tok) { 31701212e00cSMatthew Wilcox (Oracle) strcpy(file_path, tok); 3171fbe37501SZi Yan } else { 3172fbe37501SZi Yan ret = -EINVAL; 3173fbe37501SZi Yan goto out; 3174fbe37501SZi Yan } 3175fbe37501SZi Yan 3176fbe37501SZi Yan ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end); 3177fbe37501SZi Yan if (ret != 2) { 3178fbe37501SZi Yan ret = -EINVAL; 3179fbe37501SZi Yan goto out; 3180fbe37501SZi Yan } 3181fbe37501SZi Yan ret = split_huge_pages_in_file(file_path, off_start, off_end); 3182fbe37501SZi Yan if (!ret) 3183fbe37501SZi Yan ret = input_len; 3184fbe37501SZi Yan 3185fbe37501SZi Yan goto out; 3186fbe37501SZi Yan } 3187fbe37501SZi Yan 3188fa6c0231SZi Yan ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end); 3189fa6c0231SZi Yan if (ret == 1 && pid == 1) { 3190fa6c0231SZi Yan split_huge_pages_all(); 3191fa6c0231SZi Yan ret = strlen(input_buf); 3192fa6c0231SZi Yan goto out; 3193fa6c0231SZi Yan } else if (ret != 3) { 3194fa6c0231SZi Yan ret = -EINVAL; 3195fa6c0231SZi Yan goto out; 3196fa6c0231SZi Yan } 3197fa6c0231SZi Yan 3198fa6c0231SZi Yan ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end); 3199fa6c0231SZi Yan if (!ret) 3200fa6c0231SZi Yan ret = strlen(input_buf); 3201fa6c0231SZi Yan out: 3202fa6c0231SZi Yan mutex_unlock(&split_debug_mutex); 3203fa6c0231SZi Yan return ret; 3204fa6c0231SZi Yan 3205fa6c0231SZi Yan } 3206fa6c0231SZi Yan 3207fa6c0231SZi Yan static const struct file_operations split_huge_pages_fops = { 3208fa6c0231SZi Yan .owner 
= THIS_MODULE, 3209fa6c0231SZi Yan .write = split_huge_pages_write, 3210fa6c0231SZi Yan .llseek = no_llseek, 3211fa6c0231SZi Yan }; 321249071d43SKirill A. Shutemov 321349071d43SKirill A. Shutemov static int __init split_huge_pages_debugfs(void) 321449071d43SKirill A. Shutemov { 3215d9f7979cSGreg Kroah-Hartman debugfs_create_file("split_huge_pages", 0200, NULL, NULL, 321649071d43SKirill A. Shutemov &split_huge_pages_fops); 321749071d43SKirill A. Shutemov return 0; 321849071d43SKirill A. Shutemov } 321949071d43SKirill A. Shutemov late_initcall(split_huge_pages_debugfs); 322049071d43SKirill A. Shutemov #endif 3221616b8371SZi Yan 3222616b8371SZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 32237f5abe60SDavid Hildenbrand int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, 3224616b8371SZi Yan struct page *page) 3225616b8371SZi Yan { 3226616b8371SZi Yan struct vm_area_struct *vma = pvmw->vma; 3227616b8371SZi Yan struct mm_struct *mm = vma->vm_mm; 3228616b8371SZi Yan unsigned long address = pvmw->address; 32296c287605SDavid Hildenbrand bool anon_exclusive; 3230616b8371SZi Yan pmd_t pmdval; 3231616b8371SZi Yan swp_entry_t entry; 3232ab6e3d09SNaoya Horiguchi pmd_t pmdswp; 3233616b8371SZi Yan 3234616b8371SZi Yan if (!(pvmw->pmd && !pvmw->pte)) 32357f5abe60SDavid Hildenbrand return 0; 3236616b8371SZi Yan 3237616b8371SZi Yan flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); 32388a8683adSHuang Ying pmdval = pmdp_invalidate(vma, address, pvmw->pmd); 32396c287605SDavid Hildenbrand 3240088b8aa5SDavid Hildenbrand /* See page_try_share_anon_rmap(): invalidate PMD first. */ 32416c287605SDavid Hildenbrand anon_exclusive = PageAnon(page) && PageAnonExclusive(page); 32426c287605SDavid Hildenbrand if (anon_exclusive && page_try_share_anon_rmap(page)) { 32436c287605SDavid Hildenbrand set_pmd_at(mm, address, pvmw->pmd, pmdval); 32447f5abe60SDavid Hildenbrand return -EBUSY; 32456c287605SDavid Hildenbrand } 32466c287605SDavid Hildenbrand 3247616b8371SZi Yan if (pmd_dirty(pmdval)) 3248616b8371SZi Yan set_page_dirty(page); 32494dd845b5SAlistair Popple if (pmd_write(pmdval)) 32504dd845b5SAlistair Popple entry = make_writable_migration_entry(page_to_pfn(page)); 32516c287605SDavid Hildenbrand else if (anon_exclusive) 32526c287605SDavid Hildenbrand entry = make_readable_exclusive_migration_entry(page_to_pfn(page)); 32534dd845b5SAlistair Popple else 32544dd845b5SAlistair Popple entry = make_readable_migration_entry(page_to_pfn(page)); 32552e346877SPeter Xu if (pmd_young(pmdval)) 32562e346877SPeter Xu entry = make_migration_entry_young(entry); 32572e346877SPeter Xu if (pmd_dirty(pmdval)) 32582e346877SPeter Xu entry = make_migration_entry_dirty(entry); 3259ab6e3d09SNaoya Horiguchi pmdswp = swp_entry_to_pmd(entry); 3260ab6e3d09SNaoya Horiguchi if (pmd_soft_dirty(pmdval)) 3261ab6e3d09SNaoya Horiguchi pmdswp = pmd_swp_mksoft_dirty(pmdswp); 3262*24bf08c4SDavid Hildenbrand if (pmd_uffd_wp(pmdval)) 3263*24bf08c4SDavid Hildenbrand pmdswp = pmd_swp_mkuffd_wp(pmdswp); 3264ab6e3d09SNaoya Horiguchi set_pmd_at(mm, address, pvmw->pmd, pmdswp); 3265cea86fe2SHugh Dickins page_remove_rmap(page, vma, true); 3266616b8371SZi Yan put_page(page); 3267283fd6feSAnshuman Khandual trace_set_migration_pmd(address, pmd_val(pmdswp)); 32687f5abe60SDavid Hildenbrand 32697f5abe60SDavid Hildenbrand return 0; 3270616b8371SZi Yan } 3271616b8371SZi Yan 3272616b8371SZi Yan void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) 3273616b8371SZi Yan { 3274616b8371SZi Yan struct vm_area_struct *vma = pvmw->vma; 
3275616b8371SZi Yan struct mm_struct *mm = vma->vm_mm; 3276616b8371SZi Yan unsigned long address = pvmw->address; 32774fba8f2aSMiaohe Lin unsigned long haddr = address & HPAGE_PMD_MASK; 3278616b8371SZi Yan pmd_t pmde; 3279616b8371SZi Yan swp_entry_t entry; 3280616b8371SZi Yan 3281616b8371SZi Yan if (!(pvmw->pmd && !pvmw->pte)) 3282616b8371SZi Yan return; 3283616b8371SZi Yan 3284616b8371SZi Yan entry = pmd_to_swp_entry(*pvmw->pmd); 3285616b8371SZi Yan get_page(new); 32862e346877SPeter Xu pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot)); 3287ab6e3d09SNaoya Horiguchi if (pmd_swp_soft_dirty(*pvmw->pmd)) 3288ab6e3d09SNaoya Horiguchi pmde = pmd_mksoft_dirty(pmde); 32898f34f1eaSPeter Xu if (pmd_swp_uffd_wp(*pvmw->pmd)) 3290f1eb1bacSPeter Xu pmde = pmd_mkuffd_wp(pmde); 32912e346877SPeter Xu if (!is_migration_entry_young(entry)) 32922e346877SPeter Xu pmde = pmd_mkold(pmde); 32932e346877SPeter Xu /* NOTE: this may contain setting soft-dirty on some archs */ 32942e346877SPeter Xu if (PageDirty(new) && is_migration_entry_dirty(entry)) 32952e346877SPeter Xu pmde = pmd_mkdirty(pmde); 329696a9c287SPeter Xu if (is_writable_migration_entry(entry)) 329796a9c287SPeter Xu pmde = maybe_pmd_mkwrite(pmde, vma); 329896a9c287SPeter Xu else 329996a9c287SPeter Xu pmde = pmd_wrprotect(pmde); 3300616b8371SZi Yan 33016c287605SDavid Hildenbrand if (PageAnon(new)) { 33026c287605SDavid Hildenbrand rmap_t rmap_flags = RMAP_COMPOUND; 33036c287605SDavid Hildenbrand 33046c287605SDavid Hildenbrand if (!is_readable_migration_entry(entry)) 33056c287605SDavid Hildenbrand rmap_flags |= RMAP_EXCLUSIVE; 33066c287605SDavid Hildenbrand 33074fba8f2aSMiaohe Lin page_add_anon_rmap(new, vma, haddr, rmap_flags); 33086c287605SDavid Hildenbrand } else { 3309cea86fe2SHugh Dickins page_add_file_rmap(new, vma, true); 33106c287605SDavid Hildenbrand } 33116c287605SDavid Hildenbrand VM_BUG_ON(pmd_write(pmde) && PageAnon(new) && !PageAnonExclusive(new)); 33124fba8f2aSMiaohe Lin set_pmd_at(mm, haddr, pvmw->pmd, pmde); 33135cbcf225SMuchun Song 33145cbcf225SMuchun Song /* No need to invalidate - it was non-present before */ 3315616b8371SZi Yan update_mmu_cache_pmd(vma, address, pvmw->pmd); 3316283fd6feSAnshuman Khandual trace_remove_migration_pmd(address, pmd_val(pmde)); 3317616b8371SZi Yan } 3318616b8371SZi Yan #endif 3319
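A brief usage note, not part of the original file. In kernel code, callers split a THP with the same trylock/split/unlock/put pattern used by deferred_split_scan() and split_huge_pages_pid() above; the helper below is a hypothetical sketch of that pattern (the name try_split_one_folio is invented for illustration, everything it calls is defined in or used by this file):

/*
 * Hypothetical sketch, not part of mm/huge_memory.c: the caller-side
 * folio split pattern, mirroring deferred_split_scan() above.
 */
static int try_split_one_folio(struct folio *folio, struct list_head *list)
{
	int ret = -EAGAIN;

	/* The folio may be freed under us; take a reference first. */
	if (!folio_try_get(folio))
		return ret;
	if (folio_trylock(folio)) {
		/* 0 on success; -EBUSY/-EAGAIN per the rules documented above. */
		ret = split_huge_page_to_list(&folio->page, list);
		folio_unlock(folio);
	}
	folio_put(folio);
	return ret;
}

From userspace, the debugfs file created in split_huge_pages_debugfs() accepts the two input formats parsed by split_huge_pages_write(), for example:

	echo "<pid>,0x<vaddr_start>,0x<vaddr_end>" > /sys/kernel/debug/split_huge_pages
	echo "/path/to/file,0x<off_start>,0x<off_end>" > /sys/kernel/debug/split_huge_pages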