// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2009  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>
#include <linux/sched/sysctl.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "swap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

static struct shrinker deferred_split_shrinker;

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;

bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
			bool smaps, bool in_pf, bool enforce_sysfs)
{
	if (!vma->vm_mm)		/* vdso */
		return false;

	/*
	 * Explicitly disabled through madvise or prctl, or some
	 * architectures may disable THP for some mappings, for
	 * example, s390 kvm.
	 */
	if ((vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;
	/*
	 * If the hardware/firmware marked hugepage support disabled.
	 */
	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
		return false;

	/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
	if (vma_is_dax(vma))
		return in_pf;

	/*
	 * Special VMA and hugetlb VMA.
	 * Must be checked after dax since some dax mappings may have
	 * VM_MIXEDMAP set.
	 */
	if (vm_flags & VM_NO_KHUGEPAGED)
		return false;

	/*
	 * Check alignment for file vma and size for both file and anon vma.
	 *
	 * Skip the check for page fault. Huge fault does the check in fault
	 * handlers. And this check is not suitable for huge PUD fault.
	 */
	if (!in_pf &&
	    !transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE)))
		return false;

	/*
	 * Enabled via shmem mount options or sysfs settings.
	 * Must be done before hugepage flags check since shmem has its
	 * own flags.
	 */
	if (!in_pf && shmem_file(vma->vm_file))
		return shmem_huge_enabled(vma);

	/* Enforce sysfs THP requirements as necessary */
	if (enforce_sysfs &&
	    (!hugepage_flags_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
					   !hugepage_flags_always())))
		return false;

	/* Only regular file is valid */
	if (!in_pf && file_thp_enabled(vma))
		return true;

	if (!vma_is_anonymous(vma))
		return false;

	if (vma_is_temporary_stack(vma))
		return false;

	/*
	 * THPeligible bit of smaps should show 1 for proper VMAs even
	 * though anon_vma is not initialized yet.
	 *
	 * Allow page fault since anon_vma may be not initialized until
	 * the first page fault.
	 */
	if (!vma->anon_vma)
		return (smaps || in_pf);

	return true;
}

static bool get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return true;

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return false;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}
	WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return true;
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

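/*
 * Usage sketch for the per-mm helpers below (a minimal example, not copied
 * from any caller in this file; map_zero_pmd() is a hypothetical stand-in
 * for set_huge_zero_page() defined later in this file):
 *
 *	struct page *zero_page = mm_get_huge_zero_page(vma->vm_mm);
 *
 *	if (!zero_page)
 *		return VM_FAULT_FALLBACK;	- fall back to small pages
 *	map_zero_pmd(vma, haddr, pmd, zero_page);
 *
 * The mm's reference is dropped again by mm_put_huge_zero_page(), typically
 * when the mm is torn down.
 */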
struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}

void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		WRITE_ONCE(huge_zero_pfn, ~0UL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};

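/*
 * Summary of the huge zero page lifecycle implemented above:
 * get_huge_zero_page() allocates the page on first use and sets the refcount
 * to 2 - one reference for the caller and one "cached" reference owned by
 * huge_zero_page_shrinker. Subsequent users only atomic_inc_not_zero() the
 * count. Under memory pressure the shrinker frees the page once the cached
 * reference is the only one left (refcount == 1 is cmpxchg'ed to 0 and the
 * page is returned to the buddy allocator).
 */
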
#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		output = "[always] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always [madvise] never";
	else
		output = "always madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}

static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);

ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag)
{
	return sysfs_emit(buf, "%d\n",
			  !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
		     &transparent_hugepage_flags))
		output = "[always] defer defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
			  &transparent_hugepage_flags))
		output = "always [defer] defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer [defer+madvise] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer defer+madvise [madvise] never";
	else
		output = "always defer defer+madvise madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer+madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);

static ssize_t use_zero_page_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#ifdef CONFIG_SHMEM
	&shmem_enabled_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		/*
		 * Hardware doesn't support hugepages, hence disable
		 * DAX PMD support.
		 */
		transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_NEVER_DAX;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = register_shrinker(&huge_zero_page_shrinker, "thp-zero");
	if (err)
		goto err_hzp_shrinker;
	err = register_shrinker(&deferred_split_shrinker, "thp-deferred_split");
	if (err)
		goto err_split_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save.  The admin can still enable it through /sys.
	 */
	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
	unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

#ifdef CONFIG_MEMCG
static inline struct deferred_split *get_deferred_split_queue(struct page *page)
{
	struct mem_cgroup *memcg = page_memcg(compound_head(page));
	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));

	if (memcg)
		return &memcg->deferred_split_queue;
	else
		return &pgdat->deferred_split_queue;
}
#else
static inline struct deferred_split *get_deferred_split_queue(struct page *page)
{
	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));

	return &pgdat->deferred_split_queue;
}
#endif

void prep_transhuge_page(struct page *page)
{
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */

	INIT_LIST_HEAD(page_deferred_list(page));
	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}

static inline bool is_transparent_hugepage(struct page *page)
{
	if (!PageCompound(page))
		return false;

	page = compound_head(page);
	return is_huge_zero_page(page) ||
	       page[1].compound_dtor == TRANSHUGE_PAGE_DTOR;
}

static unsigned long __thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size)
{
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad, ret;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
					     off >> PAGE_SHIFT, flags);

	/*
	 * The failure might be due to length padding. The caller will retry
	 * without the padding.
	 */
	if (IS_ERR_VALUE(ret))
		return 0;

	/*
	 * Do not try to align to THP boundary if allocation at the address
	 * hint succeeds.
	 */
	if (ret == addr)
		return addr;

	ret += (off - ret) & (size - 1);
	return ret;
}

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long ret;
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
	if (ret)
		return ret;

	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);

static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
			struct page *page, gfp_t gfp)
{
	struct vm_area_struct *vma = vmf->vma;
	pgtable_t pgtable;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	vm_fault_t ret = 0;

	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, gfp)) {
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
		return VM_FAULT_FALLBACK;
	}
	cgroup_throttle_swaprate(page, gfp);

	pgtable = pte_alloc_one(vma->vm_mm);
	if (unlikely(!pgtable)) {
		ret = VM_FAULT_OOM;
		goto release;
	}

	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__SetPageUptodate(page);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_none(*vmf->pmd))) {
		goto unlock_release;
	} else {
		pmd_t entry;

		ret = check_stable_address_space(vma->vm_mm);
		if (ret)
			goto unlock_release;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			spin_unlock(vmf->ptl);
			put_page(page);
			pte_free(vma->vm_mm, pgtable);
			ret = handle_userfault(vmf, VM_UFFD_MISSING);
			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			return ret;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr);
		lru_cache_add_inactive_or_unevictable(page, vma);
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(vma->vm_mm);
		spin_unlock(vmf->ptl);
		count_vm_event(THP_FAULT_ALLOC);
		count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
	}

	return 0;
unlock_release:
	spin_unlock(vmf->ptl);
release:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	put_page(page);
	return ret;

}

/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */
gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
{
	const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);

	/* Always do synchronous compaction */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);

	/* Kick kcompactd and fail quickly */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;

	/* Synchronous compaction if madvised, otherwise kick kcompactd */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM :
					__GFP_KSWAPD_RECLAIM);

	/* Only do synchronous compaction if madvised */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM : 0);

	return GFP_TRANSHUGE_LIGHT;
}

/* Caller must hold page table lock. */
static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
}

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	gfp_t gfp;
	struct folio *folio;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

	if (!transhuge_vma_suitable(vma, haddr))
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	khugepaged_enter_vma(vma, vma->vm_flags);

	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
			!mm_forbids_zeropage(vma->vm_mm) &&
			transparent_hugepage_use_zero_page()) {
		pgtable_t pgtable;
		struct page *zero_page;
		vm_fault_t ret;
		pgtable = pte_alloc_one(vma->vm_mm);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = mm_get_huge_zero_page(vma->vm_mm);
		if (unlikely(!zero_page)) {
			pte_free(vma->vm_mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		ret = 0;
		if (pmd_none(*vmf->pmd)) {
			ret = check_stable_address_space(vma->vm_mm);
			if (ret) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
			} else if (userfaultfd_missing(vma)) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
				ret = handle_userfault(vmf, VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_page(pgtable, vma->vm_mm, vma,
						   haddr, vmf->pmd, zero_page);
				update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
				spin_unlock(vmf->ptl);
			}
		} else {
			spin_unlock(vmf->ptl);
			pte_free(vma->vm_mm, pgtable);
		}
		return ret;
	}
	gfp = vma_thp_gfp_mask(vma);
	folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
	if (unlikely(!folio)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
}

static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
		pgtable_t pgtable)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!pmd_none(*pmd)) {
		if (write) {
			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
				goto out_unlock;
			}
			entry = pmd_mkyoung(*pmd);
			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
				update_mmu_cache_pmd(vma, addr, pmd);
		}

		goto out_unlock;
	}

	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pmd_mkdevmap(entry);
	if (write) {
		entry = pmd_mkyoung(pmd_mkdirty(entry));
		entry = maybe_pmd_mkwrite(entry, vma);
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		mm_inc_nr_ptes(mm);
		pgtable = NULL;
	}

	set_pmd_at(mm, addr, pmd, entry);
	update_mmu_cache_pmd(vma, addr, pmd);

out_unlock:
	spin_unlock(ptl);
	if (pgtable)
		pte_free(mm, pgtable);
}

/**
 * vmf_insert_pfn_pmd_prot - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @pgprot: page protection to use
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info and
 * also consult the vmf_insert_mixed_prot() documentation when
 * @pgprot != @vmf->vma->vm_page_prot.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write)
{
	unsigned long addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgtable_t pgtable = NULL;

	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd_prot);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);
	return pud;
}

static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	pud_t entry;
	spinlock_t *ptl;

	ptl = pud_lock(mm, pud);
	if (!pud_none(*pud)) {
		if (write) {
			if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pud(*pud));
				goto out_unlock;
			}
			entry = pud_mkyoung(*pud);
			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
				update_mmu_cache_pud(vma, addr, pud);
		}
		goto out_unlock;
	}

	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pud_mkdevmap(entry);
	if (write) {
		entry = pud_mkyoung(pud_mkdirty(entry));
		entry = maybe_pud_mkwrite(entry, vma);
	}
	set_pud_at(mm, addr, pud, entry);
	update_mmu_cache_pud(vma, addr, pud);

out_unlock:
	spin_unlock(ptl);
}

/**
 * vmf_insert_pfn_pud_prot - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @pgprot: page protection to use
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info and
 * also consult the vmf_insert_mixed_prot() documentation when
 * @pgprot != @vmf->vma->vm_page_prot.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write)
{
	unsigned long addr = vmf->address & PUD_MASK;
	struct vm_area_struct *vma = vmf->vma;

	/*
	 * If we had pud_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud_prot);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		      pmd_t *pmd, bool write)
{
	pmd_t _pmd;

	_pmd = pmd_mkyoung(*pmd);
	if (write)
		_pmd = pmd_mkdirty(_pmd);
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				  pmd, _pmd, write))
		update_mmu_cache_pmd(vma, addr, pmd);
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pmd_pfn(*pmd);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return NULL;

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		return NULL;

	if (pmd_present(*pmd) && pmd_devmap(*pmd))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	if (!try_grab_page(page, flags))
		page = ERR_PTR(-ENOMEM);

	return page;
}

int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable = NULL;
	int ret = -ENOMEM;

	/* Skip if can be re-fill on fault */
	if (!vma_is_anonymous(dst_vma))
		return 0;

	pgtable = pte_alloc_one(dst_mm);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
Shutemov src_ptl = pmd_lockptr(src_mm, src_pmd); 1094c4088ebdSKirill A. Shutemov spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 109571e3aac0SAndrea Arcangeli 109671e3aac0SAndrea Arcangeli ret = -EAGAIN; 109771e3aac0SAndrea Arcangeli pmd = *src_pmd; 109884c3fc4eSZi Yan 109984c3fc4eSZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 110084c3fc4eSZi Yan if (unlikely(is_swap_pmd(pmd))) { 110184c3fc4eSZi Yan swp_entry_t entry = pmd_to_swp_entry(pmd); 110284c3fc4eSZi Yan 110384c3fc4eSZi Yan VM_BUG_ON(!is_pmd_migration_entry(pmd)); 11046c287605SDavid Hildenbrand if (!is_readable_migration_entry(entry)) { 11054dd845b5SAlistair Popple entry = make_readable_migration_entry( 11064dd845b5SAlistair Popple swp_offset(entry)); 110784c3fc4eSZi Yan pmd = swp_entry_to_pmd(entry); 1108ab6e3d09SNaoya Horiguchi if (pmd_swp_soft_dirty(*src_pmd)) 1109ab6e3d09SNaoya Horiguchi pmd = pmd_swp_mksoft_dirty(pmd); 11108f34f1eaSPeter Xu if (pmd_swp_uffd_wp(*src_pmd)) 11118f34f1eaSPeter Xu pmd = pmd_swp_mkuffd_wp(pmd); 111284c3fc4eSZi Yan set_pmd_at(src_mm, addr, src_pmd, pmd); 111384c3fc4eSZi Yan } 1114dd8a67f9SZi Yan add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); 1115af5b0f6aSKirill A. Shutemov mm_inc_nr_ptes(dst_mm); 1116dd8a67f9SZi Yan pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); 11178f34f1eaSPeter Xu if (!userfaultfd_wp(dst_vma)) 11188f34f1eaSPeter Xu pmd = pmd_swp_clear_uffd_wp(pmd); 111984c3fc4eSZi Yan set_pmd_at(dst_mm, addr, dst_pmd, pmd); 112084c3fc4eSZi Yan ret = 0; 112184c3fc4eSZi Yan goto out_unlock; 112284c3fc4eSZi Yan } 112384c3fc4eSZi Yan #endif 112484c3fc4eSZi Yan 1125628d47ceSKirill A. Shutemov if (unlikely(!pmd_trans_huge(pmd))) { 112671e3aac0SAndrea Arcangeli pte_free(dst_mm, pgtable); 112771e3aac0SAndrea Arcangeli goto out_unlock; 112871e3aac0SAndrea Arcangeli } 1129fc9fe822SKirill A. Shutemov /* 1130c4088ebdSKirill A. Shutemov * When page table lock is held, the huge zero pmd should not be 1131fc9fe822SKirill A. Shutemov * under splitting since we don't split the page itself, only pmd to 1132fc9fe822SKirill A. Shutemov * a page table. 1133fc9fe822SKirill A. Shutemov */ 1134fc9fe822SKirill A. Shutemov if (is_huge_zero_pmd(pmd)) { 113597ae1749SKirill A. Shutemov /* 113697ae1749SKirill A. Shutemov * get_huge_zero_page() will never allocate a new page here, 113797ae1749SKirill A. Shutemov * since we already have a zero page to copy. It just takes a 113897ae1749SKirill A. Shutemov * reference. 113997ae1749SKirill A. Shutemov */ 11405fc7a5f6SPeter Xu mm_get_huge_zero_page(dst_mm); 11415fc7a5f6SPeter Xu goto out_zero_page; 1142fc9fe822SKirill A. Shutemov } 1143de466bd6SMel Gorman 114471e3aac0SAndrea Arcangeli src_page = pmd_page(pmd); 1145309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(src_page), src_page); 1146d042035eSPeter Xu 1147fb3d824dSDavid Hildenbrand get_page(src_page); 1148fb3d824dSDavid Hildenbrand if (unlikely(page_try_dup_anon_rmap(src_page, true, src_vma))) { 1149fb3d824dSDavid Hildenbrand /* Page maybe pinned: split and retry the fault on PTEs. */ 1150fb3d824dSDavid Hildenbrand put_page(src_page); 1151d042035eSPeter Xu pte_free(dst_mm, pgtable); 1152d042035eSPeter Xu spin_unlock(src_ptl); 1153d042035eSPeter Xu spin_unlock(dst_ptl); 11548f34f1eaSPeter Xu __split_huge_pmd(src_vma, src_pmd, addr, false, NULL); 1155d042035eSPeter Xu return -EAGAIN; 1156d042035eSPeter Xu } 115771e3aac0SAndrea Arcangeli add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); 11585fc7a5f6SPeter Xu out_zero_page: 1159c4812909SKirill A. 
Shutemov mm_inc_nr_ptes(dst_mm); 11605c7fb56eSDan Williams pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); 116171e3aac0SAndrea Arcangeli pmdp_set_wrprotect(src_mm, addr, src_pmd); 11628f34f1eaSPeter Xu if (!userfaultfd_wp(dst_vma)) 11638f34f1eaSPeter Xu pmd = pmd_clear_uffd_wp(pmd); 116471e3aac0SAndrea Arcangeli pmd = pmd_mkold(pmd_wrprotect(pmd)); 116571e3aac0SAndrea Arcangeli set_pmd_at(dst_mm, addr, dst_pmd, pmd); 116671e3aac0SAndrea Arcangeli 116771e3aac0SAndrea Arcangeli ret = 0; 116871e3aac0SAndrea Arcangeli out_unlock: 1169c4088ebdSKirill A. Shutemov spin_unlock(src_ptl); 1170c4088ebdSKirill A. Shutemov spin_unlock(dst_ptl); 117171e3aac0SAndrea Arcangeli out: 117271e3aac0SAndrea Arcangeli return ret; 117371e3aac0SAndrea Arcangeli } 117471e3aac0SAndrea Arcangeli 1175a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 1176a00cc7d9SMatthew Wilcox static void touch_pud(struct vm_area_struct *vma, unsigned long addr, 11775fe653e9SMiaohe Lin pud_t *pud, bool write) 1178a00cc7d9SMatthew Wilcox { 1179a00cc7d9SMatthew Wilcox pud_t _pud; 1180a00cc7d9SMatthew Wilcox 1181a8f97366SKirill A. Shutemov _pud = pud_mkyoung(*pud); 11825fe653e9SMiaohe Lin if (write) 1183a8f97366SKirill A. Shutemov _pud = pud_mkdirty(_pud); 1184a00cc7d9SMatthew Wilcox if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK, 11855fe653e9SMiaohe Lin pud, _pud, write)) 1186a00cc7d9SMatthew Wilcox update_mmu_cache_pud(vma, addr, pud); 1187a00cc7d9SMatthew Wilcox } 1188a00cc7d9SMatthew Wilcox 1189a00cc7d9SMatthew Wilcox struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, 1190df06b37fSKeith Busch pud_t *pud, int flags, struct dev_pagemap **pgmap) 1191a00cc7d9SMatthew Wilcox { 1192a00cc7d9SMatthew Wilcox unsigned long pfn = pud_pfn(*pud); 1193a00cc7d9SMatthew Wilcox struct mm_struct *mm = vma->vm_mm; 1194a00cc7d9SMatthew Wilcox struct page *page; 1195a00cc7d9SMatthew Wilcox 1196a00cc7d9SMatthew Wilcox assert_spin_locked(pud_lockptr(mm, pud)); 1197a00cc7d9SMatthew Wilcox 1198f6f37321SLinus Torvalds if (flags & FOLL_WRITE && !pud_write(*pud)) 1199a00cc7d9SMatthew Wilcox return NULL; 1200a00cc7d9SMatthew Wilcox 12013faa52c0SJohn Hubbard /* FOLL_GET and FOLL_PIN are mutually exclusive. */ 12023faa52c0SJohn Hubbard if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) == 12033faa52c0SJohn Hubbard (FOLL_PIN | FOLL_GET))) 12043faa52c0SJohn Hubbard return NULL; 12053faa52c0SJohn Hubbard 1206a00cc7d9SMatthew Wilcox if (pud_present(*pud) && pud_devmap(*pud)) 1207a00cc7d9SMatthew Wilcox /* pass */; 1208a00cc7d9SMatthew Wilcox else 1209a00cc7d9SMatthew Wilcox return NULL; 1210a00cc7d9SMatthew Wilcox 1211a00cc7d9SMatthew Wilcox if (flags & FOLL_TOUCH) 12125fe653e9SMiaohe Lin touch_pud(vma, addr, pud, flags & FOLL_WRITE); 1213a00cc7d9SMatthew Wilcox 1214a00cc7d9SMatthew Wilcox /* 1215a00cc7d9SMatthew Wilcox * device mapped pages can only be returned if the 1216a00cc7d9SMatthew Wilcox * caller will manage the page reference count. 
12173faa52c0SJohn Hubbard * 12183faa52c0SJohn Hubbard * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here: 1219a00cc7d9SMatthew Wilcox */ 12203faa52c0SJohn Hubbard if (!(flags & (FOLL_GET | FOLL_PIN))) 1221a00cc7d9SMatthew Wilcox return ERR_PTR(-EEXIST); 1222a00cc7d9SMatthew Wilcox 1223a00cc7d9SMatthew Wilcox pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT; 1224df06b37fSKeith Busch *pgmap = get_dev_pagemap(pfn, *pgmap); 1225df06b37fSKeith Busch if (!*pgmap) 1226a00cc7d9SMatthew Wilcox return ERR_PTR(-EFAULT); 1227a00cc7d9SMatthew Wilcox page = pfn_to_page(pfn); 12283faa52c0SJohn Hubbard if (!try_grab_page(page, flags)) 12293faa52c0SJohn Hubbard page = ERR_PTR(-ENOMEM); 1230a00cc7d9SMatthew Wilcox 1231a00cc7d9SMatthew Wilcox return page; 1232a00cc7d9SMatthew Wilcox } 1233a00cc7d9SMatthew Wilcox 1234a00cc7d9SMatthew Wilcox int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, 1235a00cc7d9SMatthew Wilcox pud_t *dst_pud, pud_t *src_pud, unsigned long addr, 1236a00cc7d9SMatthew Wilcox struct vm_area_struct *vma) 1237a00cc7d9SMatthew Wilcox { 1238a00cc7d9SMatthew Wilcox spinlock_t *dst_ptl, *src_ptl; 1239a00cc7d9SMatthew Wilcox pud_t pud; 1240a00cc7d9SMatthew Wilcox int ret; 1241a00cc7d9SMatthew Wilcox 1242a00cc7d9SMatthew Wilcox dst_ptl = pud_lock(dst_mm, dst_pud); 1243a00cc7d9SMatthew Wilcox src_ptl = pud_lockptr(src_mm, src_pud); 1244a00cc7d9SMatthew Wilcox spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 1245a00cc7d9SMatthew Wilcox 1246a00cc7d9SMatthew Wilcox ret = -EAGAIN; 1247a00cc7d9SMatthew Wilcox pud = *src_pud; 1248a00cc7d9SMatthew Wilcox if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud))) 1249a00cc7d9SMatthew Wilcox goto out_unlock; 1250a00cc7d9SMatthew Wilcox 1251a00cc7d9SMatthew Wilcox /* 1252a00cc7d9SMatthew Wilcox * When page table lock is held, the huge zero pud should not be 1253a00cc7d9SMatthew Wilcox * under splitting since we don't split the page itself, only pud to 1254a00cc7d9SMatthew Wilcox * a page table. 1255a00cc7d9SMatthew Wilcox */ 1256a00cc7d9SMatthew Wilcox if (is_huge_zero_pud(pud)) { 1257a00cc7d9SMatthew Wilcox /* No huge zero pud yet */ 1258a00cc7d9SMatthew Wilcox } 1259a00cc7d9SMatthew Wilcox 1260fb3d824dSDavid Hildenbrand /* 1261fb3d824dSDavid Hildenbrand * TODO: once we support anonymous pages, use page_try_dup_anon_rmap() 1262fb3d824dSDavid Hildenbrand * and split if duplicating fails. 
1263fb3d824dSDavid Hildenbrand */ 1264a00cc7d9SMatthew Wilcox pudp_set_wrprotect(src_mm, addr, src_pud); 1265a00cc7d9SMatthew Wilcox pud = pud_mkold(pud_wrprotect(pud)); 1266a00cc7d9SMatthew Wilcox set_pud_at(dst_mm, addr, dst_pud, pud); 1267a00cc7d9SMatthew Wilcox 1268a00cc7d9SMatthew Wilcox ret = 0; 1269a00cc7d9SMatthew Wilcox out_unlock: 1270a00cc7d9SMatthew Wilcox spin_unlock(src_ptl); 1271a00cc7d9SMatthew Wilcox spin_unlock(dst_ptl); 1272a00cc7d9SMatthew Wilcox return ret; 1273a00cc7d9SMatthew Wilcox } 1274a00cc7d9SMatthew Wilcox 1275a00cc7d9SMatthew Wilcox void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) 1276a00cc7d9SMatthew Wilcox { 1277a00cc7d9SMatthew Wilcox bool write = vmf->flags & FAULT_FLAG_WRITE; 1278a00cc7d9SMatthew Wilcox 1279a00cc7d9SMatthew Wilcox vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud); 1280a00cc7d9SMatthew Wilcox if (unlikely(!pud_same(*vmf->pud, orig_pud))) 1281a00cc7d9SMatthew Wilcox goto unlock; 1282a00cc7d9SMatthew Wilcox 12835fe653e9SMiaohe Lin touch_pud(vmf->vma, vmf->address, vmf->pud, write); 1284a00cc7d9SMatthew Wilcox unlock: 1285a00cc7d9SMatthew Wilcox spin_unlock(vmf->ptl); 1286a00cc7d9SMatthew Wilcox } 1287a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 1288a00cc7d9SMatthew Wilcox 12895db4f15cSYang Shi void huge_pmd_set_accessed(struct vm_fault *vmf) 1290a1dd450bSWill Deacon { 129120f664aaSMinchan Kim bool write = vmf->flags & FAULT_FLAG_WRITE; 1292a1dd450bSWill Deacon 129382b0f8c3SJan Kara vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); 1294a69e4717SMiaohe Lin if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd))) 1295a1dd450bSWill Deacon goto unlock; 1296a1dd450bSWill Deacon 1297a69e4717SMiaohe Lin touch_pmd(vmf->vma, vmf->address, vmf->pmd, write); 1298a1dd450bSWill Deacon 1299a1dd450bSWill Deacon unlock: 130082b0f8c3SJan Kara spin_unlock(vmf->ptl); 1301a1dd450bSWill Deacon } 1302a1dd450bSWill Deacon 13035db4f15cSYang Shi vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf) 130471e3aac0SAndrea Arcangeli { 1305c89357e2SDavid Hildenbrand const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 130682b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 13073917c802SKirill A. Shutemov struct page *page; 130882b0f8c3SJan Kara unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 13095db4f15cSYang Shi pmd_t orig_pmd = vmf->orig_pmd; 131071e3aac0SAndrea Arcangeli 131182b0f8c3SJan Kara vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); 131281d1b09cSSasha Levin VM_BUG_ON_VMA(!vma->anon_vma, vma); 13133917c802SKirill A. Shutemov 1314c89357e2SDavid Hildenbrand VM_BUG_ON(unshare && (vmf->flags & FAULT_FLAG_WRITE)); 1315c89357e2SDavid Hildenbrand VM_BUG_ON(!unshare && !(vmf->flags & FAULT_FLAG_WRITE)); 1316c89357e2SDavid Hildenbrand 131793b4796dSKirill A. Shutemov if (is_huge_zero_pmd(orig_pmd)) 13183917c802SKirill A. Shutemov goto fallback; 13193917c802SKirill A. Shutemov 132082b0f8c3SJan Kara spin_lock(vmf->ptl); 13213917c802SKirill A. Shutemov 13223917c802SKirill A. Shutemov if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { 13233917c802SKirill A. Shutemov spin_unlock(vmf->ptl); 13243917c802SKirill A. Shutemov return 0; 13253917c802SKirill A. Shutemov } 132671e3aac0SAndrea Arcangeli 132771e3aac0SAndrea Arcangeli page = pmd_page(orig_pmd); 1328f6004e73SMiaohe Lin VM_BUG_ON_PAGE(!PageHead(page), page); 13293917c802SKirill A. Shutemov 13306c287605SDavid Hildenbrand /* Early check when only holding the PT lock. 
*/ 13316c287605SDavid Hildenbrand if (PageAnonExclusive(page)) 13326c287605SDavid Hildenbrand goto reuse; 13336c287605SDavid Hildenbrand 1334ba3c4ce6SHuang Ying if (!trylock_page(page)) { 1335ba3c4ce6SHuang Ying get_page(page); 1336ba3c4ce6SHuang Ying spin_unlock(vmf->ptl); 1337ba3c4ce6SHuang Ying lock_page(page); 1338ba3c4ce6SHuang Ying spin_lock(vmf->ptl); 1339ba3c4ce6SHuang Ying if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { 13403917c802SKirill A. Shutemov spin_unlock(vmf->ptl); 1341ba3c4ce6SHuang Ying unlock_page(page); 1342ba3c4ce6SHuang Ying put_page(page); 13433917c802SKirill A. Shutemov return 0; 1344ba3c4ce6SHuang Ying } 1345ba3c4ce6SHuang Ying put_page(page); 1346ba3c4ce6SHuang Ying } 13473917c802SKirill A. Shutemov 13486c287605SDavid Hildenbrand /* Recheck after temporarily dropping the PT lock. */ 13496c287605SDavid Hildenbrand if (PageAnonExclusive(page)) { 13506c287605SDavid Hildenbrand unlock_page(page); 13516c287605SDavid Hildenbrand goto reuse; 13526c287605SDavid Hildenbrand } 13536c287605SDavid Hildenbrand 13543917c802SKirill A. Shutemov /* 1355c89357e2SDavid Hildenbrand * See do_wp_page(): we can only reuse the page exclusively if there are 13563bff7e3fSDavid Hildenbrand * no additional references. Note that we always drain the LRU 13573bff7e3fSDavid Hildenbrand * pagevecs immediately after adding a THP. 13583917c802SKirill A. Shutemov */ 13593bff7e3fSDavid Hildenbrand if (page_count(page) > 1 + PageSwapCache(page) * thp_nr_pages(page)) 13603bff7e3fSDavid Hildenbrand goto unlock_fallback; 13613bff7e3fSDavid Hildenbrand if (PageSwapCache(page)) 13623bff7e3fSDavid Hildenbrand try_to_free_swap(page); 13633bff7e3fSDavid Hildenbrand if (page_count(page) == 1) { 136471e3aac0SAndrea Arcangeli pmd_t entry; 13656c54dc6cSDavid Hildenbrand 13666c54dc6cSDavid Hildenbrand page_move_anon_rmap(page, vma); 13676c287605SDavid Hildenbrand unlock_page(page); 13686c287605SDavid Hildenbrand reuse: 1369c89357e2SDavid Hildenbrand if (unlikely(unshare)) { 1370c89357e2SDavid Hildenbrand spin_unlock(vmf->ptl); 1371c89357e2SDavid Hildenbrand return 0; 1372c89357e2SDavid Hildenbrand } 137371e3aac0SAndrea Arcangeli entry = pmd_mkyoung(orig_pmd); 1374f55e1014SLinus Torvalds entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 137582b0f8c3SJan Kara if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) 137682b0f8c3SJan Kara update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 13773917c802SKirill A. Shutemov spin_unlock(vmf->ptl); 13783917c802SKirill A. Shutemov return VM_FAULT_WRITE; 137971e3aac0SAndrea Arcangeli } 13803917c802SKirill A. Shutemov 13813bff7e3fSDavid Hildenbrand unlock_fallback: 1382ba3c4ce6SHuang Ying unlock_page(page); 138382b0f8c3SJan Kara spin_unlock(vmf->ptl); 13843917c802SKirill A. Shutemov fallback: 13853917c802SKirill A. Shutemov __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); 13863917c802SKirill A. Shutemov return VM_FAULT_FALLBACK; 138771e3aac0SAndrea Arcangeli } 138871e3aac0SAndrea Arcangeli 13895535be30SDavid Hildenbrand /* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */ 13905535be30SDavid Hildenbrand static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page, 13915535be30SDavid Hildenbrand struct vm_area_struct *vma, 13925535be30SDavid Hildenbrand unsigned int flags) 13938310d48bSKeno Fischer { 13945535be30SDavid Hildenbrand /* If the pmd is writable, we can write to the page. 
*/ 13955535be30SDavid Hildenbrand if (pmd_write(pmd)) 13965535be30SDavid Hildenbrand return true; 13975535be30SDavid Hildenbrand 13985535be30SDavid Hildenbrand /* Maybe FOLL_FORCE is set to override it? */ 13995535be30SDavid Hildenbrand if (!(flags & FOLL_FORCE)) 14005535be30SDavid Hildenbrand return false; 14015535be30SDavid Hildenbrand 14025535be30SDavid Hildenbrand /* But FOLL_FORCE has no effect on shared mappings */ 14035535be30SDavid Hildenbrand if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) 14045535be30SDavid Hildenbrand return false; 14055535be30SDavid Hildenbrand 14065535be30SDavid Hildenbrand /* ... or read-only private ones */ 14075535be30SDavid Hildenbrand if (!(vma->vm_flags & VM_MAYWRITE)) 14085535be30SDavid Hildenbrand return false; 14095535be30SDavid Hildenbrand 14105535be30SDavid Hildenbrand /* ... or already writable ones that just need to take a write fault */ 14115535be30SDavid Hildenbrand if (vma->vm_flags & VM_WRITE) 14125535be30SDavid Hildenbrand return false; 14135535be30SDavid Hildenbrand 14145535be30SDavid Hildenbrand /* 14155535be30SDavid Hildenbrand * See can_change_pte_writable(): we broke COW and could map the page 14165535be30SDavid Hildenbrand * writable if we have an exclusive anonymous page ... 14175535be30SDavid Hildenbrand */ 14185535be30SDavid Hildenbrand if (!page || !PageAnon(page) || !PageAnonExclusive(page)) 14195535be30SDavid Hildenbrand return false; 14205535be30SDavid Hildenbrand 14215535be30SDavid Hildenbrand /* ... and a write-fault isn't required for other reasons. */ 14225535be30SDavid Hildenbrand if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd)) 14235535be30SDavid Hildenbrand return false; 14245535be30SDavid Hildenbrand return !userfaultfd_huge_pmd_wp(vma, pmd); 14258310d48bSKeno Fischer } 14268310d48bSKeno Fischer 1427b676b293SDavid Rientjes struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, 142871e3aac0SAndrea Arcangeli unsigned long addr, 142971e3aac0SAndrea Arcangeli pmd_t *pmd, 143071e3aac0SAndrea Arcangeli unsigned int flags) 143171e3aac0SAndrea Arcangeli { 1432b676b293SDavid Rientjes struct mm_struct *mm = vma->vm_mm; 14335535be30SDavid Hildenbrand struct page *page; 143471e3aac0SAndrea Arcangeli 1435c4088ebdSKirill A. Shutemov assert_spin_locked(pmd_lockptr(mm, pmd)); 143671e3aac0SAndrea Arcangeli 14375535be30SDavid Hildenbrand page = pmd_page(*pmd); 14385535be30SDavid Hildenbrand VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page); 14395535be30SDavid Hildenbrand 14405535be30SDavid Hildenbrand if ((flags & FOLL_WRITE) && 14415535be30SDavid Hildenbrand !can_follow_write_pmd(*pmd, page, vma, flags)) 14425535be30SDavid Hildenbrand return NULL; 144371e3aac0SAndrea Arcangeli 144485facf25SKirill A. Shutemov /* Avoid dumping huge zero page */ 144585facf25SKirill A. Shutemov if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) 144685facf25SKirill A. Shutemov return ERR_PTR(-EFAULT); 144785facf25SKirill A. 
Shutemov 14482b4847e7SMel Gorman /* Full NUMA hinting faults to serialise migration in fault paths */ 14498a0516edSMel Gorman if ((flags & FOLL_NUMA) && pmd_protnone(*pmd)) 14505535be30SDavid Hildenbrand return NULL; 14513faa52c0SJohn Hubbard 1452a7f22660SDavid Hildenbrand if (!pmd_write(*pmd) && gup_must_unshare(flags, page)) 1453a7f22660SDavid Hildenbrand return ERR_PTR(-EMLINK); 1454a7f22660SDavid Hildenbrand 1455b6a2619cSDavid Hildenbrand VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) && 1456b6a2619cSDavid Hildenbrand !PageAnonExclusive(page), page); 1457b6a2619cSDavid Hildenbrand 14583faa52c0SJohn Hubbard if (!try_grab_page(page, flags)) 14593faa52c0SJohn Hubbard return ERR_PTR(-ENOMEM); 14603faa52c0SJohn Hubbard 14613565fce3SDan Williams if (flags & FOLL_TOUCH) 1462a69e4717SMiaohe Lin touch_pmd(vma, addr, pmd, flags & FOLL_WRITE); 14633faa52c0SJohn Hubbard 146471e3aac0SAndrea Arcangeli page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; 1465ca120cf6SDan Williams VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page); 146671e3aac0SAndrea Arcangeli 146771e3aac0SAndrea Arcangeli return page; 146871e3aac0SAndrea Arcangeli } 146971e3aac0SAndrea Arcangeli 1470d10e63f2SMel Gorman /* NUMA hinting page fault entry point for trans huge pmds */ 14715db4f15cSYang Shi vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) 1472d10e63f2SMel Gorman { 147382b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 1474c5b5a3ddSYang Shi pmd_t oldpmd = vmf->orig_pmd; 1475c5b5a3ddSYang Shi pmd_t pmd; 1476b32967ffSMel Gorman struct page *page; 147782b0f8c3SJan Kara unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 1478c5b5a3ddSYang Shi int page_nid = NUMA_NO_NODE; 147933024536SHuang Ying int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK); 14808191acbdSMel Gorman bool migrated = false; 1481c5b5a3ddSYang Shi bool was_writable = pmd_savedwrite(oldpmd); 14826688cc05SPeter Zijlstra int flags = 0; 1483d10e63f2SMel Gorman 148482b0f8c3SJan Kara vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 1485c5b5a3ddSYang Shi if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) { 148682b0f8c3SJan Kara spin_unlock(vmf->ptl); 1487de466bd6SMel Gorman goto out; 1488de466bd6SMel Gorman } 1489de466bd6SMel Gorman 1490c5b5a3ddSYang Shi pmd = pmd_modify(oldpmd, vma->vm_page_prot); 1491c5b5a3ddSYang Shi page = vm_normal_page_pmd(vma, haddr, pmd); 1492c5b5a3ddSYang Shi if (!page) 1493c5b5a3ddSYang Shi goto out_map; 1494c5b5a3ddSYang Shi 1495c5b5a3ddSYang Shi /* See similar comment in do_numa_page for explanation */ 1496c5b5a3ddSYang Shi if (!was_writable) 1497c5b5a3ddSYang Shi flags |= TNF_NO_GROUP; 1498c5b5a3ddSYang Shi 1499c5b5a3ddSYang Shi page_nid = page_to_nid(page); 150033024536SHuang Ying /* 150133024536SHuang Ying * For memory tiering mode, cpupid of slow memory page is used 150233024536SHuang Ying * to record page access time. So use default value. 
150333024536SHuang Ying */ 150433024536SHuang Ying if (node_is_toptier(page_nid)) 1505c5b5a3ddSYang Shi last_cpupid = page_cpupid_last(page); 1506c5b5a3ddSYang Shi target_nid = numa_migrate_prep(page, vma, haddr, page_nid, 1507c5b5a3ddSYang Shi &flags); 1508c5b5a3ddSYang Shi 1509c5b5a3ddSYang Shi if (target_nid == NUMA_NO_NODE) { 1510c5b5a3ddSYang Shi put_page(page); 1511c5b5a3ddSYang Shi goto out_map; 1512c5b5a3ddSYang Shi } 1513c5b5a3ddSYang Shi 151482b0f8c3SJan Kara spin_unlock(vmf->ptl); 15158b1b436dSPeter Zijlstra 1516c5b5a3ddSYang Shi migrated = migrate_misplaced_page(page, vma, target_nid); 15176688cc05SPeter Zijlstra if (migrated) { 15186688cc05SPeter Zijlstra flags |= TNF_MIGRATED; 15198191acbdSMel Gorman page_nid = target_nid; 1520c5b5a3ddSYang Shi } else { 1521074c2381SMel Gorman flags |= TNF_MIGRATE_FAIL; 1522c5b5a3ddSYang Shi vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 1523c5b5a3ddSYang Shi if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) { 152482b0f8c3SJan Kara spin_unlock(vmf->ptl); 1525c5b5a3ddSYang Shi goto out; 1526c5b5a3ddSYang Shi } 1527c5b5a3ddSYang Shi goto out_map; 1528c5b5a3ddSYang Shi } 1529b8916634SMel Gorman 1530b8916634SMel Gorman out: 153198fa15f3SAnshuman Khandual if (page_nid != NUMA_NO_NODE) 153282b0f8c3SJan Kara task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, 15339a8b300fSAneesh Kumar K.V flags); 15348191acbdSMel Gorman 1535d10e63f2SMel Gorman return 0; 1536c5b5a3ddSYang Shi 1537c5b5a3ddSYang Shi out_map: 1538c5b5a3ddSYang Shi /* Restore the PMD */ 1539c5b5a3ddSYang Shi pmd = pmd_modify(oldpmd, vma->vm_page_prot); 1540c5b5a3ddSYang Shi pmd = pmd_mkyoung(pmd); 1541c5b5a3ddSYang Shi if (was_writable) 1542c5b5a3ddSYang Shi pmd = pmd_mkwrite(pmd); 1543c5b5a3ddSYang Shi set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); 1544c5b5a3ddSYang Shi update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 1545c5b5a3ddSYang Shi spin_unlock(vmf->ptl); 1546c5b5a3ddSYang Shi goto out; 1547d10e63f2SMel Gorman } 1548d10e63f2SMel Gorman 1549319904adSHuang Ying /* 1550319904adSHuang Ying * Return true if we do MADV_FREE successfully on entire pmd page. 1551319904adSHuang Ying * Otherwise, return false. 1552319904adSHuang Ying */ 1553319904adSHuang Ying bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1554b8d3c4c3SMinchan Kim pmd_t *pmd, unsigned long addr, unsigned long next) 1555b8d3c4c3SMinchan Kim { 1556b8d3c4c3SMinchan Kim spinlock_t *ptl; 1557b8d3c4c3SMinchan Kim pmd_t orig_pmd; 1558b8d3c4c3SMinchan Kim struct page *page; 1559b8d3c4c3SMinchan Kim struct mm_struct *mm = tlb->mm; 1560319904adSHuang Ying bool ret = false; 1561b8d3c4c3SMinchan Kim 1562ed6a7935SPeter Zijlstra tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 156307e32661SAneesh Kumar K.V 1564b6ec57f4SKirill A. Shutemov ptl = pmd_trans_huge_lock(pmd, vma); 1565b6ec57f4SKirill A. 
Shutemov if (!ptl) 156625eedabeSLinus Torvalds goto out_unlocked; 1567b8d3c4c3SMinchan Kim 1568b8d3c4c3SMinchan Kim orig_pmd = *pmd; 1569319904adSHuang Ying if (is_huge_zero_pmd(orig_pmd)) 1570b8d3c4c3SMinchan Kim goto out; 1571b8d3c4c3SMinchan Kim 157284c3fc4eSZi Yan if (unlikely(!pmd_present(orig_pmd))) { 157384c3fc4eSZi Yan VM_BUG_ON(thp_migration_supported() && 157484c3fc4eSZi Yan !is_pmd_migration_entry(orig_pmd)); 157584c3fc4eSZi Yan goto out; 157684c3fc4eSZi Yan } 157784c3fc4eSZi Yan 1578b8d3c4c3SMinchan Kim page = pmd_page(orig_pmd); 1579b8d3c4c3SMinchan Kim /* 1580b8d3c4c3SMinchan Kim * If other processes are mapping this page, we couldn't discard 1581b8d3c4c3SMinchan Kim * the page unless they all do MADV_FREE so let's skip the page. 1582b8d3c4c3SMinchan Kim */ 1583babbbdd0SMiaohe Lin if (total_mapcount(page) != 1) 1584b8d3c4c3SMinchan Kim goto out; 1585b8d3c4c3SMinchan Kim 1586b8d3c4c3SMinchan Kim if (!trylock_page(page)) 1587b8d3c4c3SMinchan Kim goto out; 1588b8d3c4c3SMinchan Kim 1589b8d3c4c3SMinchan Kim /* 1590b8d3c4c3SMinchan Kim * If user want to discard part-pages of THP, split it so MADV_FREE 1591b8d3c4c3SMinchan Kim * will deactivate only them. 1592b8d3c4c3SMinchan Kim */ 1593b8d3c4c3SMinchan Kim if (next - addr != HPAGE_PMD_SIZE) { 1594b8d3c4c3SMinchan Kim get_page(page); 1595b8d3c4c3SMinchan Kim spin_unlock(ptl); 15969818b8cdSHuang Ying split_huge_page(page); 1597b8d3c4c3SMinchan Kim unlock_page(page); 1598bbf29ffcSKirill A. Shutemov put_page(page); 1599b8d3c4c3SMinchan Kim goto out_unlocked; 1600b8d3c4c3SMinchan Kim } 1601b8d3c4c3SMinchan Kim 1602b8d3c4c3SMinchan Kim if (PageDirty(page)) 1603b8d3c4c3SMinchan Kim ClearPageDirty(page); 1604b8d3c4c3SMinchan Kim unlock_page(page); 1605b8d3c4c3SMinchan Kim 1606b8d3c4c3SMinchan Kim if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) { 160758ceeb6bSKirill A. Shutemov pmdp_invalidate(vma, addr, pmd); 1608b8d3c4c3SMinchan Kim orig_pmd = pmd_mkold(orig_pmd); 1609b8d3c4c3SMinchan Kim orig_pmd = pmd_mkclean(orig_pmd); 1610b8d3c4c3SMinchan Kim 1611b8d3c4c3SMinchan Kim set_pmd_at(mm, addr, pmd, orig_pmd); 1612b8d3c4c3SMinchan Kim tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 1613b8d3c4c3SMinchan Kim } 1614802a3a92SShaohua Li 1615802a3a92SShaohua Li mark_page_lazyfree(page); 1616319904adSHuang Ying ret = true; 1617b8d3c4c3SMinchan Kim out: 1618b8d3c4c3SMinchan Kim spin_unlock(ptl); 1619b8d3c4c3SMinchan Kim out_unlocked: 1620b8d3c4c3SMinchan Kim return ret; 1621b8d3c4c3SMinchan Kim } 1622b8d3c4c3SMinchan Kim 1623953c66c2SAneesh Kumar K.V static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) 1624953c66c2SAneesh Kumar K.V { 1625953c66c2SAneesh Kumar K.V pgtable_t pgtable; 1626953c66c2SAneesh Kumar K.V 1627953c66c2SAneesh Kumar K.V pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1628953c66c2SAneesh Kumar K.V pte_free(mm, pgtable); 1629c4812909SKirill A. Shutemov mm_dec_nr_ptes(mm); 1630953c66c2SAneesh Kumar K.V } 1631953c66c2SAneesh Kumar K.V 163271e3aac0SAndrea Arcangeli int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1633f21760b1SShaohua Li pmd_t *pmd, unsigned long addr) 163471e3aac0SAndrea Arcangeli { 1635f5c8ad47SDavid Miller pmd_t orig_pmd; 1636da146769SKirill A. Shutemov spinlock_t *ptl; 1637da146769SKirill A. Shutemov 1638ed6a7935SPeter Zijlstra tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 163907e32661SAneesh Kumar K.V 1640b6ec57f4SKirill A. Shutemov ptl = __pmd_trans_huge_lock(pmd, vma); 1641b6ec57f4SKirill A. Shutemov if (!ptl) 1642da146769SKirill A. 
Shutemov return 0; 1643a6bf2bb0SAneesh Kumar K.V /* 1644a6bf2bb0SAneesh Kumar K.V * For architectures like ppc64 we look at deposited pgtable 16458809aa2dSAneesh Kumar K.V * when calling pmdp_huge_get_and_clear. So do the 1646a6bf2bb0SAneesh Kumar K.V * pgtable_trans_huge_withdraw after finishing pmdp related 1647a6bf2bb0SAneesh Kumar K.V * operations. 1648a6bf2bb0SAneesh Kumar K.V */ 164993a98695SAneesh Kumar K.V orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd, 1650fcbe08d6SMartin Schwidefsky tlb->fullmm); 1651f21760b1SShaohua Li tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 16522484ca9bSThomas Hellstrom (VMware) if (vma_is_special_huge(vma)) { 16533b6521f5SOliver O'Halloran if (arch_needs_pgtable_deposit()) 16543b6521f5SOliver O'Halloran zap_deposited_table(tlb->mm, pmd); 16554897c765SMatthew Wilcox spin_unlock(ptl); 1656da146769SKirill A. Shutemov } else if (is_huge_zero_pmd(orig_pmd)) { 1657c14a6eb4SOliver O'Halloran zap_deposited_table(tlb->mm, pmd); 1658bf929152SKirill A. Shutemov spin_unlock(ptl); 1659479f0abbSKirill A. Shutemov } else { 1660616b8371SZi Yan struct page *page = NULL; 1661616b8371SZi Yan int flush_needed = 1; 1662616b8371SZi Yan 1663616b8371SZi Yan if (pmd_present(orig_pmd)) { 1664616b8371SZi Yan page = pmd_page(orig_pmd); 1665cea86fe2SHugh Dickins page_remove_rmap(page, vma, true); 1666309381feSSasha Levin VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); 1667309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(page), page); 1668616b8371SZi Yan } else if (thp_migration_supported()) { 1669616b8371SZi Yan swp_entry_t entry; 1670616b8371SZi Yan 1671616b8371SZi Yan VM_BUG_ON(!is_pmd_migration_entry(orig_pmd)); 1672616b8371SZi Yan entry = pmd_to_swp_entry(orig_pmd); 1673af5cdaf8SAlistair Popple page = pfn_swap_entry_to_page(entry); 1674616b8371SZi Yan flush_needed = 0; 1675616b8371SZi Yan } else 1676616b8371SZi Yan WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); 1677616b8371SZi Yan 1678b5072380SKirill A. Shutemov if (PageAnon(page)) { 1679c14a6eb4SOliver O'Halloran zap_deposited_table(tlb->mm, pmd); 1680b5072380SKirill A. Shutemov add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); 1681b5072380SKirill A. Shutemov } else { 1682953c66c2SAneesh Kumar K.V if (arch_needs_pgtable_deposit()) 1683953c66c2SAneesh Kumar K.V zap_deposited_table(tlb->mm, pmd); 1684fadae295SYang Shi add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR); 1685b5072380SKirill A. Shutemov } 1686616b8371SZi Yan 1687bf929152SKirill A. Shutemov spin_unlock(ptl); 1688616b8371SZi Yan if (flush_needed) 1689e77b0852SAneesh Kumar K.V tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE); 1690479f0abbSKirill A. Shutemov } 1691da146769SKirill A. Shutemov return 1; 169271e3aac0SAndrea Arcangeli } 169371e3aac0SAndrea Arcangeli 16941dd38b6cSAneesh Kumar K.V #ifndef pmd_move_must_withdraw 16951dd38b6cSAneesh Kumar K.V static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl, 16961dd38b6cSAneesh Kumar K.V spinlock_t *old_pmd_ptl, 16971dd38b6cSAneesh Kumar K.V struct vm_area_struct *vma) 16981dd38b6cSAneesh Kumar K.V { 16991dd38b6cSAneesh Kumar K.V /* 17001dd38b6cSAneesh Kumar K.V * With split pmd lock we also need to move preallocated 17011dd38b6cSAneesh Kumar K.V * PTE page table if new_pmd is on different PMD page table. 17021dd38b6cSAneesh Kumar K.V * 17031dd38b6cSAneesh Kumar K.V * We also don't deposit and withdraw tables for file pages. 
17041dd38b6cSAneesh Kumar K.V */
17051dd38b6cSAneesh Kumar K.V return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
17061dd38b6cSAneesh Kumar K.V }
17071dd38b6cSAneesh Kumar K.V #endif
17081dd38b6cSAneesh Kumar K.V
1709ab6e3d09SNaoya Horiguchi static pmd_t move_soft_dirty_pmd(pmd_t pmd)
1710ab6e3d09SNaoya Horiguchi {
1711ab6e3d09SNaoya Horiguchi #ifdef CONFIG_MEM_SOFT_DIRTY
1712ab6e3d09SNaoya Horiguchi if (unlikely(is_pmd_migration_entry(pmd)))
1713ab6e3d09SNaoya Horiguchi pmd = pmd_swp_mksoft_dirty(pmd);
1714ab6e3d09SNaoya Horiguchi else if (pmd_present(pmd))
1715ab6e3d09SNaoya Horiguchi pmd = pmd_mksoft_dirty(pmd);
1716ab6e3d09SNaoya Horiguchi #endif
1717ab6e3d09SNaoya Horiguchi return pmd;
1718ab6e3d09SNaoya Horiguchi }
1719ab6e3d09SNaoya Horiguchi
1720bf8616d5SHugh Dickins bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
1721b8aa9d9dSWei Yang unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
172237a1c49aSAndrea Arcangeli {
1723bf929152SKirill A. Shutemov spinlock_t *old_ptl, *new_ptl;
172437a1c49aSAndrea Arcangeli pmd_t pmd;
172537a1c49aSAndrea Arcangeli struct mm_struct *mm = vma->vm_mm;
17265d190420SAaron Lu bool force_flush = false;
172737a1c49aSAndrea Arcangeli
172837a1c49aSAndrea Arcangeli /*
172937a1c49aSAndrea Arcangeli * The destination pmd shouldn't be established, free_pgtables()
173037a1c49aSAndrea Arcangeli * should have released it.
173137a1c49aSAndrea Arcangeli */
173237a1c49aSAndrea Arcangeli if (WARN_ON(!pmd_none(*new_pmd))) {
173337a1c49aSAndrea Arcangeli VM_BUG_ON(pmd_trans_huge(*new_pmd));
17344b471e88SKirill A. Shutemov return false;
173537a1c49aSAndrea Arcangeli }
173637a1c49aSAndrea Arcangeli
1737bf929152SKirill A. Shutemov /*
1738bf929152SKirill A. Shutemov * We don't have to worry about the ordering of src and dst
1739c1e8d7c6SMichel Lespinasse * ptlocks because exclusive mmap_lock prevents deadlock.
1740bf929152SKirill A. Shutemov */
1741b6ec57f4SKirill A. Shutemov old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
1742b6ec57f4SKirill A. Shutemov if (old_ptl) {
1743bf929152SKirill A. Shutemov new_ptl = pmd_lockptr(mm, new_pmd);
1744bf929152SKirill A. Shutemov if (new_ptl != old_ptl)
1745bf929152SKirill A. Shutemov spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
17468809aa2dSAneesh Kumar K.V pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1747eb66ae03SLinus Torvalds if (pmd_present(pmd))
1748a2ce2666SAaron Lu force_flush = true;
174937a1c49aSAndrea Arcangeli VM_BUG_ON(!pmd_none(*new_pmd));
17503592806cSKirill A. Shutemov
17511dd38b6cSAneesh Kumar K.V if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
1752b3084f4dSAneesh Kumar K.V pgtable_t pgtable;
17533592806cSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
17543592806cSKirill A. Shutemov pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
17553592806cSKirill A. Shutemov }
1756ab6e3d09SNaoya Horiguchi pmd = move_soft_dirty_pmd(pmd);
1757ab6e3d09SNaoya Horiguchi set_pmd_at(mm, new_addr, new_pmd, pmd);
17585d190420SAaron Lu if (force_flush)
17597c38f181SMiaohe Lin flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
1760eb66ae03SLinus Torvalds if (new_ptl != old_ptl)
1761eb66ae03SLinus Torvalds spin_unlock(new_ptl);
1762bf929152SKirill A. Shutemov spin_unlock(old_ptl);
17634b471e88SKirill A. Shutemov return true;
176437a1c49aSAndrea Arcangeli }
17654b471e88SKirill A.
Shutemov return false; 176637a1c49aSAndrea Arcangeli } 176737a1c49aSAndrea Arcangeli 1768f123d74aSMel Gorman /* 1769f123d74aSMel Gorman * Returns 1770f123d74aSMel Gorman * - 0 if PMD could not be locked 1771f0953a1bSIngo Molnar * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary 1772e346e668SYang Shi * or if prot_numa but THP migration is not supported 1773f0953a1bSIngo Molnar * - HPAGE_PMD_NR if protections changed and TLB flush necessary 1774f123d74aSMel Gorman */ 17754a18419fSNadav Amit int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 17764a18419fSNadav Amit pmd_t *pmd, unsigned long addr, pgprot_t newprot, 17774a18419fSNadav Amit unsigned long cp_flags) 1778cd7548abSJohannes Weiner { 1779cd7548abSJohannes Weiner struct mm_struct *mm = vma->vm_mm; 1780bf929152SKirill A. Shutemov spinlock_t *ptl; 1781c9fe6656SNadav Amit pmd_t oldpmd, entry; 17820a85e51dSKirill A. Shutemov bool preserve_write; 17830a85e51dSKirill A. Shutemov int ret; 178458705444SPeter Xu bool prot_numa = cp_flags & MM_CP_PROT_NUMA; 1785292924b2SPeter Xu bool uffd_wp = cp_flags & MM_CP_UFFD_WP; 1786292924b2SPeter Xu bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; 1787cd7548abSJohannes Weiner 17884a18419fSNadav Amit tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 17894a18419fSNadav Amit 1790e346e668SYang Shi if (prot_numa && !thp_migration_supported()) 1791e346e668SYang Shi return 1; 1792e346e668SYang Shi 1793b6ec57f4SKirill A. Shutemov ptl = __pmd_trans_huge_lock(pmd, vma); 17940a85e51dSKirill A. Shutemov if (!ptl) 17950a85e51dSKirill A. Shutemov return 0; 17960a85e51dSKirill A. Shutemov 17970a85e51dSKirill A. Shutemov preserve_write = prot_numa && pmd_write(*pmd); 1798ba68bc01SMel Gorman ret = 1; 1799e944fd67SMel Gorman 180084c3fc4eSZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 180184c3fc4eSZi Yan if (is_swap_pmd(*pmd)) { 180284c3fc4eSZi Yan swp_entry_t entry = pmd_to_swp_entry(*pmd); 18036c287605SDavid Hildenbrand struct page *page = pfn_swap_entry_to_page(entry); 180484c3fc4eSZi Yan 180584c3fc4eSZi Yan VM_BUG_ON(!is_pmd_migration_entry(*pmd)); 18064dd845b5SAlistair Popple if (is_writable_migration_entry(entry)) { 180784c3fc4eSZi Yan pmd_t newpmd; 180884c3fc4eSZi Yan /* 180984c3fc4eSZi Yan * A protection check is difficult so 181084c3fc4eSZi Yan * just be safe and disable write 181184c3fc4eSZi Yan */ 18126c287605SDavid Hildenbrand if (PageAnon(page)) 18136c287605SDavid Hildenbrand entry = make_readable_exclusive_migration_entry(swp_offset(entry)); 18146c287605SDavid Hildenbrand else 18156c287605SDavid Hildenbrand entry = make_readable_migration_entry(swp_offset(entry)); 181684c3fc4eSZi Yan newpmd = swp_entry_to_pmd(entry); 1817ab6e3d09SNaoya Horiguchi if (pmd_swp_soft_dirty(*pmd)) 1818ab6e3d09SNaoya Horiguchi newpmd = pmd_swp_mksoft_dirty(newpmd); 18198f34f1eaSPeter Xu if (pmd_swp_uffd_wp(*pmd)) 18208f34f1eaSPeter Xu newpmd = pmd_swp_mkuffd_wp(newpmd); 182184c3fc4eSZi Yan set_pmd_at(mm, addr, pmd, newpmd); 182284c3fc4eSZi Yan } 182384c3fc4eSZi Yan goto unlock; 182484c3fc4eSZi Yan } 182584c3fc4eSZi Yan #endif 182684c3fc4eSZi Yan 1827a1a3a2fcSHuang Ying if (prot_numa) { 1828a1a3a2fcSHuang Ying struct page *page; 182933024536SHuang Ying bool toptier; 1830e944fd67SMel Gorman /* 1831e944fd67SMel Gorman * Avoid trapping faults against the zero page. The read-only 1832e944fd67SMel Gorman * data is likely to be read-cached on the local CPU and 1833e944fd67SMel Gorman * local/remote hits to the zero page are not interesting. 
1834e944fd67SMel Gorman */ 1835a1a3a2fcSHuang Ying if (is_huge_zero_pmd(*pmd)) 18360a85e51dSKirill A. Shutemov goto unlock; 1837e944fd67SMel Gorman 1838a1a3a2fcSHuang Ying if (pmd_protnone(*pmd)) 18390a85e51dSKirill A. Shutemov goto unlock; 18400a85e51dSKirill A. Shutemov 1841a1a3a2fcSHuang Ying page = pmd_page(*pmd); 184233024536SHuang Ying toptier = node_is_toptier(page_to_nid(page)); 1843a1a3a2fcSHuang Ying /* 1844a1a3a2fcSHuang Ying * Skip scanning top tier node if normal numa 1845a1a3a2fcSHuang Ying * balancing is disabled 1846a1a3a2fcSHuang Ying */ 1847a1a3a2fcSHuang Ying if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) && 184833024536SHuang Ying toptier) 1849a1a3a2fcSHuang Ying goto unlock; 185033024536SHuang Ying 185133024536SHuang Ying if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING && 185233024536SHuang Ying !toptier) 185333024536SHuang Ying xchg_page_access_time(page, jiffies_to_msecs(jiffies)); 1854a1a3a2fcSHuang Ying } 1855ced10803SKirill A. Shutemov /* 18563e4e28c5SMichel Lespinasse * In case prot_numa, we are under mmap_read_lock(mm). It's critical 1857ced10803SKirill A. Shutemov * to not clear pmd intermittently to avoid race with MADV_DONTNEED 18583e4e28c5SMichel Lespinasse * which is also under mmap_read_lock(mm): 1859ced10803SKirill A. Shutemov * 1860ced10803SKirill A. Shutemov * CPU0: CPU1: 1861ced10803SKirill A. Shutemov * change_huge_pmd(prot_numa=1) 1862ced10803SKirill A. Shutemov * pmdp_huge_get_and_clear_notify() 1863ced10803SKirill A. Shutemov * madvise_dontneed() 1864ced10803SKirill A. Shutemov * zap_pmd_range() 1865ced10803SKirill A. Shutemov * pmd_trans_huge(*pmd) == 0 (without ptl) 1866ced10803SKirill A. Shutemov * // skip the pmd 1867ced10803SKirill A. Shutemov * set_pmd_at(); 1868ced10803SKirill A. Shutemov * // pmd is re-established 1869ced10803SKirill A. Shutemov * 1870ced10803SKirill A. Shutemov * The race makes MADV_DONTNEED miss the huge pmd and don't clear it 1871ced10803SKirill A. Shutemov * which may break userspace. 1872ced10803SKirill A. Shutemov * 18734f831457SNadav Amit * pmdp_invalidate_ad() is required to make sure we don't miss 1874ced10803SKirill A. Shutemov * dirty/young flags set by hardware. 1875ced10803SKirill A. Shutemov */ 18764f831457SNadav Amit oldpmd = pmdp_invalidate_ad(vma, addr, pmd); 1877ced10803SKirill A. Shutemov 1878c9fe6656SNadav Amit entry = pmd_modify(oldpmd, newprot); 1879b191f9b1SMel Gorman if (preserve_write) 1880288bc549SAneesh Kumar K.V entry = pmd_mk_savedwrite(entry); 1881292924b2SPeter Xu if (uffd_wp) { 1882292924b2SPeter Xu entry = pmd_wrprotect(entry); 1883292924b2SPeter Xu entry = pmd_mkuffd_wp(entry); 1884292924b2SPeter Xu } else if (uffd_wp_resolve) { 1885292924b2SPeter Xu /* 1886292924b2SPeter Xu * Leave the write bit to be handled by PF interrupt 1887292924b2SPeter Xu * handler, then things like COW could be properly 1888292924b2SPeter Xu * handled. 1889292924b2SPeter Xu */ 1890292924b2SPeter Xu entry = pmd_clear_uffd_wp(entry); 1891292924b2SPeter Xu } 1892f123d74aSMel Gorman ret = HPAGE_PMD_NR; 189356eecdb9SAneesh Kumar K.V set_pmd_at(mm, addr, pmd, entry); 18944a18419fSNadav Amit 1895c9fe6656SNadav Amit if (huge_pmd_needs_flush(oldpmd, entry)) 18964a18419fSNadav Amit tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE); 18974a18419fSNadav Amit 18980a85e51dSKirill A. Shutemov BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry)); 18990a85e51dSKirill A. Shutemov unlock: 1900bf929152SKirill A. 
Shutemov spin_unlock(ptl); 1901cd7548abSJohannes Weiner return ret; 1902cd7548abSJohannes Weiner } 1903cd7548abSJohannes Weiner 1904025c5b24SNaoya Horiguchi /* 19058f19b0c0SHuang Ying * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise. 1906025c5b24SNaoya Horiguchi * 19078f19b0c0SHuang Ying * Note that if it returns page table lock pointer, this routine returns without 19088f19b0c0SHuang Ying * unlocking page table lock. So callers must unlock it. 1909025c5b24SNaoya Horiguchi */ 1910b6ec57f4SKirill A. Shutemov spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) 1911025c5b24SNaoya Horiguchi { 1912b6ec57f4SKirill A. Shutemov spinlock_t *ptl; 1913b6ec57f4SKirill A. Shutemov ptl = pmd_lock(vma->vm_mm, pmd); 191484c3fc4eSZi Yan if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || 191584c3fc4eSZi Yan pmd_devmap(*pmd))) 1916b6ec57f4SKirill A. Shutemov return ptl; 1917b6ec57f4SKirill A. Shutemov spin_unlock(ptl); 1918b6ec57f4SKirill A. Shutemov return NULL; 1919025c5b24SNaoya Horiguchi } 1920025c5b24SNaoya Horiguchi 1921a00cc7d9SMatthew Wilcox /* 1922d965e390SMiaohe Lin * Returns page table lock pointer if a given pud maps a thp, NULL otherwise. 1923a00cc7d9SMatthew Wilcox * 1924d965e390SMiaohe Lin * Note that if it returns page table lock pointer, this routine returns without 1925d965e390SMiaohe Lin * unlocking page table lock. So callers must unlock it. 1926a00cc7d9SMatthew Wilcox */ 1927a00cc7d9SMatthew Wilcox spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) 1928a00cc7d9SMatthew Wilcox { 1929a00cc7d9SMatthew Wilcox spinlock_t *ptl; 1930a00cc7d9SMatthew Wilcox 1931a00cc7d9SMatthew Wilcox ptl = pud_lock(vma->vm_mm, pud); 1932a00cc7d9SMatthew Wilcox if (likely(pud_trans_huge(*pud) || pud_devmap(*pud))) 1933a00cc7d9SMatthew Wilcox return ptl; 1934a00cc7d9SMatthew Wilcox spin_unlock(ptl); 1935a00cc7d9SMatthew Wilcox return NULL; 1936a00cc7d9SMatthew Wilcox } 1937a00cc7d9SMatthew Wilcox 1938a00cc7d9SMatthew Wilcox #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 1939a00cc7d9SMatthew Wilcox int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, 1940a00cc7d9SMatthew Wilcox pud_t *pud, unsigned long addr) 1941a00cc7d9SMatthew Wilcox { 1942a00cc7d9SMatthew Wilcox spinlock_t *ptl; 1943a00cc7d9SMatthew Wilcox 1944a00cc7d9SMatthew Wilcox ptl = __pud_trans_huge_lock(pud, vma); 1945a00cc7d9SMatthew Wilcox if (!ptl) 1946a00cc7d9SMatthew Wilcox return 0; 194774929079SMiaohe Lin 194870516b93SQian Cai pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm); 1949a00cc7d9SMatthew Wilcox tlb_remove_pud_tlb_entry(tlb, pud, addr); 19502484ca9bSThomas Hellstrom (VMware) if (vma_is_special_huge(vma)) { 1951a00cc7d9SMatthew Wilcox spin_unlock(ptl); 1952a00cc7d9SMatthew Wilcox /* No zero page support yet */ 1953a00cc7d9SMatthew Wilcox } else { 1954a00cc7d9SMatthew Wilcox /* No support for anonymous PUD pages yet */ 1955a00cc7d9SMatthew Wilcox BUG(); 1956a00cc7d9SMatthew Wilcox } 1957a00cc7d9SMatthew Wilcox return 1; 1958a00cc7d9SMatthew Wilcox } 1959a00cc7d9SMatthew Wilcox 1960a00cc7d9SMatthew Wilcox static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, 1961a00cc7d9SMatthew Wilcox unsigned long haddr) 1962a00cc7d9SMatthew Wilcox { 1963a00cc7d9SMatthew Wilcox VM_BUG_ON(haddr & ~HPAGE_PUD_MASK); 1964a00cc7d9SMatthew Wilcox VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 1965a00cc7d9SMatthew Wilcox VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); 1966a00cc7d9SMatthew Wilcox VM_BUG_ON(!pud_trans_huge(*pud) 
&& !pud_devmap(*pud)); 1967a00cc7d9SMatthew Wilcox 1968ce9311cfSYisheng Xie count_vm_event(THP_SPLIT_PUD); 1969a00cc7d9SMatthew Wilcox 1970a00cc7d9SMatthew Wilcox pudp_huge_clear_flush_notify(vma, haddr, pud); 1971a00cc7d9SMatthew Wilcox } 1972a00cc7d9SMatthew Wilcox 1973a00cc7d9SMatthew Wilcox void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, 1974a00cc7d9SMatthew Wilcox unsigned long address) 1975a00cc7d9SMatthew Wilcox { 1976a00cc7d9SMatthew Wilcox spinlock_t *ptl; 1977ac46d4f3SJérôme Glisse struct mmu_notifier_range range; 1978a00cc7d9SMatthew Wilcox 19797269f999SJérôme Glisse mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, 19806f4f13e8SJérôme Glisse address & HPAGE_PUD_MASK, 1981ac46d4f3SJérôme Glisse (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE); 1982ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 1983ac46d4f3SJérôme Glisse ptl = pud_lock(vma->vm_mm, pud); 1984a00cc7d9SMatthew Wilcox if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud))) 1985a00cc7d9SMatthew Wilcox goto out; 1986ac46d4f3SJérôme Glisse __split_huge_pud_locked(vma, pud, range.start); 1987a00cc7d9SMatthew Wilcox 1988a00cc7d9SMatthew Wilcox out: 1989a00cc7d9SMatthew Wilcox spin_unlock(ptl); 19904645b9feSJérôme Glisse /* 19914645b9feSJérôme Glisse * No need to double call mmu_notifier->invalidate_range() callback as 19924645b9feSJérôme Glisse * the above pudp_huge_clear_flush_notify() did already call it. 19934645b9feSJérôme Glisse */ 1994ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_only_end(&range); 1995a00cc7d9SMatthew Wilcox } 1996a00cc7d9SMatthew Wilcox #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 1997a00cc7d9SMatthew Wilcox 1998eef1b3baSKirill A. Shutemov static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, 1999eef1b3baSKirill A. Shutemov unsigned long haddr, pmd_t *pmd) 2000eef1b3baSKirill A. Shutemov { 2001eef1b3baSKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 2002eef1b3baSKirill A. Shutemov pgtable_t pgtable; 2003eef1b3baSKirill A. Shutemov pmd_t _pmd; 2004eef1b3baSKirill A. Shutemov int i; 2005eef1b3baSKirill A. Shutemov 20060f10851eSJérôme Glisse /* 20070f10851eSJérôme Glisse * Leave pmd empty until pte is filled note that it is fine to delay 20080f10851eSJérôme Glisse * notification until mmu_notifier_invalidate_range_end() as we are 20090f10851eSJérôme Glisse * replacing a zero pmd write protected page with a zero pte write 20100f10851eSJérôme Glisse * protected page. 20110f10851eSJérôme Glisse * 2012ee65728eSMike Rapoport * See Documentation/mm/mmu_notifier.rst 20130f10851eSJérôme Glisse */ 20140f10851eSJérôme Glisse pmdp_huge_clear_flush(vma, haddr, pmd); 2015eef1b3baSKirill A. Shutemov 2016eef1b3baSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2017eef1b3baSKirill A. Shutemov pmd_populate(mm, &_pmd, pgtable); 2018eef1b3baSKirill A. Shutemov 2019eef1b3baSKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 2020eef1b3baSKirill A. Shutemov pte_t *pte, entry; 2021eef1b3baSKirill A. Shutemov entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); 2022eef1b3baSKirill A. Shutemov entry = pte_mkspecial(entry); 2023eef1b3baSKirill A. Shutemov pte = pte_offset_map(&_pmd, haddr); 2024eef1b3baSKirill A. Shutemov VM_BUG_ON(!pte_none(*pte)); 2025eef1b3baSKirill A. Shutemov set_pte_at(mm, haddr, pte, entry); 2026eef1b3baSKirill A. Shutemov pte_unmap(pte); 2027eef1b3baSKirill A. Shutemov } 2028eef1b3baSKirill A. Shutemov smp_wmb(); /* make pte visible before pmd */ 2029eef1b3baSKirill A. 
Shutemov pmd_populate(mm, pmd, pgtable); 2030eef1b3baSKirill A. Shutemov } 2031eef1b3baSKirill A. Shutemov 2032eef1b3baSKirill A. Shutemov static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, 2033ba988280SKirill A. Shutemov unsigned long haddr, bool freeze) 2034eef1b3baSKirill A. Shutemov { 2035eef1b3baSKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 2036eef1b3baSKirill A. Shutemov struct page *page; 2037eef1b3baSKirill A. Shutemov pgtable_t pgtable; 2038423ac9afSAneesh Kumar K.V pmd_t old_pmd, _pmd; 2039292924b2SPeter Xu bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false; 20400ccf7f16SPeter Xu bool anon_exclusive = false, dirty = false; 20412ac015e2SKirill A. Shutemov unsigned long addr; 2042eef1b3baSKirill A. Shutemov int i; 2043eef1b3baSKirill A. Shutemov 2044eef1b3baSKirill A. Shutemov VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); 2045eef1b3baSKirill A. Shutemov VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 2046eef1b3baSKirill A. Shutemov VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); 204784c3fc4eSZi Yan VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd) 204884c3fc4eSZi Yan && !pmd_devmap(*pmd)); 2049eef1b3baSKirill A. Shutemov 2050eef1b3baSKirill A. Shutemov count_vm_event(THP_SPLIT_PMD); 2051eef1b3baSKirill A. Shutemov 2052d21b9e57SKirill A. Shutemov if (!vma_is_anonymous(vma)) { 205399fa8a48SHugh Dickins old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); 2054953c66c2SAneesh Kumar K.V /* 2055953c66c2SAneesh Kumar K.V * We are going to unmap this huge page. So 2056953c66c2SAneesh Kumar K.V * just go ahead and zap it 2057953c66c2SAneesh Kumar K.V */ 2058953c66c2SAneesh Kumar K.V if (arch_needs_pgtable_deposit()) 2059953c66c2SAneesh Kumar K.V zap_deposited_table(mm, pmd); 20602484ca9bSThomas Hellstrom (VMware) if (vma_is_special_huge(vma)) 2061d21b9e57SKirill A. Shutemov return; 206299fa8a48SHugh Dickins if (unlikely(is_pmd_migration_entry(old_pmd))) { 206399fa8a48SHugh Dickins swp_entry_t entry; 206499fa8a48SHugh Dickins 206599fa8a48SHugh Dickins entry = pmd_to_swp_entry(old_pmd); 2066af5cdaf8SAlistair Popple page = pfn_swap_entry_to_page(entry); 206799fa8a48SHugh Dickins } else { 206899fa8a48SHugh Dickins page = pmd_page(old_pmd); 206999fa8a48SHugh Dickins if (!PageDirty(page) && pmd_dirty(old_pmd)) 2070e1f1b157SHugh Dickins set_page_dirty(page); 207199fa8a48SHugh Dickins if (!PageReferenced(page) && pmd_young(old_pmd)) 2072d21b9e57SKirill A. Shutemov SetPageReferenced(page); 2073cea86fe2SHugh Dickins page_remove_rmap(page, vma, true); 2074d21b9e57SKirill A. Shutemov put_page(page); 207599fa8a48SHugh Dickins } 2076fadae295SYang Shi add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR); 2077eef1b3baSKirill A. Shutemov return; 207899fa8a48SHugh Dickins } 207999fa8a48SHugh Dickins 20803b77e8c8SHugh Dickins if (is_huge_zero_pmd(*pmd)) { 20814645b9feSJérôme Glisse /* 20824645b9feSJérôme Glisse * FIXME: Do we want to invalidate secondary mmu by calling 20834645b9feSJérôme Glisse * mmu_notifier_invalidate_range() see comments below inside 20844645b9feSJérôme Glisse * __split_huge_pmd() ? 20854645b9feSJérôme Glisse * 20864645b9feSJérôme Glisse * We are going from a zero huge page write protected to zero 20874645b9feSJérôme Glisse * small page also write protected so it does not seems useful 20884645b9feSJérôme Glisse * to invalidate secondary mmu at this time. 20894645b9feSJérôme Glisse */ 2090eef1b3baSKirill A. Shutemov return __split_huge_zero_page_pmd(vma, haddr, pmd); 2091eef1b3baSKirill A. 
Shutemov }
2092eef1b3baSKirill A. Shutemov
2093423ac9afSAneesh Kumar K.V /*
2094423ac9afSAneesh Kumar K.V * Up to this point the pmd is present and huge and userland has the
2095423ac9afSAneesh Kumar K.V * whole access to the hugepage during the split (which happens in
2096423ac9afSAneesh Kumar K.V * place). If we overwrite the pmd with the not-huge version pointing
2097423ac9afSAneesh Kumar K.V * to the pte here (which of course we could if all CPUs were bug
2098423ac9afSAneesh Kumar K.V * free), userland could trigger a small page size TLB miss on the
2099423ac9afSAneesh Kumar K.V * small sized TLB while the hugepage TLB entry is still established in
2100423ac9afSAneesh Kumar K.V * the huge TLB. Some CPUs don't like that.
210142742d9bSAlexander A. Klimov * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
210242742d9bSAlexander A. Klimov * 383 on page 105. Intel should be safe but also warns that it's
2103423ac9afSAneesh Kumar K.V * only safe if the permission and cache attributes of the two entries
2104423ac9afSAneesh Kumar K.V * loaded in the two TLBs are identical (which should be the case here).
2105423ac9afSAneesh Kumar K.V * But it is generally safer to never allow small and huge TLB entries
2106423ac9afSAneesh Kumar K.V * for the same virtual address to be loaded simultaneously. So instead
2107423ac9afSAneesh Kumar K.V * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
2108423ac9afSAneesh Kumar K.V * current pmd not-present (atomically because here the pmd_trans_huge
2109423ac9afSAneesh Kumar K.V * must remain set at all times on the pmd until the split is complete
2110423ac9afSAneesh Kumar K.V * for this pmd), then we flush the SMP TLB and finally we write the
2111423ac9afSAneesh Kumar K.V * non-huge version of the pmd entry with pmd_populate.
2112423ac9afSAneesh Kumar K.V */ 2113423ac9afSAneesh Kumar K.V old_pmd = pmdp_invalidate(vma, haddr, pmd); 2114423ac9afSAneesh Kumar K.V 2115423ac9afSAneesh Kumar K.V pmd_migration = is_pmd_migration_entry(old_pmd); 21162e83ee1dSPeter Xu if (unlikely(pmd_migration)) { 211784c3fc4eSZi Yan swp_entry_t entry; 211884c3fc4eSZi Yan 2119423ac9afSAneesh Kumar K.V entry = pmd_to_swp_entry(old_pmd); 2120af5cdaf8SAlistair Popple page = pfn_swap_entry_to_page(entry); 21214dd845b5SAlistair Popple write = is_writable_migration_entry(entry); 21226c287605SDavid Hildenbrand if (PageAnon(page)) 21236c287605SDavid Hildenbrand anon_exclusive = is_readable_exclusive_migration_entry(entry); 21242e346877SPeter Xu young = is_migration_entry_young(entry); 21252e346877SPeter Xu dirty = is_migration_entry_dirty(entry); 21262e83ee1dSPeter Xu soft_dirty = pmd_swp_soft_dirty(old_pmd); 2127f45ec5ffSPeter Xu uffd_wp = pmd_swp_uffd_wp(old_pmd); 21282e83ee1dSPeter Xu } else { 2129423ac9afSAneesh Kumar K.V page = pmd_page(old_pmd); 21300ccf7f16SPeter Xu if (pmd_dirty(old_pmd)) { 21310ccf7f16SPeter Xu dirty = true; 2132423ac9afSAneesh Kumar K.V SetPageDirty(page); 21330ccf7f16SPeter Xu } 2134423ac9afSAneesh Kumar K.V write = pmd_write(old_pmd); 2135423ac9afSAneesh Kumar K.V young = pmd_young(old_pmd); 2136423ac9afSAneesh Kumar K.V soft_dirty = pmd_soft_dirty(old_pmd); 2137292924b2SPeter Xu uffd_wp = pmd_uffd_wp(old_pmd); 21386c287605SDavid Hildenbrand 21392e83ee1dSPeter Xu VM_BUG_ON_PAGE(!page_count(page), page); 21402e83ee1dSPeter Xu page_ref_add(page, HPAGE_PMD_NR - 1); 21416c287605SDavid Hildenbrand 21426c287605SDavid Hildenbrand /* 21436c287605SDavid Hildenbrand * Without "freeze", we'll simply split the PMD, propagating the 21446c287605SDavid Hildenbrand * PageAnonExclusive() flag for each PTE by setting it for 21456c287605SDavid Hildenbrand * each subpage -- no need to (temporarily) clear. 21466c287605SDavid Hildenbrand * 21476c287605SDavid Hildenbrand * With "freeze" we want to replace mapped pages by 21486c287605SDavid Hildenbrand * migration entries right away. This is only possible if we 21496c287605SDavid Hildenbrand * managed to clear PageAnonExclusive() -- see 21506c287605SDavid Hildenbrand * set_pmd_migration_entry(). 21516c287605SDavid Hildenbrand * 21526c287605SDavid Hildenbrand * In case we cannot clear PageAnonExclusive(), split the PMD 21536c287605SDavid Hildenbrand * only and let try_to_migrate_one() fail later. 2154088b8aa5SDavid Hildenbrand * 2155088b8aa5SDavid Hildenbrand * See page_try_share_anon_rmap(): invalidate PMD first. 21566c287605SDavid Hildenbrand */ 21576c287605SDavid Hildenbrand anon_exclusive = PageAnon(page) && PageAnonExclusive(page); 21586c287605SDavid Hildenbrand if (freeze && anon_exclusive && page_try_share_anon_rmap(page)) 21596c287605SDavid Hildenbrand freeze = false; 21609d84604bSHugh Dickins } 2161eef1b3baSKirill A. Shutemov 2162423ac9afSAneesh Kumar K.V /* 2163423ac9afSAneesh Kumar K.V * Withdraw the table only after we mark the pmd entry invalid. 2164423ac9afSAneesh Kumar K.V * This's critical for some architectures (Power). 2165423ac9afSAneesh Kumar K.V */ 2166eef1b3baSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2167eef1b3baSKirill A. Shutemov pmd_populate(mm, &_pmd, pgtable); 2168eef1b3baSKirill A. Shutemov 21692ac015e2SKirill A. Shutemov for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { 2170eef1b3baSKirill A. Shutemov pte_t entry, *pte; 2171eef1b3baSKirill A. Shutemov /* 2172eef1b3baSKirill A. 
Shutemov * Note that NUMA hinting access restrictions are not 2173eef1b3baSKirill A. Shutemov * transferred to avoid any possibility of altering 2174eef1b3baSKirill A. Shutemov * permissions across VMAs. 2175eef1b3baSKirill A. Shutemov */ 217684c3fc4eSZi Yan if (freeze || pmd_migration) { 2177ba988280SKirill A. Shutemov swp_entry_t swp_entry; 21784dd845b5SAlistair Popple if (write) 21794dd845b5SAlistair Popple swp_entry = make_writable_migration_entry( 21804dd845b5SAlistair Popple page_to_pfn(page + i)); 21816c287605SDavid Hildenbrand else if (anon_exclusive) 21826c287605SDavid Hildenbrand swp_entry = make_readable_exclusive_migration_entry( 21836c287605SDavid Hildenbrand page_to_pfn(page + i)); 21844dd845b5SAlistair Popple else 21854dd845b5SAlistair Popple swp_entry = make_readable_migration_entry( 21864dd845b5SAlistair Popple page_to_pfn(page + i)); 21872e346877SPeter Xu if (young) 21882e346877SPeter Xu swp_entry = make_migration_entry_young(swp_entry); 21892e346877SPeter Xu if (dirty) 21902e346877SPeter Xu swp_entry = make_migration_entry_dirty(swp_entry); 2191ba988280SKirill A. Shutemov entry = swp_entry_to_pte(swp_entry); 2192804dd150SAndrea Arcangeli if (soft_dirty) 2193804dd150SAndrea Arcangeli entry = pte_swp_mksoft_dirty(entry); 2194f45ec5ffSPeter Xu if (uffd_wp) 2195f45ec5ffSPeter Xu entry = pte_swp_mkuffd_wp(entry); 2196ba988280SKirill A. Shutemov } else { 21976d2329f8SAndrea Arcangeli entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); 2198b8d3c4c3SMinchan Kim entry = maybe_mkwrite(entry, vma); 21996c287605SDavid Hildenbrand if (anon_exclusive) 22006c287605SDavid Hildenbrand SetPageAnonExclusive(page + i); 2201eef1b3baSKirill A. Shutemov if (!write) 2202eef1b3baSKirill A. Shutemov entry = pte_wrprotect(entry); 2203eef1b3baSKirill A. Shutemov if (!young) 2204eef1b3baSKirill A. Shutemov entry = pte_mkold(entry); 22050ccf7f16SPeter Xu /* NOTE: this may set soft-dirty too on some archs */ 22060ccf7f16SPeter Xu if (dirty) 22070ccf7f16SPeter Xu entry = pte_mkdirty(entry); 2208804dd150SAndrea Arcangeli if (soft_dirty) 2209804dd150SAndrea Arcangeli entry = pte_mksoft_dirty(entry); 2210292924b2SPeter Xu if (uffd_wp) 2211292924b2SPeter Xu entry = pte_mkuffd_wp(entry); 2212ba988280SKirill A. Shutemov } 22132ac015e2SKirill A. Shutemov pte = pte_offset_map(&_pmd, addr); 2214eef1b3baSKirill A. Shutemov BUG_ON(!pte_none(*pte)); 22152ac015e2SKirill A. Shutemov set_pte_at(mm, addr, pte, entry); 2216ec0abae6SRalph Campbell if (!pmd_migration) 2217eef1b3baSKirill A. Shutemov atomic_inc(&page[i]._mapcount); 2218eef1b3baSKirill A. Shutemov pte_unmap(pte); 2219eef1b3baSKirill A. Shutemov } 2220eef1b3baSKirill A. Shutemov 2221ec0abae6SRalph Campbell if (!pmd_migration) { 2222eef1b3baSKirill A. Shutemov /* 2223eef1b3baSKirill A. Shutemov * Set PG_double_map before dropping compound_mapcount to avoid 2224eef1b3baSKirill A. Shutemov * false-negative page_mapped(). 2225eef1b3baSKirill A. Shutemov */ 2226ec0abae6SRalph Campbell if (compound_mapcount(page) > 1 && 2227ec0abae6SRalph Campbell !TestSetPageDoubleMap(page)) { 2228eef1b3baSKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) 2229eef1b3baSKirill A. Shutemov atomic_inc(&page[i]._mapcount); 2230eef1b3baSKirill A. Shutemov } 2231eef1b3baSKirill A. Shutemov 2232468c3982SJohannes Weiner lock_page_memcg(page); 2233eef1b3baSKirill A. Shutemov if (atomic_add_negative(-1, compound_mapcount_ptr(page))) { 2234eef1b3baSKirill A. Shutemov /* Last compound_mapcount is gone. 
*/ 223569473e5dSMuchun Song __mod_lruvec_page_state(page, NR_ANON_THPS, 223669473e5dSMuchun Song -HPAGE_PMD_NR); 2237eef1b3baSKirill A. Shutemov if (TestClearPageDoubleMap(page)) { 2238eef1b3baSKirill A. Shutemov /* No need in mapcount reference anymore */ 2239eef1b3baSKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) 2240eef1b3baSKirill A. Shutemov atomic_dec(&page[i]._mapcount); 2241eef1b3baSKirill A. Shutemov } 2242eef1b3baSKirill A. Shutemov } 2243468c3982SJohannes Weiner unlock_page_memcg(page); 2244cea86fe2SHugh Dickins 2245cea86fe2SHugh Dickins /* Above is effectively page_remove_rmap(page, vma, true) */ 2246cea86fe2SHugh Dickins munlock_vma_page(page, vma, true); 2247ec0abae6SRalph Campbell } 2248eef1b3baSKirill A. Shutemov 2249eef1b3baSKirill A. Shutemov smp_wmb(); /* make pte visible before pmd */ 2250eef1b3baSKirill A. Shutemov pmd_populate(mm, pmd, pgtable); 2251e9b61f19SKirill A. Shutemov 2252e9b61f19SKirill A. Shutemov if (freeze) { 22532ac015e2SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) { 2254cea86fe2SHugh Dickins page_remove_rmap(page + i, vma, false); 2255e9b61f19SKirill A. Shutemov put_page(page + i); 2256e9b61f19SKirill A. Shutemov } 2257e9b61f19SKirill A. Shutemov } 2258eef1b3baSKirill A. Shutemov } 2259eef1b3baSKirill A. Shutemov 2260eef1b3baSKirill A. Shutemov void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 2261af28a988SMatthew Wilcox (Oracle) unsigned long address, bool freeze, struct folio *folio) 2262eef1b3baSKirill A. Shutemov { 2263eef1b3baSKirill A. Shutemov spinlock_t *ptl; 2264ac46d4f3SJérôme Glisse struct mmu_notifier_range range; 2265eef1b3baSKirill A. Shutemov 22667269f999SJérôme Glisse mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, 22676f4f13e8SJérôme Glisse address & HPAGE_PMD_MASK, 2268ac46d4f3SJérôme Glisse (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE); 2269ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 2270ac46d4f3SJérôme Glisse ptl = pmd_lock(vma->vm_mm, pmd); 227133f4751eSNaoya Horiguchi 227233f4751eSNaoya Horiguchi /* 2273af28a988SMatthew Wilcox (Oracle) * If caller asks to setup a migration entry, we need a folio to check 2274af28a988SMatthew Wilcox (Oracle) * pmd against. Otherwise we can end up replacing wrong folio. 227533f4751eSNaoya Horiguchi */ 2276af28a988SMatthew Wilcox (Oracle) VM_BUG_ON(freeze && !folio); 227783a8441fSMatthew Wilcox (Oracle) VM_WARN_ON_ONCE(folio && !folio_test_locked(folio)); 227833f4751eSNaoya Horiguchi 22797f760917SDavid Hildenbrand if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) || 228083a8441fSMatthew Wilcox (Oracle) is_pmd_migration_entry(*pmd)) { 2281cea33328SMiaohe Lin /* 2282cea33328SMiaohe Lin * It's safe to call pmd_page when folio is set because it's 2283cea33328SMiaohe Lin * guaranteed that pmd is present. 2284cea33328SMiaohe Lin */ 228583a8441fSMatthew Wilcox (Oracle) if (folio && folio != page_folio(pmd_page(*pmd))) 228683a8441fSMatthew Wilcox (Oracle) goto out; 2287ac46d4f3SJérôme Glisse __split_huge_pmd_locked(vma, pmd, range.start, freeze); 228883a8441fSMatthew Wilcox (Oracle) } 22897f760917SDavid Hildenbrand 2290e90309c9SKirill A. Shutemov out: 2291eef1b3baSKirill A. Shutemov spin_unlock(ptl); 22924645b9feSJérôme Glisse /* 22934645b9feSJérôme Glisse * No need to double call mmu_notifier->invalidate_range() callback. 
22944645b9feSJérôme Glisse * There are 3 cases to consider inside __split_huge_pmd_locked(): 22954645b9feSJérôme Glisse * 1) pmdp_huge_clear_flush_notify() calls invalidate_range(), obviously 22964645b9feSJérôme Glisse * 2) __split_huge_zero_page_pmd() handles the read-only zero page, and any write 22974645b9feSJérôme Glisse * fault will trigger a flush_notify before pointing to a new page 22984645b9feSJérôme Glisse * (it is fine if the secondary mmu keeps pointing to the old zero 22994645b9feSJérôme Glisse * page in the meantime) 23004645b9feSJérôme Glisse * 3) Split a huge pmd into ptes pointing to the same page. No need 23014645b9feSJérôme Glisse * to invalidate secondary tlb entries; they are all still valid. 23024645b9feSJérôme Glisse * Any further changes to individual ptes will notify. So no need 23034645b9feSJérôme Glisse * to call mmu_notifier->invalidate_range() 23044645b9feSJérôme Glisse */ 2305ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_only_end(&range); 2306eef1b3baSKirill A. Shutemov } 2307eef1b3baSKirill A. Shutemov 2308fec89c10SKirill A. Shutemov void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, 2309af28a988SMatthew Wilcox (Oracle) bool freeze, struct folio *folio) 231094fcc585SAndrea Arcangeli { 231150722804SZach O'Keefe pmd_t *pmd = mm_find_pmd(vma->vm_mm, address); 231294fcc585SAndrea Arcangeli 231350722804SZach O'Keefe if (!pmd) 2314f72e7dcdSHugh Dickins return; 2315f72e7dcdSHugh Dickins 2316af28a988SMatthew Wilcox (Oracle) __split_huge_pmd(vma, pmd, address, freeze, folio); 231794fcc585SAndrea Arcangeli } 231894fcc585SAndrea Arcangeli 231971f9e58eSMiaohe Lin static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address) 232071f9e58eSMiaohe Lin { 232171f9e58eSMiaohe Lin /* 232271f9e58eSMiaohe Lin * If the new address isn't hpage aligned and it could previously 232371f9e58eSMiaohe Lin * contain a hugepage: check if we need to split a huge pmd. 232471f9e58eSMiaohe Lin */ 232571f9e58eSMiaohe Lin if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) && 232671f9e58eSMiaohe Lin range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE), 232771f9e58eSMiaohe Lin ALIGN(address, HPAGE_PMD_SIZE))) 232871f9e58eSMiaohe Lin split_huge_pmd_address(vma, address, false, NULL); 232971f9e58eSMiaohe Lin } 233071f9e58eSMiaohe Lin 2331e1b9996bSKirill A. Shutemov void vma_adjust_trans_huge(struct vm_area_struct *vma, 233294fcc585SAndrea Arcangeli unsigned long start, 233394fcc585SAndrea Arcangeli unsigned long end, 233494fcc585SAndrea Arcangeli long adjust_next) 233594fcc585SAndrea Arcangeli { 233671f9e58eSMiaohe Lin /* Check if we need to split start first. */ 233771f9e58eSMiaohe Lin split_huge_pmd_if_needed(vma, start); 233871f9e58eSMiaohe Lin 233971f9e58eSMiaohe Lin /* Check if we need to split end next. */ 234071f9e58eSMiaohe Lin split_huge_pmd_if_needed(vma, end); 234171f9e58eSMiaohe Lin 234294fcc585SAndrea Arcangeli 234294fcc585SAndrea Arcangeli /* 234371f9e58eSMiaohe Lin * If we're also updating the vma->vm_next->vm_start, 234471f9e58eSMiaohe Lin * check if we need to split it. 234594fcc585SAndrea Arcangeli */ 234694fcc585SAndrea Arcangeli if (adjust_next > 0) { 234794fcc585SAndrea Arcangeli struct vm_area_struct *next = vma->vm_next; 234894fcc585SAndrea Arcangeli unsigned long nstart = next->vm_start; 2349f9d86a60SWei Yang nstart += adjust_next; 235071f9e58eSMiaohe Lin split_huge_pmd_if_needed(next, nstart); 235194fcc585SAndrea Arcangeli } 235294fcc585SAndrea Arcangeli } 2353e9b61f19SKirill A.
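The two helpers above are what keep ordinary VMA surgery (mremap, mprotect, partial munmap) coherent with huge PMDs: whenever a new VMA boundary is not HPAGE_PMD_SIZE aligned, the PMD covering it is split back to PTEs first. Below is a minimal, illustrative user-space sketch of the observable effect; it is not part of this file, and it assumes an x86-64 system with 2 MiB huge pages, THP enabled ("always" or "madvise"), a 4 KiB base page size, and a kernel exposing /proc/self/smaps_rollup. The helper name show_anon_huge() is mine.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define SZ (4UL << 20)	/* two candidate 2 MiB huge pages */

static void show_anon_huge(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/smaps_rollup", "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "AnonHugePages:", 14))
			printf("%s", line);
	fclose(f);
}

int main(void)
{
	char *p = mmap(NULL, SZ, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	madvise(p, SZ, MADV_HUGEPAGE);
	memset(p, 1, SZ);		/* fault the range in, hopefully as THPs */
	show_anon_huge();

	/*
	 * Punch a one-page hole at a non-2MiB-aligned address: the new VMA
	 * boundaries force the kernel to split the covering huge PMD (the
	 * vma_adjust_trans_huge() -> split_huge_pmd_if_needed() path above,
	 * among others).
	 */
	munmap(p + SZ / 2 + 4096, 4096);
	show_anon_huge();
	return 0;
}

If both candidate pages were mapped huge, AnonHugePages should drop by 2048 kB after the unaligned munmap(), because only the PMD covering the hole had to be split.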
Shutemov 2354906f9cdfSHugh Dickins static void unmap_page(struct page *page) 2355e9b61f19SKirill A. Shutemov { 2356869f7ee6SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 2357a98a2f0cSAlistair Popple enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | 2358a98a2f0cSAlistair Popple TTU_SYNC; 2359e9b61f19SKirill A. Shutemov 2360e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(!PageHead(page), page); 2361e9b61f19SKirill A. Shutemov 2362a98a2f0cSAlistair Popple /* 2363a98a2f0cSAlistair Popple * Anon pages need migration entries to preserve them, but file 2364a98a2f0cSAlistair Popple * pages can simply be left unmapped, then faulted back on demand. 2365a98a2f0cSAlistair Popple * If that is ever changed (perhaps for mlock), update remap_page(). 2366a98a2f0cSAlistair Popple */ 23674b8554c5SMatthew Wilcox (Oracle) if (folio_test_anon(folio)) 23684b8554c5SMatthew Wilcox (Oracle) try_to_migrate(folio, ttu_flags); 2369a98a2f0cSAlistair Popple else 2370869f7ee6SMatthew Wilcox (Oracle) try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK); 2371bd56086fSKirill A. Shutemov } 2372bd56086fSKirill A. Shutemov 23734eecb8b9SMatthew Wilcox (Oracle) static void remap_page(struct folio *folio, unsigned long nr) 2374e9b61f19SKirill A. Shutemov { 23754eecb8b9SMatthew Wilcox (Oracle) int i = 0; 2376ab02c252SHugh Dickins 237764b586d1SHugh Dickins /* If unmap_page() uses try_to_migrate() on file, remove this check */ 23784eecb8b9SMatthew Wilcox (Oracle) if (!folio_test_anon(folio)) 2379ab02c252SHugh Dickins return; 23804eecb8b9SMatthew Wilcox (Oracle) for (;;) { 23814eecb8b9SMatthew Wilcox (Oracle) remove_migration_ptes(folio, folio, true); 23824eecb8b9SMatthew Wilcox (Oracle) i += folio_nr_pages(folio); 23834eecb8b9SMatthew Wilcox (Oracle) if (i >= nr) 23844eecb8b9SMatthew Wilcox (Oracle) break; 23854eecb8b9SMatthew Wilcox (Oracle) folio = folio_next(folio); 2386e9b61f19SKirill A. Shutemov } 2387ace71a19SKirill A. Shutemov } 2388e9b61f19SKirill A. Shutemov 238994866635SAlex Shi static void lru_add_page_tail(struct page *head, struct page *tail, 239088dcb9a3SAlex Shi struct lruvec *lruvec, struct list_head *list) 239188dcb9a3SAlex Shi { 239294866635SAlex Shi VM_BUG_ON_PAGE(!PageHead(head), head); 239394866635SAlex Shi VM_BUG_ON_PAGE(PageCompound(tail), head); 239494866635SAlex Shi VM_BUG_ON_PAGE(PageLRU(tail), head); 23956168d0daSAlex Shi lockdep_assert_held(&lruvec->lru_lock); 239688dcb9a3SAlex Shi 23976dbb5741SAlex Shi if (list) { 239888dcb9a3SAlex Shi /* page reclaim is reclaiming a huge page */ 23996dbb5741SAlex Shi VM_WARN_ON(PageLRU(head)); 240094866635SAlex Shi get_page(tail); 240194866635SAlex Shi list_add_tail(&tail->lru, list); 240288dcb9a3SAlex Shi } else { 24036dbb5741SAlex Shi /* head is still on lru (and we have it frozen) */ 24046dbb5741SAlex Shi VM_WARN_ON(!PageLRU(head)); 240507ca7606SHugh Dickins if (PageUnevictable(tail)) 240607ca7606SHugh Dickins tail->mlock_count = 0; 240707ca7606SHugh Dickins else 24086dbb5741SAlex Shi list_add_tail(&tail->lru, &head->lru); 240907ca7606SHugh Dickins SetPageLRU(tail); 241088dcb9a3SAlex Shi } 241188dcb9a3SAlex Shi } 241288dcb9a3SAlex Shi 24138df651c7SKirill A. Shutemov static void __split_huge_page_tail(struct page *head, int tail, 2414e9b61f19SKirill A. Shutemov struct lruvec *lruvec, struct list_head *list) 2415e9b61f19SKirill A. Shutemov { 2416e9b61f19SKirill A. Shutemov struct page *page_tail = head + tail; 2417e9b61f19SKirill A. Shutemov 24188df651c7SKirill A. 
Shutemov VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail); 2419e9b61f19SKirill A. Shutemov 2420e9b61f19SKirill A. Shutemov /* 2421605ca5edSKonstantin Khlebnikov * Clone page flags before unfreezing refcount. 2422605ca5edSKonstantin Khlebnikov * 2423605ca5edSKonstantin Khlebnikov * After successful get_page_unless_zero() might follow flags change, 24248958b249SHaitao Shi * for example lock_page() which set PG_waiters. 24256c287605SDavid Hildenbrand * 24266c287605SDavid Hildenbrand * Note that for mapped sub-pages of an anonymous THP, 24276c287605SDavid Hildenbrand * PG_anon_exclusive has been cleared in unmap_page() and is stored in 24286c287605SDavid Hildenbrand * the migration entry instead from where remap_page() will restore it. 24296c287605SDavid Hildenbrand * We can still have PG_anon_exclusive set on effectively unmapped and 24306c287605SDavid Hildenbrand * unreferenced sub-pages of an anonymous THP: we can simply drop 24316c287605SDavid Hildenbrand * PG_anon_exclusive (-> PG_mappedtodisk) for these here. 2432e9b61f19SKirill A. Shutemov */ 2433e9b61f19SKirill A. Shutemov page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 2434e9b61f19SKirill A. Shutemov page_tail->flags |= (head->flags & 2435e9b61f19SKirill A. Shutemov ((1L << PG_referenced) | 2436e9b61f19SKirill A. Shutemov (1L << PG_swapbacked) | 243738d8b4e6SHuang Ying (1L << PG_swapcache) | 2438e9b61f19SKirill A. Shutemov (1L << PG_mlocked) | 2439e9b61f19SKirill A. Shutemov (1L << PG_uptodate) | 2440e9b61f19SKirill A. Shutemov (1L << PG_active) | 24411899ad18SJohannes Weiner (1L << PG_workingset) | 2442e9b61f19SKirill A. Shutemov (1L << PG_locked) | 2443b8d3c4c3SMinchan Kim (1L << PG_unevictable) | 244472e6afa0SCatalin Marinas #ifdef CONFIG_64BIT 244572e6afa0SCatalin Marinas (1L << PG_arch_2) | 244672e6afa0SCatalin Marinas #endif 2447*ec1c86b2SYu Zhao (1L << PG_dirty) | 2448*ec1c86b2SYu Zhao LRU_GEN_MASK | LRU_REFS_MASK)); 2449e9b61f19SKirill A. Shutemov 2450173d9d9fSHugh Dickins /* ->mapping in first tail page is compound_mapcount */ 2451173d9d9fSHugh Dickins VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING, 2452173d9d9fSHugh Dickins page_tail); 2453173d9d9fSHugh Dickins page_tail->mapping = head->mapping; 2454173d9d9fSHugh Dickins page_tail->index = head->index + tail; 2455b653db77SMatthew Wilcox (Oracle) page_tail->private = 0; 2456173d9d9fSHugh Dickins 2457605ca5edSKonstantin Khlebnikov /* Page flags must be visible before we make the page non-compound. */ 2458e9b61f19SKirill A. Shutemov smp_wmb(); 2459e9b61f19SKirill A. Shutemov 2460605ca5edSKonstantin Khlebnikov /* 2461605ca5edSKonstantin Khlebnikov * Clear PageTail before unfreezing page refcount. 2462605ca5edSKonstantin Khlebnikov * 2463605ca5edSKonstantin Khlebnikov * After successful get_page_unless_zero() might follow put_page() 2464605ca5edSKonstantin Khlebnikov * which needs correct compound_head(). 2465605ca5edSKonstantin Khlebnikov */ 2466e9b61f19SKirill A. Shutemov clear_compound_head(page_tail); 2467e9b61f19SKirill A. Shutemov 2468605ca5edSKonstantin Khlebnikov /* Finally unfreeze refcount. Additional reference from page cache. */ 2469605ca5edSKonstantin Khlebnikov page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) || 2470605ca5edSKonstantin Khlebnikov PageSwapCache(head))); 2471605ca5edSKonstantin Khlebnikov 2472e9b61f19SKirill A. Shutemov if (page_is_young(head)) 2473e9b61f19SKirill A. Shutemov set_page_young(page_tail); 2474e9b61f19SKirill A. Shutemov if (page_is_idle(head)) 2475e9b61f19SKirill A. 
Shutemov set_page_idle(page_tail); 2476e9b61f19SKirill A. Shutemov 2477e9b61f19SKirill A. Shutemov page_cpupid_xchg_last(page_tail, page_cpupid_last(head)); 247894723aafSMichal Hocko 247994723aafSMichal Hocko /* 248094723aafSMichal Hocko * always add to the tail because some iterators expect new 248194723aafSMichal Hocko * pages to show after the currently processed elements - e.g. 248294723aafSMichal Hocko * migrate_pages 248394723aafSMichal Hocko */ 2484e9b61f19SKirill A. Shutemov lru_add_page_tail(head, page_tail, lruvec, list); 2485e9b61f19SKirill A. Shutemov } 2486e9b61f19SKirill A. Shutemov 2487baa355fdSKirill A. Shutemov static void __split_huge_page(struct page *page, struct list_head *list, 2488b6769834SAlex Shi pgoff_t end) 2489e9b61f19SKirill A. Shutemov { 2490e809c3feSMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 2491e809c3feSMatthew Wilcox (Oracle) struct page *head = &folio->page; 2492e9b61f19SKirill A. Shutemov struct lruvec *lruvec; 24934101196bSMatthew Wilcox (Oracle) struct address_space *swap_cache = NULL; 24944101196bSMatthew Wilcox (Oracle) unsigned long offset = 0; 24958cce5475SKirill A. Shutemov unsigned int nr = thp_nr_pages(head); 24968df651c7SKirill A. Shutemov int i; 2497e9b61f19SKirill A. Shutemov 2498e9b61f19SKirill A. Shutemov /* complete memcg works before add pages to LRU */ 2499be6c8982SZhou Guanghui split_page_memcg(head, nr); 2500e9b61f19SKirill A. Shutemov 25014101196bSMatthew Wilcox (Oracle) if (PageAnon(head) && PageSwapCache(head)) { 25024101196bSMatthew Wilcox (Oracle) swp_entry_t entry = { .val = page_private(head) }; 25034101196bSMatthew Wilcox (Oracle) 25044101196bSMatthew Wilcox (Oracle) offset = swp_offset(entry); 25054101196bSMatthew Wilcox (Oracle) swap_cache = swap_address_space(entry); 25064101196bSMatthew Wilcox (Oracle) xa_lock(&swap_cache->i_pages); 25074101196bSMatthew Wilcox (Oracle) } 25084101196bSMatthew Wilcox (Oracle) 2509f0953a1bSIngo Molnar /* lock lru list/PageCompound, ref frozen by page_ref_freeze */ 2510e809c3feSMatthew Wilcox (Oracle) lruvec = folio_lruvec_lock(folio); 2511b6769834SAlex Shi 2512eac96c3eSYang Shi ClearPageHasHWPoisoned(head); 2513eac96c3eSYang Shi 25148cce5475SKirill A. Shutemov for (i = nr - 1; i >= 1; i--) { 25158df651c7SKirill A. Shutemov __split_huge_page_tail(head, i, lruvec, list); 2516d144bf62SHugh Dickins /* Some pages can be beyond EOF: drop them from page cache */ 2517baa355fdSKirill A. Shutemov if (head[i].index >= end) { 2518fb5c2029SMatthew Wilcox (Oracle) struct folio *tail = page_folio(head + i); 2519fb5c2029SMatthew Wilcox (Oracle) 2520d144bf62SHugh Dickins if (shmem_mapping(head->mapping)) 2521800d8c63SKirill A. Shutemov shmem_uncharge(head->mapping->host, 1); 2522fb5c2029SMatthew Wilcox (Oracle) else if (folio_test_clear_dirty(tail)) 2523fb5c2029SMatthew Wilcox (Oracle) folio_account_cleaned(tail, 2524fb5c2029SMatthew Wilcox (Oracle) inode_to_wb(folio->mapping->host)); 2525fb5c2029SMatthew Wilcox (Oracle) __filemap_remove_folio(tail, NULL); 2526fb5c2029SMatthew Wilcox (Oracle) folio_put(tail); 25274101196bSMatthew Wilcox (Oracle) } else if (!PageAnon(page)) { 25284101196bSMatthew Wilcox (Oracle) __xa_store(&head->mapping->i_pages, head[i].index, 25294101196bSMatthew Wilcox (Oracle) head + i, 0); 25304101196bSMatthew Wilcox (Oracle) } else if (swap_cache) { 25314101196bSMatthew Wilcox (Oracle) __xa_store(&swap_cache->i_pages, offset + i, 25324101196bSMatthew Wilcox (Oracle) head + i, 0); 2533baa355fdSKirill A. Shutemov } 2534baa355fdSKirill A. 
Shutemov } 2535e9b61f19SKirill A. Shutemov 2536e9b61f19SKirill A. Shutemov ClearPageCompound(head); 25376168d0daSAlex Shi unlock_page_lruvec(lruvec); 2538b6769834SAlex Shi /* Caller disabled irqs, so they are still disabled here */ 2539f7da677bSVlastimil Babka 25408cce5475SKirill A. Shutemov split_page_owner(head, nr); 2541f7da677bSVlastimil Babka 2542baa355fdSKirill A. Shutemov /* See comment in __split_huge_page_tail() */ 2543baa355fdSKirill A. Shutemov if (PageAnon(head)) { 2544aa5dc07fSMatthew Wilcox /* Additional pin to swap cache */ 25454101196bSMatthew Wilcox (Oracle) if (PageSwapCache(head)) { 254638d8b4e6SHuang Ying page_ref_add(head, 2); 25474101196bSMatthew Wilcox (Oracle) xa_unlock(&swap_cache->i_pages); 25484101196bSMatthew Wilcox (Oracle) } else { 2549baa355fdSKirill A. Shutemov page_ref_inc(head); 25504101196bSMatthew Wilcox (Oracle) } 2551baa355fdSKirill A. Shutemov } else { 2552aa5dc07fSMatthew Wilcox /* Additional pin to page cache */ 2553baa355fdSKirill A. Shutemov page_ref_add(head, 2); 2554b93b0163SMatthew Wilcox xa_unlock(&head->mapping->i_pages); 2555baa355fdSKirill A. Shutemov } 2556b6769834SAlex Shi local_irq_enable(); 2557e9b61f19SKirill A. Shutemov 25584eecb8b9SMatthew Wilcox (Oracle) remap_page(folio, nr); 2559e9b61f19SKirill A. Shutemov 2560c4f9c701SHuang Ying if (PageSwapCache(head)) { 2561c4f9c701SHuang Ying swp_entry_t entry = { .val = page_private(head) }; 2562c4f9c701SHuang Ying 2563c4f9c701SHuang Ying split_swap_cluster(entry); 2564c4f9c701SHuang Ying } 2565c4f9c701SHuang Ying 25668cce5475SKirill A. Shutemov for (i = 0; i < nr; i++) { 2567e9b61f19SKirill A. Shutemov struct page *subpage = head + i; 2568e9b61f19SKirill A. Shutemov if (subpage == page) 2569e9b61f19SKirill A. Shutemov continue; 2570e9b61f19SKirill A. Shutemov unlock_page(subpage); 2571e9b61f19SKirill A. Shutemov 2572e9b61f19SKirill A. Shutemov /* 2573e9b61f19SKirill A. Shutemov * Subpages may be freed if there wasn't any mapping 2574e9b61f19SKirill A. Shutemov * like if add_to_swap() is running on a lru page that 2575e9b61f19SKirill A. Shutemov * had its mapping zapped. And freeing these pages 2576e9b61f19SKirill A. Shutemov * requires taking the lru_lock so we do the put_page 2577e9b61f19SKirill A. Shutemov * of the tail pages after the split is complete. 2578e9b61f19SKirill A. Shutemov */ 25790b175468SMiaohe Lin free_page_and_swap_cache(subpage); 2580e9b61f19SKirill A. Shutemov } 2581e9b61f19SKirill A. Shutemov } 2582e9b61f19SKirill A. Shutemov 2583b8f593cdSHuang Ying /* Racy check whether the huge page can be split */ 2584d4b4084aSMatthew Wilcox (Oracle) bool can_split_folio(struct folio *folio, int *pextra_pins) 2585b8f593cdSHuang Ying { 2586b8f593cdSHuang Ying int extra_pins; 2587b8f593cdSHuang Ying 2588aa5dc07fSMatthew Wilcox /* Additional pins from page cache */ 2589d4b4084aSMatthew Wilcox (Oracle) if (folio_test_anon(folio)) 2590d4b4084aSMatthew Wilcox (Oracle) extra_pins = folio_test_swapcache(folio) ? 2591d4b4084aSMatthew Wilcox (Oracle) folio_nr_pages(folio) : 0; 2592b8f593cdSHuang Ying else 2593d4b4084aSMatthew Wilcox (Oracle) extra_pins = folio_nr_pages(folio); 2594b8f593cdSHuang Ying if (pextra_pins) 2595b8f593cdSHuang Ying *pextra_pins = extra_pins; 2596d4b4084aSMatthew Wilcox (Oracle) return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1; 2597b8f593cdSHuang Ying } 2598b8f593cdSHuang Ying 25996d0a07edSAndrea Arcangeli /* 2600e9b61f19SKirill A. Shutemov * This function splits huge page into normal pages. @page can point to any 2601e9b61f19SKirill A. 
Shutemov * subpage of huge page to split. Split doesn't change the position of @page. 2602e9b61f19SKirill A. Shutemov * 2603e9b61f19SKirill A. Shutemov * The caller must hold the only pin on the @page, otherwise split fails with -EBUSY. 2604e9b61f19SKirill A. Shutemov * The huge page must be locked. 2605e9b61f19SKirill A. Shutemov * 2606e9b61f19SKirill A. Shutemov * If @list is null, tail pages will be added to the LRU list, otherwise, to @list. 2607e9b61f19SKirill A. Shutemov * 2608e9b61f19SKirill A. Shutemov * Both head page and tail pages will inherit mapping, flags, and so on from 2609e9b61f19SKirill A. Shutemov * the hugepage. 2610e9b61f19SKirill A. Shutemov * 2611e9b61f19SKirill A. Shutemov * The GUP pin and PG_locked are transferred to @page. The rest of the subpages can be freed if 2612e9b61f19SKirill A. Shutemov * they are not mapped. 2613e9b61f19SKirill A. Shutemov * 2614e9b61f19SKirill A. Shutemov * Returns 0 if the hugepage is split successfully. 2615e9b61f19SKirill A. Shutemov * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under 2616e9b61f19SKirill A. Shutemov * us. 2617e9b61f19SKirill A. Shutemov */ 2618e9b61f19SKirill A. Shutemov int split_huge_page_to_list(struct page *page, struct list_head *list) 2619e9b61f19SKirill A. Shutemov { 26204eecb8b9SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 26214eecb8b9SMatthew Wilcox (Oracle) struct page *head = &folio->page; 2622a8803e6cSWei Yang struct deferred_split *ds_queue = get_deferred_split_queue(head); 26236b24ca4aSMatthew Wilcox (Oracle) XA_STATE(xas, &head->mapping->i_pages, head->index); 2624baa355fdSKirill A. Shutemov struct anon_vma *anon_vma = NULL; 2625baa355fdSKirill A. Shutemov struct address_space *mapping = NULL; 2626504e070dSYang Shi int extra_pins, ret; 2627006d3ff2SHugh Dickins pgoff_t end; 2628478d134eSXu Yu bool is_hzp; 2629e9b61f19SKirill A. Shutemov 2630a8803e6cSWei Yang VM_BUG_ON_PAGE(!PageLocked(head), head); 2631a8803e6cSWei Yang VM_BUG_ON_PAGE(!PageCompound(head), head); 2632e9b61f19SKirill A. Shutemov 2633478d134eSXu Yu is_hzp = is_huge_zero_page(head); 2634478d134eSXu Yu VM_WARN_ON_ONCE_PAGE(is_hzp, head); 2635478d134eSXu Yu if (is_hzp) 2636478d134eSXu Yu return -EBUSY; 2637478d134eSXu Yu 2638a8803e6cSWei Yang if (PageWriteback(head)) 263959807685SHuang Ying return -EBUSY; 264059807685SHuang Ying 2641baa355fdSKirill A. Shutemov if (PageAnon(head)) { 2642e9b61f19SKirill A. Shutemov /* 2643c1e8d7c6SMichel Lespinasse * The caller does not necessarily hold an mmap_lock that would 2644baa355fdSKirill A. Shutemov * prevent the anon_vma disappearing so we first take a 2645baa355fdSKirill A. Shutemov * reference to it and then lock the anon_vma for write. This 26462f031c6fSMatthew Wilcox (Oracle) * is similar to folio_lock_anon_vma_read except the write lock 2647baa355fdSKirill A. Shutemov * is taken to serialise against parallel split or collapse 2648baa355fdSKirill A. Shutemov * operations. 2649e9b61f19SKirill A. Shutemov */ 2650e9b61f19SKirill A. Shutemov anon_vma = page_get_anon_vma(head); 2651e9b61f19SKirill A. Shutemov if (!anon_vma) { 2652e9b61f19SKirill A. Shutemov ret = -EBUSY; 2653e9b61f19SKirill A. Shutemov goto out; 2654e9b61f19SKirill A. Shutemov } 2655006d3ff2SHugh Dickins end = -1; 2656baa355fdSKirill A. Shutemov mapping = NULL; 2657e9b61f19SKirill A. Shutemov anon_vma_lock_write(anon_vma); 2658baa355fdSKirill A. Shutemov } else { 26596a3edd29SYin Fengwei gfp_t gfp; 26606a3edd29SYin Fengwei 2661baa355fdSKirill A. Shutemov mapping = head->mapping; 2662baa355fdSKirill A.
Shutemov 2663baa355fdSKirill A. Shutemov /* Truncated ? */ 2664baa355fdSKirill A. Shutemov if (!mapping) { 2665baa355fdSKirill A. Shutemov ret = -EBUSY; 2666baa355fdSKirill A. Shutemov goto out; 2667baa355fdSKirill A. Shutemov } 2668baa355fdSKirill A. Shutemov 26696a3edd29SYin Fengwei gfp = current_gfp_context(mapping_gfp_mask(mapping) & 26706a3edd29SYin Fengwei GFP_RECLAIM_MASK); 26716a3edd29SYin Fengwei 26726a3edd29SYin Fengwei if (folio_test_private(folio) && 26736a3edd29SYin Fengwei !filemap_release_folio(folio, gfp)) { 26746a3edd29SYin Fengwei ret = -EBUSY; 26756a3edd29SYin Fengwei goto out; 26766a3edd29SYin Fengwei } 26776a3edd29SYin Fengwei 26786a3edd29SYin Fengwei xas_split_alloc(&xas, head, compound_order(head), gfp); 26796b24ca4aSMatthew Wilcox (Oracle) if (xas_error(&xas)) { 26806b24ca4aSMatthew Wilcox (Oracle) ret = xas_error(&xas); 26816b24ca4aSMatthew Wilcox (Oracle) goto out; 26826b24ca4aSMatthew Wilcox (Oracle) } 26836b24ca4aSMatthew Wilcox (Oracle) 2684baa355fdSKirill A. Shutemov anon_vma = NULL; 2685baa355fdSKirill A. Shutemov i_mmap_lock_read(mapping); 2686006d3ff2SHugh Dickins 2687006d3ff2SHugh Dickins /* 2688006d3ff2SHugh Dickins *__split_huge_page() may need to trim off pages beyond EOF: 2689006d3ff2SHugh Dickins * but on 32-bit, i_size_read() takes an irq-unsafe seqlock, 2690006d3ff2SHugh Dickins * which cannot be nested inside the page tree lock. So note 2691006d3ff2SHugh Dickins * end now: i_size itself may be changed at any moment, but 2692006d3ff2SHugh Dickins * head page lock is good enough to serialize the trimming. 2693006d3ff2SHugh Dickins */ 2694006d3ff2SHugh Dickins end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); 2695d144bf62SHugh Dickins if (shmem_mapping(mapping)) 2696d144bf62SHugh Dickins end = shmem_fallocend(mapping->host, end); 2697baa355fdSKirill A. Shutemov } 2698e9b61f19SKirill A. Shutemov 2699e9b61f19SKirill A. Shutemov /* 2700906f9cdfSHugh Dickins * Racy check if we can split the page, before unmap_page() will 2701e9b61f19SKirill A. Shutemov * split PMDs 2702e9b61f19SKirill A. Shutemov */ 2703d4b4084aSMatthew Wilcox (Oracle) if (!can_split_folio(folio, &extra_pins)) { 2704e9b61f19SKirill A. Shutemov ret = -EBUSY; 2705e9b61f19SKirill A. Shutemov goto out_unlock; 2706e9b61f19SKirill A. Shutemov } 2707e9b61f19SKirill A. Shutemov 2708906f9cdfSHugh Dickins unmap_page(head); 2709e9b61f19SKirill A. Shutemov 2710b6769834SAlex Shi /* block interrupt reentry in xa_lock and spinlock */ 2711b6769834SAlex Shi local_irq_disable(); 2712baa355fdSKirill A. Shutemov if (mapping) { 2713baa355fdSKirill A. Shutemov /* 2714aa5dc07fSMatthew Wilcox * Check if the head page is present in page cache. 2715baa355fdSKirill A. Shutemov * We assume all tail are present too, if head is there. 2716baa355fdSKirill A. Shutemov */ 27176b24ca4aSMatthew Wilcox (Oracle) xas_lock(&xas); 27186b24ca4aSMatthew Wilcox (Oracle) xas_reset(&xas); 2719aa5dc07fSMatthew Wilcox if (xas_load(&xas) != head) 2720baa355fdSKirill A. Shutemov goto fail; 2721baa355fdSKirill A. Shutemov } 2722baa355fdSKirill A. Shutemov 27230139aa7bSJoonsoo Kim /* Prevent deferred_split_scan() touching ->_refcount */ 2724364c1eebSYang Shi spin_lock(&ds_queue->split_queue_lock); 2725504e070dSYang Shi if (page_ref_freeze(head, 1 + extra_pins)) { 27269a982250SKirill A. Shutemov if (!list_empty(page_deferred_list(head))) { 2727364c1eebSYang Shi ds_queue->split_queue_len--; 27289a982250SKirill A. Shutemov list_del(page_deferred_list(head)); 27299a982250SKirill A. 
Shutemov } 2730afb97172SWei Yang spin_unlock(&ds_queue->split_queue_lock); 273106d3eff6SKirill A. Shutemov if (mapping) { 2732bf9eceadSMuchun Song int nr = thp_nr_pages(head); 2733bf9eceadSMuchun Song 27346b24ca4aSMatthew Wilcox (Oracle) xas_split(&xas, head, thp_order(head)); 27351ca7554dSMarek Szyprowski if (PageSwapBacked(head)) { 273657b2847dSMuchun Song __mod_lruvec_page_state(head, NR_SHMEM_THPS, 273757b2847dSMuchun Song -nr); 27381ca7554dSMarek Szyprowski } else { 2739bf9eceadSMuchun Song __mod_lruvec_page_state(head, NR_FILE_THPS, 2740bf9eceadSMuchun Song -nr); 27411ca7554dSMarek Szyprowski filemap_nr_thps_dec(mapping); 27421ca7554dSMarek Szyprowski } 274306d3eff6SKirill A. Shutemov } 274406d3eff6SKirill A. Shutemov 2745b6769834SAlex Shi __split_huge_page(page, list, end); 2746e9b61f19SKirill A. Shutemov ret = 0; 2747baa355fdSKirill A. Shutemov } else { 2748364c1eebSYang Shi spin_unlock(&ds_queue->split_queue_lock); 2749504e070dSYang Shi fail: 2750504e070dSYang Shi if (mapping) 27516b24ca4aSMatthew Wilcox (Oracle) xas_unlock(&xas); 2752b6769834SAlex Shi local_irq_enable(); 27534eecb8b9SMatthew Wilcox (Oracle) remap_page(folio, folio_nr_pages(folio)); 2754e9b61f19SKirill A. Shutemov ret = -EBUSY; 2755e9b61f19SKirill A. Shutemov } 2756e9b61f19SKirill A. Shutemov 2757e9b61f19SKirill A. Shutemov out_unlock: 2758baa355fdSKirill A. Shutemov if (anon_vma) { 2759e9b61f19SKirill A. Shutemov anon_vma_unlock_write(anon_vma); 2760e9b61f19SKirill A. Shutemov put_anon_vma(anon_vma); 2761baa355fdSKirill A. Shutemov } 2762baa355fdSKirill A. Shutemov if (mapping) 2763baa355fdSKirill A. Shutemov i_mmap_unlock_read(mapping); 2764e9b61f19SKirill A. Shutemov out: 276569a37a8bSMatthew Wilcox (Oracle) xas_destroy(&xas); 2766e9b61f19SKirill A. Shutemov count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); 2767e9b61f19SKirill A. Shutemov return ret; 2768e9b61f19SKirill A. Shutemov } 27699a982250SKirill A. Shutemov 27709a982250SKirill A. Shutemov void free_transhuge_page(struct page *page) 27719a982250SKirill A. Shutemov { 277287eaceb3SYang Shi struct deferred_split *ds_queue = get_deferred_split_queue(page); 27739a982250SKirill A. Shutemov unsigned long flags; 27749a982250SKirill A. Shutemov 2775364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 27769a982250SKirill A. Shutemov if (!list_empty(page_deferred_list(page))) { 2777364c1eebSYang Shi ds_queue->split_queue_len--; 27789a982250SKirill A. Shutemov list_del(page_deferred_list(page)); 27799a982250SKirill A. Shutemov } 2780364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 27819a982250SKirill A. Shutemov free_compound_page(page); 27829a982250SKirill A. Shutemov } 27839a982250SKirill A. Shutemov 27849a982250SKirill A. Shutemov void deferred_split_huge_page(struct page *page) 27859a982250SKirill A. Shutemov { 278687eaceb3SYang Shi struct deferred_split *ds_queue = get_deferred_split_queue(page); 278787eaceb3SYang Shi #ifdef CONFIG_MEMCG 2788bcfe06bfSRoman Gushchin struct mem_cgroup *memcg = page_memcg(compound_head(page)); 278987eaceb3SYang Shi #endif 27909a982250SKirill A. Shutemov unsigned long flags; 27919a982250SKirill A. Shutemov 27929a982250SKirill A. Shutemov VM_BUG_ON_PAGE(!PageTransHuge(page), page); 27939a982250SKirill A. Shutemov 279487eaceb3SYang Shi /* 279587eaceb3SYang Shi * The try_to_unmap() in page reclaim path might reach here too, 279687eaceb3SYang Shi * this may cause a race condition to corrupt deferred split queue. 
279787eaceb3SYang Shi * And, if page reclaim is already handling the same page, it is 279887eaceb3SYang Shi * unnecessary to handle it again in shrinker. 279987eaceb3SYang Shi * 280087eaceb3SYang Shi * Check PageSwapCache to determine if the page is being 280187eaceb3SYang Shi * handled by page reclaim since THP swap would add the page into 280287eaceb3SYang Shi * swap cache before calling try_to_unmap(). 280387eaceb3SYang Shi */ 280487eaceb3SYang Shi if (PageSwapCache(page)) 280587eaceb3SYang Shi return; 280687eaceb3SYang Shi 2807364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 28089a982250SKirill A. Shutemov if (list_empty(page_deferred_list(page))) { 2809f9719a03SKirill A. Shutemov count_vm_event(THP_DEFERRED_SPLIT_PAGE); 2810364c1eebSYang Shi list_add_tail(page_deferred_list(page), &ds_queue->split_queue); 2811364c1eebSYang Shi ds_queue->split_queue_len++; 281287eaceb3SYang Shi #ifdef CONFIG_MEMCG 281387eaceb3SYang Shi if (memcg) 28142bfd3637SYang Shi set_shrinker_bit(memcg, page_to_nid(page), 281587eaceb3SYang Shi deferred_split_shrinker.id); 281687eaceb3SYang Shi #endif 28179a982250SKirill A. Shutemov } 2818364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 28199a982250SKirill A. Shutemov } 28209a982250SKirill A. Shutemov 28219a982250SKirill A. Shutemov static unsigned long deferred_split_count(struct shrinker *shrink, 28229a982250SKirill A. Shutemov struct shrink_control *sc) 28239a982250SKirill A. Shutemov { 2824a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(sc->nid); 2825364c1eebSYang Shi struct deferred_split *ds_queue = &pgdata->deferred_split_queue; 282687eaceb3SYang Shi 282787eaceb3SYang Shi #ifdef CONFIG_MEMCG 282887eaceb3SYang Shi if (sc->memcg) 282987eaceb3SYang Shi ds_queue = &sc->memcg->deferred_split_queue; 283087eaceb3SYang Shi #endif 2831364c1eebSYang Shi return READ_ONCE(ds_queue->split_queue_len); 28329a982250SKirill A. Shutemov } 28339a982250SKirill A. Shutemov 28349a982250SKirill A. Shutemov static unsigned long deferred_split_scan(struct shrinker *shrink, 28359a982250SKirill A. Shutemov struct shrink_control *sc) 28369a982250SKirill A. Shutemov { 2837a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(sc->nid); 2838364c1eebSYang Shi struct deferred_split *ds_queue = &pgdata->deferred_split_queue; 28399a982250SKirill A. Shutemov unsigned long flags; 28409a982250SKirill A. Shutemov LIST_HEAD(list), *pos, *next; 28419a982250SKirill A. Shutemov struct page *page; 28429a982250SKirill A. Shutemov int split = 0; 28439a982250SKirill A. Shutemov 284487eaceb3SYang Shi #ifdef CONFIG_MEMCG 284587eaceb3SYang Shi if (sc->memcg) 284687eaceb3SYang Shi ds_queue = &sc->memcg->deferred_split_queue; 284787eaceb3SYang Shi #endif 284887eaceb3SYang Shi 2849364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 28509a982250SKirill A. Shutemov /* Take pin on all head pages to avoid freeing them under us */ 2851364c1eebSYang Shi list_for_each_safe(pos, next, &ds_queue->split_queue) { 2852dfe5c51cSMiaohe Lin page = list_entry((void *)pos, struct page, deferred_list); 28539a982250SKirill A. Shutemov page = compound_head(page); 2854e3ae1953SKirill A. Shutemov if (get_page_unless_zero(page)) { 2855e3ae1953SKirill A. Shutemov list_move(page_deferred_list(page), &list); 2856e3ae1953SKirill A. Shutemov } else { 2857e3ae1953SKirill A. Shutemov /* We lost race with put_compound_page() */ 28589a982250SKirill A. 
Shutemov list_del_init(page_deferred_list(page)); 2859364c1eebSYang Shi ds_queue->split_queue_len--; 28609a982250SKirill A. Shutemov } 2861e3ae1953SKirill A. Shutemov if (!--sc->nr_to_scan) 2862e3ae1953SKirill A. Shutemov break; 28639a982250SKirill A. Shutemov } 2864364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 28659a982250SKirill A. Shutemov 28669a982250SKirill A. Shutemov list_for_each_safe(pos, next, &list) { 2867dfe5c51cSMiaohe Lin page = list_entry((void *)pos, struct page, deferred_list); 2868fa41b900SKirill A. Shutemov if (!trylock_page(page)) 2869fa41b900SKirill A. Shutemov goto next; 28709a982250SKirill A. Shutemov /* split_huge_page() removes page from list on success */ 28719a982250SKirill A. Shutemov if (!split_huge_page(page)) 28729a982250SKirill A. Shutemov split++; 28739a982250SKirill A. Shutemov unlock_page(page); 2874fa41b900SKirill A. Shutemov next: 28759a982250SKirill A. Shutemov put_page(page); 28769a982250SKirill A. Shutemov } 28779a982250SKirill A. Shutemov 2878364c1eebSYang Shi spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 2879364c1eebSYang Shi list_splice_tail(&list, &ds_queue->split_queue); 2880364c1eebSYang Shi spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 28819a982250SKirill A. Shutemov 2882cb8d68ecSKirill A. Shutemov /* 2883cb8d68ecSKirill A. Shutemov * Stop shrinker if we didn't split any page, but the queue is empty. 2884cb8d68ecSKirill A. Shutemov * This can happen if pages were freed under us. 2885cb8d68ecSKirill A. Shutemov */ 2886364c1eebSYang Shi if (!split && list_empty(&ds_queue->split_queue)) 2887cb8d68ecSKirill A. Shutemov return SHRINK_STOP; 2888cb8d68ecSKirill A. Shutemov return split; 28899a982250SKirill A. Shutemov } 28909a982250SKirill A. Shutemov 28919a982250SKirill A. Shutemov static struct shrinker deferred_split_shrinker = { 28929a982250SKirill A. Shutemov .count_objects = deferred_split_count, 28939a982250SKirill A. Shutemov .scan_objects = deferred_split_scan, 28949a982250SKirill A. Shutemov .seeks = DEFAULT_SEEKS, 289587eaceb3SYang Shi .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE | 289687eaceb3SYang Shi SHRINKER_NONSLAB, 28979a982250SKirill A. Shutemov }; 289849071d43SKirill A. Shutemov 289949071d43SKirill A. Shutemov #ifdef CONFIG_DEBUG_FS 2900fa6c0231SZi Yan static void split_huge_pages_all(void) 290149071d43SKirill A. Shutemov { 290249071d43SKirill A. Shutemov struct zone *zone; 290349071d43SKirill A. Shutemov struct page *page; 290449071d43SKirill A. Shutemov unsigned long pfn, max_zone_pfn; 290549071d43SKirill A. Shutemov unsigned long total = 0, split = 0; 290649071d43SKirill A. Shutemov 2907fa6c0231SZi Yan pr_debug("Split all THPs\n"); 2908a17206daSMiaohe Lin for_each_zone(zone) { 2909a17206daSMiaohe Lin if (!managed_zone(zone)) 2910a17206daSMiaohe Lin continue; 291149071d43SKirill A. Shutemov max_zone_pfn = zone_end_pfn(zone); 291249071d43SKirill A. Shutemov for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { 2913a17206daSMiaohe Lin int nr_pages; 291449071d43SKirill A. Shutemov 29152b7aa91bSNaoya Horiguchi page = pfn_to_online_page(pfn); 29162b7aa91bSNaoya Horiguchi if (!page || !get_page_unless_zero(page)) 291749071d43SKirill A. Shutemov continue; 291849071d43SKirill A. Shutemov 291949071d43SKirill A. Shutemov if (zone != page_zone(page)) 292049071d43SKirill A. Shutemov goto next; 292149071d43SKirill A. Shutemov 2922baa355fdSKirill A. Shutemov if (!PageHead(page) || PageHuge(page) || !PageLRU(page)) 292349071d43SKirill A. 
Shutemov goto next; 292449071d43SKirill A. Shutemov 292549071d43SKirill A. Shutemov total++; 292649071d43SKirill A. Shutemov lock_page(page); 2927a17206daSMiaohe Lin nr_pages = thp_nr_pages(page); 292849071d43SKirill A. Shutemov if (!split_huge_page(page)) 292949071d43SKirill A. Shutemov split++; 2930a17206daSMiaohe Lin pfn += nr_pages - 1; 293149071d43SKirill A. Shutemov unlock_page(page); 293249071d43SKirill A. Shutemov next: 293349071d43SKirill A. Shutemov put_page(page); 2934fa6c0231SZi Yan cond_resched(); 293549071d43SKirill A. Shutemov } 293649071d43SKirill A. Shutemov } 293749071d43SKirill A. Shutemov 2938fa6c0231SZi Yan pr_debug("%lu of %lu THP split\n", split, total); 293949071d43SKirill A. Shutemov } 2940fa6c0231SZi Yan 2941fa6c0231SZi Yan static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma) 2942fa6c0231SZi Yan { 2943fa6c0231SZi Yan return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) || 2944fa6c0231SZi Yan is_vm_hugetlb_page(vma); 2945fa6c0231SZi Yan } 2946fa6c0231SZi Yan 2947fa6c0231SZi Yan static int split_huge_pages_pid(int pid, unsigned long vaddr_start, 2948fa6c0231SZi Yan unsigned long vaddr_end) 2949fa6c0231SZi Yan { 2950fa6c0231SZi Yan int ret = 0; 2951fa6c0231SZi Yan struct task_struct *task; 2952fa6c0231SZi Yan struct mm_struct *mm; 2953fa6c0231SZi Yan unsigned long total = 0, split = 0; 2954fa6c0231SZi Yan unsigned long addr; 2955fa6c0231SZi Yan 2956fa6c0231SZi Yan vaddr_start &= PAGE_MASK; 2957fa6c0231SZi Yan vaddr_end &= PAGE_MASK; 2958fa6c0231SZi Yan 2959fa6c0231SZi Yan /* Find the task_struct from pid */ 2960fa6c0231SZi Yan rcu_read_lock(); 2961fa6c0231SZi Yan task = find_task_by_vpid(pid); 2962fa6c0231SZi Yan if (!task) { 2963fa6c0231SZi Yan rcu_read_unlock(); 2964fa6c0231SZi Yan ret = -ESRCH; 2965fa6c0231SZi Yan goto out; 2966fa6c0231SZi Yan } 2967fa6c0231SZi Yan get_task_struct(task); 2968fa6c0231SZi Yan rcu_read_unlock(); 2969fa6c0231SZi Yan 2970fa6c0231SZi Yan /* Find the mm_struct */ 2971fa6c0231SZi Yan mm = get_task_mm(task); 2972fa6c0231SZi Yan put_task_struct(task); 2973fa6c0231SZi Yan 2974fa6c0231SZi Yan if (!mm) { 2975fa6c0231SZi Yan ret = -EINVAL; 2976fa6c0231SZi Yan goto out; 2977fa6c0231SZi Yan } 2978fa6c0231SZi Yan 2979fa6c0231SZi Yan pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n", 2980fa6c0231SZi Yan pid, vaddr_start, vaddr_end); 2981fa6c0231SZi Yan 2982fa6c0231SZi Yan mmap_read_lock(mm); 2983fa6c0231SZi Yan /* 2984fa6c0231SZi Yan * always increase addr by PAGE_SIZE, since we could have a PTE page 2985fa6c0231SZi Yan * table filled with PTE-mapped THPs, each of which is distinct. 
2986fa6c0231SZi Yan */ 2987fa6c0231SZi Yan for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) { 298874ba2b38SMiaohe Lin struct vm_area_struct *vma = vma_lookup(mm, addr); 2989fa6c0231SZi Yan struct page *page; 2990fa6c0231SZi Yan 299174ba2b38SMiaohe Lin if (!vma) 2992fa6c0231SZi Yan break; 2993fa6c0231SZi Yan 2994fa6c0231SZi Yan /* skip special VMA and hugetlb VMA */ 2995fa6c0231SZi Yan if (vma_not_suitable_for_thp_split(vma)) { 2996fa6c0231SZi Yan addr = vma->vm_end; 2997fa6c0231SZi Yan continue; 2998fa6c0231SZi Yan } 2999fa6c0231SZi Yan 3000fa6c0231SZi Yan /* FOLL_DUMP to ignore special (like zero) pages */ 300187d2762eSMiaohe Lin page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP); 3002fa6c0231SZi Yan 3003e75858b9SMiaohe Lin if (IS_ERR_OR_NULL(page) || is_zone_device_page(page)) 3004fa6c0231SZi Yan continue; 3005fa6c0231SZi Yan 3006fa6c0231SZi Yan if (!is_transparent_hugepage(page)) 3007fa6c0231SZi Yan goto next; 3008fa6c0231SZi Yan 3009fa6c0231SZi Yan total++; 3010d4b4084aSMatthew Wilcox (Oracle) if (!can_split_folio(page_folio(page), NULL)) 3011fa6c0231SZi Yan goto next; 3012fa6c0231SZi Yan 3013fa6c0231SZi Yan if (!trylock_page(page)) 3014fa6c0231SZi Yan goto next; 3015fa6c0231SZi Yan 3016fa6c0231SZi Yan if (!split_huge_page(page)) 3017fa6c0231SZi Yan split++; 3018fa6c0231SZi Yan 3019fa6c0231SZi Yan unlock_page(page); 3020fa6c0231SZi Yan next: 3021fa6c0231SZi Yan put_page(page); 3022fa6c0231SZi Yan cond_resched(); 3023fa6c0231SZi Yan } 3024fa6c0231SZi Yan mmap_read_unlock(mm); 3025fa6c0231SZi Yan mmput(mm); 3026fa6c0231SZi Yan 3027fa6c0231SZi Yan pr_debug("%lu of %lu THP split\n", split, total); 3028fa6c0231SZi Yan 3029fa6c0231SZi Yan out: 3030fa6c0231SZi Yan return ret; 3031fa6c0231SZi Yan } 3032fa6c0231SZi Yan 3033fbe37501SZi Yan static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start, 3034fbe37501SZi Yan pgoff_t off_end) 3035fbe37501SZi Yan { 3036fbe37501SZi Yan struct filename *file; 3037fbe37501SZi Yan struct file *candidate; 3038fbe37501SZi Yan struct address_space *mapping; 3039fbe37501SZi Yan int ret = -EINVAL; 3040fbe37501SZi Yan pgoff_t index; 3041fbe37501SZi Yan int nr_pages = 1; 3042fbe37501SZi Yan unsigned long total = 0, split = 0; 3043fbe37501SZi Yan 3044fbe37501SZi Yan file = getname_kernel(file_path); 3045fbe37501SZi Yan if (IS_ERR(file)) 3046fbe37501SZi Yan return ret; 3047fbe37501SZi Yan 3048fbe37501SZi Yan candidate = file_open_name(file, O_RDONLY, 0); 3049fbe37501SZi Yan if (IS_ERR(candidate)) 3050fbe37501SZi Yan goto out; 3051fbe37501SZi Yan 3052fbe37501SZi Yan pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n", 3053fbe37501SZi Yan file_path, off_start, off_end); 3054fbe37501SZi Yan 3055fbe37501SZi Yan mapping = candidate->f_mapping; 3056fbe37501SZi Yan 3057fbe37501SZi Yan for (index = off_start; index < off_end; index += nr_pages) { 3058fbe37501SZi Yan struct page *fpage = pagecache_get_page(mapping, index, 3059fbe37501SZi Yan FGP_ENTRY | FGP_HEAD, 0); 3060fbe37501SZi Yan 3061fbe37501SZi Yan nr_pages = 1; 3062fbe37501SZi Yan if (xa_is_value(fpage) || !fpage) 3063fbe37501SZi Yan continue; 3064fbe37501SZi Yan 3065fbe37501SZi Yan if (!is_transparent_hugepage(fpage)) 3066fbe37501SZi Yan goto next; 3067fbe37501SZi Yan 3068fbe37501SZi Yan total++; 3069fbe37501SZi Yan nr_pages = thp_nr_pages(fpage); 3070fbe37501SZi Yan 3071fbe37501SZi Yan if (!trylock_page(fpage)) 3072fbe37501SZi Yan goto next; 3073fbe37501SZi Yan 3074fbe37501SZi Yan if (!split_huge_page(fpage)) 3075fbe37501SZi Yan split++; 3076fbe37501SZi Yan 
3077fbe37501SZi Yan unlock_page(fpage); 3078fbe37501SZi Yan next: 3079fbe37501SZi Yan put_page(fpage); 3080fbe37501SZi Yan cond_resched(); 3081fbe37501SZi Yan } 3082fbe37501SZi Yan 3083fbe37501SZi Yan filp_close(candidate, NULL); 3084fbe37501SZi Yan ret = 0; 3085fbe37501SZi Yan 3086fbe37501SZi Yan pr_debug("%lu of %lu file-backed THP split\n", split, total); 3087fbe37501SZi Yan out: 3088fbe37501SZi Yan putname(file); 3089fbe37501SZi Yan return ret; 3090fbe37501SZi Yan } 3091fbe37501SZi Yan 3092fa6c0231SZi Yan #define MAX_INPUT_BUF_SZ 255 3093fa6c0231SZi Yan 3094fa6c0231SZi Yan static ssize_t split_huge_pages_write(struct file *file, const char __user *buf, 3095fa6c0231SZi Yan size_t count, loff_t *ppops) 3096fa6c0231SZi Yan { 3097fa6c0231SZi Yan static DEFINE_MUTEX(split_debug_mutex); 3098fa6c0231SZi Yan ssize_t ret; 3099fbe37501SZi Yan /* hold pid, start_vaddr, end_vaddr or file_path, off_start, off_end */ 3100fbe37501SZi Yan char input_buf[MAX_INPUT_BUF_SZ]; 3101fa6c0231SZi Yan int pid; 3102fa6c0231SZi Yan unsigned long vaddr_start, vaddr_end; 3103fa6c0231SZi Yan 3104fa6c0231SZi Yan ret = mutex_lock_interruptible(&split_debug_mutex); 3105fa6c0231SZi Yan if (ret) 3106fa6c0231SZi Yan return ret; 3107fa6c0231SZi Yan 3108fa6c0231SZi Yan ret = -EFAULT; 3109fa6c0231SZi Yan 3110fa6c0231SZi Yan memset(input_buf, 0, MAX_INPUT_BUF_SZ); 3111fa6c0231SZi Yan if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ))) 3112fa6c0231SZi Yan goto out; 3113fa6c0231SZi Yan 3114fa6c0231SZi Yan input_buf[MAX_INPUT_BUF_SZ - 1] = '\0'; 3115fbe37501SZi Yan 3116fbe37501SZi Yan if (input_buf[0] == '/') { 3117fbe37501SZi Yan char *tok; 3118fbe37501SZi Yan char *buf = input_buf; 3119fbe37501SZi Yan char file_path[MAX_INPUT_BUF_SZ]; 3120fbe37501SZi Yan pgoff_t off_start = 0, off_end = 0; 3121fbe37501SZi Yan size_t input_len = strlen(input_buf); 3122fbe37501SZi Yan 3123fbe37501SZi Yan tok = strsep(&buf, ","); 3124fbe37501SZi Yan if (tok) { 31251212e00cSMatthew Wilcox (Oracle) strcpy(file_path, tok); 3126fbe37501SZi Yan } else { 3127fbe37501SZi Yan ret = -EINVAL; 3128fbe37501SZi Yan goto out; 3129fbe37501SZi Yan } 3130fbe37501SZi Yan 3131fbe37501SZi Yan ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end); 3132fbe37501SZi Yan if (ret != 2) { 3133fbe37501SZi Yan ret = -EINVAL; 3134fbe37501SZi Yan goto out; 3135fbe37501SZi Yan } 3136fbe37501SZi Yan ret = split_huge_pages_in_file(file_path, off_start, off_end); 3137fbe37501SZi Yan if (!ret) 3138fbe37501SZi Yan ret = input_len; 3139fbe37501SZi Yan 3140fbe37501SZi Yan goto out; 3141fbe37501SZi Yan } 3142fbe37501SZi Yan 3143fa6c0231SZi Yan ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end); 3144fa6c0231SZi Yan if (ret == 1 && pid == 1) { 3145fa6c0231SZi Yan split_huge_pages_all(); 3146fa6c0231SZi Yan ret = strlen(input_buf); 3147fa6c0231SZi Yan goto out; 3148fa6c0231SZi Yan } else if (ret != 3) { 3149fa6c0231SZi Yan ret = -EINVAL; 3150fa6c0231SZi Yan goto out; 3151fa6c0231SZi Yan } 3152fa6c0231SZi Yan 3153fa6c0231SZi Yan ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end); 3154fa6c0231SZi Yan if (!ret) 3155fa6c0231SZi Yan ret = strlen(input_buf); 3156fa6c0231SZi Yan out: 3157fa6c0231SZi Yan mutex_unlock(&split_debug_mutex); 3158fa6c0231SZi Yan return ret; 3159fa6c0231SZi Yan 3160fa6c0231SZi Yan } 3161fa6c0231SZi Yan 3162fa6c0231SZi Yan static const struct file_operations split_huge_pages_fops = { 3163fa6c0231SZi Yan .owner = THIS_MODULE, 3164fa6c0231SZi Yan .write = split_huge_pages_write, 3165fa6c0231SZi Yan .llseek = no_llseek, 
3166fa6c0231SZi Yan }; 316749071d43SKirill A. Shutemov 316849071d43SKirill A. Shutemov static int __init split_huge_pages_debugfs(void) 316949071d43SKirill A. Shutemov { 3170d9f7979cSGreg Kroah-Hartman debugfs_create_file("split_huge_pages", 0200, NULL, NULL, 317149071d43SKirill A. Shutemov &split_huge_pages_fops); 317249071d43SKirill A. Shutemov return 0; 317349071d43SKirill A. Shutemov } 317449071d43SKirill A. Shutemov late_initcall(split_huge_pages_debugfs); 317549071d43SKirill A. Shutemov #endif 3176616b8371SZi Yan 3177616b8371SZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 31787f5abe60SDavid Hildenbrand int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, 3179616b8371SZi Yan struct page *page) 3180616b8371SZi Yan { 3181616b8371SZi Yan struct vm_area_struct *vma = pvmw->vma; 3182616b8371SZi Yan struct mm_struct *mm = vma->vm_mm; 3183616b8371SZi Yan unsigned long address = pvmw->address; 31846c287605SDavid Hildenbrand bool anon_exclusive; 3185616b8371SZi Yan pmd_t pmdval; 3186616b8371SZi Yan swp_entry_t entry; 3187ab6e3d09SNaoya Horiguchi pmd_t pmdswp; 3188616b8371SZi Yan 3189616b8371SZi Yan if (!(pvmw->pmd && !pvmw->pte)) 31907f5abe60SDavid Hildenbrand return 0; 3191616b8371SZi Yan 3192616b8371SZi Yan flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); 31938a8683adSHuang Ying pmdval = pmdp_invalidate(vma, address, pvmw->pmd); 31946c287605SDavid Hildenbrand 3195088b8aa5SDavid Hildenbrand /* See page_try_share_anon_rmap(): invalidate PMD first. */ 31966c287605SDavid Hildenbrand anon_exclusive = PageAnon(page) && PageAnonExclusive(page); 31976c287605SDavid Hildenbrand if (anon_exclusive && page_try_share_anon_rmap(page)) { 31986c287605SDavid Hildenbrand set_pmd_at(mm, address, pvmw->pmd, pmdval); 31997f5abe60SDavid Hildenbrand return -EBUSY; 32006c287605SDavid Hildenbrand } 32016c287605SDavid Hildenbrand 3202616b8371SZi Yan if (pmd_dirty(pmdval)) 3203616b8371SZi Yan set_page_dirty(page); 32044dd845b5SAlistair Popple if (pmd_write(pmdval)) 32054dd845b5SAlistair Popple entry = make_writable_migration_entry(page_to_pfn(page)); 32066c287605SDavid Hildenbrand else if (anon_exclusive) 32076c287605SDavid Hildenbrand entry = make_readable_exclusive_migration_entry(page_to_pfn(page)); 32084dd845b5SAlistair Popple else 32094dd845b5SAlistair Popple entry = make_readable_migration_entry(page_to_pfn(page)); 32102e346877SPeter Xu if (pmd_young(pmdval)) 32112e346877SPeter Xu entry = make_migration_entry_young(entry); 32122e346877SPeter Xu if (pmd_dirty(pmdval)) 32132e346877SPeter Xu entry = make_migration_entry_dirty(entry); 3214ab6e3d09SNaoya Horiguchi pmdswp = swp_entry_to_pmd(entry); 3215ab6e3d09SNaoya Horiguchi if (pmd_soft_dirty(pmdval)) 3216ab6e3d09SNaoya Horiguchi pmdswp = pmd_swp_mksoft_dirty(pmdswp); 3217ab6e3d09SNaoya Horiguchi set_pmd_at(mm, address, pvmw->pmd, pmdswp); 3218cea86fe2SHugh Dickins page_remove_rmap(page, vma, true); 3219616b8371SZi Yan put_page(page); 3220283fd6feSAnshuman Khandual trace_set_migration_pmd(address, pmd_val(pmdswp)); 32217f5abe60SDavid Hildenbrand 32227f5abe60SDavid Hildenbrand return 0; 3223616b8371SZi Yan } 3224616b8371SZi Yan 3225616b8371SZi Yan void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) 3226616b8371SZi Yan { 3227616b8371SZi Yan struct vm_area_struct *vma = pvmw->vma; 3228616b8371SZi Yan struct mm_struct *mm = vma->vm_mm; 3229616b8371SZi Yan unsigned long address = pvmw->address; 32304fba8f2aSMiaohe Lin unsigned long haddr = address & HPAGE_PMD_MASK; 3231616b8371SZi Yan pmd_t pmde; 3232616b8371SZi Yan 
swp_entry_t entry; 3233616b8371SZi Yan 3234616b8371SZi Yan if (!(pvmw->pmd && !pvmw->pte)) 3235616b8371SZi Yan return; 3236616b8371SZi Yan 3237616b8371SZi Yan entry = pmd_to_swp_entry(*pvmw->pmd); 3238616b8371SZi Yan get_page(new); 32392e346877SPeter Xu pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot)); 3240ab6e3d09SNaoya Horiguchi if (pmd_swp_soft_dirty(*pvmw->pmd)) 3241ab6e3d09SNaoya Horiguchi pmde = pmd_mksoft_dirty(pmde); 32424dd845b5SAlistair Popple if (is_writable_migration_entry(entry)) 3243f55e1014SLinus Torvalds pmde = maybe_pmd_mkwrite(pmde, vma); 32448f34f1eaSPeter Xu if (pmd_swp_uffd_wp(*pvmw->pmd)) 32458f34f1eaSPeter Xu pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde)); 32462e346877SPeter Xu if (!is_migration_entry_young(entry)) 32472e346877SPeter Xu pmde = pmd_mkold(pmde); 32482e346877SPeter Xu /* NOTE: this may contain setting soft-dirty on some archs */ 32492e346877SPeter Xu if (PageDirty(new) && is_migration_entry_dirty(entry)) 32502e346877SPeter Xu pmde = pmd_mkdirty(pmde); 3251616b8371SZi Yan 32526c287605SDavid Hildenbrand if (PageAnon(new)) { 32536c287605SDavid Hildenbrand rmap_t rmap_flags = RMAP_COMPOUND; 32546c287605SDavid Hildenbrand 32556c287605SDavid Hildenbrand if (!is_readable_migration_entry(entry)) 32566c287605SDavid Hildenbrand rmap_flags |= RMAP_EXCLUSIVE; 32576c287605SDavid Hildenbrand 32584fba8f2aSMiaohe Lin page_add_anon_rmap(new, vma, haddr, rmap_flags); 32596c287605SDavid Hildenbrand } else { 3260cea86fe2SHugh Dickins page_add_file_rmap(new, vma, true); 32616c287605SDavid Hildenbrand } 32626c287605SDavid Hildenbrand VM_BUG_ON(pmd_write(pmde) && PageAnon(new) && !PageAnonExclusive(new)); 32634fba8f2aSMiaohe Lin set_pmd_at(mm, haddr, pvmw->pmd, pmde); 32645cbcf225SMuchun Song 32655cbcf225SMuchun Song /* No need to invalidate - it was non-present before */ 3266616b8371SZi Yan update_mmu_cache_pmd(vma, address, pvmw->pmd); 3267283fd6feSAnshuman Khandual trace_remove_migration_pmd(address, pmd_val(pmde)); 3268616b8371SZi Yan } 3269616b8371SZi Yan #endif 3270
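As a closing usage note for the debugfs interface defined above: split_huge_pages_write() accepts "<pid>,0x<vaddr_start>,0x<vaddr_end>" to split the PMD-mapped THPs of a live process, "/absolute/path,0x<off_start>,0x<off_end>" for file-backed THPs, or a bare "1" to split every THP in the system. The sketch below is a minimal user-space client for the first form; it is not part of this file, it assumes debugfs is mounted at /sys/kernel/debug on a kernel built with CONFIG_DEBUG_FS, and the helper name split_range() is mine.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/*
 * Ask the kernel to split the PMD-mapped THPs backing [start, end) in the
 * address space of @pid, via the debugfs file implemented by
 * split_huge_pages_write() above.  Needs root privileges.
 */
static int split_range(int pid, unsigned long start, unsigned long end)
{
	char buf[96];
	int fd, len;
	ssize_t ret;

	fd = open("/sys/kernel/debug/split_huge_pages", O_WRONLY);
	if (fd < 0)
		return -1;
	/* Same format that split_huge_pages_write() parses with sscanf(). */
	len = snprintf(buf, sizeof(buf), "%d,0x%lx,0x%lx", pid, start, end);
	ret = write(fd, buf, len);
	close(fd);
	return ret < 0 ? -1 : 0;
}

int main(int argc, char **argv)
{
	if (argc != 4) {
		fprintf(stderr, "usage: %s <pid> <start> <end>\n", argv[0]);
		return 1;
	}
	return split_range(atoi(argv[1]),
			   strtoul(argv[2], NULL, 0),
			   strtoul(argv[3], NULL, 0)) ? 1 : 0;
}

A successful write returns the number of characters consumed (the ret = strlen(input_buf) path above), and the outcome of each request is reported through the pr_debug() messages in split_huge_pages_pid() and split_huge_pages_in_file(), e.g. "%lu of %lu THP split".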