/*
 * Copyright (C) 2009  Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications without a
 * guaranteed benefit. When transparent hugepage support is enabled, it is
 * enabled for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

static struct shrinker deferred_split_shrinker;

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;

static struct page *get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return READ_ONCE(huge_zero_page);

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return NULL;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}

	/* We take an additional reference here. It will be put back by the shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return READ_ONCE(huge_zero_page);
}

static void put_huge_zero_page(void)
{
	/*
	 * The counter should never go to zero here. Only the shrinker can
	 * put the last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}

void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}
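
/*
 * Lifecycle summary of the huge zero page, as implemented above: the
 * first get_huge_zero_page() allocates the page and sets the refcount
 * to 2 -- one reference for the caller plus one held back for the
 * shrinker. Each mm pins at most a single reference, tracked via
 * MMF_HUGE_ZERO_PAGE. Once all users are gone the count drops back to
 * 1, at which point the shrinker callbacks below may free the page.
 */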

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};

#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "[always] madvise never\n");
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}

static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);
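
/*
 * The attribute above appears as
 * /sys/kernel/mm/transparent_hugepage/enabled (the "transparent_hugepage"
 * kobject is created under mm_kobj in hugepage_init_sysfs() below).
 * Typical usage from userspace:
 *
 *	echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 *	cat /sys/kernel/mm/transparent_hugepage/enabled
 *	always [madvise] never
 */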

ssize_t single_hugepage_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	return sprintf(buf, "%d\n",
		       !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "[always] defer defer+madvise madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always [defer] defer+madvise madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always defer [defer+madvise] madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always defer defer+madvise [madvise] never\n");
	return sprintf(buf, "always defer defer+madvise madvise [never]\n");
}

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("defer", buf,
		    min(sizeof("defer")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("defer+madvise", buf,
		    min(sizeof("defer+madvise")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
		    min(sizeof("madvise")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
		    min(sizeof("never")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);
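
/*
 * Like "enabled", the defrag policy is a sysfs knob, e.g.:
 *
 *	echo defer+madvise > /sys/kernel/mm/transparent_hugepage/defrag
 *
 * The mapping from each setting to actual allocation behaviour is
 * spelled out at alloc_hugepage_direct_gfpmask() below.
 */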

static ssize_t use_zero_page_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr =
	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);

#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
	&shmem_enabled_attr.attr,
#endif
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 0;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = register_shrinker(&huge_zero_page_shrinker);
	if (err)
		goto err_hzp_shrinker;
	err = register_shrinker(&deferred_split_shrinker);
	if (err)
		goto err_split_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save.  The admin can still enable it through /sys.
	 */
	if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
	unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);
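
/*
 * Boot-time counterpart of the sysfs "enabled" knob: the kernel command
 * line accepts transparent_hugepage=always|madvise|never, parsed by
 * setup_transparent_hugepage() below, e.g.:
 *
 *	transparent_hugepage=madvise
 */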

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * ->lru in the tail pages is occupied by compound_head.
	 * Let's use ->mapping + ->index in the second tail page as list_head.
	 */
	return (struct list_head *)&page[2].mapping;
}

void prep_transhuge_page(struct page *page)
{
	/*
	 * we use page->mapping and page->index in the second tail page
	 * as list_head: assuming THP order >= 2
	 */

	INIT_LIST_HEAD(page_deferred_list(page));
	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}
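
/*
 * Rough sketch of the layout assumed by page_deferred_list() and
 * prep_transhuge_page() for a THP of order >= 2:
 *
 *	page[0]: head page
 *	page[1]: compound metadata (dtor, order, compound_mapcount)
 *	page[2]: ->mapping/->index reused as the deferred-split list_head
 *
 * ->lru of the tail pages cannot be used for the list because it is
 * occupied by compound_head, hence the detour via page[2].
 */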

unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size)
{
	unsigned long addr;
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	addr = current->mm->get_unmapped_area(filp, 0, len_pad,
					      off >> PAGE_SHIFT, flags);
	if (IS_ERR_VALUE(addr))
		return 0;

	addr += (off - addr) & (size - 1);
	return addr;
}

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	if (addr)
		goto out;
	if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
		goto out;

	addr = __thp_get_unmapped_area(filp, len, off, flags, PMD_SIZE);
	if (addr)
		return addr;

out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
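
/*
 * Worked example for __thp_get_unmapped_area(), with illustrative
 * numbers and assuming a 2MB PMD: for size = 2MB, len = 4MB and
 * off = 0x201000, the search length is padded to 6MB and the returned
 * address is bumped by (off - addr) & (size - 1), making addr congruent
 * to off modulo 2MB. That is exactly the property a PMD-sized file
 * mapping needs: the virtual address and the file offset must share
 * the same alignment within the huge page.
 */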

static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
		gfp_t gfp)
{
	struct vm_area_struct *vma = vmf->vma;
	struct mem_cgroup *memcg;
	pgtable_t pgtable;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (mem_cgroup_try_charge(page, vma->vm_mm, gfp, &memcg, true)) {
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}

	pgtable = pte_alloc_one(vma->vm_mm, haddr);
	if (unlikely(!pgtable)) {
		mem_cgroup_cancel_charge(page, memcg, true);
		put_page(page);
		return VM_FAULT_OOM;
	}

	clear_huge_page(page, haddr, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__SetPageUptodate(page);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_none(*vmf->pmd))) {
		spin_unlock(vmf->ptl);
		mem_cgroup_cancel_charge(page, memcg, true);
		put_page(page);
		pte_free(vma->vm_mm, pgtable);
	} else {
		pmd_t entry;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			int ret;

			spin_unlock(vmf->ptl);
			mem_cgroup_cancel_charge(page, memcg, true);
			put_page(page);
			pte_free(vma->vm_mm, pgtable);
			ret = handle_userfault(vmf, VM_UFFD_MISSING);
			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			return ret;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr, true);
		mem_cgroup_commit_charge(page, memcg, false, true);
		lru_cache_add_active_or_unevictable(page, vma);
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		atomic_long_inc(&vma->vm_mm->nr_ptes);
		spin_unlock(vmf->ptl);
		count_vm_event(THP_FAULT_ALLOC);
	}

	return 0;
}

/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */
static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
{
	const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
							     __GFP_KSWAPD_RECLAIM);
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
							     0);
	return GFP_TRANSHUGE_LIGHT;
}
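
/*
 * Example of the policy above with defrag set to "defer+madvise": a
 * fault in a VMA marked MADV_HUGEPAGE gets GFP_TRANSHUGE_LIGHT |
 * __GFP_DIRECT_RECLAIM and may stall, while a fault in any other VMA
 * gets GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM: kswapd is woken,
 * but the allocation itself fails rather than stalls.
 */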

/* Caller must hold page table lock. */
static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return false;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	if (pgtable)
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	atomic_long_inc(&mm->nr_ptes);
	return true;
}

int do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	gfp_t gfp;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
		return VM_FAULT_OOM;
	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
			!mm_forbids_zeropage(vma->vm_mm) &&
			transparent_hugepage_use_zero_page()) {
		pgtable_t pgtable;
		struct page *zero_page;
		bool set;
		int ret;
		pgtable = pte_alloc_one(vma->vm_mm, haddr);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = mm_get_huge_zero_page(vma->vm_mm);
		if (unlikely(!zero_page)) {
			pte_free(vma->vm_mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		ret = 0;
		set = false;
		if (pmd_none(*vmf->pmd)) {
			if (userfaultfd_missing(vma)) {
				spin_unlock(vmf->ptl);
				ret = handle_userfault(vmf, VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_page(pgtable, vma->vm_mm, vma,
						   haddr, vmf->pmd, zero_page);
				spin_unlock(vmf->ptl);
				set = true;
			}
		} else
			spin_unlock(vmf->ptl);
		if (!set)
			pte_free(vma->vm_mm, pgtable);
		return ret;
	}
	gfp = alloc_hugepage_direct_gfpmask(vma);
	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
	if (unlikely(!page)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	prep_transhuge_page(page);
	return __do_huge_pmd_anonymous_page(vmf, page, gfp);
}

static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pmd_mkdevmap(entry);
	if (write) {
		entry = pmd_mkyoung(pmd_mkdirty(entry));
		entry = maybe_pmd_mkwrite(entry, vma);
	}
	set_pmd_at(mm, addr, pmd, entry);
	update_mmu_cache_pmd(vma, addr, pmd);
	spin_unlock(ptl);
}

int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmd, pfn_t pfn, bool write)
{
	pgprot_t pgprot = vma->vm_page_prot;
	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
	BUG_ON(!pfn_t_devmap(pfn));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
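
/*
 * vmf_insert_pfn_pmd() is meant for callers that manage their own pfns
 * (DAX being the prime example at the time of writing): the fault
 * handler resolves a fault to a PMD-aligned pfn and installs it here as
 * one huge entry instead of HPAGE_PMD_NR individual PTEs.
 */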

static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd)
{
	pmd_t _pmd;

	/*
	 * We should set the dirty bit only for FOLL_WRITE, but for now
	 * the dirty bit in the pmd is meaningless. If the dirty bit ever
	 * becomes meaningful and we only set it with FOLL_WRITE, an
	 * atomic set_bit will be required on the pmd to set the young
	 * bit, instead of the current set_pmd_at.
	 */
	_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				pmd, _pmd, 1))
		update_mmu_cache_pmd(vma, addr, pmd);
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags)
{
	unsigned long pfn = pmd_pfn(*pmd);
	struct mm_struct *mm = vma->vm_mm;
	struct dev_pagemap *pgmap;
	struct page *page;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	/*
	 * When we COW a devmap PMD entry, we split it into PTEs, so we should
	 * not be in this function with `flags & FOLL_COW` set.
	 */
	WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		return NULL;

	if (pmd_present(*pmd) && pmd_devmap(*pmd))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & FOLL_GET))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
	pgmap = get_dev_pagemap(pfn, NULL);
	if (!pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	get_page(page);
	put_dev_pagemap(pgmap);

	return page;
}
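
/*
 * copy_huge_pmd() below is the fork() path for anonymous THPs: instead
 * of copying the huge page, parent and child end up mapping the same
 * page write-protected, and the actual copy is deferred to the first
 * write fault (see do_huge_pmd_wp_page()).
 */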
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable = NULL;
	int ret = -ENOMEM;

	/* Skip if it can be re-filled on fault */
	if (!vma_is_anonymous(vma))
		return 0;

	pgtable = pte_alloc_one(dst_mm, addr);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
	src_ptl = pmd_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;
	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	/*
	 * When the page table lock is held, the huge zero pmd should not be
	 * under splitting, since we don't split the page itself, only the
	 * pmd to a page table.
	 */
	if (is_huge_zero_pmd(pmd)) {
		struct page *zero_page;
		/*
		 * get_huge_zero_page() will never allocate a new page here,
		 * since we already have a zero page to copy. It just takes a
		 * reference.
		 */
		zero_page = mm_get_huge_zero_page(dst_mm);
		set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
				zero_page);
		ret = 0;
		goto out_unlock;
	}

	src_page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
	get_page(src_page);
	page_dup_rmap(src_page, true);
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
	atomic_long_inc(&dst_mm->nr_ptes);
	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);

	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
out:
	return ret;
}

void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
{
	pmd_t entry;
	unsigned long haddr;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
		goto unlock;

	entry = pmd_mkyoung(orig_pmd);
	if (write)
		entry = pmd_mkdirty(entry);
	haddr = vmf->address & HPAGE_PMD_MASK;
	if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
		update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);

unlock:
	spin_unlock(vmf->ptl);
}
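
/*
 * do_huge_pmd_wp_page_fallback() handles a write fault on a shared THP
 * when no replacement huge page could be allocated: it charges and
 * fills HPAGE_PMD_NR small pages, copies the data, and replaces the
 * huge pmd with a page table mapping those small pages -- splitting
 * the mapping as a side effect of the COW.
 */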
static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
		struct page *page)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	struct mem_cgroup *memcg;
	pgtable_t pgtable;
	pmd_t _pmd;
	int ret = 0, i;
	struct page **pages;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
			GFP_KERNEL);
	if (unlikely(!pages)) {
		ret |= VM_FAULT_OOM;
		goto out;
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma,
					       vmf->address, page_to_nid(page));
		if (unlikely(!pages[i] ||
			     mem_cgroup_try_charge(pages[i], vma->vm_mm,
				     GFP_KERNEL, &memcg, false))) {
			if (pages[i])
				put_page(pages[i]);
			while (--i >= 0) {
				memcg = (void *)page_private(pages[i]);
				set_page_private(pages[i], 0);
				mem_cgroup_cancel_charge(pages[i], memcg,
						false);
				put_page(pages[i]);
			}
			kfree(pages);
			ret |= VM_FAULT_OOM;
			goto out;
		}
		set_page_private(pages[i], (unsigned long)memcg);
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		copy_user_highpage(pages[i], page + i,
				   haddr + PAGE_SIZE * i, vma);
		__SetPageUptodate(pages[i]);
		cond_resched();
	}

	mmun_start = haddr;
	mmun_end   = haddr + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
		goto out_free_pages;
	VM_BUG_ON_PAGE(!PageHead(page), page);

	pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
	/* leave pmd empty until pte is filled */

	pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd);
	pmd_populate(vma->vm_mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t entry;
		entry = mk_pte(pages[i], vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		memcg = (void *)page_private(pages[i]);
		set_page_private(pages[i], 0);
		page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false);
		mem_cgroup_commit_charge(pages[i], memcg, false, false);
		lru_cache_add_active_or_unevictable(pages[i], vma);
		vmf->pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*vmf->pte));
		set_pte_at(vma->vm_mm, haddr, vmf->pte, entry);
		pte_unmap(vmf->pte);
	}
	kfree(pages);

	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(vma->vm_mm, vmf->pmd, pgtable);
	page_remove_rmap(page, true);
	spin_unlock(vmf->ptl);

	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);

	ret |= VM_FAULT_WRITE;
	put_page(page);

out:
	return ret;

out_free_pages:
	spin_unlock(vmf->ptl);
	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		memcg = (void *)page_private(pages[i]);
		set_page_private(pages[i], 0);
		mem_cgroup_cancel_charge(pages[i], memcg, false);
		put_page(pages[i]);
	}
	kfree(pages);
	goto out;
}

int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
{
	struct vm_area_struct *vma = vmf->vma;
	struct page *page = NULL, *new_page;
	struct mem_cgroup *memcg;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */
	gfp_t huge_gfp;			/* for allocation and charge */
	int ret = 0;

	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
	VM_BUG_ON_VMA(!vma->anon_vma, vma);
	if (is_huge_zero_pmd(orig_pmd))
		goto alloc;
	spin_lock(vmf->ptl);
	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
		goto out_unlock;

	page = pmd_page(orig_pmd);
	VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
	/*
	 * We can only reuse the page if nobody else maps the huge page or
	 * its part.
	 */
	if (page_trans_huge_mapcount(page, NULL) == 1) {
		pmd_t entry;
		entry = pmd_mkyoung(orig_pmd);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		ret |= VM_FAULT_WRITE;
		goto out_unlock;
	}
	get_page(page);
	spin_unlock(vmf->ptl);
alloc:
	if (transparent_hugepage_enabled(vma) &&
	    !transparent_hugepage_debug_cow()) {
		huge_gfp = alloc_hugepage_direct_gfpmask(vma);
		new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
	} else
		new_page = NULL;

	if (likely(new_page)) {
		prep_transhuge_page(new_page);
	} else {
		if (!page) {
			split_huge_pmd(vma, vmf->pmd, vmf->address);
			ret |= VM_FAULT_FALLBACK;
Shutemov } else { 106882b0f8c3SJan Kara ret = do_huge_pmd_wp_page_fallback(vmf, orig_pmd, page); 10699845cbbdSKirill A. Shutemov if (ret & VM_FAULT_OOM) { 107082b0f8c3SJan Kara split_huge_pmd(vma, vmf->pmd, vmf->address); 10719845cbbdSKirill A. Shutemov ret |= VM_FAULT_FALLBACK; 10729845cbbdSKirill A. Shutemov } 1073ddc58f27SKirill A. Shutemov put_page(page); 107493b4796dSKirill A. Shutemov } 107517766ddeSDavid Rientjes count_vm_event(THP_FAULT_FALLBACK); 107671e3aac0SAndrea Arcangeli goto out; 107771e3aac0SAndrea Arcangeli } 107871e3aac0SAndrea Arcangeli 1079bae473a4SKirill A. Shutemov if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm, 1080bae473a4SKirill A. Shutemov huge_gfp, &memcg, true))) { 1081b9bbfbe3SAndrea Arcangeli put_page(new_page); 108282b0f8c3SJan Kara split_huge_pmd(vma, vmf->pmd, vmf->address); 1083bae473a4SKirill A. Shutemov if (page) 1084ddc58f27SKirill A. Shutemov put_page(page); 10859845cbbdSKirill A. Shutemov ret |= VM_FAULT_FALLBACK; 108617766ddeSDavid Rientjes count_vm_event(THP_FAULT_FALLBACK); 1087b9bbfbe3SAndrea Arcangeli goto out; 1088b9bbfbe3SAndrea Arcangeli } 1089b9bbfbe3SAndrea Arcangeli 109017766ddeSDavid Rientjes count_vm_event(THP_FAULT_ALLOC); 109117766ddeSDavid Rientjes 1092eecc1e42SHugh Dickins if (!page) 109393b4796dSKirill A. Shutemov clear_huge_page(new_page, haddr, HPAGE_PMD_NR); 109493b4796dSKirill A. Shutemov else 109571e3aac0SAndrea Arcangeli copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); 109671e3aac0SAndrea Arcangeli __SetPageUptodate(new_page); 109771e3aac0SAndrea Arcangeli 10982ec74c3eSSagi Grimberg mmun_start = haddr; 10992ec74c3eSSagi Grimberg mmun_end = haddr + HPAGE_PMD_SIZE; 1100bae473a4SKirill A. Shutemov mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); 11012ec74c3eSSagi Grimberg 110282b0f8c3SJan Kara spin_lock(vmf->ptl); 110393b4796dSKirill A. Shutemov if (page) 1104ddc58f27SKirill A. Shutemov put_page(page); 110582b0f8c3SJan Kara if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { 110682b0f8c3SJan Kara spin_unlock(vmf->ptl); 1107f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(new_page, memcg, true); 110871e3aac0SAndrea Arcangeli put_page(new_page); 11092ec74c3eSSagi Grimberg goto out_mn; 1110b9bbfbe3SAndrea Arcangeli } else { 111171e3aac0SAndrea Arcangeli pmd_t entry; 11123122359aSKirill A. Shutemov entry = mk_huge_pmd(new_page, vma->vm_page_prot); 11133122359aSKirill A. Shutemov entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 111482b0f8c3SJan Kara pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd); 1115d281ee61SKirill A. Shutemov page_add_new_anon_rmap(new_page, vma, haddr, true); 1116f627c2f5SKirill A. Shutemov mem_cgroup_commit_charge(new_page, memcg, false, true); 111700501b53SJohannes Weiner lru_cache_add_active_or_unevictable(new_page, vma); 111882b0f8c3SJan Kara set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); 111982b0f8c3SJan Kara update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 1120eecc1e42SHugh Dickins if (!page) { 1121bae473a4SKirill A. Shutemov add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); 112297ae1749SKirill A. Shutemov } else { 1123309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(page), page); 1124d281ee61SKirill A. Shutemov page_remove_rmap(page, true); 112571e3aac0SAndrea Arcangeli put_page(page); 112693b4796dSKirill A. Shutemov } 112771e3aac0SAndrea Arcangeli ret |= VM_FAULT_WRITE; 112871e3aac0SAndrea Arcangeli } 112982b0f8c3SJan Kara spin_unlock(vmf->ptl); 11302ec74c3eSSagi Grimberg out_mn: 1131bae473a4SKirill A. 
Shutemov mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); 11322ec74c3eSSagi Grimberg out: 11332ec74c3eSSagi Grimberg return ret; 113471e3aac0SAndrea Arcangeli out_unlock: 113582b0f8c3SJan Kara spin_unlock(vmf->ptl); 113671e3aac0SAndrea Arcangeli return ret; 113771e3aac0SAndrea Arcangeli } 113871e3aac0SAndrea Arcangeli 11398310d48bSKeno Fischer /* 11408310d48bSKeno Fischer * FOLL_FORCE can write to even unwritable pmd's, but only 11418310d48bSKeno Fischer * after we've gone through a COW cycle and they are dirty. 11428310d48bSKeno Fischer */ 11438310d48bSKeno Fischer static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) 11448310d48bSKeno Fischer { 11458310d48bSKeno Fischer return pmd_write(pmd) || 11468310d48bSKeno Fischer ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd)); 11478310d48bSKeno Fischer } 11488310d48bSKeno Fischer 1149b676b293SDavid Rientjes struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, 115071e3aac0SAndrea Arcangeli unsigned long addr, 115171e3aac0SAndrea Arcangeli pmd_t *pmd, 115271e3aac0SAndrea Arcangeli unsigned int flags) 115371e3aac0SAndrea Arcangeli { 1154b676b293SDavid Rientjes struct mm_struct *mm = vma->vm_mm; 115571e3aac0SAndrea Arcangeli struct page *page = NULL; 115671e3aac0SAndrea Arcangeli 1157c4088ebdSKirill A. Shutemov assert_spin_locked(pmd_lockptr(mm, pmd)); 115871e3aac0SAndrea Arcangeli 11598310d48bSKeno Fischer if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags)) 116071e3aac0SAndrea Arcangeli goto out; 116171e3aac0SAndrea Arcangeli 116285facf25SKirill A. Shutemov /* Avoid dumping huge zero page */ 116385facf25SKirill A. Shutemov if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) 116485facf25SKirill A. Shutemov return ERR_PTR(-EFAULT); 116585facf25SKirill A. Shutemov 11662b4847e7SMel Gorman /* Full NUMA hinting faults to serialise migration in fault paths */ 11678a0516edSMel Gorman if ((flags & FOLL_NUMA) && pmd_protnone(*pmd)) 11682b4847e7SMel Gorman goto out; 11692b4847e7SMel Gorman 117071e3aac0SAndrea Arcangeli page = pmd_page(*pmd); 1171ca120cf6SDan Williams VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page); 11723565fce3SDan Williams if (flags & FOLL_TOUCH) 11733565fce3SDan Williams touch_pmd(vma, addr, pmd); 1174de60f5f1SEric B Munson if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { 1175e90309c9SKirill A. Shutemov /* 1176e90309c9SKirill A. Shutemov * We don't mlock() pte-mapped THPs. This way we can avoid 1177e90309c9SKirill A. Shutemov * leaking mlocked pages into non-VM_LOCKED VMAs. 1178e90309c9SKirill A. Shutemov * 11799a73f61bSKirill A. Shutemov * For anon THP: 11809a73f61bSKirill A. Shutemov * 1181e90309c9SKirill A. Shutemov * In most cases the pmd is the only mapping of the page as we 1182e90309c9SKirill A. Shutemov * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for 1183e90309c9SKirill A. Shutemov * writable private mappings in populate_vma_page_range(). 1184e90309c9SKirill A. Shutemov * 1185e90309c9SKirill A. Shutemov * The only scenario when we have the page shared here is if we are 1186e90309c9SKirill A. Shutemov * mlocking a read-only mapping shared over fork(). We skip 1187e90309c9SKirill A. Shutemov * mlocking such pages. 11889a73f61bSKirill A. Shutemov * 11899a73f61bSKirill A. Shutemov * For file THP: 11909a73f61bSKirill A. Shutemov * 11919a73f61bSKirill A. Shutemov * We can expect PageDoubleMap() to be stable under page lock: 11929a73f61bSKirill A.
Shutemov * for file pages we set it in page_add_file_rmap(), which 11939a73f61bSKirill A. Shutemov * requires page to be locked. 1194e90309c9SKirill A. Shutemov */ 11959a73f61bSKirill A. Shutemov 11969a73f61bSKirill A. Shutemov if (PageAnon(page) && compound_mapcount(page) != 1) 11979a73f61bSKirill A. Shutemov goto skip_mlock; 11989a73f61bSKirill A. Shutemov if (PageDoubleMap(page) || !page->mapping) 11999a73f61bSKirill A. Shutemov goto skip_mlock; 12009a73f61bSKirill A. Shutemov if (!trylock_page(page)) 12019a73f61bSKirill A. Shutemov goto skip_mlock; 1202b676b293SDavid Rientjes lru_add_drain(); 12039a73f61bSKirill A. Shutemov if (page->mapping && !PageDoubleMap(page)) 1204b676b293SDavid Rientjes mlock_vma_page(page); 1205b676b293SDavid Rientjes unlock_page(page); 1206b676b293SDavid Rientjes } 12079a73f61bSKirill A. Shutemov skip_mlock: 120871e3aac0SAndrea Arcangeli page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; 1209ca120cf6SDan Williams VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page); 121071e3aac0SAndrea Arcangeli if (flags & FOLL_GET) 1211ddc58f27SKirill A. Shutemov get_page(page); 121271e3aac0SAndrea Arcangeli 121371e3aac0SAndrea Arcangeli out: 121471e3aac0SAndrea Arcangeli return page; 121571e3aac0SAndrea Arcangeli } 121671e3aac0SAndrea Arcangeli 1217d10e63f2SMel Gorman /* NUMA hinting page fault entry point for trans huge pmds */ 121882b0f8c3SJan Kara int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd) 1219d10e63f2SMel Gorman { 122082b0f8c3SJan Kara struct vm_area_struct *vma = vmf->vma; 1221b8916634SMel Gorman struct anon_vma *anon_vma = NULL; 1222b32967ffSMel Gorman struct page *page; 122382b0f8c3SJan Kara unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 12248191acbdSMel Gorman int page_nid = -1, this_nid = numa_node_id(); 122590572890SPeter Zijlstra int target_nid, last_cpupid = -1; 12268191acbdSMel Gorman bool page_locked; 12278191acbdSMel Gorman bool migrated = false; 1228b191f9b1SMel Gorman bool was_writable; 12296688cc05SPeter Zijlstra int flags = 0; 1230d10e63f2SMel Gorman 123182b0f8c3SJan Kara vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 123282b0f8c3SJan Kara if (unlikely(!pmd_same(pmd, *vmf->pmd))) 1233d10e63f2SMel Gorman goto out_unlock; 1234d10e63f2SMel Gorman 1235de466bd6SMel Gorman /* 1236de466bd6SMel Gorman * If there are potential migrations, wait for completion and retry 1237de466bd6SMel Gorman * without disrupting NUMA hinting information. Do not relock and 1238de466bd6SMel Gorman * check_same as the page may no longer be mapped. 
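 *
 * A sketch of the pattern this warns against (hypothetical, not code
 * from this file; it reuses the vmf/pmd naming of the function below).
 * Relocking and re-checking would dereference a page that the completed
 * migration may already have unmapped and freed:
 *
 *	spin_lock(vmf->ptl);                          -- do NOT relock
 *	if (pmd_same(pmd, *vmf->pmd))                 -- pmd may have changed
 *		wait_on_page_locked(pmd_page(*vmf->pmd));  -- stale page
 *
 * The code below instead samples the page pointer while still holding
 * the ptl, drops the lock, waits, and simply lets the access refault.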
1239de466bd6SMel Gorman */ 124082b0f8c3SJan Kara if (unlikely(pmd_trans_migrating(*vmf->pmd))) { 124182b0f8c3SJan Kara page = pmd_page(*vmf->pmd); 124282b0f8c3SJan Kara spin_unlock(vmf->ptl); 12435d833062SMel Gorman wait_on_page_locked(page); 1244de466bd6SMel Gorman goto out; 1245de466bd6SMel Gorman } 1246de466bd6SMel Gorman 1247d10e63f2SMel Gorman page = pmd_page(pmd); 1248a1a46184SMel Gorman BUG_ON(is_huge_zero_page(page)); 12498191acbdSMel Gorman page_nid = page_to_nid(page); 125090572890SPeter Zijlstra last_cpupid = page_cpupid_last(page); 125103c5a6e1SMel Gorman count_vm_numa_event(NUMA_HINT_FAULTS); 125204bb2f94SRik van Riel if (page_nid == this_nid) { 125303c5a6e1SMel Gorman count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); 125404bb2f94SRik van Riel flags |= TNF_FAULT_LOCAL; 125504bb2f94SRik van Riel } 12564daae3b4SMel Gorman 1257bea66fbdSMel Gorman /* See similar comment in do_numa_page for explanation */ 1258d59dc7bcSRik van Riel if (!pmd_write(pmd)) 12596688cc05SPeter Zijlstra flags |= TNF_NO_GROUP; 12606688cc05SPeter Zijlstra 12616688cc05SPeter Zijlstra /* 1262ff9042b1SMel Gorman * Acquire the page lock to serialise THP migrations but avoid dropping 1263ff9042b1SMel Gorman * page_table_lock if at all possible. 1264ff9042b1SMel Gorman */ 1265b8916634SMel Gorman page_locked = trylock_page(page); 1266b8916634SMel Gorman target_nid = mpol_misplaced(page, vma, haddr); 1267b8916634SMel Gorman if (target_nid == -1) { 1268b8916634SMel Gorman /* If the page was locked, there are no parallel migrations */ 1269a54a407fSMel Gorman if (page_locked) 1270b8916634SMel Gorman goto clear_pmdnuma; 12712b4847e7SMel Gorman } 1272cbee9f88SPeter Zijlstra 1273de466bd6SMel Gorman /* Migration could have started since the pmd_trans_migrating check */ 12742b4847e7SMel Gorman if (!page_locked) { 127582b0f8c3SJan Kara spin_unlock(vmf->ptl); 1276b8916634SMel Gorman wait_on_page_locked(page); 1277a54a407fSMel Gorman page_nid = -1; 1278b8916634SMel Gorman goto out; 1279b8916634SMel Gorman } 1280b8916634SMel Gorman 12812b4847e7SMel Gorman /* 12822b4847e7SMel Gorman * Page is misplaced. Page lock serialises migrations. Acquire anon_vma 12832b4847e7SMel Gorman * to serialise splits 12842b4847e7SMel Gorman */ 1285b8916634SMel Gorman get_page(page); 128682b0f8c3SJan Kara spin_unlock(vmf->ptl); 1287b8916634SMel Gorman anon_vma = page_lock_anon_vma_read(page); 1288b32967ffSMel Gorman 1289c69307d5SPeter Zijlstra /* Confirm the PMD did not change while page_table_lock was released */ 129082b0f8c3SJan Kara spin_lock(vmf->ptl); 129182b0f8c3SJan Kara if (unlikely(!pmd_same(pmd, *vmf->pmd))) { 1292b32967ffSMel Gorman unlock_page(page); 1293b32967ffSMel Gorman put_page(page); 1294a54a407fSMel Gorman page_nid = -1; 1295b32967ffSMel Gorman goto out_unlock; 1296b32967ffSMel Gorman } 1297ff9042b1SMel Gorman 1298c3a489caSMel Gorman /* Bail if we fail to protect against THP splits for any reason */ 1299c3a489caSMel Gorman if (unlikely(!anon_vma)) { 1300c3a489caSMel Gorman put_page(page); 1301c3a489caSMel Gorman page_nid = -1; 1302c3a489caSMel Gorman goto clear_pmdnuma; 1303c3a489caSMel Gorman } 1304c3a489caSMel Gorman 1305a54a407fSMel Gorman /* 1306a54a407fSMel Gorman * Migrate the THP to the requested node, returns with page unlocked 13078a0516edSMel Gorman * and access rights restored. 1308a54a407fSMel Gorman */ 130982b0f8c3SJan Kara spin_unlock(vmf->ptl); 1310bae473a4SKirill A.
Shutemov migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma, 131182b0f8c3SJan Kara vmf->pmd, pmd, vmf->address, page, target_nid); 13126688cc05SPeter Zijlstra if (migrated) { 13136688cc05SPeter Zijlstra flags |= TNF_MIGRATED; 13148191acbdSMel Gorman page_nid = target_nid; 1315074c2381SMel Gorman } else 1316074c2381SMel Gorman flags |= TNF_MIGRATE_FAIL; 1317b32967ffSMel Gorman 13188191acbdSMel Gorman goto out; 13194daae3b4SMel Gorman clear_pmdnuma: 1320a54a407fSMel Gorman BUG_ON(!PageLocked(page)); 1321b191f9b1SMel Gorman was_writable = pmd_write(pmd); 13224d942466SMel Gorman pmd = pmd_modify(pmd, vma->vm_page_prot); 1323b7b04004SMel Gorman pmd = pmd_mkyoung(pmd); 1324b191f9b1SMel Gorman if (was_writable) 1325b191f9b1SMel Gorman pmd = pmd_mkwrite(pmd); 132682b0f8c3SJan Kara set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); 132782b0f8c3SJan Kara update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 1328a54a407fSMel Gorman unlock_page(page); 1329d10e63f2SMel Gorman out_unlock: 133082b0f8c3SJan Kara spin_unlock(vmf->ptl); 1331b8916634SMel Gorman 1332b8916634SMel Gorman out: 1333b8916634SMel Gorman if (anon_vma) 1334b8916634SMel Gorman page_unlock_anon_vma_read(anon_vma); 1335b8916634SMel Gorman 13368191acbdSMel Gorman if (page_nid != -1) 133782b0f8c3SJan Kara task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, 133882b0f8c3SJan Kara vmf->flags); 13398191acbdSMel Gorman 1340d10e63f2SMel Gorman return 0; 1341d10e63f2SMel Gorman } 1342d10e63f2SMel Gorman 1343319904adSHuang Ying /* 1344319904adSHuang Ying * Return true if we do MADV_FREE successfully on the entire pmd page. 1345319904adSHuang Ying * Otherwise, return false. 1346319904adSHuang Ying */ 1347319904adSHuang Ying bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1348b8d3c4c3SMinchan Kim pmd_t *pmd, unsigned long addr, unsigned long next) 1349b8d3c4c3SMinchan Kim { 1350b8d3c4c3SMinchan Kim spinlock_t *ptl; 1351b8d3c4c3SMinchan Kim pmd_t orig_pmd; 1352b8d3c4c3SMinchan Kim struct page *page; 1353b8d3c4c3SMinchan Kim struct mm_struct *mm = tlb->mm; 1354319904adSHuang Ying bool ret = false; 1355b8d3c4c3SMinchan Kim 135607e32661SAneesh Kumar K.V tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE); 135707e32661SAneesh Kumar K.V 1358b6ec57f4SKirill A. Shutemov ptl = pmd_trans_huge_lock(pmd, vma); 1359b6ec57f4SKirill A. Shutemov if (!ptl) 136025eedabeSLinus Torvalds goto out_unlocked; 1361b8d3c4c3SMinchan Kim 1362b8d3c4c3SMinchan Kim orig_pmd = *pmd; 1363319904adSHuang Ying if (is_huge_zero_pmd(orig_pmd)) 1364b8d3c4c3SMinchan Kim goto out; 1365b8d3c4c3SMinchan Kim 1366b8d3c4c3SMinchan Kim page = pmd_page(orig_pmd); 1367b8d3c4c3SMinchan Kim /* 1368b8d3c4c3SMinchan Kim * If other processes are mapping this page, we can't discard 1369b8d3c4c3SMinchan Kim * the page unless they all do MADV_FREE, so let's skip the page. 1370b8d3c4c3SMinchan Kim */ 1371b8d3c4c3SMinchan Kim if (page_mapcount(page) != 1) 1372b8d3c4c3SMinchan Kim goto out; 1373b8d3c4c3SMinchan Kim 1374b8d3c4c3SMinchan Kim if (!trylock_page(page)) 1375b8d3c4c3SMinchan Kim goto out; 1376b8d3c4c3SMinchan Kim 1377b8d3c4c3SMinchan Kim /* 1378b8d3c4c3SMinchan Kim * If the user wants to discard part-pages of a THP, split it so MADV_FREE 1379b8d3c4c3SMinchan Kim * will deactivate only them.
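 *
 * For illustration, a userspace sketch (not code from this file; it
 * assumes THP is enabled and the mapping faulted in a PMD-sized huge
 * page): freeing one 4KiB subpage of a 2MiB THP takes the split path
 * below because next - addr != HPAGE_PMD_SIZE for the sub-PMD range:
 *
 *	char *buf = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	buf[0] = 1;                     -- may fault in a whole THP
 *	madvise(buf, 4096, MADV_FREE);  -- covers a single subpage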
1380b8d3c4c3SMinchan Kim */ 1381b8d3c4c3SMinchan Kim if (next - addr != HPAGE_PMD_SIZE) { 1382b8d3c4c3SMinchan Kim get_page(page); 1383b8d3c4c3SMinchan Kim spin_unlock(ptl); 13849818b8cdSHuang Ying split_huge_page(page); 1385b8d3c4c3SMinchan Kim put_page(page); 1386b8d3c4c3SMinchan Kim unlock_page(page); 1387b8d3c4c3SMinchan Kim goto out_unlocked; 1388b8d3c4c3SMinchan Kim } 1389b8d3c4c3SMinchan Kim 1390b8d3c4c3SMinchan Kim if (PageDirty(page)) 1391b8d3c4c3SMinchan Kim ClearPageDirty(page); 1392b8d3c4c3SMinchan Kim unlock_page(page); 1393b8d3c4c3SMinchan Kim 1394b8d3c4c3SMinchan Kim if (PageActive(page)) 1395b8d3c4c3SMinchan Kim deactivate_page(page); 1396b8d3c4c3SMinchan Kim 1397b8d3c4c3SMinchan Kim if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) { 1398b8d3c4c3SMinchan Kim orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, 1399b8d3c4c3SMinchan Kim tlb->fullmm); 1400b8d3c4c3SMinchan Kim orig_pmd = pmd_mkold(orig_pmd); 1401b8d3c4c3SMinchan Kim orig_pmd = pmd_mkclean(orig_pmd); 1402b8d3c4c3SMinchan Kim 1403b8d3c4c3SMinchan Kim set_pmd_at(mm, addr, pmd, orig_pmd); 1404b8d3c4c3SMinchan Kim tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 1405b8d3c4c3SMinchan Kim } 1406319904adSHuang Ying ret = true; 1407b8d3c4c3SMinchan Kim out: 1408b8d3c4c3SMinchan Kim spin_unlock(ptl); 1409b8d3c4c3SMinchan Kim out_unlocked: 1410b8d3c4c3SMinchan Kim return ret; 1411b8d3c4c3SMinchan Kim } 1412b8d3c4c3SMinchan Kim 1413953c66c2SAneesh Kumar K.V static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) 1414953c66c2SAneesh Kumar K.V { 1415953c66c2SAneesh Kumar K.V pgtable_t pgtable; 1416953c66c2SAneesh Kumar K.V 1417953c66c2SAneesh Kumar K.V pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1418953c66c2SAneesh Kumar K.V pte_free(mm, pgtable); 1419953c66c2SAneesh Kumar K.V atomic_long_dec(&mm->nr_ptes); 1420953c66c2SAneesh Kumar K.V } 1421953c66c2SAneesh Kumar K.V 142271e3aac0SAndrea Arcangeli int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1423f21760b1SShaohua Li pmd_t *pmd, unsigned long addr) 142471e3aac0SAndrea Arcangeli { 1425f5c8ad47SDavid Miller pmd_t orig_pmd; 1426da146769SKirill A. Shutemov spinlock_t *ptl; 1427da146769SKirill A. Shutemov 142807e32661SAneesh Kumar K.V tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE); 142907e32661SAneesh Kumar K.V 1430b6ec57f4SKirill A. Shutemov ptl = __pmd_trans_huge_lock(pmd, vma); 1431b6ec57f4SKirill A. Shutemov if (!ptl) 1432da146769SKirill A. Shutemov return 0; 1433a6bf2bb0SAneesh Kumar K.V /* 1434a6bf2bb0SAneesh Kumar K.V * For architectures like ppc64 we look at deposited pgtable 14358809aa2dSAneesh Kumar K.V * when calling pmdp_huge_get_and_clear. So do the 1436a6bf2bb0SAneesh Kumar K.V * pgtable_trans_huge_withdraw after finishing pmdp related 1437a6bf2bb0SAneesh Kumar K.V * operations. 1438a6bf2bb0SAneesh Kumar K.V */ 14398809aa2dSAneesh Kumar K.V orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, 1440fcbe08d6SMartin Schwidefsky tlb->fullmm); 1441f21760b1SShaohua Li tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 14424897c765SMatthew Wilcox if (vma_is_dax(vma)) { 14434897c765SMatthew Wilcox spin_unlock(ptl); 1444da146769SKirill A. Shutemov if (is_huge_zero_pmd(orig_pmd)) 1445c0f2e176SAneesh Kumar K.V tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE); 1446da146769SKirill A. Shutemov } else if (is_huge_zero_pmd(orig_pmd)) { 1447da146769SKirill A. Shutemov pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd)); 1448e1f56c89SKirill A. Shutemov atomic_long_dec(&tlb->mm->nr_ptes); 1449bf929152SKirill A. 
Shutemov spin_unlock(ptl); 1450c0f2e176SAneesh Kumar K.V tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE); 1451479f0abbSKirill A. Shutemov } else { 14524897c765SMatthew Wilcox struct page *page = pmd_page(orig_pmd); 1453d281ee61SKirill A. Shutemov page_remove_rmap(page, true); 1454309381feSSasha Levin VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); 1455309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(page), page); 1456b5072380SKirill A. Shutemov if (PageAnon(page)) { 1457b5072380SKirill A. Shutemov pgtable_t pgtable; 1458b5072380SKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd); 1459b5072380SKirill A. Shutemov pte_free(tlb->mm, pgtable); 1460e1f56c89SKirill A. Shutemov atomic_long_dec(&tlb->mm->nr_ptes); 1461b5072380SKirill A. Shutemov add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); 1462b5072380SKirill A. Shutemov } else { 1463953c66c2SAneesh Kumar K.V if (arch_needs_pgtable_deposit()) 1464953c66c2SAneesh Kumar K.V zap_deposited_table(tlb->mm, pmd); 1465b5072380SKirill A. Shutemov add_mm_counter(tlb->mm, MM_FILEPAGES, -HPAGE_PMD_NR); 1466b5072380SKirill A. Shutemov } 1467bf929152SKirill A. Shutemov spin_unlock(ptl); 1468e77b0852SAneesh Kumar K.V tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE); 1469479f0abbSKirill A. Shutemov } 1470da146769SKirill A. Shutemov return 1; 147171e3aac0SAndrea Arcangeli } 147171e3aac0SAndrea Arcangeli 14731dd38b6cSAneesh Kumar K.V #ifndef pmd_move_must_withdraw 14741dd38b6cSAneesh Kumar K.V static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl, 14751dd38b6cSAneesh Kumar K.V spinlock_t *old_pmd_ptl, 14761dd38b6cSAneesh Kumar K.V struct vm_area_struct *vma) 14771dd38b6cSAneesh Kumar K.V { 14781dd38b6cSAneesh Kumar K.V /* 14791dd38b6cSAneesh Kumar K.V * With split pmd lock we also need to move preallocated 14801dd38b6cSAneesh Kumar K.V * PTE page table if new_pmd is on different PMD page table. 14811dd38b6cSAneesh Kumar K.V * 14821dd38b6cSAneesh Kumar K.V * We also don't deposit and withdraw tables for file pages. 14831dd38b6cSAneesh Kumar K.V */ 14841dd38b6cSAneesh Kumar K.V return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); 14851dd38b6cSAneesh Kumar K.V } 14861dd38b6cSAneesh Kumar K.V #endif 14871dd38b6cSAneesh Kumar K.V 1488bf8616d5SHugh Dickins bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, 148937a1c49aSAndrea Arcangeli unsigned long new_addr, unsigned long old_end, 14905d190420SAaron Lu pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush) 149137a1c49aSAndrea Arcangeli { 1492bf929152SKirill A. Shutemov spinlock_t *old_ptl, *new_ptl; 149337a1c49aSAndrea Arcangeli pmd_t pmd; 149437a1c49aSAndrea Arcangeli struct mm_struct *mm = vma->vm_mm; 14955d190420SAaron Lu bool force_flush = false; 149637a1c49aSAndrea Arcangeli 149737a1c49aSAndrea Arcangeli if ((old_addr & ~HPAGE_PMD_MASK) || 149837a1c49aSAndrea Arcangeli (new_addr & ~HPAGE_PMD_MASK) || 1499bf8616d5SHugh Dickins old_end - old_addr < HPAGE_PMD_SIZE) 15004b471e88SKirill A. Shutemov return false; 150137a1c49aSAndrea Arcangeli 150237a1c49aSAndrea Arcangeli /* 150237a1c49aSAndrea Arcangeli * The destination pmd shouldn't be established, free_pgtables() 150437a1c49aSAndrea Arcangeli * should have released it. 150537a1c49aSAndrea Arcangeli */ 150637a1c49aSAndrea Arcangeli if (WARN_ON(!pmd_none(*new_pmd))) { 150737a1c49aSAndrea Arcangeli VM_BUG_ON(pmd_trans_huge(*new_pmd)); 15084b471e88SKirill A. Shutemov return false; 150937a1c49aSAndrea Arcangeli } 151037a1c49aSAndrea Arcangeli 1511bf929152SKirill A. Shutemov /* 1512bf929152SKirill A.
Shutemov * We don't have to worry about the ordering of src and dst 1513bf929152SKirill A. Shutemov * ptlocks because exclusive mmap_sem prevents deadlock. 1514bf929152SKirill A. Shutemov */ 1515b6ec57f4SKirill A. Shutemov old_ptl = __pmd_trans_huge_lock(old_pmd, vma); 1516b6ec57f4SKirill A. Shutemov if (old_ptl) { 1517bf929152SKirill A. Shutemov new_ptl = pmd_lockptr(mm, new_pmd); 1518bf929152SKirill A. Shutemov if (new_ptl != old_ptl) 1519bf929152SKirill A. Shutemov spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); 15208809aa2dSAneesh Kumar K.V pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); 1521a2ce2666SAaron Lu if (pmd_present(pmd) && pmd_dirty(pmd)) 1522a2ce2666SAaron Lu force_flush = true; 152337a1c49aSAndrea Arcangeli VM_BUG_ON(!pmd_none(*new_pmd)); 15243592806cSKirill A. Shutemov 15251dd38b6cSAneesh Kumar K.V if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) { 1526b3084f4dSAneesh Kumar K.V pgtable_t pgtable; 15273592806cSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); 15283592806cSKirill A. Shutemov pgtable_trans_huge_deposit(mm, new_pmd, pgtable); 15293592806cSKirill A. Shutemov } 1530b3084f4dSAneesh Kumar K.V set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); 1531b3084f4dSAneesh Kumar K.V if (new_ptl != old_ptl) 1532b3084f4dSAneesh Kumar K.V spin_unlock(new_ptl); 15335d190420SAaron Lu if (force_flush) 15345d190420SAaron Lu flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); 15355d190420SAaron Lu else 15365d190420SAaron Lu *need_flush = true; 1537bf929152SKirill A. Shutemov spin_unlock(old_ptl); 15384b471e88SKirill A. Shutemov return true; 153937a1c49aSAndrea Arcangeli } 15404b471e88SKirill A. Shutemov return false; 154137a1c49aSAndrea Arcangeli } 154237a1c49aSAndrea Arcangeli 1543f123d74aSMel Gorman /* 1544f123d74aSMel Gorman * Returns 1545f123d74aSMel Gorman * - 0 if PMD could not be locked 1546f123d74aSMel Gorman * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary 1547f123d74aSMel Gorman * - HPAGE_PMD_NR if protections changed and TLB flush necessary 1548f123d74aSMel Gorman */ 1549cd7548abSJohannes Weiner int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 1550e944fd67SMel Gorman unsigned long addr, pgprot_t newprot, int prot_numa) 1551cd7548abSJohannes Weiner { 1552cd7548abSJohannes Weiner struct mm_struct *mm = vma->vm_mm; 1553bf929152SKirill A. Shutemov spinlock_t *ptl; 1554cd7548abSJohannes Weiner int ret = 0; 1555cd7548abSJohannes Weiner 1556b6ec57f4SKirill A. Shutemov ptl = __pmd_trans_huge_lock(pmd, vma); 1557b6ec57f4SKirill A. Shutemov if (ptl) { 1558cd7548abSJohannes Weiner pmd_t entry; 1559b191f9b1SMel Gorman bool preserve_write = prot_numa && pmd_write(*pmd); 1560ba68bc01SMel Gorman ret = 1; 1561e944fd67SMel Gorman 1562e944fd67SMel Gorman /* 1563e944fd67SMel Gorman * Avoid trapping faults against the zero page. The read-only 1564e944fd67SMel Gorman * data is likely to be read-cached on the local CPU and 1565e944fd67SMel Gorman * local/remote hits to the zero page are not interesting.
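 *
 * For illustration, a userspace sketch (not code from this file): a
 * read of untouched anonymous memory can be satisfied by the shared
 * huge zero page, so a hinting fault on it cannot improve placement --
 * every node reads the same zeroes:
 *
 *	char *p = mmap(NULL, 2UL << 20, PROT_READ,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	char x = p[0];          -- may install the huge zero page pmd
 *
 * Hence the is_huge_zero_pmd() bail-out just below for prot_numa.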
1566e944fd67SMel Gorman */ 1567e944fd67SMel Gorman if (prot_numa && is_huge_zero_pmd(*pmd)) { 1568e944fd67SMel Gorman spin_unlock(ptl); 1569ba68bc01SMel Gorman return ret; 1570e944fd67SMel Gorman } 1571e944fd67SMel Gorman 157210c1045fSMel Gorman if (!prot_numa || !pmd_protnone(*pmd)) { 15738809aa2dSAneesh Kumar K.V entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd); 1574cd7548abSJohannes Weiner entry = pmd_modify(entry, newprot); 1575b191f9b1SMel Gorman if (preserve_write) 1576b191f9b1SMel Gorman entry = pmd_mkwrite(entry); 1577f123d74aSMel Gorman ret = HPAGE_PMD_NR; 157856eecdb9SAneesh Kumar K.V set_pmd_at(mm, addr, pmd, entry); 1579b237adedSKirill A. Shutemov BUG_ON(vma_is_anonymous(vma) && !preserve_write && 1580b237adedSKirill A. Shutemov pmd_write(entry)); 158110c1045fSMel Gorman } 1582bf929152SKirill A. Shutemov spin_unlock(ptl); 1583cd7548abSJohannes Weiner } 1584cd7548abSJohannes Weiner 1585cd7548abSJohannes Weiner return ret; 1586cd7548abSJohannes Weiner } 1587cd7548abSJohannes Weiner 1588025c5b24SNaoya Horiguchi /* 15898f19b0c0SHuang Ying * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise. 1590025c5b24SNaoya Horiguchi * 15918f19b0c0SHuang Ying * Note that if it returns page table lock pointer, this routine returns without 15928f19b0c0SHuang Ying * unlocking page table lock. So callers must unlock it. 1593025c5b24SNaoya Horiguchi */ 1594b6ec57f4SKirill A. Shutemov spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) 1595025c5b24SNaoya Horiguchi { 1596b6ec57f4SKirill A. Shutemov spinlock_t *ptl; 1597b6ec57f4SKirill A. Shutemov ptl = pmd_lock(vma->vm_mm, pmd); 15985c7fb56eSDan Williams if (likely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd))) 1599b6ec57f4SKirill A. Shutemov return ptl; 1600b6ec57f4SKirill A. Shutemov spin_unlock(ptl); 1601b6ec57f4SKirill A. Shutemov return NULL; 1602025c5b24SNaoya Horiguchi } 1603025c5b24SNaoya Horiguchi 1604eef1b3baSKirill A. Shutemov static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, 1605eef1b3baSKirill A. Shutemov unsigned long haddr, pmd_t *pmd) 1606eef1b3baSKirill A. Shutemov { 1607eef1b3baSKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 1608eef1b3baSKirill A. Shutemov pgtable_t pgtable; 1609eef1b3baSKirill A. Shutemov pmd_t _pmd; 1610eef1b3baSKirill A. Shutemov int i; 1611eef1b3baSKirill A. Shutemov 1612eef1b3baSKirill A. Shutemov /* leave pmd empty until pte is filled */ 1613eef1b3baSKirill A. Shutemov pmdp_huge_clear_flush_notify(vma, haddr, pmd); 1614eef1b3baSKirill A. Shutemov 1615eef1b3baSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1616eef1b3baSKirill A. Shutemov pmd_populate(mm, &_pmd, pgtable); 1617eef1b3baSKirill A. Shutemov 1618eef1b3baSKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 1619eef1b3baSKirill A. Shutemov pte_t *pte, entry; 1620eef1b3baSKirill A. Shutemov entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); 1621eef1b3baSKirill A. Shutemov entry = pte_mkspecial(entry); 1622eef1b3baSKirill A. Shutemov pte = pte_offset_map(&_pmd, haddr); 1623eef1b3baSKirill A. Shutemov VM_BUG_ON(!pte_none(*pte)); 1624eef1b3baSKirill A. Shutemov set_pte_at(mm, haddr, pte, entry); 1625eef1b3baSKirill A. Shutemov pte_unmap(pte); 1626eef1b3baSKirill A. Shutemov } 1627eef1b3baSKirill A. Shutemov smp_wmb(); /* make pte visible before pmd */ 1628eef1b3baSKirill A. Shutemov pmd_populate(mm, pmd, pgtable); 1629eef1b3baSKirill A. Shutemov } 1630eef1b3baSKirill A. Shutemov 1631eef1b3baSKirill A. 
Shutemov static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, 1632ba988280SKirill A. Shutemov unsigned long haddr, bool freeze) 1633eef1b3baSKirill A. Shutemov { 1634eef1b3baSKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 1635eef1b3baSKirill A. Shutemov struct page *page; 1636eef1b3baSKirill A. Shutemov pgtable_t pgtable; 1637eef1b3baSKirill A. Shutemov pmd_t _pmd; 1638804dd150SAndrea Arcangeli bool young, write, dirty, soft_dirty; 16392ac015e2SKirill A. Shutemov unsigned long addr; 1640eef1b3baSKirill A. Shutemov int i; 1641eef1b3baSKirill A. Shutemov 1642eef1b3baSKirill A. Shutemov VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); 1643eef1b3baSKirill A. Shutemov VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 1644eef1b3baSKirill A. Shutemov VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); 16455c7fb56eSDan Williams VM_BUG_ON(!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)); 1646eef1b3baSKirill A. Shutemov 1647eef1b3baSKirill A. Shutemov count_vm_event(THP_SPLIT_PMD); 1648eef1b3baSKirill A. Shutemov 1649d21b9e57SKirill A. Shutemov if (!vma_is_anonymous(vma)) { 1650d21b9e57SKirill A. Shutemov _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); 1651953c66c2SAneesh Kumar K.V /* 1652953c66c2SAneesh Kumar K.V * We are going to unmap this huge page. So 1653953c66c2SAneesh Kumar K.V * just go ahead and zap it 1654953c66c2SAneesh Kumar K.V */ 1655953c66c2SAneesh Kumar K.V if (arch_needs_pgtable_deposit()) 1656953c66c2SAneesh Kumar K.V zap_deposited_table(mm, pmd); 1657d21b9e57SKirill A. Shutemov if (vma_is_dax(vma)) 1658d21b9e57SKirill A. Shutemov return; 1659d21b9e57SKirill A. Shutemov page = pmd_page(_pmd); 1660d21b9e57SKirill A. Shutemov if (!PageReferenced(page) && pmd_young(_pmd)) 1661d21b9e57SKirill A. Shutemov SetPageReferenced(page); 1662d21b9e57SKirill A. Shutemov page_remove_rmap(page, true); 1663d21b9e57SKirill A. Shutemov put_page(page); 1664d21b9e57SKirill A. Shutemov add_mm_counter(mm, MM_FILEPAGES, -HPAGE_PMD_NR); 1665eef1b3baSKirill A. Shutemov return; 1666eef1b3baSKirill A. Shutemov } else if (is_huge_zero_pmd(*pmd)) { 1667eef1b3baSKirill A. Shutemov return __split_huge_zero_page_pmd(vma, haddr, pmd); 1668eef1b3baSKirill A. Shutemov } 1669eef1b3baSKirill A. Shutemov 1670eef1b3baSKirill A. Shutemov page = pmd_page(*pmd); 1671eef1b3baSKirill A. Shutemov VM_BUG_ON_PAGE(!page_count(page), page); 1672fe896d18SJoonsoo Kim page_ref_add(page, HPAGE_PMD_NR - 1); 1673eef1b3baSKirill A. Shutemov write = pmd_write(*pmd); 1674eef1b3baSKirill A. Shutemov young = pmd_young(*pmd); 1675b8d3c4c3SMinchan Kim dirty = pmd_dirty(*pmd); 1676804dd150SAndrea Arcangeli soft_dirty = pmd_soft_dirty(*pmd); 1677eef1b3baSKirill A. Shutemov 1678c777e2a8SAneesh Kumar K.V pmdp_huge_split_prepare(vma, haddr, pmd); 1679eef1b3baSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1680eef1b3baSKirill A. Shutemov pmd_populate(mm, &_pmd, pgtable); 1681eef1b3baSKirill A. Shutemov 16822ac015e2SKirill A. Shutemov for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { 1683eef1b3baSKirill A. Shutemov pte_t entry, *pte; 1684eef1b3baSKirill A. Shutemov /* 1685eef1b3baSKirill A. Shutemov * Note that NUMA hinting access restrictions are not 1686eef1b3baSKirill A. Shutemov * transferred to avoid any possibility of altering 1687eef1b3baSKirill A. Shutemov * permissions across VMAs. 1688eef1b3baSKirill A. Shutemov */ 1689ba988280SKirill A. Shutemov if (freeze) { 1690ba988280SKirill A. Shutemov swp_entry_t swp_entry; 1691ba988280SKirill A. 
Shutemov swp_entry = make_migration_entry(page + i, write); 1692ba988280SKirill A. Shutemov entry = swp_entry_to_pte(swp_entry); 1693804dd150SAndrea Arcangeli if (soft_dirty) 1694804dd150SAndrea Arcangeli entry = pte_swp_mksoft_dirty(entry); 1695ba988280SKirill A. Shutemov } else { 16966d2329f8SAndrea Arcangeli entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); 1697b8d3c4c3SMinchan Kim entry = maybe_mkwrite(entry, vma); 1698eef1b3baSKirill A. Shutemov if (!write) 1699eef1b3baSKirill A. Shutemov entry = pte_wrprotect(entry); 1700eef1b3baSKirill A. Shutemov if (!young) 1701eef1b3baSKirill A. Shutemov entry = pte_mkold(entry); 1702804dd150SAndrea Arcangeli if (soft_dirty) 1703804dd150SAndrea Arcangeli entry = pte_mksoft_dirty(entry); 1704ba988280SKirill A. Shutemov } 1705b8d3c4c3SMinchan Kim if (dirty) 1706b8d3c4c3SMinchan Kim SetPageDirty(page + i); 17072ac015e2SKirill A. Shutemov pte = pte_offset_map(&_pmd, addr); 1708eef1b3baSKirill A. Shutemov BUG_ON(!pte_none(*pte)); 17092ac015e2SKirill A. Shutemov set_pte_at(mm, addr, pte, entry); 1710eef1b3baSKirill A. Shutemov atomic_inc(&page[i]._mapcount); 1711eef1b3baSKirill A. Shutemov pte_unmap(pte); 1712eef1b3baSKirill A. Shutemov } 1713eef1b3baSKirill A. Shutemov 1714eef1b3baSKirill A. Shutemov /* 1715eef1b3baSKirill A. Shutemov * Set PG_double_map before dropping compound_mapcount to avoid 1716eef1b3baSKirill A. Shutemov * false-negative page_mapped(). 1717eef1b3baSKirill A. Shutemov */ 1718eef1b3baSKirill A. Shutemov if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) { 1719eef1b3baSKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) 1720eef1b3baSKirill A. Shutemov atomic_inc(&page[i]._mapcount); 1721eef1b3baSKirill A. Shutemov } 1722eef1b3baSKirill A. Shutemov 1723eef1b3baSKirill A. Shutemov if (atomic_add_negative(-1, compound_mapcount_ptr(page))) { 1724eef1b3baSKirill A. Shutemov /* Last compound_mapcount is gone. */ 172511fb9989SMel Gorman __dec_node_page_state(page, NR_ANON_THPS); 1726eef1b3baSKirill A. Shutemov if (TestClearPageDoubleMap(page)) { 1727eef1b3baSKirill A. Shutemov /* No need in mapcount reference anymore */ 1728eef1b3baSKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) 1729eef1b3baSKirill A. Shutemov atomic_dec(&page[i]._mapcount); 1730eef1b3baSKirill A. Shutemov } 1731eef1b3baSKirill A. Shutemov } 1732eef1b3baSKirill A. Shutemov 1733eef1b3baSKirill A. Shutemov smp_wmb(); /* make pte visible before pmd */ 1734e9b61f19SKirill A. Shutemov /* 1735e9b61f19SKirill A. Shutemov * Up to this point the pmd is present and huge and userland has 1736e9b61f19SKirill A. Shutemov * full access to the hugepage during the split (which happens in 1737e9b61f19SKirill A. Shutemov * place). If we overwrite the pmd with the not-huge version pointing 1738e9b61f19SKirill A. Shutemov * to the pte here (which of course we could if all CPUs were bug 1739e9b61f19SKirill A. Shutemov * free), userland could trigger a small page size TLB miss on the 1740e9b61f19SKirill A. Shutemov * small sized TLB while the hugepage TLB entry is still established in 1741e9b61f19SKirill A. Shutemov * the huge TLB. Some CPUs don't like that. 1742e9b61f19SKirill A. Shutemov * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum 1743e9b61f19SKirill A. Shutemov * 383 on page 93. Intel should be safe but it also warns that it's 1744e9b61f19SKirill A. Shutemov * only safe if the permission and cache attributes of the two entries 1745e9b61f19SKirill A. Shutemov * loaded in the two TLBs are identical (which should be the case here).
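 *
 * A sketch of the safe ordering the remainder of this comment
 * describes; it mirrors the pmdp_invalidate()/pmd_populate() pair at
 * the end of this function rather than introducing anything new:
 *
 *	pmdp_invalidate(vma, haddr, pmd);   -- huge pmd notpresent + TLB flush
 *	pmd_populate(mm, pmd, pgtable);     -- then publish the PTE table
 *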
1746e9b61f19SKirill A. Shutemov * But it is generally safer to never allow small and huge TLB entries 1747e9b61f19SKirill A. Shutemov * for the same virtual address to be loaded simultaneously. So instead 1748e9b61f19SKirill A. Shutemov * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the 1749e9b61f19SKirill A. Shutemov * current pmd notpresent (atomically because here the pmd_trans_huge 1750e9b61f19SKirill A. Shutemov * and pmd_trans_splitting must remain set at all times on the pmd 1751e9b61f19SKirill A. Shutemov * until the split is complete for this pmd), then we flush the SMP TLB 1752e9b61f19SKirill A. Shutemov * and finally we write the non-huge version of the pmd entry with 1753e9b61f19SKirill A. Shutemov * pmd_populate. 1754e9b61f19SKirill A. Shutemov */ 1755e9b61f19SKirill A. Shutemov pmdp_invalidate(vma, haddr, pmd); 1756eef1b3baSKirill A. Shutemov pmd_populate(mm, pmd, pgtable); 1757e9b61f19SKirill A. Shutemov 1758e9b61f19SKirill A. Shutemov if (freeze) { 17592ac015e2SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) { 1760e9b61f19SKirill A. Shutemov page_remove_rmap(page + i, false); 1761e9b61f19SKirill A. Shutemov put_page(page + i); 1762e9b61f19SKirill A. Shutemov } 1763e9b61f19SKirill A. Shutemov } 1764eef1b3baSKirill A. Shutemov } 1765eef1b3baSKirill A. Shutemov 1766eef1b3baSKirill A. Shutemov void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 176733f4751eSNaoya Horiguchi unsigned long address, bool freeze, struct page *page) 1768eef1b3baSKirill A. Shutemov { 1769eef1b3baSKirill A. Shutemov spinlock_t *ptl; 1770eef1b3baSKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 1771eef1b3baSKirill A. Shutemov unsigned long haddr = address & HPAGE_PMD_MASK; 1772eef1b3baSKirill A. Shutemov 1773eef1b3baSKirill A. Shutemov mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE); 1774eef1b3baSKirill A. Shutemov ptl = pmd_lock(mm, pmd); 177533f4751eSNaoya Horiguchi 177633f4751eSNaoya Horiguchi /* 177733f4751eSNaoya Horiguchi * If the caller asks to set up migration entries, we need a page to check 177833f4751eSNaoya Horiguchi * the pmd against. Otherwise we can end up replacing the wrong page. 177933f4751eSNaoya Horiguchi */ 178033f4751eSNaoya Horiguchi VM_BUG_ON(freeze && !page); 178133f4751eSNaoya Horiguchi if (page && page != pmd_page(*pmd)) 178233f4751eSNaoya Horiguchi goto out; 178333f4751eSNaoya Horiguchi 17845c7fb56eSDan Williams if (pmd_trans_huge(*pmd)) { 178533f4751eSNaoya Horiguchi page = pmd_page(*pmd); 1786e90309c9SKirill A. Shutemov if (PageMlocked(page)) 17875f737714SKirill A. Shutemov clear_page_mlock(page); 17885c7fb56eSDan Williams } else if (!pmd_devmap(*pmd)) 17895c7fb56eSDan Williams goto out; 1790fec89c10SKirill A. Shutemov __split_huge_pmd_locked(vma, pmd, haddr, freeze); 1791e90309c9SKirill A. Shutemov out: 1792eef1b3baSKirill A. Shutemov spin_unlock(ptl); 1793eef1b3baSKirill A. Shutemov mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE); 1794eef1b3baSKirill A. Shutemov } 1795eef1b3baSKirill A. Shutemov 1796fec89c10SKirill A. Shutemov void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, 1797fec89c10SKirill A. Shutemov bool freeze, struct page *page) 179894fcc585SAndrea Arcangeli { 1799f72e7dcdSHugh Dickins pgd_t *pgd; 1800f72e7dcdSHugh Dickins pud_t *pud; 180194fcc585SAndrea Arcangeli pmd_t *pmd; 180294fcc585SAndrea Arcangeli 180378ddc534SKirill A.
Shutemov pgd = pgd_offset(vma->vm_mm, address); 1804f72e7dcdSHugh Dickins if (!pgd_present(*pgd)) 1805f72e7dcdSHugh Dickins return; 1806f72e7dcdSHugh Dickins 1807f72e7dcdSHugh Dickins pud = pud_offset(pgd, address); 1808f72e7dcdSHugh Dickins if (!pud_present(*pud)) 1809f72e7dcdSHugh Dickins return; 1810f72e7dcdSHugh Dickins 1811f72e7dcdSHugh Dickins pmd = pmd_offset(pud, address); 1812fec89c10SKirill A. Shutemov 181333f4751eSNaoya Horiguchi __split_huge_pmd(vma, pmd, address, freeze, page); 181494fcc585SAndrea Arcangeli } 181594fcc585SAndrea Arcangeli 1816e1b9996bSKirill A. Shutemov void vma_adjust_trans_huge(struct vm_area_struct *vma, 181794fcc585SAndrea Arcangeli unsigned long start, 181894fcc585SAndrea Arcangeli unsigned long end, 181994fcc585SAndrea Arcangeli long adjust_next) 182094fcc585SAndrea Arcangeli { 182194fcc585SAndrea Arcangeli /* 182294fcc585SAndrea Arcangeli * If the new start address isn't hpage aligned and it could 182394fcc585SAndrea Arcangeli * previously contain an hugepage: check if we need to split 182494fcc585SAndrea Arcangeli * an huge pmd. 182594fcc585SAndrea Arcangeli */ 182694fcc585SAndrea Arcangeli if (start & ~HPAGE_PMD_MASK && 182794fcc585SAndrea Arcangeli (start & HPAGE_PMD_MASK) >= vma->vm_start && 182894fcc585SAndrea Arcangeli (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) 1829fec89c10SKirill A. Shutemov split_huge_pmd_address(vma, start, false, NULL); 183094fcc585SAndrea Arcangeli 183194fcc585SAndrea Arcangeli /* 183294fcc585SAndrea Arcangeli * If the new end address isn't hpage aligned and it could 183394fcc585SAndrea Arcangeli * previously contain an hugepage: check if we need to split 183494fcc585SAndrea Arcangeli * an huge pmd. 183594fcc585SAndrea Arcangeli */ 183694fcc585SAndrea Arcangeli if (end & ~HPAGE_PMD_MASK && 183794fcc585SAndrea Arcangeli (end & HPAGE_PMD_MASK) >= vma->vm_start && 183894fcc585SAndrea Arcangeli (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) 1839fec89c10SKirill A. Shutemov split_huge_pmd_address(vma, end, false, NULL); 184094fcc585SAndrea Arcangeli 184194fcc585SAndrea Arcangeli /* 184294fcc585SAndrea Arcangeli * If we're also updating the vma->vm_next->vm_start, if the new 184394fcc585SAndrea Arcangeli * vm_next->vm_start isn't page aligned and it could previously 184494fcc585SAndrea Arcangeli * contain an hugepage: check if we need to split an huge pmd. 184594fcc585SAndrea Arcangeli */ 184694fcc585SAndrea Arcangeli if (adjust_next > 0) { 184794fcc585SAndrea Arcangeli struct vm_area_struct *next = vma->vm_next; 184894fcc585SAndrea Arcangeli unsigned long nstart = next->vm_start; 184994fcc585SAndrea Arcangeli nstart += adjust_next << PAGE_SHIFT; 185094fcc585SAndrea Arcangeli if (nstart & ~HPAGE_PMD_MASK && 185194fcc585SAndrea Arcangeli (nstart & HPAGE_PMD_MASK) >= next->vm_start && 185294fcc585SAndrea Arcangeli (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end) 1853fec89c10SKirill A. Shutemov split_huge_pmd_address(next, nstart, false, NULL); 185494fcc585SAndrea Arcangeli } 185594fcc585SAndrea Arcangeli } 1856e9b61f19SKirill A. Shutemov 1857fec89c10SKirill A. Shutemov static void freeze_page(struct page *page) 1858e9b61f19SKirill A. Shutemov { 1859baa355fdSKirill A. Shutemov enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS | 1860baa355fdSKirill A. Shutemov TTU_RMAP_LOCKED; 1861fec89c10SKirill A. Shutemov int i, ret; 1862e9b61f19SKirill A. Shutemov 1863e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(!PageHead(page), page); 1864e9b61f19SKirill A. Shutemov 1865baa355fdSKirill A. 
Shutemov if (PageAnon(page)) 1866baa355fdSKirill A. Shutemov ttu_flags |= TTU_MIGRATION; 1867baa355fdSKirill A. Shutemov 1868fec89c10SKirill A. Shutemov /* We only need TTU_SPLIT_HUGE_PMD once */ 1869fec89c10SKirill A. Shutemov ret = try_to_unmap(page, ttu_flags | TTU_SPLIT_HUGE_PMD); 1870fec89c10SKirill A. Shutemov for (i = 1; !ret && i < HPAGE_PMD_NR; i++) { 1871fec89c10SKirill A. Shutemov /* Cut short if the page is unmapped */ 1872fec89c10SKirill A. Shutemov if (page_count(page) == 1) 1873e9b61f19SKirill A. Shutemov return; 1874bd56086fSKirill A. Shutemov 1875fec89c10SKirill A. Shutemov ret = try_to_unmap(page + i, ttu_flags); 1876fec89c10SKirill A. Shutemov } 1877baa355fdSKirill A. Shutemov VM_BUG_ON_PAGE(ret, page + i - 1); 1878bd56086fSKirill A. Shutemov } 1879bd56086fSKirill A. Shutemov 1880fec89c10SKirill A. Shutemov static void unfreeze_page(struct page *page) 1881e9b61f19SKirill A. Shutemov { 1882fec89c10SKirill A. Shutemov int i; 1883e9b61f19SKirill A. Shutemov 1884fec89c10SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) 1885fec89c10SKirill A. Shutemov remove_migration_ptes(page + i, page + i, true); 1886e9b61f19SKirill A. Shutemov } 1887e9b61f19SKirill A. Shutemov 18888df651c7SKirill A. Shutemov static void __split_huge_page_tail(struct page *head, int tail, 1889e9b61f19SKirill A. Shutemov struct lruvec *lruvec, struct list_head *list) 1890e9b61f19SKirill A. Shutemov { 1891e9b61f19SKirill A. Shutemov struct page *page_tail = head + tail; 1892e9b61f19SKirill A. Shutemov 18938df651c7SKirill A. Shutemov VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail); 1894fe896d18SJoonsoo Kim VM_BUG_ON_PAGE(page_ref_count(page_tail) != 0, page_tail); 1895e9b61f19SKirill A. Shutemov 1896e9b61f19SKirill A. Shutemov /* 18970139aa7bSJoonsoo Kim * tail_page->_refcount is zero and not changing from under us. But 1898e9b61f19SKirill A. Shutemov * get_page_unless_zero() may be running from under us on the 1899baa355fdSKirill A. Shutemov * tail_page. If we used atomic_set() below instead of atomic_inc() or 1900baa355fdSKirill A. Shutemov * atomic_add(), we would then run atomic_set() concurrently with 1901e9b61f19SKirill A. Shutemov * get_page_unless_zero(), and atomic_set() is implemented in C not 1902e9b61f19SKirill A. Shutemov * using locked ops. spin_unlock on x86 sometimes uses locked ops 1903e9b61f19SKirill A. Shutemov * because of PPro errata 66, 92, so unless somebody can guarantee 1904e9b61f19SKirill A. Shutemov * atomic_set() here would be safe on all archs (and not only on x86), 1905baa355fdSKirill A. Shutemov * it's safer to use atomic_inc()/atomic_add(). 1906e9b61f19SKirill A. Shutemov */ 1907baa355fdSKirill A. Shutemov if (PageAnon(head)) { 1908fe896d18SJoonsoo Kim page_ref_inc(page_tail); 1909baa355fdSKirill A. Shutemov } else { 1910baa355fdSKirill A. Shutemov /* Additional pin to radix tree */ 1911baa355fdSKirill A. Shutemov page_ref_add(page_tail, 2); 1912baa355fdSKirill A. Shutemov } 1913e9b61f19SKirill A. Shutemov 1914e9b61f19SKirill A. Shutemov page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 1915e9b61f19SKirill A. Shutemov page_tail->flags |= (head->flags & 1916e9b61f19SKirill A. Shutemov ((1L << PG_referenced) | 1917e9b61f19SKirill A. Shutemov (1L << PG_swapbacked) | 1918e9b61f19SKirill A. Shutemov (1L << PG_mlocked) | 1919e9b61f19SKirill A. Shutemov (1L << PG_uptodate) | 1920e9b61f19SKirill A. Shutemov (1L << PG_active) | 1921e9b61f19SKirill A.
Shutemov (1L << PG_locked) | 1922b8d3c4c3SMinchan Kim (1L << PG_unevictable) | 1923b8d3c4c3SMinchan Kim (1L << PG_dirty))); 1924e9b61f19SKirill A. Shutemov 1925e9b61f19SKirill A. Shutemov /* 1926e9b61f19SKirill A. Shutemov * After clearing PageTail the gup refcount can be released. 1927e9b61f19SKirill A. Shutemov * Page flags also must be visible before we make the page non-compound. 1928e9b61f19SKirill A. Shutemov */ 1929e9b61f19SKirill A. Shutemov smp_wmb(); 1930e9b61f19SKirill A. Shutemov 1931e9b61f19SKirill A. Shutemov clear_compound_head(page_tail); 1932e9b61f19SKirill A. Shutemov 1933e9b61f19SKirill A. Shutemov if (page_is_young(head)) 1934e9b61f19SKirill A. Shutemov set_page_young(page_tail); 1935e9b61f19SKirill A. Shutemov if (page_is_idle(head)) 1936e9b61f19SKirill A. Shutemov set_page_idle(page_tail); 1937e9b61f19SKirill A. Shutemov 1938e9b61f19SKirill A. Shutemov /* ->mapping in first tail page is compound_mapcount */ 19399a982250SKirill A. Shutemov VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING, 1940e9b61f19SKirill A. Shutemov page_tail); 1941e9b61f19SKirill A. Shutemov page_tail->mapping = head->mapping; 1942e9b61f19SKirill A. Shutemov 1943e9b61f19SKirill A. Shutemov page_tail->index = head->index + tail; 1944e9b61f19SKirill A. Shutemov page_cpupid_xchg_last(page_tail, page_cpupid_last(head)); 1945e9b61f19SKirill A. Shutemov lru_add_page_tail(head, page_tail, lruvec, list); 1946e9b61f19SKirill A. Shutemov } 1947e9b61f19SKirill A. Shutemov 1948baa355fdSKirill A. Shutemov static void __split_huge_page(struct page *page, struct list_head *list, 1949baa355fdSKirill A. Shutemov unsigned long flags) 1950e9b61f19SKirill A. Shutemov { 1951e9b61f19SKirill A. Shutemov struct page *head = compound_head(page); 1952e9b61f19SKirill A. Shutemov struct zone *zone = page_zone(head); 1953e9b61f19SKirill A. Shutemov struct lruvec *lruvec; 1954baa355fdSKirill A. Shutemov pgoff_t end = -1; 19558df651c7SKirill A. Shutemov int i; 1956e9b61f19SKirill A. Shutemov 1957599d0c95SMel Gorman lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat); 1958e9b61f19SKirill A. Shutemov 1959e9b61f19SKirill A. Shutemov /* complete memcg works before add pages to LRU */ 1960e9b61f19SKirill A. Shutemov mem_cgroup_split_huge_fixup(head); 1961e9b61f19SKirill A. Shutemov 1962baa355fdSKirill A. Shutemov if (!PageAnon(page)) 1963baa355fdSKirill A. Shutemov end = DIV_ROUND_UP(i_size_read(head->mapping->host), PAGE_SIZE); 1964baa355fdSKirill A. Shutemov 1965baa355fdSKirill A. Shutemov for (i = HPAGE_PMD_NR - 1; i >= 1; i--) { 19668df651c7SKirill A. Shutemov __split_huge_page_tail(head, i, lruvec, list); 1967baa355fdSKirill A. Shutemov /* Some pages can be beyond i_size: drop them from page cache */ 1968baa355fdSKirill A. Shutemov if (head[i].index >= end) { 1969baa355fdSKirill A. Shutemov __ClearPageDirty(head + i); 1970baa355fdSKirill A. Shutemov __delete_from_page_cache(head + i, NULL); 1971800d8c63SKirill A. Shutemov if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head)) 1972800d8c63SKirill A. Shutemov shmem_uncharge(head->mapping->host, 1); 1973baa355fdSKirill A. Shutemov put_page(head + i); 1974baa355fdSKirill A. Shutemov } 1975baa355fdSKirill A. Shutemov } 1976e9b61f19SKirill A. Shutemov 1977e9b61f19SKirill A. Shutemov ClearPageCompound(head); 1978baa355fdSKirill A. Shutemov /* See comment in __split_huge_page_tail() */ 1979baa355fdSKirill A. Shutemov if (PageAnon(head)) { 1980baa355fdSKirill A. Shutemov page_ref_inc(head); 1981baa355fdSKirill A. Shutemov } else { 1982baa355fdSKirill A. 
Shutemov /* Additional pin to radix tree */ 1983baa355fdSKirill A. Shutemov page_ref_add(head, 2); 1984baa355fdSKirill A. Shutemov spin_unlock(&head->mapping->tree_lock); 1985baa355fdSKirill A. Shutemov } 1986baa355fdSKirill A. Shutemov 1987a52633d8SMel Gorman spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags); 1988e9b61f19SKirill A. Shutemov 1989fec89c10SKirill A. Shutemov unfreeze_page(head); 1990e9b61f19SKirill A. Shutemov 1991e9b61f19SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) { 1992e9b61f19SKirill A. Shutemov struct page *subpage = head + i; 1993e9b61f19SKirill A. Shutemov if (subpage == page) 1994e9b61f19SKirill A. Shutemov continue; 1995e9b61f19SKirill A. Shutemov unlock_page(subpage); 1996e9b61f19SKirill A. Shutemov 1997e9b61f19SKirill A. Shutemov /* 1998e9b61f19SKirill A. Shutemov * Subpages may be freed if there wasn't any mapping 1999e9b61f19SKirill A. Shutemov * like when add_to_swap() is running on a lru page that 2000e9b61f19SKirill A. Shutemov * had its mapping zapped. And freeing these pages 2001e9b61f19SKirill A. Shutemov * requires taking the lru_lock so we do the put_page 2002e9b61f19SKirill A. Shutemov * of the tail pages after the split is complete. 2003e9b61f19SKirill A. Shutemov */ 2004e9b61f19SKirill A. Shutemov put_page(subpage); 2005e9b61f19SKirill A. Shutemov } 2006e9b61f19SKirill A. Shutemov } 2007e9b61f19SKirill A. Shutemov 2008b20ce5e0SKirill A. Shutemov int total_mapcount(struct page *page) 2009b20ce5e0SKirill A. Shutemov { 2010dd78feddSKirill A. Shutemov int i, compound, ret; 2011b20ce5e0SKirill A. Shutemov 2012b20ce5e0SKirill A. Shutemov VM_BUG_ON_PAGE(PageTail(page), page); 2013b20ce5e0SKirill A. Shutemov 2014b20ce5e0SKirill A. Shutemov if (likely(!PageCompound(page))) 2015b20ce5e0SKirill A. Shutemov return atomic_read(&page->_mapcount) + 1; 2016b20ce5e0SKirill A. Shutemov 2017dd78feddSKirill A. Shutemov compound = compound_mapcount(page); 2018b20ce5e0SKirill A. Shutemov if (PageHuge(page)) 2019dd78feddSKirill A. Shutemov return compound; 2020dd78feddSKirill A. Shutemov ret = compound; 2021b20ce5e0SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) 2022b20ce5e0SKirill A. Shutemov ret += atomic_read(&page[i]._mapcount) + 1; 2023dd78feddSKirill A. Shutemov /* File pages have compound_mapcount included in _mapcount */ 2024dd78feddSKirill A. Shutemov if (!PageAnon(page)) 2025dd78feddSKirill A. Shutemov return ret - compound * HPAGE_PMD_NR; 2026b20ce5e0SKirill A. Shutemov if (PageDoubleMap(page)) 2027b20ce5e0SKirill A. Shutemov ret -= HPAGE_PMD_NR; 2028b20ce5e0SKirill A. Shutemov return ret; 2029b20ce5e0SKirill A. Shutemov } 2030b20ce5e0SKirill A. Shutemov 2031e9b61f19SKirill A. Shutemov /* 20326d0a07edSAndrea Arcangeli * This calculates accurately how many mappings a transparent hugepage 20336d0a07edSAndrea Arcangeli * has (unlike page_mapcount() which isn't fully accurate). This full 20346d0a07edSAndrea Arcangeli * accuracy is primarily needed to know if copy-on-write faults can 20356d0a07edSAndrea Arcangeli * reuse the page and change the mapping to read-write instead of 20366d0a07edSAndrea Arcangeli * copying them. At the same time this returns the total_mapcount too. 20376d0a07edSAndrea Arcangeli * 20386d0a07edSAndrea Arcangeli * The function returns the highest mapcount any one of the subpages
If the return value is one, even if different processes are 20406d0a07edSAndrea Arcangeli * mapping different subpages of the transparent hugepage, they can 20416d0a07edSAndrea Arcangeli * all reuse it, because each process is reusing a different subpage. 20426d0a07edSAndrea Arcangeli * 20436d0a07edSAndrea Arcangeli * The total_mapcount is instead counting all virtual mappings of the 20446d0a07edSAndrea Arcangeli * subpages. If the total_mapcount is equal to "one", it tells the 20456d0a07edSAndrea Arcangeli * caller all mappings belong to the same "mm" and in turn the 20466d0a07edSAndrea Arcangeli * anon_vma of the transparent hugepage can become the vma->anon_vma 20476d0a07edSAndrea Arcangeli * local one as no other process may be mapping any of the subpages. 20486d0a07edSAndrea Arcangeli * 20496d0a07edSAndrea Arcangeli * It would be more accurate to replace page_mapcount() with 20506d0a07edSAndrea Arcangeli * page_trans_huge_mapcount(), however we only use 20516d0a07edSAndrea Arcangeli * page_trans_huge_mapcount() in the copy-on-write faults where we 20526d0a07edSAndrea Arcangeli * need full accuracy to avoid breaking page pinning, because 20536d0a07edSAndrea Arcangeli * page_trans_huge_mapcount() is slower than page_mapcount(). 20546d0a07edSAndrea Arcangeli */ 20556d0a07edSAndrea Arcangeli int page_trans_huge_mapcount(struct page *page, int *total_mapcount) 20566d0a07edSAndrea Arcangeli { 20576d0a07edSAndrea Arcangeli int i, ret, _total_mapcount, mapcount; 20586d0a07edSAndrea Arcangeli 20596d0a07edSAndrea Arcangeli /* hugetlbfs shouldn't call it */ 20606d0a07edSAndrea Arcangeli VM_BUG_ON_PAGE(PageHuge(page), page); 20616d0a07edSAndrea Arcangeli 20626d0a07edSAndrea Arcangeli if (likely(!PageTransCompound(page))) { 20636d0a07edSAndrea Arcangeli mapcount = atomic_read(&page->_mapcount) + 1; 20646d0a07edSAndrea Arcangeli if (total_mapcount) 20656d0a07edSAndrea Arcangeli *total_mapcount = mapcount; 20666d0a07edSAndrea Arcangeli return mapcount; 20676d0a07edSAndrea Arcangeli } 20686d0a07edSAndrea Arcangeli 20696d0a07edSAndrea Arcangeli page = compound_head(page); 20706d0a07edSAndrea Arcangeli 20716d0a07edSAndrea Arcangeli _total_mapcount = ret = 0; 20726d0a07edSAndrea Arcangeli for (i = 0; i < HPAGE_PMD_NR; i++) { 20736d0a07edSAndrea Arcangeli mapcount = atomic_read(&page[i]._mapcount) + 1; 20746d0a07edSAndrea Arcangeli ret = max(ret, mapcount); 20756d0a07edSAndrea Arcangeli _total_mapcount += mapcount; 20766d0a07edSAndrea Arcangeli } 20776d0a07edSAndrea Arcangeli if (PageDoubleMap(page)) { 20786d0a07edSAndrea Arcangeli ret -= 1; 20796d0a07edSAndrea Arcangeli _total_mapcount -= HPAGE_PMD_NR; 20806d0a07edSAndrea Arcangeli } 20816d0a07edSAndrea Arcangeli mapcount = compound_mapcount(page); 20826d0a07edSAndrea Arcangeli ret += mapcount; 20836d0a07edSAndrea Arcangeli _total_mapcount += mapcount; 20846d0a07edSAndrea Arcangeli if (total_mapcount) 20856d0a07edSAndrea Arcangeli *total_mapcount = _total_mapcount; 20866d0a07edSAndrea Arcangeli return ret; 20876d0a07edSAndrea Arcangeli } 20886d0a07edSAndrea Arcangeli 20896d0a07edSAndrea Arcangeli /* 2090e9b61f19SKirill A. Shutemov * This function splits huge page into normal pages. @page can point to any 2091e9b61f19SKirill A. Shutemov * subpage of huge page to split. Split doesn't change the position of @page. 2092e9b61f19SKirill A. Shutemov * 2093e9b61f19SKirill A. Shutemov * Only caller must hold pin on the @page, otherwise split fails with -EBUSY. 2094e9b61f19SKirill A. Shutemov * The huge page must be locked. 2095e9b61f19SKirill A. 
/* 2090e9b61f19SKirill A. Shutemov * This function splits a huge page into normal pages. @page can point to any 2091e9b61f19SKirill A. Shutemov * subpage of the huge page to split. A split doesn't change the position of @page. 2092e9b61f19SKirill A. Shutemov * 2093e9b61f19SKirill A. Shutemov * The caller must hold a pin on the @page; otherwise the split fails with -EBUSY. 2094e9b61f19SKirill A. Shutemov * The huge page must be locked. 2095e9b61f19SKirill A. Shutemov * 2096e9b61f19SKirill A. Shutemov * If @list is null, tail pages will be added to the LRU list; otherwise, to @list. 2097e9b61f19SKirill A. Shutemov * 2098e9b61f19SKirill A. Shutemov * Both head page and tail pages will inherit mapping, flags, and so on from 2099e9b61f19SKirill A. Shutemov * the hugepage. 2100e9b61f19SKirill A. Shutemov * 2101e9b61f19SKirill A. Shutemov * The GUP pin and PG_locked are transferred to @page. The remaining subpages can be freed if 2102e9b61f19SKirill A. Shutemov * they are not mapped. 2103e9b61f19SKirill A. Shutemov * 2104e9b61f19SKirill A. Shutemov * Returns 0 if the hugepage is split successfully. 2105e9b61f19SKirill A. Shutemov * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under 2106e9b61f19SKirill A. Shutemov * us. 2107e9b61f19SKirill A. Shutemov */ 2108e9b61f19SKirill A. Shutemov int split_huge_page_to_list(struct page *page, struct list_head *list) 2109e9b61f19SKirill A. Shutemov { 2110e9b61f19SKirill A. Shutemov struct page *head = compound_head(page); 2111a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(page_to_nid(head)); 2112baa355fdSKirill A. Shutemov struct anon_vma *anon_vma = NULL; 2113baa355fdSKirill A. Shutemov struct address_space *mapping = NULL; 2114baa355fdSKirill A. Shutemov int count, mapcount, extra_pins, ret; 2115d9654322SKirill A. Shutemov bool mlocked; 21160b9b6fffSKirill A. Shutemov unsigned long flags; 2117e9b61f19SKirill A. Shutemov 2118e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(is_huge_zero_page(page), page); 2119e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(!PageLocked(page), page); 2120e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(!PageSwapBacked(page), page); 2121e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(!PageCompound(page), page); 2122e9b61f19SKirill A. Shutemov 2123baa355fdSKirill A. Shutemov if (PageAnon(head)) { 2124e9b61f19SKirill A. Shutemov /* 2125baa355fdSKirill A. Shutemov * The caller does not necessarily hold an mmap_sem that would 2126baa355fdSKirill A. Shutemov * prevent the anon_vma from disappearing, so we first take a 2127baa355fdSKirill A. Shutemov * reference to it and then lock the anon_vma for write. This 2128baa355fdSKirill A. Shutemov * is similar to page_lock_anon_vma_read except the write lock 2129baa355fdSKirill A. Shutemov * is taken to serialise against parallel split or collapse 2130baa355fdSKirill A. Shutemov * operations. 2131e9b61f19SKirill A. Shutemov */ 2132e9b61f19SKirill A. Shutemov anon_vma = page_get_anon_vma(head); 2133e9b61f19SKirill A. Shutemov if (!anon_vma) { 2134e9b61f19SKirill A. Shutemov ret = -EBUSY; 2135e9b61f19SKirill A. Shutemov goto out; 2136e9b61f19SKirill A. Shutemov } 2137baa355fdSKirill A. Shutemov extra_pins = 0; 2138baa355fdSKirill A. Shutemov mapping = NULL; 2139e9b61f19SKirill A. Shutemov anon_vma_lock_write(anon_vma); 2140baa355fdSKirill A. Shutemov } else { 2141baa355fdSKirill A. Shutemov mapping = head->mapping; 2142baa355fdSKirill A. Shutemov 2143baa355fdSKirill A. Shutemov /* Truncated? */ 2144baa355fdSKirill A. Shutemov if (!mapping) { 2145baa355fdSKirill A. Shutemov ret = -EBUSY; 2146baa355fdSKirill A. Shutemov goto out; 2147baa355fdSKirill A. Shutemov } 2148baa355fdSKirill A. Shutemov 2149baa355fdSKirill A. Shutemov /* Additional pins from radix tree */ 2150baa355fdSKirill A. Shutemov extra_pins = HPAGE_PMD_NR; 2151baa355fdSKirill A. Shutemov anon_vma = NULL; 2152baa355fdSKirill A. Shutemov i_mmap_lock_read(mapping); 2153baa355fdSKirill A. Shutemov } 2154e9b61f19SKirill A. Shutemov 2155e9b61f19SKirill A. Shutemov /* 2156e9b61f19SKirill A.
Shutemov * Racy check whether we can split the page, before freeze_page() 2157e9b61f19SKirill A. Shutemov * splits the PMDs 2158e9b61f19SKirill A. Shutemov */ 2159baa355fdSKirill A. Shutemov if (total_mapcount(head) != page_count(head) - extra_pins - 1) { 2160e9b61f19SKirill A. Shutemov ret = -EBUSY; 2161e9b61f19SKirill A. Shutemov goto out_unlock; 2162e9b61f19SKirill A. Shutemov } 2163e9b61f19SKirill A. Shutemov 2164d9654322SKirill A. Shutemov mlocked = PageMlocked(page); 2165fec89c10SKirill A. Shutemov freeze_page(head); 2166e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(compound_mapcount(head), head); 2167e9b61f19SKirill A. Shutemov 2168d9654322SKirill A. Shutemov /* Make sure the page is not on a per-CPU pagevec, as that takes a pin */ 2169d9654322SKirill A. Shutemov if (mlocked) 2170d9654322SKirill A. Shutemov lru_add_drain(); 2171d9654322SKirill A. Shutemov 2172baa355fdSKirill A. Shutemov /* prevent PageLRU from going away under us, and freeze the lru stats */ 2173a52633d8SMel Gorman spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags); 2174baa355fdSKirill A. Shutemov 2175baa355fdSKirill A. Shutemov if (mapping) { 2176baa355fdSKirill A. Shutemov void **pslot; 2177baa355fdSKirill A. Shutemov 2178baa355fdSKirill A. Shutemov spin_lock(&mapping->tree_lock); 2179baa355fdSKirill A. Shutemov pslot = radix_tree_lookup_slot(&mapping->page_tree, 2180baa355fdSKirill A. Shutemov page_index(head)); 2181baa355fdSKirill A. Shutemov /* 2182baa355fdSKirill A. Shutemov * Check that the head page is present in the radix tree. 2183baa355fdSKirill A. Shutemov * We assume all tail pages are present too, if the head is there. 2184baa355fdSKirill A. Shutemov */ 2185baa355fdSKirill A. Shutemov if (radix_tree_deref_slot_protected(pslot, 2186baa355fdSKirill A. Shutemov &mapping->tree_lock) != head) 2187baa355fdSKirill A. Shutemov goto fail; 2188baa355fdSKirill A. Shutemov } 2189baa355fdSKirill A. Shutemov 21900139aa7bSJoonsoo Kim /* Prevent deferred_split_scan() from touching ->_refcount */ 2191baa355fdSKirill A. Shutemov spin_lock(&pgdata->split_queue_lock); 2192e9b61f19SKirill A. Shutemov count = page_count(head); 2193e9b61f19SKirill A. Shutemov mapcount = total_mapcount(head); 2194baa355fdSKirill A. Shutemov if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) { 21959a982250SKirill A. Shutemov if (!list_empty(page_deferred_list(head))) { 2196a3d0a918SKirill A. Shutemov pgdata->split_queue_len--; 21979a982250SKirill A. Shutemov list_del(page_deferred_list(head)); 21989a982250SKirill A. Shutemov } 219965c45377SKirill A. Shutemov if (mapping) 220011fb9989SMel Gorman __dec_node_page_state(page, NR_SHMEM_THPS); 2201baa355fdSKirill A. Shutemov spin_unlock(&pgdata->split_queue_lock); 2202baa355fdSKirill A. Shutemov __split_huge_page(page, list, flags); 2203e9b61f19SKirill A. Shutemov ret = 0; 2204baa355fdSKirill A. Shutemov } else { 2205baa355fdSKirill A. Shutemov if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) { 2206e9b61f19SKirill A. Shutemov pr_alert("total_mapcount: %d, page_count(): %d\n", 2207e9b61f19SKirill A. Shutemov mapcount, count); 2208e9b61f19SKirill A. Shutemov if (PageTail(page)) 2209e9b61f19SKirill A. Shutemov dump_page(head, NULL); 2210bd56086fSKirill A. Shutemov dump_page(page, "total_mapcount(head) > 0"); 2211e9b61f19SKirill A. Shutemov BUG(); 2212baa355fdSKirill A. Shutemov } 2213baa355fdSKirill A. Shutemov spin_unlock(&pgdata->split_queue_lock); 2214baa355fdSKirill A. Shutemov fail: if (mapping) 2215baa355fdSKirill A. Shutemov spin_unlock(&mapping->tree_lock); 2216a52633d8SMel Gorman spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags); 2217fec89c10SKirill A. Shutemov unfreeze_page(head); 2218e9b61f19SKirill A. Shutemov ret = -EBUSY; 2219e9b61f19SKirill A. Shutemov } 2220e9b61f19SKirill A. Shutemov 2221e9b61f19SKirill A. Shutemov out_unlock: 2222baa355fdSKirill A. Shutemov if (anon_vma) { 2223e9b61f19SKirill A. Shutemov anon_vma_unlock_write(anon_vma); 2224e9b61f19SKirill A. Shutemov put_anon_vma(anon_vma); 2225baa355fdSKirill A. Shutemov } 2226baa355fdSKirill A. Shutemov if (mapping) 2227baa355fdSKirill A. Shutemov i_mmap_unlock_read(mapping); 2228e9b61f19SKirill A. Shutemov out: 2229e9b61f19SKirill A. Shutemov count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); 2230e9b61f19SKirill A. Shutemov return ret; 2231e9b61f19SKirill A. Shutemov } 22329a982250SKirill A. Shutemov
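/*
 * Illustrative sketch, not part of the original file: the calling
 * convention split_huge_page_to_list() documents above, via the
 * split_huge_page() wrapper (which passes a NULL @list). The helper
 * name try_split_thp() is hypothetical.
 */
static int try_split_thp(struct page *page)
{
	int ret;

	if (!get_page_unless_zero(page))
		return -EBUSY;		/* page was freed under us */
	lock_page(page);		/* the huge page must be locked */
	ret = split_huge_page(page);	/* 0 on success, -EBUSY if pinned */
	unlock_page(page);
	put_page(page);
	return ret;
}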
22339a982250SKirill A. Shutemov void free_transhuge_page(struct page *page) 22349a982250SKirill A. Shutemov { 2235a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(page_to_nid(page)); 22369a982250SKirill A. Shutemov unsigned long flags; 22379a982250SKirill A. Shutemov 2238a3d0a918SKirill A. Shutemov spin_lock_irqsave(&pgdata->split_queue_lock, flags); 22399a982250SKirill A. Shutemov if (!list_empty(page_deferred_list(page))) { 2240a3d0a918SKirill A. Shutemov pgdata->split_queue_len--; 22419a982250SKirill A. Shutemov list_del(page_deferred_list(page)); 22429a982250SKirill A. Shutemov } 2243a3d0a918SKirill A. Shutemov spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); 22449a982250SKirill A. Shutemov free_compound_page(page); 22459a982250SKirill A. Shutemov } 22469a982250SKirill A. Shutemov 22479a982250SKirill A. Shutemov void deferred_split_huge_page(struct page *page) 22489a982250SKirill A. Shutemov { 2249a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(page_to_nid(page)); 22509a982250SKirill A. Shutemov unsigned long flags; 22519a982250SKirill A. Shutemov 22529a982250SKirill A. Shutemov VM_BUG_ON_PAGE(!PageTransHuge(page), page); 22539a982250SKirill A. Shutemov 2254a3d0a918SKirill A. Shutemov spin_lock_irqsave(&pgdata->split_queue_lock, flags); 22559a982250SKirill A. Shutemov if (list_empty(page_deferred_list(page))) { 2256f9719a03SKirill A. Shutemov count_vm_event(THP_DEFERRED_SPLIT_PAGE); 2257a3d0a918SKirill A. Shutemov list_add_tail(page_deferred_list(page), &pgdata->split_queue); 2258a3d0a918SKirill A. Shutemov pgdata->split_queue_len++; 22599a982250SKirill A. Shutemov } 2260a3d0a918SKirill A. Shutemov spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); 22619a982250SKirill A. Shutemov } 22629a982250SKirill A. Shutemov
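/*
 * Illustrative sketch, not part of the original file: the queueing
 * pattern deferred_split_huge_page() is built for. In this kernel the
 * rmap code queues a partially unmapped THP; the helper below is a
 * simplified, hypothetical rendering of that idea, not the actual
 * rmap implementation.
 */
static void thp_note_partial_unmap(struct page *head)
{
	/*
	 * Splitting is queued rather than done here: the remaining
	 * mappings may go away soon anyway, and deferred_split_scan()
	 * will split the page only under memory pressure.
	 */
	if (PageTransHuge(head) && !compound_mapcount(head))
		deferred_split_huge_page(head);
}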
22639a982250SKirill A. Shutemov static unsigned long deferred_split_count(struct shrinker *shrink, 22649a982250SKirill A. Shutemov struct shrink_control *sc) 22659a982250SKirill A. Shutemov { 2266a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(sc->nid); 2267cb8d68ecSKirill A. Shutemov return READ_ONCE(pgdata->split_queue_len); 22689a982250SKirill A. Shutemov } 22699a982250SKirill A. Shutemov 22709a982250SKirill A. Shutemov static unsigned long deferred_split_scan(struct shrinker *shrink, 22719a982250SKirill A. Shutemov struct shrink_control *sc) 22729a982250SKirill A. Shutemov { 2273a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(sc->nid); 22749a982250SKirill A. Shutemov unsigned long flags; 22759a982250SKirill A. Shutemov LIST_HEAD(list), *pos, *next; 22769a982250SKirill A. Shutemov struct page *page; 22779a982250SKirill A. Shutemov int split = 0; 22789a982250SKirill A. Shutemov 2279a3d0a918SKirill A. Shutemov spin_lock_irqsave(&pgdata->split_queue_lock, flags); 22809a982250SKirill A. Shutemov /* Take a pin on all head pages to keep them from being freed under us */ 2281ae026204SKirill A. Shutemov list_for_each_safe(pos, next, &pgdata->split_queue) { 22829a982250SKirill A. Shutemov page = list_entry((void *)pos, struct page, mapping); 22839a982250SKirill A. Shutemov page = compound_head(page); 2284e3ae1953SKirill A. Shutemov if (get_page_unless_zero(page)) { 2285e3ae1953SKirill A. Shutemov list_move(page_deferred_list(page), &list); 2286e3ae1953SKirill A. Shutemov } else { 2287e3ae1953SKirill A. Shutemov /* We lost the race with put_compound_page() */ 22889a982250SKirill A. Shutemov list_del_init(page_deferred_list(page)); 2289a3d0a918SKirill A. Shutemov pgdata->split_queue_len--; 22909a982250SKirill A. Shutemov } 2291e3ae1953SKirill A. Shutemov if (!--sc->nr_to_scan) 2292e3ae1953SKirill A. Shutemov break; 22939a982250SKirill A. Shutemov } 2294a3d0a918SKirill A. Shutemov spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); 22959a982250SKirill A. Shutemov 22969a982250SKirill A. Shutemov list_for_each_safe(pos, next, &list) { 22979a982250SKirill A. Shutemov page = list_entry((void *)pos, struct page, mapping); 22989a982250SKirill A. Shutemov lock_page(page); 22999a982250SKirill A. Shutemov /* split_huge_page() removes page from list on success */ 23009a982250SKirill A. Shutemov if (!split_huge_page(page)) 23019a982250SKirill A. Shutemov split++; 23029a982250SKirill A. Shutemov unlock_page(page); 23039a982250SKirill A. Shutemov put_page(page); 23049a982250SKirill A. Shutemov } 23059a982250SKirill A. Shutemov 2306a3d0a918SKirill A. Shutemov spin_lock_irqsave(&pgdata->split_queue_lock, flags); 2307a3d0a918SKirill A. Shutemov list_splice_tail(&list, &pgdata->split_queue); 2308a3d0a918SKirill A. Shutemov spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); 23099a982250SKirill A. Shutemov 2310cb8d68ecSKirill A. Shutemov /* 2311cb8d68ecSKirill A. Shutemov * Stop the shrinker if we didn't split any page but the queue is empty. 2312cb8d68ecSKirill A. Shutemov * This can happen if pages were freed under us. 2313cb8d68ecSKirill A. Shutemov */ 2314cb8d68ecSKirill A. Shutemov if (!split && list_empty(&pgdata->split_queue)) 2315cb8d68ecSKirill A. Shutemov return SHRINK_STOP; 2316cb8d68ecSKirill A. Shutemov return split; 23179a982250SKirill A. Shutemov } 23189a982250SKirill A. Shutemov 23199a982250SKirill A. Shutemov static struct shrinker deferred_split_shrinker = { 23209a982250SKirill A. Shutemov .count_objects = deferred_split_count, 23219a982250SKirill A. Shutemov .scan_objects = deferred_split_scan, 23229a982250SKirill A. Shutemov .seeks = DEFAULT_SEEKS, 2323a3d0a918SKirill A. Shutemov .flags = SHRINKER_NUMA_AWARE, 23249a982250SKirill A. Shutemov }; 232549071d43SKirill A. Shutemov
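/*
 * Illustrative sketch, not part of this section: the shrinker above
 * takes effect only once registered, which this file does during THP
 * initialisation (not shown here). The hypothetical helper below is
 * the minimal registration sequence.
 */
static int __init deferred_split_shrinker_init(void)
{
	/* SHRINKER_NUMA_AWARE: count/scan are invoked per node via sc->nid */
	return register_shrinker(&deferred_split_shrinker);
}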
232649071d43SKirill A. Shutemov #ifdef CONFIG_DEBUG_FS 232749071d43SKirill A. Shutemov static int split_huge_pages_set(void *data, u64 val) 232849071d43SKirill A. Shutemov { 232949071d43SKirill A. Shutemov struct zone *zone; 233049071d43SKirill A. Shutemov struct page *page; 233149071d43SKirill A. Shutemov unsigned long pfn, max_zone_pfn; 233249071d43SKirill A. Shutemov unsigned long total = 0, split = 0; 233349071d43SKirill A. Shutemov 233449071d43SKirill A. Shutemov if (val != 1) 233549071d43SKirill A. Shutemov return -EINVAL; 233649071d43SKirill A. Shutemov 233749071d43SKirill A. Shutemov for_each_populated_zone(zone) { 233849071d43SKirill A. Shutemov max_zone_pfn = zone_end_pfn(zone); 233949071d43SKirill A. Shutemov for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { 234049071d43SKirill A. Shutemov if (!pfn_valid(pfn)) 234149071d43SKirill A. Shutemov continue; 234249071d43SKirill A. Shutemov 234349071d43SKirill A. Shutemov page = pfn_to_page(pfn); 234449071d43SKirill A. Shutemov if (!get_page_unless_zero(page)) 234549071d43SKirill A. Shutemov continue; 234649071d43SKirill A. Shutemov 234749071d43SKirill A. Shutemov if (zone != page_zone(page)) 234849071d43SKirill A. Shutemov goto next; 234949071d43SKirill A. Shutemov 2350baa355fdSKirill A. Shutemov if (!PageHead(page) || PageHuge(page) || !PageLRU(page)) 235149071d43SKirill A. Shutemov goto next; 235249071d43SKirill A. Shutemov 235349071d43SKirill A. Shutemov total++; 235449071d43SKirill A. Shutemov lock_page(page); 235549071d43SKirill A. Shutemov if (!split_huge_page(page)) 235649071d43SKirill A. Shutemov split++; 235749071d43SKirill A. Shutemov unlock_page(page); 235849071d43SKirill A. Shutemov next: 235949071d43SKirill A. Shutemov put_page(page); 236049071d43SKirill A. Shutemov } 236149071d43SKirill A. Shutemov } 236249071d43SKirill A. Shutemov 2363145bdaa1SYang Shi pr_info("%lu of %lu THP split\n", split, total); 236449071d43SKirill A. Shutemov 236549071d43SKirill A. Shutemov return 0; 236649071d43SKirill A. Shutemov } 236749071d43SKirill A. Shutemov DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set, 236849071d43SKirill A. Shutemov "%llu\n"); 236949071d43SKirill A. Shutemov 237049071d43SKirill A. Shutemov static int __init split_huge_pages_debugfs(void) 237149071d43SKirill A. Shutemov { 237249071d43SKirill A. Shutemov struct dentry *ret; 237349071d43SKirill A. Shutemov 2374145bdaa1SYang Shi ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL, 237549071d43SKirill A. Shutemov &split_huge_pages_fops); 237649071d43SKirill A. Shutemov if (!ret) 237749071d43SKirill A. Shutemov pr_warn("Failed to create split_huge_pages in debugfs\n"); 237849071d43SKirill A. Shutemov return 0; 237949071d43SKirill A. Shutemov } 238049071d43SKirill A. Shutemov late_initcall(split_huge_pages_debugfs); 238149071d43SKirill A. Shutemov #endif 2382
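/*
 * Illustrative usage, not part of the original file: with
 * CONFIG_DEBUG_FS enabled and debugfs mounted in its usual place, the
 * write-only knob created above forces a walk of every populated zone
 * that splits all splittable huge pages, e.g. from a root shell:
 *
 *	echo 1 > /sys/kernel/debug/split_huge_pages
 *
 * Any value other than 1 fails with -EINVAL, and the outcome is
 * reported via pr_info() as "<split> of <total> THP split".
 */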