/*
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default, transparent hugepage support is disabled so that we do not
 * risk increasing the memory footprint of applications without a
 * guaranteed benefit.  When transparent hugepage support is enabled, it
 * applies to all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

static struct shrinker deferred_split_shrinker;

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;

struct page *get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return READ_ONCE(huge_zero_page);

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return NULL;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return READ_ONCE(huge_zero_page);
}

void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};
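
/*
 * In short, the huge zero page lifecycle as implemented above:
 * huge_zero_refcount is 0 while no zero page exists.  get_huge_zero_page()
 * allocates and installs one and sets the count to 2 - one reference for
 * the caller plus one base reference that only the shrinker may drop.
 * put_huge_zero_page() pairs with get and must never release that final
 * reference (hence the BUG_ON), while shrink_huge_zero_page_scan() frees
 * the page once only the base reference (count == 1) remains.
 */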
Shutemov 12471e3aac0SAndrea Arcangeli #ifdef CONFIG_SYSFS 125ba76149fSAndrea Arcangeli 126444eb2a4SMel Gorman static ssize_t triple_flag_store(struct kobject *kobj, 12771e3aac0SAndrea Arcangeli struct kobj_attribute *attr, 12871e3aac0SAndrea Arcangeli const char *buf, size_t count, 12971e3aac0SAndrea Arcangeli enum transparent_hugepage_flag enabled, 130444eb2a4SMel Gorman enum transparent_hugepage_flag deferred, 13171e3aac0SAndrea Arcangeli enum transparent_hugepage_flag req_madv) 13271e3aac0SAndrea Arcangeli { 133444eb2a4SMel Gorman if (!memcmp("defer", buf, 134444eb2a4SMel Gorman min(sizeof("defer")-1, count))) { 135444eb2a4SMel Gorman if (enabled == deferred) 136444eb2a4SMel Gorman return -EINVAL; 137444eb2a4SMel Gorman clear_bit(enabled, &transparent_hugepage_flags); 13871e3aac0SAndrea Arcangeli clear_bit(req_madv, &transparent_hugepage_flags); 139444eb2a4SMel Gorman set_bit(deferred, &transparent_hugepage_flags); 140444eb2a4SMel Gorman } else if (!memcmp("always", buf, 141444eb2a4SMel Gorman min(sizeof("always")-1, count))) { 142444eb2a4SMel Gorman clear_bit(deferred, &transparent_hugepage_flags); 143444eb2a4SMel Gorman clear_bit(req_madv, &transparent_hugepage_flags); 144444eb2a4SMel Gorman set_bit(enabled, &transparent_hugepage_flags); 14571e3aac0SAndrea Arcangeli } else if (!memcmp("madvise", buf, 14671e3aac0SAndrea Arcangeli min(sizeof("madvise")-1, count))) { 14771e3aac0SAndrea Arcangeli clear_bit(enabled, &transparent_hugepage_flags); 148444eb2a4SMel Gorman clear_bit(deferred, &transparent_hugepage_flags); 14971e3aac0SAndrea Arcangeli set_bit(req_madv, &transparent_hugepage_flags); 15071e3aac0SAndrea Arcangeli } else if (!memcmp("never", buf, 15171e3aac0SAndrea Arcangeli min(sizeof("never")-1, count))) { 15271e3aac0SAndrea Arcangeli clear_bit(enabled, &transparent_hugepage_flags); 15371e3aac0SAndrea Arcangeli clear_bit(req_madv, &transparent_hugepage_flags); 154444eb2a4SMel Gorman clear_bit(deferred, &transparent_hugepage_flags); 15571e3aac0SAndrea Arcangeli } else 15671e3aac0SAndrea Arcangeli return -EINVAL; 15771e3aac0SAndrea Arcangeli 15871e3aac0SAndrea Arcangeli return count; 15971e3aac0SAndrea Arcangeli } 16071e3aac0SAndrea Arcangeli 16171e3aac0SAndrea Arcangeli static ssize_t enabled_show(struct kobject *kobj, 16271e3aac0SAndrea Arcangeli struct kobj_attribute *attr, char *buf) 16371e3aac0SAndrea Arcangeli { 164444eb2a4SMel Gorman if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags)) 165444eb2a4SMel Gorman return sprintf(buf, "[always] madvise never\n"); 166444eb2a4SMel Gorman else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags)) 167444eb2a4SMel Gorman return sprintf(buf, "always [madvise] never\n"); 168444eb2a4SMel Gorman else 169444eb2a4SMel Gorman return sprintf(buf, "always madvise [never]\n"); 17071e3aac0SAndrea Arcangeli } 171444eb2a4SMel Gorman 17271e3aac0SAndrea Arcangeli static ssize_t enabled_store(struct kobject *kobj, 17371e3aac0SAndrea Arcangeli struct kobj_attribute *attr, 17471e3aac0SAndrea Arcangeli const char *buf, size_t count) 17571e3aac0SAndrea Arcangeli { 176ba76149fSAndrea Arcangeli ssize_t ret; 177ba76149fSAndrea Arcangeli 178444eb2a4SMel Gorman ret = triple_flag_store(kobj, attr, buf, count, 179444eb2a4SMel Gorman TRANSPARENT_HUGEPAGE_FLAG, 18071e3aac0SAndrea Arcangeli TRANSPARENT_HUGEPAGE_FLAG, 18171e3aac0SAndrea Arcangeli TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG); 182ba76149fSAndrea Arcangeli 183ba76149fSAndrea Arcangeli if (ret > 0) { 184b46e756fSKirill A. 
Shutemov int err = start_stop_khugepaged(); 185ba76149fSAndrea Arcangeli if (err) 186ba76149fSAndrea Arcangeli ret = err; 187ba76149fSAndrea Arcangeli } 188ba76149fSAndrea Arcangeli 189ba76149fSAndrea Arcangeli return ret; 19071e3aac0SAndrea Arcangeli } 19171e3aac0SAndrea Arcangeli static struct kobj_attribute enabled_attr = 19271e3aac0SAndrea Arcangeli __ATTR(enabled, 0644, enabled_show, enabled_store); 19371e3aac0SAndrea Arcangeli 194b46e756fSKirill A. Shutemov ssize_t single_hugepage_flag_show(struct kobject *kobj, 19571e3aac0SAndrea Arcangeli struct kobj_attribute *attr, char *buf, 19671e3aac0SAndrea Arcangeli enum transparent_hugepage_flag flag) 19771e3aac0SAndrea Arcangeli { 198e27e6151SBen Hutchings return sprintf(buf, "%d\n", 199e27e6151SBen Hutchings !!test_bit(flag, &transparent_hugepage_flags)); 20071e3aac0SAndrea Arcangeli } 201e27e6151SBen Hutchings 202b46e756fSKirill A. Shutemov ssize_t single_hugepage_flag_store(struct kobject *kobj, 20371e3aac0SAndrea Arcangeli struct kobj_attribute *attr, 20471e3aac0SAndrea Arcangeli const char *buf, size_t count, 20571e3aac0SAndrea Arcangeli enum transparent_hugepage_flag flag) 20671e3aac0SAndrea Arcangeli { 207e27e6151SBen Hutchings unsigned long value; 208e27e6151SBen Hutchings int ret; 209e27e6151SBen Hutchings 210e27e6151SBen Hutchings ret = kstrtoul(buf, 10, &value); 211e27e6151SBen Hutchings if (ret < 0) 212e27e6151SBen Hutchings return ret; 213e27e6151SBen Hutchings if (value > 1) 21471e3aac0SAndrea Arcangeli return -EINVAL; 21571e3aac0SAndrea Arcangeli 216e27e6151SBen Hutchings if (value) 217e27e6151SBen Hutchings set_bit(flag, &transparent_hugepage_flags); 218e27e6151SBen Hutchings else 219e27e6151SBen Hutchings clear_bit(flag, &transparent_hugepage_flags); 220e27e6151SBen Hutchings 22171e3aac0SAndrea Arcangeli return count; 22271e3aac0SAndrea Arcangeli } 22371e3aac0SAndrea Arcangeli 22471e3aac0SAndrea Arcangeli /* 22571e3aac0SAndrea Arcangeli * Currently defrag only disables __GFP_NOWAIT for allocation. A blind 22671e3aac0SAndrea Arcangeli * __GFP_REPEAT is too aggressive, it's never worth swapping tons of 22771e3aac0SAndrea Arcangeli * memory just to allocate one more hugepage. 
22871e3aac0SAndrea Arcangeli */ 22971e3aac0SAndrea Arcangeli static ssize_t defrag_show(struct kobject *kobj, 23071e3aac0SAndrea Arcangeli struct kobj_attribute *attr, char *buf) 23171e3aac0SAndrea Arcangeli { 232444eb2a4SMel Gorman if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) 233444eb2a4SMel Gorman return sprintf(buf, "[always] defer madvise never\n"); 234444eb2a4SMel Gorman if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) 235444eb2a4SMel Gorman return sprintf(buf, "always [defer] madvise never\n"); 236444eb2a4SMel Gorman else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) 237444eb2a4SMel Gorman return sprintf(buf, "always defer [madvise] never\n"); 238444eb2a4SMel Gorman else 239444eb2a4SMel Gorman return sprintf(buf, "always defer madvise [never]\n"); 240444eb2a4SMel Gorman 24171e3aac0SAndrea Arcangeli } 24271e3aac0SAndrea Arcangeli static ssize_t defrag_store(struct kobject *kobj, 24371e3aac0SAndrea Arcangeli struct kobj_attribute *attr, 24471e3aac0SAndrea Arcangeli const char *buf, size_t count) 24571e3aac0SAndrea Arcangeli { 246444eb2a4SMel Gorman return triple_flag_store(kobj, attr, buf, count, 247444eb2a4SMel Gorman TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, 248444eb2a4SMel Gorman TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, 24971e3aac0SAndrea Arcangeli TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG); 25071e3aac0SAndrea Arcangeli } 25171e3aac0SAndrea Arcangeli static struct kobj_attribute defrag_attr = 25271e3aac0SAndrea Arcangeli __ATTR(defrag, 0644, defrag_show, defrag_store); 25371e3aac0SAndrea Arcangeli 25479da5407SKirill A. Shutemov static ssize_t use_zero_page_show(struct kobject *kobj, 25579da5407SKirill A. Shutemov struct kobj_attribute *attr, char *buf) 25679da5407SKirill A. Shutemov { 257b46e756fSKirill A. Shutemov return single_hugepage_flag_show(kobj, attr, buf, 25879da5407SKirill A. Shutemov TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 25979da5407SKirill A. Shutemov } 26079da5407SKirill A. Shutemov static ssize_t use_zero_page_store(struct kobject *kobj, 26179da5407SKirill A. Shutemov struct kobj_attribute *attr, const char *buf, size_t count) 26279da5407SKirill A. Shutemov { 263b46e756fSKirill A. Shutemov return single_hugepage_flag_store(kobj, attr, buf, count, 26479da5407SKirill A. Shutemov TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 26579da5407SKirill A. Shutemov } 26679da5407SKirill A. Shutemov static struct kobj_attribute use_zero_page_attr = 26779da5407SKirill A. Shutemov __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store); 26871e3aac0SAndrea Arcangeli #ifdef CONFIG_DEBUG_VM 26971e3aac0SAndrea Arcangeli static ssize_t debug_cow_show(struct kobject *kobj, 27071e3aac0SAndrea Arcangeli struct kobj_attribute *attr, char *buf) 27171e3aac0SAndrea Arcangeli { 272b46e756fSKirill A. Shutemov return single_hugepage_flag_show(kobj, attr, buf, 27371e3aac0SAndrea Arcangeli TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); 27471e3aac0SAndrea Arcangeli } 27571e3aac0SAndrea Arcangeli static ssize_t debug_cow_store(struct kobject *kobj, 27671e3aac0SAndrea Arcangeli struct kobj_attribute *attr, 27771e3aac0SAndrea Arcangeli const char *buf, size_t count) 27871e3aac0SAndrea Arcangeli { 279b46e756fSKirill A. 
Shutemov return single_hugepage_flag_store(kobj, attr, buf, count, 28071e3aac0SAndrea Arcangeli TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); 28171e3aac0SAndrea Arcangeli } 28271e3aac0SAndrea Arcangeli static struct kobj_attribute debug_cow_attr = 28371e3aac0SAndrea Arcangeli __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store); 28471e3aac0SAndrea Arcangeli #endif /* CONFIG_DEBUG_VM */ 28571e3aac0SAndrea Arcangeli 28671e3aac0SAndrea Arcangeli static struct attribute *hugepage_attr[] = { 28771e3aac0SAndrea Arcangeli &enabled_attr.attr, 28871e3aac0SAndrea Arcangeli &defrag_attr.attr, 28979da5407SKirill A. Shutemov &use_zero_page_attr.attr, 290e496cf3dSKirill A. Shutemov #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) 2915a6e75f8SKirill A. Shutemov &shmem_enabled_attr.attr, 2925a6e75f8SKirill A. Shutemov #endif 29371e3aac0SAndrea Arcangeli #ifdef CONFIG_DEBUG_VM 29471e3aac0SAndrea Arcangeli &debug_cow_attr.attr, 29571e3aac0SAndrea Arcangeli #endif 29671e3aac0SAndrea Arcangeli NULL, 29771e3aac0SAndrea Arcangeli }; 29871e3aac0SAndrea Arcangeli 29971e3aac0SAndrea Arcangeli static struct attribute_group hugepage_attr_group = { 30071e3aac0SAndrea Arcangeli .attrs = hugepage_attr, 301ba76149fSAndrea Arcangeli }; 302ba76149fSAndrea Arcangeli 303569e5590SShaohua Li static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) 304569e5590SShaohua Li { 305569e5590SShaohua Li int err; 306569e5590SShaohua Li 307569e5590SShaohua Li *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj); 308569e5590SShaohua Li if (unlikely(!*hugepage_kobj)) { 309ae3a8c1cSAndrew Morton pr_err("failed to create transparent hugepage kobject\n"); 310569e5590SShaohua Li return -ENOMEM; 311569e5590SShaohua Li } 312569e5590SShaohua Li 313569e5590SShaohua Li err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group); 314569e5590SShaohua Li if (err) { 315ae3a8c1cSAndrew Morton pr_err("failed to register transparent hugepage group\n"); 316569e5590SShaohua Li goto delete_obj; 317569e5590SShaohua Li } 318569e5590SShaohua Li 319569e5590SShaohua Li err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group); 320569e5590SShaohua Li if (err) { 321ae3a8c1cSAndrew Morton pr_err("failed to register transparent hugepage group\n"); 322569e5590SShaohua Li goto remove_hp_group; 323569e5590SShaohua Li } 324569e5590SShaohua Li 325569e5590SShaohua Li return 0; 326569e5590SShaohua Li 327569e5590SShaohua Li remove_hp_group: 328569e5590SShaohua Li sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group); 329569e5590SShaohua Li delete_obj: 330569e5590SShaohua Li kobject_put(*hugepage_kobj); 331569e5590SShaohua Li return err; 332569e5590SShaohua Li } 333569e5590SShaohua Li 334569e5590SShaohua Li static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj) 335569e5590SShaohua Li { 336569e5590SShaohua Li sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group); 337569e5590SShaohua Li sysfs_remove_group(hugepage_kobj, &hugepage_attr_group); 338569e5590SShaohua Li kobject_put(hugepage_kobj); 339569e5590SShaohua Li } 340569e5590SShaohua Li #else 341569e5590SShaohua Li static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj) 342569e5590SShaohua Li { 343569e5590SShaohua Li return 0; 344569e5590SShaohua Li } 345569e5590SShaohua Li 346569e5590SShaohua Li static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj) 347569e5590SShaohua Li { 348569e5590SShaohua Li } 34971e3aac0SAndrea Arcangeli #endif /* CONFIG_SYSFS */ 35071e3aac0SAndrea Arcangeli 35171e3aac0SAndrea Arcangeli 
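
/*
 * Example usage of the knobs defined above (assuming the usual sysfs mount
 * point): hugepage_init_sysfs() publishes them under
 * /sys/kernel/mm/transparent_hugepage/, so an administrator can e.g. run
 *
 *	echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 *	echo defer > /sys/kernel/mm/transparent_hugepage/defrag
 *	echo 0 > /sys/kernel/mm/transparent_hugepage/use_zero_page
 *
 * "enabled" accepts always/madvise/never and "defrag" additionally accepts
 * "defer", matching enabled_store() and defrag_store() above.
 */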
static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 0;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = register_shrinker(&huge_zero_page_shrinker);
	if (err)
		goto err_hzp_shrinker;
	err = register_shrinker(&deferred_split_shrinker);
	if (err)
		goto err_split_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save.  The admin can still enable it through /sys.
	 */
	if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
	unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * ->lru in the tail pages is occupied by compound_head.
	 * Let's use ->mapping + ->index in the second tail page as list_head.
	 */
	return (struct list_head *)&page[2].mapping;
}

void prep_transhuge_page(struct page *page)
{
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */

	INIT_LIST_HEAD(page_deferred_list(page));
	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}

static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page,
		gfp_t gfp)
{
Shutemov struct vm_area_struct *vma = fe->vma; 47600501b53SJohannes Weiner struct mem_cgroup *memcg; 47771e3aac0SAndrea Arcangeli pgtable_t pgtable; 478bae473a4SKirill A. Shutemov unsigned long haddr = fe->address & HPAGE_PMD_MASK; 47971e3aac0SAndrea Arcangeli 480309381feSSasha Levin VM_BUG_ON_PAGE(!PageCompound(page), page); 48100501b53SJohannes Weiner 482bae473a4SKirill A. Shutemov if (mem_cgroup_try_charge(page, vma->vm_mm, gfp, &memcg, true)) { 4836b251fc9SAndrea Arcangeli put_page(page); 4846b251fc9SAndrea Arcangeli count_vm_event(THP_FAULT_FALLBACK); 4856b251fc9SAndrea Arcangeli return VM_FAULT_FALLBACK; 4866b251fc9SAndrea Arcangeli } 48771e3aac0SAndrea Arcangeli 488bae473a4SKirill A. Shutemov pgtable = pte_alloc_one(vma->vm_mm, haddr); 48900501b53SJohannes Weiner if (unlikely(!pgtable)) { 490f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(page, memcg, true); 4916b251fc9SAndrea Arcangeli put_page(page); 49200501b53SJohannes Weiner return VM_FAULT_OOM; 49300501b53SJohannes Weiner } 49400501b53SJohannes Weiner 49571e3aac0SAndrea Arcangeli clear_huge_page(page, haddr, HPAGE_PMD_NR); 49652f37629SMinchan Kim /* 49752f37629SMinchan Kim * The memory barrier inside __SetPageUptodate makes sure that 49852f37629SMinchan Kim * clear_huge_page writes become visible before the set_pmd_at() 49952f37629SMinchan Kim * write. 50052f37629SMinchan Kim */ 50171e3aac0SAndrea Arcangeli __SetPageUptodate(page); 50271e3aac0SAndrea Arcangeli 503bae473a4SKirill A. Shutemov fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); 504bae473a4SKirill A. Shutemov if (unlikely(!pmd_none(*fe->pmd))) { 505bae473a4SKirill A. Shutemov spin_unlock(fe->ptl); 506f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(page, memcg, true); 50771e3aac0SAndrea Arcangeli put_page(page); 508bae473a4SKirill A. Shutemov pte_free(vma->vm_mm, pgtable); 50971e3aac0SAndrea Arcangeli } else { 51071e3aac0SAndrea Arcangeli pmd_t entry; 5116b251fc9SAndrea Arcangeli 5126b251fc9SAndrea Arcangeli /* Deliver the page fault to userland */ 5136b251fc9SAndrea Arcangeli if (userfaultfd_missing(vma)) { 5146b251fc9SAndrea Arcangeli int ret; 5156b251fc9SAndrea Arcangeli 516bae473a4SKirill A. Shutemov spin_unlock(fe->ptl); 517f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(page, memcg, true); 5186b251fc9SAndrea Arcangeli put_page(page); 519bae473a4SKirill A. Shutemov pte_free(vma->vm_mm, pgtable); 520bae473a4SKirill A. Shutemov ret = handle_userfault(fe, VM_UFFD_MISSING); 5216b251fc9SAndrea Arcangeli VM_BUG_ON(ret & VM_FAULT_FALLBACK); 5226b251fc9SAndrea Arcangeli return ret; 5236b251fc9SAndrea Arcangeli } 5246b251fc9SAndrea Arcangeli 5253122359aSKirill A. Shutemov entry = mk_huge_pmd(page, vma->vm_page_prot); 5263122359aSKirill A. Shutemov entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 527d281ee61SKirill A. Shutemov page_add_new_anon_rmap(page, vma, haddr, true); 528f627c2f5SKirill A. Shutemov mem_cgroup_commit_charge(page, memcg, false, true); 52900501b53SJohannes Weiner lru_cache_add_active_or_unevictable(page, vma); 530bae473a4SKirill A. Shutemov pgtable_trans_huge_deposit(vma->vm_mm, fe->pmd, pgtable); 531bae473a4SKirill A. Shutemov set_pmd_at(vma->vm_mm, haddr, fe->pmd, entry); 532bae473a4SKirill A. Shutemov add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); 533bae473a4SKirill A. Shutemov atomic_long_inc(&vma->vm_mm->nr_ptes); 534bae473a4SKirill A. 
Shutemov spin_unlock(fe->ptl); 5356b251fc9SAndrea Arcangeli count_vm_event(THP_FAULT_ALLOC); 53671e3aac0SAndrea Arcangeli } 53771e3aac0SAndrea Arcangeli 538aa2e878eSDavid Rientjes return 0; 53971e3aac0SAndrea Arcangeli } 54071e3aac0SAndrea Arcangeli 541444eb2a4SMel Gorman /* 54225160354SVlastimil Babka * If THP defrag is set to always then directly reclaim/compact as necessary 54325160354SVlastimil Babka * If set to defer then do only background reclaim/compact and defer to khugepaged 544444eb2a4SMel Gorman * If set to madvise and the VMA is flagged then directly reclaim/compact 54525160354SVlastimil Babka * When direct reclaim/compact is allowed, don't retry except for flagged VMA's 546444eb2a4SMel Gorman */ 547444eb2a4SMel Gorman static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma) 5480bbbc0b3SAndrea Arcangeli { 54925160354SVlastimil Babka bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE); 550444eb2a4SMel Gorman 55125160354SVlastimil Babka if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, 55225160354SVlastimil Babka &transparent_hugepage_flags) && vma_madvised) 55325160354SVlastimil Babka return GFP_TRANSHUGE; 55425160354SVlastimil Babka else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, 55525160354SVlastimil Babka &transparent_hugepage_flags)) 55625160354SVlastimil Babka return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM; 55725160354SVlastimil Babka else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, 55825160354SVlastimil Babka &transparent_hugepage_flags)) 55925160354SVlastimil Babka return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY); 560444eb2a4SMel Gorman 56125160354SVlastimil Babka return GFP_TRANSHUGE_LIGHT; 562444eb2a4SMel Gorman } 563444eb2a4SMel Gorman 564c4088ebdSKirill A. Shutemov /* Caller must hold page table lock. */ 565d295e341SKirill A. Shutemov static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, 56697ae1749SKirill A. Shutemov struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, 5675918d10aSKirill A. Shutemov struct page *zero_page) 568fc9fe822SKirill A. Shutemov { 569fc9fe822SKirill A. Shutemov pmd_t entry; 5707c414164SAndrew Morton if (!pmd_none(*pmd)) 5717c414164SAndrew Morton return false; 5725918d10aSKirill A. Shutemov entry = mk_pmd(zero_page, vma->vm_page_prot); 573fc9fe822SKirill A. Shutemov entry = pmd_mkhuge(entry); 57412c9d70bSMatthew Wilcox if (pgtable) 5756b0b50b0SAneesh Kumar K.V pgtable_trans_huge_deposit(mm, pmd, pgtable); 576fc9fe822SKirill A. Shutemov set_pmd_at(mm, haddr, pmd, entry); 577e1f56c89SKirill A. Shutemov atomic_long_inc(&mm->nr_ptes); 5787c414164SAndrew Morton return true; 579fc9fe822SKirill A. Shutemov } 580fc9fe822SKirill A. Shutemov 581bae473a4SKirill A. Shutemov int do_huge_pmd_anonymous_page(struct fault_env *fe) 58271e3aac0SAndrea Arcangeli { 583bae473a4SKirill A. Shutemov struct vm_area_struct *vma = fe->vma; 584077fcf11SAneesh Kumar K.V gfp_t gfp; 58571e3aac0SAndrea Arcangeli struct page *page; 586bae473a4SKirill A. Shutemov unsigned long haddr = fe->address & HPAGE_PMD_MASK; 58771e3aac0SAndrea Arcangeli 588128ec037SKirill A. Shutemov if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) 589c0292554SKirill A. Shutemov return VM_FAULT_FALLBACK; 59071e3aac0SAndrea Arcangeli if (unlikely(anon_vma_prepare(vma))) 59171e3aac0SAndrea Arcangeli return VM_FAULT_OOM; 5926d50e60cSDavid Rientjes if (unlikely(khugepaged_enter(vma, vma->vm_flags))) 593ba76149fSAndrea Arcangeli return VM_FAULT_OOM; 594bae473a4SKirill A. 
Shutemov if (!(fe->flags & FAULT_FLAG_WRITE) && 595bae473a4SKirill A. Shutemov !mm_forbids_zeropage(vma->vm_mm) && 59679da5407SKirill A. Shutemov transparent_hugepage_use_zero_page()) { 59780371957SKirill A. Shutemov pgtable_t pgtable; 5985918d10aSKirill A. Shutemov struct page *zero_page; 5993ea41e62SKirill A. Shutemov bool set; 6006b251fc9SAndrea Arcangeli int ret; 601bae473a4SKirill A. Shutemov pgtable = pte_alloc_one(vma->vm_mm, haddr); 60280371957SKirill A. Shutemov if (unlikely(!pgtable)) 60380371957SKirill A. Shutemov return VM_FAULT_OOM; 6045918d10aSKirill A. Shutemov zero_page = get_huge_zero_page(); 6055918d10aSKirill A. Shutemov if (unlikely(!zero_page)) { 606bae473a4SKirill A. Shutemov pte_free(vma->vm_mm, pgtable); 60797ae1749SKirill A. Shutemov count_vm_event(THP_FAULT_FALLBACK); 608c0292554SKirill A. Shutemov return VM_FAULT_FALLBACK; 60997ae1749SKirill A. Shutemov } 610bae473a4SKirill A. Shutemov fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); 6116b251fc9SAndrea Arcangeli ret = 0; 6126b251fc9SAndrea Arcangeli set = false; 613bae473a4SKirill A. Shutemov if (pmd_none(*fe->pmd)) { 6146b251fc9SAndrea Arcangeli if (userfaultfd_missing(vma)) { 615bae473a4SKirill A. Shutemov spin_unlock(fe->ptl); 616bae473a4SKirill A. Shutemov ret = handle_userfault(fe, VM_UFFD_MISSING); 6176b251fc9SAndrea Arcangeli VM_BUG_ON(ret & VM_FAULT_FALLBACK); 6186b251fc9SAndrea Arcangeli } else { 619bae473a4SKirill A. Shutemov set_huge_zero_page(pgtable, vma->vm_mm, vma, 620bae473a4SKirill A. Shutemov haddr, fe->pmd, zero_page); 621bae473a4SKirill A. Shutemov spin_unlock(fe->ptl); 6226b251fc9SAndrea Arcangeli set = true; 6236b251fc9SAndrea Arcangeli } 6246b251fc9SAndrea Arcangeli } else 625bae473a4SKirill A. Shutemov spin_unlock(fe->ptl); 6263ea41e62SKirill A. Shutemov if (!set) { 627bae473a4SKirill A. Shutemov pte_free(vma->vm_mm, pgtable); 6283ea41e62SKirill A. Shutemov put_huge_zero_page(); 6293ea41e62SKirill A. Shutemov } 6306b251fc9SAndrea Arcangeli return ret; 63180371957SKirill A. Shutemov } 632444eb2a4SMel Gorman gfp = alloc_hugepage_direct_gfpmask(vma); 633077fcf11SAneesh Kumar K.V page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); 63481ab4201SAndi Kleen if (unlikely(!page)) { 63581ab4201SAndi Kleen count_vm_event(THP_FAULT_FALLBACK); 636c0292554SKirill A. Shutemov return VM_FAULT_FALLBACK; 63781ab4201SAndi Kleen } 6389a982250SKirill A. Shutemov prep_transhuge_page(page); 639bae473a4SKirill A. 
Shutemov return __do_huge_pmd_anonymous_page(fe, page, gfp); 64071e3aac0SAndrea Arcangeli } 64171e3aac0SAndrea Arcangeli 642ae18d6dcSMatthew Wilcox static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, 643f25748e3SDan Williams pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write) 6445cad465dSMatthew Wilcox { 6455cad465dSMatthew Wilcox struct mm_struct *mm = vma->vm_mm; 6465cad465dSMatthew Wilcox pmd_t entry; 6475cad465dSMatthew Wilcox spinlock_t *ptl; 6485cad465dSMatthew Wilcox 6495cad465dSMatthew Wilcox ptl = pmd_lock(mm, pmd); 650f25748e3SDan Williams entry = pmd_mkhuge(pfn_t_pmd(pfn, prot)); 651f25748e3SDan Williams if (pfn_t_devmap(pfn)) 652f25748e3SDan Williams entry = pmd_mkdevmap(entry); 6535cad465dSMatthew Wilcox if (write) { 6545cad465dSMatthew Wilcox entry = pmd_mkyoung(pmd_mkdirty(entry)); 6555cad465dSMatthew Wilcox entry = maybe_pmd_mkwrite(entry, vma); 6565cad465dSMatthew Wilcox } 6575cad465dSMatthew Wilcox set_pmd_at(mm, addr, pmd, entry); 6585cad465dSMatthew Wilcox update_mmu_cache_pmd(vma, addr, pmd); 6595cad465dSMatthew Wilcox spin_unlock(ptl); 6605cad465dSMatthew Wilcox } 6615cad465dSMatthew Wilcox 6625cad465dSMatthew Wilcox int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, 663f25748e3SDan Williams pmd_t *pmd, pfn_t pfn, bool write) 6645cad465dSMatthew Wilcox { 6655cad465dSMatthew Wilcox pgprot_t pgprot = vma->vm_page_prot; 6665cad465dSMatthew Wilcox /* 6675cad465dSMatthew Wilcox * If we had pmd_special, we could avoid all these restrictions, 6685cad465dSMatthew Wilcox * but we need to be consistent with PTEs and architectures that 6695cad465dSMatthew Wilcox * can't support a 'special' bit. 6705cad465dSMatthew Wilcox */ 6715cad465dSMatthew Wilcox BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); 6725cad465dSMatthew Wilcox BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == 6735cad465dSMatthew Wilcox (VM_PFNMAP|VM_MIXEDMAP)); 6745cad465dSMatthew Wilcox BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); 675f25748e3SDan Williams BUG_ON(!pfn_t_devmap(pfn)); 6765cad465dSMatthew Wilcox 6775cad465dSMatthew Wilcox if (addr < vma->vm_start || addr >= vma->vm_end) 6785cad465dSMatthew Wilcox return VM_FAULT_SIGBUS; 6795cad465dSMatthew Wilcox if (track_pfn_insert(vma, &pgprot, pfn)) 6805cad465dSMatthew Wilcox return VM_FAULT_SIGBUS; 681ae18d6dcSMatthew Wilcox insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write); 682ae18d6dcSMatthew Wilcox return VM_FAULT_NOPAGE; 6835cad465dSMatthew Wilcox } 684dee41079SDan Williams EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd); 6855cad465dSMatthew Wilcox 6863565fce3SDan Williams static void touch_pmd(struct vm_area_struct *vma, unsigned long addr, 6873565fce3SDan Williams pmd_t *pmd) 6883565fce3SDan Williams { 6893565fce3SDan Williams pmd_t _pmd; 6903565fce3SDan Williams 6913565fce3SDan Williams /* 6923565fce3SDan Williams * We should set the dirty bit only for FOLL_WRITE but for now 6933565fce3SDan Williams * the dirty bit in the pmd is meaningless. And if the dirty 6943565fce3SDan Williams * bit will become meaningful and we'll only set it with 6953565fce3SDan Williams * FOLL_WRITE, an atomic set_bit will be required on the pmd to 6963565fce3SDan Williams * set the young bit, instead of the current set_pmd_at. 
	 */
	_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				pmd, _pmd,  1))
		update_mmu_cache_pmd(vma, addr, pmd);
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags)
{
	unsigned long pfn = pmd_pfn(*pmd);
	struct mm_struct *mm = vma->vm_mm;
	struct dev_pagemap *pgmap;
	struct page *page;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		return NULL;

	if (pmd_present(*pmd) && pmd_devmap(*pmd))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & FOLL_GET))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
	pgmap = get_dev_pagemap(pfn, NULL);
	if (!pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	get_page(page);
	put_dev_pagemap(pgmap);

	return page;
}

int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable = NULL;
	int ret = -ENOMEM;

	/* Skip if can be re-fill on fault */
	if (!vma_is_anonymous(vma))
		return 0;

	pgtable = pte_alloc_one(dst_mm, addr);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
	src_ptl = pmd_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;
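	/*
	 * Both page table locks are held from here on.  The check below
	 * re-validates that the source pmd is still a huge pmd; if it was
	 * split in the meantime, return -EAGAIN so the caller can fall back
	 * to copying at pte granularity.
	 */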
Shutemov if (unlikely(!pmd_trans_huge(pmd))) { 76871e3aac0SAndrea Arcangeli pte_free(dst_mm, pgtable); 76971e3aac0SAndrea Arcangeli goto out_unlock; 77071e3aac0SAndrea Arcangeli } 771fc9fe822SKirill A. Shutemov /* 772c4088ebdSKirill A. Shutemov * When page table lock is held, the huge zero pmd should not be 773fc9fe822SKirill A. Shutemov * under splitting since we don't split the page itself, only pmd to 774fc9fe822SKirill A. Shutemov * a page table. 775fc9fe822SKirill A. Shutemov */ 776fc9fe822SKirill A. Shutemov if (is_huge_zero_pmd(pmd)) { 7775918d10aSKirill A. Shutemov struct page *zero_page; 77897ae1749SKirill A. Shutemov /* 77997ae1749SKirill A. Shutemov * get_huge_zero_page() will never allocate a new page here, 78097ae1749SKirill A. Shutemov * since we already have a zero page to copy. It just takes a 78197ae1749SKirill A. Shutemov * reference. 78297ae1749SKirill A. Shutemov */ 7835918d10aSKirill A. Shutemov zero_page = get_huge_zero_page(); 7846b251fc9SAndrea Arcangeli set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd, 7855918d10aSKirill A. Shutemov zero_page); 786fc9fe822SKirill A. Shutemov ret = 0; 787fc9fe822SKirill A. Shutemov goto out_unlock; 788fc9fe822SKirill A. Shutemov } 789de466bd6SMel Gorman 79071e3aac0SAndrea Arcangeli src_page = pmd_page(pmd); 791309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(src_page), src_page); 79271e3aac0SAndrea Arcangeli get_page(src_page); 79353f9263bSKirill A. Shutemov page_dup_rmap(src_page, true); 79471e3aac0SAndrea Arcangeli add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); 7955c7fb56eSDan Williams atomic_long_inc(&dst_mm->nr_ptes); 7965c7fb56eSDan Williams pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); 79771e3aac0SAndrea Arcangeli 79871e3aac0SAndrea Arcangeli pmdp_set_wrprotect(src_mm, addr, src_pmd); 79971e3aac0SAndrea Arcangeli pmd = pmd_mkold(pmd_wrprotect(pmd)); 80071e3aac0SAndrea Arcangeli set_pmd_at(dst_mm, addr, dst_pmd, pmd); 80171e3aac0SAndrea Arcangeli 80271e3aac0SAndrea Arcangeli ret = 0; 80371e3aac0SAndrea Arcangeli out_unlock: 804c4088ebdSKirill A. Shutemov spin_unlock(src_ptl); 805c4088ebdSKirill A. Shutemov spin_unlock(dst_ptl); 80671e3aac0SAndrea Arcangeli out: 80771e3aac0SAndrea Arcangeli return ret; 80871e3aac0SAndrea Arcangeli } 80971e3aac0SAndrea Arcangeli 810bae473a4SKirill A. Shutemov void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd) 811a1dd450bSWill Deacon { 812a1dd450bSWill Deacon pmd_t entry; 813a1dd450bSWill Deacon unsigned long haddr; 814a1dd450bSWill Deacon 815bae473a4SKirill A. Shutemov fe->ptl = pmd_lock(fe->vma->vm_mm, fe->pmd); 816bae473a4SKirill A. Shutemov if (unlikely(!pmd_same(*fe->pmd, orig_pmd))) 817a1dd450bSWill Deacon goto unlock; 818a1dd450bSWill Deacon 819a1dd450bSWill Deacon entry = pmd_mkyoung(orig_pmd); 820bae473a4SKirill A. Shutemov haddr = fe->address & HPAGE_PMD_MASK; 821bae473a4SKirill A. Shutemov if (pmdp_set_access_flags(fe->vma, haddr, fe->pmd, entry, 822bae473a4SKirill A. Shutemov fe->flags & FAULT_FLAG_WRITE)) 823bae473a4SKirill A. Shutemov update_mmu_cache_pmd(fe->vma, fe->address, fe->pmd); 824a1dd450bSWill Deacon 825a1dd450bSWill Deacon unlock: 826bae473a4SKirill A. Shutemov spin_unlock(fe->ptl); 827a1dd450bSWill Deacon } 828a1dd450bSWill Deacon 829bae473a4SKirill A. Shutemov static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd, 830bae473a4SKirill A. Shutemov struct page *page) 83171e3aac0SAndrea Arcangeli { 832bae473a4SKirill A. Shutemov struct vm_area_struct *vma = fe->vma; 833bae473a4SKirill A. 
Shutemov unsigned long haddr = fe->address & HPAGE_PMD_MASK; 83400501b53SJohannes Weiner struct mem_cgroup *memcg; 83571e3aac0SAndrea Arcangeli pgtable_t pgtable; 83671e3aac0SAndrea Arcangeli pmd_t _pmd; 83771e3aac0SAndrea Arcangeli int ret = 0, i; 83871e3aac0SAndrea Arcangeli struct page **pages; 8392ec74c3eSSagi Grimberg unsigned long mmun_start; /* For mmu_notifiers */ 8402ec74c3eSSagi Grimberg unsigned long mmun_end; /* For mmu_notifiers */ 84171e3aac0SAndrea Arcangeli 84271e3aac0SAndrea Arcangeli pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR, 84371e3aac0SAndrea Arcangeli GFP_KERNEL); 84471e3aac0SAndrea Arcangeli if (unlikely(!pages)) { 84571e3aac0SAndrea Arcangeli ret |= VM_FAULT_OOM; 84671e3aac0SAndrea Arcangeli goto out; 84771e3aac0SAndrea Arcangeli } 84871e3aac0SAndrea Arcangeli 84971e3aac0SAndrea Arcangeli for (i = 0; i < HPAGE_PMD_NR; i++) { 850cc5d462fSAndi Kleen pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE | 851bae473a4SKirill A. Shutemov __GFP_OTHER_NODE, vma, 852bae473a4SKirill A. Shutemov fe->address, page_to_nid(page)); 853b9bbfbe3SAndrea Arcangeli if (unlikely(!pages[i] || 854bae473a4SKirill A. Shutemov mem_cgroup_try_charge(pages[i], vma->vm_mm, 855bae473a4SKirill A. Shutemov GFP_KERNEL, &memcg, false))) { 856b9bbfbe3SAndrea Arcangeli if (pages[i]) 85771e3aac0SAndrea Arcangeli put_page(pages[i]); 858b9bbfbe3SAndrea Arcangeli while (--i >= 0) { 85900501b53SJohannes Weiner memcg = (void *)page_private(pages[i]); 86000501b53SJohannes Weiner set_page_private(pages[i], 0); 861f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(pages[i], memcg, 862f627c2f5SKirill A. Shutemov false); 863b9bbfbe3SAndrea Arcangeli put_page(pages[i]); 864b9bbfbe3SAndrea Arcangeli } 86571e3aac0SAndrea Arcangeli kfree(pages); 86671e3aac0SAndrea Arcangeli ret |= VM_FAULT_OOM; 86771e3aac0SAndrea Arcangeli goto out; 86871e3aac0SAndrea Arcangeli } 86900501b53SJohannes Weiner set_page_private(pages[i], (unsigned long)memcg); 87071e3aac0SAndrea Arcangeli } 87171e3aac0SAndrea Arcangeli 87271e3aac0SAndrea Arcangeli for (i = 0; i < HPAGE_PMD_NR; i++) { 87371e3aac0SAndrea Arcangeli copy_user_highpage(pages[i], page + i, 8740089e485SHillf Danton haddr + PAGE_SIZE * i, vma); 87571e3aac0SAndrea Arcangeli __SetPageUptodate(pages[i]); 87671e3aac0SAndrea Arcangeli cond_resched(); 87771e3aac0SAndrea Arcangeli } 87871e3aac0SAndrea Arcangeli 8792ec74c3eSSagi Grimberg mmun_start = haddr; 8802ec74c3eSSagi Grimberg mmun_end = haddr + HPAGE_PMD_SIZE; 881bae473a4SKirill A. Shutemov mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); 8822ec74c3eSSagi Grimberg 883bae473a4SKirill A. Shutemov fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); 884bae473a4SKirill A. Shutemov if (unlikely(!pmd_same(*fe->pmd, orig_pmd))) 88571e3aac0SAndrea Arcangeli goto out_free_pages; 886309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(page), page); 88771e3aac0SAndrea Arcangeli 888bae473a4SKirill A. Shutemov pmdp_huge_clear_flush_notify(vma, haddr, fe->pmd); 88971e3aac0SAndrea Arcangeli /* leave pmd empty until pte is filled */ 89071e3aac0SAndrea Arcangeli 891bae473a4SKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, fe->pmd); 892bae473a4SKirill A. Shutemov pmd_populate(vma->vm_mm, &_pmd, pgtable); 89371e3aac0SAndrea Arcangeli 89471e3aac0SAndrea Arcangeli for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 895bae473a4SKirill A. 
Shutemov pte_t entry; 89671e3aac0SAndrea Arcangeli entry = mk_pte(pages[i], vma->vm_page_prot); 89771e3aac0SAndrea Arcangeli entry = maybe_mkwrite(pte_mkdirty(entry), vma); 89800501b53SJohannes Weiner memcg = (void *)page_private(pages[i]); 89900501b53SJohannes Weiner set_page_private(pages[i], 0); 900bae473a4SKirill A. Shutemov page_add_new_anon_rmap(pages[i], fe->vma, haddr, false); 901f627c2f5SKirill A. Shutemov mem_cgroup_commit_charge(pages[i], memcg, false, false); 90200501b53SJohannes Weiner lru_cache_add_active_or_unevictable(pages[i], vma); 903bae473a4SKirill A. Shutemov fe->pte = pte_offset_map(&_pmd, haddr); 904bae473a4SKirill A. Shutemov VM_BUG_ON(!pte_none(*fe->pte)); 905bae473a4SKirill A. Shutemov set_pte_at(vma->vm_mm, haddr, fe->pte, entry); 906bae473a4SKirill A. Shutemov pte_unmap(fe->pte); 90771e3aac0SAndrea Arcangeli } 90871e3aac0SAndrea Arcangeli kfree(pages); 90971e3aac0SAndrea Arcangeli 91071e3aac0SAndrea Arcangeli smp_wmb(); /* make pte visible before pmd */ 911bae473a4SKirill A. Shutemov pmd_populate(vma->vm_mm, fe->pmd, pgtable); 912d281ee61SKirill A. Shutemov page_remove_rmap(page, true); 913bae473a4SKirill A. Shutemov spin_unlock(fe->ptl); 91471e3aac0SAndrea Arcangeli 915bae473a4SKirill A. Shutemov mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); 9162ec74c3eSSagi Grimberg 91771e3aac0SAndrea Arcangeli ret |= VM_FAULT_WRITE; 91871e3aac0SAndrea Arcangeli put_page(page); 91971e3aac0SAndrea Arcangeli 92071e3aac0SAndrea Arcangeli out: 92171e3aac0SAndrea Arcangeli return ret; 92271e3aac0SAndrea Arcangeli 92371e3aac0SAndrea Arcangeli out_free_pages: 924bae473a4SKirill A. Shutemov spin_unlock(fe->ptl); 925bae473a4SKirill A. Shutemov mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); 926b9bbfbe3SAndrea Arcangeli for (i = 0; i < HPAGE_PMD_NR; i++) { 92700501b53SJohannes Weiner memcg = (void *)page_private(pages[i]); 92800501b53SJohannes Weiner set_page_private(pages[i], 0); 929f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(pages[i], memcg, false); 93071e3aac0SAndrea Arcangeli put_page(pages[i]); 931b9bbfbe3SAndrea Arcangeli } 93271e3aac0SAndrea Arcangeli kfree(pages); 93371e3aac0SAndrea Arcangeli goto out; 93471e3aac0SAndrea Arcangeli } 93571e3aac0SAndrea Arcangeli 936bae473a4SKirill A. Shutemov int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd) 93771e3aac0SAndrea Arcangeli { 938bae473a4SKirill A. Shutemov struct vm_area_struct *vma = fe->vma; 93993b4796dSKirill A. Shutemov struct page *page = NULL, *new_page; 94000501b53SJohannes Weiner struct mem_cgroup *memcg; 941bae473a4SKirill A. Shutemov unsigned long haddr = fe->address & HPAGE_PMD_MASK; 9422ec74c3eSSagi Grimberg unsigned long mmun_start; /* For mmu_notifiers */ 9432ec74c3eSSagi Grimberg unsigned long mmun_end; /* For mmu_notifiers */ 9443b363692SMichal Hocko gfp_t huge_gfp; /* for allocation and charge */ 945bae473a4SKirill A. Shutemov int ret = 0; 94671e3aac0SAndrea Arcangeli 947bae473a4SKirill A. Shutemov fe->ptl = pmd_lockptr(vma->vm_mm, fe->pmd); 94881d1b09cSSasha Levin VM_BUG_ON_VMA(!vma->anon_vma, vma); 94993b4796dSKirill A. Shutemov if (is_huge_zero_pmd(orig_pmd)) 95093b4796dSKirill A. Shutemov goto alloc; 951bae473a4SKirill A. Shutemov spin_lock(fe->ptl); 952bae473a4SKirill A. 
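	/*
	 * orig_pmd was sampled without fe->ptl held; now that the lock is
	 * taken, the check below makes sure the pmd has not been changed
	 * (e.g. by a parallel fault) before trying to reuse the page.
	 */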
Shutemov if (unlikely(!pmd_same(*fe->pmd, orig_pmd))) 95371e3aac0SAndrea Arcangeli goto out_unlock; 95471e3aac0SAndrea Arcangeli 95571e3aac0SAndrea Arcangeli page = pmd_page(orig_pmd); 956309381feSSasha Levin VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page); 9571f25fe20SKirill A. Shutemov /* 9581f25fe20SKirill A. Shutemov * We can only reuse the page if nobody else maps the huge page or it's 9596d0a07edSAndrea Arcangeli * part. 9601f25fe20SKirill A. Shutemov */ 9616d0a07edSAndrea Arcangeli if (page_trans_huge_mapcount(page, NULL) == 1) { 96271e3aac0SAndrea Arcangeli pmd_t entry; 96371e3aac0SAndrea Arcangeli entry = pmd_mkyoung(orig_pmd); 96471e3aac0SAndrea Arcangeli entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 965bae473a4SKirill A. Shutemov if (pmdp_set_access_flags(vma, haddr, fe->pmd, entry, 1)) 966bae473a4SKirill A. Shutemov update_mmu_cache_pmd(vma, fe->address, fe->pmd); 96771e3aac0SAndrea Arcangeli ret |= VM_FAULT_WRITE; 96871e3aac0SAndrea Arcangeli goto out_unlock; 96971e3aac0SAndrea Arcangeli } 970ddc58f27SKirill A. Shutemov get_page(page); 971bae473a4SKirill A. Shutemov spin_unlock(fe->ptl); 97293b4796dSKirill A. Shutemov alloc: 97371e3aac0SAndrea Arcangeli if (transparent_hugepage_enabled(vma) && 974077fcf11SAneesh Kumar K.V !transparent_hugepage_debug_cow()) { 975444eb2a4SMel Gorman huge_gfp = alloc_hugepage_direct_gfpmask(vma); 9763b363692SMichal Hocko new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); 977077fcf11SAneesh Kumar K.V } else 97871e3aac0SAndrea Arcangeli new_page = NULL; 97971e3aac0SAndrea Arcangeli 9809a982250SKirill A. Shutemov if (likely(new_page)) { 9819a982250SKirill A. Shutemov prep_transhuge_page(new_page); 9829a982250SKirill A. Shutemov } else { 983eecc1e42SHugh Dickins if (!page) { 984bae473a4SKirill A. Shutemov split_huge_pmd(vma, fe->pmd, fe->address); 985e9b71ca9SKirill A. Shutemov ret |= VM_FAULT_FALLBACK; 98693b4796dSKirill A. Shutemov } else { 987bae473a4SKirill A. Shutemov ret = do_huge_pmd_wp_page_fallback(fe, orig_pmd, page); 9889845cbbdSKirill A. Shutemov if (ret & VM_FAULT_OOM) { 989bae473a4SKirill A. Shutemov split_huge_pmd(vma, fe->pmd, fe->address); 9909845cbbdSKirill A. Shutemov ret |= VM_FAULT_FALLBACK; 9919845cbbdSKirill A. Shutemov } 992ddc58f27SKirill A. Shutemov put_page(page); 99393b4796dSKirill A. Shutemov } 99417766ddeSDavid Rientjes count_vm_event(THP_FAULT_FALLBACK); 99571e3aac0SAndrea Arcangeli goto out; 99671e3aac0SAndrea Arcangeli } 99771e3aac0SAndrea Arcangeli 998bae473a4SKirill A. Shutemov if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm, 999bae473a4SKirill A. Shutemov huge_gfp, &memcg, true))) { 1000b9bbfbe3SAndrea Arcangeli put_page(new_page); 1001bae473a4SKirill A. Shutemov split_huge_pmd(vma, fe->pmd, fe->address); 1002bae473a4SKirill A. Shutemov if (page) 1003ddc58f27SKirill A. Shutemov put_page(page); 10049845cbbdSKirill A. Shutemov ret |= VM_FAULT_FALLBACK; 100517766ddeSDavid Rientjes count_vm_event(THP_FAULT_FALLBACK); 1006b9bbfbe3SAndrea Arcangeli goto out; 1007b9bbfbe3SAndrea Arcangeli } 1008b9bbfbe3SAndrea Arcangeli 100917766ddeSDavid Rientjes count_vm_event(THP_FAULT_ALLOC); 101017766ddeSDavid Rientjes 1011eecc1e42SHugh Dickins if (!page) 101293b4796dSKirill A. Shutemov clear_huge_page(new_page, haddr, HPAGE_PMD_NR); 101393b4796dSKirill A. 
Shutemov else 101471e3aac0SAndrea Arcangeli copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); 101571e3aac0SAndrea Arcangeli __SetPageUptodate(new_page); 101671e3aac0SAndrea Arcangeli 10172ec74c3eSSagi Grimberg mmun_start = haddr; 10182ec74c3eSSagi Grimberg mmun_end = haddr + HPAGE_PMD_SIZE; 1019bae473a4SKirill A. Shutemov mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); 10202ec74c3eSSagi Grimberg 1021bae473a4SKirill A. Shutemov spin_lock(fe->ptl); 102293b4796dSKirill A. Shutemov if (page) 1023ddc58f27SKirill A. Shutemov put_page(page); 1024bae473a4SKirill A. Shutemov if (unlikely(!pmd_same(*fe->pmd, orig_pmd))) { 1025bae473a4SKirill A. Shutemov spin_unlock(fe->ptl); 1026f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(new_page, memcg, true); 102771e3aac0SAndrea Arcangeli put_page(new_page); 10282ec74c3eSSagi Grimberg goto out_mn; 1029b9bbfbe3SAndrea Arcangeli } else { 103071e3aac0SAndrea Arcangeli pmd_t entry; 10313122359aSKirill A. Shutemov entry = mk_huge_pmd(new_page, vma->vm_page_prot); 10323122359aSKirill A. Shutemov entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 1033bae473a4SKirill A. Shutemov pmdp_huge_clear_flush_notify(vma, haddr, fe->pmd); 1034d281ee61SKirill A. Shutemov page_add_new_anon_rmap(new_page, vma, haddr, true); 1035f627c2f5SKirill A. Shutemov mem_cgroup_commit_charge(new_page, memcg, false, true); 103600501b53SJohannes Weiner lru_cache_add_active_or_unevictable(new_page, vma); 1037bae473a4SKirill A. Shutemov set_pmd_at(vma->vm_mm, haddr, fe->pmd, entry); 1038bae473a4SKirill A. Shutemov update_mmu_cache_pmd(vma, fe->address, fe->pmd); 1039eecc1e42SHugh Dickins if (!page) { 1040bae473a4SKirill A. Shutemov add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); 104197ae1749SKirill A. Shutemov put_huge_zero_page(); 104297ae1749SKirill A. Shutemov } else { 1043309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(page), page); 1044d281ee61SKirill A. Shutemov page_remove_rmap(page, true); 104571e3aac0SAndrea Arcangeli put_page(page); 104693b4796dSKirill A. Shutemov } 104771e3aac0SAndrea Arcangeli ret |= VM_FAULT_WRITE; 104871e3aac0SAndrea Arcangeli } 1049bae473a4SKirill A. Shutemov spin_unlock(fe->ptl); 10502ec74c3eSSagi Grimberg out_mn: 1051bae473a4SKirill A. Shutemov mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); 10522ec74c3eSSagi Grimberg out: 10532ec74c3eSSagi Grimberg return ret; 105471e3aac0SAndrea Arcangeli out_unlock: 1055bae473a4SKirill A. Shutemov spin_unlock(fe->ptl); 105671e3aac0SAndrea Arcangeli return ret; 105771e3aac0SAndrea Arcangeli } 105871e3aac0SAndrea Arcangeli 1059b676b293SDavid Rientjes struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, 106071e3aac0SAndrea Arcangeli unsigned long addr, 106171e3aac0SAndrea Arcangeli pmd_t *pmd, 106271e3aac0SAndrea Arcangeli unsigned int flags) 106371e3aac0SAndrea Arcangeli { 1064b676b293SDavid Rientjes struct mm_struct *mm = vma->vm_mm; 106571e3aac0SAndrea Arcangeli struct page *page = NULL; 106671e3aac0SAndrea Arcangeli 1067c4088ebdSKirill A. Shutemov assert_spin_locked(pmd_lockptr(mm, pmd)); 106871e3aac0SAndrea Arcangeli 106971e3aac0SAndrea Arcangeli if (flags & FOLL_WRITE && !pmd_write(*pmd)) 107071e3aac0SAndrea Arcangeli goto out; 107171e3aac0SAndrea Arcangeli 107285facf25SKirill A. Shutemov /* Avoid dumping huge zero page */ 107385facf25SKirill A. Shutemov if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) 107485facf25SKirill A. Shutemov return ERR_PTR(-EFAULT); 107585facf25SKirill A. 
Shutemov 10762b4847e7SMel Gorman /* Full NUMA hinting faults to serialise migration in fault paths */ 10778a0516edSMel Gorman if ((flags & FOLL_NUMA) && pmd_protnone(*pmd)) 10782b4847e7SMel Gorman goto out; 10792b4847e7SMel Gorman 108071e3aac0SAndrea Arcangeli page = pmd_page(*pmd); 1081*ca120cf6SDan Williams VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page); 10823565fce3SDan Williams if (flags & FOLL_TOUCH) 10833565fce3SDan Williams touch_pmd(vma, addr, pmd); 1084de60f5f1SEric B Munson if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { 1085e90309c9SKirill A. Shutemov /* 1086e90309c9SKirill A. Shutemov * We don't mlock() pte-mapped THPs. This way we can avoid 1087e90309c9SKirill A. Shutemov * leaking mlocked pages into non-VM_LOCKED VMAs. 1088e90309c9SKirill A. Shutemov * 10899a73f61bSKirill A. Shutemov * For anon THP: 10909a73f61bSKirill A. Shutemov * 1091e90309c9SKirill A. Shutemov * In most cases the pmd is the only mapping of the page as we 1092e90309c9SKirill A. Shutemov * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for 1093e90309c9SKirill A. Shutemov * writable private mappings in populate_vma_page_range(). 1094e90309c9SKirill A. Shutemov * 1095e90309c9SKirill A. Shutemov * The only scenario when we have the page shared here is if we 1096e90309c9SKirill A. Shutemov * mlocking read-only mapping shared over fork(). We skip 1097e90309c9SKirill A. Shutemov * mlocking such pages. 10989a73f61bSKirill A. Shutemov * 10999a73f61bSKirill A. Shutemov * For file THP: 11009a73f61bSKirill A. Shutemov * 11019a73f61bSKirill A. Shutemov * We can expect PageDoubleMap() to be stable under page lock: 11029a73f61bSKirill A. Shutemov * for file pages we set it in page_add_file_rmap(), which 11039a73f61bSKirill A. Shutemov * requires page to be locked. 1104e90309c9SKirill A. Shutemov */ 11059a73f61bSKirill A. Shutemov 11069a73f61bSKirill A. Shutemov if (PageAnon(page) && compound_mapcount(page) != 1) 11079a73f61bSKirill A. Shutemov goto skip_mlock; 11089a73f61bSKirill A. Shutemov if (PageDoubleMap(page) || !page->mapping) 11099a73f61bSKirill A. Shutemov goto skip_mlock; 11109a73f61bSKirill A. Shutemov if (!trylock_page(page)) 11119a73f61bSKirill A. Shutemov goto skip_mlock; 1112b676b293SDavid Rientjes lru_add_drain(); 11139a73f61bSKirill A. Shutemov if (page->mapping && !PageDoubleMap(page)) 1114b676b293SDavid Rientjes mlock_vma_page(page); 1115b676b293SDavid Rientjes unlock_page(page); 1116b676b293SDavid Rientjes } 11179a73f61bSKirill A. Shutemov skip_mlock: 111871e3aac0SAndrea Arcangeli page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; 1119*ca120cf6SDan Williams VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page); 112071e3aac0SAndrea Arcangeli if (flags & FOLL_GET) 1121ddc58f27SKirill A. Shutemov get_page(page); 112271e3aac0SAndrea Arcangeli 112371e3aac0SAndrea Arcangeli out: 112471e3aac0SAndrea Arcangeli return page; 112571e3aac0SAndrea Arcangeli } 112671e3aac0SAndrea Arcangeli 1127d10e63f2SMel Gorman /* NUMA hinting page fault entry point for trans huge pmds */ 1128bae473a4SKirill A. Shutemov int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd) 1129d10e63f2SMel Gorman { 1130bae473a4SKirill A. Shutemov struct vm_area_struct *vma = fe->vma; 1131b8916634SMel Gorman struct anon_vma *anon_vma = NULL; 1132b32967ffSMel Gorman struct page *page; 1133bae473a4SKirill A. 
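/*
 * Worked example, not from the upstream source: the subpage adjustment
 * in follow_trans_huge_pmd() above,
 *
 *	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
 *
 * selects the base page that actually contains 'addr'. Assuming the
 * common x86-64 configuration (PAGE_SHIFT == 12, HPAGE_PMD_SIZE == 2MB),
 * for addr == 0x7f3200603000 the offset inside the huge page is
 * 0x7f3200603000 & 0x1fffff == 0x3000, so the caller gets
 * head + (0x3000 >> 12), i.e. the fourth base page (index 3).
 */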
Shutemov unsigned long haddr = fe->address & HPAGE_PMD_MASK; 11348191acbdSMel Gorman int page_nid = -1, this_nid = numa_node_id(); 113590572890SPeter Zijlstra int target_nid, last_cpupid = -1; 11368191acbdSMel Gorman bool page_locked; 11378191acbdSMel Gorman bool migrated = false; 1138b191f9b1SMel Gorman bool was_writable; 11396688cc05SPeter Zijlstra int flags = 0; 1140d10e63f2SMel Gorman 1141c0e7cad9SMel Gorman /* A PROT_NONE fault should not end up here */ 1142c0e7cad9SMel Gorman BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))); 1143c0e7cad9SMel Gorman 1144bae473a4SKirill A. Shutemov fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); 1145bae473a4SKirill A. Shutemov if (unlikely(!pmd_same(pmd, *fe->pmd))) 1146d10e63f2SMel Gorman goto out_unlock; 1147d10e63f2SMel Gorman 1148de466bd6SMel Gorman /* 1149de466bd6SMel Gorman * If there are potential migrations, wait for completion and retry 1150de466bd6SMel Gorman * without disrupting NUMA hinting information. Do not relock and 1151de466bd6SMel Gorman * check_same as the page may no longer be mapped. 1152de466bd6SMel Gorman */ 1153bae473a4SKirill A. Shutemov if (unlikely(pmd_trans_migrating(*fe->pmd))) { 1154bae473a4SKirill A. Shutemov page = pmd_page(*fe->pmd); 1155bae473a4SKirill A. Shutemov spin_unlock(fe->ptl); 11565d833062SMel Gorman wait_on_page_locked(page); 1157de466bd6SMel Gorman goto out; 1158de466bd6SMel Gorman } 1159de466bd6SMel Gorman 1160d10e63f2SMel Gorman page = pmd_page(pmd); 1161a1a46184SMel Gorman BUG_ON(is_huge_zero_page(page)); 11628191acbdSMel Gorman page_nid = page_to_nid(page); 116390572890SPeter Zijlstra last_cpupid = page_cpupid_last(page); 116403c5a6e1SMel Gorman count_vm_numa_event(NUMA_HINT_FAULTS); 116504bb2f94SRik van Riel if (page_nid == this_nid) { 116603c5a6e1SMel Gorman count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); 116704bb2f94SRik van Riel flags |= TNF_FAULT_LOCAL; 116804bb2f94SRik van Riel } 11694daae3b4SMel Gorman 1170bea66fbdSMel Gorman /* See similar comment in do_numa_page for explanation */ 1171bea66fbdSMel Gorman if (!(vma->vm_flags & VM_WRITE)) 11726688cc05SPeter Zijlstra flags |= TNF_NO_GROUP; 11736688cc05SPeter Zijlstra 11746688cc05SPeter Zijlstra /* 1175ff9042b1SMel Gorman * Acquire the page lock to serialise THP migrations but avoid dropping 1176ff9042b1SMel Gorman * page_table_lock if at all possible 1177ff9042b1SMel Gorman */ 1178b8916634SMel Gorman page_locked = trylock_page(page); 1179b8916634SMel Gorman target_nid = mpol_misplaced(page, vma, haddr); 1180b8916634SMel Gorman if (target_nid == -1) { 1181b8916634SMel Gorman /* If the page was locked, there are no parallel migrations */ 1182a54a407fSMel Gorman if (page_locked) 1183b8916634SMel Gorman goto clear_pmdnuma; 11842b4847e7SMel Gorman } 1185cbee9f88SPeter Zijlstra 1186de466bd6SMel Gorman /* Migration could have started since the pmd_trans_migrating check */ 11872b4847e7SMel Gorman if (!page_locked) { 1188bae473a4SKirill A. Shutemov spin_unlock(fe->ptl); 1189b8916634SMel Gorman wait_on_page_locked(page); 1190a54a407fSMel Gorman page_nid = -1; 1191b8916634SMel Gorman goto out; 1192b8916634SMel Gorman } 1193b8916634SMel Gorman 11942b4847e7SMel Gorman /* 11952b4847e7SMel Gorman * Page is misplaced. Page lock serialises migrations. Acquire anon_vma 11962b4847e7SMel Gorman * to serialises splits 11972b4847e7SMel Gorman */ 1198b8916634SMel Gorman get_page(page); 1199bae473a4SKirill A. 
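/*
 * The reference taken just above keeps the THP alive once fe->ptl is
 * dropped below: the page table lock is released so that
 * page_lock_anon_vma_read() can sleep, and without the extra pin a
 * parallel unmap could free the page before the pmd is re-checked
 * under the lock.
 */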
Shutemov spin_unlock(fe->ptl); 1200b8916634SMel Gorman anon_vma = page_lock_anon_vma_read(page); 1201b32967ffSMel Gorman 1202c69307d5SPeter Zijlstra /* Confirm the PMD did not change while page_table_lock was released */ 1203bae473a4SKirill A. Shutemov spin_lock(fe->ptl); 1204bae473a4SKirill A. Shutemov if (unlikely(!pmd_same(pmd, *fe->pmd))) { 1205b32967ffSMel Gorman unlock_page(page); 1206b32967ffSMel Gorman put_page(page); 1207a54a407fSMel Gorman page_nid = -1; 1208b32967ffSMel Gorman goto out_unlock; 1209b32967ffSMel Gorman } 1210ff9042b1SMel Gorman 1211c3a489caSMel Gorman /* Bail if we fail to protect against THP splits for any reason */ 1212c3a489caSMel Gorman if (unlikely(!anon_vma)) { 1213c3a489caSMel Gorman put_page(page); 1214c3a489caSMel Gorman page_nid = -1; 1215c3a489caSMel Gorman goto clear_pmdnuma; 1216c3a489caSMel Gorman } 1217c3a489caSMel Gorman 1218a54a407fSMel Gorman /* 1219a54a407fSMel Gorman * Migrate the THP to the requested node, returns with page unlocked 12208a0516edSMel Gorman * and access rights restored. 1221a54a407fSMel Gorman */ 1222bae473a4SKirill A. Shutemov spin_unlock(fe->ptl); 1223bae473a4SKirill A. Shutemov migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma, 1224bae473a4SKirill A. Shutemov fe->pmd, pmd, fe->address, page, target_nid); 12256688cc05SPeter Zijlstra if (migrated) { 12266688cc05SPeter Zijlstra flags |= TNF_MIGRATED; 12278191acbdSMel Gorman page_nid = target_nid; 1228074c2381SMel Gorman } else 1229074c2381SMel Gorman flags |= TNF_MIGRATE_FAIL; 1230b32967ffSMel Gorman 12318191acbdSMel Gorman goto out; 12324daae3b4SMel Gorman clear_pmdnuma: 1233a54a407fSMel Gorman BUG_ON(!PageLocked(page)); 1234b191f9b1SMel Gorman was_writable = pmd_write(pmd); 12354d942466SMel Gorman pmd = pmd_modify(pmd, vma->vm_page_prot); 1236b7b04004SMel Gorman pmd = pmd_mkyoung(pmd); 1237b191f9b1SMel Gorman if (was_writable) 1238b191f9b1SMel Gorman pmd = pmd_mkwrite(pmd); 1239bae473a4SKirill A. Shutemov set_pmd_at(vma->vm_mm, haddr, fe->pmd, pmd); 1240bae473a4SKirill A. Shutemov update_mmu_cache_pmd(vma, fe->address, fe->pmd); 1241a54a407fSMel Gorman unlock_page(page); 1242d10e63f2SMel Gorman out_unlock: 1243bae473a4SKirill A. Shutemov spin_unlock(fe->ptl); 1244b8916634SMel Gorman 1245b8916634SMel Gorman out: 1246b8916634SMel Gorman if (anon_vma) 1247b8916634SMel Gorman page_unlock_anon_vma_read(anon_vma); 1248b8916634SMel Gorman 12498191acbdSMel Gorman if (page_nid != -1) 1250bae473a4SKirill A. Shutemov task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, fe->flags); 12518191acbdSMel Gorman 1252d10e63f2SMel Gorman return 0; 1253d10e63f2SMel Gorman } 1254d10e63f2SMel Gorman 1255319904adSHuang Ying /* 1256319904adSHuang Ying * Return true if we do MADV_FREE successfully on entire pmd page. 1257319904adSHuang Ying * Otherwise, return false. 1258319904adSHuang Ying */ 1259319904adSHuang Ying bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1260b8d3c4c3SMinchan Kim pmd_t *pmd, unsigned long addr, unsigned long next) 1261b8d3c4c3SMinchan Kim { 1262b8d3c4c3SMinchan Kim spinlock_t *ptl; 1263b8d3c4c3SMinchan Kim pmd_t orig_pmd; 1264b8d3c4c3SMinchan Kim struct page *page; 1265b8d3c4c3SMinchan Kim struct mm_struct *mm = tlb->mm; 1266319904adSHuang Ying bool ret = false; 1267b8d3c4c3SMinchan Kim 1268b6ec57f4SKirill A. Shutemov ptl = pmd_trans_huge_lock(pmd, vma); 1269b6ec57f4SKirill A. 
Shutemov if (!ptl) 127025eedabeSLinus Torvalds goto out_unlocked; 1271b8d3c4c3SMinchan Kim 1272b8d3c4c3SMinchan Kim orig_pmd = *pmd; 1273319904adSHuang Ying if (is_huge_zero_pmd(orig_pmd)) 1274b8d3c4c3SMinchan Kim goto out; 1275b8d3c4c3SMinchan Kim 1276b8d3c4c3SMinchan Kim page = pmd_page(orig_pmd); 1277b8d3c4c3SMinchan Kim /* 1278b8d3c4c3SMinchan Kim * If other processes are mapping this page, we couldn't discard 1279b8d3c4c3SMinchan Kim * the page unless they all do MADV_FREE so let's skip the page. 1280b8d3c4c3SMinchan Kim */ 1281b8d3c4c3SMinchan Kim if (page_mapcount(page) != 1) 1282b8d3c4c3SMinchan Kim goto out; 1283b8d3c4c3SMinchan Kim 1284b8d3c4c3SMinchan Kim if (!trylock_page(page)) 1285b8d3c4c3SMinchan Kim goto out; 1286b8d3c4c3SMinchan Kim 1287b8d3c4c3SMinchan Kim /* 1288b8d3c4c3SMinchan Kim * If user want to discard part-pages of THP, split it so MADV_FREE 1289b8d3c4c3SMinchan Kim * will deactivate only them. 1290b8d3c4c3SMinchan Kim */ 1291b8d3c4c3SMinchan Kim if (next - addr != HPAGE_PMD_SIZE) { 1292b8d3c4c3SMinchan Kim get_page(page); 1293b8d3c4c3SMinchan Kim spin_unlock(ptl); 12949818b8cdSHuang Ying split_huge_page(page); 1295b8d3c4c3SMinchan Kim put_page(page); 1296b8d3c4c3SMinchan Kim unlock_page(page); 1297b8d3c4c3SMinchan Kim goto out_unlocked; 1298b8d3c4c3SMinchan Kim } 1299b8d3c4c3SMinchan Kim 1300b8d3c4c3SMinchan Kim if (PageDirty(page)) 1301b8d3c4c3SMinchan Kim ClearPageDirty(page); 1302b8d3c4c3SMinchan Kim unlock_page(page); 1303b8d3c4c3SMinchan Kim 1304b8d3c4c3SMinchan Kim if (PageActive(page)) 1305b8d3c4c3SMinchan Kim deactivate_page(page); 1306b8d3c4c3SMinchan Kim 1307b8d3c4c3SMinchan Kim if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) { 1308b8d3c4c3SMinchan Kim orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, 1309b8d3c4c3SMinchan Kim tlb->fullmm); 1310b8d3c4c3SMinchan Kim orig_pmd = pmd_mkold(orig_pmd); 1311b8d3c4c3SMinchan Kim orig_pmd = pmd_mkclean(orig_pmd); 1312b8d3c4c3SMinchan Kim 1313b8d3c4c3SMinchan Kim set_pmd_at(mm, addr, pmd, orig_pmd); 1314b8d3c4c3SMinchan Kim tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 1315b8d3c4c3SMinchan Kim } 1316319904adSHuang Ying ret = true; 1317b8d3c4c3SMinchan Kim out: 1318b8d3c4c3SMinchan Kim spin_unlock(ptl); 1319b8d3c4c3SMinchan Kim out_unlocked: 1320b8d3c4c3SMinchan Kim return ret; 1321b8d3c4c3SMinchan Kim } 1322b8d3c4c3SMinchan Kim 132371e3aac0SAndrea Arcangeli int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1324f21760b1SShaohua Li pmd_t *pmd, unsigned long addr) 132571e3aac0SAndrea Arcangeli { 1326f5c8ad47SDavid Miller pmd_t orig_pmd; 1327da146769SKirill A. Shutemov spinlock_t *ptl; 1328da146769SKirill A. Shutemov 1329b6ec57f4SKirill A. Shutemov ptl = __pmd_trans_huge_lock(pmd, vma); 1330b6ec57f4SKirill A. Shutemov if (!ptl) 1331da146769SKirill A. Shutemov return 0; 1332a6bf2bb0SAneesh Kumar K.V /* 1333a6bf2bb0SAneesh Kumar K.V * For architectures like ppc64 we look at deposited pgtable 13348809aa2dSAneesh Kumar K.V * when calling pmdp_huge_get_and_clear. So do the 1335a6bf2bb0SAneesh Kumar K.V * pgtable_trans_huge_withdraw after finishing pmdp related 1336a6bf2bb0SAneesh Kumar K.V * operations. 1337a6bf2bb0SAneesh Kumar K.V */ 13388809aa2dSAneesh Kumar K.V orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, 1339fcbe08d6SMartin Schwidefsky tlb->fullmm); 1340f21760b1SShaohua Li tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 13414897c765SMatthew Wilcox if (vma_is_dax(vma)) { 13424897c765SMatthew Wilcox spin_unlock(ptl); 1343da146769SKirill A. 
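/*
 * Illustrative sketch, not from the upstream source:
 * madvise_free_huge_pmd() above is reached from userspace through
 * madvise(MADV_FREE) (Linux 4.5+). When the advice covers the whole
 * huge pmd the page is handled in place (dirty/young cleared, page
 * marked clean and left mapped for lazy reclaim); a range smaller than
 * HPAGE_PMD_SIZE takes the split_huge_page() branch instead. A minimal
 * sketch, assuming a 2MB PMD size and THP enabled for the mapping:
 *
 *	#include <string.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 2UL << 20;
 *		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (p == MAP_FAILED)
 *			return 1;
 *		madvise(p, len, MADV_HUGEPAGE);
 *		memset(p, 1, len);
 *		madvise(p, len, MADV_FREE);
 *		return 0;
 *	}
 */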
Shutemov if (is_huge_zero_pmd(orig_pmd)) 1344aa88b68cSKirill A. Shutemov tlb_remove_page(tlb, pmd_page(orig_pmd)); 1345da146769SKirill A. Shutemov } else if (is_huge_zero_pmd(orig_pmd)) { 1346da146769SKirill A. Shutemov pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd)); 1347e1f56c89SKirill A. Shutemov atomic_long_dec(&tlb->mm->nr_ptes); 1348bf929152SKirill A. Shutemov spin_unlock(ptl); 1349aa88b68cSKirill A. Shutemov tlb_remove_page(tlb, pmd_page(orig_pmd)); 1350479f0abbSKirill A. Shutemov } else { 13514897c765SMatthew Wilcox struct page *page = pmd_page(orig_pmd); 1352d281ee61SKirill A. Shutemov page_remove_rmap(page, true); 1353309381feSSasha Levin VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); 1354309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(page), page); 1355b5072380SKirill A. Shutemov if (PageAnon(page)) { 1356b5072380SKirill A. Shutemov pgtable_t pgtable; 1357b5072380SKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd); 1358b5072380SKirill A. Shutemov pte_free(tlb->mm, pgtable); 1359e1f56c89SKirill A. Shutemov atomic_long_dec(&tlb->mm->nr_ptes); 1360b5072380SKirill A. Shutemov add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); 1361b5072380SKirill A. Shutemov } else { 1362b5072380SKirill A. Shutemov add_mm_counter(tlb->mm, MM_FILEPAGES, -HPAGE_PMD_NR); 1363b5072380SKirill A. Shutemov } 1364bf929152SKirill A. Shutemov spin_unlock(ptl); 1365e77b0852SAneesh Kumar K.V tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE); 1366479f0abbSKirill A. Shutemov } 1367da146769SKirill A. Shutemov return 1; 136871e3aac0SAndrea Arcangeli } 136971e3aac0SAndrea Arcangeli 1370bf8616d5SHugh Dickins bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, 137137a1c49aSAndrea Arcangeli unsigned long new_addr, unsigned long old_end, 137237a1c49aSAndrea Arcangeli pmd_t *old_pmd, pmd_t *new_pmd) 137337a1c49aSAndrea Arcangeli { 1374bf929152SKirill A. Shutemov spinlock_t *old_ptl, *new_ptl; 137537a1c49aSAndrea Arcangeli pmd_t pmd; 137637a1c49aSAndrea Arcangeli struct mm_struct *mm = vma->vm_mm; 137737a1c49aSAndrea Arcangeli 137837a1c49aSAndrea Arcangeli if ((old_addr & ~HPAGE_PMD_MASK) || 137937a1c49aSAndrea Arcangeli (new_addr & ~HPAGE_PMD_MASK) || 1380bf8616d5SHugh Dickins old_end - old_addr < HPAGE_PMD_SIZE) 13814b471e88SKirill A. Shutemov return false; 138237a1c49aSAndrea Arcangeli 138337a1c49aSAndrea Arcangeli /* 138437a1c49aSAndrea Arcangeli * The destination pmd shouldn't be established, free_pgtables() 138537a1c49aSAndrea Arcangeli * should have release it. 138637a1c49aSAndrea Arcangeli */ 138737a1c49aSAndrea Arcangeli if (WARN_ON(!pmd_none(*new_pmd))) { 138837a1c49aSAndrea Arcangeli VM_BUG_ON(pmd_trans_huge(*new_pmd)); 13894b471e88SKirill A. Shutemov return false; 139037a1c49aSAndrea Arcangeli } 139137a1c49aSAndrea Arcangeli 1392bf929152SKirill A. Shutemov /* 1393bf929152SKirill A. Shutemov * We don't have to worry about the ordering of src and dst 1394bf929152SKirill A. Shutemov * ptlocks because exclusive mmap_sem prevents deadlock. 1395bf929152SKirill A. Shutemov */ 1396b6ec57f4SKirill A. Shutemov old_ptl = __pmd_trans_huge_lock(old_pmd, vma); 1397b6ec57f4SKirill A. Shutemov if (old_ptl) { 1398bf929152SKirill A. Shutemov new_ptl = pmd_lockptr(mm, new_pmd); 1399bf929152SKirill A. Shutemov if (new_ptl != old_ptl) 1400bf929152SKirill A. Shutemov spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); 14018809aa2dSAneesh Kumar K.V pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); 140237a1c49aSAndrea Arcangeli VM_BUG_ON(!pmd_none(*new_pmd)); 14033592806cSKirill A. 
Shutemov 140469a8ec2dSKirill A. Shutemov if (pmd_move_must_withdraw(new_ptl, old_ptl) && 140569a8ec2dSKirill A. Shutemov vma_is_anonymous(vma)) { 1406b3084f4dSAneesh Kumar K.V pgtable_t pgtable; 14073592806cSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); 14083592806cSKirill A. Shutemov pgtable_trans_huge_deposit(mm, new_pmd, pgtable); 14093592806cSKirill A. Shutemov } 1410b3084f4dSAneesh Kumar K.V set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); 1411b3084f4dSAneesh Kumar K.V if (new_ptl != old_ptl) 1412b3084f4dSAneesh Kumar K.V spin_unlock(new_ptl); 1413bf929152SKirill A. Shutemov spin_unlock(old_ptl); 14144b471e88SKirill A. Shutemov return true; 141537a1c49aSAndrea Arcangeli } 14164b471e88SKirill A. Shutemov return false; 141737a1c49aSAndrea Arcangeli } 141837a1c49aSAndrea Arcangeli 1419f123d74aSMel Gorman /* 1420f123d74aSMel Gorman * Returns 1421f123d74aSMel Gorman * - 0 if PMD could not be locked 1422f123d74aSMel Gorman * - 1 if PMD was locked but protections unchange and TLB flush unnecessary 1423f123d74aSMel Gorman * - HPAGE_PMD_NR is protections changed and TLB flush necessary 1424f123d74aSMel Gorman */ 1425cd7548abSJohannes Weiner int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 1426e944fd67SMel Gorman unsigned long addr, pgprot_t newprot, int prot_numa) 1427cd7548abSJohannes Weiner { 1428cd7548abSJohannes Weiner struct mm_struct *mm = vma->vm_mm; 1429bf929152SKirill A. Shutemov spinlock_t *ptl; 1430cd7548abSJohannes Weiner int ret = 0; 1431cd7548abSJohannes Weiner 1432b6ec57f4SKirill A. Shutemov ptl = __pmd_trans_huge_lock(pmd, vma); 1433b6ec57f4SKirill A. Shutemov if (ptl) { 1434cd7548abSJohannes Weiner pmd_t entry; 1435b191f9b1SMel Gorman bool preserve_write = prot_numa && pmd_write(*pmd); 1436ba68bc01SMel Gorman ret = 1; 1437e944fd67SMel Gorman 1438e944fd67SMel Gorman /* 1439e944fd67SMel Gorman * Avoid trapping faults against the zero page. The read-only 1440e944fd67SMel Gorman * data is likely to be read-cached on the local CPU and 1441e944fd67SMel Gorman * local/remote hits to the zero page are not interesting. 1442e944fd67SMel Gorman */ 1443e944fd67SMel Gorman if (prot_numa && is_huge_zero_pmd(*pmd)) { 1444e944fd67SMel Gorman spin_unlock(ptl); 1445ba68bc01SMel Gorman return ret; 1446e944fd67SMel Gorman } 1447e944fd67SMel Gorman 144810c1045fSMel Gorman if (!prot_numa || !pmd_protnone(*pmd)) { 14498809aa2dSAneesh Kumar K.V entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd); 1450cd7548abSJohannes Weiner entry = pmd_modify(entry, newprot); 1451b191f9b1SMel Gorman if (preserve_write) 1452b191f9b1SMel Gorman entry = pmd_mkwrite(entry); 1453f123d74aSMel Gorman ret = HPAGE_PMD_NR; 145456eecdb9SAneesh Kumar K.V set_pmd_at(mm, addr, pmd, entry); 1455b237adedSKirill A. Shutemov BUG_ON(vma_is_anonymous(vma) && !preserve_write && 1456b237adedSKirill A. Shutemov pmd_write(entry)); 145710c1045fSMel Gorman } 1458bf929152SKirill A. Shutemov spin_unlock(ptl); 1459cd7548abSJohannes Weiner } 1460cd7548abSJohannes Weiner 1461cd7548abSJohannes Weiner return ret; 1462cd7548abSJohannes Weiner } 1463cd7548abSJohannes Weiner 1464025c5b24SNaoya Horiguchi /* 14658f19b0c0SHuang Ying * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise. 1466025c5b24SNaoya Horiguchi * 14678f19b0c0SHuang Ying * Note that if it returns page table lock pointer, this routine returns without 14688f19b0c0SHuang Ying * unlocking page table lock. So callers must unlock it. 1469025c5b24SNaoya Horiguchi */ 1470b6ec57f4SKirill A. 
Shutemov spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) 1471025c5b24SNaoya Horiguchi { 1472b6ec57f4SKirill A. Shutemov spinlock_t *ptl; 1473b6ec57f4SKirill A. Shutemov ptl = pmd_lock(vma->vm_mm, pmd); 14745c7fb56eSDan Williams if (likely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd))) 1475b6ec57f4SKirill A. Shutemov return ptl; 1476b6ec57f4SKirill A. Shutemov spin_unlock(ptl); 1477b6ec57f4SKirill A. Shutemov return NULL; 1478025c5b24SNaoya Horiguchi } 1479025c5b24SNaoya Horiguchi 1480eef1b3baSKirill A. Shutemov static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, 1481eef1b3baSKirill A. Shutemov unsigned long haddr, pmd_t *pmd) 1482eef1b3baSKirill A. Shutemov { 1483eef1b3baSKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 1484eef1b3baSKirill A. Shutemov pgtable_t pgtable; 1485eef1b3baSKirill A. Shutemov pmd_t _pmd; 1486eef1b3baSKirill A. Shutemov int i; 1487eef1b3baSKirill A. Shutemov 1488eef1b3baSKirill A. Shutemov /* leave pmd empty until pte is filled */ 1489eef1b3baSKirill A. Shutemov pmdp_huge_clear_flush_notify(vma, haddr, pmd); 1490eef1b3baSKirill A. Shutemov 1491eef1b3baSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1492eef1b3baSKirill A. Shutemov pmd_populate(mm, &_pmd, pgtable); 1493eef1b3baSKirill A. Shutemov 1494eef1b3baSKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 1495eef1b3baSKirill A. Shutemov pte_t *pte, entry; 1496eef1b3baSKirill A. Shutemov entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); 1497eef1b3baSKirill A. Shutemov entry = pte_mkspecial(entry); 1498eef1b3baSKirill A. Shutemov pte = pte_offset_map(&_pmd, haddr); 1499eef1b3baSKirill A. Shutemov VM_BUG_ON(!pte_none(*pte)); 1500eef1b3baSKirill A. Shutemov set_pte_at(mm, haddr, pte, entry); 1501eef1b3baSKirill A. Shutemov pte_unmap(pte); 1502eef1b3baSKirill A. Shutemov } 1503eef1b3baSKirill A. Shutemov smp_wmb(); /* make pte visible before pmd */ 1504eef1b3baSKirill A. Shutemov pmd_populate(mm, pmd, pgtable); 1505eef1b3baSKirill A. Shutemov put_huge_zero_page(); 1506eef1b3baSKirill A. Shutemov } 1507eef1b3baSKirill A. Shutemov 1508eef1b3baSKirill A. Shutemov static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, 1509ba988280SKirill A. Shutemov unsigned long haddr, bool freeze) 1510eef1b3baSKirill A. Shutemov { 1511eef1b3baSKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 1512eef1b3baSKirill A. Shutemov struct page *page; 1513eef1b3baSKirill A. Shutemov pgtable_t pgtable; 1514eef1b3baSKirill A. Shutemov pmd_t _pmd; 1515804dd150SAndrea Arcangeli bool young, write, dirty, soft_dirty; 15162ac015e2SKirill A. Shutemov unsigned long addr; 1517eef1b3baSKirill A. Shutemov int i; 1518eef1b3baSKirill A. Shutemov 1519eef1b3baSKirill A. Shutemov VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); 1520eef1b3baSKirill A. Shutemov VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 1521eef1b3baSKirill A. Shutemov VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); 15225c7fb56eSDan Williams VM_BUG_ON(!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)); 1523eef1b3baSKirill A. Shutemov 1524eef1b3baSKirill A. Shutemov count_vm_event(THP_SPLIT_PMD); 1525eef1b3baSKirill A. Shutemov 1526d21b9e57SKirill A. Shutemov if (!vma_is_anonymous(vma)) { 1527d21b9e57SKirill A. Shutemov _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); 1528eef1b3baSKirill A. Shutemov if (is_huge_zero_pmd(_pmd)) 1529eef1b3baSKirill A. Shutemov put_huge_zero_page(); 1530d21b9e57SKirill A. Shutemov if (vma_is_dax(vma)) 1531d21b9e57SKirill A. 
Shutemov return; 1532d21b9e57SKirill A. Shutemov page = pmd_page(_pmd); 1533d21b9e57SKirill A. Shutemov if (!PageReferenced(page) && pmd_young(_pmd)) 1534d21b9e57SKirill A. Shutemov SetPageReferenced(page); 1535d21b9e57SKirill A. Shutemov page_remove_rmap(page, true); 1536d21b9e57SKirill A. Shutemov put_page(page); 1537d21b9e57SKirill A. Shutemov add_mm_counter(mm, MM_FILEPAGES, -HPAGE_PMD_NR); 1538eef1b3baSKirill A. Shutemov return; 1539eef1b3baSKirill A. Shutemov } else if (is_huge_zero_pmd(*pmd)) { 1540eef1b3baSKirill A. Shutemov return __split_huge_zero_page_pmd(vma, haddr, pmd); 1541eef1b3baSKirill A. Shutemov } 1542eef1b3baSKirill A. Shutemov 1543eef1b3baSKirill A. Shutemov page = pmd_page(*pmd); 1544eef1b3baSKirill A. Shutemov VM_BUG_ON_PAGE(!page_count(page), page); 1545fe896d18SJoonsoo Kim page_ref_add(page, HPAGE_PMD_NR - 1); 1546eef1b3baSKirill A. Shutemov write = pmd_write(*pmd); 1547eef1b3baSKirill A. Shutemov young = pmd_young(*pmd); 1548b8d3c4c3SMinchan Kim dirty = pmd_dirty(*pmd); 1549804dd150SAndrea Arcangeli soft_dirty = pmd_soft_dirty(*pmd); 1550eef1b3baSKirill A. Shutemov 1551c777e2a8SAneesh Kumar K.V pmdp_huge_split_prepare(vma, haddr, pmd); 1552eef1b3baSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1553eef1b3baSKirill A. Shutemov pmd_populate(mm, &_pmd, pgtable); 1554eef1b3baSKirill A. Shutemov 15552ac015e2SKirill A. Shutemov for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { 1556eef1b3baSKirill A. Shutemov pte_t entry, *pte; 1557eef1b3baSKirill A. Shutemov /* 1558eef1b3baSKirill A. Shutemov * Note that NUMA hinting access restrictions are not 1559eef1b3baSKirill A. Shutemov * transferred to avoid any possibility of altering 1560eef1b3baSKirill A. Shutemov * permissions across VMAs. 1561eef1b3baSKirill A. Shutemov */ 1562ba988280SKirill A. Shutemov if (freeze) { 1563ba988280SKirill A. Shutemov swp_entry_t swp_entry; 1564ba988280SKirill A. Shutemov swp_entry = make_migration_entry(page + i, write); 1565ba988280SKirill A. Shutemov entry = swp_entry_to_pte(swp_entry); 1566804dd150SAndrea Arcangeli if (soft_dirty) 1567804dd150SAndrea Arcangeli entry = pte_swp_mksoft_dirty(entry); 1568ba988280SKirill A. Shutemov } else { 1569eef1b3baSKirill A. Shutemov entry = mk_pte(page + i, vma->vm_page_prot); 1570b8d3c4c3SMinchan Kim entry = maybe_mkwrite(entry, vma); 1571eef1b3baSKirill A. Shutemov if (!write) 1572eef1b3baSKirill A. Shutemov entry = pte_wrprotect(entry); 1573eef1b3baSKirill A. Shutemov if (!young) 1574eef1b3baSKirill A. Shutemov entry = pte_mkold(entry); 1575804dd150SAndrea Arcangeli if (soft_dirty) 1576804dd150SAndrea Arcangeli entry = pte_mksoft_dirty(entry); 1577ba988280SKirill A. Shutemov } 1578b8d3c4c3SMinchan Kim if (dirty) 1579b8d3c4c3SMinchan Kim SetPageDirty(page + i); 15802ac015e2SKirill A. Shutemov pte = pte_offset_map(&_pmd, addr); 1581eef1b3baSKirill A. Shutemov BUG_ON(!pte_none(*pte)); 15822ac015e2SKirill A. Shutemov set_pte_at(mm, addr, pte, entry); 1583eef1b3baSKirill A. Shutemov atomic_inc(&page[i]._mapcount); 1584eef1b3baSKirill A. Shutemov pte_unmap(pte); 1585eef1b3baSKirill A. Shutemov } 1586eef1b3baSKirill A. Shutemov 1587eef1b3baSKirill A. Shutemov /* 1588eef1b3baSKirill A. Shutemov * Set PG_double_map before dropping compound_mapcount to avoid 1589eef1b3baSKirill A. Shutemov * false-negative page_mapped(). 1590eef1b3baSKirill A. Shutemov */ 1591eef1b3baSKirill A. Shutemov if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) { 1592eef1b3baSKirill A. 
Shutemov for (i = 0; i < HPAGE_PMD_NR; i++)
1593eef1b3baSKirill A. Shutemov atomic_inc(&page[i]._mapcount);
1594eef1b3baSKirill A. Shutemov }
1595eef1b3baSKirill A. Shutemov 
1596eef1b3baSKirill A. Shutemov if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
1597eef1b3baSKirill A. Shutemov /* Last compound_mapcount is gone. */
159811fb9989SMel Gorman __dec_node_page_state(page, NR_ANON_THPS);
1599eef1b3baSKirill A. Shutemov if (TestClearPageDoubleMap(page)) {
1600eef1b3baSKirill A. Shutemov /* No need for the mapcount reference anymore */
1601eef1b3baSKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++)
1602eef1b3baSKirill A. Shutemov atomic_dec(&page[i]._mapcount);
1603eef1b3baSKirill A. Shutemov }
1604eef1b3baSKirill A. Shutemov }
1605eef1b3baSKirill A. Shutemov 
1606eef1b3baSKirill A. Shutemov smp_wmb(); /* make pte visible before pmd */
1607e9b61f19SKirill A. Shutemov /*
1608e9b61f19SKirill A. Shutemov * Up to this point the pmd is present and huge and userland has full
1609e9b61f19SKirill A. Shutemov * access to the hugepage during the split (which happens in
1610e9b61f19SKirill A. Shutemov * place). If we overwrite the pmd with the not-huge version pointing
1611e9b61f19SKirill A. Shutemov * to the pte here (which of course we could if all CPUs were bug
1612e9b61f19SKirill A. Shutemov * free), userland could trigger a small page size TLB miss on the
1613e9b61f19SKirill A. Shutemov * small sized TLB while the hugepage TLB entry is still established in
1614e9b61f19SKirill A. Shutemov * the huge TLB. Some CPUs don't like that.
1615e9b61f19SKirill A. Shutemov * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
1616e9b61f19SKirill A. Shutemov * 383 on page 93. Intel should be safe but it also warns that it's
1617e9b61f19SKirill A. Shutemov * only safe if the permission and cache attributes of the two entries
1618e9b61f19SKirill A. Shutemov * loaded in the two TLBs are identical (which should be the case here).
1619e9b61f19SKirill A. Shutemov * But it is generally safer to never allow small and huge TLB entries
1620e9b61f19SKirill A. Shutemov * for the same virtual address to be loaded simultaneously. So instead
1621e9b61f19SKirill A. Shutemov * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
1622e9b61f19SKirill A. Shutemov * current pmd notpresent (atomically because here the pmd_trans_huge
1623e9b61f19SKirill A. Shutemov * and pmd_trans_splitting must remain set at all times on the pmd
1624e9b61f19SKirill A. Shutemov * until the split is complete for this pmd), then we flush the SMP TLB
1625e9b61f19SKirill A. Shutemov * and finally we write the non-huge version of the pmd entry with
1626e9b61f19SKirill A. Shutemov * pmd_populate.
1627e9b61f19SKirill A. Shutemov */
1628e9b61f19SKirill A. Shutemov pmdp_invalidate(vma, haddr, pmd);
1629eef1b3baSKirill A. Shutemov pmd_populate(mm, pmd, pgtable);
1630e9b61f19SKirill A. Shutemov 
1631e9b61f19SKirill A. Shutemov if (freeze) {
16322ac015e2SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) {
1633e9b61f19SKirill A. Shutemov page_remove_rmap(page + i, false);
1634e9b61f19SKirill A. Shutemov put_page(page + i);
1635e9b61f19SKirill A. Shutemov }
1636e9b61f19SKirill A. Shutemov }
1637eef1b3baSKirill A. Shutemov }
1638eef1b3baSKirill A. Shutemov 
1639eef1b3baSKirill A. Shutemov void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
164033f4751eSNaoya Horiguchi unsigned long address, bool freeze, struct page *page)
1641eef1b3baSKirill A. Shutemov {
1642eef1b3baSKirill A. 
Shutemov spinlock_t *ptl;
1643eef1b3baSKirill A. Shutemov struct mm_struct *mm = vma->vm_mm;
1644eef1b3baSKirill A. Shutemov unsigned long haddr = address & HPAGE_PMD_MASK;
1645eef1b3baSKirill A. Shutemov 
1646eef1b3baSKirill A. Shutemov mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
1647eef1b3baSKirill A. Shutemov ptl = pmd_lock(mm, pmd);
164833f4751eSNaoya Horiguchi 
164933f4751eSNaoya Horiguchi /*
165033f4751eSNaoya Horiguchi * If the caller asks to set up migration entries, we need a page to
165133f4751eSNaoya Horiguchi * check the pmd against. Otherwise we can end up replacing the wrong page.
165233f4751eSNaoya Horiguchi */
165333f4751eSNaoya Horiguchi VM_BUG_ON(freeze && !page);
165433f4751eSNaoya Horiguchi if (page && page != pmd_page(*pmd))
165533f4751eSNaoya Horiguchi goto out;
165633f4751eSNaoya Horiguchi 
16575c7fb56eSDan Williams if (pmd_trans_huge(*pmd)) {
165833f4751eSNaoya Horiguchi page = pmd_page(*pmd);
1659e90309c9SKirill A. Shutemov if (PageMlocked(page))
16605f737714SKirill A. Shutemov clear_page_mlock(page);
16615c7fb56eSDan Williams } else if (!pmd_devmap(*pmd))
16625c7fb56eSDan Williams goto out;
1663fec89c10SKirill A. Shutemov __split_huge_pmd_locked(vma, pmd, haddr, freeze);
1664e90309c9SKirill A. Shutemov out:
1665eef1b3baSKirill A. Shutemov spin_unlock(ptl);
1666eef1b3baSKirill A. Shutemov mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
1667eef1b3baSKirill A. Shutemov }
1668eef1b3baSKirill A. Shutemov 
1669fec89c10SKirill A. Shutemov void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
1670fec89c10SKirill A. Shutemov bool freeze, struct page *page)
167194fcc585SAndrea Arcangeli {
1672f72e7dcdSHugh Dickins pgd_t *pgd;
1673f72e7dcdSHugh Dickins pud_t *pud;
167494fcc585SAndrea Arcangeli pmd_t *pmd;
167594fcc585SAndrea Arcangeli 
167678ddc534SKirill A. Shutemov pgd = pgd_offset(vma->vm_mm, address);
1677f72e7dcdSHugh Dickins if (!pgd_present(*pgd))
1678f72e7dcdSHugh Dickins return;
1679f72e7dcdSHugh Dickins 
1680f72e7dcdSHugh Dickins pud = pud_offset(pgd, address);
1681f72e7dcdSHugh Dickins if (!pud_present(*pud))
1682f72e7dcdSHugh Dickins return;
1683f72e7dcdSHugh Dickins 
1684f72e7dcdSHugh Dickins pmd = pmd_offset(pud, address);
1685fec89c10SKirill A. Shutemov 
168633f4751eSNaoya Horiguchi __split_huge_pmd(vma, pmd, address, freeze, page);
168794fcc585SAndrea Arcangeli }
168894fcc585SAndrea Arcangeli 
1689e1b9996bSKirill A. Shutemov void vma_adjust_trans_huge(struct vm_area_struct *vma,
169094fcc585SAndrea Arcangeli unsigned long start,
169194fcc585SAndrea Arcangeli unsigned long end,
169294fcc585SAndrea Arcangeli long adjust_next)
169394fcc585SAndrea Arcangeli {
169494fcc585SAndrea Arcangeli /*
169594fcc585SAndrea Arcangeli * If the new start address isn't hpage aligned and it could
169694fcc585SAndrea Arcangeli * previously contain a hugepage: check if we need to split
169794fcc585SAndrea Arcangeli * a huge pmd.
169894fcc585SAndrea Arcangeli */
169994fcc585SAndrea Arcangeli if (start & ~HPAGE_PMD_MASK &&
170094fcc585SAndrea Arcangeli (start & HPAGE_PMD_MASK) >= vma->vm_start &&
170194fcc585SAndrea Arcangeli (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
1702fec89c10SKirill A. 
Shutemov split_huge_pmd_address(vma, start, false, NULL); 170394fcc585SAndrea Arcangeli 170494fcc585SAndrea Arcangeli /* 170594fcc585SAndrea Arcangeli * If the new end address isn't hpage aligned and it could 170694fcc585SAndrea Arcangeli * previously contain an hugepage: check if we need to split 170794fcc585SAndrea Arcangeli * an huge pmd. 170894fcc585SAndrea Arcangeli */ 170994fcc585SAndrea Arcangeli if (end & ~HPAGE_PMD_MASK && 171094fcc585SAndrea Arcangeli (end & HPAGE_PMD_MASK) >= vma->vm_start && 171194fcc585SAndrea Arcangeli (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) 1712fec89c10SKirill A. Shutemov split_huge_pmd_address(vma, end, false, NULL); 171394fcc585SAndrea Arcangeli 171494fcc585SAndrea Arcangeli /* 171594fcc585SAndrea Arcangeli * If we're also updating the vma->vm_next->vm_start, if the new 171694fcc585SAndrea Arcangeli * vm_next->vm_start isn't page aligned and it could previously 171794fcc585SAndrea Arcangeli * contain an hugepage: check if we need to split an huge pmd. 171894fcc585SAndrea Arcangeli */ 171994fcc585SAndrea Arcangeli if (adjust_next > 0) { 172094fcc585SAndrea Arcangeli struct vm_area_struct *next = vma->vm_next; 172194fcc585SAndrea Arcangeli unsigned long nstart = next->vm_start; 172294fcc585SAndrea Arcangeli nstart += adjust_next << PAGE_SHIFT; 172394fcc585SAndrea Arcangeli if (nstart & ~HPAGE_PMD_MASK && 172494fcc585SAndrea Arcangeli (nstart & HPAGE_PMD_MASK) >= next->vm_start && 172594fcc585SAndrea Arcangeli (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end) 1726fec89c10SKirill A. Shutemov split_huge_pmd_address(next, nstart, false, NULL); 172794fcc585SAndrea Arcangeli } 172894fcc585SAndrea Arcangeli } 1729e9b61f19SKirill A. Shutemov 1730fec89c10SKirill A. Shutemov static void freeze_page(struct page *page) 1731e9b61f19SKirill A. Shutemov { 1732baa355fdSKirill A. Shutemov enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS | 1733baa355fdSKirill A. Shutemov TTU_RMAP_LOCKED; 1734fec89c10SKirill A. Shutemov int i, ret; 1735e9b61f19SKirill A. Shutemov 1736e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(!PageHead(page), page); 1737e9b61f19SKirill A. Shutemov 1738baa355fdSKirill A. Shutemov if (PageAnon(page)) 1739baa355fdSKirill A. Shutemov ttu_flags |= TTU_MIGRATION; 1740baa355fdSKirill A. Shutemov 1741fec89c10SKirill A. Shutemov /* We only need TTU_SPLIT_HUGE_PMD once */ 1742fec89c10SKirill A. Shutemov ret = try_to_unmap(page, ttu_flags | TTU_SPLIT_HUGE_PMD); 1743fec89c10SKirill A. Shutemov for (i = 1; !ret && i < HPAGE_PMD_NR; i++) { 1744fec89c10SKirill A. Shutemov /* Cut short if the page is unmapped */ 1745fec89c10SKirill A. Shutemov if (page_count(page) == 1) 1746e9b61f19SKirill A. Shutemov return; 1747bd56086fSKirill A. Shutemov 1748fec89c10SKirill A. Shutemov ret = try_to_unmap(page + i, ttu_flags); 1749fec89c10SKirill A. Shutemov } 1750baa355fdSKirill A. Shutemov VM_BUG_ON_PAGE(ret, page + i - 1); 1751bd56086fSKirill A. Shutemov } 1752bd56086fSKirill A. Shutemov 1753fec89c10SKirill A. Shutemov static void unfreeze_page(struct page *page) 1754e9b61f19SKirill A. Shutemov { 1755fec89c10SKirill A. Shutemov int i; 1756e9b61f19SKirill A. Shutemov 1757fec89c10SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) 1758fec89c10SKirill A. Shutemov remove_migration_ptes(page + i, page + i, true); 1759e9b61f19SKirill A. Shutemov } 1760e9b61f19SKirill A. Shutemov 17618df651c7SKirill A. Shutemov static void __split_huge_page_tail(struct page *head, int tail, 1762e9b61f19SKirill A. 
Shutemov struct lruvec *lruvec, struct list_head *list) 1763e9b61f19SKirill A. Shutemov { 1764e9b61f19SKirill A. Shutemov struct page *page_tail = head + tail; 1765e9b61f19SKirill A. Shutemov 17668df651c7SKirill A. Shutemov VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail); 1767fe896d18SJoonsoo Kim VM_BUG_ON_PAGE(page_ref_count(page_tail) != 0, page_tail); 1768e9b61f19SKirill A. Shutemov 1769e9b61f19SKirill A. Shutemov /* 17700139aa7bSJoonsoo Kim * tail_page->_refcount is zero and not changing from under us. But 1771e9b61f19SKirill A. Shutemov * get_page_unless_zero() may be running from under us on the 1772baa355fdSKirill A. Shutemov * tail_page. If we used atomic_set() below instead of atomic_inc() or 1773baa355fdSKirill A. Shutemov * atomic_add(), we would then run atomic_set() concurrently with 1774e9b61f19SKirill A. Shutemov * get_page_unless_zero(), and atomic_set() is implemented in C not 1775e9b61f19SKirill A. Shutemov * using locked ops. spin_unlock on x86 sometime uses locked ops 1776e9b61f19SKirill A. Shutemov * because of PPro errata 66, 92, so unless somebody can guarantee 1777e9b61f19SKirill A. Shutemov * atomic_set() here would be safe on all archs (and not only on x86), 1778baa355fdSKirill A. Shutemov * it's safer to use atomic_inc()/atomic_add(). 1779e9b61f19SKirill A. Shutemov */ 1780baa355fdSKirill A. Shutemov if (PageAnon(head)) { 1781fe896d18SJoonsoo Kim page_ref_inc(page_tail); 1782baa355fdSKirill A. Shutemov } else { 1783baa355fdSKirill A. Shutemov /* Additional pin to radix tree */ 1784baa355fdSKirill A. Shutemov page_ref_add(page_tail, 2); 1785baa355fdSKirill A. Shutemov } 1786e9b61f19SKirill A. Shutemov 1787e9b61f19SKirill A. Shutemov page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 1788e9b61f19SKirill A. Shutemov page_tail->flags |= (head->flags & 1789e9b61f19SKirill A. Shutemov ((1L << PG_referenced) | 1790e9b61f19SKirill A. Shutemov (1L << PG_swapbacked) | 1791e9b61f19SKirill A. Shutemov (1L << PG_mlocked) | 1792e9b61f19SKirill A. Shutemov (1L << PG_uptodate) | 1793e9b61f19SKirill A. Shutemov (1L << PG_active) | 1794e9b61f19SKirill A. Shutemov (1L << PG_locked) | 1795b8d3c4c3SMinchan Kim (1L << PG_unevictable) | 1796b8d3c4c3SMinchan Kim (1L << PG_dirty))); 1797e9b61f19SKirill A. Shutemov 1798e9b61f19SKirill A. Shutemov /* 1799e9b61f19SKirill A. Shutemov * After clearing PageTail the gup refcount can be released. 1800e9b61f19SKirill A. Shutemov * Page flags also must be visible before we make the page non-compound. 1801e9b61f19SKirill A. Shutemov */ 1802e9b61f19SKirill A. Shutemov smp_wmb(); 1803e9b61f19SKirill A. Shutemov 1804e9b61f19SKirill A. Shutemov clear_compound_head(page_tail); 1805e9b61f19SKirill A. Shutemov 1806e9b61f19SKirill A. Shutemov if (page_is_young(head)) 1807e9b61f19SKirill A. Shutemov set_page_young(page_tail); 1808e9b61f19SKirill A. Shutemov if (page_is_idle(head)) 1809e9b61f19SKirill A. Shutemov set_page_idle(page_tail); 1810e9b61f19SKirill A. Shutemov 1811e9b61f19SKirill A. Shutemov /* ->mapping in first tail page is compound_mapcount */ 18129a982250SKirill A. Shutemov VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING, 1813e9b61f19SKirill A. Shutemov page_tail); 1814e9b61f19SKirill A. Shutemov page_tail->mapping = head->mapping; 1815e9b61f19SKirill A. Shutemov 1816e9b61f19SKirill A. Shutemov page_tail->index = head->index + tail; 1817e9b61f19SKirill A. Shutemov page_cpupid_xchg_last(page_tail, page_cpupid_last(head)); 1818e9b61f19SKirill A. 
Shutemov lru_add_page_tail(head, page_tail, lruvec, list); 1819e9b61f19SKirill A. Shutemov } 1820e9b61f19SKirill A. Shutemov 1821baa355fdSKirill A. Shutemov static void __split_huge_page(struct page *page, struct list_head *list, 1822baa355fdSKirill A. Shutemov unsigned long flags) 1823e9b61f19SKirill A. Shutemov { 1824e9b61f19SKirill A. Shutemov struct page *head = compound_head(page); 1825e9b61f19SKirill A. Shutemov struct zone *zone = page_zone(head); 1826e9b61f19SKirill A. Shutemov struct lruvec *lruvec; 1827baa355fdSKirill A. Shutemov pgoff_t end = -1; 18288df651c7SKirill A. Shutemov int i; 1829e9b61f19SKirill A. Shutemov 1830599d0c95SMel Gorman lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat); 1831e9b61f19SKirill A. Shutemov 1832e9b61f19SKirill A. Shutemov /* complete memcg works before add pages to LRU */ 1833e9b61f19SKirill A. Shutemov mem_cgroup_split_huge_fixup(head); 1834e9b61f19SKirill A. Shutemov 1835baa355fdSKirill A. Shutemov if (!PageAnon(page)) 1836baa355fdSKirill A. Shutemov end = DIV_ROUND_UP(i_size_read(head->mapping->host), PAGE_SIZE); 1837baa355fdSKirill A. Shutemov 1838baa355fdSKirill A. Shutemov for (i = HPAGE_PMD_NR - 1; i >= 1; i--) { 18398df651c7SKirill A. Shutemov __split_huge_page_tail(head, i, lruvec, list); 1840baa355fdSKirill A. Shutemov /* Some pages can be beyond i_size: drop them from page cache */ 1841baa355fdSKirill A. Shutemov if (head[i].index >= end) { 1842baa355fdSKirill A. Shutemov __ClearPageDirty(head + i); 1843baa355fdSKirill A. Shutemov __delete_from_page_cache(head + i, NULL); 1844800d8c63SKirill A. Shutemov if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head)) 1845800d8c63SKirill A. Shutemov shmem_uncharge(head->mapping->host, 1); 1846baa355fdSKirill A. Shutemov put_page(head + i); 1847baa355fdSKirill A. Shutemov } 1848baa355fdSKirill A. Shutemov } 1849e9b61f19SKirill A. Shutemov 1850e9b61f19SKirill A. Shutemov ClearPageCompound(head); 1851baa355fdSKirill A. Shutemov /* See comment in __split_huge_page_tail() */ 1852baa355fdSKirill A. Shutemov if (PageAnon(head)) { 1853baa355fdSKirill A. Shutemov page_ref_inc(head); 1854baa355fdSKirill A. Shutemov } else { 1855baa355fdSKirill A. Shutemov /* Additional pin to radix tree */ 1856baa355fdSKirill A. Shutemov page_ref_add(head, 2); 1857baa355fdSKirill A. Shutemov spin_unlock(&head->mapping->tree_lock); 1858baa355fdSKirill A. Shutemov } 1859baa355fdSKirill A. Shutemov 1860a52633d8SMel Gorman spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags); 1861e9b61f19SKirill A. Shutemov 1862fec89c10SKirill A. Shutemov unfreeze_page(head); 1863e9b61f19SKirill A. Shutemov 1864e9b61f19SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) { 1865e9b61f19SKirill A. Shutemov struct page *subpage = head + i; 1866e9b61f19SKirill A. Shutemov if (subpage == page) 1867e9b61f19SKirill A. Shutemov continue; 1868e9b61f19SKirill A. Shutemov unlock_page(subpage); 1869e9b61f19SKirill A. Shutemov 1870e9b61f19SKirill A. Shutemov /* 1871e9b61f19SKirill A. Shutemov * Subpages may be freed if there wasn't any mapping 1872e9b61f19SKirill A. Shutemov * like if add_to_swap() is running on a lru page that 1873e9b61f19SKirill A. Shutemov * had its mapping zapped. And freeing these pages 1874e9b61f19SKirill A. Shutemov * requires taking the lru_lock so we do the put_page 1875e9b61f19SKirill A. Shutemov * of the tail pages after the split is complete. 1876e9b61f19SKirill A. Shutemov */ 1877e9b61f19SKirill A. Shutemov put_page(subpage); 1878e9b61f19SKirill A. Shutemov } 1879e9b61f19SKirill A. 
Shutemov } 1880e9b61f19SKirill A. Shutemov 1881b20ce5e0SKirill A. Shutemov int total_mapcount(struct page *page) 1882b20ce5e0SKirill A. Shutemov { 1883dd78feddSKirill A. Shutemov int i, compound, ret; 1884b20ce5e0SKirill A. Shutemov 1885b20ce5e0SKirill A. Shutemov VM_BUG_ON_PAGE(PageTail(page), page); 1886b20ce5e0SKirill A. Shutemov 1887b20ce5e0SKirill A. Shutemov if (likely(!PageCompound(page))) 1888b20ce5e0SKirill A. Shutemov return atomic_read(&page->_mapcount) + 1; 1889b20ce5e0SKirill A. Shutemov 1890dd78feddSKirill A. Shutemov compound = compound_mapcount(page); 1891b20ce5e0SKirill A. Shutemov if (PageHuge(page)) 1892dd78feddSKirill A. Shutemov return compound; 1893dd78feddSKirill A. Shutemov ret = compound; 1894b20ce5e0SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) 1895b20ce5e0SKirill A. Shutemov ret += atomic_read(&page[i]._mapcount) + 1; 1896dd78feddSKirill A. Shutemov /* File pages has compound_mapcount included in _mapcount */ 1897dd78feddSKirill A. Shutemov if (!PageAnon(page)) 1898dd78feddSKirill A. Shutemov return ret - compound * HPAGE_PMD_NR; 1899b20ce5e0SKirill A. Shutemov if (PageDoubleMap(page)) 1900b20ce5e0SKirill A. Shutemov ret -= HPAGE_PMD_NR; 1901b20ce5e0SKirill A. Shutemov return ret; 1902b20ce5e0SKirill A. Shutemov } 1903b20ce5e0SKirill A. Shutemov 1904e9b61f19SKirill A. Shutemov /* 19056d0a07edSAndrea Arcangeli * This calculates accurately how many mappings a transparent hugepage 19066d0a07edSAndrea Arcangeli * has (unlike page_mapcount() which isn't fully accurate). This full 19076d0a07edSAndrea Arcangeli * accuracy is primarily needed to know if copy-on-write faults can 19086d0a07edSAndrea Arcangeli * reuse the page and change the mapping to read-write instead of 19096d0a07edSAndrea Arcangeli * copying them. At the same time this returns the total_mapcount too. 19106d0a07edSAndrea Arcangeli * 19116d0a07edSAndrea Arcangeli * The function returns the highest mapcount any one of the subpages 19126d0a07edSAndrea Arcangeli * has. If the return value is one, even if different processes are 19136d0a07edSAndrea Arcangeli * mapping different subpages of the transparent hugepage, they can 19146d0a07edSAndrea Arcangeli * all reuse it, because each process is reusing a different subpage. 19156d0a07edSAndrea Arcangeli * 19166d0a07edSAndrea Arcangeli * The total_mapcount is instead counting all virtual mappings of the 19176d0a07edSAndrea Arcangeli * subpages. If the total_mapcount is equal to "one", it tells the 19186d0a07edSAndrea Arcangeli * caller all mappings belong to the same "mm" and in turn the 19196d0a07edSAndrea Arcangeli * anon_vma of the transparent hugepage can become the vma->anon_vma 19206d0a07edSAndrea Arcangeli * local one as no other process may be mapping any of the subpages. 19216d0a07edSAndrea Arcangeli * 19226d0a07edSAndrea Arcangeli * It would be more accurate to replace page_mapcount() with 19236d0a07edSAndrea Arcangeli * page_trans_huge_mapcount(), however we only use 19246d0a07edSAndrea Arcangeli * page_trans_huge_mapcount() in the copy-on-write faults where we 19256d0a07edSAndrea Arcangeli * need full accuracy to avoid breaking page pinning, because 19266d0a07edSAndrea Arcangeli * page_trans_huge_mapcount() is slower than page_mapcount(). 
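 *
 * Worked example (illustrative, not from the original comment): for a
 * THP mapped only by one process's pmd, compound_mapcount() is 1 and
 * every subpage's _mapcount is -1, so the per-subpage loop contributes
 * 0, page_trans_huge_mapcount() returns 1 and the write fault above can
 * reuse the page in place. After a fork() the compound_mapcount becomes
 * 2, the function returns 2, and the copy-on-write path must allocate a
 * new huge page (or fall back to a split) instead.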
19276d0a07edSAndrea Arcangeli */ 19286d0a07edSAndrea Arcangeli int page_trans_huge_mapcount(struct page *page, int *total_mapcount) 19296d0a07edSAndrea Arcangeli { 19306d0a07edSAndrea Arcangeli int i, ret, _total_mapcount, mapcount; 19316d0a07edSAndrea Arcangeli 19326d0a07edSAndrea Arcangeli /* hugetlbfs shouldn't call it */ 19336d0a07edSAndrea Arcangeli VM_BUG_ON_PAGE(PageHuge(page), page); 19346d0a07edSAndrea Arcangeli 19356d0a07edSAndrea Arcangeli if (likely(!PageTransCompound(page))) { 19366d0a07edSAndrea Arcangeli mapcount = atomic_read(&page->_mapcount) + 1; 19376d0a07edSAndrea Arcangeli if (total_mapcount) 19386d0a07edSAndrea Arcangeli *total_mapcount = mapcount; 19396d0a07edSAndrea Arcangeli return mapcount; 19406d0a07edSAndrea Arcangeli } 19416d0a07edSAndrea Arcangeli 19426d0a07edSAndrea Arcangeli page = compound_head(page); 19436d0a07edSAndrea Arcangeli 19446d0a07edSAndrea Arcangeli _total_mapcount = ret = 0; 19456d0a07edSAndrea Arcangeli for (i = 0; i < HPAGE_PMD_NR; i++) { 19466d0a07edSAndrea Arcangeli mapcount = atomic_read(&page[i]._mapcount) + 1; 19476d0a07edSAndrea Arcangeli ret = max(ret, mapcount); 19486d0a07edSAndrea Arcangeli _total_mapcount += mapcount; 19496d0a07edSAndrea Arcangeli } 19506d0a07edSAndrea Arcangeli if (PageDoubleMap(page)) { 19516d0a07edSAndrea Arcangeli ret -= 1; 19526d0a07edSAndrea Arcangeli _total_mapcount -= HPAGE_PMD_NR; 19536d0a07edSAndrea Arcangeli } 19546d0a07edSAndrea Arcangeli mapcount = compound_mapcount(page); 19556d0a07edSAndrea Arcangeli ret += mapcount; 19566d0a07edSAndrea Arcangeli _total_mapcount += mapcount; 19576d0a07edSAndrea Arcangeli if (total_mapcount) 19586d0a07edSAndrea Arcangeli *total_mapcount = _total_mapcount; 19596d0a07edSAndrea Arcangeli return ret; 19606d0a07edSAndrea Arcangeli } 19616d0a07edSAndrea Arcangeli 19626d0a07edSAndrea Arcangeli /* 1963e9b61f19SKirill A. Shutemov * This function splits huge page into normal pages. @page can point to any 1964e9b61f19SKirill A. Shutemov * subpage of huge page to split. Split doesn't change the position of @page. 1965e9b61f19SKirill A. Shutemov * 1966e9b61f19SKirill A. Shutemov * Only caller must hold pin on the @page, otherwise split fails with -EBUSY. 1967e9b61f19SKirill A. Shutemov * The huge page must be locked. 1968e9b61f19SKirill A. Shutemov * 1969e9b61f19SKirill A. Shutemov * If @list is null, tail pages will be added to LRU list, otherwise, to @list. 1970e9b61f19SKirill A. Shutemov * 1971e9b61f19SKirill A. Shutemov * Both head page and tail pages will inherit mapping, flags, and so on from 1972e9b61f19SKirill A. Shutemov * the hugepage. 1973e9b61f19SKirill A. Shutemov * 1974e9b61f19SKirill A. Shutemov * GUP pin and PG_locked transferred to @page. Rest subpages can be freed if 1975e9b61f19SKirill A. Shutemov * they are not mapped. 1976e9b61f19SKirill A. Shutemov * 1977e9b61f19SKirill A. Shutemov * Returns 0 if the hugepage is split successfully. 1978e9b61f19SKirill A. Shutemov * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under 1979e9b61f19SKirill A. Shutemov * us. 1980e9b61f19SKirill A. Shutemov */ 1981e9b61f19SKirill A. Shutemov int split_huge_page_to_list(struct page *page, struct list_head *list) 1982e9b61f19SKirill A. Shutemov { 1983e9b61f19SKirill A. Shutemov struct page *head = compound_head(page); 1984a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(page_to_nid(head)); 1985baa355fdSKirill A. Shutemov struct anon_vma *anon_vma = NULL; 1986baa355fdSKirill A. Shutemov struct address_space *mapping = NULL; 1987baa355fdSKirill A. 
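/*
 * The usual calling pattern, as used by deferred_split_scan() and the
 * debugfs knob later in this file, is: pin the page, lock it, call
 * split_huge_page() (which is expected to be a thin wrapper around
 * split_huge_page_to_list(page, NULL)), then unlock and unpin:
 *
 *	lock_page(page);
 *	if (!split_huge_page(page))
 *		split++;
 *	unlock_page(page);
 *	put_page(page);
 */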
Shutemov int count, mapcount, extra_pins, ret;
1988d9654322SKirill A. Shutemov bool mlocked;
19890b9b6fffSKirill A. Shutemov unsigned long flags;
1990e9b61f19SKirill A. Shutemov 
1991e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
1992e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(!PageLocked(page), page);
1993e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
1994e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(!PageCompound(page), page);
1995e9b61f19SKirill A. Shutemov 
1996baa355fdSKirill A. Shutemov if (PageAnon(head)) {
1997e9b61f19SKirill A. Shutemov /*
1998baa355fdSKirill A. Shutemov * The caller does not necessarily hold an mmap_sem that would
1999baa355fdSKirill A. Shutemov * prevent the anon_vma disappearing so we first take a
2000baa355fdSKirill A. Shutemov * reference to it and then lock the anon_vma for write. This
2001baa355fdSKirill A. Shutemov * is similar to page_lock_anon_vma_read except the write lock
2002baa355fdSKirill A. Shutemov * is taken to serialise against parallel split or collapse
2003baa355fdSKirill A. Shutemov * operations.
2004e9b61f19SKirill A. Shutemov */
2005e9b61f19SKirill A. Shutemov anon_vma = page_get_anon_vma(head);
2006e9b61f19SKirill A. Shutemov if (!anon_vma) {
2007e9b61f19SKirill A. Shutemov ret = -EBUSY;
2008e9b61f19SKirill A. Shutemov goto out;
2009e9b61f19SKirill A. Shutemov }
2010baa355fdSKirill A. Shutemov extra_pins = 0;
2011baa355fdSKirill A. Shutemov mapping = NULL;
2012e9b61f19SKirill A. Shutemov anon_vma_lock_write(anon_vma);
2013baa355fdSKirill A. Shutemov } else {
2014baa355fdSKirill A. Shutemov mapping = head->mapping;
2015baa355fdSKirill A. Shutemov 
2016baa355fdSKirill A. Shutemov /* Truncated? */
2017baa355fdSKirill A. Shutemov if (!mapping) {
2018baa355fdSKirill A. Shutemov ret = -EBUSY;
2019baa355fdSKirill A. Shutemov goto out;
2020baa355fdSKirill A. Shutemov }
2021baa355fdSKirill A. Shutemov 
2022baa355fdSKirill A. Shutemov /* Additional pins from radix tree */
2023baa355fdSKirill A. Shutemov extra_pins = HPAGE_PMD_NR;
2024baa355fdSKirill A. Shutemov anon_vma = NULL;
2025baa355fdSKirill A. Shutemov i_mmap_lock_read(mapping);
2026baa355fdSKirill A. Shutemov }
2027e9b61f19SKirill A. Shutemov 
2028e9b61f19SKirill A. Shutemov /*
2029e9b61f19SKirill A. Shutemov * Racy check if we can split the page, before freeze_page() will
2030e9b61f19SKirill A. Shutemov * split PMDs
2031e9b61f19SKirill A. Shutemov */
2032baa355fdSKirill A. Shutemov if (total_mapcount(head) != page_count(head) - extra_pins - 1) {
2033e9b61f19SKirill A. Shutemov ret = -EBUSY;
2034e9b61f19SKirill A. Shutemov goto out_unlock;
2035e9b61f19SKirill A. Shutemov }
2036e9b61f19SKirill A. Shutemov 
2037d9654322SKirill A. Shutemov mlocked = PageMlocked(page);
2038fec89c10SKirill A. Shutemov freeze_page(head);
2039e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(compound_mapcount(head), head);
2040e9b61f19SKirill A. Shutemov 
2041d9654322SKirill A. Shutemov /* Make sure the page is not on a per-CPU pagevec as it takes a pin */
2042d9654322SKirill A. Shutemov if (mlocked)
2043d9654322SKirill A. Shutemov lru_add_drain();
2044d9654322SKirill A. Shutemov 
2045baa355fdSKirill A. Shutemov /* prevent PageLRU from going away from under us, and freeze lru stats */
2046a52633d8SMel Gorman spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags);
2047baa355fdSKirill A. Shutemov 
2048baa355fdSKirill A. Shutemov if (mapping) {
2049baa355fdSKirill A. Shutemov void **pslot;
2050baa355fdSKirill A. Shutemov 
2051baa355fdSKirill A. 
Shutemov spin_lock(&mapping->tree_lock); 2052baa355fdSKirill A. Shutemov pslot = radix_tree_lookup_slot(&mapping->page_tree, 2053baa355fdSKirill A. Shutemov page_index(head)); 2054baa355fdSKirill A. Shutemov /* 2055baa355fdSKirill A. Shutemov * Check if the head page is present in radix tree. 2056baa355fdSKirill A. Shutemov * We assume all tail are present too, if head is there. 2057baa355fdSKirill A. Shutemov */ 2058baa355fdSKirill A. Shutemov if (radix_tree_deref_slot_protected(pslot, 2059baa355fdSKirill A. Shutemov &mapping->tree_lock) != head) 2060baa355fdSKirill A. Shutemov goto fail; 2061baa355fdSKirill A. Shutemov } 2062baa355fdSKirill A. Shutemov 20630139aa7bSJoonsoo Kim /* Prevent deferred_split_scan() touching ->_refcount */ 2064baa355fdSKirill A. Shutemov spin_lock(&pgdata->split_queue_lock); 2065e9b61f19SKirill A. Shutemov count = page_count(head); 2066e9b61f19SKirill A. Shutemov mapcount = total_mapcount(head); 2067baa355fdSKirill A. Shutemov if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) { 20689a982250SKirill A. Shutemov if (!list_empty(page_deferred_list(head))) { 2069a3d0a918SKirill A. Shutemov pgdata->split_queue_len--; 20709a982250SKirill A. Shutemov list_del(page_deferred_list(head)); 20719a982250SKirill A. Shutemov } 207265c45377SKirill A. Shutemov if (mapping) 207311fb9989SMel Gorman __dec_node_page_state(page, NR_SHMEM_THPS); 2074baa355fdSKirill A. Shutemov spin_unlock(&pgdata->split_queue_lock); 2075baa355fdSKirill A. Shutemov __split_huge_page(page, list, flags); 2076e9b61f19SKirill A. Shutemov ret = 0; 2077baa355fdSKirill A. Shutemov } else { 2078baa355fdSKirill A. Shutemov if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) { 2079e9b61f19SKirill A. Shutemov pr_alert("total_mapcount: %u, page_count(): %u\n", 2080e9b61f19SKirill A. Shutemov mapcount, count); 2081e9b61f19SKirill A. Shutemov if (PageTail(page)) 2082e9b61f19SKirill A. Shutemov dump_page(head, NULL); 2083bd56086fSKirill A. Shutemov dump_page(page, "total_mapcount(head) > 0"); 2084e9b61f19SKirill A. Shutemov BUG(); 2085baa355fdSKirill A. Shutemov } 2086baa355fdSKirill A. Shutemov spin_unlock(&pgdata->split_queue_lock); 2087baa355fdSKirill A. Shutemov fail: if (mapping) 2088baa355fdSKirill A. Shutemov spin_unlock(&mapping->tree_lock); 2089a52633d8SMel Gorman spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags); 2090fec89c10SKirill A. Shutemov unfreeze_page(head); 2091e9b61f19SKirill A. Shutemov ret = -EBUSY; 2092e9b61f19SKirill A. Shutemov } 2093e9b61f19SKirill A. Shutemov 2094e9b61f19SKirill A. Shutemov out_unlock: 2095baa355fdSKirill A. Shutemov if (anon_vma) { 2096e9b61f19SKirill A. Shutemov anon_vma_unlock_write(anon_vma); 2097e9b61f19SKirill A. Shutemov put_anon_vma(anon_vma); 2098baa355fdSKirill A. Shutemov } 2099baa355fdSKirill A. Shutemov if (mapping) 2100baa355fdSKirill A. Shutemov i_mmap_unlock_read(mapping); 2101e9b61f19SKirill A. Shutemov out: 2102e9b61f19SKirill A. Shutemov count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); 2103e9b61f19SKirill A. Shutemov return ret; 2104e9b61f19SKirill A. Shutemov } 21059a982250SKirill A. Shutemov 21069a982250SKirill A. Shutemov void free_transhuge_page(struct page *page) 21079a982250SKirill A. Shutemov { 2108a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(page_to_nid(page)); 21099a982250SKirill A. Shutemov unsigned long flags; 21109a982250SKirill A. Shutemov 2111a3d0a918SKirill A. Shutemov spin_lock_irqsave(&pgdata->split_queue_lock, flags); 21129a982250SKirill A. 
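/*
 * A partially unmapped THP is queued by deferred_split_huge_page()
 * below and drained later by the deferred_split shrinker; since the
 * page can be freed before the shrinker ever sees it,
 * free_transhuge_page(), which runs when the compound page is finally
 * released, must unlink it from the queue under split_queue_lock first.
 */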
Shutemov if (!list_empty(page_deferred_list(page))) { 2113a3d0a918SKirill A. Shutemov pgdata->split_queue_len--; 21149a982250SKirill A. Shutemov list_del(page_deferred_list(page)); 21159a982250SKirill A. Shutemov } 2116a3d0a918SKirill A. Shutemov spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); 21179a982250SKirill A. Shutemov free_compound_page(page); 21189a982250SKirill A. Shutemov } 21199a982250SKirill A. Shutemov 21209a982250SKirill A. Shutemov void deferred_split_huge_page(struct page *page) 21219a982250SKirill A. Shutemov { 2122a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(page_to_nid(page)); 21239a982250SKirill A. Shutemov unsigned long flags; 21249a982250SKirill A. Shutemov 21259a982250SKirill A. Shutemov VM_BUG_ON_PAGE(!PageTransHuge(page), page); 21269a982250SKirill A. Shutemov 2127a3d0a918SKirill A. Shutemov spin_lock_irqsave(&pgdata->split_queue_lock, flags); 21289a982250SKirill A. Shutemov if (list_empty(page_deferred_list(page))) { 2129f9719a03SKirill A. Shutemov count_vm_event(THP_DEFERRED_SPLIT_PAGE); 2130a3d0a918SKirill A. Shutemov list_add_tail(page_deferred_list(page), &pgdata->split_queue); 2131a3d0a918SKirill A. Shutemov pgdata->split_queue_len++; 21329a982250SKirill A. Shutemov } 2133a3d0a918SKirill A. Shutemov spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); 21349a982250SKirill A. Shutemov } 21359a982250SKirill A. Shutemov 21369a982250SKirill A. Shutemov static unsigned long deferred_split_count(struct shrinker *shrink, 21379a982250SKirill A. Shutemov struct shrink_control *sc) 21389a982250SKirill A. Shutemov { 2139a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(sc->nid); 2140cb8d68ecSKirill A. Shutemov return ACCESS_ONCE(pgdata->split_queue_len); 21419a982250SKirill A. Shutemov } 21429a982250SKirill A. Shutemov 21439a982250SKirill A. Shutemov static unsigned long deferred_split_scan(struct shrinker *shrink, 21449a982250SKirill A. Shutemov struct shrink_control *sc) 21459a982250SKirill A. Shutemov { 2146a3d0a918SKirill A. Shutemov struct pglist_data *pgdata = NODE_DATA(sc->nid); 21479a982250SKirill A. Shutemov unsigned long flags; 21489a982250SKirill A. Shutemov LIST_HEAD(list), *pos, *next; 21499a982250SKirill A. Shutemov struct page *page; 21509a982250SKirill A. Shutemov int split = 0; 21519a982250SKirill A. Shutemov 2152a3d0a918SKirill A. Shutemov spin_lock_irqsave(&pgdata->split_queue_lock, flags); 21539a982250SKirill A. Shutemov /* Take pin on all head pages to avoid freeing them under us */ 2154ae026204SKirill A. Shutemov list_for_each_safe(pos, next, &pgdata->split_queue) { 21559a982250SKirill A. Shutemov page = list_entry((void *)pos, struct page, mapping); 21569a982250SKirill A. Shutemov page = compound_head(page); 2157e3ae1953SKirill A. Shutemov if (get_page_unless_zero(page)) { 2158e3ae1953SKirill A. Shutemov list_move(page_deferred_list(page), &list); 2159e3ae1953SKirill A. Shutemov } else { 2160e3ae1953SKirill A. Shutemov /* We lost race with put_compound_page() */ 21619a982250SKirill A. Shutemov list_del_init(page_deferred_list(page)); 2162a3d0a918SKirill A. Shutemov pgdata->split_queue_len--; 21639a982250SKirill A. Shutemov } 2164e3ae1953SKirill A. Shutemov if (!--sc->nr_to_scan) 2165e3ae1953SKirill A. Shutemov break; 21669a982250SKirill A. Shutemov } 2167a3d0a918SKirill A. Shutemov spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); 21689a982250SKirill A. Shutemov 21699a982250SKirill A. Shutemov list_for_each_safe(pos, next, &list) { 21709a982250SKirill A. 
Shutemov page = list_entry((void *)pos, struct page, mapping); 21719a982250SKirill A. Shutemov lock_page(page); 21729a982250SKirill A. Shutemov /* split_huge_page() removes page from list on success */ 21739a982250SKirill A. Shutemov if (!split_huge_page(page)) 21749a982250SKirill A. Shutemov split++; 21759a982250SKirill A. Shutemov unlock_page(page); 21769a982250SKirill A. Shutemov put_page(page); 21779a982250SKirill A. Shutemov } 21789a982250SKirill A. Shutemov 2179a3d0a918SKirill A. Shutemov spin_lock_irqsave(&pgdata->split_queue_lock, flags); 2180a3d0a918SKirill A. Shutemov list_splice_tail(&list, &pgdata->split_queue); 2181a3d0a918SKirill A. Shutemov spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); 21829a982250SKirill A. Shutemov 2183cb8d68ecSKirill A. Shutemov /* 2184cb8d68ecSKirill A. Shutemov * Stop shrinker if we didn't split any page, but the queue is empty. 2185cb8d68ecSKirill A. Shutemov * This can happen if pages were freed under us. 2186cb8d68ecSKirill A. Shutemov */ 2187cb8d68ecSKirill A. Shutemov if (!split && list_empty(&pgdata->split_queue)) 2188cb8d68ecSKirill A. Shutemov return SHRINK_STOP; 2189cb8d68ecSKirill A. Shutemov return split; 21909a982250SKirill A. Shutemov } 21919a982250SKirill A. Shutemov 21929a982250SKirill A. Shutemov static struct shrinker deferred_split_shrinker = { 21939a982250SKirill A. Shutemov .count_objects = deferred_split_count, 21949a982250SKirill A. Shutemov .scan_objects = deferred_split_scan, 21959a982250SKirill A. Shutemov .seeks = DEFAULT_SEEKS, 2196a3d0a918SKirill A. Shutemov .flags = SHRINKER_NUMA_AWARE, 21979a982250SKirill A. Shutemov }; 219849071d43SKirill A. Shutemov 219949071d43SKirill A. Shutemov #ifdef CONFIG_DEBUG_FS 220049071d43SKirill A. Shutemov static int split_huge_pages_set(void *data, u64 val) 220149071d43SKirill A. Shutemov { 220249071d43SKirill A. Shutemov struct zone *zone; 220349071d43SKirill A. Shutemov struct page *page; 220449071d43SKirill A. Shutemov unsigned long pfn, max_zone_pfn; 220549071d43SKirill A. Shutemov unsigned long total = 0, split = 0; 220649071d43SKirill A. Shutemov 220749071d43SKirill A. Shutemov if (val != 1) 220849071d43SKirill A. Shutemov return -EINVAL; 220949071d43SKirill A. Shutemov 221049071d43SKirill A. Shutemov for_each_populated_zone(zone) { 221149071d43SKirill A. Shutemov max_zone_pfn = zone_end_pfn(zone); 221249071d43SKirill A. Shutemov for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { 221349071d43SKirill A. Shutemov if (!pfn_valid(pfn)) 221449071d43SKirill A. Shutemov continue; 221549071d43SKirill A. Shutemov 221649071d43SKirill A. Shutemov page = pfn_to_page(pfn); 221749071d43SKirill A. Shutemov if (!get_page_unless_zero(page)) 221849071d43SKirill A. Shutemov continue; 221949071d43SKirill A. Shutemov 222049071d43SKirill A. Shutemov if (zone != page_zone(page)) 222149071d43SKirill A. Shutemov goto next; 222249071d43SKirill A. Shutemov 2223baa355fdSKirill A. Shutemov if (!PageHead(page) || PageHuge(page) || !PageLRU(page)) 222449071d43SKirill A. Shutemov goto next; 222549071d43SKirill A. Shutemov 222649071d43SKirill A. Shutemov total++; 222749071d43SKirill A. Shutemov lock_page(page); 222849071d43SKirill A. Shutemov if (!split_huge_page(page)) 222949071d43SKirill A. Shutemov split++; 223049071d43SKirill A. Shutemov unlock_page(page); 223149071d43SKirill A. Shutemov next: 223249071d43SKirill A. Shutemov put_page(page); 223349071d43SKirill A. Shutemov } 223449071d43SKirill A. Shutemov } 223549071d43SKirill A. 
Shutemov
2236145bdaa1SYang Shi pr_info("%lu of %lu THP split\n", split, total);
223749071d43SKirill A. Shutemov
223849071d43SKirill A. Shutemov return 0;
223949071d43SKirill A. Shutemov }
224049071d43SKirill A. Shutemov DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set,
224149071d43SKirill A. Shutemov "%llu\n");
224249071d43SKirill A. Shutemov
224349071d43SKirill A. Shutemov static int __init split_huge_pages_debugfs(void)
224449071d43SKirill A. Shutemov {
224549071d43SKirill A. Shutemov void *ret;
224649071d43SKirill A. Shutemov
2247145bdaa1SYang Shi ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
224849071d43SKirill A. Shutemov &split_huge_pages_fops);
224949071d43SKirill A. Shutemov if (!ret)
225049071d43SKirill A. Shutemov pr_warn("Failed to create split_huge_pages in debugfs\n");
225149071d43SKirill A. Shutemov return 0;
225249071d43SKirill A. Shutemov }
225349071d43SKirill A. Shutemov late_initcall(split_huge_pages_debugfs);
225449071d43SKirill A. Shutemov #endif
2255
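/*
 * The debugfs knob registered above gives a way to exercise split_huge_page()
 * from userspace: writing the value 1 to split_huge_pages runs
 * split_huge_pages_set(), which walks every populated zone and tries to split
 * each THP head page it can pin, then logs a "%lu of %lu THP split" summary
 * via pr_info().  A minimal, illustrative userspace sketch follows; it assumes
 * debugfs is mounted at /sys/kernel/debug and the caller is root, since the
 * file is created with mode 0200:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/kernel/debug/split_huge_pages", O_WRONLY);
 *
 *		if (fd < 0) {
 *			perror("split_huge_pages");
 *			return 1;
 *		}
 *		// Only the value 1 is accepted; anything else gets -EINVAL.
 *		if (write(fd, "1", 1) != 1)
 *			perror("write");
 *		close(fd);
 *		return 0;
 *	}
 *
 * The split counts reported by the walk appear in the kernel log (dmesg).
 */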