/*
 * Copyright (C) 2009  Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/dax.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default transparent hugepage support is disabled, so as not to risk
 * increasing the memory footprint of applications without a guaranteed
 * benefit. When transparent hugepage support is enabled, it is enabled for
 * all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
/* default scan 8*512 ptes (or vmas) every 30 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default collapse into a hugepage if there is at least one pte mapped,
 * as would have happened if the vma had been large enough during page
 * fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;

static int khugepaged(void *none);
static int khugepaged_slab_init(void);
static void khugepaged_slab_exit(void);

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};
static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

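/*
 * Raise min_free_kbytes so the allocator keeps enough pageblocks free,
 * and of the right migratetypes, for hugepage allocations to succeed
 * without excessive reclaim. Called when khugepaged is started.
 */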
static int set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	for_each_populated_zone(zone)
		nr_zones++;

	/* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* don't ever allow to reserve more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu "
				"to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}
	setup_per_zone_wmarks();
	return 0;
}

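/*
 * Start the khugepaged kernel thread if khugepaged is enabled, or stop
 * it if it has been disabled. Starting also re-tunes min_free_kbytes
 * for hugepage allocations.
 */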
static int start_stop_khugepaged(void)
{
	int err = 0;
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (unlikely(IS_ERR(khugepaged_thread))) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
fail:
	return err;
}

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;

struct page *get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return READ_ONCE(huge_zero_page);

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return NULL;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return READ_ONCE(huge_zero_page);
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

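/*
 * The huge zero page is freed lazily: the shrinker callbacks below
 * release it only once the sole remaining reference is the cached one
 * taken at allocation time in get_huge_zero_page().
 */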
static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};

#ifdef CONFIG_SYSFS

static ssize_t double_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag enabled,
				enum transparent_hugepage_flag req_madv)
{
	if (test_bit(enabled, &transparent_hugepage_flags)) {
		VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
		return sprintf(buf, "[always] madvise never\n");
	} else if (test_bit(req_madv, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}
static ssize_t double_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag enabled,
				 enum transparent_hugepage_flag req_madv)
{
	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		set_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		set_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
}

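/*
 * Writing to "enabled" not only flips the flags but also starts or
 * stops the khugepaged thread to match the new setting.
 */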
static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret;

	ret = double_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);

	if (ret > 0) {
		int err;

		mutex_lock(&khugepaged_mutex);
		err = start_stop_khugepaged();
		mutex_unlock(&khugepaged_mutex);

		if (err)
			ret = err;
	}

	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);

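/*
 * single_flag_show()/single_flag_store() back the simple boolean
 * attributes: the flag is printed as 0/1 and only 0 or 1 is accepted
 * on write.
 */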
static ssize_t single_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	return sprintf(buf, "%d\n",
		       !!test_bit(flag, &transparent_hugepage_flags));
}

static ssize_t single_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

/*
 * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
 * __GFP_REPEAT is too aggressive; it's never worth swapping tons of
 * memory just to allocate one more hugepage.
 */
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return double_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);

static ssize_t use_zero_page_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr =
	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

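/*
 * The attributes below are exported in the "khugepaged" subdirectory
 * and tune the background collapse daemon rather than the fault path.
 */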
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long pages;

	err = kstrtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over any unmapped ptes, in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
 * reduce the available free memory in the system as it runs. Increasing
 * max_ptes_none will instead potentially reduce the free memory in the
 * system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

static struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};

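/*
 * Create /sys/kernel/mm/transparent_hugepage and register both
 * attribute groups under it, unwinding on failure.
 */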
static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 0;
		return -EINVAL;
	}

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_slab_init();
	if (err)
		goto err_slab;

	err = register_shrinker(&huge_zero_page_shrinker);
	if (err)
		goto err_hzp_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save.  The admin can still enable it through /sys.
	 */
	if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
	khugepaged_slab_exit();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
{
	pmd_t entry;
	entry = mk_pmd(page, prot);
	entry = pmd_mkhuge(entry);
	return entry;
}

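/*
 * Install a freshly allocated, zeroed hugepage at @address: charge it
 * to memcg, deposit a pagetable for a possible later split, and map it
 * with a huge pmd. Hands the fault to userland instead if the vma is
 * registered for userfaultfd missing-page events.
 */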
static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmd,
					struct page *page, gfp_t gfp,
					unsigned int flags)
{
	struct mem_cgroup *memcg;
	pgtable_t pgtable;
	spinlock_t *ptl;
	unsigned long haddr = address & HPAGE_PMD_MASK;

	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (mem_cgroup_try_charge(page, mm, gfp, &memcg)) {
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}

	pgtable = pte_alloc_one(mm, haddr);
	if (unlikely(!pgtable)) {
		mem_cgroup_cancel_charge(page, memcg);
		put_page(page);
		return VM_FAULT_OOM;
	}

	clear_huge_page(page, haddr, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__SetPageUptodate(page);

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_none(*pmd))) {
		spin_unlock(ptl);
		mem_cgroup_cancel_charge(page, memcg);
		put_page(page);
		pte_free(mm, pgtable);
	} else {
		pmd_t entry;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			int ret;

			spin_unlock(ptl);
			mem_cgroup_cancel_charge(page, memcg);
			put_page(page);
			pte_free(mm, pgtable);
			ret = handle_userfault(vma, address, flags,
					       VM_UFFD_MISSING);
			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			return ret;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr);
		mem_cgroup_commit_charge(page, memcg, false);
		lru_cache_add_active_or_unevictable(page, vma);
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		set_pmd_at(mm, haddr, pmd, entry);
		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
		atomic_long_inc(&mm->nr_ptes);
		spin_unlock(ptl);
		count_vm_event(THP_FAULT_ALLOC);
	}

	return 0;
}

static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
{
	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
}

/* Caller must hold page table lock. */
bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return false;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	atomic_long_inc(&mm->nr_ptes);
	return true;
}

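/*
 * Anonymous hugepage fault entry point. Read faults can be satisfied
 * with the shared huge zero page when use_zero_page is enabled;
 * otherwise a hugepage is allocated and mapped, falling back to normal
 * pages when the allocation fails.
 */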
int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       unsigned int flags)
{
	gfp_t gfp;
	struct page *page;
	unsigned long haddr = address & HPAGE_PMD_MASK;

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
		return VM_FAULT_OOM;
	if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm) &&
			transparent_hugepage_use_zero_page()) {
		spinlock_t *ptl;
		pgtable_t pgtable;
		struct page *zero_page;
		bool set;
		int ret;
		pgtable = pte_alloc_one(mm, haddr);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = get_huge_zero_page();
		if (unlikely(!zero_page)) {
			pte_free(mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		ptl = pmd_lock(mm, pmd);
		ret = 0;
		set = false;
		if (pmd_none(*pmd)) {
			if (userfaultfd_missing(vma)) {
				spin_unlock(ptl);
				ret = handle_userfault(vma, address, flags,
						       VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_page(pgtable, mm, vma,
						   haddr, pmd,
						   zero_page);
				spin_unlock(ptl);
				set = true;
			}
		} else
			spin_unlock(ptl);
		if (!set) {
			pte_free(mm, pgtable);
			put_huge_zero_page();
		}
		return ret;
	}
	gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
	if (unlikely(!page)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	return __do_huge_pmd_anonymous_page(mm, vma, address, pmd, page, gfp,
					    flags);
}

static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, unsigned long pfn, pgprot_t prot, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (pmd_none(*pmd)) {
		entry = pmd_mkhuge(pfn_pmd(pfn, prot));
		if (write) {
			entry = pmd_mkyoung(pmd_mkdirty(entry));
			entry = maybe_pmd_mkwrite(entry, vma);
		}
		set_pmd_at(mm, addr, pmd, entry);
		update_mmu_cache_pmd(vma, addr, pmd);
	}
	spin_unlock(ptl);
}

int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmd, unsigned long pfn, bool write)
{
	pgprot_t pgprot = vma->vm_page_prot;
	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;
	if (track_pfn_insert(vma, &pgprot, pfn))
		return VM_FAULT_SIGBUS;
	insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write);
	return VM_FAULT_NOPAGE;
}

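/*
 * Copy a huge pmd from the parent to the child mm at fork. The huge
 * zero page only needs an extra reference; a normal THP is write
 * protected in both mms so a later write triggers copy-on-write. If
 * the source pmd is being split we back out and wait for the split.
 */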
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable;
	int ret;

	ret = -ENOMEM;
	pgtable = pte_alloc_one(dst_mm, addr);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
	src_ptl = pmd_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;
	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	/*
	 * When page table lock is held, the huge zero pmd should not be
	 * under splitting since we don't split the page itself, only pmd to
	 * a page table.
	 */
	if (is_huge_zero_pmd(pmd)) {
		struct page *zero_page;
		/*
		 * get_huge_zero_page() will never allocate a new page here,
		 * since we already have a zero page to copy. It just takes a
		 * reference.
		 */
		zero_page = get_huge_zero_page();
		set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
				zero_page);
		ret = 0;
		goto out_unlock;
	}

	if (unlikely(pmd_trans_splitting(pmd))) {
		/* split huge page running from under us */
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
		pte_free(dst_mm, pgtable);

		wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
		goto out;
	}
	src_page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
	get_page(src_page);
	page_dup_rmap(src_page);
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);

	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
	atomic_long_inc(&dst_mm->nr_ptes);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
out:
	return ret;
}

void huge_pmd_set_accessed(struct mm_struct *mm,
			   struct vm_area_struct *vma,
			   unsigned long address,
			   pmd_t *pmd, pmd_t orig_pmd,
			   int dirty)
{
	spinlock_t *ptl;
	pmd_t entry;
	unsigned long haddr;

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto unlock;

	entry = pmd_mkyoung(orig_pmd);
	haddr = address & HPAGE_PMD_MASK;
	if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
		update_mmu_cache_pmd(vma, address, pmd);

unlock:
	spin_unlock(ptl);
}

/*
 * Save CONFIG_DEBUG_PAGEALLOC from faulting falsely on tail pages
 * during copy_user_huge_page()'s copy_page_rep(): in the case when
 * the source page gets split and a tail freed before copy completes.
 * Called under pmd_lock of checked pmd, so safe from splitting itself.
 */
static void get_user_huge_page(struct page *page)
{
	if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
		struct page *endpage = page + HPAGE_PMD_NR;

		atomic_add(HPAGE_PMD_NR, &page->_count);
		while (++page < endpage)
			get_huge_page_tail(page);
	} else {
		get_page(page);
	}
}

static void put_user_huge_page(struct page *page)
{
	if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
		struct page *endpage = page + HPAGE_PMD_NR;

		while (page < endpage)
			put_page(page++);
	} else {
		put_page(page);
	}
}

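/*
 * COW fallback used when a replacement hugepage cannot be allocated:
 * copy the faulting THP into HPAGE_PMD_NR individually charged small
 * pages and remap the range with a regular page table.
 */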
HPAGE_PMD_NR; i++) { 108871e3aac0SAndrea Arcangeli copy_user_highpage(pages[i], page + i, 10890089e485SHillf Danton haddr + PAGE_SIZE * i, vma); 109071e3aac0SAndrea Arcangeli __SetPageUptodate(pages[i]); 109171e3aac0SAndrea Arcangeli cond_resched(); 109271e3aac0SAndrea Arcangeli } 109371e3aac0SAndrea Arcangeli 10942ec74c3eSSagi Grimberg mmun_start = haddr; 10952ec74c3eSSagi Grimberg mmun_end = haddr + HPAGE_PMD_SIZE; 10962ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 10972ec74c3eSSagi Grimberg 1098c4088ebdSKirill A. Shutemov ptl = pmd_lock(mm, pmd); 109971e3aac0SAndrea Arcangeli if (unlikely(!pmd_same(*pmd, orig_pmd))) 110071e3aac0SAndrea Arcangeli goto out_free_pages; 1101309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(page), page); 110271e3aac0SAndrea Arcangeli 11038809aa2dSAneesh Kumar K.V pmdp_huge_clear_flush_notify(vma, haddr, pmd); 110471e3aac0SAndrea Arcangeli /* leave pmd empty until pte is filled */ 110571e3aac0SAndrea Arcangeli 11066b0b50b0SAneesh Kumar K.V pgtable = pgtable_trans_huge_withdraw(mm, pmd); 110771e3aac0SAndrea Arcangeli pmd_populate(mm, &_pmd, pgtable); 110871e3aac0SAndrea Arcangeli 110971e3aac0SAndrea Arcangeli for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 111071e3aac0SAndrea Arcangeli pte_t *pte, entry; 111171e3aac0SAndrea Arcangeli entry = mk_pte(pages[i], vma->vm_page_prot); 111271e3aac0SAndrea Arcangeli entry = maybe_mkwrite(pte_mkdirty(entry), vma); 111300501b53SJohannes Weiner memcg = (void *)page_private(pages[i]); 111400501b53SJohannes Weiner set_page_private(pages[i], 0); 111571e3aac0SAndrea Arcangeli page_add_new_anon_rmap(pages[i], vma, haddr); 111600501b53SJohannes Weiner mem_cgroup_commit_charge(pages[i], memcg, false); 111700501b53SJohannes Weiner lru_cache_add_active_or_unevictable(pages[i], vma); 111871e3aac0SAndrea Arcangeli pte = pte_offset_map(&_pmd, haddr); 111971e3aac0SAndrea Arcangeli VM_BUG_ON(!pte_none(*pte)); 112071e3aac0SAndrea Arcangeli set_pte_at(mm, haddr, pte, entry); 112171e3aac0SAndrea Arcangeli pte_unmap(pte); 112271e3aac0SAndrea Arcangeli } 112371e3aac0SAndrea Arcangeli kfree(pages); 112471e3aac0SAndrea Arcangeli 112571e3aac0SAndrea Arcangeli smp_wmb(); /* make pte visible before pmd */ 112671e3aac0SAndrea Arcangeli pmd_populate(mm, pmd, pgtable); 112771e3aac0SAndrea Arcangeli page_remove_rmap(page); 1128c4088ebdSKirill A. Shutemov spin_unlock(ptl); 112971e3aac0SAndrea Arcangeli 11302ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 11312ec74c3eSSagi Grimberg 113271e3aac0SAndrea Arcangeli ret |= VM_FAULT_WRITE; 113371e3aac0SAndrea Arcangeli put_page(page); 113471e3aac0SAndrea Arcangeli 113571e3aac0SAndrea Arcangeli out: 113671e3aac0SAndrea Arcangeli return ret; 113771e3aac0SAndrea Arcangeli 113871e3aac0SAndrea Arcangeli out_free_pages: 1139c4088ebdSKirill A. 
Shutemov spin_unlock(ptl); 11402ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1141b9bbfbe3SAndrea Arcangeli for (i = 0; i < HPAGE_PMD_NR; i++) { 114200501b53SJohannes Weiner memcg = (void *)page_private(pages[i]); 114300501b53SJohannes Weiner set_page_private(pages[i], 0); 114400501b53SJohannes Weiner mem_cgroup_cancel_charge(pages[i], memcg); 114571e3aac0SAndrea Arcangeli put_page(pages[i]); 1146b9bbfbe3SAndrea Arcangeli } 114771e3aac0SAndrea Arcangeli kfree(pages); 114871e3aac0SAndrea Arcangeli goto out; 114971e3aac0SAndrea Arcangeli } 115071e3aac0SAndrea Arcangeli 115171e3aac0SAndrea Arcangeli int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, 115271e3aac0SAndrea Arcangeli unsigned long address, pmd_t *pmd, pmd_t orig_pmd) 115371e3aac0SAndrea Arcangeli { 1154c4088ebdSKirill A. Shutemov spinlock_t *ptl; 115571e3aac0SAndrea Arcangeli int ret = 0; 115693b4796dSKirill A. Shutemov struct page *page = NULL, *new_page; 115700501b53SJohannes Weiner struct mem_cgroup *memcg; 115871e3aac0SAndrea Arcangeli unsigned long haddr; 11592ec74c3eSSagi Grimberg unsigned long mmun_start; /* For mmu_notifiers */ 11602ec74c3eSSagi Grimberg unsigned long mmun_end; /* For mmu_notifiers */ 11613b363692SMichal Hocko gfp_t huge_gfp; /* for allocation and charge */ 116271e3aac0SAndrea Arcangeli 1163c4088ebdSKirill A. Shutemov ptl = pmd_lockptr(mm, pmd); 116481d1b09cSSasha Levin VM_BUG_ON_VMA(!vma->anon_vma, vma); 116593b4796dSKirill A. Shutemov haddr = address & HPAGE_PMD_MASK; 116693b4796dSKirill A. Shutemov if (is_huge_zero_pmd(orig_pmd)) 116793b4796dSKirill A. Shutemov goto alloc; 1168c4088ebdSKirill A. Shutemov spin_lock(ptl); 116971e3aac0SAndrea Arcangeli if (unlikely(!pmd_same(*pmd, orig_pmd))) 117071e3aac0SAndrea Arcangeli goto out_unlock; 117171e3aac0SAndrea Arcangeli 117271e3aac0SAndrea Arcangeli page = pmd_page(orig_pmd); 1173309381feSSasha Levin VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page); 117471e3aac0SAndrea Arcangeli if (page_mapcount(page) == 1) { 117571e3aac0SAndrea Arcangeli pmd_t entry; 117671e3aac0SAndrea Arcangeli entry = pmd_mkyoung(orig_pmd); 117771e3aac0SAndrea Arcangeli entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 117871e3aac0SAndrea Arcangeli if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1)) 1179b113da65SDavid Miller update_mmu_cache_pmd(vma, address, pmd); 118071e3aac0SAndrea Arcangeli ret |= VM_FAULT_WRITE; 118171e3aac0SAndrea Arcangeli goto out_unlock; 118271e3aac0SAndrea Arcangeli } 11835338a937SHugh Dickins get_user_huge_page(page); 1184c4088ebdSKirill A. Shutemov spin_unlock(ptl); 118593b4796dSKirill A. Shutemov alloc: 118671e3aac0SAndrea Arcangeli if (transparent_hugepage_enabled(vma) && 1187077fcf11SAneesh Kumar K.V !transparent_hugepage_debug_cow()) { 11883b363692SMichal Hocko huge_gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0); 11893b363692SMichal Hocko new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); 1190077fcf11SAneesh Kumar K.V } else 119171e3aac0SAndrea Arcangeli new_page = NULL; 119271e3aac0SAndrea Arcangeli 119371e3aac0SAndrea Arcangeli if (unlikely(!new_page)) { 1194eecc1e42SHugh Dickins if (!page) { 1195e9b71ca9SKirill A. Shutemov split_huge_page_pmd(vma, address, pmd); 1196e9b71ca9SKirill A. Shutemov ret |= VM_FAULT_FALLBACK; 119793b4796dSKirill A. Shutemov } else { 119871e3aac0SAndrea Arcangeli ret = do_huge_pmd_wp_page_fallback(mm, vma, address, 119971e3aac0SAndrea Arcangeli pmd, orig_pmd, page, haddr); 12009845cbbdSKirill A. 
Shutemov if (ret & VM_FAULT_OOM) { 12011f1d06c3SDavid Rientjes split_huge_page(page); 12029845cbbdSKirill A. Shutemov ret |= VM_FAULT_FALLBACK; 12039845cbbdSKirill A. Shutemov } 12045338a937SHugh Dickins put_user_huge_page(page); 120593b4796dSKirill A. Shutemov } 120617766ddeSDavid Rientjes count_vm_event(THP_FAULT_FALLBACK); 120771e3aac0SAndrea Arcangeli goto out; 120871e3aac0SAndrea Arcangeli } 120971e3aac0SAndrea Arcangeli 12103b363692SMichal Hocko if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg))) { 1211b9bbfbe3SAndrea Arcangeli put_page(new_page); 121293b4796dSKirill A. Shutemov if (page) { 12131f1d06c3SDavid Rientjes split_huge_page(page); 12145338a937SHugh Dickins put_user_huge_page(page); 12159845cbbdSKirill A. Shutemov } else 12169845cbbdSKirill A. Shutemov split_huge_page_pmd(vma, address, pmd); 12179845cbbdSKirill A. Shutemov ret |= VM_FAULT_FALLBACK; 121817766ddeSDavid Rientjes count_vm_event(THP_FAULT_FALLBACK); 1219b9bbfbe3SAndrea Arcangeli goto out; 1220b9bbfbe3SAndrea Arcangeli } 1221b9bbfbe3SAndrea Arcangeli 122217766ddeSDavid Rientjes count_vm_event(THP_FAULT_ALLOC); 122317766ddeSDavid Rientjes 1224eecc1e42SHugh Dickins if (!page) 122593b4796dSKirill A. Shutemov clear_huge_page(new_page, haddr, HPAGE_PMD_NR); 122693b4796dSKirill A. Shutemov else 122771e3aac0SAndrea Arcangeli copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); 122871e3aac0SAndrea Arcangeli __SetPageUptodate(new_page); 122971e3aac0SAndrea Arcangeli 12302ec74c3eSSagi Grimberg mmun_start = haddr; 12312ec74c3eSSagi Grimberg mmun_end = haddr + HPAGE_PMD_SIZE; 12322ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 12332ec74c3eSSagi Grimberg 1234c4088ebdSKirill A. Shutemov spin_lock(ptl); 123593b4796dSKirill A. Shutemov if (page) 12365338a937SHugh Dickins put_user_huge_page(page); 1237b9bbfbe3SAndrea Arcangeli if (unlikely(!pmd_same(*pmd, orig_pmd))) { 1238c4088ebdSKirill A. Shutemov spin_unlock(ptl); 123900501b53SJohannes Weiner mem_cgroup_cancel_charge(new_page, memcg); 124071e3aac0SAndrea Arcangeli put_page(new_page); 12412ec74c3eSSagi Grimberg goto out_mn; 1242b9bbfbe3SAndrea Arcangeli } else { 124371e3aac0SAndrea Arcangeli pmd_t entry; 12443122359aSKirill A. Shutemov entry = mk_huge_pmd(new_page, vma->vm_page_prot); 12453122359aSKirill A. Shutemov entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 12468809aa2dSAneesh Kumar K.V pmdp_huge_clear_flush_notify(vma, haddr, pmd); 124771e3aac0SAndrea Arcangeli page_add_new_anon_rmap(new_page, vma, haddr); 124800501b53SJohannes Weiner mem_cgroup_commit_charge(new_page, memcg, false); 124900501b53SJohannes Weiner lru_cache_add_active_or_unevictable(new_page, vma); 125071e3aac0SAndrea Arcangeli set_pmd_at(mm, haddr, pmd, entry); 1251b113da65SDavid Miller update_mmu_cache_pmd(vma, address, pmd); 1252eecc1e42SHugh Dickins if (!page) { 125393b4796dSKirill A. Shutemov add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); 125497ae1749SKirill A. Shutemov put_huge_zero_page(); 125597ae1749SKirill A. Shutemov } else { 1256309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(page), page); 125771e3aac0SAndrea Arcangeli page_remove_rmap(page); 125871e3aac0SAndrea Arcangeli put_page(page); 125993b4796dSKirill A. Shutemov } 126071e3aac0SAndrea Arcangeli ret |= VM_FAULT_WRITE; 126171e3aac0SAndrea Arcangeli } 1262c4088ebdSKirill A. 
Shutemov spin_unlock(ptl); 12632ec74c3eSSagi Grimberg out_mn: 12642ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 12652ec74c3eSSagi Grimberg out: 12662ec74c3eSSagi Grimberg return ret; 126771e3aac0SAndrea Arcangeli out_unlock: 1268c4088ebdSKirill A. Shutemov spin_unlock(ptl); 126971e3aac0SAndrea Arcangeli return ret; 127071e3aac0SAndrea Arcangeli } 127171e3aac0SAndrea Arcangeli 1272b676b293SDavid Rientjes struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, 127371e3aac0SAndrea Arcangeli unsigned long addr, 127471e3aac0SAndrea Arcangeli pmd_t *pmd, 127571e3aac0SAndrea Arcangeli unsigned int flags) 127671e3aac0SAndrea Arcangeli { 1277b676b293SDavid Rientjes struct mm_struct *mm = vma->vm_mm; 127871e3aac0SAndrea Arcangeli struct page *page = NULL; 127971e3aac0SAndrea Arcangeli 1280c4088ebdSKirill A. Shutemov assert_spin_locked(pmd_lockptr(mm, pmd)); 128171e3aac0SAndrea Arcangeli 128271e3aac0SAndrea Arcangeli if (flags & FOLL_WRITE && !pmd_write(*pmd)) 128371e3aac0SAndrea Arcangeli goto out; 128471e3aac0SAndrea Arcangeli 128585facf25SKirill A. Shutemov /* Avoid dumping huge zero page */ 128685facf25SKirill A. Shutemov if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) 128785facf25SKirill A. Shutemov return ERR_PTR(-EFAULT); 128885facf25SKirill A. Shutemov 12892b4847e7SMel Gorman /* Full NUMA hinting faults to serialise migration in fault paths */ 12908a0516edSMel Gorman if ((flags & FOLL_NUMA) && pmd_protnone(*pmd)) 12912b4847e7SMel Gorman goto out; 12922b4847e7SMel Gorman 129371e3aac0SAndrea Arcangeli page = pmd_page(*pmd); 1294309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(page), page); 129571e3aac0SAndrea Arcangeli if (flags & FOLL_TOUCH) { 129671e3aac0SAndrea Arcangeli pmd_t _pmd; 129771e3aac0SAndrea Arcangeli /* 129871e3aac0SAndrea Arcangeli * We should set the dirty bit only for FOLL_WRITE but 129971e3aac0SAndrea Arcangeli * for now the dirty bit in the pmd is meaningless. 130071e3aac0SAndrea Arcangeli * And if the dirty bit will become meaningful and 130171e3aac0SAndrea Arcangeli * we'll only set it with FOLL_WRITE, an atomic 130271e3aac0SAndrea Arcangeli * set_bit will be required on the pmd to set the 130371e3aac0SAndrea Arcangeli * young bit, instead of the current set_pmd_at. 130471e3aac0SAndrea Arcangeli */ 130571e3aac0SAndrea Arcangeli _pmd = pmd_mkyoung(pmd_mkdirty(*pmd)); 13068663890aSAneesh Kumar K.V if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, 13078663890aSAneesh Kumar K.V pmd, _pmd, 1)) 13088663890aSAneesh Kumar K.V update_mmu_cache_pmd(vma, addr, pmd); 130971e3aac0SAndrea Arcangeli } 131084d33df2SKirill A. 
Shutemov if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) { 1311b676b293SDavid Rientjes if (page->mapping && trylock_page(page)) { 1312b676b293SDavid Rientjes lru_add_drain(); 1313b676b293SDavid Rientjes if (page->mapping) 1314b676b293SDavid Rientjes mlock_vma_page(page); 1315b676b293SDavid Rientjes unlock_page(page); 1316b676b293SDavid Rientjes } 1317b676b293SDavid Rientjes } 131871e3aac0SAndrea Arcangeli page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; 1319309381feSSasha Levin VM_BUG_ON_PAGE(!PageCompound(page), page); 132071e3aac0SAndrea Arcangeli if (flags & FOLL_GET) 132170b50f94SAndrea Arcangeli get_page_foll(page); 132271e3aac0SAndrea Arcangeli 132371e3aac0SAndrea Arcangeli out: 132471e3aac0SAndrea Arcangeli return page; 132571e3aac0SAndrea Arcangeli } 132671e3aac0SAndrea Arcangeli 1327d10e63f2SMel Gorman /* NUMA hinting page fault entry point for trans huge pmds */ 13284daae3b4SMel Gorman int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, 13294daae3b4SMel Gorman unsigned long addr, pmd_t pmd, pmd_t *pmdp) 1330d10e63f2SMel Gorman { 1331c4088ebdSKirill A. Shutemov spinlock_t *ptl; 1332b8916634SMel Gorman struct anon_vma *anon_vma = NULL; 1333b32967ffSMel Gorman struct page *page; 1334d10e63f2SMel Gorman unsigned long haddr = addr & HPAGE_PMD_MASK; 13358191acbdSMel Gorman int page_nid = -1, this_nid = numa_node_id(); 133690572890SPeter Zijlstra int target_nid, last_cpupid = -1; 13378191acbdSMel Gorman bool page_locked; 13388191acbdSMel Gorman bool migrated = false; 1339b191f9b1SMel Gorman bool was_writable; 13406688cc05SPeter Zijlstra int flags = 0; 1341d10e63f2SMel Gorman 1342c0e7cad9SMel Gorman /* A PROT_NONE fault should not end up here */ 1343c0e7cad9SMel Gorman BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))); 1344c0e7cad9SMel Gorman 1345c4088ebdSKirill A. Shutemov ptl = pmd_lock(mm, pmdp); 1346d10e63f2SMel Gorman if (unlikely(!pmd_same(pmd, *pmdp))) 1347d10e63f2SMel Gorman goto out_unlock; 1348d10e63f2SMel Gorman 1349de466bd6SMel Gorman /* 1350de466bd6SMel Gorman * If there are potential migrations, wait for completion and retry 1351de466bd6SMel Gorman * without disrupting NUMA hinting information. Do not relock and 1352de466bd6SMel Gorman * check_same as the page may no longer be mapped. 
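 *
 * As an aside, follow_trans_huge_pmd() above expects its caller to hold
 * the pmd spinlock; a hedged sketch of the calling pattern, modelled
 * loosely on follow_page_mask() in mm/gup.c:
 *
 *	ptl = pmd_lock(mm, pmd);
 *	if (likely(pmd_trans_huge(*pmd) && !pmd_trans_splitting(*pmd)))
 *		page = follow_trans_huge_pmd(vma, address, pmd, flags);
 *	spin_unlock(ptl);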
1353de466bd6SMel Gorman */
1354de466bd6SMel Gorman if (unlikely(pmd_trans_migrating(*pmdp))) {
13555d833062SMel Gorman page = pmd_page(*pmdp);
1356de466bd6SMel Gorman spin_unlock(ptl);
13575d833062SMel Gorman wait_on_page_locked(page);
1358de466bd6SMel Gorman goto out;
1359de466bd6SMel Gorman }
1360de466bd6SMel Gorman
1361d10e63f2SMel Gorman page = pmd_page(pmd);
1362a1a46184SMel Gorman BUG_ON(is_huge_zero_page(page));
13638191acbdSMel Gorman page_nid = page_to_nid(page);
136490572890SPeter Zijlstra last_cpupid = page_cpupid_last(page);
136503c5a6e1SMel Gorman count_vm_numa_event(NUMA_HINT_FAULTS);
136604bb2f94SRik van Riel if (page_nid == this_nid) {
136703c5a6e1SMel Gorman count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
136804bb2f94SRik van Riel flags |= TNF_FAULT_LOCAL;
136904bb2f94SRik van Riel }
13704daae3b4SMel Gorman
1371bea66fbdSMel Gorman /* See similar comment in do_numa_page for explanation */
1372bea66fbdSMel Gorman if (!(vma->vm_flags & VM_WRITE))
13736688cc05SPeter Zijlstra flags |= TNF_NO_GROUP;
13746688cc05SPeter Zijlstra
13756688cc05SPeter Zijlstra /*
1376ff9042b1SMel Gorman * Acquire the page lock to serialise THP migrations but avoid dropping
1377ff9042b1SMel Gorman * page_table_lock if at all possible
1378ff9042b1SMel Gorman */
1379b8916634SMel Gorman page_locked = trylock_page(page);
1380b8916634SMel Gorman target_nid = mpol_misplaced(page, vma, haddr);
1381b8916634SMel Gorman if (target_nid == -1) {
1382b8916634SMel Gorman /* If the page was locked, there are no parallel migrations */
1383a54a407fSMel Gorman if (page_locked)
1384b8916634SMel Gorman goto clear_pmdnuma;
13852b4847e7SMel Gorman }
1386cbee9f88SPeter Zijlstra
1387de466bd6SMel Gorman /* Migration could have started since the pmd_trans_migrating check */
13882b4847e7SMel Gorman if (!page_locked) {
1389c4088ebdSKirill A. Shutemov spin_unlock(ptl);
1390b8916634SMel Gorman wait_on_page_locked(page);
1391a54a407fSMel Gorman page_nid = -1;
1392b8916634SMel Gorman goto out;
1393b8916634SMel Gorman }
1394b8916634SMel Gorman
13952b4847e7SMel Gorman /*
13962b4847e7SMel Gorman * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
13972b4847e7SMel Gorman * to serialise splits.
13982b4847e7SMel Gorman */
1399b8916634SMel Gorman get_page(page);
1400c4088ebdSKirill A. Shutemov spin_unlock(ptl);
1401b8916634SMel Gorman anon_vma = page_lock_anon_vma_read(page);
1402b32967ffSMel Gorman
1403c69307d5SPeter Zijlstra /* Confirm the PMD did not change while page_table_lock was released */
1404c4088ebdSKirill A. Shutemov spin_lock(ptl);
1405b32967ffSMel Gorman if (unlikely(!pmd_same(pmd, *pmdp))) {
1406b32967ffSMel Gorman unlock_page(page);
1407b32967ffSMel Gorman put_page(page);
1408a54a407fSMel Gorman page_nid = -1;
1409b32967ffSMel Gorman goto out_unlock;
1410b32967ffSMel Gorman }
1411ff9042b1SMel Gorman
1412c3a489caSMel Gorman /* Bail if we fail to protect against THP splits for any reason */
1413c3a489caSMel Gorman if (unlikely(!anon_vma)) {
1414c3a489caSMel Gorman put_page(page);
1415c3a489caSMel Gorman page_nid = -1;
1416c3a489caSMel Gorman goto clear_pmdnuma;
1417c3a489caSMel Gorman }
1418c3a489caSMel Gorman
1419a54a407fSMel Gorman /*
1420a54a407fSMel Gorman * Migrate the THP to the requested node, returns with page unlocked
14218a0516edSMel Gorman * and access rights restored.
1422a54a407fSMel Gorman */
1423c4088ebdSKirill A.
Shutemov spin_unlock(ptl); 1424b32967ffSMel Gorman migrated = migrate_misplaced_transhuge_page(mm, vma, 1425340ef390SHugh Dickins pmdp, pmd, addr, page, target_nid); 14266688cc05SPeter Zijlstra if (migrated) { 14276688cc05SPeter Zijlstra flags |= TNF_MIGRATED; 14288191acbdSMel Gorman page_nid = target_nid; 1429074c2381SMel Gorman } else 1430074c2381SMel Gorman flags |= TNF_MIGRATE_FAIL; 1431b32967ffSMel Gorman 14328191acbdSMel Gorman goto out; 14334daae3b4SMel Gorman clear_pmdnuma: 1434a54a407fSMel Gorman BUG_ON(!PageLocked(page)); 1435b191f9b1SMel Gorman was_writable = pmd_write(pmd); 14364d942466SMel Gorman pmd = pmd_modify(pmd, vma->vm_page_prot); 1437b7b04004SMel Gorman pmd = pmd_mkyoung(pmd); 1438b191f9b1SMel Gorman if (was_writable) 1439b191f9b1SMel Gorman pmd = pmd_mkwrite(pmd); 1440d10e63f2SMel Gorman set_pmd_at(mm, haddr, pmdp, pmd); 1441d10e63f2SMel Gorman update_mmu_cache_pmd(vma, addr, pmdp); 1442a54a407fSMel Gorman unlock_page(page); 1443d10e63f2SMel Gorman out_unlock: 1444c4088ebdSKirill A. Shutemov spin_unlock(ptl); 1445b8916634SMel Gorman 1446b8916634SMel Gorman out: 1447b8916634SMel Gorman if (anon_vma) 1448b8916634SMel Gorman page_unlock_anon_vma_read(anon_vma); 1449b8916634SMel Gorman 14508191acbdSMel Gorman if (page_nid != -1) 14516688cc05SPeter Zijlstra task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags); 14528191acbdSMel Gorman 1453d10e63f2SMel Gorman return 0; 1454d10e63f2SMel Gorman } 1455d10e63f2SMel Gorman 145671e3aac0SAndrea Arcangeli int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1457f21760b1SShaohua Li pmd_t *pmd, unsigned long addr) 145871e3aac0SAndrea Arcangeli { 1459f5c8ad47SDavid Miller pmd_t orig_pmd; 1460*da146769SKirill A. Shutemov spinlock_t *ptl; 1461*da146769SKirill A. Shutemov 1462*da146769SKirill A. Shutemov if (__pmd_trans_huge_lock(pmd, vma, &ptl) != 1) 1463*da146769SKirill A. Shutemov return 0; 1464a6bf2bb0SAneesh Kumar K.V /* 1465a6bf2bb0SAneesh Kumar K.V * For architectures like ppc64 we look at deposited pgtable 14668809aa2dSAneesh Kumar K.V * when calling pmdp_huge_get_and_clear. So do the 1467a6bf2bb0SAneesh Kumar K.V * pgtable_trans_huge_withdraw after finishing pmdp related 1468a6bf2bb0SAneesh Kumar K.V * operations. 1469a6bf2bb0SAneesh Kumar K.V */ 14708809aa2dSAneesh Kumar K.V orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, 1471fcbe08d6SMartin Schwidefsky tlb->fullmm); 1472f21760b1SShaohua Li tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 14734897c765SMatthew Wilcox if (vma_is_dax(vma)) { 14744897c765SMatthew Wilcox spin_unlock(ptl); 1475*da146769SKirill A. Shutemov if (is_huge_zero_pmd(orig_pmd)) 1476*da146769SKirill A. Shutemov put_huge_zero_page(); 1477*da146769SKirill A. Shutemov } else if (is_huge_zero_pmd(orig_pmd)) { 1478*da146769SKirill A. Shutemov pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd)); 1479e1f56c89SKirill A. Shutemov atomic_long_dec(&tlb->mm->nr_ptes); 1480bf929152SKirill A. Shutemov spin_unlock(ptl); 148197ae1749SKirill A. Shutemov put_huge_zero_page(); 1482479f0abbSKirill A. Shutemov } else { 14834897c765SMatthew Wilcox struct page *page = pmd_page(orig_pmd); 148471e3aac0SAndrea Arcangeli page_remove_rmap(page); 1485309381feSSasha Levin VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); 148671e3aac0SAndrea Arcangeli add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); 1487309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(page), page); 1488*da146769SKirill A. Shutemov pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd)); 1489e1f56c89SKirill A. 
Shutemov atomic_long_dec(&tlb->mm->nr_ptes);
1490bf929152SKirill A. Shutemov spin_unlock(ptl);
149171e3aac0SAndrea Arcangeli tlb_remove_page(tlb, page);
1492479f0abbSKirill A. Shutemov }
1493*da146769SKirill A. Shutemov return 1;
149471e3aac0SAndrea Arcangeli }
149571e3aac0SAndrea Arcangeli
149637a1c49aSAndrea Arcangeli int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
149737a1c49aSAndrea Arcangeli unsigned long old_addr,
149837a1c49aSAndrea Arcangeli unsigned long new_addr, unsigned long old_end,
149937a1c49aSAndrea Arcangeli pmd_t *old_pmd, pmd_t *new_pmd)
150037a1c49aSAndrea Arcangeli {
1501bf929152SKirill A. Shutemov spinlock_t *old_ptl, *new_ptl;
150237a1c49aSAndrea Arcangeli int ret = 0;
150337a1c49aSAndrea Arcangeli pmd_t pmd;
150437a1c49aSAndrea Arcangeli
150537a1c49aSAndrea Arcangeli struct mm_struct *mm = vma->vm_mm;
150637a1c49aSAndrea Arcangeli
150737a1c49aSAndrea Arcangeli if ((old_addr & ~HPAGE_PMD_MASK) ||
150837a1c49aSAndrea Arcangeli (new_addr & ~HPAGE_PMD_MASK) ||
150937a1c49aSAndrea Arcangeli old_end - old_addr < HPAGE_PMD_SIZE ||
151037a1c49aSAndrea Arcangeli (new_vma->vm_flags & VM_NOHUGEPAGE))
151137a1c49aSAndrea Arcangeli goto out;
151237a1c49aSAndrea Arcangeli
151337a1c49aSAndrea Arcangeli /*
151437a1c49aSAndrea Arcangeli * The destination pmd shouldn't be established, free_pgtables()
151537a1c49aSAndrea Arcangeli * should have released it.
151637a1c49aSAndrea Arcangeli */
151737a1c49aSAndrea Arcangeli if (WARN_ON(!pmd_none(*new_pmd))) {
151837a1c49aSAndrea Arcangeli VM_BUG_ON(pmd_trans_huge(*new_pmd));
151937a1c49aSAndrea Arcangeli goto out;
152037a1c49aSAndrea Arcangeli }
152137a1c49aSAndrea Arcangeli
1522bf929152SKirill A. Shutemov /*
1523bf929152SKirill A. Shutemov * We don't have to worry about the ordering of src and dst
1524bf929152SKirill A. Shutemov * ptlocks because exclusive mmap_sem prevents deadlock.
1525bf929152SKirill A. Shutemov */
1526bf929152SKirill A. Shutemov ret = __pmd_trans_huge_lock(old_pmd, vma, &old_ptl);
1527025c5b24SNaoya Horiguchi if (ret == 1) {
1528bf929152SKirill A. Shutemov new_ptl = pmd_lockptr(mm, new_pmd);
1529bf929152SKirill A. Shutemov if (new_ptl != old_ptl)
1530bf929152SKirill A. Shutemov spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
15318809aa2dSAneesh Kumar K.V pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
153237a1c49aSAndrea Arcangeli VM_BUG_ON(!pmd_none(*new_pmd));
15333592806cSKirill A. Shutemov
1534b3084f4dSAneesh Kumar K.V if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
1535b3084f4dSAneesh Kumar K.V pgtable_t pgtable;
15363592806cSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
15373592806cSKirill A. Shutemov pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
15383592806cSKirill A. Shutemov }
1539b3084f4dSAneesh Kumar K.V set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
1540b3084f4dSAneesh Kumar K.V if (new_ptl != old_ptl)
1541b3084f4dSAneesh Kumar K.V spin_unlock(new_ptl);
1542bf929152SKirill A. Shutemov spin_unlock(old_ptl);
154337a1c49aSAndrea Arcangeli }
154437a1c49aSAndrea Arcangeli out:
154537a1c49aSAndrea Arcangeli return ret;
154637a1c49aSAndrea Arcangeli }
154737a1c49aSAndrea Arcangeli
1548f123d74aSMel Gorman /*
1549f123d74aSMel Gorman * Returns
1550f123d74aSMel Gorman * - 0 if PMD could not be locked
1551f123d74aSMel Gorman * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
1552f123d74aSMel Gorman * - HPAGE_PMD_NR if protections changed and TLB flush necessary
1553f123d74aSMel Gorman */
1554cd7548abSJohannes Weiner int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1555e944fd67SMel Gorman unsigned long addr, pgprot_t newprot, int prot_numa)
1556cd7548abSJohannes Weiner {
1557cd7548abSJohannes Weiner struct mm_struct *mm = vma->vm_mm;
1558bf929152SKirill A. Shutemov spinlock_t *ptl;
1559cd7548abSJohannes Weiner int ret = 0;
1560cd7548abSJohannes Weiner
1561bf929152SKirill A. Shutemov if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
1562cd7548abSJohannes Weiner pmd_t entry;
1563b191f9b1SMel Gorman bool preserve_write = prot_numa && pmd_write(*pmd);
1564ba68bc01SMel Gorman ret = 1;
1565e944fd67SMel Gorman
1566e944fd67SMel Gorman /*
1567e944fd67SMel Gorman * Avoid trapping faults against the zero page. The read-only
1568e944fd67SMel Gorman * data is likely to be read-cached on the local CPU and
1569e944fd67SMel Gorman * local/remote hits to the zero page are not interesting.
1570e944fd67SMel Gorman */
1571e944fd67SMel Gorman if (prot_numa && is_huge_zero_pmd(*pmd)) {
1572e944fd67SMel Gorman spin_unlock(ptl);
1573ba68bc01SMel Gorman return ret;
1574e944fd67SMel Gorman }
1575e944fd67SMel Gorman
157610c1045fSMel Gorman if (!prot_numa || !pmd_protnone(*pmd)) {
15778809aa2dSAneesh Kumar K.V entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
1578cd7548abSJohannes Weiner entry = pmd_modify(entry, newprot);
1579b191f9b1SMel Gorman if (preserve_write)
1580b191f9b1SMel Gorman entry = pmd_mkwrite(entry);
1581f123d74aSMel Gorman ret = HPAGE_PMD_NR;
158256eecdb9SAneesh Kumar K.V set_pmd_at(mm, addr, pmd, entry);
1583b191f9b1SMel Gorman BUG_ON(!preserve_write && pmd_write(entry));
158410c1045fSMel Gorman }
1585bf929152SKirill A. Shutemov spin_unlock(ptl);
1586cd7548abSJohannes Weiner }
1587cd7548abSJohannes Weiner
1588cd7548abSJohannes Weiner return ret;
1589cd7548abSJohannes Weiner }
1590cd7548abSJohannes Weiner
1591025c5b24SNaoya Horiguchi /*
1592025c5b24SNaoya Horiguchi * Returns 1 if a given pmd maps a stable (not under splitting) thp.
1593025c5b24SNaoya Horiguchi * Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
1594025c5b24SNaoya Horiguchi *
1595025c5b24SNaoya Horiguchi * Note that if it returns 1, this routine returns without unlocking page
1596025c5b24SNaoya Horiguchi * table locks. So callers must unlock them.
1597025c5b24SNaoya Horiguchi */
1598bf929152SKirill A. Shutemov int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
1599bf929152SKirill A. Shutemov spinlock_t **ptl)
1600025c5b24SNaoya Horiguchi {
1601bf929152SKirill A. Shutemov *ptl = pmd_lock(vma->vm_mm, pmd);
1602025c5b24SNaoya Horiguchi if (likely(pmd_trans_huge(*pmd))) {
1603025c5b24SNaoya Horiguchi if (unlikely(pmd_trans_splitting(*pmd))) {
1604bf929152SKirill A.
Shutemov spin_unlock(*ptl); 1605025c5b24SNaoya Horiguchi wait_split_huge_page(vma->anon_vma, pmd); 1606025c5b24SNaoya Horiguchi return -1; 1607025c5b24SNaoya Horiguchi } else { 1608025c5b24SNaoya Horiguchi /* Thp mapped by 'pmd' is stable, so we can 1609025c5b24SNaoya Horiguchi * handle it as it is. */ 1610025c5b24SNaoya Horiguchi return 1; 1611025c5b24SNaoya Horiguchi } 1612025c5b24SNaoya Horiguchi } 1613bf929152SKirill A. Shutemov spin_unlock(*ptl); 1614025c5b24SNaoya Horiguchi return 0; 1615025c5b24SNaoya Horiguchi } 1616025c5b24SNaoya Horiguchi 1617117b0791SKirill A. Shutemov /* 1618117b0791SKirill A. Shutemov * This function returns whether a given @page is mapped onto the @address 1619117b0791SKirill A. Shutemov * in the virtual space of @mm. 1620117b0791SKirill A. Shutemov * 1621117b0791SKirill A. Shutemov * When it's true, this function returns *pmd with holding the page table lock 1622117b0791SKirill A. Shutemov * and passing it back to the caller via @ptl. 1623117b0791SKirill A. Shutemov * If it's false, returns NULL without holding the page table lock. 1624117b0791SKirill A. Shutemov */ 162571e3aac0SAndrea Arcangeli pmd_t *page_check_address_pmd(struct page *page, 162671e3aac0SAndrea Arcangeli struct mm_struct *mm, 162771e3aac0SAndrea Arcangeli unsigned long address, 1628117b0791SKirill A. Shutemov enum page_check_address_pmd_flag flag, 1629117b0791SKirill A. Shutemov spinlock_t **ptl) 163071e3aac0SAndrea Arcangeli { 1631b5a8cad3SKirill A. Shutemov pgd_t *pgd; 1632b5a8cad3SKirill A. Shutemov pud_t *pud; 1633117b0791SKirill A. Shutemov pmd_t *pmd; 163471e3aac0SAndrea Arcangeli 163571e3aac0SAndrea Arcangeli if (address & ~HPAGE_PMD_MASK) 1636117b0791SKirill A. Shutemov return NULL; 163771e3aac0SAndrea Arcangeli 1638b5a8cad3SKirill A. Shutemov pgd = pgd_offset(mm, address); 1639b5a8cad3SKirill A. Shutemov if (!pgd_present(*pgd)) 1640117b0791SKirill A. Shutemov return NULL; 1641b5a8cad3SKirill A. Shutemov pud = pud_offset(pgd, address); 1642b5a8cad3SKirill A. Shutemov if (!pud_present(*pud)) 1643b5a8cad3SKirill A. Shutemov return NULL; 1644b5a8cad3SKirill A. Shutemov pmd = pmd_offset(pud, address); 1645b5a8cad3SKirill A. Shutemov 1646117b0791SKirill A. Shutemov *ptl = pmd_lock(mm, pmd); 1647b5a8cad3SKirill A. Shutemov if (!pmd_present(*pmd)) 1648117b0791SKirill A. Shutemov goto unlock; 164971e3aac0SAndrea Arcangeli if (pmd_page(*pmd) != page) 1650117b0791SKirill A. Shutemov goto unlock; 165194fcc585SAndrea Arcangeli /* 165294fcc585SAndrea Arcangeli * split_vma() may create temporary aliased mappings. There is 165394fcc585SAndrea Arcangeli * no risk as long as all huge pmd are found and have their 165494fcc585SAndrea Arcangeli * splitting bit set before __split_huge_page_refcount 165594fcc585SAndrea Arcangeli * runs. Finding the same huge pmd more than once during the 165694fcc585SAndrea Arcangeli * same rmap walk is not a problem. 165794fcc585SAndrea Arcangeli */ 165894fcc585SAndrea Arcangeli if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG && 165994fcc585SAndrea Arcangeli pmd_trans_splitting(*pmd)) 1660117b0791SKirill A. Shutemov goto unlock; 166171e3aac0SAndrea Arcangeli if (pmd_trans_huge(*pmd)) { 166271e3aac0SAndrea Arcangeli VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG && 166371e3aac0SAndrea Arcangeli !pmd_trans_splitting(*pmd)); 1664117b0791SKirill A. Shutemov return pmd; 166571e3aac0SAndrea Arcangeli } 1666117b0791SKirill A. Shutemov unlock: 1667117b0791SKirill A. Shutemov spin_unlock(*ptl); 1668117b0791SKirill A. 
Shutemov return NULL; 166971e3aac0SAndrea Arcangeli } 167071e3aac0SAndrea Arcangeli 167171e3aac0SAndrea Arcangeli static int __split_huge_page_splitting(struct page *page, 167271e3aac0SAndrea Arcangeli struct vm_area_struct *vma, 167371e3aac0SAndrea Arcangeli unsigned long address) 167471e3aac0SAndrea Arcangeli { 167571e3aac0SAndrea Arcangeli struct mm_struct *mm = vma->vm_mm; 1676117b0791SKirill A. Shutemov spinlock_t *ptl; 167771e3aac0SAndrea Arcangeli pmd_t *pmd; 167871e3aac0SAndrea Arcangeli int ret = 0; 16792ec74c3eSSagi Grimberg /* For mmu_notifiers */ 16802ec74c3eSSagi Grimberg const unsigned long mmun_start = address; 16812ec74c3eSSagi Grimberg const unsigned long mmun_end = address + HPAGE_PMD_SIZE; 168271e3aac0SAndrea Arcangeli 16832ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 168471e3aac0SAndrea Arcangeli pmd = page_check_address_pmd(page, mm, address, 1685117b0791SKirill A. Shutemov PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, &ptl); 168671e3aac0SAndrea Arcangeli if (pmd) { 168771e3aac0SAndrea Arcangeli /* 168871e3aac0SAndrea Arcangeli * We can't temporarily set the pmd to null in order 168971e3aac0SAndrea Arcangeli * to split it, the pmd must remain marked huge at all 169071e3aac0SAndrea Arcangeli * times or the VM won't take the pmd_trans_huge paths 16915a505085SIngo Molnar * and it won't wait on the anon_vma->root->rwsem to 169271e3aac0SAndrea Arcangeli * serialize against split_huge_page*. 169371e3aac0SAndrea Arcangeli */ 16942ec74c3eSSagi Grimberg pmdp_splitting_flush(vma, address, pmd); 169534ee645eSJoerg Roedel 169671e3aac0SAndrea Arcangeli ret = 1; 1697117b0791SKirill A. Shutemov spin_unlock(ptl); 169871e3aac0SAndrea Arcangeli } 16992ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 170071e3aac0SAndrea Arcangeli 170171e3aac0SAndrea Arcangeli return ret; 170271e3aac0SAndrea Arcangeli } 170371e3aac0SAndrea Arcangeli 17045bc7b8acSShaohua Li static void __split_huge_page_refcount(struct page *page, 17055bc7b8acSShaohua Li struct list_head *list) 170671e3aac0SAndrea Arcangeli { 170771e3aac0SAndrea Arcangeli int i; 170871e3aac0SAndrea Arcangeli struct zone *zone = page_zone(page); 1709fa9add64SHugh Dickins struct lruvec *lruvec; 171070b50f94SAndrea Arcangeli int tail_count = 0; 171171e3aac0SAndrea Arcangeli 171271e3aac0SAndrea Arcangeli /* prevent PageLRU to go away from under us, and freeze lru stats */ 171371e3aac0SAndrea Arcangeli spin_lock_irq(&zone->lru_lock); 1714fa9add64SHugh Dickins lruvec = mem_cgroup_page_lruvec(page, zone); 1715fa9add64SHugh Dickins 171671e3aac0SAndrea Arcangeli compound_lock(page); 1717e94c8a9cSKAMEZAWA Hiroyuki /* complete memcg works before add pages to LRU */ 1718e94c8a9cSKAMEZAWA Hiroyuki mem_cgroup_split_huge_fixup(page); 171971e3aac0SAndrea Arcangeli 172045676885SShaohua Li for (i = HPAGE_PMD_NR - 1; i >= 1; i--) { 172171e3aac0SAndrea Arcangeli struct page *page_tail = page + i; 172271e3aac0SAndrea Arcangeli 172370b50f94SAndrea Arcangeli /* tail_page->_mapcount cannot change */ 172470b50f94SAndrea Arcangeli BUG_ON(page_mapcount(page_tail) < 0); 172570b50f94SAndrea Arcangeli tail_count += page_mapcount(page_tail); 172670b50f94SAndrea Arcangeli /* check for overflow */ 172770b50f94SAndrea Arcangeli BUG_ON(tail_count < 0); 172870b50f94SAndrea Arcangeli BUG_ON(atomic_read(&page_tail->_count) != 0); 172970b50f94SAndrea Arcangeli /* 173070b50f94SAndrea Arcangeli * tail_page->_count is zero and not changing from 173170b50f94SAndrea Arcangeli * under us. 
But get_page_unless_zero() may be running 173270b50f94SAndrea Arcangeli * from under us on the tail_page. If we used 173370b50f94SAndrea Arcangeli * atomic_set() below instead of atomic_add(), we 173470b50f94SAndrea Arcangeli * would then run atomic_set() concurrently with 173570b50f94SAndrea Arcangeli * get_page_unless_zero(), and atomic_set() is 173670b50f94SAndrea Arcangeli * implemented in C not using locked ops. spin_unlock 173770b50f94SAndrea Arcangeli * on x86 sometime uses locked ops because of PPro 173870b50f94SAndrea Arcangeli * errata 66, 92, so unless somebody can guarantee 173970b50f94SAndrea Arcangeli * atomic_set() here would be safe on all archs (and 174070b50f94SAndrea Arcangeli * not only on x86), it's safer to use atomic_add(). 174170b50f94SAndrea Arcangeli */ 174270b50f94SAndrea Arcangeli atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1, 174370b50f94SAndrea Arcangeli &page_tail->_count); 174471e3aac0SAndrea Arcangeli 174571e3aac0SAndrea Arcangeli /* after clearing PageTail the gup refcount can be released */ 17463a79d52aSWaiman Long smp_mb__after_atomic(); 174771e3aac0SAndrea Arcangeli 1748f4c18e6fSNaoya Horiguchi page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 174971e3aac0SAndrea Arcangeli page_tail->flags |= (page->flags & 175071e3aac0SAndrea Arcangeli ((1L << PG_referenced) | 175171e3aac0SAndrea Arcangeli (1L << PG_swapbacked) | 175271e3aac0SAndrea Arcangeli (1L << PG_mlocked) | 1753e180cf80SKirill A. Shutemov (1L << PG_uptodate) | 1754e180cf80SKirill A. Shutemov (1L << PG_active) | 1755e180cf80SKirill A. Shutemov (1L << PG_unevictable))); 175671e3aac0SAndrea Arcangeli page_tail->flags |= (1L << PG_dirty); 175771e3aac0SAndrea Arcangeli 175870b50f94SAndrea Arcangeli /* clear PageTail before overwriting first_page */ 175971e3aac0SAndrea Arcangeli smp_wmb(); 176071e3aac0SAndrea Arcangeli 176171e3aac0SAndrea Arcangeli /* 176271e3aac0SAndrea Arcangeli * __split_huge_page_splitting() already set the 176371e3aac0SAndrea Arcangeli * splitting bit in all pmd that could map this 176471e3aac0SAndrea Arcangeli * hugepage, that will ensure no CPU can alter the 176571e3aac0SAndrea Arcangeli * mapcount on the head page. The mapcount is only 176671e3aac0SAndrea Arcangeli * accounted in the head page and it has to be 176771e3aac0SAndrea Arcangeli * transferred to all tail pages in the below code. So 176871e3aac0SAndrea Arcangeli * for this code to be safe, the split the mapcount 176971e3aac0SAndrea Arcangeli * can't change. But that doesn't mean userland can't 177071e3aac0SAndrea Arcangeli * keep changing and reading the page contents while 177171e3aac0SAndrea Arcangeli * we transfer the mapcount, so the pmd splitting 177271e3aac0SAndrea Arcangeli * status is achieved setting a reserved bit in the 177371e3aac0SAndrea Arcangeli * pmd, not by clearing the present bit. 
177471e3aac0SAndrea Arcangeli */ 177571e3aac0SAndrea Arcangeli page_tail->_mapcount = page->_mapcount; 177671e3aac0SAndrea Arcangeli 177771e3aac0SAndrea Arcangeli BUG_ON(page_tail->mapping); 177871e3aac0SAndrea Arcangeli page_tail->mapping = page->mapping; 177971e3aac0SAndrea Arcangeli 178045676885SShaohua Li page_tail->index = page->index + i; 178190572890SPeter Zijlstra page_cpupid_xchg_last(page_tail, page_cpupid_last(page)); 178271e3aac0SAndrea Arcangeli 178371e3aac0SAndrea Arcangeli BUG_ON(!PageAnon(page_tail)); 178471e3aac0SAndrea Arcangeli BUG_ON(!PageUptodate(page_tail)); 178571e3aac0SAndrea Arcangeli BUG_ON(!PageDirty(page_tail)); 178671e3aac0SAndrea Arcangeli BUG_ON(!PageSwapBacked(page_tail)); 178771e3aac0SAndrea Arcangeli 17885bc7b8acSShaohua Li lru_add_page_tail(page, page_tail, lruvec, list); 178971e3aac0SAndrea Arcangeli } 179070b50f94SAndrea Arcangeli atomic_sub(tail_count, &page->_count); 179170b50f94SAndrea Arcangeli BUG_ON(atomic_read(&page->_count) <= 0); 179271e3aac0SAndrea Arcangeli 1793fa9add64SHugh Dickins __mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1); 179479134171SAndrea Arcangeli 179571e3aac0SAndrea Arcangeli ClearPageCompound(page); 179671e3aac0SAndrea Arcangeli compound_unlock(page); 179771e3aac0SAndrea Arcangeli spin_unlock_irq(&zone->lru_lock); 179871e3aac0SAndrea Arcangeli 179971e3aac0SAndrea Arcangeli for (i = 1; i < HPAGE_PMD_NR; i++) { 180071e3aac0SAndrea Arcangeli struct page *page_tail = page + i; 180171e3aac0SAndrea Arcangeli BUG_ON(page_count(page_tail) <= 0); 180271e3aac0SAndrea Arcangeli /* 180371e3aac0SAndrea Arcangeli * Tail pages may be freed if there wasn't any mapping 180471e3aac0SAndrea Arcangeli * like if add_to_swap() is running on a lru page that 180571e3aac0SAndrea Arcangeli * had its mapping zapped. And freeing these pages 180671e3aac0SAndrea Arcangeli * requires taking the lru_lock so we do the put_page 180771e3aac0SAndrea Arcangeli * of the tail pages after the split is complete. 180871e3aac0SAndrea Arcangeli */ 180971e3aac0SAndrea Arcangeli put_page(page_tail); 181071e3aac0SAndrea Arcangeli } 181171e3aac0SAndrea Arcangeli 181271e3aac0SAndrea Arcangeli /* 181371e3aac0SAndrea Arcangeli * Only the head page (now become a regular page) is required 181471e3aac0SAndrea Arcangeli * to be pinned by the caller. 181571e3aac0SAndrea Arcangeli */ 181671e3aac0SAndrea Arcangeli BUG_ON(page_count(page) <= 0); 181771e3aac0SAndrea Arcangeli } 181871e3aac0SAndrea Arcangeli 181971e3aac0SAndrea Arcangeli static int __split_huge_page_map(struct page *page, 182071e3aac0SAndrea Arcangeli struct vm_area_struct *vma, 182171e3aac0SAndrea Arcangeli unsigned long address) 182271e3aac0SAndrea Arcangeli { 182371e3aac0SAndrea Arcangeli struct mm_struct *mm = vma->vm_mm; 1824117b0791SKirill A. Shutemov spinlock_t *ptl; 182571e3aac0SAndrea Arcangeli pmd_t *pmd, _pmd; 182671e3aac0SAndrea Arcangeli int ret = 0, i; 182771e3aac0SAndrea Arcangeli pgtable_t pgtable; 182871e3aac0SAndrea Arcangeli unsigned long haddr; 182971e3aac0SAndrea Arcangeli 183071e3aac0SAndrea Arcangeli pmd = page_check_address_pmd(page, mm, address, 1831117b0791SKirill A. 
Shutemov PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, &ptl); 183271e3aac0SAndrea Arcangeli if (pmd) { 18336b0b50b0SAneesh Kumar K.V pgtable = pgtable_trans_huge_withdraw(mm, pmd); 183471e3aac0SAndrea Arcangeli pmd_populate(mm, &_pmd, pgtable); 1835f8303c25SWaiman Long if (pmd_write(*pmd)) 1836f8303c25SWaiman Long BUG_ON(page_mapcount(page) != 1); 183771e3aac0SAndrea Arcangeli 1838e3ebcf64SGerald Schaefer haddr = address; 1839e3ebcf64SGerald Schaefer for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 184071e3aac0SAndrea Arcangeli pte_t *pte, entry; 184171e3aac0SAndrea Arcangeli BUG_ON(PageCompound(page+i)); 1842abc40bd2SMel Gorman /* 18438a0516edSMel Gorman * Note that NUMA hinting access restrictions are not 18448a0516edSMel Gorman * transferred to avoid any possibility of altering 18458a0516edSMel Gorman * permissions across VMAs. 1846abc40bd2SMel Gorman */ 184771e3aac0SAndrea Arcangeli entry = mk_pte(page + i, vma->vm_page_prot); 184871e3aac0SAndrea Arcangeli entry = maybe_mkwrite(pte_mkdirty(entry), vma); 184971e3aac0SAndrea Arcangeli if (!pmd_write(*pmd)) 185071e3aac0SAndrea Arcangeli entry = pte_wrprotect(entry); 185171e3aac0SAndrea Arcangeli if (!pmd_young(*pmd)) 185271e3aac0SAndrea Arcangeli entry = pte_mkold(entry); 185371e3aac0SAndrea Arcangeli pte = pte_offset_map(&_pmd, haddr); 185471e3aac0SAndrea Arcangeli BUG_ON(!pte_none(*pte)); 185571e3aac0SAndrea Arcangeli set_pte_at(mm, haddr, pte, entry); 185671e3aac0SAndrea Arcangeli pte_unmap(pte); 185771e3aac0SAndrea Arcangeli } 185871e3aac0SAndrea Arcangeli 185971e3aac0SAndrea Arcangeli smp_wmb(); /* make pte visible before pmd */ 186071e3aac0SAndrea Arcangeli /* 186171e3aac0SAndrea Arcangeli * Up to this point the pmd is present and huge and 186271e3aac0SAndrea Arcangeli * userland has the whole access to the hugepage 186371e3aac0SAndrea Arcangeli * during the split (which happens in place). If we 186471e3aac0SAndrea Arcangeli * overwrite the pmd with the not-huge version 186571e3aac0SAndrea Arcangeli * pointing to the pte here (which of course we could 186671e3aac0SAndrea Arcangeli * if all CPUs were bug free), userland could trigger 186771e3aac0SAndrea Arcangeli * a small page size TLB miss on the small sized TLB 186871e3aac0SAndrea Arcangeli * while the hugepage TLB entry is still established 186971e3aac0SAndrea Arcangeli * in the huge TLB. Some CPU doesn't like that. See 187071e3aac0SAndrea Arcangeli * http://support.amd.com/us/Processor_TechDocs/41322.pdf, 187171e3aac0SAndrea Arcangeli * Erratum 383 on page 93. Intel should be safe but is 187271e3aac0SAndrea Arcangeli * also warns that it's only safe if the permission 187371e3aac0SAndrea Arcangeli * and cache attributes of the two entries loaded in 187471e3aac0SAndrea Arcangeli * the two TLB is identical (which should be the case 187571e3aac0SAndrea Arcangeli * here). But it is generally safer to never allow 187671e3aac0SAndrea Arcangeli * small and huge TLB entries for the same virtual 187771e3aac0SAndrea Arcangeli * address to be loaded simultaneously. 
So instead of 187871e3aac0SAndrea Arcangeli * doing "pmd_populate(); flush_tlb_range();" we first 187971e3aac0SAndrea Arcangeli * mark the current pmd notpresent (atomically because 188071e3aac0SAndrea Arcangeli * here the pmd_trans_huge and pmd_trans_splitting 188171e3aac0SAndrea Arcangeli * must remain set at all times on the pmd until the 188271e3aac0SAndrea Arcangeli * split is complete for this pmd), then we flush the 188371e3aac0SAndrea Arcangeli * SMP TLB and finally we write the non-huge version 188471e3aac0SAndrea Arcangeli * of the pmd entry with pmd_populate. 188571e3aac0SAndrea Arcangeli */ 188646dcde73SGerald Schaefer pmdp_invalidate(vma, address, pmd); 188771e3aac0SAndrea Arcangeli pmd_populate(mm, pmd, pgtable); 188871e3aac0SAndrea Arcangeli ret = 1; 1889117b0791SKirill A. Shutemov spin_unlock(ptl); 189071e3aac0SAndrea Arcangeli } 189171e3aac0SAndrea Arcangeli 189271e3aac0SAndrea Arcangeli return ret; 189371e3aac0SAndrea Arcangeli } 189471e3aac0SAndrea Arcangeli 18955a505085SIngo Molnar /* must be called with anon_vma->root->rwsem held */ 189671e3aac0SAndrea Arcangeli static void __split_huge_page(struct page *page, 18975bc7b8acSShaohua Li struct anon_vma *anon_vma, 18985bc7b8acSShaohua Li struct list_head *list) 189971e3aac0SAndrea Arcangeli { 190071e3aac0SAndrea Arcangeli int mapcount, mapcount2; 1901bf181b9fSMichel Lespinasse pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 190271e3aac0SAndrea Arcangeli struct anon_vma_chain *avc; 190371e3aac0SAndrea Arcangeli 190471e3aac0SAndrea Arcangeli BUG_ON(!PageHead(page)); 190571e3aac0SAndrea Arcangeli BUG_ON(PageTail(page)); 190671e3aac0SAndrea Arcangeli 190771e3aac0SAndrea Arcangeli mapcount = 0; 1908bf181b9fSMichel Lespinasse anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { 190971e3aac0SAndrea Arcangeli struct vm_area_struct *vma = avc->vma; 191071e3aac0SAndrea Arcangeli unsigned long addr = vma_address(page, vma); 191171e3aac0SAndrea Arcangeli BUG_ON(is_vma_temporary_stack(vma)); 191271e3aac0SAndrea Arcangeli mapcount += __split_huge_page_splitting(page, vma, addr); 191371e3aac0SAndrea Arcangeli } 191405759d38SAndrea Arcangeli /* 191505759d38SAndrea Arcangeli * It is critical that new vmas are added to the tail of the 191605759d38SAndrea Arcangeli * anon_vma list. This guarantes that if copy_huge_pmd() runs 191705759d38SAndrea Arcangeli * and establishes a child pmd before 191805759d38SAndrea Arcangeli * __split_huge_page_splitting() freezes the parent pmd (so if 191905759d38SAndrea Arcangeli * we fail to prevent copy_huge_pmd() from running until the 192005759d38SAndrea Arcangeli * whole __split_huge_page() is complete), we will still see 192105759d38SAndrea Arcangeli * the newly established pmd of the child later during the 192205759d38SAndrea Arcangeli * walk, to be able to set it as pmd_trans_splitting too. 192305759d38SAndrea Arcangeli */ 1924ff9e43ebSKirill A. Shutemov if (mapcount != page_mapcount(page)) { 1925ae3a8c1cSAndrew Morton pr_err("mapcount %d page_mapcount %d\n", 192605759d38SAndrea Arcangeli mapcount, page_mapcount(page)); 1927ff9e43ebSKirill A. Shutemov BUG(); 1928ff9e43ebSKirill A. 
Shutemov } 192971e3aac0SAndrea Arcangeli 19305bc7b8acSShaohua Li __split_huge_page_refcount(page, list); 193171e3aac0SAndrea Arcangeli 193271e3aac0SAndrea Arcangeli mapcount2 = 0; 1933bf181b9fSMichel Lespinasse anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { 193471e3aac0SAndrea Arcangeli struct vm_area_struct *vma = avc->vma; 193571e3aac0SAndrea Arcangeli unsigned long addr = vma_address(page, vma); 193671e3aac0SAndrea Arcangeli BUG_ON(is_vma_temporary_stack(vma)); 193771e3aac0SAndrea Arcangeli mapcount2 += __split_huge_page_map(page, vma, addr); 193871e3aac0SAndrea Arcangeli } 1939ff9e43ebSKirill A. Shutemov if (mapcount != mapcount2) { 1940ae3a8c1cSAndrew Morton pr_err("mapcount %d mapcount2 %d page_mapcount %d\n", 194105759d38SAndrea Arcangeli mapcount, mapcount2, page_mapcount(page)); 1942ff9e43ebSKirill A. Shutemov BUG(); 1943ff9e43ebSKirill A. Shutemov } 194471e3aac0SAndrea Arcangeli } 194571e3aac0SAndrea Arcangeli 19465bc7b8acSShaohua Li /* 19475bc7b8acSShaohua Li * Split a hugepage into normal pages. This doesn't change the position of head 19485bc7b8acSShaohua Li * page. If @list is null, tail pages will be added to LRU list, otherwise, to 19495bc7b8acSShaohua Li * @list. Both head page and tail pages will inherit mapping, flags, and so on 19505bc7b8acSShaohua Li * from the hugepage. 19515bc7b8acSShaohua Li * Return 0 if the hugepage is split successfully otherwise return 1. 19525bc7b8acSShaohua Li */ 19535bc7b8acSShaohua Li int split_huge_page_to_list(struct page *page, struct list_head *list) 195471e3aac0SAndrea Arcangeli { 195571e3aac0SAndrea Arcangeli struct anon_vma *anon_vma; 195671e3aac0SAndrea Arcangeli int ret = 1; 195771e3aac0SAndrea Arcangeli 19585918d10aSKirill A. Shutemov BUG_ON(is_huge_zero_page(page)); 195971e3aac0SAndrea Arcangeli BUG_ON(!PageAnon(page)); 1960062f1af2SMel Gorman 1961062f1af2SMel Gorman /* 1962062f1af2SMel Gorman * The caller does not necessarily hold an mmap_sem that would prevent 1963062f1af2SMel Gorman * the anon_vma disappearing so we first we take a reference to it 1964062f1af2SMel Gorman * and then lock the anon_vma for write. This is similar to 1965062f1af2SMel Gorman * page_lock_anon_vma_read except the write lock is taken to serialise 1966062f1af2SMel Gorman * against parallel split or collapse operations. 
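 *
 * As an aside, most call sites go through the split_huge_page() wrapper,
 * which is simply split_huge_page_to_list(page, NULL). A hedged usage
 * sketch, assuming the caller already holds a reference on an anonymous
 * head page (the label is invented for illustration):
 *
 *	if (PageTransHuge(page) && unlikely(split_huge_page(page)))
 *		goto keep_huge;
 *
 * A non-zero return means the split did not happen (for instance the
 * anon_vma was already gone) and the hugepage remains intact.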
1967062f1af2SMel Gorman */
1968062f1af2SMel Gorman anon_vma = page_get_anon_vma(page);
196971e3aac0SAndrea Arcangeli if (!anon_vma)
197071e3aac0SAndrea Arcangeli goto out;
1971062f1af2SMel Gorman anon_vma_lock_write(anon_vma);
1972062f1af2SMel Gorman
197371e3aac0SAndrea Arcangeli ret = 0;
197471e3aac0SAndrea Arcangeli if (!PageCompound(page))
197571e3aac0SAndrea Arcangeli goto out_unlock;
197671e3aac0SAndrea Arcangeli
197771e3aac0SAndrea Arcangeli BUG_ON(!PageSwapBacked(page));
19785bc7b8acSShaohua Li __split_huge_page(page, anon_vma, list);
197981ab4201SAndi Kleen count_vm_event(THP_SPLIT);
198071e3aac0SAndrea Arcangeli
198171e3aac0SAndrea Arcangeli BUG_ON(PageCompound(page));
198271e3aac0SAndrea Arcangeli out_unlock:
198308b52706SKonstantin Khlebnikov anon_vma_unlock_write(anon_vma);
1984062f1af2SMel Gorman put_anon_vma(anon_vma);
198571e3aac0SAndrea Arcangeli out:
198671e3aac0SAndrea Arcangeli return ret;
198771e3aac0SAndrea Arcangeli }
198871e3aac0SAndrea Arcangeli
19899050d7ebSVlastimil Babka #define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
199078f11a25SAndrea Arcangeli
199160ab3244SAndrea Arcangeli int hugepage_madvise(struct vm_area_struct *vma,
199260ab3244SAndrea Arcangeli unsigned long *vm_flags, int advice)
19930af4e98bSAndrea Arcangeli {
1994a664b2d8SAndrea Arcangeli switch (advice) {
1995a664b2d8SAndrea Arcangeli case MADV_HUGEPAGE:
19961e1836e8SAlex Thorlton #ifdef CONFIG_S390
19971e1836e8SAlex Thorlton /*
19981e1836e8SAlex Thorlton * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
19991e1836e8SAlex Thorlton * can't handle this properly after s390_enable_sie, so we simply
20001e1836e8SAlex Thorlton * ignore the madvise to prevent qemu from causing a SIGSEGV.
20011e1836e8SAlex Thorlton */
20021e1836e8SAlex Thorlton if (mm_has_pgste(vma->vm_mm))
20031e1836e8SAlex Thorlton return 0;
20041e1836e8SAlex Thorlton #endif
20050af4e98bSAndrea Arcangeli /*
20060af4e98bSAndrea Arcangeli * Be somewhat over-protective like KSM for now!
20070af4e98bSAndrea Arcangeli */
200878f11a25SAndrea Arcangeli if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
20090af4e98bSAndrea Arcangeli return -EINVAL;
2010a664b2d8SAndrea Arcangeli *vm_flags &= ~VM_NOHUGEPAGE;
20110af4e98bSAndrea Arcangeli *vm_flags |= VM_HUGEPAGE;
201260ab3244SAndrea Arcangeli /*
201360ab3244SAndrea Arcangeli * If the vma becomes good for khugepaged to scan,
201460ab3244SAndrea Arcangeli * register it here without waiting for a page fault that
201560ab3244SAndrea Arcangeli * may not happen any time soon.
201660ab3244SAndrea Arcangeli */
20176d50e60cSDavid Rientjes if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
201860ab3244SAndrea Arcangeli return -ENOMEM;
2019a664b2d8SAndrea Arcangeli break;
2020a664b2d8SAndrea Arcangeli case MADV_NOHUGEPAGE:
2021a664b2d8SAndrea Arcangeli /*
2022a664b2d8SAndrea Arcangeli * Be somewhat over-protective like KSM for now!
2023a664b2d8SAndrea Arcangeli */
202478f11a25SAndrea Arcangeli if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
2025a664b2d8SAndrea Arcangeli return -EINVAL;
2026a664b2d8SAndrea Arcangeli *vm_flags &= ~VM_HUGEPAGE;
2027a664b2d8SAndrea Arcangeli *vm_flags |= VM_NOHUGEPAGE;
202860ab3244SAndrea Arcangeli /*
202960ab3244SAndrea Arcangeli * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
203060ab3244SAndrea Arcangeli * this vma even if we leave the mm registered in khugepaged if
203160ab3244SAndrea Arcangeli * it got registered before VM_NOHUGEPAGE was set.
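 *
 * As an aside, a hedged userspace sketch of the path into this switch,
 * assuming a 2MB HPAGE_PMD_SIZE (illustrative only, not part of this
 * file):
 *
 *	posix_memalign(&buf, 2UL << 20, len);
 *	madvise(buf, len, MADV_HUGEPAGE);	(handled by the case above)
 *	madvise(buf, len, MADV_NOHUGEPAGE);	(handled by this case)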
203260ab3244SAndrea Arcangeli */ 2033a664b2d8SAndrea Arcangeli break; 2034a664b2d8SAndrea Arcangeli } 20350af4e98bSAndrea Arcangeli 20360af4e98bSAndrea Arcangeli return 0; 20370af4e98bSAndrea Arcangeli } 20380af4e98bSAndrea Arcangeli 2039ba76149fSAndrea Arcangeli static int __init khugepaged_slab_init(void) 2040ba76149fSAndrea Arcangeli { 2041ba76149fSAndrea Arcangeli mm_slot_cache = kmem_cache_create("khugepaged_mm_slot", 2042ba76149fSAndrea Arcangeli sizeof(struct mm_slot), 2043ba76149fSAndrea Arcangeli __alignof__(struct mm_slot), 0, NULL); 2044ba76149fSAndrea Arcangeli if (!mm_slot_cache) 2045ba76149fSAndrea Arcangeli return -ENOMEM; 2046ba76149fSAndrea Arcangeli 2047ba76149fSAndrea Arcangeli return 0; 2048ba76149fSAndrea Arcangeli } 2049ba76149fSAndrea Arcangeli 205065ebb64fSKirill A. Shutemov static void __init khugepaged_slab_exit(void) 205165ebb64fSKirill A. Shutemov { 205265ebb64fSKirill A. Shutemov kmem_cache_destroy(mm_slot_cache); 205365ebb64fSKirill A. Shutemov } 205465ebb64fSKirill A. Shutemov 2055ba76149fSAndrea Arcangeli static inline struct mm_slot *alloc_mm_slot(void) 2056ba76149fSAndrea Arcangeli { 2057ba76149fSAndrea Arcangeli if (!mm_slot_cache) /* initialization failed */ 2058ba76149fSAndrea Arcangeli return NULL; 2059ba76149fSAndrea Arcangeli return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL); 2060ba76149fSAndrea Arcangeli } 2061ba76149fSAndrea Arcangeli 2062ba76149fSAndrea Arcangeli static inline void free_mm_slot(struct mm_slot *mm_slot) 2063ba76149fSAndrea Arcangeli { 2064ba76149fSAndrea Arcangeli kmem_cache_free(mm_slot_cache, mm_slot); 2065ba76149fSAndrea Arcangeli } 2066ba76149fSAndrea Arcangeli 2067ba76149fSAndrea Arcangeli static struct mm_slot *get_mm_slot(struct mm_struct *mm) 2068ba76149fSAndrea Arcangeli { 2069ba76149fSAndrea Arcangeli struct mm_slot *mm_slot; 2070ba76149fSAndrea Arcangeli 2071b67bfe0dSSasha Levin hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm) 2072ba76149fSAndrea Arcangeli if (mm == mm_slot->mm) 2073ba76149fSAndrea Arcangeli return mm_slot; 207443b5fbbdSSasha Levin 2075ba76149fSAndrea Arcangeli return NULL; 2076ba76149fSAndrea Arcangeli } 2077ba76149fSAndrea Arcangeli 2078ba76149fSAndrea Arcangeli static void insert_to_mm_slots_hash(struct mm_struct *mm, 2079ba76149fSAndrea Arcangeli struct mm_slot *mm_slot) 2080ba76149fSAndrea Arcangeli { 2081ba76149fSAndrea Arcangeli mm_slot->mm = mm; 208243b5fbbdSSasha Levin hash_add(mm_slots_hash, &mm_slot->hash, (long)mm); 2083ba76149fSAndrea Arcangeli } 2084ba76149fSAndrea Arcangeli 2085ba76149fSAndrea Arcangeli static inline int khugepaged_test_exit(struct mm_struct *mm) 2086ba76149fSAndrea Arcangeli { 2087ba76149fSAndrea Arcangeli return atomic_read(&mm->mm_users) == 0; 2088ba76149fSAndrea Arcangeli } 2089ba76149fSAndrea Arcangeli 2090ba76149fSAndrea Arcangeli int __khugepaged_enter(struct mm_struct *mm) 2091ba76149fSAndrea Arcangeli { 2092ba76149fSAndrea Arcangeli struct mm_slot *mm_slot; 2093ba76149fSAndrea Arcangeli int wakeup; 2094ba76149fSAndrea Arcangeli 2095ba76149fSAndrea Arcangeli mm_slot = alloc_mm_slot(); 2096ba76149fSAndrea Arcangeli if (!mm_slot) 2097ba76149fSAndrea Arcangeli return -ENOMEM; 2098ba76149fSAndrea Arcangeli 2099ba76149fSAndrea Arcangeli /* __khugepaged_exit() must not run from under us */ 210096dad67fSSasha Levin VM_BUG_ON_MM(khugepaged_test_exit(mm), mm); 2101ba76149fSAndrea Arcangeli if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { 2102ba76149fSAndrea Arcangeli free_mm_slot(mm_slot); 2103ba76149fSAndrea Arcangeli return 0; 
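#if 0
/*
 * A hedged, self-contained sketch of the <linux/hashtable.h> idiom that
 * mm_slots_hash and get_mm_slot() above follow; the "example" names are
 * invented for illustration and are not part of this file.
 */
static DEFINE_HASHTABLE(example_hash, MM_SLOTS_HASH_BITS);

struct example_slot {
	struct hlist_node hash;		/* collision chain link */
	struct mm_struct *mm;		/* the lookup key */
};

static struct example_slot *example_lookup(struct mm_struct *mm)
{
	struct example_slot *slot;

	/* walk only the bucket this mm hashes into */
	hash_for_each_possible(example_hash, slot, hash, (unsigned long)mm)
		if (slot->mm == mm)
			return slot;
	return NULL;
}
#endif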
2104ba76149fSAndrea Arcangeli } 2105ba76149fSAndrea Arcangeli 2106ba76149fSAndrea Arcangeli spin_lock(&khugepaged_mm_lock); 2107ba76149fSAndrea Arcangeli insert_to_mm_slots_hash(mm, mm_slot); 2108ba76149fSAndrea Arcangeli /* 2109ba76149fSAndrea Arcangeli * Insert just behind the scanning cursor, to let the area settle 2110ba76149fSAndrea Arcangeli * down a little. 2111ba76149fSAndrea Arcangeli */ 2112ba76149fSAndrea Arcangeli wakeup = list_empty(&khugepaged_scan.mm_head); 2113ba76149fSAndrea Arcangeli list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head); 2114ba76149fSAndrea Arcangeli spin_unlock(&khugepaged_mm_lock); 2115ba76149fSAndrea Arcangeli 2116ba76149fSAndrea Arcangeli atomic_inc(&mm->mm_count); 2117ba76149fSAndrea Arcangeli if (wakeup) 2118ba76149fSAndrea Arcangeli wake_up_interruptible(&khugepaged_wait); 2119ba76149fSAndrea Arcangeli 2120ba76149fSAndrea Arcangeli return 0; 2121ba76149fSAndrea Arcangeli } 2122ba76149fSAndrea Arcangeli 21236d50e60cSDavid Rientjes int khugepaged_enter_vma_merge(struct vm_area_struct *vma, 21246d50e60cSDavid Rientjes unsigned long vm_flags) 2125ba76149fSAndrea Arcangeli { 2126ba76149fSAndrea Arcangeli unsigned long hstart, hend; 2127ba76149fSAndrea Arcangeli if (!vma->anon_vma) 2128ba76149fSAndrea Arcangeli /* 2129ba76149fSAndrea Arcangeli * Not yet faulted in so we will register later in the 2130ba76149fSAndrea Arcangeli * page fault if needed. 2131ba76149fSAndrea Arcangeli */ 2132ba76149fSAndrea Arcangeli return 0; 213378f11a25SAndrea Arcangeli if (vma->vm_ops) 2134ba76149fSAndrea Arcangeli /* khugepaged not yet working on file or special mappings */ 2135ba76149fSAndrea Arcangeli return 0; 21366d50e60cSDavid Rientjes VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma); 2137ba76149fSAndrea Arcangeli hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2138ba76149fSAndrea Arcangeli hend = vma->vm_end & HPAGE_PMD_MASK; 2139ba76149fSAndrea Arcangeli if (hstart < hend) 21406d50e60cSDavid Rientjes return khugepaged_enter(vma, vm_flags); 2141ba76149fSAndrea Arcangeli return 0; 2142ba76149fSAndrea Arcangeli } 2143ba76149fSAndrea Arcangeli 2144ba76149fSAndrea Arcangeli void __khugepaged_exit(struct mm_struct *mm) 2145ba76149fSAndrea Arcangeli { 2146ba76149fSAndrea Arcangeli struct mm_slot *mm_slot; 2147ba76149fSAndrea Arcangeli int free = 0; 2148ba76149fSAndrea Arcangeli 2149ba76149fSAndrea Arcangeli spin_lock(&khugepaged_mm_lock); 2150ba76149fSAndrea Arcangeli mm_slot = get_mm_slot(mm); 2151ba76149fSAndrea Arcangeli if (mm_slot && khugepaged_scan.mm_slot != mm_slot) { 215243b5fbbdSSasha Levin hash_del(&mm_slot->hash); 2153ba76149fSAndrea Arcangeli list_del(&mm_slot->mm_node); 2154ba76149fSAndrea Arcangeli free = 1; 2155ba76149fSAndrea Arcangeli } 2156d788e80aSChris Wright spin_unlock(&khugepaged_mm_lock); 2157ba76149fSAndrea Arcangeli 2158ba76149fSAndrea Arcangeli if (free) { 2159ba76149fSAndrea Arcangeli clear_bit(MMF_VM_HUGEPAGE, &mm->flags); 2160ba76149fSAndrea Arcangeli free_mm_slot(mm_slot); 2161ba76149fSAndrea Arcangeli mmdrop(mm); 2162ba76149fSAndrea Arcangeli } else if (mm_slot) { 2163ba76149fSAndrea Arcangeli /* 2164ba76149fSAndrea Arcangeli * This is required to serialize against 2165ba76149fSAndrea Arcangeli * khugepaged_test_exit() (which is guaranteed to run 2166ba76149fSAndrea Arcangeli * under mmap sem read mode). 
Stop here (after we 2167ba76149fSAndrea Arcangeli * return, all pagetables will be destroyed) until 2168ba76149fSAndrea Arcangeli * khugepaged has finished working on the pagetables 2169ba76149fSAndrea Arcangeli * under the mmap_sem. 2170ba76149fSAndrea Arcangeli */ 2171ba76149fSAndrea Arcangeli down_write(&mm->mmap_sem); 2172ba76149fSAndrea Arcangeli up_write(&mm->mmap_sem); 2173d788e80aSChris Wright } 2174ba76149fSAndrea Arcangeli } 2175ba76149fSAndrea Arcangeli 2176ba76149fSAndrea Arcangeli static void release_pte_page(struct page *page) 2177ba76149fSAndrea Arcangeli { 2178ba76149fSAndrea Arcangeli /* 0 stands for page_is_file_cache(page) == false */ 2179ba76149fSAndrea Arcangeli dec_zone_page_state(page, NR_ISOLATED_ANON + 0); 2180ba76149fSAndrea Arcangeli unlock_page(page); 2181ba76149fSAndrea Arcangeli putback_lru_page(page); 2182ba76149fSAndrea Arcangeli } 2183ba76149fSAndrea Arcangeli 2184ba76149fSAndrea Arcangeli static void release_pte_pages(pte_t *pte, pte_t *_pte) 2185ba76149fSAndrea Arcangeli { 2186ba76149fSAndrea Arcangeli while (--_pte >= pte) { 2187ba76149fSAndrea Arcangeli pte_t pteval = *_pte; 2188ca0984caSEbru Akagunduz if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval))) 2189ba76149fSAndrea Arcangeli release_pte_page(pte_page(pteval)); 2190ba76149fSAndrea Arcangeli } 2191ba76149fSAndrea Arcangeli } 2192ba76149fSAndrea Arcangeli 2193ba76149fSAndrea Arcangeli static int __collapse_huge_page_isolate(struct vm_area_struct *vma, 2194ba76149fSAndrea Arcangeli unsigned long address, 2195ba76149fSAndrea Arcangeli pte_t *pte) 2196ba76149fSAndrea Arcangeli { 2197ba76149fSAndrea Arcangeli struct page *page; 2198ba76149fSAndrea Arcangeli pte_t *_pte; 2199ca0984caSEbru Akagunduz int none_or_zero = 0; 220010359213SEbru Akagunduz bool referenced = false, writable = false; 2201ba76149fSAndrea Arcangeli for (_pte = pte; _pte < pte+HPAGE_PMD_NR; 2202ba76149fSAndrea Arcangeli _pte++, address += PAGE_SIZE) { 2203ba76149fSAndrea Arcangeli pte_t pteval = *_pte; 2204ca0984caSEbru Akagunduz if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { 2205c1294d05SAndrea Arcangeli if (!userfaultfd_armed(vma) && 2206c1294d05SAndrea Arcangeli ++none_or_zero <= khugepaged_max_ptes_none) 2207ba76149fSAndrea Arcangeli continue; 2208344aa35cSBob Liu else 2209ba76149fSAndrea Arcangeli goto out; 2210ba76149fSAndrea Arcangeli } 221110359213SEbru Akagunduz if (!pte_present(pteval)) 2212ba76149fSAndrea Arcangeli goto out; 2213ba76149fSAndrea Arcangeli page = vm_normal_page(vma, address, pteval); 2214344aa35cSBob Liu if (unlikely(!page)) 2215ba76149fSAndrea Arcangeli goto out; 2216344aa35cSBob Liu 2217309381feSSasha Levin VM_BUG_ON_PAGE(PageCompound(page), page); 2218309381feSSasha Levin VM_BUG_ON_PAGE(!PageAnon(page), page); 2219309381feSSasha Levin VM_BUG_ON_PAGE(!PageSwapBacked(page), page); 2220ba76149fSAndrea Arcangeli 2221ba76149fSAndrea Arcangeli /* 2222ba76149fSAndrea Arcangeli * We can do it before isolate_lru_page because the 2223ba76149fSAndrea Arcangeli * page can't be freed from under us. NOTE: PG_lock 2224ba76149fSAndrea Arcangeli * is needed to serialize against split_huge_page 2225ba76149fSAndrea Arcangeli * when invoked from the VM. 2226ba76149fSAndrea Arcangeli */ 2227344aa35cSBob Liu if (!trylock_page(page)) 2228ba76149fSAndrea Arcangeli goto out; 222910359213SEbru Akagunduz 223010359213SEbru Akagunduz /* 223110359213SEbru Akagunduz * cannot use mapcount: can't collapse if there's a gup pin.
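 * A gup pin takes a reference on the page without raising its
 * mapcount, so an elevated page_count() is the only reliable way
 * to detect one.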
223210359213SEbru Akagunduz * The page must only be referenced by the scanned process 223310359213SEbru Akagunduz * and page swap cache. 223410359213SEbru Akagunduz */ 223510359213SEbru Akagunduz if (page_count(page) != 1 + !!PageSwapCache(page)) { 223610359213SEbru Akagunduz unlock_page(page); 223710359213SEbru Akagunduz goto out; 223810359213SEbru Akagunduz } 223910359213SEbru Akagunduz if (pte_write(pteval)) { 224010359213SEbru Akagunduz writable = true; 224110359213SEbru Akagunduz } else { 224210359213SEbru Akagunduz if (PageSwapCache(page) && !reuse_swap_page(page)) { 224310359213SEbru Akagunduz unlock_page(page); 224410359213SEbru Akagunduz goto out; 224510359213SEbru Akagunduz } 224610359213SEbru Akagunduz /* 224710359213SEbru Akagunduz * Page is not in the swap cache. It can be collapsed 224810359213SEbru Akagunduz * into a THP. 224910359213SEbru Akagunduz */ 225010359213SEbru Akagunduz } 225110359213SEbru Akagunduz 2252ba76149fSAndrea Arcangeli /* 2253ba76149fSAndrea Arcangeli * Isolate the page to avoid collapsing an hugepage 2254ba76149fSAndrea Arcangeli * currently in use by the VM. 2255ba76149fSAndrea Arcangeli */ 2256ba76149fSAndrea Arcangeli if (isolate_lru_page(page)) { 2257ba76149fSAndrea Arcangeli unlock_page(page); 2258ba76149fSAndrea Arcangeli goto out; 2259ba76149fSAndrea Arcangeli } 2260ba76149fSAndrea Arcangeli /* 0 stands for page_is_file_cache(page) == false */ 2261ba76149fSAndrea Arcangeli inc_zone_page_state(page, NR_ISOLATED_ANON + 0); 2262309381feSSasha Levin VM_BUG_ON_PAGE(!PageLocked(page), page); 2263309381feSSasha Levin VM_BUG_ON_PAGE(PageLRU(page), page); 2264ba76149fSAndrea Arcangeli 2265ba76149fSAndrea Arcangeli /* If no mapped pte is young, don't collapse the page */ 22668ee53820SAndrea Arcangeli if (pte_young(pteval) || PageReferenced(page) || 22678ee53820SAndrea Arcangeli mmu_notifier_test_young(vma->vm_mm, address)) 226810359213SEbru Akagunduz referenced = true; 2269ba76149fSAndrea Arcangeli } 227010359213SEbru Akagunduz if (likely(referenced && writable)) 2271344aa35cSBob Liu return 1; 2272ba76149fSAndrea Arcangeli out: 2273344aa35cSBob Liu release_pte_pages(pte, _pte); 2274344aa35cSBob Liu return 0; 2275ba76149fSAndrea Arcangeli } 2276ba76149fSAndrea Arcangeli 2277ba76149fSAndrea Arcangeli static void __collapse_huge_page_copy(pte_t *pte, struct page *page, 2278ba76149fSAndrea Arcangeli struct vm_area_struct *vma, 2279ba76149fSAndrea Arcangeli unsigned long address, 2280ba76149fSAndrea Arcangeli spinlock_t *ptl) 2281ba76149fSAndrea Arcangeli { 2282ba76149fSAndrea Arcangeli pte_t *_pte; 2283ba76149fSAndrea Arcangeli for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) { 2284ba76149fSAndrea Arcangeli pte_t pteval = *_pte; 2285ba76149fSAndrea Arcangeli struct page *src_page; 2286ba76149fSAndrea Arcangeli 2287ca0984caSEbru Akagunduz if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { 2288ba76149fSAndrea Arcangeli clear_user_highpage(page, address); 2289ba76149fSAndrea Arcangeli add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); 2290ca0984caSEbru Akagunduz if (is_zero_pfn(pte_pfn(pteval))) { 2291ca0984caSEbru Akagunduz /* 2292ca0984caSEbru Akagunduz * ptl mostly unnecessary. 2293ca0984caSEbru Akagunduz */ 2294ca0984caSEbru Akagunduz spin_lock(ptl); 2295ca0984caSEbru Akagunduz /* 2296ca0984caSEbru Akagunduz * paravirt calls inside pte_clear here are 2297ca0984caSEbru Akagunduz * superfluous.
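 * The pmd was already cleared and TLB-flushed by the caller via
 * pmdp_collapse_flush(), so these ptes can no longer be reached
 * by hardware walks or gup_fast; pte_clear() only tidies up the
 * old page table.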
2298ca0984caSEbru Akagunduz */ 2299ca0984caSEbru Akagunduz pte_clear(vma->vm_mm, address, _pte); 2300ca0984caSEbru Akagunduz spin_unlock(ptl); 2301ca0984caSEbru Akagunduz } 2302ba76149fSAndrea Arcangeli } else { 2303ba76149fSAndrea Arcangeli src_page = pte_page(pteval); 2304ba76149fSAndrea Arcangeli copy_user_highpage(page, src_page, address, vma); 2305309381feSSasha Levin VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page); 2306ba76149fSAndrea Arcangeli release_pte_page(src_page); 2307ba76149fSAndrea Arcangeli /* 2308ba76149fSAndrea Arcangeli * ptl mostly unnecessary, but preempt has to 2309ba76149fSAndrea Arcangeli * be disabled to update the per-cpu stats 2310ba76149fSAndrea Arcangeli * inside page_remove_rmap(). 2311ba76149fSAndrea Arcangeli */ 2312ba76149fSAndrea Arcangeli spin_lock(ptl); 2313ba76149fSAndrea Arcangeli /* 2314ba76149fSAndrea Arcangeli * paravirt calls inside pte_clear here are 2315ba76149fSAndrea Arcangeli * superfluous. 2316ba76149fSAndrea Arcangeli */ 2317ba76149fSAndrea Arcangeli pte_clear(vma->vm_mm, address, _pte); 2318ba76149fSAndrea Arcangeli page_remove_rmap(src_page); 2319ba76149fSAndrea Arcangeli spin_unlock(ptl); 2320ba76149fSAndrea Arcangeli free_page_and_swap_cache(src_page); 2321ba76149fSAndrea Arcangeli } 2322ba76149fSAndrea Arcangeli 2323ba76149fSAndrea Arcangeli address += PAGE_SIZE; 2324ba76149fSAndrea Arcangeli page++; 2325ba76149fSAndrea Arcangeli } 2326ba76149fSAndrea Arcangeli } 2327ba76149fSAndrea Arcangeli 232826234f36SXiao Guangrong static void khugepaged_alloc_sleep(void) 232926234f36SXiao Guangrong { 233026234f36SXiao Guangrong wait_event_freezable_timeout(khugepaged_wait, false, 233126234f36SXiao Guangrong msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); 233226234f36SXiao Guangrong } 233326234f36SXiao Guangrong 23349f1b868aSBob Liu static int khugepaged_node_load[MAX_NUMNODES]; 23359f1b868aSBob Liu 233614a4e214SDavid Rientjes static bool khugepaged_scan_abort(int nid) 233714a4e214SDavid Rientjes { 233814a4e214SDavid Rientjes int i; 233914a4e214SDavid Rientjes 234014a4e214SDavid Rientjes /* 234114a4e214SDavid Rientjes * If zone_reclaim_mode is disabled, then no extra effort is made to 234214a4e214SDavid Rientjes * allocate memory locally. 
234314a4e214SDavid Rientjes */ 234414a4e214SDavid Rientjes if (!zone_reclaim_mode) 234514a4e214SDavid Rientjes return false; 234614a4e214SDavid Rientjes 234714a4e214SDavid Rientjes /* If there is a count for this node already, it must be acceptable */ 234814a4e214SDavid Rientjes if (khugepaged_node_load[nid]) 234914a4e214SDavid Rientjes return false; 235014a4e214SDavid Rientjes 235114a4e214SDavid Rientjes for (i = 0; i < MAX_NUMNODES; i++) { 235214a4e214SDavid Rientjes if (!khugepaged_node_load[i]) 235314a4e214SDavid Rientjes continue; 235414a4e214SDavid Rientjes if (node_distance(nid, i) > RECLAIM_DISTANCE) 235514a4e214SDavid Rientjes return true; 235614a4e214SDavid Rientjes } 235714a4e214SDavid Rientjes return false; 235814a4e214SDavid Rientjes } 235914a4e214SDavid Rientjes 236026234f36SXiao Guangrong #ifdef CONFIG_NUMA 23619f1b868aSBob Liu static int khugepaged_find_target_node(void) 23629f1b868aSBob Liu { 23639f1b868aSBob Liu static int last_khugepaged_target_node = NUMA_NO_NODE; 23649f1b868aSBob Liu int nid, target_node = 0, max_value = 0; 23659f1b868aSBob Liu 23669f1b868aSBob Liu /* find first node with max normal pages hit */ 23679f1b868aSBob Liu for (nid = 0; nid < MAX_NUMNODES; nid++) 23689f1b868aSBob Liu if (khugepaged_node_load[nid] > max_value) { 23699f1b868aSBob Liu max_value = khugepaged_node_load[nid]; 23709f1b868aSBob Liu target_node = nid; 23719f1b868aSBob Liu } 23729f1b868aSBob Liu 23739f1b868aSBob Liu /* balance the choice if several nodes have the same hit record */ 23749f1b868aSBob Liu if (target_node <= last_khugepaged_target_node) 23759f1b868aSBob Liu for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES; 23769f1b868aSBob Liu nid++) 23779f1b868aSBob Liu if (max_value == khugepaged_node_load[nid]) { 23789f1b868aSBob Liu target_node = nid; 23799f1b868aSBob Liu break; 23809f1b868aSBob Liu } 23819f1b868aSBob Liu 23829f1b868aSBob Liu last_khugepaged_target_node = target_node; 23839f1b868aSBob Liu return target_node; 23849f1b868aSBob Liu } 23859f1b868aSBob Liu 238626234f36SXiao Guangrong static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) 238726234f36SXiao Guangrong { 238826234f36SXiao Guangrong if (IS_ERR(*hpage)) { 238926234f36SXiao Guangrong if (!*wait) 239026234f36SXiao Guangrong return false; 239126234f36SXiao Guangrong 239226234f36SXiao Guangrong *wait = false; 2393e3b4126cSXiao Guangrong *hpage = NULL; 239426234f36SXiao Guangrong khugepaged_alloc_sleep(); 239526234f36SXiao Guangrong } else if (*hpage) { 239626234f36SXiao Guangrong put_page(*hpage); 239726234f36SXiao Guangrong *hpage = NULL; 239826234f36SXiao Guangrong } 239926234f36SXiao Guangrong 240026234f36SXiao Guangrong return true; 240126234f36SXiao Guangrong } 240226234f36SXiao Guangrong 24033b363692SMichal Hocko static struct page * 24043b363692SMichal Hocko khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, 240526234f36SXiao Guangrong struct vm_area_struct *vma, unsigned long address, 240626234f36SXiao Guangrong int node) 240726234f36SXiao Guangrong { 2408309381feSSasha Levin VM_BUG_ON_PAGE(*hpage, *hpage); 24098b164568SVlastimil Babka 241026234f36SXiao Guangrong /* 24118b164568SVlastimil Babka * Before allocating the hugepage, release the mmap_sem read lock. 24128b164568SVlastimil Babka * The allocation can take potentially a long time if it involves 24138b164568SVlastimil Babka * sync compaction, and we do not need to hold the mmap_sem during 24148b164568SVlastimil Babka * that. We will recheck the vma after taking it again in write mode.
241526234f36SXiao Guangrong */ 241626234f36SXiao Guangrong up_read(&mm->mmap_sem); 24178b164568SVlastimil Babka 24183b363692SMichal Hocko *hpage = alloc_pages_exact_node(node, gfp, HPAGE_PMD_ORDER); 241926234f36SXiao Guangrong if (unlikely(!*hpage)) { 242026234f36SXiao Guangrong count_vm_event(THP_COLLAPSE_ALLOC_FAILED); 242126234f36SXiao Guangrong *hpage = ERR_PTR(-ENOMEM); 242226234f36SXiao Guangrong return NULL; 242326234f36SXiao Guangrong } 242426234f36SXiao Guangrong 242526234f36SXiao Guangrong count_vm_event(THP_COLLAPSE_ALLOC); 242626234f36SXiao Guangrong return *hpage; 242726234f36SXiao Guangrong } 242826234f36SXiao Guangrong #else 24299f1b868aSBob Liu static int khugepaged_find_target_node(void) 24309f1b868aSBob Liu { 24319f1b868aSBob Liu return 0; 24329f1b868aSBob Liu } 24339f1b868aSBob Liu 243410dc4155SBob Liu static inline struct page *alloc_hugepage(int defrag) 243510dc4155SBob Liu { 243610dc4155SBob Liu return alloc_pages(alloc_hugepage_gfpmask(defrag, 0), 243710dc4155SBob Liu HPAGE_PMD_ORDER); 243810dc4155SBob Liu } 243910dc4155SBob Liu 244026234f36SXiao Guangrong static struct page *khugepaged_alloc_hugepage(bool *wait) 244126234f36SXiao Guangrong { 244226234f36SXiao Guangrong struct page *hpage; 244326234f36SXiao Guangrong 244426234f36SXiao Guangrong do { 244526234f36SXiao Guangrong hpage = alloc_hugepage(khugepaged_defrag()); 244626234f36SXiao Guangrong if (!hpage) { 244726234f36SXiao Guangrong count_vm_event(THP_COLLAPSE_ALLOC_FAILED); 244826234f36SXiao Guangrong if (!*wait) 244926234f36SXiao Guangrong return NULL; 245026234f36SXiao Guangrong 245126234f36SXiao Guangrong *wait = false; 245226234f36SXiao Guangrong khugepaged_alloc_sleep(); 245326234f36SXiao Guangrong } else 245426234f36SXiao Guangrong count_vm_event(THP_COLLAPSE_ALLOC); 245526234f36SXiao Guangrong } while (unlikely(!hpage) && likely(khugepaged_enabled())); 245626234f36SXiao Guangrong 245726234f36SXiao Guangrong return hpage; 245826234f36SXiao Guangrong } 245926234f36SXiao Guangrong 246026234f36SXiao Guangrong static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) 246126234f36SXiao Guangrong { 246226234f36SXiao Guangrong if (!*hpage) 246326234f36SXiao Guangrong *hpage = khugepaged_alloc_hugepage(wait); 246426234f36SXiao Guangrong 246526234f36SXiao Guangrong if (unlikely(!*hpage)) 246626234f36SXiao Guangrong return false; 246726234f36SXiao Guangrong 246826234f36SXiao Guangrong return true; 246926234f36SXiao Guangrong } 247026234f36SXiao Guangrong 24713b363692SMichal Hocko static struct page * 24723b363692SMichal Hocko khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, 247326234f36SXiao Guangrong struct vm_area_struct *vma, unsigned long address, 247426234f36SXiao Guangrong int node) 247526234f36SXiao Guangrong { 247626234f36SXiao Guangrong up_read(&mm->mmap_sem); 247726234f36SXiao Guangrong VM_BUG_ON(!*hpage); 24783b363692SMichal Hocko 247926234f36SXiao Guangrong return *hpage; 248026234f36SXiao Guangrong } 248126234f36SXiao Guangrong #endif 248226234f36SXiao Guangrong 2483fa475e51SBob Liu static bool hugepage_vma_check(struct vm_area_struct *vma) 2484fa475e51SBob Liu { 2485fa475e51SBob Liu if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || 2486fa475e51SBob Liu (vma->vm_flags & VM_NOHUGEPAGE)) 2487fa475e51SBob Liu return false; 2488fa475e51SBob Liu 2489fa475e51SBob Liu if (!vma->anon_vma || vma->vm_ops) 2490fa475e51SBob Liu return false; 2491fa475e51SBob Liu if (is_vma_temporary_stack(vma)) 2492fa475e51SBob Liu return false; 249381d1b09cSSasha Levin 
VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma); 2494fa475e51SBob Liu return true; 2495fa475e51SBob Liu } 2496fa475e51SBob Liu 2497ba76149fSAndrea Arcangeli static void collapse_huge_page(struct mm_struct *mm, 2498ba76149fSAndrea Arcangeli unsigned long address, 2499ce83d217SAndrea Arcangeli struct page **hpage, 25005c4b4be3SAndi Kleen struct vm_area_struct *vma, 25015c4b4be3SAndi Kleen int node) 2502ba76149fSAndrea Arcangeli { 2503ba76149fSAndrea Arcangeli pmd_t *pmd, _pmd; 2504ba76149fSAndrea Arcangeli pte_t *pte; 2505ba76149fSAndrea Arcangeli pgtable_t pgtable; 2506ba76149fSAndrea Arcangeli struct page *new_page; 2507c4088ebdSKirill A. Shutemov spinlock_t *pmd_ptl, *pte_ptl; 2508ba76149fSAndrea Arcangeli int isolated; 2509ba76149fSAndrea Arcangeli unsigned long hstart, hend; 251000501b53SJohannes Weiner struct mem_cgroup *memcg; 25112ec74c3eSSagi Grimberg unsigned long mmun_start; /* For mmu_notifiers */ 25122ec74c3eSSagi Grimberg unsigned long mmun_end; /* For mmu_notifiers */ 25133b363692SMichal Hocko gfp_t gfp; 2514ba76149fSAndrea Arcangeli 2515ba76149fSAndrea Arcangeli VM_BUG_ON(address & ~HPAGE_PMD_MASK); 2516692e0b35SAndrea Arcangeli 25173b363692SMichal Hocko /* Only allocate from the target node */ 25183b363692SMichal Hocko gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) | 25193b363692SMichal Hocko __GFP_THISNODE; 25203b363692SMichal Hocko 252126234f36SXiao Guangrong /* release the mmap_sem read lock. */ 25223b363692SMichal Hocko new_page = khugepaged_alloc_page(hpage, gfp, mm, vma, address, node); 252326234f36SXiao Guangrong if (!new_page) 2524ce83d217SAndrea Arcangeli return; 2525ce83d217SAndrea Arcangeli 252600501b53SJohannes Weiner if (unlikely(mem_cgroup_try_charge(new_page, mm, 25273b363692SMichal Hocko gfp, &memcg))) 2528692e0b35SAndrea Arcangeli return; 2529ba76149fSAndrea Arcangeli 2530ba76149fSAndrea Arcangeli /* 2531ba76149fSAndrea Arcangeli * Prevent all access to pagetables with the exception of 2532ba76149fSAndrea Arcangeli * gup_fast later handled by the pmdp_collapse_flush and the VM 2533ba76149fSAndrea Arcangeli * handled by the anon_vma lock + PG_lock. 2534ba76149fSAndrea Arcangeli */ 2535ba76149fSAndrea Arcangeli down_write(&mm->mmap_sem); 2536ba76149fSAndrea Arcangeli if (unlikely(khugepaged_test_exit(mm))) 2537ba76149fSAndrea Arcangeli goto out; 2538ba76149fSAndrea Arcangeli 2539ba76149fSAndrea Arcangeli vma = find_vma(mm, address); 2540a8f531ebSLibin if (!vma) 2541a8f531ebSLibin goto out; 2542ba76149fSAndrea Arcangeli hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2543ba76149fSAndrea Arcangeli hend = vma->vm_end & HPAGE_PMD_MASK; 2544ba76149fSAndrea Arcangeli if (address < hstart || address + HPAGE_PMD_SIZE > hend) 2545ba76149fSAndrea Arcangeli goto out; 2546fa475e51SBob Liu if (!hugepage_vma_check(vma)) 2547ba76149fSAndrea Arcangeli goto out; 25486219049aSBob Liu pmd = mm_find_pmd(mm, address); 25496219049aSBob Liu if (!pmd) 2550ba76149fSAndrea Arcangeli goto out; 2551ba76149fSAndrea Arcangeli 25524fc3f1d6SIngo Molnar anon_vma_lock_write(vma->anon_vma); 2553ba76149fSAndrea Arcangeli 2554ba76149fSAndrea Arcangeli pte = pte_offset_map(pmd, address); 2555c4088ebdSKirill A. Shutemov pte_ptl = pte_lockptr(mm, pmd); 2556ba76149fSAndrea Arcangeli 25572ec74c3eSSagi Grimberg mmun_start = address; 25582ec74c3eSSagi Grimberg mmun_end = address + HPAGE_PMD_SIZE; 25592ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 2560c4088ebdSKirill A.
Shutemov pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ 2561ba76149fSAndrea Arcangeli /* 2562ba76149fSAndrea Arcangeli * After this gup_fast can't run anymore. This also removes 2563ba76149fSAndrea Arcangeli * any huge TLB entry from the CPU so we won't allow 2564ba76149fSAndrea Arcangeli * huge and small TLB entries for the same virtual address 2565ba76149fSAndrea Arcangeli * to avoid the risk of CPU bugs in that area. 2566ba76149fSAndrea Arcangeli */ 256715a25b2eSAneesh Kumar K.V _pmd = pmdp_collapse_flush(vma, address, pmd); 2568c4088ebdSKirill A. Shutemov spin_unlock(pmd_ptl); 25692ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2570ba76149fSAndrea Arcangeli 2571c4088ebdSKirill A. Shutemov spin_lock(pte_ptl); 2572ba76149fSAndrea Arcangeli isolated = __collapse_huge_page_isolate(vma, address, pte); 2573c4088ebdSKirill A. Shutemov spin_unlock(pte_ptl); 2574ba76149fSAndrea Arcangeli 2575ba76149fSAndrea Arcangeli if (unlikely(!isolated)) { 2576453c7192SJohannes Weiner pte_unmap(pte); 2577c4088ebdSKirill A. Shutemov spin_lock(pmd_ptl); 2578ba76149fSAndrea Arcangeli BUG_ON(!pmd_none(*pmd)); 25797c342512SAneesh Kumar K.V /* 25807c342512SAneesh Kumar K.V * We can only use set_pmd_at when establishing 25817c342512SAneesh Kumar K.V * hugepmds and never for establishing regular pmds that 25827c342512SAneesh Kumar K.V * point to regular pagetables. Use pmd_populate for that 25837c342512SAneesh Kumar K.V */ 25847c342512SAneesh Kumar K.V pmd_populate(mm, pmd, pmd_pgtable(_pmd)); 2585c4088ebdSKirill A. Shutemov spin_unlock(pmd_ptl); 258608b52706SKonstantin Khlebnikov anon_vma_unlock_write(vma->anon_vma); 2587ce83d217SAndrea Arcangeli goto out; 2588ba76149fSAndrea Arcangeli } 2589ba76149fSAndrea Arcangeli 2590ba76149fSAndrea Arcangeli /* 2591ba76149fSAndrea Arcangeli * All pages are isolated and locked so anon_vma rmap 2592ba76149fSAndrea Arcangeli * can't run anymore. 2593ba76149fSAndrea Arcangeli */ 259408b52706SKonstantin Khlebnikov anon_vma_unlock_write(vma->anon_vma); 2595ba76149fSAndrea Arcangeli 2596c4088ebdSKirill A. Shutemov __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl); 2597453c7192SJohannes Weiner pte_unmap(pte); 2598ba76149fSAndrea Arcangeli __SetPageUptodate(new_page); 2599ba76149fSAndrea Arcangeli pgtable = pmd_pgtable(_pmd); 2600ba76149fSAndrea Arcangeli 26013122359aSKirill A. Shutemov _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); 26023122359aSKirill A. Shutemov _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); 2603ba76149fSAndrea Arcangeli 2604ba76149fSAndrea Arcangeli /* 2605ba76149fSAndrea Arcangeli * spin_lock() below is not the equivalent of smp_wmb(), so 2606ba76149fSAndrea Arcangeli * this is needed to avoid the __collapse_huge_page_copy() writes 2607ba76149fSAndrea Arcangeli * becoming visible after the set_pmd_at() write. 2608ba76149fSAndrea Arcangeli */ 2609ba76149fSAndrea Arcangeli smp_wmb(); 2610ba76149fSAndrea Arcangeli 2611c4088ebdSKirill A. Shutemov spin_lock(pmd_ptl); 2612ba76149fSAndrea Arcangeli BUG_ON(!pmd_none(*pmd)); 2613ba76149fSAndrea Arcangeli page_add_new_anon_rmap(new_page, vma, address); 261400501b53SJohannes Weiner mem_cgroup_commit_charge(new_page, memcg, false); 261500501b53SJohannes Weiner lru_cache_add_active_or_unevictable(new_page, vma); 2616fce144b4SAneesh Kumar K.V pgtable_trans_huge_deposit(mm, pmd, pgtable); 2617ba76149fSAndrea Arcangeli set_pmd_at(mm, address, pmd, _pmd); 2618b113da65SDavid Miller update_mmu_cache_pmd(vma, address, pmd); 2619c4088ebdSKirill A.
Shutemov spin_unlock(pmd_ptl); 2620ba76149fSAndrea Arcangeli 2621ba76149fSAndrea Arcangeli *hpage = NULL; 2622420256efSXiao Guangrong 2623ba76149fSAndrea Arcangeli khugepaged_pages_collapsed++; 2624ce83d217SAndrea Arcangeli out_up_write: 2625ba76149fSAndrea Arcangeli up_write(&mm->mmap_sem); 26260bbbc0b3SAndrea Arcangeli return; 26270bbbc0b3SAndrea Arcangeli 2628ce83d217SAndrea Arcangeli out: 262900501b53SJohannes Weiner mem_cgroup_cancel_charge(new_page, memcg); 2630ce83d217SAndrea Arcangeli goto out_up_write; 2631ba76149fSAndrea Arcangeli } 2632ba76149fSAndrea Arcangeli 2633ba76149fSAndrea Arcangeli static int khugepaged_scan_pmd(struct mm_struct *mm, 2634ba76149fSAndrea Arcangeli struct vm_area_struct *vma, 2635ba76149fSAndrea Arcangeli unsigned long address, 2636ba76149fSAndrea Arcangeli struct page **hpage) 2637ba76149fSAndrea Arcangeli { 2638ba76149fSAndrea Arcangeli pmd_t *pmd; 2639ba76149fSAndrea Arcangeli pte_t *pte, *_pte; 2640ca0984caSEbru Akagunduz int ret = 0, none_or_zero = 0; 2641ba76149fSAndrea Arcangeli struct page *page; 2642ba76149fSAndrea Arcangeli unsigned long _address; 2643ba76149fSAndrea Arcangeli spinlock_t *ptl; 264400ef2d2fSDavid Rientjes int node = NUMA_NO_NODE; 264510359213SEbru Akagunduz bool writable = false, referenced = false; 2646ba76149fSAndrea Arcangeli 2647ba76149fSAndrea Arcangeli VM_BUG_ON(address & ~HPAGE_PMD_MASK); 2648ba76149fSAndrea Arcangeli 26496219049aSBob Liu pmd = mm_find_pmd(mm, address); 26506219049aSBob Liu if (!pmd) 2651ba76149fSAndrea Arcangeli goto out; 2652ba76149fSAndrea Arcangeli 26539f1b868aSBob Liu memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load)); 2654ba76149fSAndrea Arcangeli pte = pte_offset_map_lock(mm, pmd, address, &ptl); 2655ba76149fSAndrea Arcangeli for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR; 2656ba76149fSAndrea Arcangeli _pte++, _address += PAGE_SIZE) { 2657ba76149fSAndrea Arcangeli pte_t pteval = *_pte; 2658ca0984caSEbru Akagunduz if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { 2659c1294d05SAndrea Arcangeli if (!userfaultfd_armed(vma) && 2660c1294d05SAndrea Arcangeli ++none_or_zero <= khugepaged_max_ptes_none) 2661ba76149fSAndrea Arcangeli continue; 2662ba76149fSAndrea Arcangeli else 2663ba76149fSAndrea Arcangeli goto out_unmap; 2664ba76149fSAndrea Arcangeli } 266510359213SEbru Akagunduz if (!pte_present(pteval)) 2666ba76149fSAndrea Arcangeli goto out_unmap; 266710359213SEbru Akagunduz if (pte_write(pteval)) 266810359213SEbru Akagunduz writable = true; 266910359213SEbru Akagunduz 2670ba76149fSAndrea Arcangeli page = vm_normal_page(vma, _address, pteval); 2671ba76149fSAndrea Arcangeli if (unlikely(!page)) 2672ba76149fSAndrea Arcangeli goto out_unmap; 26735c4b4be3SAndi Kleen /* 26749f1b868aSBob Liu * Record which node the original page is from and save this 26759f1b868aSBob Liu * information to khugepaged_node_load[]. 26769f1b868aSBob Liu * Khugepaged will allocate a hugepage from the node that has 26779f1b868aSBob Liu * the max hit record. 26785c4b4be3SAndi Kleen */ 26795c4b4be3SAndi Kleen node = page_to_nid(page); 268014a4e214SDavid Rientjes if (khugepaged_scan_abort(node)) 268114a4e214SDavid Rientjes goto out_unmap; 26829f1b868aSBob Liu khugepaged_node_load[node]++; 2683309381feSSasha Levin VM_BUG_ON_PAGE(PageCompound(page), page); 2684ba76149fSAndrea Arcangeli if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) 2685ba76149fSAndrea Arcangeli goto out_unmap; 268610359213SEbru Akagunduz /* 268710359213SEbru Akagunduz * cannot use mapcount: can't collapse if there's a gup pin.
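 * This unlocked test is only a heuristic: __collapse_huge_page_isolate()
 * repeats it with the page locked before the actual collapse.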
268810359213SEbru Akagunduz * The page must only be referenced by the scanned process 268910359213SEbru Akagunduz * and page swap cache. 269010359213SEbru Akagunduz */ 269110359213SEbru Akagunduz if (page_count(page) != 1 + !!PageSwapCache(page)) 2692ba76149fSAndrea Arcangeli goto out_unmap; 26938ee53820SAndrea Arcangeli if (pte_young(pteval) || PageReferenced(page) || 26948ee53820SAndrea Arcangeli mmu_notifier_test_young(vma->vm_mm, address)) 269510359213SEbru Akagunduz referenced = true; 2696ba76149fSAndrea Arcangeli } 269710359213SEbru Akagunduz if (referenced && writable) 2698ba76149fSAndrea Arcangeli ret = 1; 2699ba76149fSAndrea Arcangeli out_unmap: 2700ba76149fSAndrea Arcangeli pte_unmap_unlock(pte, ptl); 27019f1b868aSBob Liu if (ret) { 27029f1b868aSBob Liu node = khugepaged_find_target_node(); 2703ce83d217SAndrea Arcangeli /* collapse_huge_page will return with the mmap_sem released */ 27045c4b4be3SAndi Kleen collapse_huge_page(mm, address, hpage, vma, node); 27059f1b868aSBob Liu } 2706ba76149fSAndrea Arcangeli out: 2707ba76149fSAndrea Arcangeli return ret; 2708ba76149fSAndrea Arcangeli } 2709ba76149fSAndrea Arcangeli 2710ba76149fSAndrea Arcangeli static void collect_mm_slot(struct mm_slot *mm_slot) 2711ba76149fSAndrea Arcangeli { 2712ba76149fSAndrea Arcangeli struct mm_struct *mm = mm_slot->mm; 2713ba76149fSAndrea Arcangeli 2714b9980cdcSHugh Dickins VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); 2715ba76149fSAndrea Arcangeli 2716ba76149fSAndrea Arcangeli if (khugepaged_test_exit(mm)) { 2717ba76149fSAndrea Arcangeli /* free mm_slot */ 271843b5fbbdSSasha Levin hash_del(&mm_slot->hash); 2719ba76149fSAndrea Arcangeli list_del(&mm_slot->mm_node); 2720ba76149fSAndrea Arcangeli 2721ba76149fSAndrea Arcangeli /* 2722ba76149fSAndrea Arcangeli * Not strictly needed because the mm exited already. 
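 * (the flag only matters while the mm is still alive, so leaving
 * it set on an exiting mm is harmless)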
2723ba76149fSAndrea Arcangeli * 2724ba76149fSAndrea Arcangeli * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); 2725ba76149fSAndrea Arcangeli */ 2726ba76149fSAndrea Arcangeli 2727ba76149fSAndrea Arcangeli /* khugepaged_mm_lock actually not necessary for the below */ 2728ba76149fSAndrea Arcangeli free_mm_slot(mm_slot); 2729ba76149fSAndrea Arcangeli mmdrop(mm); 2730ba76149fSAndrea Arcangeli } 2731ba76149fSAndrea Arcangeli } 2732ba76149fSAndrea Arcangeli 2733ba76149fSAndrea Arcangeli static unsigned int khugepaged_scan_mm_slot(unsigned int pages, 2734ba76149fSAndrea Arcangeli struct page **hpage) 27352f1da642SH Hartley Sweeten __releases(&khugepaged_mm_lock) 27362f1da642SH Hartley Sweeten __acquires(&khugepaged_mm_lock) 2737ba76149fSAndrea Arcangeli { 2738ba76149fSAndrea Arcangeli struct mm_slot *mm_slot; 2739ba76149fSAndrea Arcangeli struct mm_struct *mm; 2740ba76149fSAndrea Arcangeli struct vm_area_struct *vma; 2741ba76149fSAndrea Arcangeli int progress = 0; 2742ba76149fSAndrea Arcangeli 2743ba76149fSAndrea Arcangeli VM_BUG_ON(!pages); 2744b9980cdcSHugh Dickins VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); 2745ba76149fSAndrea Arcangeli 2746ba76149fSAndrea Arcangeli if (khugepaged_scan.mm_slot) 2747ba76149fSAndrea Arcangeli mm_slot = khugepaged_scan.mm_slot; 2748ba76149fSAndrea Arcangeli else { 2749ba76149fSAndrea Arcangeli mm_slot = list_entry(khugepaged_scan.mm_head.next, 2750ba76149fSAndrea Arcangeli struct mm_slot, mm_node); 2751ba76149fSAndrea Arcangeli khugepaged_scan.address = 0; 2752ba76149fSAndrea Arcangeli khugepaged_scan.mm_slot = mm_slot; 2753ba76149fSAndrea Arcangeli } 2754ba76149fSAndrea Arcangeli spin_unlock(&khugepaged_mm_lock); 2755ba76149fSAndrea Arcangeli 2756ba76149fSAndrea Arcangeli mm = mm_slot->mm; 2757ba76149fSAndrea Arcangeli down_read(&mm->mmap_sem); 2758ba76149fSAndrea Arcangeli if (unlikely(khugepaged_test_exit(mm))) 2759ba76149fSAndrea Arcangeli vma = NULL; 2760ba76149fSAndrea Arcangeli else 2761ba76149fSAndrea Arcangeli vma = find_vma(mm, khugepaged_scan.address); 2762ba76149fSAndrea Arcangeli 2763ba76149fSAndrea Arcangeli progress++; 2764ba76149fSAndrea Arcangeli for (; vma; vma = vma->vm_next) { 2765ba76149fSAndrea Arcangeli unsigned long hstart, hend; 2766ba76149fSAndrea Arcangeli 2767ba76149fSAndrea Arcangeli cond_resched(); 2768ba76149fSAndrea Arcangeli if (unlikely(khugepaged_test_exit(mm))) { 2769ba76149fSAndrea Arcangeli progress++; 2770ba76149fSAndrea Arcangeli break; 2771ba76149fSAndrea Arcangeli } 2772fa475e51SBob Liu if (!hugepage_vma_check(vma)) { 2773a7d6e4ecSAndrea Arcangeli skip: 2774ba76149fSAndrea Arcangeli progress++; 2775ba76149fSAndrea Arcangeli continue; 2776ba76149fSAndrea Arcangeli } 2777ba76149fSAndrea Arcangeli hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2778ba76149fSAndrea Arcangeli hend = vma->vm_end & HPAGE_PMD_MASK; 2779a7d6e4ecSAndrea Arcangeli if (hstart >= hend) 2780a7d6e4ecSAndrea Arcangeli goto skip; 2781a7d6e4ecSAndrea Arcangeli if (khugepaged_scan.address > hend) 2782a7d6e4ecSAndrea Arcangeli goto skip; 2783ba76149fSAndrea Arcangeli if (khugepaged_scan.address < hstart) 2784ba76149fSAndrea Arcangeli khugepaged_scan.address = hstart; 2785a7d6e4ecSAndrea Arcangeli VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); 2786ba76149fSAndrea Arcangeli 2787ba76149fSAndrea Arcangeli while (khugepaged_scan.address < hend) { 2788ba76149fSAndrea Arcangeli int ret; 2789ba76149fSAndrea Arcangeli cond_resched(); 2790ba76149fSAndrea Arcangeli if (unlikely(khugepaged_test_exit(mm))) 2791ba76149fSAndrea Arcangeli 
goto breakouterloop; 2792ba76149fSAndrea Arcangeli 2793ba76149fSAndrea Arcangeli VM_BUG_ON(khugepaged_scan.address < hstart || 2794ba76149fSAndrea Arcangeli khugepaged_scan.address + HPAGE_PMD_SIZE > 2795ba76149fSAndrea Arcangeli hend); 2796ba76149fSAndrea Arcangeli ret = khugepaged_scan_pmd(mm, vma, 2797ba76149fSAndrea Arcangeli khugepaged_scan.address, 2798ba76149fSAndrea Arcangeli hpage); 2799ba76149fSAndrea Arcangeli /* move to next address */ 2800ba76149fSAndrea Arcangeli khugepaged_scan.address += HPAGE_PMD_SIZE; 2801ba76149fSAndrea Arcangeli progress += HPAGE_PMD_NR; 2802ba76149fSAndrea Arcangeli if (ret) 2803ba76149fSAndrea Arcangeli /* we released mmap_sem so break loop */ 2804ba76149fSAndrea Arcangeli goto breakouterloop_mmap_sem; 2805ba76149fSAndrea Arcangeli if (progress >= pages) 2806ba76149fSAndrea Arcangeli goto breakouterloop; 2807ba76149fSAndrea Arcangeli } 2808ba76149fSAndrea Arcangeli } 2809ba76149fSAndrea Arcangeli breakouterloop: 2810ba76149fSAndrea Arcangeli up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */ 2811ba76149fSAndrea Arcangeli breakouterloop_mmap_sem: 2812ba76149fSAndrea Arcangeli 2813ba76149fSAndrea Arcangeli spin_lock(&khugepaged_mm_lock); 2814a7d6e4ecSAndrea Arcangeli VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); 2815ba76149fSAndrea Arcangeli /* 2816ba76149fSAndrea Arcangeli * Release the current mm_slot if this mm is about to die, or 2817ba76149fSAndrea Arcangeli * if we scanned all vmas of this mm. 2818ba76149fSAndrea Arcangeli */ 2819ba76149fSAndrea Arcangeli if (khugepaged_test_exit(mm) || !vma) { 2820ba76149fSAndrea Arcangeli /* 2821ba76149fSAndrea Arcangeli * Make sure that if mm_users is reaching zero while 2822ba76149fSAndrea Arcangeli * khugepaged runs here, khugepaged_exit will find 2823ba76149fSAndrea Arcangeli * mm_slot not pointing to the exiting mm. 
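 * Advance the cursor first; collect_mm_slot() below may free the
 * slot we were scanning.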
2824ba76149fSAndrea Arcangeli */ 2825ba76149fSAndrea Arcangeli if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) { 2826ba76149fSAndrea Arcangeli khugepaged_scan.mm_slot = list_entry( 2827ba76149fSAndrea Arcangeli mm_slot->mm_node.next, 2828ba76149fSAndrea Arcangeli struct mm_slot, mm_node); 2829ba76149fSAndrea Arcangeli khugepaged_scan.address = 0; 2830ba76149fSAndrea Arcangeli } else { 2831ba76149fSAndrea Arcangeli khugepaged_scan.mm_slot = NULL; 2832ba76149fSAndrea Arcangeli khugepaged_full_scans++; 2833ba76149fSAndrea Arcangeli } 2834ba76149fSAndrea Arcangeli 2835ba76149fSAndrea Arcangeli collect_mm_slot(mm_slot); 2836ba76149fSAndrea Arcangeli } 2837ba76149fSAndrea Arcangeli 2838ba76149fSAndrea Arcangeli return progress; 2839ba76149fSAndrea Arcangeli } 2840ba76149fSAndrea Arcangeli 2841ba76149fSAndrea Arcangeli static int khugepaged_has_work(void) 2842ba76149fSAndrea Arcangeli { 2843ba76149fSAndrea Arcangeli return !list_empty(&khugepaged_scan.mm_head) && 2844ba76149fSAndrea Arcangeli khugepaged_enabled(); 2845ba76149fSAndrea Arcangeli } 2846ba76149fSAndrea Arcangeli 2847ba76149fSAndrea Arcangeli static int khugepaged_wait_event(void) 2848ba76149fSAndrea Arcangeli { 2849ba76149fSAndrea Arcangeli return !list_empty(&khugepaged_scan.mm_head) || 28502017c0bfSXiao Guangrong kthread_should_stop(); 2851ba76149fSAndrea Arcangeli } 2852ba76149fSAndrea Arcangeli 2853d516904bSXiao Guangrong static void khugepaged_do_scan(void) 2854d516904bSXiao Guangrong { 2855d516904bSXiao Guangrong struct page *hpage = NULL; 2856ba76149fSAndrea Arcangeli unsigned int progress = 0, pass_through_head = 0; 2857ba76149fSAndrea Arcangeli unsigned int pages = khugepaged_pages_to_scan; 2858d516904bSXiao Guangrong bool wait = true; 2859ba76149fSAndrea Arcangeli 2860ba76149fSAndrea Arcangeli barrier(); /* write khugepaged_pages_to_scan to local stack */ 2861ba76149fSAndrea Arcangeli 2862ba76149fSAndrea Arcangeli while (progress < pages) { 286326234f36SXiao Guangrong if (!khugepaged_prealloc_page(&hpage, &wait)) 286426234f36SXiao Guangrong break; 2865d516904bSXiao Guangrong 2866420256efSXiao Guangrong cond_resched(); 2867ba76149fSAndrea Arcangeli 2868cd092411SJiri Kosina if (unlikely(kthread_should_stop() || try_to_freeze())) 2869878aee7dSAndrea Arcangeli break; 2870878aee7dSAndrea Arcangeli 2871ba76149fSAndrea Arcangeli spin_lock(&khugepaged_mm_lock); 2872ba76149fSAndrea Arcangeli if (!khugepaged_scan.mm_slot) 2873ba76149fSAndrea Arcangeli pass_through_head++; 2874ba76149fSAndrea Arcangeli if (khugepaged_has_work() && 2875ba76149fSAndrea Arcangeli pass_through_head < 2) 2876ba76149fSAndrea Arcangeli progress += khugepaged_scan_mm_slot(pages - progress, 2877d516904bSXiao Guangrong &hpage); 2878ba76149fSAndrea Arcangeli else 2879ba76149fSAndrea Arcangeli progress = pages; 2880ba76149fSAndrea Arcangeli spin_unlock(&khugepaged_mm_lock); 2881ba76149fSAndrea Arcangeli } 2882ba76149fSAndrea Arcangeli 2883d516904bSXiao Guangrong if (!IS_ERR_OR_NULL(hpage)) 2884d516904bSXiao Guangrong put_page(hpage); 2885ba76149fSAndrea Arcangeli } 28860bbbc0b3SAndrea Arcangeli 28872017c0bfSXiao Guangrong static void khugepaged_wait_work(void) 28882017c0bfSXiao Guangrong { 28892017c0bfSXiao Guangrong if (khugepaged_has_work()) { 28902017c0bfSXiao Guangrong if (!khugepaged_scan_sleep_millisecs) 28912017c0bfSXiao Guangrong return; 28922017c0bfSXiao Guangrong 28932017c0bfSXiao Guangrong wait_event_freezable_timeout(khugepaged_wait, 28942017c0bfSXiao Guangrong kthread_should_stop(), 28952017c0bfSXiao Guangrong 
msecs_to_jiffies(khugepaged_scan_sleep_millisecs)); 28962017c0bfSXiao Guangrong return; 28972017c0bfSXiao Guangrong } 28982017c0bfSXiao Guangrong 28992017c0bfSXiao Guangrong if (khugepaged_enabled()) 29002017c0bfSXiao Guangrong wait_event_freezable(khugepaged_wait, khugepaged_wait_event()); 29012017c0bfSXiao Guangrong } 29022017c0bfSXiao Guangrong 2903ba76149fSAndrea Arcangeli static int khugepaged(void *none) 2904ba76149fSAndrea Arcangeli { 2905ba76149fSAndrea Arcangeli struct mm_slot *mm_slot; 2906ba76149fSAndrea Arcangeli 2907878aee7dSAndrea Arcangeli set_freezable(); 29088698a745SDongsheng Yang set_user_nice(current, MAX_NICE); 2909ba76149fSAndrea Arcangeli 2910b7231789SXiao Guangrong while (!kthread_should_stop()) { 2911b7231789SXiao Guangrong khugepaged_do_scan(); 2912b7231789SXiao Guangrong khugepaged_wait_work(); 2913b7231789SXiao Guangrong } 2914ba76149fSAndrea Arcangeli 2915ba76149fSAndrea Arcangeli spin_lock(&khugepaged_mm_lock); 2916ba76149fSAndrea Arcangeli mm_slot = khugepaged_scan.mm_slot; 2917ba76149fSAndrea Arcangeli khugepaged_scan.mm_slot = NULL; 2918ba76149fSAndrea Arcangeli if (mm_slot) 2919ba76149fSAndrea Arcangeli collect_mm_slot(mm_slot); 2920ba76149fSAndrea Arcangeli spin_unlock(&khugepaged_mm_lock); 2921ba76149fSAndrea Arcangeli return 0; 2922ba76149fSAndrea Arcangeli } 2923ba76149fSAndrea Arcangeli 2924c5a647d0SKirill A. Shutemov static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, 2925c5a647d0SKirill A. Shutemov unsigned long haddr, pmd_t *pmd) 2926c5a647d0SKirill A. Shutemov { 2927c5a647d0SKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 2928c5a647d0SKirill A. Shutemov pgtable_t pgtable; 2929c5a647d0SKirill A. Shutemov pmd_t _pmd; 2930c5a647d0SKirill A. Shutemov int i; 2931c5a647d0SKirill A. Shutemov 29328809aa2dSAneesh Kumar K.V pmdp_huge_clear_flush_notify(vma, haddr, pmd); 2933c5a647d0SKirill A. Shutemov /* leave pmd empty until pte is filled */ 2934c5a647d0SKirill A. Shutemov 29356b0b50b0SAneesh Kumar K.V pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2936c5a647d0SKirill A. Shutemov pmd_populate(mm, &_pmd, pgtable); 2937c5a647d0SKirill A. Shutemov 2938c5a647d0SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 2939c5a647d0SKirill A. Shutemov pte_t *pte, entry; 2940c5a647d0SKirill A. Shutemov entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); 2941c5a647d0SKirill A. Shutemov entry = pte_mkspecial(entry); 2942c5a647d0SKirill A. Shutemov pte = pte_offset_map(&_pmd, haddr); 2943c5a647d0SKirill A. Shutemov VM_BUG_ON(!pte_none(*pte)); 2944c5a647d0SKirill A. Shutemov set_pte_at(mm, haddr, pte, entry); 2945c5a647d0SKirill A. Shutemov pte_unmap(pte); 2946c5a647d0SKirill A. Shutemov } 2947c5a647d0SKirill A. Shutemov smp_wmb(); /* make pte visible before pmd */ 2948c5a647d0SKirill A. Shutemov pmd_populate(mm, pmd, pgtable); 294997ae1749SKirill A. Shutemov put_huge_zero_page(); 2950c5a647d0SKirill A. Shutemov } 2951c5a647d0SKirill A. Shutemov 2952e180377fSKirill A. Shutemov void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address, 2953e180377fSKirill A. Shutemov pmd_t *pmd) 295471e3aac0SAndrea Arcangeli { 2955c4088ebdSKirill A. Shutemov spinlock_t *ptl; 29564897c765SMatthew Wilcox struct page *page = NULL; 2957e180377fSKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 2958c5a647d0SKirill A. Shutemov unsigned long haddr = address & HPAGE_PMD_MASK; 2959c5a647d0SKirill A. Shutemov unsigned long mmun_start; /* For mmu_notifiers */ 2960c5a647d0SKirill A. 
Shutemov unsigned long mmun_end; /* For mmu_notifiers */ 2961e180377fSKirill A. Shutemov 2962e180377fSKirill A. Shutemov BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE); 296371e3aac0SAndrea Arcangeli 2964c5a647d0SKirill A. Shutemov mmun_start = haddr; 2965c5a647d0SKirill A. Shutemov mmun_end = haddr + HPAGE_PMD_SIZE; 2966750e8165SHugh Dickins again: 2967c5a647d0SKirill A. Shutemov mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 2968c4088ebdSKirill A. Shutemov ptl = pmd_lock(mm, pmd); 29694897c765SMatthew Wilcox if (unlikely(!pmd_trans_huge(*pmd))) 29704897c765SMatthew Wilcox goto unlock; 29714897c765SMatthew Wilcox if (vma_is_dax(vma)) { 29725b701b84SKirill A. Shutemov pmd_t _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); 29735b701b84SKirill A. Shutemov if (is_huge_zero_pmd(_pmd)) 29745b701b84SKirill A. Shutemov put_huge_zero_page(); 29754897c765SMatthew Wilcox } else if (is_huge_zero_pmd(*pmd)) { 2976c5a647d0SKirill A. Shutemov __split_huge_zero_page_pmd(vma, haddr, pmd); 29774897c765SMatthew Wilcox } else { 297871e3aac0SAndrea Arcangeli page = pmd_page(*pmd); 2979309381feSSasha Levin VM_BUG_ON_PAGE(!page_count(page), page); 298071e3aac0SAndrea Arcangeli get_page(page); 29814897c765SMatthew Wilcox } 29824897c765SMatthew Wilcox unlock: 2983c4088ebdSKirill A. Shutemov spin_unlock(ptl); 2984c5a647d0SKirill A. Shutemov mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 298571e3aac0SAndrea Arcangeli 29864897c765SMatthew Wilcox if (!page) 29874897c765SMatthew Wilcox return; 298871e3aac0SAndrea Arcangeli 29894897c765SMatthew Wilcox split_huge_page(page); 299071e3aac0SAndrea Arcangeli put_page(page); 2991750e8165SHugh Dickins 2992750e8165SHugh Dickins /* 2993750e8165SHugh Dickins * We don't always have down_write of mmap_sem here: a racing 2994750e8165SHugh Dickins * do_huge_pmd_wp_page() might have copied-on-write to another 2995750e8165SHugh Dickins * huge page before our split_huge_page() got the anon_vma lock. 2996750e8165SHugh Dickins */ 2997750e8165SHugh Dickins if (unlikely(pmd_trans_huge(*pmd))) 2998750e8165SHugh Dickins goto again; 299971e3aac0SAndrea Arcangeli } 300094fcc585SAndrea Arcangeli 3001e180377fSKirill A. Shutemov void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address, 3002e180377fSKirill A. Shutemov pmd_t *pmd) 3003e180377fSKirill A. Shutemov { 3004e180377fSKirill A. Shutemov struct vm_area_struct *vma; 3005e180377fSKirill A. Shutemov 3006e180377fSKirill A. Shutemov vma = find_vma(mm, address); 3007e180377fSKirill A. Shutemov BUG_ON(vma == NULL); 3008e180377fSKirill A. Shutemov split_huge_page_pmd(vma, address, pmd); 3009e180377fSKirill A. Shutemov } 3010e180377fSKirill A. 
Shutemov 301194fcc585SAndrea Arcangeli static void split_huge_page_address(struct mm_struct *mm, 301294fcc585SAndrea Arcangeli unsigned long address) 301394fcc585SAndrea Arcangeli { 3014f72e7dcdSHugh Dickins pgd_t *pgd; 3015f72e7dcdSHugh Dickins pud_t *pud; 301694fcc585SAndrea Arcangeli pmd_t *pmd; 301794fcc585SAndrea Arcangeli 301894fcc585SAndrea Arcangeli VM_BUG_ON(!(address & ~HPAGE_PMD_MASK)); 301994fcc585SAndrea Arcangeli 3020f72e7dcdSHugh Dickins pgd = pgd_offset(mm, address); 3021f72e7dcdSHugh Dickins if (!pgd_present(*pgd)) 3022f72e7dcdSHugh Dickins return; 3023f72e7dcdSHugh Dickins 3024f72e7dcdSHugh Dickins pud = pud_offset(pgd, address); 3025f72e7dcdSHugh Dickins if (!pud_present(*pud)) 3026f72e7dcdSHugh Dickins return; 3027f72e7dcdSHugh Dickins 3028f72e7dcdSHugh Dickins pmd = pmd_offset(pud, address); 3029f72e7dcdSHugh Dickins if (!pmd_present(*pmd)) 303094fcc585SAndrea Arcangeli return; 303194fcc585SAndrea Arcangeli /* 303294fcc585SAndrea Arcangeli * Caller holds the mmap_sem write mode, so a huge pmd cannot 303394fcc585SAndrea Arcangeli * materialize from under us. 303494fcc585SAndrea Arcangeli */ 3035e180377fSKirill A. Shutemov split_huge_page_pmd_mm(mm, address, pmd); 303694fcc585SAndrea Arcangeli } 303794fcc585SAndrea Arcangeli 3038e1b9996bSKirill A. Shutemov void vma_adjust_trans_huge(struct vm_area_struct *vma, 303994fcc585SAndrea Arcangeli unsigned long start, 304094fcc585SAndrea Arcangeli unsigned long end, 304194fcc585SAndrea Arcangeli long adjust_next) 304294fcc585SAndrea Arcangeli { 304394fcc585SAndrea Arcangeli /* 304494fcc585SAndrea Arcangeli * If the new start address isn't hpage aligned and it could 304594fcc585SAndrea Arcangeli * previously contain an hugepage: check if we need to split 304694fcc585SAndrea Arcangeli * an huge pmd. 304794fcc585SAndrea Arcangeli */ 304894fcc585SAndrea Arcangeli if (start & ~HPAGE_PMD_MASK && 304994fcc585SAndrea Arcangeli (start & HPAGE_PMD_MASK) >= vma->vm_start && 305094fcc585SAndrea Arcangeli (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) 305194fcc585SAndrea Arcangeli split_huge_page_address(vma->vm_mm, start); 305294fcc585SAndrea Arcangeli 305394fcc585SAndrea Arcangeli /* 305494fcc585SAndrea Arcangeli * If the new end address isn't hpage aligned and it could 305594fcc585SAndrea Arcangeli * previously contain an hugepage: check if we need to split 305694fcc585SAndrea Arcangeli * an huge pmd. 305794fcc585SAndrea Arcangeli */ 305894fcc585SAndrea Arcangeli if (end & ~HPAGE_PMD_MASK && 305994fcc585SAndrea Arcangeli (end & HPAGE_PMD_MASK) >= vma->vm_start && 306094fcc585SAndrea Arcangeli (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) 306194fcc585SAndrea Arcangeli split_huge_page_address(vma->vm_mm, end); 306294fcc585SAndrea Arcangeli 306394fcc585SAndrea Arcangeli /* 306494fcc585SAndrea Arcangeli * If we're also updating the vma->vm_next->vm_start, if the new 306594fcc585SAndrea Arcangeli * vm_next->vm_start isn't page aligned and it could previously 306694fcc585SAndrea Arcangeli * contain an hugepage: check if we need to split an huge pmd. 
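 * (adjust_next is expressed in pages, hence the << PAGE_SHIFT when
 * computing the new vm_start below)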
306794fcc585SAndrea Arcangeli */ 306894fcc585SAndrea Arcangeli if (adjust_next > 0) { 306994fcc585SAndrea Arcangeli struct vm_area_struct *next = vma->vm_next; 307094fcc585SAndrea Arcangeli unsigned long nstart = next->vm_start; 307194fcc585SAndrea Arcangeli nstart += adjust_next << PAGE_SHIFT; 307294fcc585SAndrea Arcangeli if (nstart & ~HPAGE_PMD_MASK && 307394fcc585SAndrea Arcangeli (nstart & HPAGE_PMD_MASK) >= next->vm_start && 307494fcc585SAndrea Arcangeli (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end) 307594fcc585SAndrea Arcangeli split_huge_page_address(next->vm_mm, nstart); 307694fcc585SAndrea Arcangeli } 307794fcc585SAndrea Arcangeli } 3078
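/*
 * Worked example of the alignment arithmetic used above, assuming
 * x86 with HPAGE_PMD_SIZE == 2MB (so ~HPAGE_PMD_MASK == 0x1fffff):
 *
 *	vm_start = 0x1ff000:
 *		hstart = (0x1ff000 + 0x1fffff) & ~0x1fffff = 0x200000
 *	vm_end = 0x601000:
 *		hend = 0x601000 & ~0x1fffff = 0x600000
 *
 * i.e. hstart rounds up and hend rounds down to the hugepage
 * boundary, leaving [0x200000, 0x600000) as the range that can
 * actually hold huge pmds (two of them here).
 */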