/*
 * Copyright (C) 2009  Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default transparent hugepage support is disabled, so as not to risk
 * increasing the memory footprint of applications without a guaranteed
 * benefit. When transparent hugepage support is enabled, it applies to all
 * mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

/* default scan 8*512 pte (or vmas) every 30 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * default collapse hugepages if there is at least one pte mapped like
 * it would have happened if the vma was large enough during page
 * fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;

static int khugepaged(void *none);
static int khugepaged_slab_init(void);

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};
static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};


static int set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	if (!khugepaged_enabled())
		return 0;

	for_each_populated_zone(zone)
		nr_zones++;

	/* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* don't ever allow to reserve more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu "
				"to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}
	setup_per_zone_wmarks();
	return 0;
}
late_initcall(set_recommended_min_free_kbytes);

static int start_khugepaged(void)
{
	int err = 0;
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (unlikely(IS_ERR(khugepaged_thread))) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
		}

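		/*
		 * Descriptive note (not in the original source): if mms were
		 * already queued for scanning, e.g. via khugepaged_enter()
		 * before the daemon was running, wake the freshly started
		 * khugepaged so it does not sleep a full scan interval
		 * before noticing them.
		 */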
		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}

	return err;
}

static atomic_t huge_zero_refcount;
static struct page *huge_zero_page __read_mostly;

static inline bool is_huge_zero_page(struct page *page)
{
	return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

static struct page *get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return ACCESS_ONCE(huge_zero_page);

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return NULL;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_page(zero_page);
		goto retry;
	}

	/* We take an additional reference here. It will be put back by the shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return ACCESS_ONCE(huge_zero_page);
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		__free_page(zero_page);
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};

#ifdef CONFIG_SYSFS

static ssize_t double_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag enabled,
				enum transparent_hugepage_flag req_madv)
{
	if (test_bit(enabled, &transparent_hugepage_flags)) {
		VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
		return sprintf(buf, "[always] madvise never\n");
	} else if (test_bit(req_madv, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}
static ssize_t double_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag enabled,
				 enum transparent_hugepage_flag req_madv)
{
	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		set_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		set_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}

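/*
 * Illustrative note (not in the original source): the two helpers above back
 * the three-state sysfs knobs defined below. Assuming the default sysfs
 * mount point, the "enabled" attribute is driven from userspace with e.g.:
 *
 *	echo always  > /sys/kernel/mm/transparent_hugepage/enabled
 *	echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 *	echo never   > /sys/kernel/mm/transparent_hugepage/enabled
 *
 * and double_flag_show() reports the current selection in brackets,
 * e.g. "always [madvise] never".
 */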
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
}
static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret;

	ret = double_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);

	if (ret > 0) {
		int err;

		mutex_lock(&khugepaged_mutex);
		err = start_khugepaged();
		mutex_unlock(&khugepaged_mutex);

		if (err)
			ret = err;
	}

	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);

static ssize_t single_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	return sprintf(buf, "%d\n",
		       !!test_bit(flag, &transparent_hugepage_flags));
}

static ssize_t single_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

/*
 * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
 * __GFP_REPEAT is too aggressive, it's never worth swapping tons of
 * memory just to allocate one more hugepage.
 */
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return double_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);

static ssize_t use_zero_page_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr =
	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long pages;

	err = kstrtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls if khugepaged should collapse hugepages over
 * any unmapped ptes in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

static struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 0;
		return -EINVAL;
	}

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		return err;

	err = khugepaged_slab_init();
	if (err)
		goto out;

	register_shrinker(&huge_zero_page_shrinker);

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save. The admin can still enable it through /sys.
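	 *
	 * (Editorial worked example, not in the original comment:
	 * 512 << (20 - PAGE_SHIFT) is the number of pages in 512MB; with
	 * 4KiB pages, PAGE_SHIFT == 12, that is 512 << 8 == 131072 pages,
	 * so the check below trips on systems with less than ~512MB RAM.)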
	 */
	if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
		transparent_hugepage_flags = 0;

	start_khugepaged();

	return 0;
out:
	hugepage_exit_sysfs(hugepage_kobj);
	return err;
}
subsys_initcall(hugepage_init);

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
{
	pmd_t entry;
	entry = mk_pmd(page, prot);
	entry = pmd_mkhuge(entry);
	return entry;
}

static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd,
					struct page *page)
{
	struct mem_cgroup *memcg;
	pgtable_t pgtable;
	spinlock_t *ptl;

	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (mem_cgroup_try_charge(page, mm, GFP_TRANSHUGE, &memcg))
		return VM_FAULT_OOM;

	pgtable = pte_alloc_one(mm, haddr);
	if (unlikely(!pgtable)) {
		mem_cgroup_cancel_charge(page, memcg);
		return VM_FAULT_OOM;
	}

	clear_huge_page(page, haddr, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__SetPageUptodate(page);

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_none(*pmd))) {
		spin_unlock(ptl);
		mem_cgroup_cancel_charge(page, memcg);
		put_page(page);
		pte_free(mm, pgtable);
	} else {
		pmd_t entry;
		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr);
		mem_cgroup_commit_charge(page, memcg, false);
		lru_cache_add_active_or_unevictable(page, vma);
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		set_pmd_at(mm, haddr, pmd, entry);
		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
		atomic_long_inc(&mm->nr_ptes);
		spin_unlock(ptl);
	}

	return 0;
}

static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
{
	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
}

static inline struct page *alloc_hugepage_vma(int defrag,
					      struct vm_area_struct *vma,
					      unsigned long haddr, int nd,
					      gfp_t extra_gfp)
{
	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
			       HPAGE_PMD_ORDER, vma, haddr, nd);
}

/* Caller must hold page table lock. */
static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return false;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_wrprotect(entry);
	entry = pmd_mkhuge(entry);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	atomic_long_inc(&mm->nr_ptes);
	return true;
}

int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       unsigned int flags)
{
	struct page *page;
	unsigned long haddr = address & HPAGE_PMD_MASK;

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	if (unlikely(khugepaged_enter(vma)))
		return VM_FAULT_OOM;
	if (!(flags & FAULT_FLAG_WRITE) &&
			transparent_hugepage_use_zero_page()) {
		spinlock_t *ptl;
		pgtable_t pgtable;
		struct page *zero_page;
		bool set;
		pgtable = pte_alloc_one(mm, haddr);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = get_huge_zero_page();
		if (unlikely(!zero_page)) {
			pte_free(mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		ptl = pmd_lock(mm, pmd);
		set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
				zero_page);
		spin_unlock(ptl);
		if (!set) {
			pte_free(mm, pgtable);
			put_huge_zero_page();
		}
		return 0;
	}
	page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
			vma, haddr, numa_node_id(), 0);
	if (unlikely(!page)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) {
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}

	count_vm_event(THP_FAULT_ALLOC);
	return 0;
}

int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable;
	int ret;

	ret = -ENOMEM;
	pgtable = pte_alloc_one(dst_mm, addr);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
	src_ptl = pmd_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;
	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	/*
	 * When page table lock is held, the huge zero pmd should not be
	 * under splitting since we don't split the page itself, only pmd to
	 * a page table.
	 */
	if (is_huge_zero_pmd(pmd)) {
		struct page *zero_page;
		bool set;
		/*
		 * get_huge_zero_page() will never allocate a new page here,
		 * since we already have a zero page to copy. It just takes a
		 * reference.
		 */
		zero_page = get_huge_zero_page();
		set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
				zero_page);
		BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */
		ret = 0;
		goto out_unlock;
	}

	if (unlikely(pmd_trans_splitting(pmd))) {
		/* split huge page running from under us */
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
		pte_free(dst_mm, pgtable);

		wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
		goto out;
	}
	src_page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
	get_page(src_page);
	page_dup_rmap(src_page);
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);

	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
	atomic_long_inc(&dst_mm->nr_ptes);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
out:
	return ret;
}

void huge_pmd_set_accessed(struct mm_struct *mm,
			   struct vm_area_struct *vma,
			   unsigned long address,
			   pmd_t *pmd, pmd_t orig_pmd,
			   int dirty)
{
	spinlock_t *ptl;
	pmd_t entry;
	unsigned long haddr;

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto unlock;

	entry = pmd_mkyoung(orig_pmd);
	haddr = address & HPAGE_PMD_MASK;
	if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
		update_mmu_cache_pmd(vma, address, pmd);

unlock:
	spin_unlock(ptl);
}

/*
 * Save CONFIG_DEBUG_PAGEALLOC from faulting falsely on tail pages
 * during copy_user_huge_page()'s copy_page_rep(): in the case when
 * the source page gets split and a tail freed before copy completes.
 * Called under pmd_lock of checked pmd, so safe from splitting itself.
 */
static void get_user_huge_page(struct page *page)
{
	if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
		struct page *endpage = page + HPAGE_PMD_NR;

		atomic_add(HPAGE_PMD_NR, &page->_count);
		while (++page < endpage)
			get_huge_page_tail(page);
	} else {
		get_page(page);
	}
}

static void put_user_huge_page(struct page *page)
{
	if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
		struct page *endpage = page + HPAGE_PMD_NR;

		while (page < endpage)
			put_page(page++);
	} else {
		put_page(page);
	}
}

static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmd, pmd_t orig_pmd,
					struct page *page,
					unsigned long haddr)
{
	struct mem_cgroup *memcg;
	spinlock_t *ptl;
	pgtable_t pgtable;
	pmd_t _pmd;
	int ret = 0, i;
	struct page **pages;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
			GFP_KERNEL);
	if (unlikely(!pages)) {
		ret |= VM_FAULT_OOM;
		goto out;
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
					       __GFP_OTHER_NODE,
					       vma, address, page_to_nid(page));
		if (unlikely(!pages[i] ||
			     mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL,
						   &memcg))) {
			if (pages[i])
				put_page(pages[i]);
			while (--i >= 0) {
				memcg = (void *)page_private(pages[i]);
				set_page_private(pages[i], 0);
				mem_cgroup_cancel_charge(pages[i], memcg);
				put_page(pages[i]);
			}
			kfree(pages);
			ret |= VM_FAULT_OOM;
			goto out;
		}
		set_page_private(pages[i], (unsigned long)memcg);
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
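		/*
		 * Descriptive note (not in the original source): copy the
		 * huge page one base page at a time so cond_resched() can
		 * run between pages instead of holding the CPU for the
		 * whole HPAGE_PMD_NR copy.
		 */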
		copy_user_highpage(pages[i], page + i,
				   haddr + PAGE_SIZE * i, vma);
		__SetPageUptodate(pages[i]);
		cond_resched();
	}

	mmun_start = haddr;
	mmun_end   = haddr + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto out_free_pages;
	VM_BUG_ON_PAGE(!PageHead(page), page);

	pmdp_clear_flush(vma, haddr, pmd);
	/* leave pmd empty until pte is filled */

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		entry = mk_pte(pages[i], vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		memcg = (void *)page_private(pages[i]);
		set_page_private(pages[i], 0);
		page_add_new_anon_rmap(pages[i], vma, haddr);
		mem_cgroup_commit_charge(pages[i], memcg, false);
		lru_cache_add_active_or_unevictable(pages[i], vma);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	kfree(pages);

	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
	page_remove_rmap(page);
	spin_unlock(ptl);

	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	ret |= VM_FAULT_WRITE;
	put_page(page);

out:
	return ret;

out_free_pages:
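	/*
	 * Descriptive note (not in the original source): the pmd changed
	 * under us, so undo everything done above - drop the page table
	 * lock, close the mmu_notifier range, and release the charged
	 * pages allocated for the fallback copy.
	 */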
Shutemov spin_unlock(ptl); 10762ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1077b9bbfbe3SAndrea Arcangeli for (i = 0; i < HPAGE_PMD_NR; i++) { 107800501b53SJohannes Weiner memcg = (void *)page_private(pages[i]); 107900501b53SJohannes Weiner set_page_private(pages[i], 0); 108000501b53SJohannes Weiner mem_cgroup_cancel_charge(pages[i], memcg); 108171e3aac0SAndrea Arcangeli put_page(pages[i]); 1082b9bbfbe3SAndrea Arcangeli } 108371e3aac0SAndrea Arcangeli kfree(pages); 108471e3aac0SAndrea Arcangeli goto out; 108571e3aac0SAndrea Arcangeli } 108671e3aac0SAndrea Arcangeli 108771e3aac0SAndrea Arcangeli int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, 108871e3aac0SAndrea Arcangeli unsigned long address, pmd_t *pmd, pmd_t orig_pmd) 108971e3aac0SAndrea Arcangeli { 1090c4088ebdSKirill A. Shutemov spinlock_t *ptl; 109171e3aac0SAndrea Arcangeli int ret = 0; 109293b4796dSKirill A. Shutemov struct page *page = NULL, *new_page; 109300501b53SJohannes Weiner struct mem_cgroup *memcg; 109471e3aac0SAndrea Arcangeli unsigned long haddr; 10952ec74c3eSSagi Grimberg unsigned long mmun_start; /* For mmu_notifiers */ 10962ec74c3eSSagi Grimberg unsigned long mmun_end; /* For mmu_notifiers */ 109771e3aac0SAndrea Arcangeli 1098c4088ebdSKirill A. Shutemov ptl = pmd_lockptr(mm, pmd); 1099*81d1b09cSSasha Levin VM_BUG_ON_VMA(!vma->anon_vma, vma); 110093b4796dSKirill A. Shutemov haddr = address & HPAGE_PMD_MASK; 110193b4796dSKirill A. Shutemov if (is_huge_zero_pmd(orig_pmd)) 110293b4796dSKirill A. Shutemov goto alloc; 1103c4088ebdSKirill A. Shutemov spin_lock(ptl); 110471e3aac0SAndrea Arcangeli if (unlikely(!pmd_same(*pmd, orig_pmd))) 110571e3aac0SAndrea Arcangeli goto out_unlock; 110671e3aac0SAndrea Arcangeli 110771e3aac0SAndrea Arcangeli page = pmd_page(orig_pmd); 1108309381feSSasha Levin VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page); 110971e3aac0SAndrea Arcangeli if (page_mapcount(page) == 1) { 111071e3aac0SAndrea Arcangeli pmd_t entry; 111171e3aac0SAndrea Arcangeli entry = pmd_mkyoung(orig_pmd); 111271e3aac0SAndrea Arcangeli entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 111371e3aac0SAndrea Arcangeli if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1)) 1114b113da65SDavid Miller update_mmu_cache_pmd(vma, address, pmd); 111571e3aac0SAndrea Arcangeli ret |= VM_FAULT_WRITE; 111671e3aac0SAndrea Arcangeli goto out_unlock; 111771e3aac0SAndrea Arcangeli } 11185338a937SHugh Dickins get_user_huge_page(page); 1119c4088ebdSKirill A. Shutemov spin_unlock(ptl); 112093b4796dSKirill A. Shutemov alloc: 112171e3aac0SAndrea Arcangeli if (transparent_hugepage_enabled(vma) && 112271e3aac0SAndrea Arcangeli !transparent_hugepage_debug_cow()) 11230bbbc0b3SAndrea Arcangeli new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), 1124cc5d462fSAndi Kleen vma, haddr, numa_node_id(), 0); 112571e3aac0SAndrea Arcangeli else 112671e3aac0SAndrea Arcangeli new_page = NULL; 112771e3aac0SAndrea Arcangeli 112871e3aac0SAndrea Arcangeli if (unlikely(!new_page)) { 1129eecc1e42SHugh Dickins if (!page) { 1130e9b71ca9SKirill A. Shutemov split_huge_page_pmd(vma, address, pmd); 1131e9b71ca9SKirill A. Shutemov ret |= VM_FAULT_FALLBACK; 113293b4796dSKirill A. Shutemov } else { 113371e3aac0SAndrea Arcangeli ret = do_huge_pmd_wp_page_fallback(mm, vma, address, 113471e3aac0SAndrea Arcangeli pmd, orig_pmd, page, haddr); 11359845cbbdSKirill A. Shutemov if (ret & VM_FAULT_OOM) { 11361f1d06c3SDavid Rientjes split_huge_page(page); 11379845cbbdSKirill A. 
Shutemov ret |= VM_FAULT_FALLBACK; 11389845cbbdSKirill A. Shutemov } 11395338a937SHugh Dickins put_user_huge_page(page); 114093b4796dSKirill A. Shutemov } 114117766ddeSDavid Rientjes count_vm_event(THP_FAULT_FALLBACK); 114271e3aac0SAndrea Arcangeli goto out; 114371e3aac0SAndrea Arcangeli } 114471e3aac0SAndrea Arcangeli 114500501b53SJohannes Weiner if (unlikely(mem_cgroup_try_charge(new_page, mm, 114600501b53SJohannes Weiner GFP_TRANSHUGE, &memcg))) { 1147b9bbfbe3SAndrea Arcangeli put_page(new_page); 114893b4796dSKirill A. Shutemov if (page) { 11491f1d06c3SDavid Rientjes split_huge_page(page); 11505338a937SHugh Dickins put_user_huge_page(page); 11519845cbbdSKirill A. Shutemov } else 11529845cbbdSKirill A. Shutemov split_huge_page_pmd(vma, address, pmd); 11539845cbbdSKirill A. Shutemov ret |= VM_FAULT_FALLBACK; 115417766ddeSDavid Rientjes count_vm_event(THP_FAULT_FALLBACK); 1155b9bbfbe3SAndrea Arcangeli goto out; 1156b9bbfbe3SAndrea Arcangeli } 1157b9bbfbe3SAndrea Arcangeli 115817766ddeSDavid Rientjes count_vm_event(THP_FAULT_ALLOC); 115917766ddeSDavid Rientjes 1160eecc1e42SHugh Dickins if (!page) 116193b4796dSKirill A. Shutemov clear_huge_page(new_page, haddr, HPAGE_PMD_NR); 116293b4796dSKirill A. Shutemov else 116371e3aac0SAndrea Arcangeli copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); 116471e3aac0SAndrea Arcangeli __SetPageUptodate(new_page); 116571e3aac0SAndrea Arcangeli 11662ec74c3eSSagi Grimberg mmun_start = haddr; 11672ec74c3eSSagi Grimberg mmun_end = haddr + HPAGE_PMD_SIZE; 11682ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 11692ec74c3eSSagi Grimberg 1170c4088ebdSKirill A. Shutemov spin_lock(ptl); 117193b4796dSKirill A. Shutemov if (page) 11725338a937SHugh Dickins put_user_huge_page(page); 1173b9bbfbe3SAndrea Arcangeli if (unlikely(!pmd_same(*pmd, orig_pmd))) { 1174c4088ebdSKirill A. Shutemov spin_unlock(ptl); 117500501b53SJohannes Weiner mem_cgroup_cancel_charge(new_page, memcg); 117671e3aac0SAndrea Arcangeli put_page(new_page); 11772ec74c3eSSagi Grimberg goto out_mn; 1178b9bbfbe3SAndrea Arcangeli } else { 117971e3aac0SAndrea Arcangeli pmd_t entry; 11803122359aSKirill A. Shutemov entry = mk_huge_pmd(new_page, vma->vm_page_prot); 11813122359aSKirill A. Shutemov entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 11822ec74c3eSSagi Grimberg pmdp_clear_flush(vma, haddr, pmd); 118371e3aac0SAndrea Arcangeli page_add_new_anon_rmap(new_page, vma, haddr); 118400501b53SJohannes Weiner mem_cgroup_commit_charge(new_page, memcg, false); 118500501b53SJohannes Weiner lru_cache_add_active_or_unevictable(new_page, vma); 118671e3aac0SAndrea Arcangeli set_pmd_at(mm, haddr, pmd, entry); 1187b113da65SDavid Miller update_mmu_cache_pmd(vma, address, pmd); 1188eecc1e42SHugh Dickins if (!page) { 118993b4796dSKirill A. Shutemov add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); 119097ae1749SKirill A. Shutemov put_huge_zero_page(); 119197ae1749SKirill A. Shutemov } else { 1192309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(page), page); 119371e3aac0SAndrea Arcangeli page_remove_rmap(page); 119471e3aac0SAndrea Arcangeli put_page(page); 119593b4796dSKirill A. Shutemov } 119671e3aac0SAndrea Arcangeli ret |= VM_FAULT_WRITE; 119771e3aac0SAndrea Arcangeli } 1198c4088ebdSKirill A. Shutemov spin_unlock(ptl); 11992ec74c3eSSagi Grimberg out_mn: 12002ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 12012ec74c3eSSagi Grimberg out: 12022ec74c3eSSagi Grimberg return ret; 120371e3aac0SAndrea Arcangeli out_unlock: 1204c4088ebdSKirill A. 
Shutemov spin_unlock(ptl); 120571e3aac0SAndrea Arcangeli return ret; 120671e3aac0SAndrea Arcangeli } 120771e3aac0SAndrea Arcangeli 1208b676b293SDavid Rientjes struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, 120971e3aac0SAndrea Arcangeli unsigned long addr, 121071e3aac0SAndrea Arcangeli pmd_t *pmd, 121171e3aac0SAndrea Arcangeli unsigned int flags) 121271e3aac0SAndrea Arcangeli { 1213b676b293SDavid Rientjes struct mm_struct *mm = vma->vm_mm; 121471e3aac0SAndrea Arcangeli struct page *page = NULL; 121571e3aac0SAndrea Arcangeli 1216c4088ebdSKirill A. Shutemov assert_spin_locked(pmd_lockptr(mm, pmd)); 121771e3aac0SAndrea Arcangeli 121871e3aac0SAndrea Arcangeli if (flags & FOLL_WRITE && !pmd_write(*pmd)) 121971e3aac0SAndrea Arcangeli goto out; 122071e3aac0SAndrea Arcangeli 122185facf25SKirill A. Shutemov /* Avoid dumping huge zero page */ 122285facf25SKirill A. Shutemov if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) 122385facf25SKirill A. Shutemov return ERR_PTR(-EFAULT); 122485facf25SKirill A. Shutemov 12252b4847e7SMel Gorman /* Full NUMA hinting faults to serialise migration in fault paths */ 12262b4847e7SMel Gorman if ((flags & FOLL_NUMA) && pmd_numa(*pmd)) 12272b4847e7SMel Gorman goto out; 12282b4847e7SMel Gorman 122971e3aac0SAndrea Arcangeli page = pmd_page(*pmd); 1230309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(page), page); 123171e3aac0SAndrea Arcangeli if (flags & FOLL_TOUCH) { 123271e3aac0SAndrea Arcangeli pmd_t _pmd; 123371e3aac0SAndrea Arcangeli /* 123471e3aac0SAndrea Arcangeli * We should set the dirty bit only for FOLL_WRITE but 123571e3aac0SAndrea Arcangeli * for now the dirty bit in the pmd is meaningless. 123671e3aac0SAndrea Arcangeli * And if the dirty bit will become meaningful and 123771e3aac0SAndrea Arcangeli * we'll only set it with FOLL_WRITE, an atomic 123871e3aac0SAndrea Arcangeli * set_bit will be required on the pmd to set the 123971e3aac0SAndrea Arcangeli * young bit, instead of the current set_pmd_at. 124071e3aac0SAndrea Arcangeli */ 124171e3aac0SAndrea Arcangeli _pmd = pmd_mkyoung(pmd_mkdirty(*pmd)); 12428663890aSAneesh Kumar K.V if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, 12438663890aSAneesh Kumar K.V pmd, _pmd, 1)) 12448663890aSAneesh Kumar K.V update_mmu_cache_pmd(vma, addr, pmd); 124571e3aac0SAndrea Arcangeli } 1246b676b293SDavid Rientjes if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { 1247b676b293SDavid Rientjes if (page->mapping && trylock_page(page)) { 1248b676b293SDavid Rientjes lru_add_drain(); 1249b676b293SDavid Rientjes if (page->mapping) 1250b676b293SDavid Rientjes mlock_vma_page(page); 1251b676b293SDavid Rientjes unlock_page(page); 1252b676b293SDavid Rientjes } 1253b676b293SDavid Rientjes } 125471e3aac0SAndrea Arcangeli page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; 1255309381feSSasha Levin VM_BUG_ON_PAGE(!PageCompound(page), page); 125671e3aac0SAndrea Arcangeli if (flags & FOLL_GET) 125770b50f94SAndrea Arcangeli get_page_foll(page); 125871e3aac0SAndrea Arcangeli 125971e3aac0SAndrea Arcangeli out: 126071e3aac0SAndrea Arcangeli return page; 126171e3aac0SAndrea Arcangeli } 126271e3aac0SAndrea Arcangeli 1263d10e63f2SMel Gorman /* NUMA hinting page fault entry point for trans huge pmds */ 12644daae3b4SMel Gorman int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, 12654daae3b4SMel Gorman unsigned long addr, pmd_t pmd, pmd_t *pmdp) 1266d10e63f2SMel Gorman { 1267c4088ebdSKirill A. 
Shutemov spinlock_t *ptl; 1268b8916634SMel Gorman struct anon_vma *anon_vma = NULL; 1269b32967ffSMel Gorman struct page *page; 1270d10e63f2SMel Gorman unsigned long haddr = addr & HPAGE_PMD_MASK; 12718191acbdSMel Gorman int page_nid = -1, this_nid = numa_node_id(); 127290572890SPeter Zijlstra int target_nid, last_cpupid = -1; 12738191acbdSMel Gorman bool page_locked; 12748191acbdSMel Gorman bool migrated = false; 12756688cc05SPeter Zijlstra int flags = 0; 1276d10e63f2SMel Gorman 1277c4088ebdSKirill A. Shutemov ptl = pmd_lock(mm, pmdp); 1278d10e63f2SMel Gorman if (unlikely(!pmd_same(pmd, *pmdp))) 1279d10e63f2SMel Gorman goto out_unlock; 1280d10e63f2SMel Gorman 1281de466bd6SMel Gorman /* 1282de466bd6SMel Gorman * If there are potential migrations, wait for completion and retry 1283de466bd6SMel Gorman * without disrupting NUMA hinting information. Do not relock and 1284de466bd6SMel Gorman * check_same as the page may no longer be mapped. 1285de466bd6SMel Gorman */ 1286de466bd6SMel Gorman if (unlikely(pmd_trans_migrating(*pmdp))) { 1287de466bd6SMel Gorman spin_unlock(ptl); 1288de466bd6SMel Gorman wait_migrate_huge_page(vma->anon_vma, pmdp); 1289de466bd6SMel Gorman goto out; 1290de466bd6SMel Gorman } 1291de466bd6SMel Gorman 1292d10e63f2SMel Gorman page = pmd_page(pmd); 1293a1a46184SMel Gorman BUG_ON(is_huge_zero_page(page)); 12948191acbdSMel Gorman page_nid = page_to_nid(page); 129590572890SPeter Zijlstra last_cpupid = page_cpupid_last(page); 129603c5a6e1SMel Gorman count_vm_numa_event(NUMA_HINT_FAULTS); 129704bb2f94SRik van Riel if (page_nid == this_nid) { 129803c5a6e1SMel Gorman count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); 129904bb2f94SRik van Riel flags |= TNF_FAULT_LOCAL; 130004bb2f94SRik van Riel } 13014daae3b4SMel Gorman 1302ff9042b1SMel Gorman /* 13036688cc05SPeter Zijlstra * Avoid grouping on DSO/COW pages in specific and RO pages 13046688cc05SPeter Zijlstra * in general, RO pages shouldn't hurt as much anyway since 13056688cc05SPeter Zijlstra * they can be in shared cache state. 13066688cc05SPeter Zijlstra */ 13076688cc05SPeter Zijlstra if (!pmd_write(pmd)) 13086688cc05SPeter Zijlstra flags |= TNF_NO_GROUP; 13096688cc05SPeter Zijlstra 13106688cc05SPeter Zijlstra /* 1311ff9042b1SMel Gorman * Acquire the page lock to serialise THP migrations but avoid dropping 1312ff9042b1SMel Gorman * page_table_lock if at all possible 1313ff9042b1SMel Gorman */ 1314b8916634SMel Gorman page_locked = trylock_page(page); 1315b8916634SMel Gorman target_nid = mpol_misplaced(page, vma, haddr); 1316b8916634SMel Gorman if (target_nid == -1) { 1317b8916634SMel Gorman /* If the page was locked, there are no parallel migrations */ 1318a54a407fSMel Gorman if (page_locked) 1319b8916634SMel Gorman goto clear_pmdnuma; 13202b4847e7SMel Gorman } 1321cbee9f88SPeter Zijlstra 1322de466bd6SMel Gorman /* Migration could have started since the pmd_trans_migrating check */ 13232b4847e7SMel Gorman if (!page_locked) { 1324c4088ebdSKirill A. Shutemov spin_unlock(ptl); 1325b8916634SMel Gorman wait_on_page_locked(page); 1326a54a407fSMel Gorman page_nid = -1; 1327b8916634SMel Gorman goto out; 1328b8916634SMel Gorman } 1329b8916634SMel Gorman 13302b4847e7SMel Gorman /* 13312b4847e7SMel Gorman * Page is misplaced. Page lock serialises migrations. Acquire anon_vma 13322b4847e7SMel Gorman * to serialises splits 13332b4847e7SMel Gorman */ 1334b8916634SMel Gorman get_page(page); 1335c4088ebdSKirill A. 
Shutemov spin_unlock(ptl); 1336b8916634SMel Gorman anon_vma = page_lock_anon_vma_read(page); 1337b32967ffSMel Gorman 1338c69307d5SPeter Zijlstra /* Confirm the PMD did not change while page_table_lock was released */ 1339c4088ebdSKirill A. Shutemov spin_lock(ptl); 1340b32967ffSMel Gorman if (unlikely(!pmd_same(pmd, *pmdp))) { 1341b32967ffSMel Gorman unlock_page(page); 1342b32967ffSMel Gorman put_page(page); 1343a54a407fSMel Gorman page_nid = -1; 1344b32967ffSMel Gorman goto out_unlock; 1345b32967ffSMel Gorman } 1346ff9042b1SMel Gorman 1347c3a489caSMel Gorman /* Bail if we fail to protect against THP splits for any reason */ 1348c3a489caSMel Gorman if (unlikely(!anon_vma)) { 1349c3a489caSMel Gorman put_page(page); 1350c3a489caSMel Gorman page_nid = -1; 1351c3a489caSMel Gorman goto clear_pmdnuma; 1352c3a489caSMel Gorman } 1353c3a489caSMel Gorman 1354a54a407fSMel Gorman /* 1355a54a407fSMel Gorman * Migrate the THP to the requested node, returns with page unlocked 1356a54a407fSMel Gorman * and pmd_numa cleared. 1357a54a407fSMel Gorman */ 1358c4088ebdSKirill A. Shutemov spin_unlock(ptl); 1359b32967ffSMel Gorman migrated = migrate_misplaced_transhuge_page(mm, vma, 1360340ef390SHugh Dickins pmdp, pmd, addr, page, target_nid); 13616688cc05SPeter Zijlstra if (migrated) { 13626688cc05SPeter Zijlstra flags |= TNF_MIGRATED; 13638191acbdSMel Gorman page_nid = target_nid; 13646688cc05SPeter Zijlstra } 1365b32967ffSMel Gorman 13668191acbdSMel Gorman goto out; 13674daae3b4SMel Gorman clear_pmdnuma: 1368a54a407fSMel Gorman BUG_ON(!PageLocked(page)); 1369d10e63f2SMel Gorman pmd = pmd_mknonnuma(pmd); 1370d10e63f2SMel Gorman set_pmd_at(mm, haddr, pmdp, pmd); 1371d10e63f2SMel Gorman VM_BUG_ON(pmd_numa(*pmdp)); 1372d10e63f2SMel Gorman update_mmu_cache_pmd(vma, addr, pmdp); 1373a54a407fSMel Gorman unlock_page(page); 1374d10e63f2SMel Gorman out_unlock: 1375c4088ebdSKirill A. Shutemov spin_unlock(ptl); 1376b8916634SMel Gorman 1377b8916634SMel Gorman out: 1378b8916634SMel Gorman if (anon_vma) 1379b8916634SMel Gorman page_unlock_anon_vma_read(anon_vma); 1380b8916634SMel Gorman 13818191acbdSMel Gorman if (page_nid != -1) 13826688cc05SPeter Zijlstra task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags); 13838191acbdSMel Gorman 1384d10e63f2SMel Gorman return 0; 1385d10e63f2SMel Gorman } 1386d10e63f2SMel Gorman 138771e3aac0SAndrea Arcangeli int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1388f21760b1SShaohua Li pmd_t *pmd, unsigned long addr) 138971e3aac0SAndrea Arcangeli { 1390bf929152SKirill A. Shutemov spinlock_t *ptl; 139171e3aac0SAndrea Arcangeli int ret = 0; 139271e3aac0SAndrea Arcangeli 1393bf929152SKirill A. Shutemov if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { 139471e3aac0SAndrea Arcangeli struct page *page; 139571e3aac0SAndrea Arcangeli pgtable_t pgtable; 1396f5c8ad47SDavid Miller pmd_t orig_pmd; 1397a6bf2bb0SAneesh Kumar K.V /* 1398a6bf2bb0SAneesh Kumar K.V * For architectures like ppc64 we look at deposited pgtable 1399a6bf2bb0SAneesh Kumar K.V * when calling pmdp_get_and_clear. So do the 1400a6bf2bb0SAneesh Kumar K.V * pgtable_trans_huge_withdraw after finishing pmdp related 1401a6bf2bb0SAneesh Kumar K.V * operations. 1402a6bf2bb0SAneesh Kumar K.V */ 1403f5c8ad47SDavid Miller orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd); 1404f21760b1SShaohua Li tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 1405a6bf2bb0SAneesh Kumar K.V pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd); 1406479f0abbSKirill A. Shutemov if (is_huge_zero_pmd(orig_pmd)) { 1407e1f56c89SKirill A. 
Shutemov atomic_long_dec(&tlb->mm->nr_ptes); 1408bf929152SKirill A. Shutemov spin_unlock(ptl); 140997ae1749SKirill A. Shutemov put_huge_zero_page(); 1410479f0abbSKirill A. Shutemov } else { 1411479f0abbSKirill A. Shutemov page = pmd_page(orig_pmd); 141271e3aac0SAndrea Arcangeli page_remove_rmap(page); 1413309381feSSasha Levin VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); 141471e3aac0SAndrea Arcangeli add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); 1415309381feSSasha Levin VM_BUG_ON_PAGE(!PageHead(page), page); 1416e1f56c89SKirill A. Shutemov atomic_long_dec(&tlb->mm->nr_ptes); 1417bf929152SKirill A. Shutemov spin_unlock(ptl); 141871e3aac0SAndrea Arcangeli tlb_remove_page(tlb, page); 1419479f0abbSKirill A. Shutemov } 142071e3aac0SAndrea Arcangeli pte_free(tlb->mm, pgtable); 142171e3aac0SAndrea Arcangeli ret = 1; 142271e3aac0SAndrea Arcangeli } 142371e3aac0SAndrea Arcangeli return ret; 142471e3aac0SAndrea Arcangeli } 142571e3aac0SAndrea Arcangeli 14260ca1634dSJohannes Weiner int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 14270ca1634dSJohannes Weiner unsigned long addr, unsigned long end, 14280ca1634dSJohannes Weiner unsigned char *vec) 14290ca1634dSJohannes Weiner { 1430bf929152SKirill A. Shutemov spinlock_t *ptl; 14310ca1634dSJohannes Weiner int ret = 0; 14320ca1634dSJohannes Weiner 1433bf929152SKirill A. Shutemov if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { 14340ca1634dSJohannes Weiner /* 14350ca1634dSJohannes Weiner * All logical pages in the range are present 14360ca1634dSJohannes Weiner * if backed by a huge page. 14370ca1634dSJohannes Weiner */ 1438bf929152SKirill A. Shutemov spin_unlock(ptl); 1439025c5b24SNaoya Horiguchi memset(vec, 1, (end - addr) >> PAGE_SHIFT); 1440025c5b24SNaoya Horiguchi ret = 1; 1441025c5b24SNaoya Horiguchi } 14420ca1634dSJohannes Weiner 14430ca1634dSJohannes Weiner return ret; 14440ca1634dSJohannes Weiner } 14450ca1634dSJohannes Weiner 144637a1c49aSAndrea Arcangeli int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma, 144737a1c49aSAndrea Arcangeli unsigned long old_addr, 144837a1c49aSAndrea Arcangeli unsigned long new_addr, unsigned long old_end, 144937a1c49aSAndrea Arcangeli pmd_t *old_pmd, pmd_t *new_pmd) 145037a1c49aSAndrea Arcangeli { 1451bf929152SKirill A. Shutemov spinlock_t *old_ptl, *new_ptl; 145237a1c49aSAndrea Arcangeli int ret = 0; 145337a1c49aSAndrea Arcangeli pmd_t pmd; 145437a1c49aSAndrea Arcangeli 145537a1c49aSAndrea Arcangeli struct mm_struct *mm = vma->vm_mm; 145637a1c49aSAndrea Arcangeli 145737a1c49aSAndrea Arcangeli if ((old_addr & ~HPAGE_PMD_MASK) || 145837a1c49aSAndrea Arcangeli (new_addr & ~HPAGE_PMD_MASK) || 145937a1c49aSAndrea Arcangeli old_end - old_addr < HPAGE_PMD_SIZE || 146037a1c49aSAndrea Arcangeli (new_vma->vm_flags & VM_NOHUGEPAGE)) 146137a1c49aSAndrea Arcangeli goto out; 146237a1c49aSAndrea Arcangeli 146337a1c49aSAndrea Arcangeli /* 146437a1c49aSAndrea Arcangeli * The destination pmd shouldn't be established, free_pgtables() 146537a1c49aSAndrea Arcangeli * should have release it. 146637a1c49aSAndrea Arcangeli */ 146737a1c49aSAndrea Arcangeli if (WARN_ON(!pmd_none(*new_pmd))) { 146837a1c49aSAndrea Arcangeli VM_BUG_ON(pmd_trans_huge(*new_pmd)); 146937a1c49aSAndrea Arcangeli goto out; 147037a1c49aSAndrea Arcangeli } 147137a1c49aSAndrea Arcangeli 1472bf929152SKirill A. Shutemov /* 1473bf929152SKirill A. Shutemov * We don't have to worry about the ordering of src and dst 1474bf929152SKirill A. Shutemov * ptlocks because exclusive mmap_sem prevents deadlock. 1475bf929152SKirill A. 
Shutemov */ 1476bf929152SKirill A. Shutemov ret = __pmd_trans_huge_lock(old_pmd, vma, &old_ptl); 1477025c5b24SNaoya Horiguchi if (ret == 1) { 1478bf929152SKirill A. Shutemov new_ptl = pmd_lockptr(mm, new_pmd); 1479bf929152SKirill A. Shutemov if (new_ptl != old_ptl) 1480bf929152SKirill A. Shutemov spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); 148137a1c49aSAndrea Arcangeli pmd = pmdp_get_and_clear(mm, old_addr, old_pmd); 148237a1c49aSAndrea Arcangeli VM_BUG_ON(!pmd_none(*new_pmd)); 14833592806cSKirill A. Shutemov 1484b3084f4dSAneesh Kumar K.V if (pmd_move_must_withdraw(new_ptl, old_ptl)) { 1485b3084f4dSAneesh Kumar K.V pgtable_t pgtable; 14863592806cSKirill A. Shutemov pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); 14873592806cSKirill A. Shutemov pgtable_trans_huge_deposit(mm, new_pmd, pgtable); 14883592806cSKirill A. Shutemov } 1489b3084f4dSAneesh Kumar K.V set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); 1490b3084f4dSAneesh Kumar K.V if (new_ptl != old_ptl) 1491b3084f4dSAneesh Kumar K.V spin_unlock(new_ptl); 1492bf929152SKirill A. Shutemov spin_unlock(old_ptl); 149337a1c49aSAndrea Arcangeli } 149437a1c49aSAndrea Arcangeli out: 149537a1c49aSAndrea Arcangeli return ret; 149637a1c49aSAndrea Arcangeli } 149737a1c49aSAndrea Arcangeli 1498f123d74aSMel Gorman /* 1499f123d74aSMel Gorman * Returns 1500f123d74aSMel Gorman * - 0 if PMD could not be locked 1501f123d74aSMel Gorman * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary 1502f123d74aSMel Gorman * - HPAGE_PMD_NR if protections changed and TLB flush necessary 1503f123d74aSMel Gorman */ 1504cd7548abSJohannes Weiner int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 15054b10e7d5SMel Gorman unsigned long addr, pgprot_t newprot, int prot_numa) 1506cd7548abSJohannes Weiner { 1507cd7548abSJohannes Weiner struct mm_struct *mm = vma->vm_mm; 1508bf929152SKirill A. Shutemov spinlock_t *ptl; 1509cd7548abSJohannes Weiner int ret = 0; 1510cd7548abSJohannes Weiner 1511bf929152SKirill A. Shutemov if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { 1512cd7548abSJohannes Weiner pmd_t entry; 1513f123d74aSMel Gorman ret = 1; 1514a4f1de17SHugh Dickins if (!prot_numa) { 1515f123d74aSMel Gorman entry = pmdp_get_and_clear(mm, addr, pmd); 15161667918bSMel Gorman if (pmd_numa(entry)) 15171667918bSMel Gorman entry = pmd_mknonnuma(entry); 1518cd7548abSJohannes Weiner entry = pmd_modify(entry, newprot); 1519f123d74aSMel Gorman ret = HPAGE_PMD_NR; 152056eecdb9SAneesh Kumar K.V set_pmd_at(mm, addr, pmd, entry); 1521a4f1de17SHugh Dickins BUG_ON(pmd_write(entry)); 1522a4f1de17SHugh Dickins } else { 15234b10e7d5SMel Gorman struct page *page = pmd_page(*pmd); 15244b10e7d5SMel Gorman 1525a1a46184SMel Gorman /* 15261bc115d8SMel Gorman * Do not trap faults against the zero page. The 15271bc115d8SMel Gorman * read-only data is likely to be read-cached on the 15281bc115d8SMel Gorman * local CPU cache and it is less useful to know about 15291bc115d8SMel Gorman * local vs remote hits on the zero page. 1530a1a46184SMel Gorman */ 15311bc115d8SMel Gorman if (!is_huge_zero_page(page) && 15324b10e7d5SMel Gorman !pmd_numa(*pmd)) { 153356eecdb9SAneesh Kumar K.V pmdp_set_numa(mm, addr, pmd); 1534f123d74aSMel Gorman ret = HPAGE_PMD_NR; 15354b10e7d5SMel Gorman } 15364b10e7d5SMel Gorman } 1537bf929152SKirill A.
Shutemov spin_unlock(ptl); 1538cd7548abSJohannes Weiner } 1539cd7548abSJohannes Weiner 1540cd7548abSJohannes Weiner return ret; 1541cd7548abSJohannes Weiner } 1542cd7548abSJohannes Weiner 1543025c5b24SNaoya Horiguchi /* 1544025c5b24SNaoya Horiguchi * Returns 1 if a given pmd maps a stable (not under splitting) thp. 1545025c5b24SNaoya Horiguchi * Returns -1 if it maps a thp under splitting. Returns 0 otherwise. 1546025c5b24SNaoya Horiguchi * 1547025c5b24SNaoya Horiguchi * Note that if it returns 1, this routine returns without unlocking page 1548025c5b24SNaoya Horiguchi * table locks. So callers must unlock them. 1549025c5b24SNaoya Horiguchi */ 1550bf929152SKirill A. Shutemov int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, 1551bf929152SKirill A. Shutemov spinlock_t **ptl) 1552025c5b24SNaoya Horiguchi { 1553bf929152SKirill A. Shutemov *ptl = pmd_lock(vma->vm_mm, pmd); 1554025c5b24SNaoya Horiguchi if (likely(pmd_trans_huge(*pmd))) { 1555025c5b24SNaoya Horiguchi if (unlikely(pmd_trans_splitting(*pmd))) { 1556bf929152SKirill A. Shutemov spin_unlock(*ptl); 1557025c5b24SNaoya Horiguchi wait_split_huge_page(vma->anon_vma, pmd); 1558025c5b24SNaoya Horiguchi return -1; 1559025c5b24SNaoya Horiguchi } else { 1560025c5b24SNaoya Horiguchi /* Thp mapped by 'pmd' is stable, so we can 1561025c5b24SNaoya Horiguchi * handle it as it is. */ 1562025c5b24SNaoya Horiguchi return 1; 1563025c5b24SNaoya Horiguchi } 1564025c5b24SNaoya Horiguchi } 1565bf929152SKirill A. Shutemov spin_unlock(*ptl); 1566025c5b24SNaoya Horiguchi return 0; 1567025c5b24SNaoya Horiguchi } 1568025c5b24SNaoya Horiguchi 1569117b0791SKirill A. Shutemov /* 1570117b0791SKirill A. Shutemov * This function returns whether a given @page is mapped onto the @address 1571117b0791SKirill A. Shutemov * in the virtual space of @mm. 1572117b0791SKirill A. Shutemov * 1573117b0791SKirill A. Shutemov * When it's true, this function returns *pmd with holding the page table lock 1574117b0791SKirill A. Shutemov * and passing it back to the caller via @ptl. 1575117b0791SKirill A. Shutemov * If it's false, returns NULL without holding the page table lock. 1576117b0791SKirill A. Shutemov */ 157771e3aac0SAndrea Arcangeli pmd_t *page_check_address_pmd(struct page *page, 157871e3aac0SAndrea Arcangeli struct mm_struct *mm, 157971e3aac0SAndrea Arcangeli unsigned long address, 1580117b0791SKirill A. Shutemov enum page_check_address_pmd_flag flag, 1581117b0791SKirill A. Shutemov spinlock_t **ptl) 158271e3aac0SAndrea Arcangeli { 1583b5a8cad3SKirill A. Shutemov pgd_t *pgd; 1584b5a8cad3SKirill A. Shutemov pud_t *pud; 1585117b0791SKirill A. Shutemov pmd_t *pmd; 158671e3aac0SAndrea Arcangeli 158771e3aac0SAndrea Arcangeli if (address & ~HPAGE_PMD_MASK) 1588117b0791SKirill A. Shutemov return NULL; 158971e3aac0SAndrea Arcangeli 1590b5a8cad3SKirill A. Shutemov pgd = pgd_offset(mm, address); 1591b5a8cad3SKirill A. Shutemov if (!pgd_present(*pgd)) 1592117b0791SKirill A. Shutemov return NULL; 1593b5a8cad3SKirill A. Shutemov pud = pud_offset(pgd, address); 1594b5a8cad3SKirill A. Shutemov if (!pud_present(*pud)) 1595b5a8cad3SKirill A. Shutemov return NULL; 1596b5a8cad3SKirill A. Shutemov pmd = pmd_offset(pud, address); 1597b5a8cad3SKirill A. Shutemov 1598117b0791SKirill A. Shutemov *ptl = pmd_lock(mm, pmd); 1599b5a8cad3SKirill A. Shutemov if (!pmd_present(*pmd)) 1600117b0791SKirill A. Shutemov goto unlock; 160171e3aac0SAndrea Arcangeli if (pmd_page(*pmd) != page) 1602117b0791SKirill A. 
Shutemov goto unlock; 160394fcc585SAndrea Arcangeli /* 160494fcc585SAndrea Arcangeli * split_vma() may create temporary aliased mappings. There is 160594fcc585SAndrea Arcangeli * no risk as long as all huge pmd are found and have their 160694fcc585SAndrea Arcangeli * splitting bit set before __split_huge_page_refcount 160794fcc585SAndrea Arcangeli * runs. Finding the same huge pmd more than once during the 160894fcc585SAndrea Arcangeli * same rmap walk is not a problem. 160994fcc585SAndrea Arcangeli */ 161094fcc585SAndrea Arcangeli if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG && 161194fcc585SAndrea Arcangeli pmd_trans_splitting(*pmd)) 1612117b0791SKirill A. Shutemov goto unlock; 161371e3aac0SAndrea Arcangeli if (pmd_trans_huge(*pmd)) { 161471e3aac0SAndrea Arcangeli VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG && 161571e3aac0SAndrea Arcangeli !pmd_trans_splitting(*pmd)); 1616117b0791SKirill A. Shutemov return pmd; 161771e3aac0SAndrea Arcangeli } 1618117b0791SKirill A. Shutemov unlock: 1619117b0791SKirill A. Shutemov spin_unlock(*ptl); 1620117b0791SKirill A. Shutemov return NULL; 162171e3aac0SAndrea Arcangeli } 162271e3aac0SAndrea Arcangeli 162371e3aac0SAndrea Arcangeli static int __split_huge_page_splitting(struct page *page, 162471e3aac0SAndrea Arcangeli struct vm_area_struct *vma, 162571e3aac0SAndrea Arcangeli unsigned long address) 162671e3aac0SAndrea Arcangeli { 162771e3aac0SAndrea Arcangeli struct mm_struct *mm = vma->vm_mm; 1628117b0791SKirill A. Shutemov spinlock_t *ptl; 162971e3aac0SAndrea Arcangeli pmd_t *pmd; 163071e3aac0SAndrea Arcangeli int ret = 0; 16312ec74c3eSSagi Grimberg /* For mmu_notifiers */ 16322ec74c3eSSagi Grimberg const unsigned long mmun_start = address; 16332ec74c3eSSagi Grimberg const unsigned long mmun_end = address + HPAGE_PMD_SIZE; 163471e3aac0SAndrea Arcangeli 16352ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 163671e3aac0SAndrea Arcangeli pmd = page_check_address_pmd(page, mm, address, 1637117b0791SKirill A. Shutemov PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, &ptl); 163871e3aac0SAndrea Arcangeli if (pmd) { 163971e3aac0SAndrea Arcangeli /* 164071e3aac0SAndrea Arcangeli * We can't temporarily set the pmd to null in order 164171e3aac0SAndrea Arcangeli * to split it, the pmd must remain marked huge at all 164271e3aac0SAndrea Arcangeli * times or the VM won't take the pmd_trans_huge paths 16435a505085SIngo Molnar * and it won't wait on the anon_vma->root->rwsem to 164471e3aac0SAndrea Arcangeli * serialize against split_huge_page*. 164571e3aac0SAndrea Arcangeli */ 16462ec74c3eSSagi Grimberg pmdp_splitting_flush(vma, address, pmd); 164771e3aac0SAndrea Arcangeli ret = 1; 1648117b0791SKirill A. 
Shutemov spin_unlock(ptl); 164971e3aac0SAndrea Arcangeli } 16502ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 165171e3aac0SAndrea Arcangeli 165271e3aac0SAndrea Arcangeli return ret; 165371e3aac0SAndrea Arcangeli } 165471e3aac0SAndrea Arcangeli 16555bc7b8acSShaohua Li static void __split_huge_page_refcount(struct page *page, 16565bc7b8acSShaohua Li struct list_head *list) 165771e3aac0SAndrea Arcangeli { 165871e3aac0SAndrea Arcangeli int i; 165971e3aac0SAndrea Arcangeli struct zone *zone = page_zone(page); 1660fa9add64SHugh Dickins struct lruvec *lruvec; 166170b50f94SAndrea Arcangeli int tail_count = 0; 166271e3aac0SAndrea Arcangeli 166371e3aac0SAndrea Arcangeli /* prevent PageLRU to go away from under us, and freeze lru stats */ 166471e3aac0SAndrea Arcangeli spin_lock_irq(&zone->lru_lock); 1665fa9add64SHugh Dickins lruvec = mem_cgroup_page_lruvec(page, zone); 1666fa9add64SHugh Dickins 166771e3aac0SAndrea Arcangeli compound_lock(page); 1668e94c8a9cSKAMEZAWA Hiroyuki /* complete memcg works before add pages to LRU */ 1669e94c8a9cSKAMEZAWA Hiroyuki mem_cgroup_split_huge_fixup(page); 167071e3aac0SAndrea Arcangeli 167145676885SShaohua Li for (i = HPAGE_PMD_NR - 1; i >= 1; i--) { 167271e3aac0SAndrea Arcangeli struct page *page_tail = page + i; 167371e3aac0SAndrea Arcangeli 167470b50f94SAndrea Arcangeli /* tail_page->_mapcount cannot change */ 167570b50f94SAndrea Arcangeli BUG_ON(page_mapcount(page_tail) < 0); 167670b50f94SAndrea Arcangeli tail_count += page_mapcount(page_tail); 167770b50f94SAndrea Arcangeli /* check for overflow */ 167870b50f94SAndrea Arcangeli BUG_ON(tail_count < 0); 167970b50f94SAndrea Arcangeli BUG_ON(atomic_read(&page_tail->_count) != 0); 168070b50f94SAndrea Arcangeli /* 168170b50f94SAndrea Arcangeli * tail_page->_count is zero and not changing from 168270b50f94SAndrea Arcangeli * under us. But get_page_unless_zero() may be running 168370b50f94SAndrea Arcangeli * from under us on the tail_page. If we used 168470b50f94SAndrea Arcangeli * atomic_set() below instead of atomic_add(), we 168570b50f94SAndrea Arcangeli * would then run atomic_set() concurrently with 168670b50f94SAndrea Arcangeli * get_page_unless_zero(), and atomic_set() is 168770b50f94SAndrea Arcangeli * implemented in C not using locked ops. spin_unlock 168870b50f94SAndrea Arcangeli * on x86 sometime uses locked ops because of PPro 168970b50f94SAndrea Arcangeli * errata 66, 92, so unless somebody can guarantee 169070b50f94SAndrea Arcangeli * atomic_set() here would be safe on all archs (and 169170b50f94SAndrea Arcangeli * not only on x86), it's safer to use atomic_add(). 169270b50f94SAndrea Arcangeli */ 169370b50f94SAndrea Arcangeli atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1, 169470b50f94SAndrea Arcangeli &page_tail->_count); 169571e3aac0SAndrea Arcangeli 169671e3aac0SAndrea Arcangeli /* after clearing PageTail the gup refcount can be released */ 16973a79d52aSWaiman Long smp_mb__after_atomic(); 169871e3aac0SAndrea Arcangeli 1699a6d30dddSJin Dongming /* 1700a6d30dddSJin Dongming * retain hwpoison flag of the poisoned tail page: 1701a6d30dddSJin Dongming * fix for the unsuitable process killed on Guest Machine(KVM) 1702a6d30dddSJin Dongming * by the memory-failure. 
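 * (Added note, inferred from the expression below: the mask appears to
 * clear every flag in PAGE_FLAGS_CHECK_AT_PREP while leaving any
 * __PG_HWPOISON bit in place, so a poisoned tail page remains marked
 * as poisoned when the selected head-page flags are copied over.)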
1703a6d30dddSJin Dongming */ 1704a6d30dddSJin Dongming page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON; 170571e3aac0SAndrea Arcangeli page_tail->flags |= (page->flags & 170671e3aac0SAndrea Arcangeli ((1L << PG_referenced) | 170771e3aac0SAndrea Arcangeli (1L << PG_swapbacked) | 170871e3aac0SAndrea Arcangeli (1L << PG_mlocked) | 1709e180cf80SKirill A. Shutemov (1L << PG_uptodate) | 1710e180cf80SKirill A. Shutemov (1L << PG_active) | 1711e180cf80SKirill A. Shutemov (1L << PG_unevictable))); 171271e3aac0SAndrea Arcangeli page_tail->flags |= (1L << PG_dirty); 171371e3aac0SAndrea Arcangeli 171470b50f94SAndrea Arcangeli /* clear PageTail before overwriting first_page */ 171571e3aac0SAndrea Arcangeli smp_wmb(); 171671e3aac0SAndrea Arcangeli 171771e3aac0SAndrea Arcangeli /* 171871e3aac0SAndrea Arcangeli * __split_huge_page_splitting() already set the 171971e3aac0SAndrea Arcangeli * splitting bit in all pmd that could map this 172071e3aac0SAndrea Arcangeli * hugepage, that will ensure no CPU can alter the 172171e3aac0SAndrea Arcangeli * mapcount on the head page. The mapcount is only 172271e3aac0SAndrea Arcangeli * accounted in the head page and it has to be 172371e3aac0SAndrea Arcangeli * transferred to all tail pages in the below code. So 172471e3aac0SAndrea Arcangeli * for this code to be safe, the split the mapcount 172571e3aac0SAndrea Arcangeli * can't change. But that doesn't mean userland can't 172671e3aac0SAndrea Arcangeli * keep changing and reading the page contents while 172771e3aac0SAndrea Arcangeli * we transfer the mapcount, so the pmd splitting 172871e3aac0SAndrea Arcangeli * status is achieved setting a reserved bit in the 172971e3aac0SAndrea Arcangeli * pmd, not by clearing the present bit. 173071e3aac0SAndrea Arcangeli */ 173171e3aac0SAndrea Arcangeli page_tail->_mapcount = page->_mapcount; 173271e3aac0SAndrea Arcangeli 173371e3aac0SAndrea Arcangeli BUG_ON(page_tail->mapping); 173471e3aac0SAndrea Arcangeli page_tail->mapping = page->mapping; 173571e3aac0SAndrea Arcangeli 173645676885SShaohua Li page_tail->index = page->index + i; 173790572890SPeter Zijlstra page_cpupid_xchg_last(page_tail, page_cpupid_last(page)); 173871e3aac0SAndrea Arcangeli 173971e3aac0SAndrea Arcangeli BUG_ON(!PageAnon(page_tail)); 174071e3aac0SAndrea Arcangeli BUG_ON(!PageUptodate(page_tail)); 174171e3aac0SAndrea Arcangeli BUG_ON(!PageDirty(page_tail)); 174271e3aac0SAndrea Arcangeli BUG_ON(!PageSwapBacked(page_tail)); 174371e3aac0SAndrea Arcangeli 17445bc7b8acSShaohua Li lru_add_page_tail(page, page_tail, lruvec, list); 174571e3aac0SAndrea Arcangeli } 174670b50f94SAndrea Arcangeli atomic_sub(tail_count, &page->_count); 174770b50f94SAndrea Arcangeli BUG_ON(atomic_read(&page->_count) <= 0); 174871e3aac0SAndrea Arcangeli 1749fa9add64SHugh Dickins __mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1); 175079134171SAndrea Arcangeli 175171e3aac0SAndrea Arcangeli ClearPageCompound(page); 175271e3aac0SAndrea Arcangeli compound_unlock(page); 175371e3aac0SAndrea Arcangeli spin_unlock_irq(&zone->lru_lock); 175471e3aac0SAndrea Arcangeli 175571e3aac0SAndrea Arcangeli for (i = 1; i < HPAGE_PMD_NR; i++) { 175671e3aac0SAndrea Arcangeli struct page *page_tail = page + i; 175771e3aac0SAndrea Arcangeli BUG_ON(page_count(page_tail) <= 0); 175871e3aac0SAndrea Arcangeli /* 175971e3aac0SAndrea Arcangeli * Tail pages may be freed if there wasn't any mapping 176071e3aac0SAndrea Arcangeli * like if add_to_swap() is running on a lru page that 176171e3aac0SAndrea Arcangeli * had its mapping zapped. 
And freeing these pages 176271e3aac0SAndrea Arcangeli * requires taking the lru_lock so we do the put_page 176371e3aac0SAndrea Arcangeli * of the tail pages after the split is complete. 176471e3aac0SAndrea Arcangeli */ 176571e3aac0SAndrea Arcangeli put_page(page_tail); 176671e3aac0SAndrea Arcangeli } 176771e3aac0SAndrea Arcangeli 176871e3aac0SAndrea Arcangeli /* 176971e3aac0SAndrea Arcangeli * Only the head page (now become a regular page) is required 177071e3aac0SAndrea Arcangeli * to be pinned by the caller. 177171e3aac0SAndrea Arcangeli */ 177271e3aac0SAndrea Arcangeli BUG_ON(page_count(page) <= 0); 177371e3aac0SAndrea Arcangeli } 177471e3aac0SAndrea Arcangeli 177571e3aac0SAndrea Arcangeli static int __split_huge_page_map(struct page *page, 177671e3aac0SAndrea Arcangeli struct vm_area_struct *vma, 177771e3aac0SAndrea Arcangeli unsigned long address) 177871e3aac0SAndrea Arcangeli { 177971e3aac0SAndrea Arcangeli struct mm_struct *mm = vma->vm_mm; 1780117b0791SKirill A. Shutemov spinlock_t *ptl; 178171e3aac0SAndrea Arcangeli pmd_t *pmd, _pmd; 178271e3aac0SAndrea Arcangeli int ret = 0, i; 178371e3aac0SAndrea Arcangeli pgtable_t pgtable; 178471e3aac0SAndrea Arcangeli unsigned long haddr; 178571e3aac0SAndrea Arcangeli 178671e3aac0SAndrea Arcangeli pmd = page_check_address_pmd(page, mm, address, 1787117b0791SKirill A. Shutemov PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, &ptl); 178871e3aac0SAndrea Arcangeli if (pmd) { 17896b0b50b0SAneesh Kumar K.V pgtable = pgtable_trans_huge_withdraw(mm, pmd); 179071e3aac0SAndrea Arcangeli pmd_populate(mm, &_pmd, pgtable); 1791f8303c25SWaiman Long if (pmd_write(*pmd)) 1792f8303c25SWaiman Long BUG_ON(page_mapcount(page) != 1); 179371e3aac0SAndrea Arcangeli 1794e3ebcf64SGerald Schaefer haddr = address; 1795e3ebcf64SGerald Schaefer for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 179671e3aac0SAndrea Arcangeli pte_t *pte, entry; 179771e3aac0SAndrea Arcangeli BUG_ON(PageCompound(page+i)); 1798abc40bd2SMel Gorman /* 1799abc40bd2SMel Gorman * Note that pmd_numa is not transferred deliberately 1800abc40bd2SMel Gorman * to avoid any possibility that pte_numa leaks to 1801abc40bd2SMel Gorman * a PROT_NONE VMA by accident. 1802abc40bd2SMel Gorman */ 180371e3aac0SAndrea Arcangeli entry = mk_pte(page + i, vma->vm_page_prot); 180471e3aac0SAndrea Arcangeli entry = maybe_mkwrite(pte_mkdirty(entry), vma); 180571e3aac0SAndrea Arcangeli if (!pmd_write(*pmd)) 180671e3aac0SAndrea Arcangeli entry = pte_wrprotect(entry); 180771e3aac0SAndrea Arcangeli if (!pmd_young(*pmd)) 180871e3aac0SAndrea Arcangeli entry = pte_mkold(entry); 180971e3aac0SAndrea Arcangeli pte = pte_offset_map(&_pmd, haddr); 181071e3aac0SAndrea Arcangeli BUG_ON(!pte_none(*pte)); 181171e3aac0SAndrea Arcangeli set_pte_at(mm, haddr, pte, entry); 181271e3aac0SAndrea Arcangeli pte_unmap(pte); 181371e3aac0SAndrea Arcangeli } 181471e3aac0SAndrea Arcangeli 181571e3aac0SAndrea Arcangeli smp_wmb(); /* make pte visible before pmd */ 181671e3aac0SAndrea Arcangeli /* 181771e3aac0SAndrea Arcangeli * Up to this point the pmd is present and huge and 181871e3aac0SAndrea Arcangeli * userland has the whole access to the hugepage 181971e3aac0SAndrea Arcangeli * during the split (which happens in place). 
If we 182071e3aac0SAndrea Arcangeli * overwrite the pmd with the not-huge version 182171e3aac0SAndrea Arcangeli * pointing to the pte here (which of course we could 182271e3aac0SAndrea Arcangeli * if all CPUs were bug free), userland could trigger 182371e3aac0SAndrea Arcangeli * a small page size TLB miss on the small sized TLB 182471e3aac0SAndrea Arcangeli * while the hugepage TLB entry is still established 182571e3aac0SAndrea Arcangeli * in the huge TLB. Some CPU doesn't like that. See 182671e3aac0SAndrea Arcangeli * http://support.amd.com/us/Processor_TechDocs/41322.pdf, 182771e3aac0SAndrea Arcangeli * Erratum 383 on page 93. Intel should be safe but is 182871e3aac0SAndrea Arcangeli * also warns that it's only safe if the permission 182971e3aac0SAndrea Arcangeli * and cache attributes of the two entries loaded in 183071e3aac0SAndrea Arcangeli * the two TLB is identical (which should be the case 183171e3aac0SAndrea Arcangeli * here). But it is generally safer to never allow 183271e3aac0SAndrea Arcangeli * small and huge TLB entries for the same virtual 183371e3aac0SAndrea Arcangeli * address to be loaded simultaneously. So instead of 183471e3aac0SAndrea Arcangeli * doing "pmd_populate(); flush_tlb_range();" we first 183571e3aac0SAndrea Arcangeli * mark the current pmd notpresent (atomically because 183671e3aac0SAndrea Arcangeli * here the pmd_trans_huge and pmd_trans_splitting 183771e3aac0SAndrea Arcangeli * must remain set at all times on the pmd until the 183871e3aac0SAndrea Arcangeli * split is complete for this pmd), then we flush the 183971e3aac0SAndrea Arcangeli * SMP TLB and finally we write the non-huge version 184071e3aac0SAndrea Arcangeli * of the pmd entry with pmd_populate. 184171e3aac0SAndrea Arcangeli */ 184246dcde73SGerald Schaefer pmdp_invalidate(vma, address, pmd); 184371e3aac0SAndrea Arcangeli pmd_populate(mm, pmd, pgtable); 184471e3aac0SAndrea Arcangeli ret = 1; 1845117b0791SKirill A. Shutemov spin_unlock(ptl); 184671e3aac0SAndrea Arcangeli } 184771e3aac0SAndrea Arcangeli 184871e3aac0SAndrea Arcangeli return ret; 184971e3aac0SAndrea Arcangeli } 185071e3aac0SAndrea Arcangeli 18515a505085SIngo Molnar /* must be called with anon_vma->root->rwsem held */ 185271e3aac0SAndrea Arcangeli static void __split_huge_page(struct page *page, 18535bc7b8acSShaohua Li struct anon_vma *anon_vma, 18545bc7b8acSShaohua Li struct list_head *list) 185571e3aac0SAndrea Arcangeli { 185671e3aac0SAndrea Arcangeli int mapcount, mapcount2; 1857bf181b9fSMichel Lespinasse pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 185871e3aac0SAndrea Arcangeli struct anon_vma_chain *avc; 185971e3aac0SAndrea Arcangeli 186071e3aac0SAndrea Arcangeli BUG_ON(!PageHead(page)); 186171e3aac0SAndrea Arcangeli BUG_ON(PageTail(page)); 186271e3aac0SAndrea Arcangeli 186371e3aac0SAndrea Arcangeli mapcount = 0; 1864bf181b9fSMichel Lespinasse anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { 186571e3aac0SAndrea Arcangeli struct vm_area_struct *vma = avc->vma; 186671e3aac0SAndrea Arcangeli unsigned long addr = vma_address(page, vma); 186771e3aac0SAndrea Arcangeli BUG_ON(is_vma_temporary_stack(vma)); 186871e3aac0SAndrea Arcangeli mapcount += __split_huge_page_splitting(page, vma, addr); 186971e3aac0SAndrea Arcangeli } 187005759d38SAndrea Arcangeli /* 187105759d38SAndrea Arcangeli * It is critical that new vmas are added to the tail of the 187205759d38SAndrea Arcangeli * anon_vma list. 
This guarantees that if copy_huge_pmd() runs 187305759d38SAndrea Arcangeli * and establishes a child pmd before 187405759d38SAndrea Arcangeli * __split_huge_page_splitting() freezes the parent pmd (so if 187505759d38SAndrea Arcangeli * we fail to prevent copy_huge_pmd() from running until the 187605759d38SAndrea Arcangeli * whole __split_huge_page() is complete), we will still see 187705759d38SAndrea Arcangeli * the newly established pmd of the child later during the 187805759d38SAndrea Arcangeli * walk, to be able to set it as pmd_trans_splitting too. 187905759d38SAndrea Arcangeli */ 1880ff9e43ebSKirill A. Shutemov if (mapcount != page_mapcount(page)) { 1881ae3a8c1cSAndrew Morton pr_err("mapcount %d page_mapcount %d\n", 188205759d38SAndrea Arcangeli mapcount, page_mapcount(page)); 1883ff9e43ebSKirill A. Shutemov BUG(); 1884ff9e43ebSKirill A. Shutemov } 188571e3aac0SAndrea Arcangeli 18865bc7b8acSShaohua Li __split_huge_page_refcount(page, list); 188771e3aac0SAndrea Arcangeli 188871e3aac0SAndrea Arcangeli mapcount2 = 0; 1889bf181b9fSMichel Lespinasse anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { 189071e3aac0SAndrea Arcangeli struct vm_area_struct *vma = avc->vma; 189171e3aac0SAndrea Arcangeli unsigned long addr = vma_address(page, vma); 189271e3aac0SAndrea Arcangeli BUG_ON(is_vma_temporary_stack(vma)); 189371e3aac0SAndrea Arcangeli mapcount2 += __split_huge_page_map(page, vma, addr); 189471e3aac0SAndrea Arcangeli } 1895ff9e43ebSKirill A. Shutemov if (mapcount != mapcount2) { 1896ae3a8c1cSAndrew Morton pr_err("mapcount %d mapcount2 %d page_mapcount %d\n", 189705759d38SAndrea Arcangeli mapcount, mapcount2, page_mapcount(page)); 1898ff9e43ebSKirill A. Shutemov BUG(); 1899ff9e43ebSKirill A. Shutemov } 190071e3aac0SAndrea Arcangeli } 190171e3aac0SAndrea Arcangeli 19025bc7b8acSShaohua Li /* 19035bc7b8acSShaohua Li * Split a hugepage into normal pages. This doesn't change the position of head 19045bc7b8acSShaohua Li * page. If @list is null, tail pages will be added to LRU list, otherwise, to 19055bc7b8acSShaohua Li * @list. Both head page and tail pages will inherit mapping, flags, and so on 19065bc7b8acSShaohua Li * from the hugepage. 19075bc7b8acSShaohua Li * Return 0 if the hugepage is split successfully, otherwise return 1. 19085bc7b8acSShaohua Li */ 19095bc7b8acSShaohua Li int split_huge_page_to_list(struct page *page, struct list_head *list) 191071e3aac0SAndrea Arcangeli { 191171e3aac0SAndrea Arcangeli struct anon_vma *anon_vma; 191271e3aac0SAndrea Arcangeli int ret = 1; 191371e3aac0SAndrea Arcangeli 19145918d10aSKirill A. Shutemov BUG_ON(is_huge_zero_page(page)); 191571e3aac0SAndrea Arcangeli BUG_ON(!PageAnon(page)); 1916062f1af2SMel Gorman 1917062f1af2SMel Gorman /* 1918062f1af2SMel Gorman * The caller does not necessarily hold an mmap_sem that would prevent 1919062f1af2SMel Gorman * the anon_vma disappearing so we first take a reference to it 1920062f1af2SMel Gorman * and then lock the anon_vma for write. This is similar to 1921062f1af2SMel Gorman * page_lock_anon_vma_read except the write lock is taken to serialise 1922062f1af2SMel Gorman * against parallel split or collapse operations.
1923062f1af2SMel Gorman */ 1924062f1af2SMel Gorman anon_vma = page_get_anon_vma(page); 192571e3aac0SAndrea Arcangeli if (!anon_vma) 192671e3aac0SAndrea Arcangeli goto out; 1927062f1af2SMel Gorman anon_vma_lock_write(anon_vma); 1928062f1af2SMel Gorman 192971e3aac0SAndrea Arcangeli ret = 0; 193071e3aac0SAndrea Arcangeli if (!PageCompound(page)) 193171e3aac0SAndrea Arcangeli goto out_unlock; 193271e3aac0SAndrea Arcangeli 193371e3aac0SAndrea Arcangeli BUG_ON(!PageSwapBacked(page)); 19345bc7b8acSShaohua Li __split_huge_page(page, anon_vma, list); 193581ab4201SAndi Kleen count_vm_event(THP_SPLIT); 193671e3aac0SAndrea Arcangeli 193771e3aac0SAndrea Arcangeli BUG_ON(PageCompound(page)); 193871e3aac0SAndrea Arcangeli out_unlock: 193908b52706SKonstantin Khlebnikov anon_vma_unlock_write(anon_vma); 1940062f1af2SMel Gorman put_anon_vma(anon_vma); 194171e3aac0SAndrea Arcangeli out: 194271e3aac0SAndrea Arcangeli return ret; 194371e3aac0SAndrea Arcangeli } 194471e3aac0SAndrea Arcangeli 19459050d7ebSVlastimil Babka #define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE) 194678f11a25SAndrea Arcangeli 194760ab3244SAndrea Arcangeli int hugepage_madvise(struct vm_area_struct *vma, 194860ab3244SAndrea Arcangeli unsigned long *vm_flags, int advice) 19490af4e98bSAndrea Arcangeli { 1950a664b2d8SAndrea Arcangeli switch (advice) { 1951a664b2d8SAndrea Arcangeli case MADV_HUGEPAGE: 19521e1836e8SAlex Thorlton #ifdef CONFIG_S390 19531e1836e8SAlex Thorlton /* 19541e1836e8SAlex Thorlton * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390 19551e1836e8SAlex Thorlton * can't handle this properly after s390_enable_sie, so we simply 19561e1836e8SAlex Thorlton * ignore the madvise to prevent qemu from causing a SIGSEGV. 19571e1836e8SAlex Thorlton */ 19581e1836e8SAlex Thorlton if (mm_has_pgste(vma->vm_mm)) 19591e1836e8SAlex Thorlton return 0; 19601e1836e8SAlex Thorlton #endif 19610af4e98bSAndrea Arcangeli /* 19620af4e98bSAndrea Arcangeli * Be somewhat over-protective like KSM for now! 19630af4e98bSAndrea Arcangeli */ 196478f11a25SAndrea Arcangeli if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP)) 19650af4e98bSAndrea Arcangeli return -EINVAL; 1966a664b2d8SAndrea Arcangeli *vm_flags &= ~VM_NOHUGEPAGE; 19670af4e98bSAndrea Arcangeli *vm_flags |= VM_HUGEPAGE; 196860ab3244SAndrea Arcangeli /* 196960ab3244SAndrea Arcangeli * If the vma become good for khugepaged to scan, 197060ab3244SAndrea Arcangeli * register it here without waiting a page fault that 197160ab3244SAndrea Arcangeli * may not happen any time soon. 197260ab3244SAndrea Arcangeli */ 197360ab3244SAndrea Arcangeli if (unlikely(khugepaged_enter_vma_merge(vma))) 197460ab3244SAndrea Arcangeli return -ENOMEM; 1975a664b2d8SAndrea Arcangeli break; 1976a664b2d8SAndrea Arcangeli case MADV_NOHUGEPAGE: 1977a664b2d8SAndrea Arcangeli /* 1978a664b2d8SAndrea Arcangeli * Be somewhat over-protective like KSM for now! 1979a664b2d8SAndrea Arcangeli */ 198078f11a25SAndrea Arcangeli if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP)) 1981a664b2d8SAndrea Arcangeli return -EINVAL; 1982a664b2d8SAndrea Arcangeli *vm_flags &= ~VM_HUGEPAGE; 1983a664b2d8SAndrea Arcangeli *vm_flags |= VM_NOHUGEPAGE; 198460ab3244SAndrea Arcangeli /* 198560ab3244SAndrea Arcangeli * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning 198660ab3244SAndrea Arcangeli * this vma even if we leave the mm registered in khugepaged if 198760ab3244SAndrea Arcangeli * it got registered before VM_NOHUGEPAGE was set. 
198860ab3244SAndrea Arcangeli */ 1989a664b2d8SAndrea Arcangeli break; 1990a664b2d8SAndrea Arcangeli } 19910af4e98bSAndrea Arcangeli 19920af4e98bSAndrea Arcangeli return 0; 19930af4e98bSAndrea Arcangeli } 19940af4e98bSAndrea Arcangeli 1995ba76149fSAndrea Arcangeli static int __init khugepaged_slab_init(void) 1996ba76149fSAndrea Arcangeli { 1997ba76149fSAndrea Arcangeli mm_slot_cache = kmem_cache_create("khugepaged_mm_slot", 1998ba76149fSAndrea Arcangeli sizeof(struct mm_slot), 1999ba76149fSAndrea Arcangeli __alignof__(struct mm_slot), 0, NULL); 2000ba76149fSAndrea Arcangeli if (!mm_slot_cache) 2001ba76149fSAndrea Arcangeli return -ENOMEM; 2002ba76149fSAndrea Arcangeli 2003ba76149fSAndrea Arcangeli return 0; 2004ba76149fSAndrea Arcangeli } 2005ba76149fSAndrea Arcangeli 2006ba76149fSAndrea Arcangeli static inline struct mm_slot *alloc_mm_slot(void) 2007ba76149fSAndrea Arcangeli { 2008ba76149fSAndrea Arcangeli if (!mm_slot_cache) /* initialization failed */ 2009ba76149fSAndrea Arcangeli return NULL; 2010ba76149fSAndrea Arcangeli return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL); 2011ba76149fSAndrea Arcangeli } 2012ba76149fSAndrea Arcangeli 2013ba76149fSAndrea Arcangeli static inline void free_mm_slot(struct mm_slot *mm_slot) 2014ba76149fSAndrea Arcangeli { 2015ba76149fSAndrea Arcangeli kmem_cache_free(mm_slot_cache, mm_slot); 2016ba76149fSAndrea Arcangeli } 2017ba76149fSAndrea Arcangeli 2018ba76149fSAndrea Arcangeli static struct mm_slot *get_mm_slot(struct mm_struct *mm) 2019ba76149fSAndrea Arcangeli { 2020ba76149fSAndrea Arcangeli struct mm_slot *mm_slot; 2021ba76149fSAndrea Arcangeli 2022b67bfe0dSSasha Levin hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm) 2023ba76149fSAndrea Arcangeli if (mm == mm_slot->mm) 2024ba76149fSAndrea Arcangeli return mm_slot; 202543b5fbbdSSasha Levin 2026ba76149fSAndrea Arcangeli return NULL; 2027ba76149fSAndrea Arcangeli } 2028ba76149fSAndrea Arcangeli 2029ba76149fSAndrea Arcangeli static void insert_to_mm_slots_hash(struct mm_struct *mm, 2030ba76149fSAndrea Arcangeli struct mm_slot *mm_slot) 2031ba76149fSAndrea Arcangeli { 2032ba76149fSAndrea Arcangeli mm_slot->mm = mm; 203343b5fbbdSSasha Levin hash_add(mm_slots_hash, &mm_slot->hash, (long)mm); 2034ba76149fSAndrea Arcangeli } 2035ba76149fSAndrea Arcangeli 2036ba76149fSAndrea Arcangeli static inline int khugepaged_test_exit(struct mm_struct *mm) 2037ba76149fSAndrea Arcangeli { 2038ba76149fSAndrea Arcangeli return atomic_read(&mm->mm_users) == 0; 2039ba76149fSAndrea Arcangeli } 2040ba76149fSAndrea Arcangeli 2041ba76149fSAndrea Arcangeli int __khugepaged_enter(struct mm_struct *mm) 2042ba76149fSAndrea Arcangeli { 2043ba76149fSAndrea Arcangeli struct mm_slot *mm_slot; 2044ba76149fSAndrea Arcangeli int wakeup; 2045ba76149fSAndrea Arcangeli 2046ba76149fSAndrea Arcangeli mm_slot = alloc_mm_slot(); 2047ba76149fSAndrea Arcangeli if (!mm_slot) 2048ba76149fSAndrea Arcangeli return -ENOMEM; 2049ba76149fSAndrea Arcangeli 2050ba76149fSAndrea Arcangeli /* __khugepaged_exit() must not run from under us */ 2051ba76149fSAndrea Arcangeli VM_BUG_ON(khugepaged_test_exit(mm)); 2052ba76149fSAndrea Arcangeli if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { 2053ba76149fSAndrea Arcangeli free_mm_slot(mm_slot); 2054ba76149fSAndrea Arcangeli return 0; 2055ba76149fSAndrea Arcangeli } 2056ba76149fSAndrea Arcangeli 2057ba76149fSAndrea Arcangeli spin_lock(&khugepaged_mm_lock); 2058ba76149fSAndrea Arcangeli insert_to_mm_slots_hash(mm, mm_slot); 2059ba76149fSAndrea Arcangeli /* 2060ba76149fSAndrea 
Arcangeli * Insert just behind the scanning cursor, to let the area settle 2061ba76149fSAndrea Arcangeli * down a little. 2062ba76149fSAndrea Arcangeli */ 2063ba76149fSAndrea Arcangeli wakeup = list_empty(&khugepaged_scan.mm_head); 2064ba76149fSAndrea Arcangeli list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head); 2065ba76149fSAndrea Arcangeli spin_unlock(&khugepaged_mm_lock); 2066ba76149fSAndrea Arcangeli 2067ba76149fSAndrea Arcangeli atomic_inc(&mm->mm_count); 2068ba76149fSAndrea Arcangeli if (wakeup) 2069ba76149fSAndrea Arcangeli wake_up_interruptible(&khugepaged_wait); 2070ba76149fSAndrea Arcangeli 2071ba76149fSAndrea Arcangeli return 0; 2072ba76149fSAndrea Arcangeli } 2073ba76149fSAndrea Arcangeli 2074ba76149fSAndrea Arcangeli int khugepaged_enter_vma_merge(struct vm_area_struct *vma) 2075ba76149fSAndrea Arcangeli { 2076ba76149fSAndrea Arcangeli unsigned long hstart, hend; 2077ba76149fSAndrea Arcangeli if (!vma->anon_vma) 2078ba76149fSAndrea Arcangeli /* 2079ba76149fSAndrea Arcangeli * Not yet faulted in so we will register later in the 2080ba76149fSAndrea Arcangeli * page fault if needed. 2081ba76149fSAndrea Arcangeli */ 2082ba76149fSAndrea Arcangeli return 0; 208378f11a25SAndrea Arcangeli if (vma->vm_ops) 2084ba76149fSAndrea Arcangeli /* khugepaged not yet working on file or special mappings */ 2085ba76149fSAndrea Arcangeli return 0; 2086*81d1b09cSSasha Levin VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma); 2087ba76149fSAndrea Arcangeli hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2088ba76149fSAndrea Arcangeli hend = vma->vm_end & HPAGE_PMD_MASK; 2089ba76149fSAndrea Arcangeli if (hstart < hend) 2090ba76149fSAndrea Arcangeli return khugepaged_enter(vma); 2091ba76149fSAndrea Arcangeli return 0; 2092ba76149fSAndrea Arcangeli } 2093ba76149fSAndrea Arcangeli 2094ba76149fSAndrea Arcangeli void __khugepaged_exit(struct mm_struct *mm) 2095ba76149fSAndrea Arcangeli { 2096ba76149fSAndrea Arcangeli struct mm_slot *mm_slot; 2097ba76149fSAndrea Arcangeli int free = 0; 2098ba76149fSAndrea Arcangeli 2099ba76149fSAndrea Arcangeli spin_lock(&khugepaged_mm_lock); 2100ba76149fSAndrea Arcangeli mm_slot = get_mm_slot(mm); 2101ba76149fSAndrea Arcangeli if (mm_slot && khugepaged_scan.mm_slot != mm_slot) { 210243b5fbbdSSasha Levin hash_del(&mm_slot->hash); 2103ba76149fSAndrea Arcangeli list_del(&mm_slot->mm_node); 2104ba76149fSAndrea Arcangeli free = 1; 2105ba76149fSAndrea Arcangeli } 2106d788e80aSChris Wright spin_unlock(&khugepaged_mm_lock); 2107ba76149fSAndrea Arcangeli 2108ba76149fSAndrea Arcangeli if (free) { 2109ba76149fSAndrea Arcangeli clear_bit(MMF_VM_HUGEPAGE, &mm->flags); 2110ba76149fSAndrea Arcangeli free_mm_slot(mm_slot); 2111ba76149fSAndrea Arcangeli mmdrop(mm); 2112ba76149fSAndrea Arcangeli } else if (mm_slot) { 2113ba76149fSAndrea Arcangeli /* 2114ba76149fSAndrea Arcangeli * This is required to serialize against 2115ba76149fSAndrea Arcangeli * khugepaged_test_exit() (which is guaranteed to run 2116ba76149fSAndrea Arcangeli * under mmap sem read mode). Stop here (after we 2117ba76149fSAndrea Arcangeli * return all pagetables will be destroyed) until 2118ba76149fSAndrea Arcangeli * khugepaged has finished working on the pagetables 2119ba76149fSAndrea Arcangeli * under the mmap_sem. 
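 * (Added note: the otherwise empty down_write()/up_write() pair below
 * presumably acts as a barrier only; the write lock cannot be acquired
 * until khugepaged has dropped its read-side hold on mmap_sem, so once
 * we return it can no longer be walking this mm's page tables.)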
2120ba76149fSAndrea Arcangeli */ 2121ba76149fSAndrea Arcangeli down_write(&mm->mmap_sem); 2122ba76149fSAndrea Arcangeli up_write(&mm->mmap_sem); 2123d788e80aSChris Wright } 2124ba76149fSAndrea Arcangeli } 2125ba76149fSAndrea Arcangeli 2126ba76149fSAndrea Arcangeli static void release_pte_page(struct page *page) 2127ba76149fSAndrea Arcangeli { 2128ba76149fSAndrea Arcangeli /* 0 stands for page_is_file_cache(page) == false */ 2129ba76149fSAndrea Arcangeli dec_zone_page_state(page, NR_ISOLATED_ANON + 0); 2130ba76149fSAndrea Arcangeli unlock_page(page); 2131ba76149fSAndrea Arcangeli putback_lru_page(page); 2132ba76149fSAndrea Arcangeli } 2133ba76149fSAndrea Arcangeli 2134ba76149fSAndrea Arcangeli static void release_pte_pages(pte_t *pte, pte_t *_pte) 2135ba76149fSAndrea Arcangeli { 2136ba76149fSAndrea Arcangeli while (--_pte >= pte) { 2137ba76149fSAndrea Arcangeli pte_t pteval = *_pte; 2138ba76149fSAndrea Arcangeli if (!pte_none(pteval)) 2139ba76149fSAndrea Arcangeli release_pte_page(pte_page(pteval)); 2140ba76149fSAndrea Arcangeli } 2141ba76149fSAndrea Arcangeli } 2142ba76149fSAndrea Arcangeli 2143ba76149fSAndrea Arcangeli static int __collapse_huge_page_isolate(struct vm_area_struct *vma, 2144ba76149fSAndrea Arcangeli unsigned long address, 2145ba76149fSAndrea Arcangeli pte_t *pte) 2146ba76149fSAndrea Arcangeli { 2147ba76149fSAndrea Arcangeli struct page *page; 2148ba76149fSAndrea Arcangeli pte_t *_pte; 2149344aa35cSBob Liu int referenced = 0, none = 0; 2150ba76149fSAndrea Arcangeli for (_pte = pte; _pte < pte+HPAGE_PMD_NR; 2151ba76149fSAndrea Arcangeli _pte++, address += PAGE_SIZE) { 2152ba76149fSAndrea Arcangeli pte_t pteval = *_pte; 2153ba76149fSAndrea Arcangeli if (pte_none(pteval)) { 2154ba76149fSAndrea Arcangeli if (++none <= khugepaged_max_ptes_none) 2155ba76149fSAndrea Arcangeli continue; 2156344aa35cSBob Liu else 2157ba76149fSAndrea Arcangeli goto out; 2158ba76149fSAndrea Arcangeli } 2159344aa35cSBob Liu if (!pte_present(pteval) || !pte_write(pteval)) 2160ba76149fSAndrea Arcangeli goto out; 2161ba76149fSAndrea Arcangeli page = vm_normal_page(vma, address, pteval); 2162344aa35cSBob Liu if (unlikely(!page)) 2163ba76149fSAndrea Arcangeli goto out; 2164344aa35cSBob Liu 2165309381feSSasha Levin VM_BUG_ON_PAGE(PageCompound(page), page); 2166309381feSSasha Levin VM_BUG_ON_PAGE(!PageAnon(page), page); 2167309381feSSasha Levin VM_BUG_ON_PAGE(!PageSwapBacked(page), page); 2168ba76149fSAndrea Arcangeli 2169ba76149fSAndrea Arcangeli /* cannot use mapcount: can't collapse if there's a gup pin */ 2170344aa35cSBob Liu if (page_count(page) != 1) 2171ba76149fSAndrea Arcangeli goto out; 2172ba76149fSAndrea Arcangeli /* 2173ba76149fSAndrea Arcangeli * We can do it before isolate_lru_page because the 2174ba76149fSAndrea Arcangeli * page can't be freed from under us. NOTE: PG_lock 2175ba76149fSAndrea Arcangeli * is needed to serialize against split_huge_page 2176ba76149fSAndrea Arcangeli * when invoked from the VM. 2177ba76149fSAndrea Arcangeli */ 2178344aa35cSBob Liu if (!trylock_page(page)) 2179ba76149fSAndrea Arcangeli goto out; 2180ba76149fSAndrea Arcangeli /* 2181ba76149fSAndrea Arcangeli * Isolate the page to avoid collapsing an hugepage 2182ba76149fSAndrea Arcangeli * currently in use by the VM. 
2183ba76149fSAndrea Arcangeli */ 2184ba76149fSAndrea Arcangeli if (isolate_lru_page(page)) { 2185ba76149fSAndrea Arcangeli unlock_page(page); 2186ba76149fSAndrea Arcangeli goto out; 2187ba76149fSAndrea Arcangeli } 2188ba76149fSAndrea Arcangeli /* 0 stands for page_is_file_cache(page) == false */ 2189ba76149fSAndrea Arcangeli inc_zone_page_state(page, NR_ISOLATED_ANON + 0); 2190309381feSSasha Levin VM_BUG_ON_PAGE(!PageLocked(page), page); 2191309381feSSasha Levin VM_BUG_ON_PAGE(PageLRU(page), page); 2192ba76149fSAndrea Arcangeli 2193ba76149fSAndrea Arcangeli /* If there is no mapped pte young don't collapse the page */ 21948ee53820SAndrea Arcangeli if (pte_young(pteval) || PageReferenced(page) || 21958ee53820SAndrea Arcangeli mmu_notifier_test_young(vma->vm_mm, address)) 2196ba76149fSAndrea Arcangeli referenced = 1; 2197ba76149fSAndrea Arcangeli } 2198344aa35cSBob Liu if (likely(referenced)) 2199344aa35cSBob Liu return 1; 2200ba76149fSAndrea Arcangeli out: 2201344aa35cSBob Liu release_pte_pages(pte, _pte); 2202344aa35cSBob Liu return 0; 2203ba76149fSAndrea Arcangeli } 2204ba76149fSAndrea Arcangeli 2205ba76149fSAndrea Arcangeli static void __collapse_huge_page_copy(pte_t *pte, struct page *page, 2206ba76149fSAndrea Arcangeli struct vm_area_struct *vma, 2207ba76149fSAndrea Arcangeli unsigned long address, 2208ba76149fSAndrea Arcangeli spinlock_t *ptl) 2209ba76149fSAndrea Arcangeli { 2210ba76149fSAndrea Arcangeli pte_t *_pte; 2211ba76149fSAndrea Arcangeli for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) { 2212ba76149fSAndrea Arcangeli pte_t pteval = *_pte; 2213ba76149fSAndrea Arcangeli struct page *src_page; 2214ba76149fSAndrea Arcangeli 2215ba76149fSAndrea Arcangeli if (pte_none(pteval)) { 2216ba76149fSAndrea Arcangeli clear_user_highpage(page, address); 2217ba76149fSAndrea Arcangeli add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); 2218ba76149fSAndrea Arcangeli } else { 2219ba76149fSAndrea Arcangeli src_page = pte_page(pteval); 2220ba76149fSAndrea Arcangeli copy_user_highpage(page, src_page, address, vma); 2221309381feSSasha Levin VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page); 2222ba76149fSAndrea Arcangeli release_pte_page(src_page); 2223ba76149fSAndrea Arcangeli /* 2224ba76149fSAndrea Arcangeli * ptl mostly unnecessary, but preempt has to 2225ba76149fSAndrea Arcangeli * be disabled to update the per-cpu stats 2226ba76149fSAndrea Arcangeli * inside page_remove_rmap(). 2227ba76149fSAndrea Arcangeli */ 2228ba76149fSAndrea Arcangeli spin_lock(ptl); 2229ba76149fSAndrea Arcangeli /* 2230ba76149fSAndrea Arcangeli * paravirt calls inside pte_clear here are 2231ba76149fSAndrea Arcangeli * superfluous. 
2232ba76149fSAndrea Arcangeli */ 2233ba76149fSAndrea Arcangeli pte_clear(vma->vm_mm, address, _pte); 2234ba76149fSAndrea Arcangeli page_remove_rmap(src_page); 2235ba76149fSAndrea Arcangeli spin_unlock(ptl); 2236ba76149fSAndrea Arcangeli free_page_and_swap_cache(src_page); 2237ba76149fSAndrea Arcangeli } 2238ba76149fSAndrea Arcangeli 2239ba76149fSAndrea Arcangeli address += PAGE_SIZE; 2240ba76149fSAndrea Arcangeli page++; 2241ba76149fSAndrea Arcangeli } 2242ba76149fSAndrea Arcangeli } 2243ba76149fSAndrea Arcangeli 224426234f36SXiao Guangrong static void khugepaged_alloc_sleep(void) 224526234f36SXiao Guangrong { 224626234f36SXiao Guangrong wait_event_freezable_timeout(khugepaged_wait, false, 224726234f36SXiao Guangrong msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); 224826234f36SXiao Guangrong } 224926234f36SXiao Guangrong 22509f1b868aSBob Liu static int khugepaged_node_load[MAX_NUMNODES]; 22519f1b868aSBob Liu 225214a4e214SDavid Rientjes static bool khugepaged_scan_abort(int nid) 225314a4e214SDavid Rientjes { 225414a4e214SDavid Rientjes int i; 225514a4e214SDavid Rientjes 225614a4e214SDavid Rientjes /* 225714a4e214SDavid Rientjes * If zone_reclaim_mode is disabled, then no extra effort is made to 225814a4e214SDavid Rientjes * allocate memory locally. 225914a4e214SDavid Rientjes */ 226014a4e214SDavid Rientjes if (!zone_reclaim_mode) 226114a4e214SDavid Rientjes return false; 226214a4e214SDavid Rientjes 226314a4e214SDavid Rientjes /* If there is a count for this node already, it must be acceptable */ 226414a4e214SDavid Rientjes if (khugepaged_node_load[nid]) 226514a4e214SDavid Rientjes return false; 226614a4e214SDavid Rientjes 226714a4e214SDavid Rientjes for (i = 0; i < MAX_NUMNODES; i++) { 226814a4e214SDavid Rientjes if (!khugepaged_node_load[i]) 226914a4e214SDavid Rientjes continue; 227014a4e214SDavid Rientjes if (node_distance(nid, i) > RECLAIM_DISTANCE) 227114a4e214SDavid Rientjes return true; 227214a4e214SDavid Rientjes } 227314a4e214SDavid Rientjes return false; 227414a4e214SDavid Rientjes } 227514a4e214SDavid Rientjes 227626234f36SXiao Guangrong #ifdef CONFIG_NUMA 22779f1b868aSBob Liu static int khugepaged_find_target_node(void) 22789f1b868aSBob Liu { 22799f1b868aSBob Liu static int last_khugepaged_target_node = NUMA_NO_NODE; 22809f1b868aSBob Liu int nid, target_node = 0, max_value = 0; 22819f1b868aSBob Liu 22829f1b868aSBob Liu /* find first node with max normal pages hit */ 22839f1b868aSBob Liu for (nid = 0; nid < MAX_NUMNODES; nid++) 22849f1b868aSBob Liu if (khugepaged_node_load[nid] > max_value) { 22859f1b868aSBob Liu max_value = khugepaged_node_load[nid]; 22869f1b868aSBob Liu target_node = nid; 22879f1b868aSBob Liu } 22889f1b868aSBob Liu 22899f1b868aSBob Liu /* do some balance if several nodes have the same hit record */ 22909f1b868aSBob Liu if (target_node <= last_khugepaged_target_node) 22919f1b868aSBob Liu for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES; 22929f1b868aSBob Liu nid++) 22939f1b868aSBob Liu if (max_value == khugepaged_node_load[nid]) { 22949f1b868aSBob Liu target_node = nid; 22959f1b868aSBob Liu break; 22969f1b868aSBob Liu } 22979f1b868aSBob Liu 22989f1b868aSBob Liu last_khugepaged_target_node = target_node; 22999f1b868aSBob Liu return target_node; 23009f1b868aSBob Liu } 23019f1b868aSBob Liu 230226234f36SXiao Guangrong static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) 230326234f36SXiao Guangrong { 230426234f36SXiao Guangrong if (IS_ERR(*hpage)) { 230526234f36SXiao Guangrong if (!*wait) 230626234f36SXiao Guangrong return false; 
230726234f36SXiao Guangrong 230826234f36SXiao Guangrong *wait = false; 2309e3b4126cSXiao Guangrong *hpage = NULL; 231026234f36SXiao Guangrong khugepaged_alloc_sleep(); 231126234f36SXiao Guangrong } else if (*hpage) { 231226234f36SXiao Guangrong put_page(*hpage); 231326234f36SXiao Guangrong *hpage = NULL; 231426234f36SXiao Guangrong } 231526234f36SXiao Guangrong 231626234f36SXiao Guangrong return true; 231726234f36SXiao Guangrong } 231826234f36SXiao Guangrong 231926234f36SXiao Guangrong static struct page 232026234f36SXiao Guangrong *khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm, 232126234f36SXiao Guangrong struct vm_area_struct *vma, unsigned long address, 232226234f36SXiao Guangrong int node) 232326234f36SXiao Guangrong { 2324309381feSSasha Levin VM_BUG_ON_PAGE(*hpage, *hpage); 23258b164568SVlastimil Babka 232626234f36SXiao Guangrong /* 23278b164568SVlastimil Babka * Before allocating the hugepage, release the mmap_sem read lock. 23288b164568SVlastimil Babka * The allocation can take potentially a long time if it involves 23298b164568SVlastimil Babka * sync compaction, and we do not need to hold the mmap_sem during 23308b164568SVlastimil Babka * that. We will recheck the vma after taking it again in write mode. 233126234f36SXiao Guangrong */ 233226234f36SXiao Guangrong up_read(&mm->mmap_sem); 23338b164568SVlastimil Babka 23348b164568SVlastimil Babka *hpage = alloc_pages_exact_node(node, alloc_hugepage_gfpmask( 23358b164568SVlastimil Babka khugepaged_defrag(), __GFP_OTHER_NODE), HPAGE_PMD_ORDER); 233626234f36SXiao Guangrong if (unlikely(!*hpage)) { 233726234f36SXiao Guangrong count_vm_event(THP_COLLAPSE_ALLOC_FAILED); 233826234f36SXiao Guangrong *hpage = ERR_PTR(-ENOMEM); 233926234f36SXiao Guangrong return NULL; 234026234f36SXiao Guangrong } 234126234f36SXiao Guangrong 234226234f36SXiao Guangrong count_vm_event(THP_COLLAPSE_ALLOC); 234326234f36SXiao Guangrong return *hpage; 234426234f36SXiao Guangrong } 234526234f36SXiao Guangrong #else 23469f1b868aSBob Liu static int khugepaged_find_target_node(void) 23479f1b868aSBob Liu { 23489f1b868aSBob Liu return 0; 23499f1b868aSBob Liu } 23509f1b868aSBob Liu 235110dc4155SBob Liu static inline struct page *alloc_hugepage(int defrag) 235210dc4155SBob Liu { 235310dc4155SBob Liu return alloc_pages(alloc_hugepage_gfpmask(defrag, 0), 235410dc4155SBob Liu HPAGE_PMD_ORDER); 235510dc4155SBob Liu } 235610dc4155SBob Liu 235726234f36SXiao Guangrong static struct page *khugepaged_alloc_hugepage(bool *wait) 235826234f36SXiao Guangrong { 235926234f36SXiao Guangrong struct page *hpage; 236026234f36SXiao Guangrong 236126234f36SXiao Guangrong do { 236226234f36SXiao Guangrong hpage = alloc_hugepage(khugepaged_defrag()); 236326234f36SXiao Guangrong if (!hpage) { 236426234f36SXiao Guangrong count_vm_event(THP_COLLAPSE_ALLOC_FAILED); 236526234f36SXiao Guangrong if (!*wait) 236626234f36SXiao Guangrong return NULL; 236726234f36SXiao Guangrong 236826234f36SXiao Guangrong *wait = false; 236926234f36SXiao Guangrong khugepaged_alloc_sleep(); 237026234f36SXiao Guangrong } else 237126234f36SXiao Guangrong count_vm_event(THP_COLLAPSE_ALLOC); 237226234f36SXiao Guangrong } while (unlikely(!hpage) && likely(khugepaged_enabled())); 237326234f36SXiao Guangrong 237426234f36SXiao Guangrong return hpage; 237526234f36SXiao Guangrong } 237626234f36SXiao Guangrong 237726234f36SXiao Guangrong static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) 237826234f36SXiao Guangrong { 237926234f36SXiao Guangrong if (!*hpage) 238026234f36SXiao Guangrong *hpage = 
khugepaged_alloc_hugepage(wait); 238126234f36SXiao Guangrong 238226234f36SXiao Guangrong if (unlikely(!*hpage)) 238326234f36SXiao Guangrong return false; 238426234f36SXiao Guangrong 238526234f36SXiao Guangrong return true; 238626234f36SXiao Guangrong } 238726234f36SXiao Guangrong 238826234f36SXiao Guangrong static struct page 238926234f36SXiao Guangrong *khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm, 239026234f36SXiao Guangrong struct vm_area_struct *vma, unsigned long address, 239126234f36SXiao Guangrong int node) 239226234f36SXiao Guangrong { 239326234f36SXiao Guangrong up_read(&mm->mmap_sem); 239426234f36SXiao Guangrong VM_BUG_ON(!*hpage); 239526234f36SXiao Guangrong return *hpage; 239626234f36SXiao Guangrong } 239726234f36SXiao Guangrong #endif 239826234f36SXiao Guangrong 2399fa475e51SBob Liu static bool hugepage_vma_check(struct vm_area_struct *vma) 2400fa475e51SBob Liu { 2401fa475e51SBob Liu if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || 2402fa475e51SBob Liu (vma->vm_flags & VM_NOHUGEPAGE)) 2403fa475e51SBob Liu return false; 2404fa475e51SBob Liu 2405fa475e51SBob Liu if (!vma->anon_vma || vma->vm_ops) 2406fa475e51SBob Liu return false; 2407fa475e51SBob Liu if (is_vma_temporary_stack(vma)) 2408fa475e51SBob Liu return false; 2409*81d1b09cSSasha Levin VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma); 2410fa475e51SBob Liu return true; 2411fa475e51SBob Liu } 2412fa475e51SBob Liu 2413ba76149fSAndrea Arcangeli static void collapse_huge_page(struct mm_struct *mm, 2414ba76149fSAndrea Arcangeli unsigned long address, 2415ce83d217SAndrea Arcangeli struct page **hpage, 24165c4b4be3SAndi Kleen struct vm_area_struct *vma, 24175c4b4be3SAndi Kleen int node) 2418ba76149fSAndrea Arcangeli { 2419ba76149fSAndrea Arcangeli pmd_t *pmd, _pmd; 2420ba76149fSAndrea Arcangeli pte_t *pte; 2421ba76149fSAndrea Arcangeli pgtable_t pgtable; 2422ba76149fSAndrea Arcangeli struct page *new_page; 2423c4088ebdSKirill A. Shutemov spinlock_t *pmd_ptl, *pte_ptl; 2424ba76149fSAndrea Arcangeli int isolated; 2425ba76149fSAndrea Arcangeli unsigned long hstart, hend; 242600501b53SJohannes Weiner struct mem_cgroup *memcg; 24272ec74c3eSSagi Grimberg unsigned long mmun_start; /* For mmu_notifiers */ 24282ec74c3eSSagi Grimberg unsigned long mmun_end; /* For mmu_notifiers */ 2429ba76149fSAndrea Arcangeli 2430ba76149fSAndrea Arcangeli VM_BUG_ON(address & ~HPAGE_PMD_MASK); 2431692e0b35SAndrea Arcangeli 243226234f36SXiao Guangrong /* release the mmap_sem read lock. */ 243326234f36SXiao Guangrong new_page = khugepaged_alloc_page(hpage, mm, vma, address, node); 243426234f36SXiao Guangrong if (!new_page) 2435ce83d217SAndrea Arcangeli return; 2436ce83d217SAndrea Arcangeli 243700501b53SJohannes Weiner if (unlikely(mem_cgroup_try_charge(new_page, mm, 243800501b53SJohannes Weiner GFP_TRANSHUGE, &memcg))) 2439692e0b35SAndrea Arcangeli return; 2440ba76149fSAndrea Arcangeli 2441ba76149fSAndrea Arcangeli /* 2442ba76149fSAndrea Arcangeli * Prevent all access to pagetables with the exception of 2443ba76149fSAndrea Arcangeli * gup_fast later handled by the ptep_clear_flush and the VM 2444ba76149fSAndrea Arcangeli * handled by the anon_vma lock + PG_lock.
2445ba76149fSAndrea Arcangeli */ 2446ba76149fSAndrea Arcangeli down_write(&mm->mmap_sem); 2447ba76149fSAndrea Arcangeli if (unlikely(khugepaged_test_exit(mm))) 2448ba76149fSAndrea Arcangeli goto out; 2449ba76149fSAndrea Arcangeli 2450ba76149fSAndrea Arcangeli vma = find_vma(mm, address); 2451a8f531ebSLibin if (!vma) 2452a8f531ebSLibin goto out; 2453ba76149fSAndrea Arcangeli hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2454ba76149fSAndrea Arcangeli hend = vma->vm_end & HPAGE_PMD_MASK; 2455ba76149fSAndrea Arcangeli if (address < hstart || address + HPAGE_PMD_SIZE > hend) 2456ba76149fSAndrea Arcangeli goto out; 2457fa475e51SBob Liu if (!hugepage_vma_check(vma)) 2458ba76149fSAndrea Arcangeli goto out; 24596219049aSBob Liu pmd = mm_find_pmd(mm, address); 24606219049aSBob Liu if (!pmd) 2461ba76149fSAndrea Arcangeli goto out; 2462ba76149fSAndrea Arcangeli 24634fc3f1d6SIngo Molnar anon_vma_lock_write(vma->anon_vma); 2464ba76149fSAndrea Arcangeli 2465ba76149fSAndrea Arcangeli pte = pte_offset_map(pmd, address); 2466c4088ebdSKirill A. Shutemov pte_ptl = pte_lockptr(mm, pmd); 2467ba76149fSAndrea Arcangeli 24682ec74c3eSSagi Grimberg mmun_start = address; 24692ec74c3eSSagi Grimberg mmun_end = address + HPAGE_PMD_SIZE; 24702ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 2471c4088ebdSKirill A. Shutemov pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ 2472ba76149fSAndrea Arcangeli /* 2473ba76149fSAndrea Arcangeli * After this gup_fast can't run anymore. This also removes 2474ba76149fSAndrea Arcangeli * any huge TLB entry from the CPU so we won't allow 2475ba76149fSAndrea Arcangeli * huge and small TLB entries for the same virtual address 2476ba76149fSAndrea Arcangeli * to avoid the risk of CPU bugs in that area. 2477ba76149fSAndrea Arcangeli */ 24782ec74c3eSSagi Grimberg _pmd = pmdp_clear_flush(vma, address, pmd); 2479c4088ebdSKirill A. Shutemov spin_unlock(pmd_ptl); 24802ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2481ba76149fSAndrea Arcangeli 2482c4088ebdSKirill A. Shutemov spin_lock(pte_ptl); 2483ba76149fSAndrea Arcangeli isolated = __collapse_huge_page_isolate(vma, address, pte); 2484c4088ebdSKirill A. Shutemov spin_unlock(pte_ptl); 2485ba76149fSAndrea Arcangeli 2486ba76149fSAndrea Arcangeli if (unlikely(!isolated)) { 2487453c7192SJohannes Weiner pte_unmap(pte); 2488c4088ebdSKirill A. Shutemov spin_lock(pmd_ptl); 2489ba76149fSAndrea Arcangeli BUG_ON(!pmd_none(*pmd)); 24907c342512SAneesh Kumar K.V /* 24917c342512SAneesh Kumar K.V * We can only use set_pmd_at when establishing 24927c342512SAneesh Kumar K.V * hugepmds and never for establishing regular pmds that 24937c342512SAneesh Kumar K.V * points to regular pagetables. Use pmd_populate for that 24947c342512SAneesh Kumar K.V */ 24957c342512SAneesh Kumar K.V pmd_populate(mm, pmd, pmd_pgtable(_pmd)); 2496c4088ebdSKirill A. Shutemov spin_unlock(pmd_ptl); 249708b52706SKonstantin Khlebnikov anon_vma_unlock_write(vma->anon_vma); 2498ce83d217SAndrea Arcangeli goto out; 2499ba76149fSAndrea Arcangeli } 2500ba76149fSAndrea Arcangeli 2501ba76149fSAndrea Arcangeli /* 2502ba76149fSAndrea Arcangeli * All pages are isolated and locked so anon_vma rmap 2503ba76149fSAndrea Arcangeli * can't run anymore. 2504ba76149fSAndrea Arcangeli */ 250508b52706SKonstantin Khlebnikov anon_vma_unlock_write(vma->anon_vma); 2506ba76149fSAndrea Arcangeli 2507c4088ebdSKirill A. 
Shutemov __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl); 2508453c7192SJohannes Weiner pte_unmap(pte); 2509ba76149fSAndrea Arcangeli __SetPageUptodate(new_page); 2510ba76149fSAndrea Arcangeli pgtable = pmd_pgtable(_pmd); 2511ba76149fSAndrea Arcangeli 25123122359aSKirill A. Shutemov _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); 25133122359aSKirill A. Shutemov _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); 2514ba76149fSAndrea Arcangeli 2515ba76149fSAndrea Arcangeli /* 2516ba76149fSAndrea Arcangeli * spin_lock() below is not the equivalent of smp_wmb(), so 2517ba76149fSAndrea Arcangeli * this is needed to avoid the copy_huge_page writes to become 2518ba76149fSAndrea Arcangeli * visible after the set_pmd_at() write. 2519ba76149fSAndrea Arcangeli */ 2520ba76149fSAndrea Arcangeli smp_wmb(); 2521ba76149fSAndrea Arcangeli 2522c4088ebdSKirill A. Shutemov spin_lock(pmd_ptl); 2523ba76149fSAndrea Arcangeli BUG_ON(!pmd_none(*pmd)); 2524ba76149fSAndrea Arcangeli page_add_new_anon_rmap(new_page, vma, address); 252500501b53SJohannes Weiner mem_cgroup_commit_charge(new_page, memcg, false); 252600501b53SJohannes Weiner lru_cache_add_active_or_unevictable(new_page, vma); 2527fce144b4SAneesh Kumar K.V pgtable_trans_huge_deposit(mm, pmd, pgtable); 2528ba76149fSAndrea Arcangeli set_pmd_at(mm, address, pmd, _pmd); 2529b113da65SDavid Miller update_mmu_cache_pmd(vma, address, pmd); 2530c4088ebdSKirill A. Shutemov spin_unlock(pmd_ptl); 2531ba76149fSAndrea Arcangeli 2532ba76149fSAndrea Arcangeli *hpage = NULL; 2533420256efSXiao Guangrong 2534ba76149fSAndrea Arcangeli khugepaged_pages_collapsed++; 2535ce83d217SAndrea Arcangeli out_up_write: 2536ba76149fSAndrea Arcangeli up_write(&mm->mmap_sem); 25370bbbc0b3SAndrea Arcangeli return; 25380bbbc0b3SAndrea Arcangeli 2539ce83d217SAndrea Arcangeli out: 254000501b53SJohannes Weiner mem_cgroup_cancel_charge(new_page, memcg); 2541ce83d217SAndrea Arcangeli goto out_up_write; 2542ba76149fSAndrea Arcangeli } 2543ba76149fSAndrea Arcangeli 2544ba76149fSAndrea Arcangeli static int khugepaged_scan_pmd(struct mm_struct *mm, 2545ba76149fSAndrea Arcangeli struct vm_area_struct *vma, 2546ba76149fSAndrea Arcangeli unsigned long address, 2547ba76149fSAndrea Arcangeli struct page **hpage) 2548ba76149fSAndrea Arcangeli { 2549ba76149fSAndrea Arcangeli pmd_t *pmd; 2550ba76149fSAndrea Arcangeli pte_t *pte, *_pte; 2551ba76149fSAndrea Arcangeli int ret = 0, referenced = 0, none = 0; 2552ba76149fSAndrea Arcangeli struct page *page; 2553ba76149fSAndrea Arcangeli unsigned long _address; 2554ba76149fSAndrea Arcangeli spinlock_t *ptl; 255500ef2d2fSDavid Rientjes int node = NUMA_NO_NODE; 2556ba76149fSAndrea Arcangeli 2557ba76149fSAndrea Arcangeli VM_BUG_ON(address & ~HPAGE_PMD_MASK); 2558ba76149fSAndrea Arcangeli 25596219049aSBob Liu pmd = mm_find_pmd(mm, address); 25606219049aSBob Liu if (!pmd) 2561ba76149fSAndrea Arcangeli goto out; 2562ba76149fSAndrea Arcangeli 25639f1b868aSBob Liu memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load)); 2564ba76149fSAndrea Arcangeli pte = pte_offset_map_lock(mm, pmd, address, &ptl); 2565ba76149fSAndrea Arcangeli for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR; 2566ba76149fSAndrea Arcangeli _pte++, _address += PAGE_SIZE) { 2567ba76149fSAndrea Arcangeli pte_t pteval = *_pte; 2568ba76149fSAndrea Arcangeli if (pte_none(pteval)) { 2569ba76149fSAndrea Arcangeli if (++none <= khugepaged_max_ptes_none) 2570ba76149fSAndrea Arcangeli continue; 2571ba76149fSAndrea Arcangeli else 2572ba76149fSAndrea Arcangeli goto out_unmap; 
2573ba76149fSAndrea Arcangeli } 2574ba76149fSAndrea Arcangeli if (!pte_present(pteval) || !pte_write(pteval)) 2575ba76149fSAndrea Arcangeli goto out_unmap; 2576ba76149fSAndrea Arcangeli page = vm_normal_page(vma, _address, pteval); 2577ba76149fSAndrea Arcangeli if (unlikely(!page)) 2578ba76149fSAndrea Arcangeli goto out_unmap; 25795c4b4be3SAndi Kleen /* 25809f1b868aSBob Liu * Record which node the original page is from and save this 25819f1b868aSBob Liu * information to khugepaged_node_load[]. 25829f1b868aSBob Liu * Khugepaged will allocate the hugepage from the node that has the 25839f1b868aSBob Liu * max hit record. 25845c4b4be3SAndi Kleen */ 25855c4b4be3SAndi Kleen node = page_to_nid(page); 258614a4e214SDavid Rientjes if (khugepaged_scan_abort(node)) 258714a4e214SDavid Rientjes goto out_unmap; 25889f1b868aSBob Liu khugepaged_node_load[node]++; 2589309381feSSasha Levin VM_BUG_ON_PAGE(PageCompound(page), page); 2590ba76149fSAndrea Arcangeli if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) 2591ba76149fSAndrea Arcangeli goto out_unmap; 2592ba76149fSAndrea Arcangeli /* cannot use mapcount: can't collapse if there's a gup pin */ 2593ba76149fSAndrea Arcangeli if (page_count(page) != 1) 2594ba76149fSAndrea Arcangeli goto out_unmap; 25958ee53820SAndrea Arcangeli if (pte_young(pteval) || PageReferenced(page) || 25968ee53820SAndrea Arcangeli mmu_notifier_test_young(vma->vm_mm, address)) 2597ba76149fSAndrea Arcangeli referenced = 1; 2598ba76149fSAndrea Arcangeli } 2599ba76149fSAndrea Arcangeli if (referenced) 2600ba76149fSAndrea Arcangeli ret = 1; 2601ba76149fSAndrea Arcangeli out_unmap: 2602ba76149fSAndrea Arcangeli pte_unmap_unlock(pte, ptl); 26039f1b868aSBob Liu if (ret) { 26049f1b868aSBob Liu node = khugepaged_find_target_node(); 2605ce83d217SAndrea Arcangeli /* collapse_huge_page will return with the mmap_sem released */ 26065c4b4be3SAndi Kleen collapse_huge_page(mm, address, hpage, vma, node); 26079f1b868aSBob Liu } 2608ba76149fSAndrea Arcangeli out: 2609ba76149fSAndrea Arcangeli return ret; 2610ba76149fSAndrea Arcangeli } 2611ba76149fSAndrea Arcangeli 2612ba76149fSAndrea Arcangeli static void collect_mm_slot(struct mm_slot *mm_slot) 2613ba76149fSAndrea Arcangeli { 2614ba76149fSAndrea Arcangeli struct mm_struct *mm = mm_slot->mm; 2615ba76149fSAndrea Arcangeli 2616b9980cdcSHugh Dickins VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); 2617ba76149fSAndrea Arcangeli 2618ba76149fSAndrea Arcangeli if (khugepaged_test_exit(mm)) { 2619ba76149fSAndrea Arcangeli /* free mm_slot */ 262043b5fbbdSSasha Levin hash_del(&mm_slot->hash); 2621ba76149fSAndrea Arcangeli list_del(&mm_slot->mm_node); 2622ba76149fSAndrea Arcangeli 2623ba76149fSAndrea Arcangeli /* 2624ba76149fSAndrea Arcangeli * Not strictly needed because the mm exited already.
2625ba76149fSAndrea Arcangeli * 2626ba76149fSAndrea Arcangeli * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); 2627ba76149fSAndrea Arcangeli */ 2628ba76149fSAndrea Arcangeli 2629ba76149fSAndrea Arcangeli /* khugepaged_mm_lock actually not necessary for the below */ 2630ba76149fSAndrea Arcangeli free_mm_slot(mm_slot); 2631ba76149fSAndrea Arcangeli mmdrop(mm); 2632ba76149fSAndrea Arcangeli } 2633ba76149fSAndrea Arcangeli } 2634ba76149fSAndrea Arcangeli 2635ba76149fSAndrea Arcangeli static unsigned int khugepaged_scan_mm_slot(unsigned int pages, 2636ba76149fSAndrea Arcangeli struct page **hpage) 26372f1da642SH Hartley Sweeten __releases(&khugepaged_mm_lock) 26382f1da642SH Hartley Sweeten __acquires(&khugepaged_mm_lock) 2639ba76149fSAndrea Arcangeli { 2640ba76149fSAndrea Arcangeli struct mm_slot *mm_slot; 2641ba76149fSAndrea Arcangeli struct mm_struct *mm; 2642ba76149fSAndrea Arcangeli struct vm_area_struct *vma; 2643ba76149fSAndrea Arcangeli int progress = 0; 2644ba76149fSAndrea Arcangeli 2645ba76149fSAndrea Arcangeli VM_BUG_ON(!pages); 2646b9980cdcSHugh Dickins VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); 2647ba76149fSAndrea Arcangeli 2648ba76149fSAndrea Arcangeli if (khugepaged_scan.mm_slot) 2649ba76149fSAndrea Arcangeli mm_slot = khugepaged_scan.mm_slot; 2650ba76149fSAndrea Arcangeli else { 2651ba76149fSAndrea Arcangeli mm_slot = list_entry(khugepaged_scan.mm_head.next, 2652ba76149fSAndrea Arcangeli struct mm_slot, mm_node); 2653ba76149fSAndrea Arcangeli khugepaged_scan.address = 0; 2654ba76149fSAndrea Arcangeli khugepaged_scan.mm_slot = mm_slot; 2655ba76149fSAndrea Arcangeli } 2656ba76149fSAndrea Arcangeli spin_unlock(&khugepaged_mm_lock); 2657ba76149fSAndrea Arcangeli 2658ba76149fSAndrea Arcangeli mm = mm_slot->mm; 2659ba76149fSAndrea Arcangeli down_read(&mm->mmap_sem); 2660ba76149fSAndrea Arcangeli if (unlikely(khugepaged_test_exit(mm))) 2661ba76149fSAndrea Arcangeli vma = NULL; 2662ba76149fSAndrea Arcangeli else 2663ba76149fSAndrea Arcangeli vma = find_vma(mm, khugepaged_scan.address); 2664ba76149fSAndrea Arcangeli 2665ba76149fSAndrea Arcangeli progress++; 2666ba76149fSAndrea Arcangeli for (; vma; vma = vma->vm_next) { 2667ba76149fSAndrea Arcangeli unsigned long hstart, hend; 2668ba76149fSAndrea Arcangeli 2669ba76149fSAndrea Arcangeli cond_resched(); 2670ba76149fSAndrea Arcangeli if (unlikely(khugepaged_test_exit(mm))) { 2671ba76149fSAndrea Arcangeli progress++; 2672ba76149fSAndrea Arcangeli break; 2673ba76149fSAndrea Arcangeli } 2674fa475e51SBob Liu if (!hugepage_vma_check(vma)) { 2675a7d6e4ecSAndrea Arcangeli skip: 2676ba76149fSAndrea Arcangeli progress++; 2677ba76149fSAndrea Arcangeli continue; 2678ba76149fSAndrea Arcangeli } 2679ba76149fSAndrea Arcangeli hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2680ba76149fSAndrea Arcangeli hend = vma->vm_end & HPAGE_PMD_MASK; 2681a7d6e4ecSAndrea Arcangeli if (hstart >= hend) 2682a7d6e4ecSAndrea Arcangeli goto skip; 2683a7d6e4ecSAndrea Arcangeli if (khugepaged_scan.address > hend) 2684a7d6e4ecSAndrea Arcangeli goto skip; 2685ba76149fSAndrea Arcangeli if (khugepaged_scan.address < hstart) 2686ba76149fSAndrea Arcangeli khugepaged_scan.address = hstart; 2687a7d6e4ecSAndrea Arcangeli VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); 2688ba76149fSAndrea Arcangeli 2689ba76149fSAndrea Arcangeli while (khugepaged_scan.address < hend) { 2690ba76149fSAndrea Arcangeli int ret; 2691ba76149fSAndrea Arcangeli cond_resched(); 2692ba76149fSAndrea Arcangeli if (unlikely(khugepaged_test_exit(mm))) 2693ba76149fSAndrea Arcangeli 
goto breakouterloop; 2694ba76149fSAndrea Arcangeli 2695ba76149fSAndrea Arcangeli VM_BUG_ON(khugepaged_scan.address < hstart || 2696ba76149fSAndrea Arcangeli khugepaged_scan.address + HPAGE_PMD_SIZE > 2697ba76149fSAndrea Arcangeli hend); 2698ba76149fSAndrea Arcangeli ret = khugepaged_scan_pmd(mm, vma, 2699ba76149fSAndrea Arcangeli khugepaged_scan.address, 2700ba76149fSAndrea Arcangeli hpage); 2701ba76149fSAndrea Arcangeli /* move to next address */ 2702ba76149fSAndrea Arcangeli khugepaged_scan.address += HPAGE_PMD_SIZE; 2703ba76149fSAndrea Arcangeli progress += HPAGE_PMD_NR; 2704ba76149fSAndrea Arcangeli if (ret) 2705ba76149fSAndrea Arcangeli /* we released mmap_sem so break loop */ 2706ba76149fSAndrea Arcangeli goto breakouterloop_mmap_sem; 2707ba76149fSAndrea Arcangeli if (progress >= pages) 2708ba76149fSAndrea Arcangeli goto breakouterloop; 2709ba76149fSAndrea Arcangeli } 2710ba76149fSAndrea Arcangeli } 2711ba76149fSAndrea Arcangeli breakouterloop: 2712ba76149fSAndrea Arcangeli up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */ 2713ba76149fSAndrea Arcangeli breakouterloop_mmap_sem: 2714ba76149fSAndrea Arcangeli 2715ba76149fSAndrea Arcangeli spin_lock(&khugepaged_mm_lock); 2716a7d6e4ecSAndrea Arcangeli VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); 2717ba76149fSAndrea Arcangeli /* 2718ba76149fSAndrea Arcangeli * Release the current mm_slot if this mm is about to die, or 2719ba76149fSAndrea Arcangeli * if we scanned all vmas of this mm. 2720ba76149fSAndrea Arcangeli */ 2721ba76149fSAndrea Arcangeli if (khugepaged_test_exit(mm) || !vma) { 2722ba76149fSAndrea Arcangeli /* 2723ba76149fSAndrea Arcangeli * Make sure that if mm_users is reaching zero while 2724ba76149fSAndrea Arcangeli * khugepaged runs here, khugepaged_exit will find 2725ba76149fSAndrea Arcangeli * mm_slot not pointing to the exiting mm. 
2726ba76149fSAndrea Arcangeli */ 2727ba76149fSAndrea Arcangeli if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) { 2728ba76149fSAndrea Arcangeli khugepaged_scan.mm_slot = list_entry( 2729ba76149fSAndrea Arcangeli mm_slot->mm_node.next, 2730ba76149fSAndrea Arcangeli struct mm_slot, mm_node); 2731ba76149fSAndrea Arcangeli khugepaged_scan.address = 0; 2732ba76149fSAndrea Arcangeli } else { 2733ba76149fSAndrea Arcangeli khugepaged_scan.mm_slot = NULL; 2734ba76149fSAndrea Arcangeli khugepaged_full_scans++; 2735ba76149fSAndrea Arcangeli } 2736ba76149fSAndrea Arcangeli 2737ba76149fSAndrea Arcangeli collect_mm_slot(mm_slot); 2738ba76149fSAndrea Arcangeli } 2739ba76149fSAndrea Arcangeli 2740ba76149fSAndrea Arcangeli return progress; 2741ba76149fSAndrea Arcangeli } 2742ba76149fSAndrea Arcangeli 2743ba76149fSAndrea Arcangeli static int khugepaged_has_work(void) 2744ba76149fSAndrea Arcangeli { 2745ba76149fSAndrea Arcangeli return !list_empty(&khugepaged_scan.mm_head) && 2746ba76149fSAndrea Arcangeli khugepaged_enabled(); 2747ba76149fSAndrea Arcangeli } 2748ba76149fSAndrea Arcangeli 2749ba76149fSAndrea Arcangeli static int khugepaged_wait_event(void) 2750ba76149fSAndrea Arcangeli { 2751ba76149fSAndrea Arcangeli return !list_empty(&khugepaged_scan.mm_head) || 27522017c0bfSXiao Guangrong kthread_should_stop(); 2753ba76149fSAndrea Arcangeli } 2754ba76149fSAndrea Arcangeli 2755d516904bSXiao Guangrong static void khugepaged_do_scan(void) 2756d516904bSXiao Guangrong { 2757d516904bSXiao Guangrong struct page *hpage = NULL; 2758ba76149fSAndrea Arcangeli unsigned int progress = 0, pass_through_head = 0; 2759ba76149fSAndrea Arcangeli unsigned int pages = khugepaged_pages_to_scan; 2760d516904bSXiao Guangrong bool wait = true; 2761ba76149fSAndrea Arcangeli 2762ba76149fSAndrea Arcangeli barrier(); /* write khugepaged_pages_to_scan to local stack */ 2763ba76149fSAndrea Arcangeli 2764ba76149fSAndrea Arcangeli while (progress < pages) { 276526234f36SXiao Guangrong if (!khugepaged_prealloc_page(&hpage, &wait)) 276626234f36SXiao Guangrong break; 2767d516904bSXiao Guangrong 2768420256efSXiao Guangrong cond_resched(); 2769ba76149fSAndrea Arcangeli 2770878aee7dSAndrea Arcangeli if (unlikely(kthread_should_stop() || freezing(current))) 2771878aee7dSAndrea Arcangeli break; 2772878aee7dSAndrea Arcangeli 2773ba76149fSAndrea Arcangeli spin_lock(&khugepaged_mm_lock); 2774ba76149fSAndrea Arcangeli if (!khugepaged_scan.mm_slot) 2775ba76149fSAndrea Arcangeli pass_through_head++; 2776ba76149fSAndrea Arcangeli if (khugepaged_has_work() && 2777ba76149fSAndrea Arcangeli pass_through_head < 2) 2778ba76149fSAndrea Arcangeli progress += khugepaged_scan_mm_slot(pages - progress, 2779d516904bSXiao Guangrong &hpage); 2780ba76149fSAndrea Arcangeli else 2781ba76149fSAndrea Arcangeli progress = pages; 2782ba76149fSAndrea Arcangeli spin_unlock(&khugepaged_mm_lock); 2783ba76149fSAndrea Arcangeli } 2784ba76149fSAndrea Arcangeli 2785d516904bSXiao Guangrong if (!IS_ERR_OR_NULL(hpage)) 2786d516904bSXiao Guangrong put_page(hpage); 2787ba76149fSAndrea Arcangeli } 27880bbbc0b3SAndrea Arcangeli 27892017c0bfSXiao Guangrong static void khugepaged_wait_work(void) 27902017c0bfSXiao Guangrong { 27912017c0bfSXiao Guangrong try_to_freeze(); 27922017c0bfSXiao Guangrong 27932017c0bfSXiao Guangrong if (khugepaged_has_work()) { 27942017c0bfSXiao Guangrong if (!khugepaged_scan_sleep_millisecs) 27952017c0bfSXiao Guangrong return; 27962017c0bfSXiao Guangrong 27972017c0bfSXiao Guangrong wait_event_freezable_timeout(khugepaged_wait, 27982017c0bfSXiao Guangrong 
kthread_should_stop(), 27992017c0bfSXiao Guangrong msecs_to_jiffies(khugepaged_scan_sleep_millisecs)); 28002017c0bfSXiao Guangrong return; 28012017c0bfSXiao Guangrong } 28022017c0bfSXiao Guangrong 28032017c0bfSXiao Guangrong if (khugepaged_enabled()) 28042017c0bfSXiao Guangrong wait_event_freezable(khugepaged_wait, khugepaged_wait_event()); 28052017c0bfSXiao Guangrong } 28062017c0bfSXiao Guangrong 2807ba76149fSAndrea Arcangeli static int khugepaged(void *none) 2808ba76149fSAndrea Arcangeli { 2809ba76149fSAndrea Arcangeli struct mm_slot *mm_slot; 2810ba76149fSAndrea Arcangeli 2811878aee7dSAndrea Arcangeli set_freezable(); 28128698a745SDongsheng Yang set_user_nice(current, MAX_NICE); 2813ba76149fSAndrea Arcangeli 2814b7231789SXiao Guangrong while (!kthread_should_stop()) { 2815b7231789SXiao Guangrong khugepaged_do_scan(); 2816b7231789SXiao Guangrong khugepaged_wait_work(); 2817b7231789SXiao Guangrong } 2818ba76149fSAndrea Arcangeli 2819ba76149fSAndrea Arcangeli spin_lock(&khugepaged_mm_lock); 2820ba76149fSAndrea Arcangeli mm_slot = khugepaged_scan.mm_slot; 2821ba76149fSAndrea Arcangeli khugepaged_scan.mm_slot = NULL; 2822ba76149fSAndrea Arcangeli if (mm_slot) 2823ba76149fSAndrea Arcangeli collect_mm_slot(mm_slot); 2824ba76149fSAndrea Arcangeli spin_unlock(&khugepaged_mm_lock); 2825ba76149fSAndrea Arcangeli return 0; 2826ba76149fSAndrea Arcangeli } 2827ba76149fSAndrea Arcangeli 2828c5a647d0SKirill A. Shutemov static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, 2829c5a647d0SKirill A. Shutemov unsigned long haddr, pmd_t *pmd) 2830c5a647d0SKirill A. Shutemov { 2831c5a647d0SKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 2832c5a647d0SKirill A. Shutemov pgtable_t pgtable; 2833c5a647d0SKirill A. Shutemov pmd_t _pmd; 2834c5a647d0SKirill A. Shutemov int i; 2835c5a647d0SKirill A. Shutemov 2836c5a647d0SKirill A. Shutemov pmdp_clear_flush(vma, haddr, pmd); 2837c5a647d0SKirill A. Shutemov /* leave pmd empty until pte is filled */ 2838c5a647d0SKirill A. Shutemov 28396b0b50b0SAneesh Kumar K.V pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2840c5a647d0SKirill A. Shutemov pmd_populate(mm, &_pmd, pgtable); 2841c5a647d0SKirill A. Shutemov 2842c5a647d0SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 2843c5a647d0SKirill A. Shutemov pte_t *pte, entry; 2844c5a647d0SKirill A. Shutemov entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); 2845c5a647d0SKirill A. Shutemov entry = pte_mkspecial(entry); 2846c5a647d0SKirill A. Shutemov pte = pte_offset_map(&_pmd, haddr); 2847c5a647d0SKirill A. Shutemov VM_BUG_ON(!pte_none(*pte)); 2848c5a647d0SKirill A. Shutemov set_pte_at(mm, haddr, pte, entry); 2849c5a647d0SKirill A. Shutemov pte_unmap(pte); 2850c5a647d0SKirill A. Shutemov } 2851c5a647d0SKirill A. Shutemov smp_wmb(); /* make pte visible before pmd */ 2852c5a647d0SKirill A. Shutemov pmd_populate(mm, pmd, pgtable); 285397ae1749SKirill A. Shutemov put_huge_zero_page(); 2854c5a647d0SKirill A. Shutemov } 2855c5a647d0SKirill A. Shutemov 2856e180377fSKirill A. Shutemov void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address, 2857e180377fSKirill A. Shutemov pmd_t *pmd) 285871e3aac0SAndrea Arcangeli { 2859c4088ebdSKirill A. Shutemov spinlock_t *ptl; 286071e3aac0SAndrea Arcangeli struct page *page; 2861e180377fSKirill A. Shutemov struct mm_struct *mm = vma->vm_mm; 2862c5a647d0SKirill A. Shutemov unsigned long haddr = address & HPAGE_PMD_MASK; 2863c5a647d0SKirill A. Shutemov unsigned long mmun_start; /* For mmu_notifiers */ 2864c5a647d0SKirill A. 
Shutemov unsigned long mmun_end; /* For mmu_notifiers */ 2865e180377fSKirill A. Shutemov 2866e180377fSKirill A. Shutemov BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE); 286771e3aac0SAndrea Arcangeli 2868c5a647d0SKirill A. Shutemov mmun_start = haddr; 2869c5a647d0SKirill A. Shutemov mmun_end = haddr + HPAGE_PMD_SIZE; 2870750e8165SHugh Dickins again: 2871c5a647d0SKirill A. Shutemov mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 2872c4088ebdSKirill A. Shutemov ptl = pmd_lock(mm, pmd); 287371e3aac0SAndrea Arcangeli if (unlikely(!pmd_trans_huge(*pmd))) { 2874c4088ebdSKirill A. Shutemov spin_unlock(ptl); 2875c5a647d0SKirill A. Shutemov mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2876c5a647d0SKirill A. Shutemov return; 2877c5a647d0SKirill A. Shutemov } 2878c5a647d0SKirill A. Shutemov if (is_huge_zero_pmd(*pmd)) { 2879c5a647d0SKirill A. Shutemov __split_huge_zero_page_pmd(vma, haddr, pmd); 2880c4088ebdSKirill A. Shutemov spin_unlock(ptl); 2881c5a647d0SKirill A. Shutemov mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 288271e3aac0SAndrea Arcangeli return; 288371e3aac0SAndrea Arcangeli } 288471e3aac0SAndrea Arcangeli page = pmd_page(*pmd); 2885309381feSSasha Levin VM_BUG_ON_PAGE(!page_count(page), page); 288671e3aac0SAndrea Arcangeli get_page(page); 2887c4088ebdSKirill A. Shutemov spin_unlock(ptl); 2888c5a647d0SKirill A. Shutemov mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 288971e3aac0SAndrea Arcangeli 289071e3aac0SAndrea Arcangeli split_huge_page(page); 289171e3aac0SAndrea Arcangeli 289271e3aac0SAndrea Arcangeli put_page(page); 2893750e8165SHugh Dickins 2894750e8165SHugh Dickins /* 2895750e8165SHugh Dickins * We don't always have down_write of mmap_sem here: a racing 2896750e8165SHugh Dickins * do_huge_pmd_wp_page() might have copied-on-write to another 2897750e8165SHugh Dickins * huge page before our split_huge_page() got the anon_vma lock. 2898750e8165SHugh Dickins */ 2899750e8165SHugh Dickins if (unlikely(pmd_trans_huge(*pmd))) 2900750e8165SHugh Dickins goto again; 290171e3aac0SAndrea Arcangeli } 290294fcc585SAndrea Arcangeli 2903e180377fSKirill A. Shutemov void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address, 2904e180377fSKirill A. Shutemov pmd_t *pmd) 2905e180377fSKirill A. Shutemov { 2906e180377fSKirill A. Shutemov struct vm_area_struct *vma; 2907e180377fSKirill A. Shutemov 2908e180377fSKirill A. Shutemov vma = find_vma(mm, address); 2909e180377fSKirill A. Shutemov BUG_ON(vma == NULL); 2910e180377fSKirill A. Shutemov split_huge_page_pmd(vma, address, pmd); 2911e180377fSKirill A. Shutemov } 2912e180377fSKirill A. 
Shutemov 291394fcc585SAndrea Arcangeli static void split_huge_page_address(struct mm_struct *mm, 291494fcc585SAndrea Arcangeli unsigned long address) 291594fcc585SAndrea Arcangeli { 2916f72e7dcdSHugh Dickins pgd_t *pgd; 2917f72e7dcdSHugh Dickins pud_t *pud; 291894fcc585SAndrea Arcangeli pmd_t *pmd; 291994fcc585SAndrea Arcangeli 292094fcc585SAndrea Arcangeli VM_BUG_ON(!(address & ~HPAGE_PMD_MASK)); 292194fcc585SAndrea Arcangeli 2922f72e7dcdSHugh Dickins pgd = pgd_offset(mm, address); 2923f72e7dcdSHugh Dickins if (!pgd_present(*pgd)) 2924f72e7dcdSHugh Dickins return; 2925f72e7dcdSHugh Dickins 2926f72e7dcdSHugh Dickins pud = pud_offset(pgd, address); 2927f72e7dcdSHugh Dickins if (!pud_present(*pud)) 2928f72e7dcdSHugh Dickins return; 2929f72e7dcdSHugh Dickins 2930f72e7dcdSHugh Dickins pmd = pmd_offset(pud, address); 2931f72e7dcdSHugh Dickins if (!pmd_present(*pmd)) 293294fcc585SAndrea Arcangeli return; 293394fcc585SAndrea Arcangeli /* 293494fcc585SAndrea Arcangeli * Caller holds the mmap_sem write mode, so a huge pmd cannot 293594fcc585SAndrea Arcangeli * materialize from under us. 293694fcc585SAndrea Arcangeli */ 2937e180377fSKirill A. Shutemov split_huge_page_pmd_mm(mm, address, pmd); 293894fcc585SAndrea Arcangeli } 293994fcc585SAndrea Arcangeli 294094fcc585SAndrea Arcangeli void __vma_adjust_trans_huge(struct vm_area_struct *vma, 294194fcc585SAndrea Arcangeli unsigned long start, 294294fcc585SAndrea Arcangeli unsigned long end, 294394fcc585SAndrea Arcangeli long adjust_next) 294494fcc585SAndrea Arcangeli { 294594fcc585SAndrea Arcangeli /* 294694fcc585SAndrea Arcangeli * If the new start address isn't hpage aligned and it could 294794fcc585SAndrea Arcangeli * previously contain an hugepage: check if we need to split 294894fcc585SAndrea Arcangeli * an huge pmd. 294994fcc585SAndrea Arcangeli */ 295094fcc585SAndrea Arcangeli if (start & ~HPAGE_PMD_MASK && 295194fcc585SAndrea Arcangeli (start & HPAGE_PMD_MASK) >= vma->vm_start && 295294fcc585SAndrea Arcangeli (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) 295394fcc585SAndrea Arcangeli split_huge_page_address(vma->vm_mm, start); 295494fcc585SAndrea Arcangeli 295594fcc585SAndrea Arcangeli /* 295694fcc585SAndrea Arcangeli * If the new end address isn't hpage aligned and it could 295794fcc585SAndrea Arcangeli * previously contain an hugepage: check if we need to split 295894fcc585SAndrea Arcangeli * an huge pmd. 295994fcc585SAndrea Arcangeli */ 296094fcc585SAndrea Arcangeli if (end & ~HPAGE_PMD_MASK && 296194fcc585SAndrea Arcangeli (end & HPAGE_PMD_MASK) >= vma->vm_start && 296294fcc585SAndrea Arcangeli (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) 296394fcc585SAndrea Arcangeli split_huge_page_address(vma->vm_mm, end); 296494fcc585SAndrea Arcangeli 296594fcc585SAndrea Arcangeli /* 296694fcc585SAndrea Arcangeli * If we're also updating the vma->vm_next->vm_start, if the new 296794fcc585SAndrea Arcangeli * vm_next->vm_start isn't page aligned and it could previously 296894fcc585SAndrea Arcangeli * contain an hugepage: check if we need to split an huge pmd. 
296994fcc585SAndrea Arcangeli */ 297094fcc585SAndrea Arcangeli if (adjust_next > 0) { 297194fcc585SAndrea Arcangeli struct vm_area_struct *next = vma->vm_next; 297294fcc585SAndrea Arcangeli unsigned long nstart = next->vm_start; 297394fcc585SAndrea Arcangeli nstart += adjust_next << PAGE_SHIFT; 297494fcc585SAndrea Arcangeli if (nstart & ~HPAGE_PMD_MASK && 297594fcc585SAndrea Arcangeli (nstart & HPAGE_PMD_MASK) >= next->vm_start && 297694fcc585SAndrea Arcangeli (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end) 297794fcc585SAndrea Arcangeli split_huge_page_address(next->vm_mm, nstart); 297894fcc585SAndrea Arcangeli } 297994fcc585SAndrea Arcangeli } 2980
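/*
 * A minimal illustrative sketch of the hstart/hend arithmetic that
 * khugepaged_enter_vma_merge(), collapse_huge_page() and
 * khugepaged_scan_mm_slot() each repeat inline above.  The helper name
 * is hypothetical and not part of this file; it only spells out the
 * rounding: hstart is the vma start rounded up to a PMD boundary, hend
 * is the vma end rounded down, and only when hstart < hend does the vma
 * cover at least one naturally aligned HPAGE_PMD_SIZE range that
 * khugepaged could collapse.
 */
static inline bool thp_usable_range_sketch(unsigned long vm_start,
					   unsigned long vm_end,
					   unsigned long *hstart,
					   unsigned long *hend)
{
	/* round up to the first PMD-aligned address inside the vma */
	*hstart = (vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	/* round down to the last PMD-aligned address inside the vma */
	*hend = vm_end & HPAGE_PMD_MASK;
	/*
	 * Example with 2MB huge pages: vm_start = 0x201000 and
	 * vm_end = 0x600000 give hstart = 0x400000 and hend = 0x600000,
	 * so exactly one huge page (at 0x400000) fits in the vma.
	 */
	return *hstart < *hend;
}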