/*
 * Copyright (C) 2009  Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/migrate.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default transparent hugepage support is enabled for all mappings
 * and khugepaged scans all mappings. Defrag is only invoked by
 * khugepaged hugepage allocations and by page faults inside
 * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived
 * allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation, poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse a hugepage if at least one pte is mapped the
 * way it would have been mapped had the vma been large enough during
 * page fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;

static int khugepaged(void *none);
static int mm_slots_hash_init(void);
static int khugepaged_slab_init(void);
static void khugepaged_slab_free(void);

#define MM_SLOTS_HASH_HEADS 1024
static struct hlist_head *mm_slots_hash __read_mostly;
static struct kmem_cache *mm_slot_cache __read_mostly;

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};
static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

static int set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;
	extern int min_free_kbytes;

	if (!khugepaged_enabled())
		return 0;

	for_each_populated_zone(zone)
		nr_zones++;

	/* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* never allow reserving more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes)
		min_free_kbytes = recommended_min;
	setup_per_zone_wmarks();
	return 0;
}
late_initcall(set_recommended_min_free_kbytes);

static int start_khugepaged(void)
{
	int err = 0;
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (unlikely(IS_ERR(khugepaged_thread))) {
			printk(KERN_ERR
			       "khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}

	return err;
}

static atomic_t huge_zero_refcount;
static unsigned long huge_zero_pfn __read_mostly;

static inline bool is_huge_zero_pfn(unsigned long pfn)
{
	unsigned long zero_pfn = ACCESS_ONCE(huge_zero_pfn);
	return zero_pfn && pfn == zero_pfn;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_pfn(pmd_pfn(pmd));
}

static unsigned long get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return ACCESS_ONCE(huge_zero_pfn);

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return 0;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_pfn, 0, page_to_pfn(zero_page))) {
		preempt_enable();
		__free_page(zero_page);
		goto retry;
	}

	/* We take an additional reference here. It will be put back by the shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return ACCESS_ONCE(huge_zero_pfn);
}

static void put_huge_zero_page(void)
{
	/*
	 * The counter should never go to zero here. Only the shrinker can
	 * put the last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

static int shrink_huge_zero_page(struct shrinker *shrink,
		struct shrink_control *sc)
{
	if (!sc->nr_to_scan)
		/* we can free the zero page only if the last reference remains */
		return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;

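	/*
	 * Drop the extra reference taken at allocation time, but only if it
	 * is the last one left, i.e. no huge zero pmd references the page.
	 */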
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		unsigned long zero_pfn = xchg(&huge_zero_pfn, 0);
		BUG_ON(zero_pfn == 0);
		__free_page(__pfn_to_page(zero_pfn));
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.shrink = shrink_huge_zero_page,
	.seeks = DEFAULT_SEEKS,
};

#ifdef CONFIG_SYSFS

static ssize_t double_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag enabled,
				enum transparent_hugepage_flag req_madv)
{
	if (test_bit(enabled, &transparent_hugepage_flags)) {
		VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
		return sprintf(buf, "[always] madvise never\n");
	} else if (test_bit(req_madv, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}
static ssize_t double_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag enabled,
				 enum transparent_hugepage_flag req_madv)
{
	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		set_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		set_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
}
static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret;

	ret = double_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);

	if (ret > 0) {
		int err;

		mutex_lock(&khugepaged_mutex);
		err = start_khugepaged();
		mutex_unlock(&khugepaged_mutex);

		if (err)
			ret = err;
	}

	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);

static ssize_t single_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	return sprintf(buf, "%d\n",
		       !!test_bit(flag, &transparent_hugepage_flags));
}

static ssize_t single_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

/*
 * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
 * __GFP_REPEAT is too aggressive; it's never worth swapping tons of
 * memory just to allocate one more hugepage.
 */
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return double_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);

static ssize_t use_zero_page_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr =
	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = strict_strtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = strict_strtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long pages;

	err = strict_strtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls whether khugepaged may collapse a hugepage over
 * a range containing unmapped ptes, which can increase the memory
 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
 * reduce the available free memory in the system as it runs.
 * Increasing max_ptes_none will instead potentially reduce the free
 * memory in the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = strict_strtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

static struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		printk(KERN_ERR "hugepage: failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

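	/*
	 * Success: both attribute groups are now visible under
	 * /sys/kernel/mm/transparent_hugepage, the khugepaged group as a
	 * named subdirectory.
	 */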
	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 0;
		return -EINVAL;
	}

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		return err;

	err = khugepaged_slab_init();
	if (err)
		goto out;

	err = mm_slots_hash_init();
	if (err) {
		khugepaged_slab_free();
		goto out;
	}

	register_shrinker(&huge_zero_page_shrinker);

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save. The admin can still enable it through /sys.
	 */
	if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
		transparent_hugepage_flags = 0;

	start_khugepaged();

	return 0;
out:
	hugepage_exit_sysfs(hugepage_kobj);
	return err;
}
module_init(hugepage_init)

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		printk(KERN_WARNING
		       "transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

static inline pmd_t mk_huge_pmd(struct page *page, struct vm_area_struct *vma)
{
	pmd_t entry;
	entry = mk_pmd(page, vma->vm_page_prot);
	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
	entry = pmd_mkhuge(entry);
	return entry;
}

static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd,
					struct page *page)
{
	pgtable_t pgtable;

	VM_BUG_ON(!PageCompound(page));
	pgtable = pte_alloc_one(mm, haddr);
	if (unlikely(!pgtable))
		return VM_FAULT_OOM;

	clear_huge_page(page, haddr, HPAGE_PMD_NR);
	__SetPageUptodate(page);

	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_none(*pmd))) {
		spin_unlock(&mm->page_table_lock);
		mem_cgroup_uncharge_page(page);
		put_page(page);
		pte_free(mm, pgtable);
	} else {
		pmd_t entry;
		entry = mk_huge_pmd(page, vma);
		/*
		 * The spinlocking to take the lru_lock inside
		 * page_add_new_anon_rmap() acts as a full memory
		 * barrier to be sure clear_huge_page writes become
		 * visible after the set_pmd_at() write.
		 */
		page_add_new_anon_rmap(page, vma, haddr);
		set_pmd_at(mm, haddr, pmd, entry);
		pgtable_trans_huge_deposit(mm, pgtable);
		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm->nr_ptes++;
		spin_unlock(&mm->page_table_lock);
	}

	return 0;
}

static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
{
	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
}

static inline struct page *alloc_hugepage_vma(int defrag,
					      struct vm_area_struct *vma,
					      unsigned long haddr, int nd,
					      gfp_t extra_gfp)
{
	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
			       HPAGE_PMD_ORDER, vma, haddr, nd);
}

#ifndef CONFIG_NUMA
static inline struct page *alloc_hugepage(int defrag)
{
	return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
			   HPAGE_PMD_ORDER);
}
#endif

static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		unsigned long zero_pfn)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return false;
	entry = pfn_pmd(zero_pfn, vma->vm_page_prot);
	entry = pmd_wrprotect(entry);
	entry = pmd_mkhuge(entry);
	set_pmd_at(mm, haddr, pmd, entry);
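	/* deposit the preallocated page table for a future split of this pmd */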
	pgtable_trans_huge_deposit(mm, pgtable);
	mm->nr_ptes++;
	return true;
}

int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       unsigned int flags)
{
	struct page *page;
	unsigned long haddr = address & HPAGE_PMD_MASK;
	pte_t *pte;

	if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
		if (unlikely(anon_vma_prepare(vma)))
			return VM_FAULT_OOM;
		if (unlikely(khugepaged_enter(vma)))
			return VM_FAULT_OOM;
		if (!(flags & FAULT_FLAG_WRITE) &&
				transparent_hugepage_use_zero_page()) {
			pgtable_t pgtable;
			unsigned long zero_pfn;
			bool set;
			pgtable = pte_alloc_one(mm, haddr);
			if (unlikely(!pgtable))
				return VM_FAULT_OOM;
			zero_pfn = get_huge_zero_page();
			if (unlikely(!zero_pfn)) {
				pte_free(mm, pgtable);
				count_vm_event(THP_FAULT_FALLBACK);
				goto out;
			}
			spin_lock(&mm->page_table_lock);
			set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
					zero_pfn);
			spin_unlock(&mm->page_table_lock);
			if (!set) {
				pte_free(mm, pgtable);
				put_huge_zero_page();
			}
			return 0;
		}
		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
					  vma, haddr, numa_node_id(), 0);
		if (unlikely(!page)) {
			count_vm_event(THP_FAULT_FALLBACK);
			goto out;
		}
		count_vm_event(THP_FAULT_ALLOC);
		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
			put_page(page);
			goto out;
		}
		if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd,
							  page))) {
			mem_cgroup_uncharge_page(page);
			put_page(page);
			goto out;
		}

		return 0;
	}
out:
	/*
	 * Use __pte_alloc() instead of pte_alloc_map(), because we can't
	 * run pte_offset_map() on the pmd: a huge pmd could materialize
	 * from under us from a different thread.
	 */
	if (unlikely(pmd_none(*pmd)) &&
	    unlikely(__pte_alloc(mm, vma, pmd, address)))
		return VM_FAULT_OOM;
	/* if a huge pmd materialized from under us, just retry later */
	if (unlikely(pmd_trans_huge(*pmd)))
		return 0;
	/*
	 * A regular pmd is established and it can't morph into a huge pmd
	 * from under us anymore at this point because we hold the mmap_sem
	 * in read mode and khugepaged takes it in write mode. So now it's
	 * safe to run pte_offset_map().
	 */
	pte = pte_offset_map(pmd, address);
	return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}

int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma)
{
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable;
	int ret;

	ret = -ENOMEM;
	pgtable = pte_alloc_one(dst_mm, addr);
	if (unlikely(!pgtable))
		goto out;

	spin_lock(&dst_mm->page_table_lock);
	spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;
	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	/*
	 * mm->page_table_lock is enough to be sure that the huge zero pmd is
	 * not under splitting, since we never split the zero page itself,
	 * only the pmd into a page table.
	 */
	if (is_huge_zero_pmd(pmd)) {
		unsigned long zero_pfn;
		bool set;
		/*
		 * get_huge_zero_page() will never allocate a new page here,
		 * since we already have a zero page to copy. It just takes a
		 * reference.
		 */
		zero_pfn = get_huge_zero_page();
		set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
				zero_pfn);
		BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */
		ret = 0;
		goto out_unlock;
	}
	if (unlikely(pmd_trans_splitting(pmd))) {
		/* split huge page running from under us */
		spin_unlock(&src_mm->page_table_lock);
		spin_unlock(&dst_mm->page_table_lock);
		pte_free(dst_mm, pgtable);

		wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
		goto out;
	}
	src_page = pmd_page(pmd);
	VM_BUG_ON(!PageHead(src_page));
	get_page(src_page);
	page_dup_rmap(src_page);
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);

	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
	pgtable_trans_huge_deposit(dst_mm, pgtable);
	dst_mm->nr_ptes++;

	ret = 0;
out_unlock:
	spin_unlock(&src_mm->page_table_lock);
	spin_unlock(&dst_mm->page_table_lock);
out:
	return ret;
}

void huge_pmd_set_accessed(struct mm_struct *mm,
			   struct vm_area_struct *vma,
			   unsigned long address,
			   pmd_t *pmd, pmd_t orig_pmd,
			   int dirty)
{
	pmd_t entry;
	unsigned long haddr;

	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto unlock;

	entry = pmd_mkyoung(orig_pmd);
	haddr = address & HPAGE_PMD_MASK;
	if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
		update_mmu_cache_pmd(vma, address, pmd);

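	/*
	 * If pmdp_set_access_flags() made no change, the accessed (and
	 * possibly dirty) bits were already set: nothing to flush.
	 */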
unlock:
	spin_unlock(&mm->page_table_lock);
}

static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, pmd_t orig_pmd, unsigned long haddr)
{
	pgtable_t pgtable;
	pmd_t _pmd;
	struct page *page;
	int i, ret = 0;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
	if (!page) {
		ret |= VM_FAULT_OOM;
		goto out;
	}

	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
		put_page(page);
		ret |= VM_FAULT_OOM;
		goto out;
	}

	clear_user_highpage(page, address);
	__SetPageUptodate(page);

	mmun_start = haddr;
	mmun_end   = haddr + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto out_free_page;

	pmdp_clear_flush(vma, haddr, pmd);
	/* leave pmd empty until pte is filled */

	pgtable = pgtable_trans_huge_withdraw(mm);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		if (haddr == (address & PAGE_MASK)) {
			entry = mk_pte(page, vma->vm_page_prot);
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
			page_add_new_anon_rmap(page, vma, haddr);
		} else {
			entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
			entry = pte_mkspecial(entry);
		}
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
	spin_unlock(&mm->page_table_lock);
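	/* the huge zero pmd is gone: drop the reference taken when it was installed */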
	put_huge_zero_page();
	inc_mm_counter(mm, MM_ANONPAGES);

	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	ret |= VM_FAULT_WRITE;
out:
	return ret;
out_free_page:
	spin_unlock(&mm->page_table_lock);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	mem_cgroup_uncharge_page(page);
	put_page(page);
	goto out;
}

static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmd, pmd_t orig_pmd,
					struct page *page,
					unsigned long haddr)
{
	pgtable_t pgtable;
	pmd_t _pmd;
	int ret = 0, i;
	struct page **pages;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
			GFP_KERNEL);
	if (unlikely(!pages)) {
		ret |= VM_FAULT_OOM;
		goto out;
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
					       __GFP_OTHER_NODE,
					       vma, address, page_to_nid(page));
		if (unlikely(!pages[i] ||
			     mem_cgroup_newpage_charge(pages[i], mm,
						       GFP_KERNEL))) {
			if (pages[i])
				put_page(pages[i]);
			mem_cgroup_uncharge_start();
			while (--i >= 0) {
				mem_cgroup_uncharge_page(pages[i]);
				put_page(pages[i]);
			}
			mem_cgroup_uncharge_end();
			kfree(pages);
			ret |= VM_FAULT_OOM;
			goto out;
		}
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		copy_user_highpage(pages[i], page + i,
				   haddr + PAGE_SIZE * i, vma);
		__SetPageUptodate(pages[i]);
		cond_resched();
	}

	mmun_start = haddr;
	mmun_end   = haddr + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

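	/*
	 * Retake the page table lock and recheck the pmd: it may have been
	 * zapped or split while the copies ran without the lock held.
	 */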
108971e3aac0SAndrea Arcangeli spin_lock(&mm->page_table_lock); 109071e3aac0SAndrea Arcangeli if (unlikely(!pmd_same(*pmd, orig_pmd))) 109171e3aac0SAndrea Arcangeli goto out_free_pages; 109271e3aac0SAndrea Arcangeli VM_BUG_ON(!PageHead(page)); 109371e3aac0SAndrea Arcangeli 10942ec74c3eSSagi Grimberg pmdp_clear_flush(vma, haddr, pmd); 109571e3aac0SAndrea Arcangeli /* leave pmd empty until pte is filled */ 109671e3aac0SAndrea Arcangeli 1097e3ebcf64SGerald Schaefer pgtable = pgtable_trans_huge_withdraw(mm); 109871e3aac0SAndrea Arcangeli pmd_populate(mm, &_pmd, pgtable); 109971e3aac0SAndrea Arcangeli 110071e3aac0SAndrea Arcangeli for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 110171e3aac0SAndrea Arcangeli pte_t *pte, entry; 110271e3aac0SAndrea Arcangeli entry = mk_pte(pages[i], vma->vm_page_prot); 110371e3aac0SAndrea Arcangeli entry = maybe_mkwrite(pte_mkdirty(entry), vma); 110471e3aac0SAndrea Arcangeli page_add_new_anon_rmap(pages[i], vma, haddr); 110571e3aac0SAndrea Arcangeli pte = pte_offset_map(&_pmd, haddr); 110671e3aac0SAndrea Arcangeli VM_BUG_ON(!pte_none(*pte)); 110771e3aac0SAndrea Arcangeli set_pte_at(mm, haddr, pte, entry); 110871e3aac0SAndrea Arcangeli pte_unmap(pte); 110971e3aac0SAndrea Arcangeli } 111071e3aac0SAndrea Arcangeli kfree(pages); 111171e3aac0SAndrea Arcangeli 111271e3aac0SAndrea Arcangeli smp_wmb(); /* make pte visible before pmd */ 111371e3aac0SAndrea Arcangeli pmd_populate(mm, pmd, pgtable); 111471e3aac0SAndrea Arcangeli page_remove_rmap(page); 111571e3aac0SAndrea Arcangeli spin_unlock(&mm->page_table_lock); 111671e3aac0SAndrea Arcangeli 11172ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 11182ec74c3eSSagi Grimberg 111971e3aac0SAndrea Arcangeli ret |= VM_FAULT_WRITE; 112071e3aac0SAndrea Arcangeli put_page(page); 112171e3aac0SAndrea Arcangeli 112271e3aac0SAndrea Arcangeli out: 112371e3aac0SAndrea Arcangeli return ret; 112471e3aac0SAndrea Arcangeli 112571e3aac0SAndrea Arcangeli out_free_pages: 112671e3aac0SAndrea Arcangeli spin_unlock(&mm->page_table_lock); 11272ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1128b9bbfbe3SAndrea Arcangeli mem_cgroup_uncharge_start(); 1129b9bbfbe3SAndrea Arcangeli for (i = 0; i < HPAGE_PMD_NR; i++) { 1130b9bbfbe3SAndrea Arcangeli mem_cgroup_uncharge_page(pages[i]); 113171e3aac0SAndrea Arcangeli put_page(pages[i]); 1132b9bbfbe3SAndrea Arcangeli } 1133b9bbfbe3SAndrea Arcangeli mem_cgroup_uncharge_end(); 113471e3aac0SAndrea Arcangeli kfree(pages); 113571e3aac0SAndrea Arcangeli goto out; 113671e3aac0SAndrea Arcangeli } 113771e3aac0SAndrea Arcangeli 113871e3aac0SAndrea Arcangeli int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, 113971e3aac0SAndrea Arcangeli unsigned long address, pmd_t *pmd, pmd_t orig_pmd) 114071e3aac0SAndrea Arcangeli { 114171e3aac0SAndrea Arcangeli int ret = 0; 114293b4796dSKirill A. Shutemov struct page *page = NULL, *new_page; 114371e3aac0SAndrea Arcangeli unsigned long haddr; 11442ec74c3eSSagi Grimberg unsigned long mmun_start; /* For mmu_notifiers */ 11452ec74c3eSSagi Grimberg unsigned long mmun_end; /* For mmu_notifiers */ 114671e3aac0SAndrea Arcangeli 114771e3aac0SAndrea Arcangeli VM_BUG_ON(!vma->anon_vma); 114893b4796dSKirill A. Shutemov haddr = address & HPAGE_PMD_MASK; 114993b4796dSKirill A. Shutemov if (is_huge_zero_pmd(orig_pmd)) 115093b4796dSKirill A. 
Shutemov goto alloc; 115171e3aac0SAndrea Arcangeli spin_lock(&mm->page_table_lock); 115271e3aac0SAndrea Arcangeli if (unlikely(!pmd_same(*pmd, orig_pmd))) 115371e3aac0SAndrea Arcangeli goto out_unlock; 115471e3aac0SAndrea Arcangeli 115571e3aac0SAndrea Arcangeli page = pmd_page(orig_pmd); 115671e3aac0SAndrea Arcangeli VM_BUG_ON(!PageCompound(page) || !PageHead(page)); 115771e3aac0SAndrea Arcangeli if (page_mapcount(page) == 1) { 115871e3aac0SAndrea Arcangeli pmd_t entry; 115971e3aac0SAndrea Arcangeli entry = pmd_mkyoung(orig_pmd); 116071e3aac0SAndrea Arcangeli entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 116171e3aac0SAndrea Arcangeli if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1)) 1162b113da65SDavid Miller update_mmu_cache_pmd(vma, address, pmd); 116371e3aac0SAndrea Arcangeli ret |= VM_FAULT_WRITE; 116471e3aac0SAndrea Arcangeli goto out_unlock; 116571e3aac0SAndrea Arcangeli } 116671e3aac0SAndrea Arcangeli get_page(page); 116771e3aac0SAndrea Arcangeli spin_unlock(&mm->page_table_lock); 116893b4796dSKirill A. Shutemov alloc: 116971e3aac0SAndrea Arcangeli if (transparent_hugepage_enabled(vma) && 117071e3aac0SAndrea Arcangeli !transparent_hugepage_debug_cow()) 11710bbbc0b3SAndrea Arcangeli new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), 1172cc5d462fSAndi Kleen vma, haddr, numa_node_id(), 0); 117371e3aac0SAndrea Arcangeli else 117471e3aac0SAndrea Arcangeli new_page = NULL; 117571e3aac0SAndrea Arcangeli 117671e3aac0SAndrea Arcangeli if (unlikely(!new_page)) { 117781ab4201SAndi Kleen count_vm_event(THP_FAULT_FALLBACK); 117893b4796dSKirill A. Shutemov if (is_huge_zero_pmd(orig_pmd)) { 117993b4796dSKirill A. Shutemov ret = do_huge_pmd_wp_zero_page_fallback(mm, vma, 11803ea41e62SKirill A. Shutemov address, pmd, orig_pmd, haddr); 118193b4796dSKirill A. Shutemov } else { 118271e3aac0SAndrea Arcangeli ret = do_huge_pmd_wp_page_fallback(mm, vma, address, 118371e3aac0SAndrea Arcangeli pmd, orig_pmd, page, haddr); 11841f1d06c3SDavid Rientjes if (ret & VM_FAULT_OOM) 11851f1d06c3SDavid Rientjes split_huge_page(page); 118671e3aac0SAndrea Arcangeli put_page(page); 118793b4796dSKirill A. Shutemov } 118871e3aac0SAndrea Arcangeli goto out; 118971e3aac0SAndrea Arcangeli } 119081ab4201SAndi Kleen count_vm_event(THP_FAULT_ALLOC); 119171e3aac0SAndrea Arcangeli 1192b9bbfbe3SAndrea Arcangeli if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { 1193b9bbfbe3SAndrea Arcangeli put_page(new_page); 119493b4796dSKirill A. Shutemov if (page) { 11951f1d06c3SDavid Rientjes split_huge_page(page); 1196b9bbfbe3SAndrea Arcangeli put_page(page); 119793b4796dSKirill A. Shutemov } 1198b9bbfbe3SAndrea Arcangeli ret |= VM_FAULT_OOM; 1199b9bbfbe3SAndrea Arcangeli goto out; 1200b9bbfbe3SAndrea Arcangeli } 1201b9bbfbe3SAndrea Arcangeli 120293b4796dSKirill A. Shutemov if (is_huge_zero_pmd(orig_pmd)) 120393b4796dSKirill A. Shutemov clear_huge_page(new_page, haddr, HPAGE_PMD_NR); 120493b4796dSKirill A. Shutemov else 120571e3aac0SAndrea Arcangeli copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); 120671e3aac0SAndrea Arcangeli __SetPageUptodate(new_page); 120771e3aac0SAndrea Arcangeli 12082ec74c3eSSagi Grimberg mmun_start = haddr; 12092ec74c3eSSagi Grimberg mmun_end = haddr + HPAGE_PMD_SIZE; 12102ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 12112ec74c3eSSagi Grimberg 121271e3aac0SAndrea Arcangeli spin_lock(&mm->page_table_lock); 121393b4796dSKirill A. 
Shutemov if (page) 121471e3aac0SAndrea Arcangeli put_page(page); 1215b9bbfbe3SAndrea Arcangeli if (unlikely(!pmd_same(*pmd, orig_pmd))) { 12166f60b69dSDavid Rientjes spin_unlock(&mm->page_table_lock); 1217b9bbfbe3SAndrea Arcangeli mem_cgroup_uncharge_page(new_page); 121871e3aac0SAndrea Arcangeli put_page(new_page); 12192ec74c3eSSagi Grimberg goto out_mn; 1220b9bbfbe3SAndrea Arcangeli } else { 122171e3aac0SAndrea Arcangeli pmd_t entry; 1222b3092b3bSBob Liu entry = mk_huge_pmd(new_page, vma); 12232ec74c3eSSagi Grimberg pmdp_clear_flush(vma, haddr, pmd); 122471e3aac0SAndrea Arcangeli page_add_new_anon_rmap(new_page, vma, haddr); 122571e3aac0SAndrea Arcangeli set_pmd_at(mm, haddr, pmd, entry); 1226b113da65SDavid Miller update_mmu_cache_pmd(vma, address, pmd); 122797ae1749SKirill A. Shutemov if (is_huge_zero_pmd(orig_pmd)) { 122893b4796dSKirill A. Shutemov add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); 122997ae1749SKirill A. Shutemov put_huge_zero_page(); 123097ae1749SKirill A. Shutemov } else { 123193b4796dSKirill A. Shutemov VM_BUG_ON(!PageHead(page)); 123271e3aac0SAndrea Arcangeli page_remove_rmap(page); 123371e3aac0SAndrea Arcangeli put_page(page); 123493b4796dSKirill A. Shutemov } 123571e3aac0SAndrea Arcangeli ret |= VM_FAULT_WRITE; 123671e3aac0SAndrea Arcangeli } 12372ec74c3eSSagi Grimberg spin_unlock(&mm->page_table_lock); 12382ec74c3eSSagi Grimberg out_mn: 12392ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 12402ec74c3eSSagi Grimberg out: 12412ec74c3eSSagi Grimberg return ret; 124271e3aac0SAndrea Arcangeli out_unlock: 124371e3aac0SAndrea Arcangeli spin_unlock(&mm->page_table_lock); 124471e3aac0SAndrea Arcangeli return ret; 124571e3aac0SAndrea Arcangeli } 124671e3aac0SAndrea Arcangeli 1247b676b293SDavid Rientjes struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, 124871e3aac0SAndrea Arcangeli unsigned long addr, 124971e3aac0SAndrea Arcangeli pmd_t *pmd, 125071e3aac0SAndrea Arcangeli unsigned int flags) 125171e3aac0SAndrea Arcangeli { 1252b676b293SDavid Rientjes struct mm_struct *mm = vma->vm_mm; 125371e3aac0SAndrea Arcangeli struct page *page = NULL; 125471e3aac0SAndrea Arcangeli 125571e3aac0SAndrea Arcangeli assert_spin_locked(&mm->page_table_lock); 125671e3aac0SAndrea Arcangeli 125771e3aac0SAndrea Arcangeli if (flags & FOLL_WRITE && !pmd_write(*pmd)) 125871e3aac0SAndrea Arcangeli goto out; 125971e3aac0SAndrea Arcangeli 1260*85facf25SKirill A. Shutemov /* Avoid dumping huge zero page */ 1261*85facf25SKirill A. Shutemov if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) 1262*85facf25SKirill A. Shutemov return ERR_PTR(-EFAULT); 1263*85facf25SKirill A. Shutemov 126471e3aac0SAndrea Arcangeli page = pmd_page(*pmd); 126571e3aac0SAndrea Arcangeli VM_BUG_ON(!PageHead(page)); 126671e3aac0SAndrea Arcangeli if (flags & FOLL_TOUCH) { 126771e3aac0SAndrea Arcangeli pmd_t _pmd; 126871e3aac0SAndrea Arcangeli /* 126971e3aac0SAndrea Arcangeli * We should set the dirty bit only for FOLL_WRITE but 127071e3aac0SAndrea Arcangeli * for now the dirty bit in the pmd is meaningless. 127171e3aac0SAndrea Arcangeli * And if the dirty bit will become meaningful and 127271e3aac0SAndrea Arcangeli * we'll only set it with FOLL_WRITE, an atomic 127371e3aac0SAndrea Arcangeli * set_bit will be required on the pmd to set the 127471e3aac0SAndrea Arcangeli * young bit, instead of the current set_pmd_at. 
127571e3aac0SAndrea Arcangeli */ 127671e3aac0SAndrea Arcangeli _pmd = pmd_mkyoung(pmd_mkdirty(*pmd)); 127771e3aac0SAndrea Arcangeli set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd); 127871e3aac0SAndrea Arcangeli } 1279b676b293SDavid Rientjes if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { 1280b676b293SDavid Rientjes if (page->mapping && trylock_page(page)) { 1281b676b293SDavid Rientjes lru_add_drain(); 1282b676b293SDavid Rientjes if (page->mapping) 1283b676b293SDavid Rientjes mlock_vma_page(page); 1284b676b293SDavid Rientjes unlock_page(page); 1285b676b293SDavid Rientjes } 1286b676b293SDavid Rientjes } 128771e3aac0SAndrea Arcangeli page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; 128871e3aac0SAndrea Arcangeli VM_BUG_ON(!PageCompound(page)); 128971e3aac0SAndrea Arcangeli if (flags & FOLL_GET) 129070b50f94SAndrea Arcangeli get_page_foll(page); 129171e3aac0SAndrea Arcangeli 129271e3aac0SAndrea Arcangeli out: 129371e3aac0SAndrea Arcangeli return page; 129471e3aac0SAndrea Arcangeli } 129571e3aac0SAndrea Arcangeli 1296d10e63f2SMel Gorman /* NUMA hinting page fault entry point for trans huge pmds */ 12974daae3b4SMel Gorman int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, 12984daae3b4SMel Gorman unsigned long addr, pmd_t pmd, pmd_t *pmdp) 1299d10e63f2SMel Gorman { 1300b32967ffSMel Gorman struct page *page; 1301d10e63f2SMel Gorman unsigned long haddr = addr & HPAGE_PMD_MASK; 13024daae3b4SMel Gorman int target_nid; 130303c5a6e1SMel Gorman int current_nid = -1; 1304b32967ffSMel Gorman bool migrated; 1305b32967ffSMel Gorman bool page_locked = false; 1306d10e63f2SMel Gorman 1307d10e63f2SMel Gorman spin_lock(&mm->page_table_lock); 1308d10e63f2SMel Gorman if (unlikely(!pmd_same(pmd, *pmdp))) 1309d10e63f2SMel Gorman goto out_unlock; 1310d10e63f2SMel Gorman 1311d10e63f2SMel Gorman page = pmd_page(pmd); 13124daae3b4SMel Gorman get_page(page); 131303c5a6e1SMel Gorman current_nid = page_to_nid(page); 131403c5a6e1SMel Gorman count_vm_numa_event(NUMA_HINT_FAULTS); 131503c5a6e1SMel Gorman if (current_nid == numa_node_id()) 131603c5a6e1SMel Gorman count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); 13174daae3b4SMel Gorman 13184daae3b4SMel Gorman target_nid = mpol_misplaced(page, vma, haddr); 1319b32967ffSMel Gorman if (target_nid == -1) { 13204daae3b4SMel Gorman put_page(page); 1321b32967ffSMel Gorman goto clear_pmdnuma; 1322b32967ffSMel Gorman } 1323cbee9f88SPeter Zijlstra 1324b32967ffSMel Gorman /* Acquire the page lock to serialise THP migrations */ 1325b32967ffSMel Gorman spin_unlock(&mm->page_table_lock); 1326b32967ffSMel Gorman lock_page(page); 1327b32967ffSMel Gorman page_locked = true; 1328b32967ffSMel Gorman 1329b32967ffSMel Gorman /* Confirm the PMD did not change while locked */ 1330b32967ffSMel Gorman spin_lock(&mm->page_table_lock); 1331b32967ffSMel Gorman if (unlikely(!pmd_same(pmd, *pmdp))) { 1332b32967ffSMel Gorman unlock_page(page); 1333b32967ffSMel Gorman put_page(page); 1334b32967ffSMel Gorman goto out_unlock; 1335b32967ffSMel Gorman } 1336b32967ffSMel Gorman spin_unlock(&mm->page_table_lock); 1337b32967ffSMel Gorman 1338b32967ffSMel Gorman /* Migrate the THP to the requested node */ 1339b32967ffSMel Gorman migrated = migrate_misplaced_transhuge_page(mm, vma, 1340b32967ffSMel Gorman pmdp, pmd, addr, 1341b32967ffSMel Gorman page, target_nid); 1342b32967ffSMel Gorman if (migrated) 1343b32967ffSMel Gorman current_nid = target_nid; 1344b32967ffSMel Gorman else { 1345b32967ffSMel Gorman spin_lock(&mm->page_table_lock); 1346b32967ffSMel Gorman if (unlikely(!pmd_same(pmd,
*pmdp))) { 1347b32967ffSMel Gorman unlock_page(page); 1348b32967ffSMel Gorman goto out_unlock; 1349b32967ffSMel Gorman } 1350b32967ffSMel Gorman goto clear_pmdnuma; 1351b32967ffSMel Gorman } 1352b32967ffSMel Gorman 1353b32967ffSMel Gorman task_numa_fault(current_nid, HPAGE_PMD_NR, migrated); 13544daae3b4SMel Gorman return 0; 13554daae3b4SMel Gorman 13564daae3b4SMel Gorman clear_pmdnuma: 1357d10e63f2SMel Gorman pmd = pmd_mknonnuma(pmd); 1358d10e63f2SMel Gorman set_pmd_at(mm, haddr, pmdp, pmd); 1359d10e63f2SMel Gorman VM_BUG_ON(pmd_numa(*pmdp)); 1360d10e63f2SMel Gorman update_mmu_cache_pmd(vma, addr, pmdp); 1361b32967ffSMel Gorman if (page_locked) 1362b32967ffSMel Gorman unlock_page(page); 1363d10e63f2SMel Gorman 1364d10e63f2SMel Gorman out_unlock: 1365d10e63f2SMel Gorman spin_unlock(&mm->page_table_lock); 1366b32967ffSMel Gorman if (current_nid != -1) 1367b32967ffSMel Gorman task_numa_fault(current_nid, HPAGE_PMD_NR, migrated); 1368d10e63f2SMel Gorman return 0; 1369d10e63f2SMel Gorman } 1370d10e63f2SMel Gorman 137171e3aac0SAndrea Arcangeli int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1372f21760b1SShaohua Li pmd_t *pmd, unsigned long addr) 137371e3aac0SAndrea Arcangeli { 137471e3aac0SAndrea Arcangeli int ret = 0; 137571e3aac0SAndrea Arcangeli 1376025c5b24SNaoya Horiguchi if (__pmd_trans_huge_lock(pmd, vma) == 1) { 137771e3aac0SAndrea Arcangeli struct page *page; 137871e3aac0SAndrea Arcangeli pgtable_t pgtable; 1379f5c8ad47SDavid Miller pmd_t orig_pmd; 1380e3ebcf64SGerald Schaefer pgtable = pgtable_trans_huge_withdraw(tlb->mm); 1381f5c8ad47SDavid Miller orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd); 1382f21760b1SShaohua Li tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 1383479f0abbSKirill A. Shutemov if (is_huge_zero_pmd(orig_pmd)) { 1384479f0abbSKirill A. Shutemov tlb->mm->nr_ptes--; 1385479f0abbSKirill A. Shutemov spin_unlock(&tlb->mm->page_table_lock); 138697ae1749SKirill A. Shutemov put_huge_zero_page(); 1387479f0abbSKirill A. Shutemov } else { 1388479f0abbSKirill A. Shutemov page = pmd_page(orig_pmd); 138971e3aac0SAndrea Arcangeli page_remove_rmap(page); 139071e3aac0SAndrea Arcangeli VM_BUG_ON(page_mapcount(page) < 0); 139171e3aac0SAndrea Arcangeli add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); 139271e3aac0SAndrea Arcangeli VM_BUG_ON(!PageHead(page)); 13931c641e84SAndrea Arcangeli tlb->mm->nr_ptes--; 139471e3aac0SAndrea Arcangeli spin_unlock(&tlb->mm->page_table_lock); 139571e3aac0SAndrea Arcangeli tlb_remove_page(tlb, page); 1396479f0abbSKirill A. Shutemov } 139771e3aac0SAndrea Arcangeli pte_free(tlb->mm, pgtable); 139871e3aac0SAndrea Arcangeli ret = 1; 139971e3aac0SAndrea Arcangeli } 140071e3aac0SAndrea Arcangeli return ret; 140171e3aac0SAndrea Arcangeli } 140271e3aac0SAndrea Arcangeli 14030ca1634dSJohannes Weiner int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 14040ca1634dSJohannes Weiner unsigned long addr, unsigned long end, 14050ca1634dSJohannes Weiner unsigned char *vec) 14060ca1634dSJohannes Weiner { 14070ca1634dSJohannes Weiner int ret = 0; 14080ca1634dSJohannes Weiner 1409025c5b24SNaoya Horiguchi if (__pmd_trans_huge_lock(pmd, vma) == 1) { 14100ca1634dSJohannes Weiner /* 14110ca1634dSJohannes Weiner * All logical pages in the range are present 14120ca1634dSJohannes Weiner * if backed by a huge page. 
14130ca1634dSJohannes Weiner */ 14140ca1634dSJohannes Weiner spin_unlock(&vma->vm_mm->page_table_lock); 1415025c5b24SNaoya Horiguchi memset(vec, 1, (end - addr) >> PAGE_SHIFT); 1416025c5b24SNaoya Horiguchi ret = 1; 1417025c5b24SNaoya Horiguchi } 14180ca1634dSJohannes Weiner 14190ca1634dSJohannes Weiner return ret; 14200ca1634dSJohannes Weiner } 14210ca1634dSJohannes Weiner 142237a1c49aSAndrea Arcangeli int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma, 142337a1c49aSAndrea Arcangeli unsigned long old_addr, 142437a1c49aSAndrea Arcangeli unsigned long new_addr, unsigned long old_end, 142537a1c49aSAndrea Arcangeli pmd_t *old_pmd, pmd_t *new_pmd) 142637a1c49aSAndrea Arcangeli { 142737a1c49aSAndrea Arcangeli int ret = 0; 142837a1c49aSAndrea Arcangeli pmd_t pmd; 142937a1c49aSAndrea Arcangeli 143037a1c49aSAndrea Arcangeli struct mm_struct *mm = vma->vm_mm; 143137a1c49aSAndrea Arcangeli 143237a1c49aSAndrea Arcangeli if ((old_addr & ~HPAGE_PMD_MASK) || 143337a1c49aSAndrea Arcangeli (new_addr & ~HPAGE_PMD_MASK) || 143437a1c49aSAndrea Arcangeli old_end - old_addr < HPAGE_PMD_SIZE || 143537a1c49aSAndrea Arcangeli (new_vma->vm_flags & VM_NOHUGEPAGE)) 143637a1c49aSAndrea Arcangeli goto out; 143737a1c49aSAndrea Arcangeli 143837a1c49aSAndrea Arcangeli /* 143937a1c49aSAndrea Arcangeli * The destination pmd shouldn't be established, free_pgtables() 144037a1c49aSAndrea Arcangeli * should have released it. 144137a1c49aSAndrea Arcangeli */ 144237a1c49aSAndrea Arcangeli if (WARN_ON(!pmd_none(*new_pmd))) { 144337a1c49aSAndrea Arcangeli VM_BUG_ON(pmd_trans_huge(*new_pmd)); 144437a1c49aSAndrea Arcangeli goto out; 144537a1c49aSAndrea Arcangeli } 144637a1c49aSAndrea Arcangeli 1447025c5b24SNaoya Horiguchi ret = __pmd_trans_huge_lock(old_pmd, vma); 1448025c5b24SNaoya Horiguchi if (ret == 1) { 144937a1c49aSAndrea Arcangeli pmd = pmdp_get_and_clear(mm, old_addr, old_pmd); 145037a1c49aSAndrea Arcangeli VM_BUG_ON(!pmd_none(*new_pmd)); 145137a1c49aSAndrea Arcangeli set_pmd_at(mm, new_addr, new_pmd, pmd); 145237a1c49aSAndrea Arcangeli spin_unlock(&mm->page_table_lock); 145337a1c49aSAndrea Arcangeli } 145437a1c49aSAndrea Arcangeli out: 145537a1c49aSAndrea Arcangeli return ret; 145637a1c49aSAndrea Arcangeli } 145737a1c49aSAndrea Arcangeli 1458cd7548abSJohannes Weiner int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 14594b10e7d5SMel Gorman unsigned long addr, pgprot_t newprot, int prot_numa) 1460cd7548abSJohannes Weiner { 1461cd7548abSJohannes Weiner struct mm_struct *mm = vma->vm_mm; 1462cd7548abSJohannes Weiner int ret = 0; 1463cd7548abSJohannes Weiner 1464025c5b24SNaoya Horiguchi if (__pmd_trans_huge_lock(pmd, vma) == 1) { 1465cd7548abSJohannes Weiner pmd_t entry; 1466cd7548abSJohannes Weiner entry = pmdp_get_and_clear(mm, addr, pmd); 1467a4f1de17SHugh Dickins if (!prot_numa) { 1468cd7548abSJohannes Weiner entry = pmd_modify(entry, newprot); 1469a4f1de17SHugh Dickins BUG_ON(pmd_write(entry)); 1470a4f1de17SHugh Dickins } else { 14714b10e7d5SMel Gorman struct page *page = pmd_page(*pmd); 14724b10e7d5SMel Gorman 14734b10e7d5SMel Gorman /* only check non-shared pages */ 14744b10e7d5SMel Gorman if (page_mapcount(page) == 1 && 14754b10e7d5SMel Gorman !pmd_numa(*pmd)) { 14764b10e7d5SMel Gorman entry = pmd_mknuma(entry); 14774b10e7d5SMel Gorman } 14784b10e7d5SMel Gorman } 1479cd7548abSJohannes Weiner set_pmd_at(mm, addr, pmd, entry); 1480cd7548abSJohannes Weiner spin_unlock(&vma->vm_mm->page_table_lock); 1481cd7548abSJohannes Weiner ret = 1; 1482cd7548abSJohannes Weiner } 1483cd7548abSJohannes
Weiner 1484cd7548abSJohannes Weiner return ret; 1485cd7548abSJohannes Weiner } 1486cd7548abSJohannes Weiner 1487025c5b24SNaoya Horiguchi /* 1488025c5b24SNaoya Horiguchi * Returns 1 if a given pmd maps a stable (not under splitting) thp. 1489025c5b24SNaoya Horiguchi * Returns -1 if it maps a thp under splitting. Returns 0 otherwise. 1490025c5b24SNaoya Horiguchi * 1491025c5b24SNaoya Horiguchi * Note that if it returns 1, this routine returns without unlocking page 1492025c5b24SNaoya Horiguchi * table locks. So callers must unlock them. 1493025c5b24SNaoya Horiguchi */ 1494025c5b24SNaoya Horiguchi int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) 1495025c5b24SNaoya Horiguchi { 1496025c5b24SNaoya Horiguchi spin_lock(&vma->vm_mm->page_table_lock); 1497025c5b24SNaoya Horiguchi if (likely(pmd_trans_huge(*pmd))) { 1498025c5b24SNaoya Horiguchi if (unlikely(pmd_trans_splitting(*pmd))) { 1499025c5b24SNaoya Horiguchi spin_unlock(&vma->vm_mm->page_table_lock); 1500025c5b24SNaoya Horiguchi wait_split_huge_page(vma->anon_vma, pmd); 1501025c5b24SNaoya Horiguchi return -1; 1502025c5b24SNaoya Horiguchi } else { 1503025c5b24SNaoya Horiguchi /* Thp mapped by 'pmd' is stable, so we can 1504025c5b24SNaoya Horiguchi * handle it as it is. */ 1505025c5b24SNaoya Horiguchi return 1; 1506025c5b24SNaoya Horiguchi } 1507025c5b24SNaoya Horiguchi } 1508025c5b24SNaoya Horiguchi spin_unlock(&vma->vm_mm->page_table_lock); 1509025c5b24SNaoya Horiguchi return 0; 1510025c5b24SNaoya Horiguchi } 1511025c5b24SNaoya Horiguchi 151271e3aac0SAndrea Arcangeli pmd_t *page_check_address_pmd(struct page *page, 151371e3aac0SAndrea Arcangeli struct mm_struct *mm, 151471e3aac0SAndrea Arcangeli unsigned long address, 151571e3aac0SAndrea Arcangeli enum page_check_address_pmd_flag flag) 151671e3aac0SAndrea Arcangeli { 151771e3aac0SAndrea Arcangeli pmd_t *pmd, *ret = NULL; 151871e3aac0SAndrea Arcangeli 151971e3aac0SAndrea Arcangeli if (address & ~HPAGE_PMD_MASK) 152071e3aac0SAndrea Arcangeli goto out; 152171e3aac0SAndrea Arcangeli 15226219049aSBob Liu pmd = mm_find_pmd(mm, address); 15236219049aSBob Liu if (!pmd) 152471e3aac0SAndrea Arcangeli goto out; 152571e3aac0SAndrea Arcangeli if (pmd_none(*pmd)) 152671e3aac0SAndrea Arcangeli goto out; 152771e3aac0SAndrea Arcangeli if (pmd_page(*pmd) != page) 152871e3aac0SAndrea Arcangeli goto out; 152994fcc585SAndrea Arcangeli /* 153094fcc585SAndrea Arcangeli * split_vma() may create temporary aliased mappings. There is 153194fcc585SAndrea Arcangeli * no risk as long as all huge pmd are found and have their 153294fcc585SAndrea Arcangeli * splitting bit set before __split_huge_page_refcount 153394fcc585SAndrea Arcangeli * runs. Finding the same huge pmd more than once during the 153494fcc585SAndrea Arcangeli * same rmap walk is not a problem. 
153594fcc585SAndrea Arcangeli */ 153694fcc585SAndrea Arcangeli if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG && 153794fcc585SAndrea Arcangeli pmd_trans_splitting(*pmd)) 153894fcc585SAndrea Arcangeli goto out; 153971e3aac0SAndrea Arcangeli if (pmd_trans_huge(*pmd)) { 154071e3aac0SAndrea Arcangeli VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG && 154171e3aac0SAndrea Arcangeli !pmd_trans_splitting(*pmd)); 154271e3aac0SAndrea Arcangeli ret = pmd; 154371e3aac0SAndrea Arcangeli } 154471e3aac0SAndrea Arcangeli out: 154571e3aac0SAndrea Arcangeli return ret; 154671e3aac0SAndrea Arcangeli } 154771e3aac0SAndrea Arcangeli 154871e3aac0SAndrea Arcangeli static int __split_huge_page_splitting(struct page *page, 154971e3aac0SAndrea Arcangeli struct vm_area_struct *vma, 155071e3aac0SAndrea Arcangeli unsigned long address) 155171e3aac0SAndrea Arcangeli { 155271e3aac0SAndrea Arcangeli struct mm_struct *mm = vma->vm_mm; 155371e3aac0SAndrea Arcangeli pmd_t *pmd; 155471e3aac0SAndrea Arcangeli int ret = 0; 15552ec74c3eSSagi Grimberg /* For mmu_notifiers */ 15562ec74c3eSSagi Grimberg const unsigned long mmun_start = address; 15572ec74c3eSSagi Grimberg const unsigned long mmun_end = address + HPAGE_PMD_SIZE; 155871e3aac0SAndrea Arcangeli 15592ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 156071e3aac0SAndrea Arcangeli spin_lock(&mm->page_table_lock); 156171e3aac0SAndrea Arcangeli pmd = page_check_address_pmd(page, mm, address, 156271e3aac0SAndrea Arcangeli PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG); 156371e3aac0SAndrea Arcangeli if (pmd) { 156471e3aac0SAndrea Arcangeli /* 156571e3aac0SAndrea Arcangeli * We can't temporarily set the pmd to null in order 156671e3aac0SAndrea Arcangeli * to split it, the pmd must remain marked huge at all 156771e3aac0SAndrea Arcangeli * times or the VM won't take the pmd_trans_huge paths 15685a505085SIngo Molnar * and it won't wait on the anon_vma->root->rwsem to 156971e3aac0SAndrea Arcangeli * serialize against split_huge_page*. 
157071e3aac0SAndrea Arcangeli */ 15712ec74c3eSSagi Grimberg pmdp_splitting_flush(vma, address, pmd); 157271e3aac0SAndrea Arcangeli ret = 1; 157371e3aac0SAndrea Arcangeli } 157471e3aac0SAndrea Arcangeli spin_unlock(&mm->page_table_lock); 15752ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 157671e3aac0SAndrea Arcangeli 157771e3aac0SAndrea Arcangeli return ret; 157871e3aac0SAndrea Arcangeli } 157971e3aac0SAndrea Arcangeli 158071e3aac0SAndrea Arcangeli static void __split_huge_page_refcount(struct page *page) 158171e3aac0SAndrea Arcangeli { 158271e3aac0SAndrea Arcangeli int i; 158371e3aac0SAndrea Arcangeli struct zone *zone = page_zone(page); 1584fa9add64SHugh Dickins struct lruvec *lruvec; 158570b50f94SAndrea Arcangeli int tail_count = 0; 158671e3aac0SAndrea Arcangeli 158771e3aac0SAndrea Arcangeli /* prevent PageLRU from going away from under us, and freeze lru stats */ 158871e3aac0SAndrea Arcangeli spin_lock_irq(&zone->lru_lock); 1589fa9add64SHugh Dickins lruvec = mem_cgroup_page_lruvec(page, zone); 1590fa9add64SHugh Dickins 159171e3aac0SAndrea Arcangeli compound_lock(page); 1592e94c8a9cSKAMEZAWA Hiroyuki /* complete memcg work before adding pages to LRU */ 1593e94c8a9cSKAMEZAWA Hiroyuki mem_cgroup_split_huge_fixup(page); 159471e3aac0SAndrea Arcangeli 159545676885SShaohua Li for (i = HPAGE_PMD_NR - 1; i >= 1; i--) { 159671e3aac0SAndrea Arcangeli struct page *page_tail = page + i; 159771e3aac0SAndrea Arcangeli 159870b50f94SAndrea Arcangeli /* tail_page->_mapcount cannot change */ 159970b50f94SAndrea Arcangeli BUG_ON(page_mapcount(page_tail) < 0); 160070b50f94SAndrea Arcangeli tail_count += page_mapcount(page_tail); 160170b50f94SAndrea Arcangeli /* check for overflow */ 160270b50f94SAndrea Arcangeli BUG_ON(tail_count < 0); 160370b50f94SAndrea Arcangeli BUG_ON(atomic_read(&page_tail->_count) != 0); 160470b50f94SAndrea Arcangeli /* 160570b50f94SAndrea Arcangeli * tail_page->_count is zero and not changing from 160670b50f94SAndrea Arcangeli * under us. But get_page_unless_zero() may be running 160770b50f94SAndrea Arcangeli * from under us on the tail_page. If we used 160870b50f94SAndrea Arcangeli * atomic_set() below instead of atomic_add(), we 160970b50f94SAndrea Arcangeli * would then run atomic_set() concurrently with 161070b50f94SAndrea Arcangeli * get_page_unless_zero(), and atomic_set() is 161170b50f94SAndrea Arcangeli * implemented in C not using locked ops. spin_unlock 161270b50f94SAndrea Arcangeli * on x86 sometimes uses locked ops because of PPro 161370b50f94SAndrea Arcangeli * errata 66, 92, so unless somebody can guarantee 161470b50f94SAndrea Arcangeli * atomic_set() here would be safe on all archs (and 161570b50f94SAndrea Arcangeli * not only on x86), it's safer to use atomic_add(). 161670b50f94SAndrea Arcangeli */ 161770b50f94SAndrea Arcangeli atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1, 161870b50f94SAndrea Arcangeli &page_tail->_count); 161971e3aac0SAndrea Arcangeli 162071e3aac0SAndrea Arcangeli /* after clearing PageTail the gup refcount can be released */ 162171e3aac0SAndrea Arcangeli smp_mb(); 162271e3aac0SAndrea Arcangeli 1623a6d30dddSJin Dongming /* 1624a6d30dddSJin Dongming * retain hwpoison flag of the poisoned tail page: 1625a6d30dddSJin Dongming * fix for the unsuitable process killed on Guest Machine(KVM) 1626a6d30dddSJin Dongming * by the memory-failure.
1627a6d30dddSJin Dongming */ 1628a6d30dddSJin Dongming page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON; 162971e3aac0SAndrea Arcangeli page_tail->flags |= (page->flags & 163071e3aac0SAndrea Arcangeli ((1L << PG_referenced) | 163171e3aac0SAndrea Arcangeli (1L << PG_swapbacked) | 163271e3aac0SAndrea Arcangeli (1L << PG_mlocked) | 163371e3aac0SAndrea Arcangeli (1L << PG_uptodate))); 163471e3aac0SAndrea Arcangeli page_tail->flags |= (1L << PG_dirty); 163571e3aac0SAndrea Arcangeli 163670b50f94SAndrea Arcangeli /* clear PageTail before overwriting first_page */ 163771e3aac0SAndrea Arcangeli smp_wmb(); 163871e3aac0SAndrea Arcangeli 163971e3aac0SAndrea Arcangeli /* 164071e3aac0SAndrea Arcangeli * __split_huge_page_splitting() already set the 164171e3aac0SAndrea Arcangeli * splitting bit in all pmd that could map this 164271e3aac0SAndrea Arcangeli * hugepage, that will ensure no CPU can alter the 164371e3aac0SAndrea Arcangeli * mapcount on the head page. The mapcount is only 164471e3aac0SAndrea Arcangeli * accounted in the head page and it has to be 164571e3aac0SAndrea Arcangeli * transferred to all tail pages in the below code. So 164671e3aac0SAndrea Arcangeli * for this code to be safe, during the split the mapcount 164771e3aac0SAndrea Arcangeli * can't change. But that doesn't mean userland can't 164871e3aac0SAndrea Arcangeli * keep changing and reading the page contents while 164971e3aac0SAndrea Arcangeli * we transfer the mapcount, so the pmd splitting 165071e3aac0SAndrea Arcangeli * status is achieved by setting a reserved bit in the 165171e3aac0SAndrea Arcangeli * pmd, not by clearing the present bit. 165271e3aac0SAndrea Arcangeli */ 165371e3aac0SAndrea Arcangeli page_tail->_mapcount = page->_mapcount; 165471e3aac0SAndrea Arcangeli 165571e3aac0SAndrea Arcangeli BUG_ON(page_tail->mapping); 165671e3aac0SAndrea Arcangeli page_tail->mapping = page->mapping; 165771e3aac0SAndrea Arcangeli 165845676885SShaohua Li page_tail->index = page->index + i; 16595aa80374SHillf Danton page_xchg_last_nid(page_tail, page_last_nid(page)); 166071e3aac0SAndrea Arcangeli 166171e3aac0SAndrea Arcangeli BUG_ON(!PageAnon(page_tail)); 166271e3aac0SAndrea Arcangeli BUG_ON(!PageUptodate(page_tail)); 166371e3aac0SAndrea Arcangeli BUG_ON(!PageDirty(page_tail)); 166471e3aac0SAndrea Arcangeli BUG_ON(!PageSwapBacked(page_tail)); 166571e3aac0SAndrea Arcangeli 1666fa9add64SHugh Dickins lru_add_page_tail(page, page_tail, lruvec); 166771e3aac0SAndrea Arcangeli } 166870b50f94SAndrea Arcangeli atomic_sub(tail_count, &page->_count); 166970b50f94SAndrea Arcangeli BUG_ON(atomic_read(&page->_count) <= 0); 167071e3aac0SAndrea Arcangeli 1671fa9add64SHugh Dickins __mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1); 167279134171SAndrea Arcangeli __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR); 167379134171SAndrea Arcangeli 167471e3aac0SAndrea Arcangeli ClearPageCompound(page); 167571e3aac0SAndrea Arcangeli compound_unlock(page); 167671e3aac0SAndrea Arcangeli spin_unlock_irq(&zone->lru_lock); 167771e3aac0SAndrea Arcangeli 167871e3aac0SAndrea Arcangeli for (i = 1; i < HPAGE_PMD_NR; i++) { 167971e3aac0SAndrea Arcangeli struct page *page_tail = page + i; 168071e3aac0SAndrea Arcangeli BUG_ON(page_count(page_tail) <= 0); 168171e3aac0SAndrea Arcangeli /* 168271e3aac0SAndrea Arcangeli * Tail pages may be freed if there wasn't any mapping 168371e3aac0SAndrea Arcangeli * like if add_to_swap() is running on a lru page that 168471e3aac0SAndrea Arcangeli * had its mapping zapped.
And freeing these pages 168571e3aac0SAndrea Arcangeli * requires taking the lru_lock so we do the put_page 168671e3aac0SAndrea Arcangeli * of the tail pages after the split is complete. 168771e3aac0SAndrea Arcangeli */ 168871e3aac0SAndrea Arcangeli put_page(page_tail); 168971e3aac0SAndrea Arcangeli } 169071e3aac0SAndrea Arcangeli 169171e3aac0SAndrea Arcangeli /* 169271e3aac0SAndrea Arcangeli * Only the head page (now become a regular page) is required 169371e3aac0SAndrea Arcangeli * to be pinned by the caller. 169471e3aac0SAndrea Arcangeli */ 169571e3aac0SAndrea Arcangeli BUG_ON(page_count(page) <= 0); 169671e3aac0SAndrea Arcangeli } 169771e3aac0SAndrea Arcangeli 169871e3aac0SAndrea Arcangeli static int __split_huge_page_map(struct page *page, 169971e3aac0SAndrea Arcangeli struct vm_area_struct *vma, 170071e3aac0SAndrea Arcangeli unsigned long address) 170171e3aac0SAndrea Arcangeli { 170271e3aac0SAndrea Arcangeli struct mm_struct *mm = vma->vm_mm; 170371e3aac0SAndrea Arcangeli pmd_t *pmd, _pmd; 170471e3aac0SAndrea Arcangeli int ret = 0, i; 170571e3aac0SAndrea Arcangeli pgtable_t pgtable; 170671e3aac0SAndrea Arcangeli unsigned long haddr; 170771e3aac0SAndrea Arcangeli 170871e3aac0SAndrea Arcangeli spin_lock(&mm->page_table_lock); 170971e3aac0SAndrea Arcangeli pmd = page_check_address_pmd(page, mm, address, 171071e3aac0SAndrea Arcangeli PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG); 171171e3aac0SAndrea Arcangeli if (pmd) { 1712e3ebcf64SGerald Schaefer pgtable = pgtable_trans_huge_withdraw(mm); 171371e3aac0SAndrea Arcangeli pmd_populate(mm, &_pmd, pgtable); 171471e3aac0SAndrea Arcangeli 1715e3ebcf64SGerald Schaefer haddr = address; 1716e3ebcf64SGerald Schaefer for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 171771e3aac0SAndrea Arcangeli pte_t *pte, entry; 171871e3aac0SAndrea Arcangeli BUG_ON(PageCompound(page+i)); 171971e3aac0SAndrea Arcangeli entry = mk_pte(page + i, vma->vm_page_prot); 172071e3aac0SAndrea Arcangeli entry = maybe_mkwrite(pte_mkdirty(entry), vma); 172171e3aac0SAndrea Arcangeli if (!pmd_write(*pmd)) 172271e3aac0SAndrea Arcangeli entry = pte_wrprotect(entry); 172371e3aac0SAndrea Arcangeli else 172471e3aac0SAndrea Arcangeli BUG_ON(page_mapcount(page) != 1); 172571e3aac0SAndrea Arcangeli if (!pmd_young(*pmd)) 172671e3aac0SAndrea Arcangeli entry = pte_mkold(entry); 17271ba6e0b5SAndrea Arcangeli if (pmd_numa(*pmd)) 17281ba6e0b5SAndrea Arcangeli entry = pte_mknuma(entry); 172971e3aac0SAndrea Arcangeli pte = pte_offset_map(&_pmd, haddr); 173071e3aac0SAndrea Arcangeli BUG_ON(!pte_none(*pte)); 173171e3aac0SAndrea Arcangeli set_pte_at(mm, haddr, pte, entry); 173271e3aac0SAndrea Arcangeli pte_unmap(pte); 173371e3aac0SAndrea Arcangeli } 173471e3aac0SAndrea Arcangeli 173571e3aac0SAndrea Arcangeli smp_wmb(); /* make pte visible before pmd */ 173671e3aac0SAndrea Arcangeli /* 173771e3aac0SAndrea Arcangeli * Up to this point the pmd is present and huge and 173871e3aac0SAndrea Arcangeli * userland has the whole access to the hugepage 173971e3aac0SAndrea Arcangeli * during the split (which happens in place). If we 174071e3aac0SAndrea Arcangeli * overwrite the pmd with the not-huge version 174171e3aac0SAndrea Arcangeli * pointing to the pte here (which of course we could 174271e3aac0SAndrea Arcangeli * if all CPUs were bug free), userland could trigger 174371e3aac0SAndrea Arcangeli * a small page size TLB miss on the small sized TLB 174471e3aac0SAndrea Arcangeli * while the hugepage TLB entry is still established 174571e3aac0SAndrea Arcangeli * in the huge TLB. Some CPU doesn't like that. 
See 174671e3aac0SAndrea Arcangeli * http://support.amd.com/us/Processor_TechDocs/41322.pdf, 174771e3aac0SAndrea Arcangeli * Erratum 383 on page 93. Intel should be safe but 174871e3aac0SAndrea Arcangeli * also warns that it's only safe if the permission 174971e3aac0SAndrea Arcangeli * and cache attributes of the two entries loaded in 175071e3aac0SAndrea Arcangeli * the two TLBs are identical (which should be the case 175171e3aac0SAndrea Arcangeli * here). But it is generally safer to never allow 175271e3aac0SAndrea Arcangeli * small and huge TLB entries for the same virtual 175371e3aac0SAndrea Arcangeli * address to be loaded simultaneously. So instead of 175471e3aac0SAndrea Arcangeli * doing "pmd_populate(); flush_tlb_range();" we first 175571e3aac0SAndrea Arcangeli * mark the current pmd notpresent (atomically because 175671e3aac0SAndrea Arcangeli * here the pmd_trans_huge and pmd_trans_splitting 175771e3aac0SAndrea Arcangeli * must remain set at all times on the pmd until the 175871e3aac0SAndrea Arcangeli * split is complete for this pmd), then we flush the 175971e3aac0SAndrea Arcangeli * SMP TLB and finally we write the non-huge version 176071e3aac0SAndrea Arcangeli * of the pmd entry with pmd_populate. 176171e3aac0SAndrea Arcangeli */ 176246dcde73SGerald Schaefer pmdp_invalidate(vma, address, pmd); 176371e3aac0SAndrea Arcangeli pmd_populate(mm, pmd, pgtable); 176471e3aac0SAndrea Arcangeli ret = 1; 176571e3aac0SAndrea Arcangeli } 176671e3aac0SAndrea Arcangeli spin_unlock(&mm->page_table_lock); 176771e3aac0SAndrea Arcangeli 176871e3aac0SAndrea Arcangeli return ret; 176971e3aac0SAndrea Arcangeli } 177071e3aac0SAndrea Arcangeli 17715a505085SIngo Molnar /* must be called with anon_vma->root->rwsem held */ 177271e3aac0SAndrea Arcangeli static void __split_huge_page(struct page *page, 177371e3aac0SAndrea Arcangeli struct anon_vma *anon_vma) 177471e3aac0SAndrea Arcangeli { 177571e3aac0SAndrea Arcangeli int mapcount, mapcount2; 1776bf181b9fSMichel Lespinasse pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 177771e3aac0SAndrea Arcangeli struct anon_vma_chain *avc; 177871e3aac0SAndrea Arcangeli 177971e3aac0SAndrea Arcangeli BUG_ON(!PageHead(page)); 178071e3aac0SAndrea Arcangeli BUG_ON(PageTail(page)); 178171e3aac0SAndrea Arcangeli 178271e3aac0SAndrea Arcangeli mapcount = 0; 1783bf181b9fSMichel Lespinasse anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { 178471e3aac0SAndrea Arcangeli struct vm_area_struct *vma = avc->vma; 178571e3aac0SAndrea Arcangeli unsigned long addr = vma_address(page, vma); 178671e3aac0SAndrea Arcangeli BUG_ON(is_vma_temporary_stack(vma)); 178771e3aac0SAndrea Arcangeli mapcount += __split_huge_page_splitting(page, vma, addr); 178871e3aac0SAndrea Arcangeli } 178905759d38SAndrea Arcangeli /* 179005759d38SAndrea Arcangeli * It is critical that new vmas are added to the tail of the 179105759d38SAndrea Arcangeli * anon_vma list. This guarantees that if copy_huge_pmd() runs 179205759d38SAndrea Arcangeli * and establishes a child pmd before 179305759d38SAndrea Arcangeli * __split_huge_page_splitting() freezes the parent pmd (so if 179405759d38SAndrea Arcangeli * we fail to prevent copy_huge_pmd() from running until the 179505759d38SAndrea Arcangeli * whole __split_huge_page() is complete), we will still see 179605759d38SAndrea Arcangeli * the newly established pmd of the child later during the 179705759d38SAndrea Arcangeli * walk, to be able to set it as pmd_trans_splitting too.
179805759d38SAndrea Arcangeli */ 179905759d38SAndrea Arcangeli if (mapcount != page_mapcount(page)) 180005759d38SAndrea Arcangeli printk(KERN_ERR "mapcount %d page_mapcount %d\n", 180105759d38SAndrea Arcangeli mapcount, page_mapcount(page)); 180271e3aac0SAndrea Arcangeli BUG_ON(mapcount != page_mapcount(page)); 180371e3aac0SAndrea Arcangeli 180471e3aac0SAndrea Arcangeli __split_huge_page_refcount(page); 180571e3aac0SAndrea Arcangeli 180671e3aac0SAndrea Arcangeli mapcount2 = 0; 1807bf181b9fSMichel Lespinasse anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { 180871e3aac0SAndrea Arcangeli struct vm_area_struct *vma = avc->vma; 180971e3aac0SAndrea Arcangeli unsigned long addr = vma_address(page, vma); 181071e3aac0SAndrea Arcangeli BUG_ON(is_vma_temporary_stack(vma)); 181171e3aac0SAndrea Arcangeli mapcount2 += __split_huge_page_map(page, vma, addr); 181271e3aac0SAndrea Arcangeli } 181305759d38SAndrea Arcangeli if (mapcount != mapcount2) 181405759d38SAndrea Arcangeli printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n", 181505759d38SAndrea Arcangeli mapcount, mapcount2, page_mapcount(page)); 181671e3aac0SAndrea Arcangeli BUG_ON(mapcount != mapcount2); 181771e3aac0SAndrea Arcangeli } 181871e3aac0SAndrea Arcangeli 181971e3aac0SAndrea Arcangeli int split_huge_page(struct page *page) 182071e3aac0SAndrea Arcangeli { 182171e3aac0SAndrea Arcangeli struct anon_vma *anon_vma; 182271e3aac0SAndrea Arcangeli int ret = 1; 182371e3aac0SAndrea Arcangeli 1824c5a647d0SKirill A. Shutemov BUG_ON(is_huge_zero_pfn(page_to_pfn(page))); 182571e3aac0SAndrea Arcangeli BUG_ON(!PageAnon(page)); 1826062f1af2SMel Gorman 1827062f1af2SMel Gorman /* 1828062f1af2SMel Gorman * The caller does not necessarily hold an mmap_sem that would prevent 1829062f1af2SMel Gorman * the anon_vma disappearing so we first take a reference to it 1830062f1af2SMel Gorman * and then lock the anon_vma for write. This is similar to 1831062f1af2SMel Gorman * page_lock_anon_vma_read except the write lock is taken to serialise 1832062f1af2SMel Gorman * against parallel split or collapse operations.
1833062f1af2SMel Gorman */ 1834062f1af2SMel Gorman anon_vma = page_get_anon_vma(page); 183571e3aac0SAndrea Arcangeli if (!anon_vma) 183671e3aac0SAndrea Arcangeli goto out; 1837062f1af2SMel Gorman anon_vma_lock_write(anon_vma); 1838062f1af2SMel Gorman 183971e3aac0SAndrea Arcangeli ret = 0; 184071e3aac0SAndrea Arcangeli if (!PageCompound(page)) 184171e3aac0SAndrea Arcangeli goto out_unlock; 184271e3aac0SAndrea Arcangeli 184371e3aac0SAndrea Arcangeli BUG_ON(!PageSwapBacked(page)); 184471e3aac0SAndrea Arcangeli __split_huge_page(page, anon_vma); 184581ab4201SAndi Kleen count_vm_event(THP_SPLIT); 184671e3aac0SAndrea Arcangeli 184771e3aac0SAndrea Arcangeli BUG_ON(PageCompound(page)); 184871e3aac0SAndrea Arcangeli out_unlock: 1849062f1af2SMel Gorman anon_vma_unlock(anon_vma); 1850062f1af2SMel Gorman put_anon_vma(anon_vma); 185171e3aac0SAndrea Arcangeli out: 185271e3aac0SAndrea Arcangeli return ret; 185371e3aac0SAndrea Arcangeli } 185471e3aac0SAndrea Arcangeli 18554b6e1e37SKonstantin Khlebnikov #define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE) 185678f11a25SAndrea Arcangeli 185760ab3244SAndrea Arcangeli int hugepage_madvise(struct vm_area_struct *vma, 185860ab3244SAndrea Arcangeli unsigned long *vm_flags, int advice) 18590af4e98bSAndrea Arcangeli { 18608e72033fSGerald Schaefer struct mm_struct *mm = vma->vm_mm; 18618e72033fSGerald Schaefer 1862a664b2d8SAndrea Arcangeli switch (advice) { 1863a664b2d8SAndrea Arcangeli case MADV_HUGEPAGE: 18640af4e98bSAndrea Arcangeli /* 18650af4e98bSAndrea Arcangeli * Be somewhat over-protective like KSM for now! 18660af4e98bSAndrea Arcangeli */ 186778f11a25SAndrea Arcangeli if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP)) 18680af4e98bSAndrea Arcangeli return -EINVAL; 18698e72033fSGerald Schaefer if (mm->def_flags & VM_NOHUGEPAGE) 18708e72033fSGerald Schaefer return -EINVAL; 1871a664b2d8SAndrea Arcangeli *vm_flags &= ~VM_NOHUGEPAGE; 18720af4e98bSAndrea Arcangeli *vm_flags |= VM_HUGEPAGE; 187360ab3244SAndrea Arcangeli /* 187460ab3244SAndrea Arcangeli * If the vma becomes good for khugepaged to scan, 187560ab3244SAndrea Arcangeli * register it here without waiting for a page fault that 187660ab3244SAndrea Arcangeli * may not happen any time soon. 187760ab3244SAndrea Arcangeli */ 187860ab3244SAndrea Arcangeli if (unlikely(khugepaged_enter_vma_merge(vma))) 187960ab3244SAndrea Arcangeli return -ENOMEM; 1880a664b2d8SAndrea Arcangeli break; 1881a664b2d8SAndrea Arcangeli case MADV_NOHUGEPAGE: 1882a664b2d8SAndrea Arcangeli /* 1883a664b2d8SAndrea Arcangeli * Be somewhat over-protective like KSM for now! 1884a664b2d8SAndrea Arcangeli */ 188578f11a25SAndrea Arcangeli if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP)) 1886a664b2d8SAndrea Arcangeli return -EINVAL; 1887a664b2d8SAndrea Arcangeli *vm_flags &= ~VM_HUGEPAGE; 1888a664b2d8SAndrea Arcangeli *vm_flags |= VM_NOHUGEPAGE; 188960ab3244SAndrea Arcangeli /* 189060ab3244SAndrea Arcangeli * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning 189160ab3244SAndrea Arcangeli * this vma even if we leave the mm registered in khugepaged if 189260ab3244SAndrea Arcangeli * it got registered before VM_NOHUGEPAGE was set.
189360ab3244SAndrea Arcangeli */ 1894a664b2d8SAndrea Arcangeli break; 1895a664b2d8SAndrea Arcangeli } 18960af4e98bSAndrea Arcangeli 18970af4e98bSAndrea Arcangeli return 0; 18980af4e98bSAndrea Arcangeli } 18990af4e98bSAndrea Arcangeli 1900ba76149fSAndrea Arcangeli static int __init khugepaged_slab_init(void) 1901ba76149fSAndrea Arcangeli { 1902ba76149fSAndrea Arcangeli mm_slot_cache = kmem_cache_create("khugepaged_mm_slot", 1903ba76149fSAndrea Arcangeli sizeof(struct mm_slot), 1904ba76149fSAndrea Arcangeli __alignof__(struct mm_slot), 0, NULL); 1905ba76149fSAndrea Arcangeli if (!mm_slot_cache) 1906ba76149fSAndrea Arcangeli return -ENOMEM; 1907ba76149fSAndrea Arcangeli 1908ba76149fSAndrea Arcangeli return 0; 1909ba76149fSAndrea Arcangeli } 1910ba76149fSAndrea Arcangeli 1911ba76149fSAndrea Arcangeli static void __init khugepaged_slab_free(void) 1912ba76149fSAndrea Arcangeli { 1913ba76149fSAndrea Arcangeli kmem_cache_destroy(mm_slot_cache); 1914ba76149fSAndrea Arcangeli mm_slot_cache = NULL; 1915ba76149fSAndrea Arcangeli } 1916ba76149fSAndrea Arcangeli 1917ba76149fSAndrea Arcangeli static inline struct mm_slot *alloc_mm_slot(void) 1918ba76149fSAndrea Arcangeli { 1919ba76149fSAndrea Arcangeli if (!mm_slot_cache) /* initialization failed */ 1920ba76149fSAndrea Arcangeli return NULL; 1921ba76149fSAndrea Arcangeli return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL); 1922ba76149fSAndrea Arcangeli } 1923ba76149fSAndrea Arcangeli 1924ba76149fSAndrea Arcangeli static inline void free_mm_slot(struct mm_slot *mm_slot) 1925ba76149fSAndrea Arcangeli { 1926ba76149fSAndrea Arcangeli kmem_cache_free(mm_slot_cache, mm_slot); 1927ba76149fSAndrea Arcangeli } 1928ba76149fSAndrea Arcangeli 1929ba76149fSAndrea Arcangeli static int __init mm_slots_hash_init(void) 1930ba76149fSAndrea Arcangeli { 1931ba76149fSAndrea Arcangeli mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head), 1932ba76149fSAndrea Arcangeli GFP_KERNEL); 1933ba76149fSAndrea Arcangeli if (!mm_slots_hash) 1934ba76149fSAndrea Arcangeli return -ENOMEM; 1935ba76149fSAndrea Arcangeli return 0; 1936ba76149fSAndrea Arcangeli } 1937ba76149fSAndrea Arcangeli 1938ba76149fSAndrea Arcangeli #if 0 1939ba76149fSAndrea Arcangeli static void __init mm_slots_hash_free(void) 1940ba76149fSAndrea Arcangeli { 1941ba76149fSAndrea Arcangeli kfree(mm_slots_hash); 1942ba76149fSAndrea Arcangeli mm_slots_hash = NULL; 1943ba76149fSAndrea Arcangeli } 1944ba76149fSAndrea Arcangeli #endif 1945ba76149fSAndrea Arcangeli 1946ba76149fSAndrea Arcangeli static struct mm_slot *get_mm_slot(struct mm_struct *mm) 1947ba76149fSAndrea Arcangeli { 1948ba76149fSAndrea Arcangeli struct mm_slot *mm_slot; 1949ba76149fSAndrea Arcangeli struct hlist_head *bucket; 1950ba76149fSAndrea Arcangeli struct hlist_node *node; 1951ba76149fSAndrea Arcangeli 1952ba76149fSAndrea Arcangeli bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct)) 1953ba76149fSAndrea Arcangeli % MM_SLOTS_HASH_HEADS]; 1954ba76149fSAndrea Arcangeli hlist_for_each_entry(mm_slot, node, bucket, hash) { 1955ba76149fSAndrea Arcangeli if (mm == mm_slot->mm) 1956ba76149fSAndrea Arcangeli return mm_slot; 1957ba76149fSAndrea Arcangeli } 1958ba76149fSAndrea Arcangeli return NULL; 1959ba76149fSAndrea Arcangeli } 1960ba76149fSAndrea Arcangeli 1961ba76149fSAndrea Arcangeli static void insert_to_mm_slots_hash(struct mm_struct *mm, 1962ba76149fSAndrea Arcangeli struct mm_slot *mm_slot) 1963ba76149fSAndrea Arcangeli { 1964ba76149fSAndrea Arcangeli struct hlist_head *bucket; 1965ba76149fSAndrea Arcangeli 
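/* Bucket selection must match get_mm_slot(): hash on the mm_struct pointer, scaled down by sizeof(struct mm_struct) so that consecutive slab allocations spread across the MM_SLOTS_HASH_HEADS buckets instead of clustering in a few of them. */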
1966ba76149fSAndrea Arcangeli bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct)) 1967ba76149fSAndrea Arcangeli % MM_SLOTS_HASH_HEADS]; 1968ba76149fSAndrea Arcangeli mm_slot->mm = mm; 1969ba76149fSAndrea Arcangeli hlist_add_head(&mm_slot->hash, bucket); 1970ba76149fSAndrea Arcangeli } 1971ba76149fSAndrea Arcangeli 1972ba76149fSAndrea Arcangeli static inline int khugepaged_test_exit(struct mm_struct *mm) 1973ba76149fSAndrea Arcangeli { 1974ba76149fSAndrea Arcangeli return atomic_read(&mm->mm_users) == 0; 1975ba76149fSAndrea Arcangeli } 1976ba76149fSAndrea Arcangeli 1977ba76149fSAndrea Arcangeli int __khugepaged_enter(struct mm_struct *mm) 1978ba76149fSAndrea Arcangeli { 1979ba76149fSAndrea Arcangeli struct mm_slot *mm_slot; 1980ba76149fSAndrea Arcangeli int wakeup; 1981ba76149fSAndrea Arcangeli 1982ba76149fSAndrea Arcangeli mm_slot = alloc_mm_slot(); 1983ba76149fSAndrea Arcangeli if (!mm_slot) 1984ba76149fSAndrea Arcangeli return -ENOMEM; 1985ba76149fSAndrea Arcangeli 1986ba76149fSAndrea Arcangeli /* __khugepaged_exit() must not run from under us */ 1987ba76149fSAndrea Arcangeli VM_BUG_ON(khugepaged_test_exit(mm)); 1988ba76149fSAndrea Arcangeli if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { 1989ba76149fSAndrea Arcangeli free_mm_slot(mm_slot); 1990ba76149fSAndrea Arcangeli return 0; 1991ba76149fSAndrea Arcangeli } 1992ba76149fSAndrea Arcangeli 1993ba76149fSAndrea Arcangeli spin_lock(&khugepaged_mm_lock); 1994ba76149fSAndrea Arcangeli insert_to_mm_slots_hash(mm, mm_slot); 1995ba76149fSAndrea Arcangeli /* 1996ba76149fSAndrea Arcangeli * Insert just behind the scanning cursor, to let the area settle 1997ba76149fSAndrea Arcangeli * down a little. 1998ba76149fSAndrea Arcangeli */ 1999ba76149fSAndrea Arcangeli wakeup = list_empty(&khugepaged_scan.mm_head); 2000ba76149fSAndrea Arcangeli list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head); 2001ba76149fSAndrea Arcangeli spin_unlock(&khugepaged_mm_lock); 2002ba76149fSAndrea Arcangeli 2003ba76149fSAndrea Arcangeli atomic_inc(&mm->mm_count); 2004ba76149fSAndrea Arcangeli if (wakeup) 2005ba76149fSAndrea Arcangeli wake_up_interruptible(&khugepaged_wait); 2006ba76149fSAndrea Arcangeli 2007ba76149fSAndrea Arcangeli return 0; 2008ba76149fSAndrea Arcangeli } 2009ba76149fSAndrea Arcangeli 2010ba76149fSAndrea Arcangeli int khugepaged_enter_vma_merge(struct vm_area_struct *vma) 2011ba76149fSAndrea Arcangeli { 2012ba76149fSAndrea Arcangeli unsigned long hstart, hend; 2013ba76149fSAndrea Arcangeli if (!vma->anon_vma) 2014ba76149fSAndrea Arcangeli /* 2015ba76149fSAndrea Arcangeli * Not yet faulted in so we will register later in the 2016ba76149fSAndrea Arcangeli * page fault if needed. 
2017ba76149fSAndrea Arcangeli */ 2018ba76149fSAndrea Arcangeli return 0; 201978f11a25SAndrea Arcangeli if (vma->vm_ops) 2020ba76149fSAndrea Arcangeli /* khugepaged not yet working on file or special mappings */ 2021ba76149fSAndrea Arcangeli return 0; 2022b3b9c293SKonstantin Khlebnikov VM_BUG_ON(vma->vm_flags & VM_NO_THP); 2023ba76149fSAndrea Arcangeli hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2024ba76149fSAndrea Arcangeli hend = vma->vm_end & HPAGE_PMD_MASK; 2025ba76149fSAndrea Arcangeli if (hstart < hend) 2026ba76149fSAndrea Arcangeli return khugepaged_enter(vma); 2027ba76149fSAndrea Arcangeli return 0; 2028ba76149fSAndrea Arcangeli } 2029ba76149fSAndrea Arcangeli 2030ba76149fSAndrea Arcangeli void __khugepaged_exit(struct mm_struct *mm) 2031ba76149fSAndrea Arcangeli { 2032ba76149fSAndrea Arcangeli struct mm_slot *mm_slot; 2033ba76149fSAndrea Arcangeli int free = 0; 2034ba76149fSAndrea Arcangeli 2035ba76149fSAndrea Arcangeli spin_lock(&khugepaged_mm_lock); 2036ba76149fSAndrea Arcangeli mm_slot = get_mm_slot(mm); 2037ba76149fSAndrea Arcangeli if (mm_slot && khugepaged_scan.mm_slot != mm_slot) { 2038ba76149fSAndrea Arcangeli hlist_del(&mm_slot->hash); 2039ba76149fSAndrea Arcangeli list_del(&mm_slot->mm_node); 2040ba76149fSAndrea Arcangeli free = 1; 2041ba76149fSAndrea Arcangeli } 2042d788e80aSChris Wright spin_unlock(&khugepaged_mm_lock); 2043ba76149fSAndrea Arcangeli 2044ba76149fSAndrea Arcangeli if (free) { 2045ba76149fSAndrea Arcangeli clear_bit(MMF_VM_HUGEPAGE, &mm->flags); 2046ba76149fSAndrea Arcangeli free_mm_slot(mm_slot); 2047ba76149fSAndrea Arcangeli mmdrop(mm); 2048ba76149fSAndrea Arcangeli } else if (mm_slot) { 2049ba76149fSAndrea Arcangeli /* 2050ba76149fSAndrea Arcangeli * This is required to serialize against 2051ba76149fSAndrea Arcangeli * khugepaged_test_exit() (which is guaranteed to run 2052ba76149fSAndrea Arcangeli * under mmap sem read mode). Stop here (after we 2053ba76149fSAndrea Arcangeli * return all pagetables will be destroyed) until 2054ba76149fSAndrea Arcangeli * khugepaged has finished working on the pagetables 2055ba76149fSAndrea Arcangeli * under the mmap_sem. 
2056ba76149fSAndrea Arcangeli */ 2057ba76149fSAndrea Arcangeli down_write(&mm->mmap_sem); 2058ba76149fSAndrea Arcangeli up_write(&mm->mmap_sem); 2059d788e80aSChris Wright } 2060ba76149fSAndrea Arcangeli } 2061ba76149fSAndrea Arcangeli 2062ba76149fSAndrea Arcangeli static void release_pte_page(struct page *page) 2063ba76149fSAndrea Arcangeli { 2064ba76149fSAndrea Arcangeli /* 0 stands for page_is_file_cache(page) == false */ 2065ba76149fSAndrea Arcangeli dec_zone_page_state(page, NR_ISOLATED_ANON + 0); 2066ba76149fSAndrea Arcangeli unlock_page(page); 2067ba76149fSAndrea Arcangeli putback_lru_page(page); 2068ba76149fSAndrea Arcangeli } 2069ba76149fSAndrea Arcangeli 2070ba76149fSAndrea Arcangeli static void release_pte_pages(pte_t *pte, pte_t *_pte) 2071ba76149fSAndrea Arcangeli { 2072ba76149fSAndrea Arcangeli while (--_pte >= pte) { 2073ba76149fSAndrea Arcangeli pte_t pteval = *_pte; 2074ba76149fSAndrea Arcangeli if (!pte_none(pteval)) 2075ba76149fSAndrea Arcangeli release_pte_page(pte_page(pteval)); 2076ba76149fSAndrea Arcangeli } 2077ba76149fSAndrea Arcangeli } 2078ba76149fSAndrea Arcangeli 2079ba76149fSAndrea Arcangeli static int __collapse_huge_page_isolate(struct vm_area_struct *vma, 2080ba76149fSAndrea Arcangeli unsigned long address, 2081ba76149fSAndrea Arcangeli pte_t *pte) 2082ba76149fSAndrea Arcangeli { 2083ba76149fSAndrea Arcangeli struct page *page; 2084ba76149fSAndrea Arcangeli pte_t *_pte; 2085344aa35cSBob Liu int referenced = 0, none = 0; 2086ba76149fSAndrea Arcangeli for (_pte = pte; _pte < pte+HPAGE_PMD_NR; 2087ba76149fSAndrea Arcangeli _pte++, address += PAGE_SIZE) { 2088ba76149fSAndrea Arcangeli pte_t pteval = *_pte; 2089ba76149fSAndrea Arcangeli if (pte_none(pteval)) { 2090ba76149fSAndrea Arcangeli if (++none <= khugepaged_max_ptes_none) 2091ba76149fSAndrea Arcangeli continue; 2092344aa35cSBob Liu else 2093ba76149fSAndrea Arcangeli goto out; 2094ba76149fSAndrea Arcangeli } 2095344aa35cSBob Liu if (!pte_present(pteval) || !pte_write(pteval)) 2096ba76149fSAndrea Arcangeli goto out; 2097ba76149fSAndrea Arcangeli page = vm_normal_page(vma, address, pteval); 2098344aa35cSBob Liu if (unlikely(!page)) 2099ba76149fSAndrea Arcangeli goto out; 2100344aa35cSBob Liu 2101ba76149fSAndrea Arcangeli VM_BUG_ON(PageCompound(page)); 2102ba76149fSAndrea Arcangeli BUG_ON(!PageAnon(page)); 2103ba76149fSAndrea Arcangeli VM_BUG_ON(!PageSwapBacked(page)); 2104ba76149fSAndrea Arcangeli 2105ba76149fSAndrea Arcangeli /* cannot use mapcount: can't collapse if there's a gup pin */ 2106344aa35cSBob Liu if (page_count(page) != 1) 2107ba76149fSAndrea Arcangeli goto out; 2108ba76149fSAndrea Arcangeli /* 2109ba76149fSAndrea Arcangeli * We can do it before isolate_lru_page because the 2110ba76149fSAndrea Arcangeli * page can't be freed from under us. NOTE: PG_lock 2111ba76149fSAndrea Arcangeli * is needed to serialize against split_huge_page 2112ba76149fSAndrea Arcangeli * when invoked from the VM. 2113ba76149fSAndrea Arcangeli */ 2114344aa35cSBob Liu if (!trylock_page(page)) 2115ba76149fSAndrea Arcangeli goto out; 2116ba76149fSAndrea Arcangeli /* 2117ba76149fSAndrea Arcangeli * Isolate the page to avoid collapsing a hugepage 2118ba76149fSAndrea Arcangeli * currently in use by the VM.
2119ba76149fSAndrea Arcangeli */ 2120ba76149fSAndrea Arcangeli if (isolate_lru_page(page)) { 2121ba76149fSAndrea Arcangeli unlock_page(page); 2122ba76149fSAndrea Arcangeli goto out; 2123ba76149fSAndrea Arcangeli } 2124ba76149fSAndrea Arcangeli /* 0 stands for page_is_file_cache(page) == false */ 2125ba76149fSAndrea Arcangeli inc_zone_page_state(page, NR_ISOLATED_ANON + 0); 2126ba76149fSAndrea Arcangeli VM_BUG_ON(!PageLocked(page)); 2127ba76149fSAndrea Arcangeli VM_BUG_ON(PageLRU(page)); 2128ba76149fSAndrea Arcangeli 2129ba76149fSAndrea Arcangeli /* If there is no mapped pte young don't collapse the page */ 21308ee53820SAndrea Arcangeli if (pte_young(pteval) || PageReferenced(page) || 21318ee53820SAndrea Arcangeli mmu_notifier_test_young(vma->vm_mm, address)) 2132ba76149fSAndrea Arcangeli referenced = 1; 2133ba76149fSAndrea Arcangeli } 2134344aa35cSBob Liu if (likely(referenced)) 2135344aa35cSBob Liu return 1; 2136ba76149fSAndrea Arcangeli out: 2137344aa35cSBob Liu release_pte_pages(pte, _pte); 2138344aa35cSBob Liu return 0; 2139ba76149fSAndrea Arcangeli } 2140ba76149fSAndrea Arcangeli 2141ba76149fSAndrea Arcangeli static void __collapse_huge_page_copy(pte_t *pte, struct page *page, 2142ba76149fSAndrea Arcangeli struct vm_area_struct *vma, 2143ba76149fSAndrea Arcangeli unsigned long address, 2144ba76149fSAndrea Arcangeli spinlock_t *ptl) 2145ba76149fSAndrea Arcangeli { 2146ba76149fSAndrea Arcangeli pte_t *_pte; 2147ba76149fSAndrea Arcangeli for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) { 2148ba76149fSAndrea Arcangeli pte_t pteval = *_pte; 2149ba76149fSAndrea Arcangeli struct page *src_page; 2150ba76149fSAndrea Arcangeli 2151ba76149fSAndrea Arcangeli if (pte_none(pteval)) { 2152ba76149fSAndrea Arcangeli clear_user_highpage(page, address); 2153ba76149fSAndrea Arcangeli add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); 2154ba76149fSAndrea Arcangeli } else { 2155ba76149fSAndrea Arcangeli src_page = pte_page(pteval); 2156ba76149fSAndrea Arcangeli copy_user_highpage(page, src_page, address, vma); 2157ba76149fSAndrea Arcangeli VM_BUG_ON(page_mapcount(src_page) != 1); 2158ba76149fSAndrea Arcangeli release_pte_page(src_page); 2159ba76149fSAndrea Arcangeli /* 2160ba76149fSAndrea Arcangeli * ptl mostly unnecessary, but preempt has to 2161ba76149fSAndrea Arcangeli * be disabled to update the per-cpu stats 2162ba76149fSAndrea Arcangeli * inside page_remove_rmap(). 2163ba76149fSAndrea Arcangeli */ 2164ba76149fSAndrea Arcangeli spin_lock(ptl); 2165ba76149fSAndrea Arcangeli /* 2166ba76149fSAndrea Arcangeli * paravirt calls inside pte_clear here are 2167ba76149fSAndrea Arcangeli * superfluous. 
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl)
{
	pte_t *_pte;
	for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
		pte_t pteval = *_pte;
		struct page *src_page;

		if (pte_none(pteval)) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			VM_BUG_ON(page_mapcount(src_page) != 1);
			release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}

		address += PAGE_SIZE;
		page++;
	}
}

static void khugepaged_alloc_sleep(void)
{
	wait_event_freezable_timeout(khugepaged_wait, false,
			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
}

#ifdef CONFIG_NUMA
static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page
*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
		       struct vm_area_struct *vma, unsigned long address,
		       int node)
{
	VM_BUG_ON(*hpage);
	/*
	 * Allocate the page while the vma is still valid and under
	 * the mmap_sem read mode so there is no memory allocation
	 * later when we take the mmap_sem in write mode. This is more
	 * friendly behavior (OTOH it may actually hide bugs) to
	 * filesystems in userland with daemons allocating memory in
	 * the userland I/O paths. Allocating memory with the
	 * mmap_sem in read mode is also a good idea to allow greater
	 * scalability.
	 */
	*hpage = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
				    node, __GFP_OTHER_NODE);

	/*
	 * After allocating the hugepage, release the mmap_sem read lock in
	 * preparation for taking it in write mode.
	 */
	up_read(&mm->mmap_sem);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
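
/*
 * Convention shared by the two helpers above (inferred from their use):
 * *hpage == NULL means "no page cached", a real pointer means "a
 * preallocated page is available", and the ERR_PTR(-ENOMEM) left behind
 * by khugepaged_alloc_page() means "the last allocation failed".  In the
 * last case the prealloc step sleeps via khugepaged_alloc_sleep() before
 * khugepaged retries, so a fragmented machine is polled at the pace set
 * by khugepaged_alloc_sleep_millisecs instead of being hammered.
 */
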
#else
static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_hugepage(khugepaged_defrag());
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page
*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
		       struct vm_area_struct *vma, unsigned long address,
		       int node)
{
	up_read(&mm->mmap_sem);
	VM_BUG_ON(!*hpage);
	return *hpage;
}
#endif

static bool hugepage_vma_check(struct vm_area_struct *vma)
{
	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
	    (vma->vm_flags & VM_NOHUGEPAGE))
		return false;

	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (is_vma_temporary_stack(vma))
		return false;
	VM_BUG_ON(vma->vm_flags & VM_NO_THP);
	return true;
}
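
/*
 * Reading guide for the checks above (descriptive, not a new rule): a
 * vma is eligible for collapse only if it is anonymous (has an anon_vma
 * and no vm_ops), is not a temporary stack, and hugepages are requested
 * for it -- either globally ("always" mode, tested by
 * khugepaged_always()) or per-vma via madvise(MADV_HUGEPAGE) setting
 * VM_HUGEPAGE.  MADV_NOHUGEPAGE's VM_NOHUGEPAGE always wins.
 */
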
static void collapse_huge_page(struct mm_struct *mm,
			       unsigned long address,
			       struct page **hpage,
			       struct vm_area_struct *vma,
			       int node)
{
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *ptl;
	int isolated;
	unsigned long hstart, hend;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* release the mmap_sem read lock. */
	new_page = khugepaged_alloc_page(hpage, mm, vma, address, node);
	if (!new_page)
		return;

	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
		return;

	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	down_write(&mm->mmap_sem);
	if (unlikely(khugepaged_test_exit(mm)))
		goto out;

	vma = find_vma(mm, address);
	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		goto out;
	if (!hugepage_vma_check(vma))
		goto out;
	pmd = mm_find_pmd(mm, address);
	if (!pmd)
		goto out;
	if (pmd_trans_huge(*pmd))
		goto out;

	anon_vma_lock_write(vma->anon_vma);

	pte = pte_offset_map(pmd, address);
	ptl = pte_lockptr(mm, pmd);

	mmun_start = address;
	mmun_end   = address + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	spin_lock(&mm->page_table_lock); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_clear_flush(vma, address, pmd);
	spin_unlock(&mm->page_table_lock);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	spin_lock(ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte);
	spin_unlock(ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(&mm->page_table_lock);
		BUG_ON(!pmd_none(*pmd));
		set_pmd_at(mm, address, pmd, _pmd);
		spin_unlock(&mm->page_table_lock);
		anon_vma_unlock(vma->anon_vma);
		goto out;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, ptl);
	pte_unmap(pte);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to avoid the copy_huge_page writes becoming
	 * visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(&mm->page_table_lock);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	pgtable_trans_huge_deposit(mm, pgtable);
	spin_unlock(&mm->page_table_lock);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
out_up_write:
	up_write(&mm->mmap_sem);
	return;

out:
	mem_cgroup_uncharge_page(new_page);
	goto out_up_write;
}
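
/*
 * Recap of the collapse sequence above (descriptive only): the huge page
 * is allocated and charged first, outside mmap_sem write mode; then, with
 * mmap_sem held for write, the pmd is cleared and flushed so that neither
 * gup_fast nor the hardware can reach the small pages; the small pages
 * are isolated, copied into the new huge page, and only then is the huge
 * pmd installed.  If isolation fails, the old pmd value saved in _pmd is
 * simply put back, undoing the whole operation.
 *
 * As an aside, the hstart/hend arithmetic earlier in the function rounds
 * the vma inward to huge page boundaries; e.g. with 2MB huge pages
 * (illustrative numbers), vm_start == 0x00201000 gives
 * hstart == 0x00400000 and vm_end == 0x009ff000 gives hend == 0x00800000,
 * so only [0x00400000, 0x00800000) is eligible for collapse.
 */
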
static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, referenced = 0, none = 0;
	struct page *page;
	unsigned long _address;
	spinlock_t *ptl;
	int node = -1;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd)
		goto out;
	if (pmd_trans_huge(*pmd))
		goto out;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval)) {
			if (++none <= khugepaged_max_ptes_none)
				continue;
			else
				goto out_unmap;
		}
		if (!pte_present(pteval) || !pte_write(pteval))
			goto out_unmap;
		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page))
			goto out_unmap;
		/*
		 * Choose the node of the first page. This could
		 * be more sophisticated and look at more pages,
		 * but isn't for now.
		 */
		if (node == -1)
			node = page_to_nid(page);
		VM_BUG_ON(PageCompound(page));
		if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
			goto out_unmap;
		/* cannot use mapcount: can't collapse if there's a gup pin */
		if (page_count(page) != 1)
			goto out_unmap;
		if (pte_young(pteval) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced = 1;
	}
	if (referenced)
		ret = 1;
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret)
		/* collapse_huge_page will return with the mmap_sem released */
		collapse_huge_page(mm, address, hpage, vma, node);
out:
	return ret;
}
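
/*
 * Locking contract worth spelling out (derived from the code above):
 * khugepaged_scan_pmd() runs a read-only rehearsal of
 * __collapse_huge_page_isolate() under the pte lock, and only if the
 * range still looks collapsible does it call collapse_huge_page(),
 * which drops mmap_sem and revalidates everything from scratch under
 * the write lock.  A return value of 1 therefore also tells the caller
 * that mmap_sem has been released.
 */
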
static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hlist_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}

static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);

	mm = mm_slot->mm;
	down_read(&mm->mmap_sem);
	if (unlikely(khugepaged_test_exit(mm)))
		vma = NULL;
	else
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}
		if (!hugepage_vma_check(vma)) {
skip:
			progress++;
			continue;
		}
		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;
		if (hstart >= hend)
			goto skip;
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
		while (khugepaged_scan.address < hend) {
			int ret;
			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			ret = khugepaged_scan_pmd(mm, vma,
						  khugepaged_scan.address,
						  hpage);
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
				/* we released mmap_sem so break loop */
				goto breakouterloop_mmap_sem;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_sem:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}

static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}

static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = khugepaged_pages_to_scan;
	bool wait = true;

	barrier(); /* write khugepaged_pages_to_scan to local stack */

	while (progress < pages) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;

		cond_resched();

		if (unlikely(kthread_should_stop() || freezing(current)))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}

	if (!IS_ERR_OR_NULL(hpage))
		put_page(hpage);
}
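
/*
 * Throughput sketch (illustrative arithmetic, assuming the common
 * defaults of 4096 ptes scanned per pass and HPAGE_PMD_NR == 512 ptes
 * per pmd): each pmd range examined adds HPAGE_PMD_NR to progress, so
 * one wakeup of khugepaged_do_scan() covers at most 4096/512 == 8 pmd
 * ranges, i.e. 16MB of address space with 2MB huge pages, at the pace
 * set by khugepaged_scan_sleep_millisecs.
 */
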
static void khugepaged_wait_work(void)
{
	try_to_freeze();

	if (khugepaged_has_work()) {
		if (!khugepaged_scan_sleep_millisecs)
			return;

		wait_event_freezable_timeout(khugepaged_wait,
					     kthread_should_stop(),
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
		return;
	}

	if (khugepaged_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}

static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, 19);

	while (!kthread_should_stop()) {
		khugepaged_do_scan();
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}

static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
		unsigned long haddr, pmd_t *pmd)
{
	struct mm_struct *mm = vma->vm_mm;
	pgtable_t pgtable;
	pmd_t _pmd;
	int i;

	pmdp_clear_flush(vma, haddr, pmd);
	/* leave pmd empty until pte is filled */

	pgtable = pgtable_trans_huge_withdraw(mm);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
		entry = pte_mkspecial(entry);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
	put_huge_zero_page();
}
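
/*
 * What the loop above builds, in words (descriptive, no new behavior):
 * splitting the huge zero pmd never copies anything.  The deposited
 * pagetable is withdrawn, filled with HPAGE_PMD_NR (512 on x86-64)
 * pte_mkspecial() mappings of the 4k zero page, and only then wired
 * into the pmd; the smp_wmb() guarantees no thread can observe the
 * populated pmd before the ptes behind it are visible.  The reference
 * the huge zero page held is dropped at the end via
 * put_huge_zero_page().
 */
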
void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd)
{
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long haddr = address & HPAGE_PMD_MASK;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE);

	mmun_start = haddr;
	mmun_end   = haddr + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(&mm->page_table_lock);
		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
		return;
	}
	if (is_huge_zero_pmd(*pmd)) {
		__split_huge_zero_page_pmd(vma, haddr, pmd);
		spin_unlock(&mm->page_table_lock);
		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
		return;
	}
	page = pmd_page(*pmd);
	VM_BUG_ON(!page_count(page));
	get_page(page);
	spin_unlock(&mm->page_table_lock);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	split_huge_page(page);

	put_page(page);
	BUG_ON(pmd_trans_huge(*pmd));
}

void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd)
{
	struct vm_area_struct *vma;

	vma = find_vma(mm, address);
	BUG_ON(vma == NULL);
	split_huge_page_pmd(vma, address, pmd);
}
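
/*
 * Note on the re-check dance above (explanatory only): between the
 * caller observing a huge pmd and __split_huge_page_pmd() taking
 * page_table_lock, another thread may already have split it, hence the
 * early !pmd_trans_huge() bailout.  The get_page()/put_page() pair
 * keeps the compound page alive across split_huge_page(), which can
 * sleep and therefore must run with the page table spinlock dropped.
 */
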
static void split_huge_page_address(struct mm_struct *mm,
				    unsigned long address)
{
	pmd_t *pmd;

	VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));

	pmd = mm_find_pmd(mm, address);
	if (!pmd)
		return;
	/*
	 * Caller holds the mmap_sem write mode, so a huge pmd cannot
	 * materialize from under us.
	 */
	split_huge_page_pmd_mm(mm, address, pmd);
}

void __vma_adjust_trans_huge(struct vm_area_struct *vma,
			     unsigned long start,
			     unsigned long end,
			     long adjust_next)
{
	/*
	 * If the new start address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (start & ~HPAGE_PMD_MASK &&
	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_page_address(vma->vm_mm, start);

	/*
	 * If the new end address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (end & ~HPAGE_PMD_MASK &&
	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_page_address(vma->vm_mm, end);

	/*
	 * If we're also updating the vma->vm_next->vm_start, if the new
	 * vm_next->vm_start isn't hpage aligned and it could previously
	 * contain a hugepage: check if we need to split a huge pmd.
	 */
	if (adjust_next > 0) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long nstart = next->vm_start;
		nstart += adjust_next << PAGE_SHIFT;
		if (nstart & ~HPAGE_PMD_MASK &&
		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
			split_huge_page_address(next->vm_mm, nstart);
	}
}
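
/*
 * Worked example of the alignment checks above (illustrative numbers,
 * assuming 2MB huge pages): if a vma is adjusted so that its new start
 * becomes 0x00501000, that address is not pmd aligned
 * (0x00501000 & ~HPAGE_PMD_MASK != 0) and rounds down to 0x00400000.
 * When the old vma fully covered [0x00400000, 0x00600000), a huge pmd
 * there would now straddle the new vma boundary, so
 * split_huge_page_address() is called to break it back into 4k ptes
 * before the adjustment takes effect.
 */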