/*
 * Copyright (C) 2009  Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default transparent hugepage support is enabled for all mappings
 * and khugepaged scans all mappings. Defrag is only invoked by
 * khugepaged hugepage allocations and by page faults inside
 * MADV_HUGEPAGE regions to avoid the risk of slowing down short-lived
 * allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
        (1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
        (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
        (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
        (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);

/* by default scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse into a hugepage if at least one pte is mapped,
 * just as would have happened had the vma been large enough at page
 * fault time.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
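
/*
 * Illustrative map (a sketch, not code from this file): the knobs
 * above are exported via the sysfs attributes defined later in this
 * file, roughly as:
 *
 *      /sys/kernel/mm/transparent_hugepage/enabled
 *      /sys/kernel/mm/transparent_hugepage/defrag
 *      /sys/kernel/mm/transparent_hugepage/khugepaged/defrag
 *      /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *      /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 *      /sys/kernel/mm/transparent_hugepage/khugepaged/alloc_sleep_millisecs
 *      /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
 */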

static int khugepaged(void *none);
static int mm_slots_hash_init(void);
static int khugepaged_slab_init(void);
static void khugepaged_slab_free(void);

#define MM_SLOTS_HASH_HEADS 1024
static struct hlist_head *mm_slots_hash __read_mostly;
static struct kmem_cache *mm_slot_cache __read_mostly;

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
        struct hlist_node hash;
        struct list_head mm_node;
        struct mm_struct *mm;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that mm to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
        struct list_head mm_head;
        struct mm_slot *mm_slot;
        unsigned long address;
};
static struct khugepaged_scan khugepaged_scan = {
        .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};
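
/*
 * Illustrative sketch (an assumption about usage, not code from this
 * file): the single cursor lets khugepaged stop once it has scanned
 * khugepaged_pages_to_scan pages and resume exactly where it left off
 * on the next wakeup, along the lines of:
 *
 *      spin_lock(&khugepaged_mm_lock);
 *      mm_slot = khugepaged_scan.mm_slot;      (mm being scanned)
 *      address = khugepaged_scan.address;      (resume point inside it)
 *      spin_unlock(&khugepaged_mm_lock);
 */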

static int set_recommended_min_free_kbytes(void)
{
        struct zone *zone;
        int nr_zones = 0;
        unsigned long recommended_min;
        extern int min_free_kbytes;

        if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
                      &transparent_hugepage_flags) &&
            !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
                      &transparent_hugepage_flags))
                return 0;

        for_each_populated_zone(zone)
                nr_zones++;

        /* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
        recommended_min = pageblock_nr_pages * nr_zones * 2;

        /*
         * Make sure that on average at least two pageblocks are almost free
         * of another type, one for a migratetype to fall back to and a
         * second to avoid subsequent fallbacks of other types. There are 3
         * MIGRATE_TYPES we care about.
         */
        recommended_min += pageblock_nr_pages * nr_zones *
                           MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

        /* never allow reserving more than 5% of the lowmem */
        recommended_min = min(recommended_min,
                              (unsigned long) nr_free_buffer_pages() / 20);
        recommended_min <<= (PAGE_SHIFT-10);

        if (recommended_min > min_free_kbytes)
                min_free_kbytes = recommended_min;
        setup_per_zone_wmarks();
        return 0;
}
late_initcall(set_recommended_min_free_kbytes);
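
/*
 * Worked example (illustrative, assuming x86-64 with 4k pages and 2M
 * pageblocks, so pageblock_nr_pages == 512, MIGRATE_PCPTYPES == 3,
 * and four populated zones):
 *
 *      recommended_min  = 512 * 4 * 2     =  4096 pages
 *      recommended_min += 512 * 4 * 3 * 3 = 18432 pages, 22528 total
 *      22528 << (PAGE_SHIFT-10), i.e. 22528 << 2 = 90112 kbytes (~88M)
 *
 * capped at 5% of lowmem before being compared with min_free_kbytes.
 */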

static int start_khugepaged(void)
{
        int err = 0;
        if (khugepaged_enabled()) {
                int wakeup;
                if (unlikely(!mm_slot_cache || !mm_slots_hash)) {
                        err = -ENOMEM;
                        goto out;
                }
                mutex_lock(&khugepaged_mutex);
                if (!khugepaged_thread)
                        khugepaged_thread = kthread_run(khugepaged, NULL,
                                                        "khugepaged");
                if (unlikely(IS_ERR(khugepaged_thread))) {
                        printk(KERN_ERR
                               "khugepaged: kthread_run(khugepaged) failed\n");
                        err = PTR_ERR(khugepaged_thread);
                        khugepaged_thread = NULL;
                }
                wakeup = !list_empty(&khugepaged_scan.mm_head);
                mutex_unlock(&khugepaged_mutex);
                if (wakeup)
                        wake_up_interruptible(&khugepaged_wait);

                set_recommended_min_free_kbytes();
        } else
                /* wakeup to exit */
                wake_up_interruptible(&khugepaged_wait);
out:
        return err;
}

#ifdef CONFIG_SYSFS

static ssize_t double_flag_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf,
                                enum transparent_hugepage_flag enabled,
                                enum transparent_hugepage_flag req_madv)
{
        if (test_bit(enabled, &transparent_hugepage_flags)) {
                VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
                return sprintf(buf, "[always] madvise never\n");
        } else if (test_bit(req_madv, &transparent_hugepage_flags))
                return sprintf(buf, "always [madvise] never\n");
        else
                return sprintf(buf, "always madvise [never]\n");
}
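
/*
 * Example of the resulting format (illustrative): reading a double
 * flag attribute shows all three states with the active one
 * bracketed, e.g.:
 *
 *      $ cat /sys/kernel/mm/transparent_hugepage/enabled
 *      always [madvise] never
 */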

static ssize_t double_flag_store(struct kobject *kobj,
                                 struct kobj_attribute *attr,
                                 const char *buf, size_t count,
                                 enum transparent_hugepage_flag enabled,
                                 enum transparent_hugepage_flag req_madv)
{
        if (!memcmp("always", buf,
                    min(sizeof("always")-1, count))) {
                set_bit(enabled, &transparent_hugepage_flags);
                clear_bit(req_madv, &transparent_hugepage_flags);
        } else if (!memcmp("madvise", buf,
                           min(sizeof("madvise")-1, count))) {
                clear_bit(enabled, &transparent_hugepage_flags);
                set_bit(req_madv, &transparent_hugepage_flags);
        } else if (!memcmp("never", buf,
                           min(sizeof("never")-1, count))) {
                clear_bit(enabled, &transparent_hugepage_flags);
                clear_bit(req_madv, &transparent_hugepage_flags);
        } else
                return -EINVAL;

        return count;
}

static ssize_t enabled_show(struct kobject *kobj,
                            struct kobj_attribute *attr, char *buf)
{
        return double_flag_show(kobj, attr, buf,
                                TRANSPARENT_HUGEPAGE_FLAG,
                                TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
}
static ssize_t enabled_store(struct kobject *kobj,
                             struct kobj_attribute *attr,
                             const char *buf, size_t count)
{
        ssize_t ret;

        ret = double_flag_store(kobj, attr, buf, count,
                                TRANSPARENT_HUGEPAGE_FLAG,
                                TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);

        if (ret > 0) {
                int err = start_khugepaged();
                if (err)
                        ret = err;
        }

        if (ret > 0 &&
            (test_bit(TRANSPARENT_HUGEPAGE_FLAG,
                      &transparent_hugepage_flags) ||
             test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
                      &transparent_hugepage_flags)))
                set_recommended_min_free_kbytes();

        return ret;
}
static struct kobj_attribute enabled_attr =
        __ATTR(enabled, 0644, enabled_show, enabled_store);
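
/*
 * Descriptive note (not from the original file): a successful store
 * to "enabled" does more than flip the flag bits; it also starts or
 * stops the khugepaged kthread and, if THP stays enabled, re-derives
 * min_free_kbytes, e.g.:
 *
 *      # echo always >/sys/kernel/mm/transparent_hugepage/enabled
 *      (starts khugepaged if it wasn't running yet)
 */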

static ssize_t single_flag_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf,
                                enum transparent_hugepage_flag flag)
{
        return sprintf(buf, "%d\n",
                       !!test_bit(flag, &transparent_hugepage_flags));
}

static ssize_t single_flag_store(struct kobject *kobj,
                                 struct kobj_attribute *attr,
                                 const char *buf, size_t count,
                                 enum transparent_hugepage_flag flag)
{
        unsigned long value;
        int ret;

        ret = kstrtoul(buf, 10, &value);
        if (ret < 0)
                return ret;
        if (value > 1)
                return -EINVAL;

        if (value)
                set_bit(flag, &transparent_hugepage_flags);
        else
                clear_bit(flag, &transparent_hugepage_flags);

        return count;
}

/*
 * Currently "defrag" only controls whether __GFP_WAIT is set for the
 * allocation: with defrag disabled the allocation won't block on
 * reclaim/compaction. A blind __GFP_REPEAT would be too aggressive;
 * it's never worth swapping tons of memory just to allocate one more
 * hugepage.
 */
static ssize_t defrag_show(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
{
        return double_flag_show(kobj, attr, buf,
                                TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
                                TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
                            struct kobj_attribute *attr,
                            const char *buf, size_t count)
{
        return double_flag_store(kobj, attr, buf, count,
                                 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
                                 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static struct kobj_attribute defrag_attr =
        __ATTR(defrag, 0644, defrag_show, defrag_store);

#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
                              struct kobj_attribute *attr, char *buf)
{
        return single_flag_show(kobj, attr, buf,
                                TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
                               struct kobj_attribute *attr,
                               const char *buf, size_t count)
{
        return single_flag_store(kobj, attr, buf, count,
                                 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
        __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */

static struct attribute *hugepage_attr[] = {
        &enabled_attr.attr,
        &defrag_attr.attr,
#ifdef CONFIG_DEBUG_VM
        &debug_cow_attr.attr,
#endif
        NULL,
};

static struct attribute_group hugepage_attr_group = {
        .attrs = hugepage_attr,
};

static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
                                         struct kobj_attribute *attr,
                                         char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
                                          struct kobj_attribute *attr,
                                          const char *buf, size_t count)
{
        unsigned long msecs;
        int err;

        err = strict_strtoul(buf, 10, &msecs);
        if (err || msecs > UINT_MAX)
                return -EINVAL;

        khugepaged_scan_sleep_millisecs = msecs;
        wake_up_interruptible(&khugepaged_wait);

        return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
        __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
               scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
                                          struct kobj_attribute *attr,
                                          char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
                                           struct kobj_attribute *attr,
                                           const char *buf, size_t count)
{
        unsigned long msecs;
        int err;

        err = strict_strtoul(buf, 10, &msecs);
        if (err || msecs > UINT_MAX)
                return -EINVAL;

        khugepaged_alloc_sleep_millisecs = msecs;
        wake_up_interruptible(&khugepaged_wait);

        return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
        __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
               alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
                                  struct kobj_attribute *attr,
                                  char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   const char *buf, size_t count)
{
        int err;
        unsigned long pages;

        err = strict_strtoul(buf, 10, &pages);
        if (err || !pages || pages > UINT_MAX)
                return -EINVAL;

        khugepaged_pages_to_scan = pages;

        return count;
}
static struct kobj_attribute pages_to_scan_attr =
        __ATTR(pages_to_scan, 0644, pages_to_scan_show,
               pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
                                    struct kobj_attribute *attr,
                                    char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
        __ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
                               struct kobj_attribute *attr,
                               char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
        __ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
                                      struct kobj_attribute *attr, char *buf)
{
        return single_flag_show(kobj, attr, buf,
                                TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
                                       struct kobj_attribute *attr,
                                       const char *buf, size_t count)
{
        return single_flag_store(kobj, attr, buf, count,
                                 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
        __ATTR(defrag, 0644, khugepaged_defrag_show,
               khugepaged_defrag_store);
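
/*
 * Worked example for max_ptes_none, documented below (illustrative,
 * assuming HPAGE_PMD_NR == 512 as on x86-64 with 2M hugepages): the
 * default HPAGE_PMD_NR-1 == 511 lets khugepaged collapse a range with
 * as little as a single pte mapped, while writing 0 restricts
 * collapses to ranges where all 512 ptes are already mapped, so that
 * collapsing never consumes additional memory.
 */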

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over any unmapped ptes, in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
 * reduce the available free memory in the system as it runs.
 * Increasing max_ptes_none will instead potentially reduce the free
 * memory in the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
                                             struct kobj_attribute *attr,
                                             char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
                                              struct kobj_attribute *attr,
                                              const char *buf, size_t count)
{
        int err;
        unsigned long max_ptes_none;

        err = strict_strtoul(buf, 10, &max_ptes_none);
        if (err || max_ptes_none > HPAGE_PMD_NR-1)
                return -EINVAL;

        khugepaged_max_ptes_none = max_ptes_none;

        return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
        __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
               khugepaged_max_ptes_none_store);

static struct attribute *khugepaged_attr[] = {
        &khugepaged_defrag_attr.attr,
        &khugepaged_max_ptes_none_attr.attr,
        &pages_to_scan_attr.attr,
        &pages_collapsed_attr.attr,
        &full_scans_attr.attr,
        &scan_sleep_millisecs_attr.attr,
        &alloc_sleep_millisecs_attr.attr,
        NULL,
};

static struct attribute_group khugepaged_attr_group = {
        .attrs = khugepaged_attr,
        .name = "khugepaged",
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
        int err;

        *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
        if (unlikely(!*hugepage_kobj)) {
                printk(KERN_ERR "hugepage: failed to create kobject\n");
                return -ENOMEM;
        }

        err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
        if (err) {
                printk(KERN_ERR "hugepage: failed to register hugepage group\n");
                goto delete_obj;
        }

        err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
        if (err) {
                printk(KERN_ERR "hugepage: failed to register khugepaged group\n");
                goto remove_hp_group;
        }

        return 0;

remove_hp_group:
        sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
        kobject_put(*hugepage_kobj);
        return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
        sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
        sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
        kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
        return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
        int err;
        struct kobject *hugepage_kobj;

        if (!has_transparent_hugepage()) {
                transparent_hugepage_flags = 0;
                return -EINVAL;
        }

        err = hugepage_init_sysfs(&hugepage_kobj);
        if (err)
                return err;

        err = khugepaged_slab_init();
        if (err)
                goto out;

        err = mm_slots_hash_init();
        if (err) {
                khugepaged_slab_free();
                goto out;
        }

        /*
         * By default disable transparent hugepages on smaller systems,
         * where the extra memory cost is likely to hurt more than the
         * TLB savings help. The admin can still enable it through /sys.
         */
        if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
                transparent_hugepage_flags = 0;

        start_khugepaged();

        set_recommended_min_free_kbytes();

        return 0;
out:
        hugepage_exit_sysfs(hugepage_kobj);
        return err;
}
module_init(hugepage_init)

static int __init setup_transparent_hugepage(char *str)
{
        int ret = 0;
        if (!str)
                goto out;
        if (!strcmp(str, "always")) {
                set_bit(TRANSPARENT_HUGEPAGE_FLAG,
                        &transparent_hugepage_flags);
                clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
                          &transparent_hugepage_flags);
                ret = 1;
        } else if (!strcmp(str, "madvise")) {
                clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
                          &transparent_hugepage_flags);
                set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
                        &transparent_hugepage_flags);
                ret = 1;
        } else if (!strcmp(str, "never")) {
                clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
                          &transparent_hugepage_flags);
                clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
                          &transparent_hugepage_flags);
                ret = 1;
        }
out:
        if (!ret)
                printk(KERN_WARNING
                       "transparent_hugepage= cannot parse, ignored\n");
        return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

static void prepare_pmd_huge_pte(pgtable_t pgtable,
                                 struct mm_struct *mm)
{
        assert_spin_locked(&mm->page_table_lock);

        /* FIFO */
        if (!mm->pmd_huge_pte)
                INIT_LIST_HEAD(&pgtable->lru);
        else
                list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
        mm->pmd_huge_pte = pgtable;
}
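
/*
 * Illustrative pairing (a sketch, not code from this file): every
 * huge pmd keeps one preallocated pte page table deposited via the
 * function above, so a later split of the hugepage cannot fail on a
 * page table allocation:
 *
 *      prepare_pmd_huge_pte(pgtable, mm);      (deposit at fault/copy)
 *      ...
 *      pgtable = get_pmd_huge_pte(mm);         (withdraw when zapping
 *                                               or splitting the pmd)
 */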

static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
        if (likely(vma->vm_flags & VM_WRITE))
                pmd = pmd_mkwrite(pmd);
        return pmd;
}

static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long haddr, pmd_t *pmd,
                                        struct page *page)
{
        pgtable_t pgtable;

        VM_BUG_ON(!PageCompound(page));
        pgtable = pte_alloc_one(mm, haddr);
        if (unlikely(!pgtable)) {
                mem_cgroup_uncharge_page(page);
                put_page(page);
                return VM_FAULT_OOM;
        }

        clear_huge_page(page, haddr, HPAGE_PMD_NR);
        __SetPageUptodate(page);

        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_none(*pmd))) {
                spin_unlock(&mm->page_table_lock);
                mem_cgroup_uncharge_page(page);
                put_page(page);
                pte_free(mm, pgtable);
        } else {
                pmd_t entry;
                entry = mk_pmd(page, vma->vm_page_prot);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                entry = pmd_mkhuge(entry);
                /*
                 * The spinlocking to take the lru_lock inside
                 * page_add_new_anon_rmap() acts as a full memory
                 * barrier to be sure the clear_huge_page writes
                 * become visible before the set_pmd_at() write.
                 */
                page_add_new_anon_rmap(page, vma, haddr);
                set_pmd_at(mm, haddr, pmd, entry);
                prepare_pmd_huge_pte(pgtable, mm);
                add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
                mm->nr_ptes++;
                spin_unlock(&mm->page_table_lock);
        }

        return 0;
}

static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
{
        return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
}
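
/*
 * Illustrative expansion of the helper above (follows directly from
 * the expression): with defrag enabled the mask is plain
 * GFP_TRANSHUGE, so the allocator may block and compact/reclaim;
 * with defrag disabled __GFP_WAIT is stripped and the hugepage can
 * only come from already-free contiguous memory:
 *
 *      alloc_hugepage_gfpmask(1, 0) == GFP_TRANSHUGE
 *      alloc_hugepage_gfpmask(0, 0) == GFP_TRANSHUGE & ~__GFP_WAIT
 */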

static inline struct page *alloc_hugepage_vma(int defrag,
                                              struct vm_area_struct *vma,
                                              unsigned long haddr, int nd,
                                              gfp_t extra_gfp)
{
        return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
                               HPAGE_PMD_ORDER, vma, haddr, nd);
}

#ifndef CONFIG_NUMA
static inline struct page *alloc_hugepage(int defrag)
{
        return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
                           HPAGE_PMD_ORDER);
}
#endif

int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                               unsigned long address, pmd_t *pmd,
                               unsigned int flags)
{
        struct page *page;
        unsigned long haddr = address & HPAGE_PMD_MASK;
        pte_t *pte;

        if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
                if (unlikely(anon_vma_prepare(vma)))
                        return VM_FAULT_OOM;
                if (unlikely(khugepaged_enter(vma)))
                        return VM_FAULT_OOM;
                page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
                                          vma, haddr, numa_node_id(), 0);
                if (unlikely(!page)) {
                        count_vm_event(THP_FAULT_FALLBACK);
                        goto out;
                }
                count_vm_event(THP_FAULT_ALLOC);
                if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
                        put_page(page);
                        goto out;
                }

                return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
        }
out:
        /*
         * Use __pte_alloc instead of pte_alloc_map, because we can't
         * run pte_offset_map on the pmd, as a huge pmd could
         * materialize from under us from a different thread.
         */
        if (unlikely(__pte_alloc(mm, vma, pmd, address)))
                return VM_FAULT_OOM;
        /* if a huge pmd materialized from under us just retry later */
        if (unlikely(pmd_trans_huge(*pmd)))
                return 0;
        /*
         * A regular pmd is established and it can't morph into a huge
         * pmd from under us anymore at this point because we hold the
         * mmap_sem in read mode and khugepaged takes it in write mode.
         * So now it's safe to run pte_offset_map().
         */
        pte = pte_offset_map(pmd, address);
        return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}

int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                  struct vm_area_struct *vma)
{
        struct page *src_page;
        pmd_t pmd;
        pgtable_t pgtable;
        int ret;

        ret = -ENOMEM;
        pgtable = pte_alloc_one(dst_mm, addr);
        if (unlikely(!pgtable))
                goto out;

        spin_lock(&dst_mm->page_table_lock);
        spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);

        ret = -EAGAIN;
        pmd = *src_pmd;
        if (unlikely(!pmd_trans_huge(pmd))) {
                pte_free(dst_mm, pgtable);
                goto out_unlock;
        }
        if (unlikely(pmd_trans_splitting(pmd))) {
                /* split huge page running from under us */
                spin_unlock(&src_mm->page_table_lock);
                spin_unlock(&dst_mm->page_table_lock);
                pte_free(dst_mm, pgtable);

                wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
                goto out;
        }
        src_page = pmd_page(pmd);
        VM_BUG_ON(!PageHead(src_page));
        get_page(src_page);
        page_dup_rmap(src_page);
        add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);

        pmdp_set_wrprotect(src_mm, addr, src_pmd);
        pmd = pmd_mkold(pmd_wrprotect(pmd));
        set_pmd_at(dst_mm, addr, dst_pmd, pmd);
        prepare_pmd_huge_pte(pgtable, dst_mm);
        dst_mm->nr_ptes++;

        ret = 0;
out_unlock:
        spin_unlock(&src_mm->page_table_lock);
        spin_unlock(&dst_mm->page_table_lock);
out:
        return ret;
}
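
/*
 * Descriptive note (not from the original file): after the fork path
 * above, parent and child share the hugepage read-only, since the
 * source pmd was write-protected and the copy is installed
 * write-protected as well; the first write from either side faults
 * into do_huge_pmd_wp_page() below, which reuses the page when
 * page_mapcount() == 1 or copies it otherwise.
 */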

/* no "address" argument so it destroys the page coloring of some archs */
pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
{
        pgtable_t pgtable;

        assert_spin_locked(&mm->page_table_lock);

        /* FIFO */
        pgtable = mm->pmd_huge_pte;
        if (list_empty(&pgtable->lru))
                mm->pmd_huge_pte = NULL;
        else {
                mm->pmd_huge_pte = list_entry(pgtable->lru.next,
                                              struct page, lru);
                list_del(&pgtable->lru);
        }
        return pgtable;
}

static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long address,
                                        pmd_t *pmd, pmd_t orig_pmd,
                                        struct page *page,
                                        unsigned long haddr)
{
        pgtable_t pgtable;
        pmd_t _pmd;
        int ret = 0, i;
        struct page **pages;

        pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
                        GFP_KERNEL);
        if (unlikely(!pages)) {
                ret |= VM_FAULT_OOM;
                goto out;
        }

        for (i = 0; i < HPAGE_PMD_NR; i++) {
                pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
                                               __GFP_OTHER_NODE,
                                               vma, address, page_to_nid(page));
                if (unlikely(!pages[i] ||
                             mem_cgroup_newpage_charge(pages[i], mm,
                                                       GFP_KERNEL))) {
                        if (pages[i])
                                put_page(pages[i]);
                        mem_cgroup_uncharge_start();
                        while (--i >= 0) {
                                mem_cgroup_uncharge_page(pages[i]);
                                put_page(pages[i]);
                        }
                        mem_cgroup_uncharge_end();
                        kfree(pages);
                        ret |= VM_FAULT_OOM;
                        goto out;
                }
        }

        for (i = 0; i < HPAGE_PMD_NR; i++) {
                copy_user_highpage(pages[i], page + i,
                                   haddr + PAGE_SIZE * i, vma);
                __SetPageUptodate(pages[i]);
                cond_resched();
        }

        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_same(*pmd, orig_pmd)))
                goto out_free_pages;
        VM_BUG_ON(!PageHead(page));

        pmdp_clear_flush_notify(vma, haddr, pmd);
        /* leave pmd empty until pte is filled */

        pgtable = get_pmd_huge_pte(mm);
        pmd_populate(mm, &_pmd, pgtable);

        for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
                pte_t *pte, entry;
                entry = mk_pte(pages[i], vma->vm_page_prot);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                page_add_new_anon_rmap(pages[i], vma, haddr);
                pte = pte_offset_map(&_pmd, haddr);
                VM_BUG_ON(!pte_none(*pte));
                set_pte_at(mm, haddr, pte, entry);
                pte_unmap(pte);
        }
        kfree(pages);

        smp_wmb(); /* make pte visible before pmd */
        pmd_populate(mm, pmd, pgtable);
        page_remove_rmap(page);
        spin_unlock(&mm->page_table_lock);

        ret |= VM_FAULT_WRITE;
        put_page(page);

out:
        return ret;

out_free_pages:
        spin_unlock(&mm->page_table_lock);
        mem_cgroup_uncharge_start();
        for (i = 0; i < HPAGE_PMD_NR; i++) {
                mem_cgroup_uncharge_page(pages[i]);
                put_page(pages[i]);
        }
        mem_cgroup_uncharge_end();
        kfree(pages);
        goto out;
}
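
/*
 * Descriptive note (not from the original file): the fallback above
 * demotes the write-protected hugepage into HPAGE_PMD_NR normal
 * pages when no replacement hugepage can be allocated; for a 2M thp
 * that means 512 alloc_page_vma_node() + copy_user_highpage() calls,
 * one pte page populated with 512 ptes, and an smp_wmb() before
 * pmd_populate() makes those ptes visible.
 */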

int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
{
        int ret = 0;
        struct page *page, *new_page;
        unsigned long haddr;

        VM_BUG_ON(!vma->anon_vma);
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_same(*pmd, orig_pmd)))
                goto out_unlock;

        page = pmd_page(orig_pmd);
        VM_BUG_ON(!PageCompound(page) || !PageHead(page));
        haddr = address & HPAGE_PMD_MASK;
        if (page_mapcount(page) == 1) {
                pmd_t entry;
                entry = pmd_mkyoung(orig_pmd);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
                        update_mmu_cache(vma, address, entry);
                ret |= VM_FAULT_WRITE;
                goto out_unlock;
        }
        get_page(page);
        spin_unlock(&mm->page_table_lock);

        if (transparent_hugepage_enabled(vma) &&
            !transparent_hugepage_debug_cow())
                new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
                                              vma, haddr, numa_node_id(), 0);
        else
                new_page = NULL;

        if (unlikely(!new_page)) {
                count_vm_event(THP_FAULT_FALLBACK);
                ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
                                                   pmd, orig_pmd, page, haddr);
                put_page(page);
                goto out;
        }
        count_vm_event(THP_FAULT_ALLOC);

        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
                put_page(new_page);
                put_page(page);
                ret |= VM_FAULT_OOM;
                goto out;
        }

        copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
        __SetPageUptodate(new_page);

        spin_lock(&mm->page_table_lock);
        put_page(page);
        if (unlikely(!pmd_same(*pmd, orig_pmd))) {
                mem_cgroup_uncharge_page(new_page);
                put_page(new_page);
        } else {
                pmd_t entry;
                VM_BUG_ON(!PageHead(page));
                entry = mk_pmd(new_page, vma->vm_page_prot);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                entry = pmd_mkhuge(entry);
                pmdp_clear_flush_notify(vma, haddr, pmd);
                page_add_new_anon_rmap(new_page, vma, haddr);
                set_pmd_at(mm, haddr, pmd, entry);
                update_mmu_cache(vma, address, entry);
                page_remove_rmap(page);
                put_page(page);
                ret |= VM_FAULT_WRITE;
        }
out_unlock:
        spin_unlock(&mm->page_table_lock);
out:
        return ret;
}

struct page *follow_trans_huge_pmd(struct mm_struct *mm,
                                   unsigned long addr,
                                   pmd_t *pmd,
                                   unsigned int flags)
{
        struct page *page = NULL;

        assert_spin_locked(&mm->page_table_lock);

        if (flags & FOLL_WRITE && !pmd_write(*pmd))
                goto out;

        page = pmd_page(*pmd);
        VM_BUG_ON(!PageHead(page));
        if (flags & FOLL_TOUCH) {
                pmd_t _pmd;
                /*
                 * We should set the dirty bit only for FOLL_WRITE, but
                 * for now the dirty bit in the pmd is meaningless.
                 * If the dirty bit ever becomes meaningful and we only
                 * set it for FOLL_WRITE, an atomic set_bit will be
                 * required on the pmd to set the young bit, instead of
                 * the current set_pmd_at.
                 */
                _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
                set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
        }
        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
        VM_BUG_ON(!PageCompound(page));
        if (flags & FOLL_GET)
                get_page_foll(page);

out:
        return page;
}

int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                 pmd_t *pmd, unsigned long addr)
{
        int ret = 0;

        if (__pmd_trans_huge_lock(pmd, vma) == 1) {
                struct page *page;
                pgtable_t pgtable;
                pgtable = get_pmd_huge_pte(tlb->mm);
                page = pmd_page(*pmd);
                pmd_clear(pmd);
                tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
                page_remove_rmap(page);
                VM_BUG_ON(page_mapcount(page) < 0);
                add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
                VM_BUG_ON(!PageHead(page));
                tlb->mm->nr_ptes--;
                spin_unlock(&tlb->mm->page_table_lock);
                tlb_remove_page(tlb, page);
                pte_free(tlb->mm, pgtable);
                ret = 1;
        }
        return ret;
}
10630ca1634dSJohannes Weiner */
10640ca1634dSJohannes Weiner spin_unlock(&vma->vm_mm->page_table_lock);
1065025c5b24SNaoya Horiguchi memset(vec, 1, (end - addr) >> PAGE_SHIFT);
1066025c5b24SNaoya Horiguchi ret = 1;
1067025c5b24SNaoya Horiguchi }
10680ca1634dSJohannes Weiner
10690ca1634dSJohannes Weiner return ret;
10700ca1634dSJohannes Weiner }
10710ca1634dSJohannes Weiner
107237a1c49aSAndrea Arcangeli int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
107337a1c49aSAndrea Arcangeli unsigned long old_addr,
107437a1c49aSAndrea Arcangeli unsigned long new_addr, unsigned long old_end,
107537a1c49aSAndrea Arcangeli pmd_t *old_pmd, pmd_t *new_pmd)
107637a1c49aSAndrea Arcangeli {
107737a1c49aSAndrea Arcangeli int ret = 0;
107837a1c49aSAndrea Arcangeli pmd_t pmd;
107937a1c49aSAndrea Arcangeli
108037a1c49aSAndrea Arcangeli struct mm_struct *mm = vma->vm_mm;
108137a1c49aSAndrea Arcangeli
108237a1c49aSAndrea Arcangeli if ((old_addr & ~HPAGE_PMD_MASK) ||
108337a1c49aSAndrea Arcangeli (new_addr & ~HPAGE_PMD_MASK) ||
108437a1c49aSAndrea Arcangeli old_end - old_addr < HPAGE_PMD_SIZE ||
108537a1c49aSAndrea Arcangeli (new_vma->vm_flags & VM_NOHUGEPAGE))
108637a1c49aSAndrea Arcangeli goto out;
108737a1c49aSAndrea Arcangeli
108837a1c49aSAndrea Arcangeli /*
108937a1c49aSAndrea Arcangeli * The destination pmd shouldn't be established, free_pgtables()
109037a1c49aSAndrea Arcangeli * should have released it.
109137a1c49aSAndrea Arcangeli */
109237a1c49aSAndrea Arcangeli if (WARN_ON(!pmd_none(*new_pmd))) {
109337a1c49aSAndrea Arcangeli VM_BUG_ON(pmd_trans_huge(*new_pmd));
109437a1c49aSAndrea Arcangeli goto out;
109537a1c49aSAndrea Arcangeli }
109637a1c49aSAndrea Arcangeli
1097025c5b24SNaoya Horiguchi ret = __pmd_trans_huge_lock(old_pmd, vma);
1098025c5b24SNaoya Horiguchi if (ret == 1) {
109937a1c49aSAndrea Arcangeli pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
110037a1c49aSAndrea Arcangeli VM_BUG_ON(!pmd_none(*new_pmd));
110137a1c49aSAndrea Arcangeli set_pmd_at(mm, new_addr, new_pmd, pmd);
110237a1c49aSAndrea Arcangeli spin_unlock(&mm->page_table_lock);
110337a1c49aSAndrea Arcangeli }
110437a1c49aSAndrea Arcangeli out:
110537a1c49aSAndrea Arcangeli return ret;
110637a1c49aSAndrea Arcangeli }
110737a1c49aSAndrea Arcangeli
1108cd7548abSJohannes Weiner int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1109cd7548abSJohannes Weiner unsigned long addr, pgprot_t newprot)
1110cd7548abSJohannes Weiner {
1111cd7548abSJohannes Weiner struct mm_struct *mm = vma->vm_mm;
1112cd7548abSJohannes Weiner int ret = 0;
1113cd7548abSJohannes Weiner
1114025c5b24SNaoya Horiguchi if (__pmd_trans_huge_lock(pmd, vma) == 1) {
1115cd7548abSJohannes Weiner pmd_t entry;
1116cd7548abSJohannes Weiner entry = pmdp_get_and_clear(mm, addr, pmd);
1117cd7548abSJohannes Weiner entry = pmd_modify(entry, newprot);
1118cd7548abSJohannes Weiner set_pmd_at(mm, addr, pmd, entry);
1119cd7548abSJohannes Weiner spin_unlock(&vma->vm_mm->page_table_lock);
1120cd7548abSJohannes Weiner ret = 1;
1121cd7548abSJohannes Weiner }
1122cd7548abSJohannes Weiner
1123cd7548abSJohannes Weiner return ret;
1124cd7548abSJohannes Weiner }
1125cd7548abSJohannes Weiner
1126025c5b24SNaoya Horiguchi /*
1127025c5b24SNaoya Horiguchi * Returns 1 if a given pmd maps a stable (not under splitting) thp.
1128025c5b24SNaoya Horiguchi * Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
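 * In the 0 and -1 cases the page table lock has already been
 * dropped again; callers then fall back to the regular pte paths.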
1129025c5b24SNaoya Horiguchi *
1130025c5b24SNaoya Horiguchi * Note that if it returns 1, this routine returns without unlocking the
1131025c5b24SNaoya Horiguchi * page table lock, so the caller must unlock it.
1132025c5b24SNaoya Horiguchi */
1133025c5b24SNaoya Horiguchi int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
1134025c5b24SNaoya Horiguchi {
1135025c5b24SNaoya Horiguchi spin_lock(&vma->vm_mm->page_table_lock);
1136025c5b24SNaoya Horiguchi if (likely(pmd_trans_huge(*pmd))) {
1137025c5b24SNaoya Horiguchi if (unlikely(pmd_trans_splitting(*pmd))) {
1138025c5b24SNaoya Horiguchi spin_unlock(&vma->vm_mm->page_table_lock);
1139025c5b24SNaoya Horiguchi wait_split_huge_page(vma->anon_vma, pmd);
1140025c5b24SNaoya Horiguchi return -1;
1141025c5b24SNaoya Horiguchi } else {
1142025c5b24SNaoya Horiguchi /* Thp mapped by 'pmd' is stable, so we can
1143025c5b24SNaoya Horiguchi * handle it as it is. */
1144025c5b24SNaoya Horiguchi return 1;
1145025c5b24SNaoya Horiguchi }
1146025c5b24SNaoya Horiguchi }
1147025c5b24SNaoya Horiguchi spin_unlock(&vma->vm_mm->page_table_lock);
1148025c5b24SNaoya Horiguchi return 0;
1149025c5b24SNaoya Horiguchi }
1150025c5b24SNaoya Horiguchi
115171e3aac0SAndrea Arcangeli pmd_t *page_check_address_pmd(struct page *page,
115271e3aac0SAndrea Arcangeli struct mm_struct *mm,
115371e3aac0SAndrea Arcangeli unsigned long address,
115471e3aac0SAndrea Arcangeli enum page_check_address_pmd_flag flag)
115571e3aac0SAndrea Arcangeli {
115671e3aac0SAndrea Arcangeli pgd_t *pgd;
115771e3aac0SAndrea Arcangeli pud_t *pud;
115871e3aac0SAndrea Arcangeli pmd_t *pmd, *ret = NULL;
115971e3aac0SAndrea Arcangeli
116071e3aac0SAndrea Arcangeli if (address & ~HPAGE_PMD_MASK)
116171e3aac0SAndrea Arcangeli goto out;
116271e3aac0SAndrea Arcangeli
116371e3aac0SAndrea Arcangeli pgd = pgd_offset(mm, address);
116471e3aac0SAndrea Arcangeli if (!pgd_present(*pgd))
116571e3aac0SAndrea Arcangeli goto out;
116671e3aac0SAndrea Arcangeli
116771e3aac0SAndrea Arcangeli pud = pud_offset(pgd, address);
116871e3aac0SAndrea Arcangeli if (!pud_present(*pud))
116971e3aac0SAndrea Arcangeli goto out;
117071e3aac0SAndrea Arcangeli
117171e3aac0SAndrea Arcangeli pmd = pmd_offset(pud, address);
117271e3aac0SAndrea Arcangeli if (pmd_none(*pmd))
117371e3aac0SAndrea Arcangeli goto out;
117471e3aac0SAndrea Arcangeli if (pmd_page(*pmd) != page)
117571e3aac0SAndrea Arcangeli goto out;
117694fcc585SAndrea Arcangeli /*
117794fcc585SAndrea Arcangeli * split_vma() may create temporary aliased mappings. There is
117894fcc585SAndrea Arcangeli * no risk as long as all huge pmds are found and have their
117994fcc585SAndrea Arcangeli * splitting bit set before __split_huge_page_refcount
118094fcc585SAndrea Arcangeli * runs. Finding the same huge pmd more than once during the
118194fcc585SAndrea Arcangeli * same rmap walk is not a problem.
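 * (E.g. split_vma() briefly leaves two vmas on the anon_vma list
 * covering the split point; reaching the huge pmd through either
 * one just sets the splitting bit again, which is idempotent.)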
118294fcc585SAndrea Arcangeli */
118394fcc585SAndrea Arcangeli if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
118494fcc585SAndrea Arcangeli pmd_trans_splitting(*pmd))
118594fcc585SAndrea Arcangeli goto out;
118671e3aac0SAndrea Arcangeli if (pmd_trans_huge(*pmd)) {
118771e3aac0SAndrea Arcangeli VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
118871e3aac0SAndrea Arcangeli !pmd_trans_splitting(*pmd));
118971e3aac0SAndrea Arcangeli ret = pmd;
119071e3aac0SAndrea Arcangeli }
119171e3aac0SAndrea Arcangeli out:
119271e3aac0SAndrea Arcangeli return ret;
119371e3aac0SAndrea Arcangeli }
119471e3aac0SAndrea Arcangeli
119571e3aac0SAndrea Arcangeli static int __split_huge_page_splitting(struct page *page,
119671e3aac0SAndrea Arcangeli struct vm_area_struct *vma,
119771e3aac0SAndrea Arcangeli unsigned long address)
119871e3aac0SAndrea Arcangeli {
119971e3aac0SAndrea Arcangeli struct mm_struct *mm = vma->vm_mm;
120071e3aac0SAndrea Arcangeli pmd_t *pmd;
120171e3aac0SAndrea Arcangeli int ret = 0;
120271e3aac0SAndrea Arcangeli
120371e3aac0SAndrea Arcangeli spin_lock(&mm->page_table_lock);
120471e3aac0SAndrea Arcangeli pmd = page_check_address_pmd(page, mm, address,
120571e3aac0SAndrea Arcangeli PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
120671e3aac0SAndrea Arcangeli if (pmd) {
120771e3aac0SAndrea Arcangeli /*
120871e3aac0SAndrea Arcangeli * We can't temporarily set the pmd to null in order
120971e3aac0SAndrea Arcangeli * to split it, the pmd must remain marked huge at all
121071e3aac0SAndrea Arcangeli * times or the VM won't take the pmd_trans_huge paths
12112b575eb6SPeter Zijlstra * and it won't wait on the anon_vma->root->mutex to
121271e3aac0SAndrea Arcangeli * serialize against split_huge_page*.
121371e3aac0SAndrea Arcangeli */
121471e3aac0SAndrea Arcangeli pmdp_splitting_flush_notify(vma, address, pmd);
121571e3aac0SAndrea Arcangeli ret = 1;
121671e3aac0SAndrea Arcangeli }
121771e3aac0SAndrea Arcangeli spin_unlock(&mm->page_table_lock);
121871e3aac0SAndrea Arcangeli
121971e3aac0SAndrea Arcangeli return ret;
122071e3aac0SAndrea Arcangeli }
122171e3aac0SAndrea Arcangeli
122271e3aac0SAndrea Arcangeli static void __split_huge_page_refcount(struct page *page)
122371e3aac0SAndrea Arcangeli {
122471e3aac0SAndrea Arcangeli int i;
122571e3aac0SAndrea Arcangeli struct zone *zone = page_zone(page);
122670b50f94SAndrea Arcangeli int tail_count = 0;
122771e3aac0SAndrea Arcangeli
122871e3aac0SAndrea Arcangeli /* prevent PageLRU from going away under us, and freeze lru stats */
122971e3aac0SAndrea Arcangeli spin_lock_irq(&zone->lru_lock);
123071e3aac0SAndrea Arcangeli compound_lock(page);
1231e94c8a9cSKAMEZAWA Hiroyuki /* complete memcg work before adding pages to LRU */
1232e94c8a9cSKAMEZAWA Hiroyuki mem_cgroup_split_huge_fixup(page);
123371e3aac0SAndrea Arcangeli
123445676885SShaohua Li for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
123571e3aac0SAndrea Arcangeli struct page *page_tail = page + i;
123671e3aac0SAndrea Arcangeli
123770b50f94SAndrea Arcangeli /* tail_page->_mapcount cannot change */
123870b50f94SAndrea Arcangeli BUG_ON(page_mapcount(page_tail) < 0);
123970b50f94SAndrea Arcangeli tail_count += page_mapcount(page_tail);
124070b50f94SAndrea Arcangeli /* check for overflow */
124170b50f94SAndrea Arcangeli BUG_ON(tail_count < 0);
124270b50f94SAndrea Arcangeli BUG_ON(atomic_read(&page_tail->_count) != 0);
124370b50f94SAndrea Arcangeli /*
124470b50f94SAndrea Arcangeli * tail_page->_count is zero and not changing from
124570b50f94SAndrea Arcangeli * under us.
But get_page_unless_zero() may be running
124670b50f94SAndrea Arcangeli * from under us on the tail_page. If we used
124770b50f94SAndrea Arcangeli * atomic_set() below instead of atomic_add(), we
124870b50f94SAndrea Arcangeli * would then run atomic_set() concurrently with
124970b50f94SAndrea Arcangeli * get_page_unless_zero(), and atomic_set() is
125070b50f94SAndrea Arcangeli * implemented in C not using locked ops. spin_unlock
125170b50f94SAndrea Arcangeli * on x86 sometimes uses locked ops because of PPro
125270b50f94SAndrea Arcangeli * errata 66, 92, so unless somebody can guarantee
125370b50f94SAndrea Arcangeli * atomic_set() here would be safe on all archs (and
125470b50f94SAndrea Arcangeli * not only on x86), it's safer to use atomic_add().
125570b50f94SAndrea Arcangeli */
125670b50f94SAndrea Arcangeli atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
125770b50f94SAndrea Arcangeli &page_tail->_count);
125871e3aac0SAndrea Arcangeli
125971e3aac0SAndrea Arcangeli /* after clearing PageTail the gup refcount can be released */
126071e3aac0SAndrea Arcangeli smp_mb();
126171e3aac0SAndrea Arcangeli
1262a6d30dddSJin Dongming /*
1263a6d30dddSJin Dongming * retain the hwpoison flag of the poisoned tail page: a fix
1264a6d30dddSJin Dongming * so that memory-failure does not kill the wrong process on a
1265a6d30dddSJin Dongming * guest machine (KVM).
1266a6d30dddSJin Dongming */
1267a6d30dddSJin Dongming page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
126871e3aac0SAndrea Arcangeli page_tail->flags |= (page->flags &
126971e3aac0SAndrea Arcangeli ((1L << PG_referenced) |
127071e3aac0SAndrea Arcangeli (1L << PG_swapbacked) |
127171e3aac0SAndrea Arcangeli (1L << PG_mlocked) |
127271e3aac0SAndrea Arcangeli (1L << PG_uptodate)));
127371e3aac0SAndrea Arcangeli page_tail->flags |= (1L << PG_dirty);
127471e3aac0SAndrea Arcangeli
127570b50f94SAndrea Arcangeli /* clear PageTail before overwriting first_page */
127671e3aac0SAndrea Arcangeli smp_wmb();
127771e3aac0SAndrea Arcangeli
127871e3aac0SAndrea Arcangeli /*
127971e3aac0SAndrea Arcangeli * __split_huge_page_splitting() already set the
128071e3aac0SAndrea Arcangeli * splitting bit in all pmds that could map this
128171e3aac0SAndrea Arcangeli * hugepage, which ensures no CPU can alter the
128271e3aac0SAndrea Arcangeli * mapcount on the head page. The mapcount is only
128371e3aac0SAndrea Arcangeli * accounted in the head page and it has to be
128471e3aac0SAndrea Arcangeli * transferred to all tail pages in the below code. So
128571e3aac0SAndrea Arcangeli * for this code to be safe, the mapcount can't change
128671e3aac0SAndrea Arcangeli * during the split. But that doesn't mean userland can't
128771e3aac0SAndrea Arcangeli * keep changing and reading the page contents while
128871e3aac0SAndrea Arcangeli * we transfer the mapcount, so the pmd splitting
128971e3aac0SAndrea Arcangeli * status is achieved setting a reserved bit in the
129071e3aac0SAndrea Arcangeli * pmd, not by clearing the present bit.
129171e3aac0SAndrea Arcangeli */
129271e3aac0SAndrea Arcangeli page_tail->_mapcount = page->_mapcount;
129371e3aac0SAndrea Arcangeli
129471e3aac0SAndrea Arcangeli BUG_ON(page_tail->mapping);
129571e3aac0SAndrea Arcangeli page_tail->mapping = page->mapping;
129671e3aac0SAndrea Arcangeli
129745676885SShaohua Li page_tail->index = page->index + i;
129871e3aac0SAndrea Arcangeli
129971e3aac0SAndrea Arcangeli BUG_ON(!PageAnon(page_tail));
130071e3aac0SAndrea Arcangeli BUG_ON(!PageUptodate(page_tail));
130171e3aac0SAndrea Arcangeli BUG_ON(!PageDirty(page_tail));
130271e3aac0SAndrea Arcangeli BUG_ON(!PageSwapBacked(page_tail));
130371e3aac0SAndrea Arcangeli
1304ca3e0214SKAMEZAWA Hiroyuki
130571e3aac0SAndrea Arcangeli lru_add_page_tail(zone, page, page_tail);
130671e3aac0SAndrea Arcangeli }
130770b50f94SAndrea Arcangeli atomic_sub(tail_count, &page->_count);
130870b50f94SAndrea Arcangeli BUG_ON(atomic_read(&page->_count) <= 0);
130971e3aac0SAndrea Arcangeli
131079134171SAndrea Arcangeli __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
131179134171SAndrea Arcangeli __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
131279134171SAndrea Arcangeli
131371e3aac0SAndrea Arcangeli ClearPageCompound(page);
131471e3aac0SAndrea Arcangeli compound_unlock(page);
131571e3aac0SAndrea Arcangeli spin_unlock_irq(&zone->lru_lock);
131671e3aac0SAndrea Arcangeli
131771e3aac0SAndrea Arcangeli for (i = 1; i < HPAGE_PMD_NR; i++) {
131871e3aac0SAndrea Arcangeli struct page *page_tail = page + i;
131971e3aac0SAndrea Arcangeli BUG_ON(page_count(page_tail) <= 0);
132071e3aac0SAndrea Arcangeli /*
132171e3aac0SAndrea Arcangeli * Tail pages may be freed if they weren't mapped,
132271e3aac0SAndrea Arcangeli * e.g. if add_to_swap() is running on a lru page that
132371e3aac0SAndrea Arcangeli * had its mapping zapped. And freeing these pages
132471e3aac0SAndrea Arcangeli * requires taking the lru_lock so we do the put_page
132571e3aac0SAndrea Arcangeli * of the tail pages after the split is complete.
132671e3aac0SAndrea Arcangeli */
132771e3aac0SAndrea Arcangeli put_page(page_tail);
132871e3aac0SAndrea Arcangeli }
132971e3aac0SAndrea Arcangeli
133071e3aac0SAndrea Arcangeli /*
133171e3aac0SAndrea Arcangeli * Only the head page (now a regular page) is required
133271e3aac0SAndrea Arcangeli * to be pinned by the caller.
133371e3aac0SAndrea Arcangeli */
133471e3aac0SAndrea Arcangeli BUG_ON(page_count(page) <= 0);
133571e3aac0SAndrea Arcangeli }
133671e3aac0SAndrea Arcangeli
133771e3aac0SAndrea Arcangeli static int __split_huge_page_map(struct page *page,
133871e3aac0SAndrea Arcangeli struct vm_area_struct *vma,
133971e3aac0SAndrea Arcangeli unsigned long address)
134071e3aac0SAndrea Arcangeli {
134171e3aac0SAndrea Arcangeli struct mm_struct *mm = vma->vm_mm;
134271e3aac0SAndrea Arcangeli pmd_t *pmd, _pmd;
134371e3aac0SAndrea Arcangeli int ret = 0, i;
134471e3aac0SAndrea Arcangeli pgtable_t pgtable;
134571e3aac0SAndrea Arcangeli unsigned long haddr;
134671e3aac0SAndrea Arcangeli
134771e3aac0SAndrea Arcangeli spin_lock(&mm->page_table_lock);
134871e3aac0SAndrea Arcangeli pmd = page_check_address_pmd(page, mm, address,
134971e3aac0SAndrea Arcangeli PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
135071e3aac0SAndrea Arcangeli if (pmd) {
135171e3aac0SAndrea Arcangeli pgtable = get_pmd_huge_pte(mm);
135271e3aac0SAndrea Arcangeli pmd_populate(mm, &_pmd, pgtable);
135371e3aac0SAndrea Arcangeli
135471e3aac0SAndrea Arcangeli for (i = 0, haddr = address; i < HPAGE_PMD_NR;
135571e3aac0SAndrea Arcangeli i++, haddr += PAGE_SIZE) {
135671e3aac0SAndrea Arcangeli pte_t *pte, entry;
135771e3aac0SAndrea Arcangeli BUG_ON(PageCompound(page+i));
135871e3aac0SAndrea Arcangeli entry = mk_pte(page + i, vma->vm_page_prot);
135971e3aac0SAndrea Arcangeli entry = maybe_mkwrite(pte_mkdirty(entry), vma);
136071e3aac0SAndrea Arcangeli if (!pmd_write(*pmd))
136171e3aac0SAndrea Arcangeli entry = pte_wrprotect(entry);
136271e3aac0SAndrea Arcangeli else
136371e3aac0SAndrea Arcangeli BUG_ON(page_mapcount(page) != 1);
136471e3aac0SAndrea Arcangeli if (!pmd_young(*pmd))
136571e3aac0SAndrea Arcangeli entry = pte_mkold(entry);
136671e3aac0SAndrea Arcangeli pte = pte_offset_map(&_pmd, haddr);
136771e3aac0SAndrea Arcangeli BUG_ON(!pte_none(*pte));
136871e3aac0SAndrea Arcangeli set_pte_at(mm, haddr, pte, entry);
136971e3aac0SAndrea Arcangeli pte_unmap(pte);
137071e3aac0SAndrea Arcangeli }
137171e3aac0SAndrea Arcangeli
137271e3aac0SAndrea Arcangeli smp_wmb(); /* make pte visible before pmd */
137371e3aac0SAndrea Arcangeli /*
137471e3aac0SAndrea Arcangeli * Up to this point the pmd is present and huge and
137571e3aac0SAndrea Arcangeli * userland retains full access to the hugepage
137671e3aac0SAndrea Arcangeli * during the split (which happens in place). If we
137771e3aac0SAndrea Arcangeli * overwrite the pmd with the not-huge version
137871e3aac0SAndrea Arcangeli * pointing to the pte here (which of course we could
137971e3aac0SAndrea Arcangeli * if all CPUs were bug free), userland could trigger
138071e3aac0SAndrea Arcangeli * a small page size TLB miss on the small sized TLB
138171e3aac0SAndrea Arcangeli * while the hugepage TLB entry is still established
138271e3aac0SAndrea Arcangeli * in the huge TLB. Some CPUs don't like that. See
138371e3aac0SAndrea Arcangeli * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
138471e3aac0SAndrea Arcangeli * Erratum 383 on page 93. Intel should be safe but
138571e3aac0SAndrea Arcangeli * also warns that it's only safe if the permission
138671e3aac0SAndrea Arcangeli * and cache attributes of the two entries loaded in
138771e3aac0SAndrea Arcangeli * the two TLBs are identical (which should be the case
138871e3aac0SAndrea Arcangeli * here).
But it is generally safer to never allow
138971e3aac0SAndrea Arcangeli * small and huge TLB entries for the same virtual
139071e3aac0SAndrea Arcangeli * address to be loaded simultaneously. So instead of
139171e3aac0SAndrea Arcangeli * doing "pmd_populate(); flush_tlb_range();" we first
139271e3aac0SAndrea Arcangeli * mark the current pmd notpresent (atomically because
139371e3aac0SAndrea Arcangeli * here the pmd_trans_huge and pmd_trans_splitting
139471e3aac0SAndrea Arcangeli * must remain set at all times on the pmd until the
139571e3aac0SAndrea Arcangeli * split is complete for this pmd), then we flush the
139671e3aac0SAndrea Arcangeli * SMP TLB and finally we write the non-huge version
139771e3aac0SAndrea Arcangeli * of the pmd entry with pmd_populate.
139871e3aac0SAndrea Arcangeli */
139971e3aac0SAndrea Arcangeli set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd));
140071e3aac0SAndrea Arcangeli flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
140171e3aac0SAndrea Arcangeli pmd_populate(mm, pmd, pgtable);
140271e3aac0SAndrea Arcangeli ret = 1;
140371e3aac0SAndrea Arcangeli }
140471e3aac0SAndrea Arcangeli spin_unlock(&mm->page_table_lock);
140571e3aac0SAndrea Arcangeli
140671e3aac0SAndrea Arcangeli return ret;
140771e3aac0SAndrea Arcangeli }
140871e3aac0SAndrea Arcangeli
14092b575eb6SPeter Zijlstra /* must be called with anon_vma->root->mutex held */
141071e3aac0SAndrea Arcangeli static void __split_huge_page(struct page *page,
141171e3aac0SAndrea Arcangeli struct anon_vma *anon_vma)
141271e3aac0SAndrea Arcangeli {
141371e3aac0SAndrea Arcangeli int mapcount, mapcount2;
141471e3aac0SAndrea Arcangeli struct anon_vma_chain *avc;
141571e3aac0SAndrea Arcangeli
141671e3aac0SAndrea Arcangeli BUG_ON(!PageHead(page));
141771e3aac0SAndrea Arcangeli BUG_ON(PageTail(page));
141871e3aac0SAndrea Arcangeli
141971e3aac0SAndrea Arcangeli mapcount = 0;
142071e3aac0SAndrea Arcangeli list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
142171e3aac0SAndrea Arcangeli struct vm_area_struct *vma = avc->vma;
142271e3aac0SAndrea Arcangeli unsigned long addr = vma_address(page, vma);
142371e3aac0SAndrea Arcangeli BUG_ON(is_vma_temporary_stack(vma));
142471e3aac0SAndrea Arcangeli if (addr == -EFAULT)
142571e3aac0SAndrea Arcangeli continue;
142671e3aac0SAndrea Arcangeli mapcount += __split_huge_page_splitting(page, vma, addr);
142771e3aac0SAndrea Arcangeli }
142805759d38SAndrea Arcangeli /*
142905759d38SAndrea Arcangeli * It is critical that new vmas are added to the tail of the
143005759d38SAndrea Arcangeli * anon_vma list. This guarantees that if copy_huge_pmd() runs
143105759d38SAndrea Arcangeli * and establishes a child pmd before
143205759d38SAndrea Arcangeli * __split_huge_page_splitting() freezes the parent pmd (so if
143305759d38SAndrea Arcangeli * we fail to prevent copy_huge_pmd() from running until the
143405759d38SAndrea Arcangeli * whole __split_huge_page() is complete), we will still see
143505759d38SAndrea Arcangeli * the newly established pmd of the child later during the
143605759d38SAndrea Arcangeli * walk, to be able to set it as pmd_trans_splitting too.
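 * (Concretely: a child vma linked at the tail during fork() is
 * visited after its parent, so a huge pmd copied into the child
 * cannot escape the freeze.)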
143705759d38SAndrea Arcangeli */
143805759d38SAndrea Arcangeli if (mapcount != page_mapcount(page))
143905759d38SAndrea Arcangeli printk(KERN_ERR "mapcount %d page_mapcount %d\n",
144005759d38SAndrea Arcangeli mapcount, page_mapcount(page));
144171e3aac0SAndrea Arcangeli BUG_ON(mapcount != page_mapcount(page));
144271e3aac0SAndrea Arcangeli
144371e3aac0SAndrea Arcangeli __split_huge_page_refcount(page);
144471e3aac0SAndrea Arcangeli
144571e3aac0SAndrea Arcangeli mapcount2 = 0;
144671e3aac0SAndrea Arcangeli list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
144771e3aac0SAndrea Arcangeli struct vm_area_struct *vma = avc->vma;
144871e3aac0SAndrea Arcangeli unsigned long addr = vma_address(page, vma);
144971e3aac0SAndrea Arcangeli BUG_ON(is_vma_temporary_stack(vma));
145071e3aac0SAndrea Arcangeli if (addr == -EFAULT)
145171e3aac0SAndrea Arcangeli continue;
145271e3aac0SAndrea Arcangeli mapcount2 += __split_huge_page_map(page, vma, addr);
145371e3aac0SAndrea Arcangeli }
145405759d38SAndrea Arcangeli if (mapcount != mapcount2)
145505759d38SAndrea Arcangeli printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
145605759d38SAndrea Arcangeli mapcount, mapcount2, page_mapcount(page));
145771e3aac0SAndrea Arcangeli BUG_ON(mapcount != mapcount2);
145871e3aac0SAndrea Arcangeli }
145971e3aac0SAndrea Arcangeli
146071e3aac0SAndrea Arcangeli int split_huge_page(struct page *page)
146171e3aac0SAndrea Arcangeli {
146271e3aac0SAndrea Arcangeli struct anon_vma *anon_vma;
146371e3aac0SAndrea Arcangeli int ret = 1;
146471e3aac0SAndrea Arcangeli
146571e3aac0SAndrea Arcangeli BUG_ON(!PageAnon(page));
146671e3aac0SAndrea Arcangeli anon_vma = page_lock_anon_vma(page);
146771e3aac0SAndrea Arcangeli if (!anon_vma)
146871e3aac0SAndrea Arcangeli goto out;
146971e3aac0SAndrea Arcangeli ret = 0;
147071e3aac0SAndrea Arcangeli if (!PageCompound(page))
147171e3aac0SAndrea Arcangeli goto out_unlock;
147271e3aac0SAndrea Arcangeli
147371e3aac0SAndrea Arcangeli BUG_ON(!PageSwapBacked(page));
147471e3aac0SAndrea Arcangeli __split_huge_page(page, anon_vma);
147581ab4201SAndi Kleen count_vm_event(THP_SPLIT);
147671e3aac0SAndrea Arcangeli
147771e3aac0SAndrea Arcangeli BUG_ON(PageCompound(page));
147871e3aac0SAndrea Arcangeli out_unlock:
147971e3aac0SAndrea Arcangeli page_unlock_anon_vma(anon_vma);
148071e3aac0SAndrea Arcangeli out:
148171e3aac0SAndrea Arcangeli return ret;
148271e3aac0SAndrea Arcangeli }
148371e3aac0SAndrea Arcangeli
148478f11a25SAndrea Arcangeli #define VM_NO_THP (VM_SPECIAL|VM_INSERTPAGE|VM_MIXEDMAP|VM_SAO| \
148578f11a25SAndrea Arcangeli VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
148678f11a25SAndrea Arcangeli
148760ab3244SAndrea Arcangeli int hugepage_madvise(struct vm_area_struct *vma,
148860ab3244SAndrea Arcangeli unsigned long *vm_flags, int advice)
14890af4e98bSAndrea Arcangeli {
1490a664b2d8SAndrea Arcangeli switch (advice) {
1491a664b2d8SAndrea Arcangeli case MADV_HUGEPAGE:
14920af4e98bSAndrea Arcangeli /*
14930af4e98bSAndrea Arcangeli * Be somewhat over-protective like KSM for now!
14940af4e98bSAndrea Arcangeli */
149578f11a25SAndrea Arcangeli if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
14960af4e98bSAndrea Arcangeli return -EINVAL;
1497a664b2d8SAndrea Arcangeli *vm_flags &= ~VM_NOHUGEPAGE;
14980af4e98bSAndrea Arcangeli *vm_flags |= VM_HUGEPAGE;
149960ab3244SAndrea Arcangeli /*
150060ab3244SAndrea Arcangeli * If the vma becomes good for khugepaged to scan,
150160ab3244SAndrea Arcangeli * register it here without waiting for a page fault
150260ab3244SAndrea Arcangeli * that may not happen any time soon.
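 * (e.g. memory that was fully faulted in before MADV_HUGEPAGE
 * was set would otherwise never get collapsed)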
150360ab3244SAndrea Arcangeli */
150460ab3244SAndrea Arcangeli if (unlikely(khugepaged_enter_vma_merge(vma)))
150560ab3244SAndrea Arcangeli return -ENOMEM;
1506a664b2d8SAndrea Arcangeli break;
1507a664b2d8SAndrea Arcangeli case MADV_NOHUGEPAGE:
1508a664b2d8SAndrea Arcangeli /*
1509a664b2d8SAndrea Arcangeli * Be somewhat over-protective like KSM for now!
1510a664b2d8SAndrea Arcangeli */
151178f11a25SAndrea Arcangeli if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
1512a664b2d8SAndrea Arcangeli return -EINVAL;
1513a664b2d8SAndrea Arcangeli *vm_flags &= ~VM_HUGEPAGE;
1514a664b2d8SAndrea Arcangeli *vm_flags |= VM_NOHUGEPAGE;
151560ab3244SAndrea Arcangeli /*
151660ab3244SAndrea Arcangeli * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
151760ab3244SAndrea Arcangeli * this vma even if the mm stays registered in khugepaged
151860ab3244SAndrea Arcangeli * (it may have been registered before VM_NOHUGEPAGE was set).
151960ab3244SAndrea Arcangeli */
1520a664b2d8SAndrea Arcangeli break;
1521a664b2d8SAndrea Arcangeli }
15220af4e98bSAndrea Arcangeli
15230af4e98bSAndrea Arcangeli return 0;
15240af4e98bSAndrea Arcangeli }
15250af4e98bSAndrea Arcangeli
1526ba76149fSAndrea Arcangeli static int __init khugepaged_slab_init(void)
1527ba76149fSAndrea Arcangeli {
1528ba76149fSAndrea Arcangeli mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
1529ba76149fSAndrea Arcangeli sizeof(struct mm_slot),
1530ba76149fSAndrea Arcangeli __alignof__(struct mm_slot), 0, NULL);
1531ba76149fSAndrea Arcangeli if (!mm_slot_cache)
1532ba76149fSAndrea Arcangeli return -ENOMEM;
1533ba76149fSAndrea Arcangeli
1534ba76149fSAndrea Arcangeli return 0;
1535ba76149fSAndrea Arcangeli }
1536ba76149fSAndrea Arcangeli
1537ba76149fSAndrea Arcangeli static void __init khugepaged_slab_free(void)
1538ba76149fSAndrea Arcangeli {
1539ba76149fSAndrea Arcangeli kmem_cache_destroy(mm_slot_cache);
1540ba76149fSAndrea Arcangeli mm_slot_cache = NULL;
1541ba76149fSAndrea Arcangeli }
1542ba76149fSAndrea Arcangeli
1543ba76149fSAndrea Arcangeli static inline struct mm_slot *alloc_mm_slot(void)
1544ba76149fSAndrea Arcangeli {
1545ba76149fSAndrea Arcangeli if (!mm_slot_cache) /* initialization failed */
1546ba76149fSAndrea Arcangeli return NULL;
1547ba76149fSAndrea Arcangeli return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
1548ba76149fSAndrea Arcangeli }
1549ba76149fSAndrea Arcangeli
1550ba76149fSAndrea Arcangeli static inline void free_mm_slot(struct mm_slot *mm_slot)
1551ba76149fSAndrea Arcangeli {
1552ba76149fSAndrea Arcangeli kmem_cache_free(mm_slot_cache, mm_slot);
1553ba76149fSAndrea Arcangeli }
1554ba76149fSAndrea Arcangeli
1555ba76149fSAndrea Arcangeli static int __init mm_slots_hash_init(void)
1556ba76149fSAndrea Arcangeli {
1557ba76149fSAndrea Arcangeli mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
1558ba76149fSAndrea Arcangeli GFP_KERNEL);
1559ba76149fSAndrea Arcangeli if (!mm_slots_hash)
1560ba76149fSAndrea Arcangeli return -ENOMEM;
1561ba76149fSAndrea Arcangeli return 0;
1562ba76149fSAndrea Arcangeli }
1563ba76149fSAndrea Arcangeli
1564ba76149fSAndrea Arcangeli #if 0
1565ba76149fSAndrea Arcangeli static void __init mm_slots_hash_free(void)
1566ba76149fSAndrea Arcangeli {
1567ba76149fSAndrea Arcangeli kfree(mm_slots_hash);
1568ba76149fSAndrea Arcangeli mm_slots_hash = NULL;
1569ba76149fSAndrea Arcangeli }
1570ba76149fSAndrea Arcangeli #endif
1571ba76149fSAndrea Arcangeli
1572ba76149fSAndrea Arcangeli static struct mm_slot *get_mm_slot(struct mm_struct *mm)
1573ba76149fSAndrea Arcangeli {
1574ba76149fSAndrea Arcangeli struct mm_slot *mm_slot;
1575ba76149fSAndrea Arcangeli struct hlist_head *bucket;
1576ba76149fSAndrea Arcangeli struct hlist_node *node;
1577ba76149fSAndrea Arcangeli
1578ba76149fSAndrea Arcangeli bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
1579ba76149fSAndrea Arcangeli % MM_SLOTS_HASH_HEADS];
1580ba76149fSAndrea Arcangeli hlist_for_each_entry(mm_slot, node, bucket, hash) {
1581ba76149fSAndrea Arcangeli if (mm == mm_slot->mm)
1582ba76149fSAndrea Arcangeli return mm_slot;
1583ba76149fSAndrea Arcangeli }
1584ba76149fSAndrea Arcangeli return NULL;
1585ba76149fSAndrea Arcangeli }
1586ba76149fSAndrea Arcangeli
1587ba76149fSAndrea Arcangeli static void insert_to_mm_slots_hash(struct mm_struct *mm,
1588ba76149fSAndrea Arcangeli struct mm_slot *mm_slot)
1589ba76149fSAndrea Arcangeli {
1590ba76149fSAndrea Arcangeli struct hlist_head *bucket;
1591ba76149fSAndrea Arcangeli
1592ba76149fSAndrea Arcangeli bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
1593ba76149fSAndrea Arcangeli % MM_SLOTS_HASH_HEADS];
1594ba76149fSAndrea Arcangeli mm_slot->mm = mm;
1595ba76149fSAndrea Arcangeli hlist_add_head(&mm_slot->hash, bucket);
1596ba76149fSAndrea Arcangeli }
1597ba76149fSAndrea Arcangeli
1598ba76149fSAndrea Arcangeli static inline int khugepaged_test_exit(struct mm_struct *mm)
1599ba76149fSAndrea Arcangeli {
1600ba76149fSAndrea Arcangeli return atomic_read(&mm->mm_users) == 0;
1601ba76149fSAndrea Arcangeli }
1602ba76149fSAndrea Arcangeli
1603ba76149fSAndrea Arcangeli int __khugepaged_enter(struct mm_struct *mm)
1604ba76149fSAndrea Arcangeli {
1605ba76149fSAndrea Arcangeli struct mm_slot *mm_slot;
1606ba76149fSAndrea Arcangeli int wakeup;
1607ba76149fSAndrea Arcangeli
1608ba76149fSAndrea Arcangeli mm_slot = alloc_mm_slot();
1609ba76149fSAndrea Arcangeli if (!mm_slot)
1610ba76149fSAndrea Arcangeli return -ENOMEM;
1611ba76149fSAndrea Arcangeli
1612ba76149fSAndrea Arcangeli /* __khugepaged_exit() must not run from under us */
1613ba76149fSAndrea Arcangeli VM_BUG_ON(khugepaged_test_exit(mm));
1614ba76149fSAndrea Arcangeli if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
1615ba76149fSAndrea Arcangeli free_mm_slot(mm_slot);
1616ba76149fSAndrea Arcangeli return 0;
1617ba76149fSAndrea Arcangeli }
1618ba76149fSAndrea Arcangeli
1619ba76149fSAndrea Arcangeli spin_lock(&khugepaged_mm_lock);
1620ba76149fSAndrea Arcangeli insert_to_mm_slots_hash(mm, mm_slot);
1621ba76149fSAndrea Arcangeli /*
1622ba76149fSAndrea Arcangeli * Insert just behind the scanning cursor, to let the area settle
1623ba76149fSAndrea Arcangeli * down a little.
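 * (list_add_tail() below queues the new mm at the end of
 * khugepaged_scan.mm_head, so the already registered mms get
 * scanned before this freshly faulted area is examined)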
1624ba76149fSAndrea Arcangeli */
1625ba76149fSAndrea Arcangeli wakeup = list_empty(&khugepaged_scan.mm_head);
1626ba76149fSAndrea Arcangeli list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
1627ba76149fSAndrea Arcangeli spin_unlock(&khugepaged_mm_lock);
1628ba76149fSAndrea Arcangeli
1629ba76149fSAndrea Arcangeli atomic_inc(&mm->mm_count);
1630ba76149fSAndrea Arcangeli if (wakeup)
1631ba76149fSAndrea Arcangeli wake_up_interruptible(&khugepaged_wait);
1632ba76149fSAndrea Arcangeli
1633ba76149fSAndrea Arcangeli return 0;
1634ba76149fSAndrea Arcangeli }
1635ba76149fSAndrea Arcangeli
1636ba76149fSAndrea Arcangeli int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
1637ba76149fSAndrea Arcangeli {
1638ba76149fSAndrea Arcangeli unsigned long hstart, hend;
1639ba76149fSAndrea Arcangeli if (!vma->anon_vma)
1640ba76149fSAndrea Arcangeli /*
1641ba76149fSAndrea Arcangeli * Not yet faulted in so we will register later in the
1642ba76149fSAndrea Arcangeli * page fault if needed.
1643ba76149fSAndrea Arcangeli */
1644ba76149fSAndrea Arcangeli return 0;
164578f11a25SAndrea Arcangeli if (vma->vm_ops)
1646ba76149fSAndrea Arcangeli /* khugepaged not yet working on file or special mappings */
1647ba76149fSAndrea Arcangeli return 0;
164878f11a25SAndrea Arcangeli /*
164978f11a25SAndrea Arcangeli * If is_pfn_mapping() is true is_linear_pfn_mapping() must be
165078f11a25SAndrea Arcangeli * true too, verify it here.
165178f11a25SAndrea Arcangeli */
165278f11a25SAndrea Arcangeli VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
1653ba76149fSAndrea Arcangeli hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1654ba76149fSAndrea Arcangeli hend = vma->vm_end & HPAGE_PMD_MASK;
1655ba76149fSAndrea Arcangeli if (hstart < hend)
1656ba76149fSAndrea Arcangeli return khugepaged_enter(vma);
1657ba76149fSAndrea Arcangeli return 0;
1658ba76149fSAndrea Arcangeli }
1659ba76149fSAndrea Arcangeli
1660ba76149fSAndrea Arcangeli void __khugepaged_exit(struct mm_struct *mm)
1661ba76149fSAndrea Arcangeli {
1662ba76149fSAndrea Arcangeli struct mm_slot *mm_slot;
1663ba76149fSAndrea Arcangeli int free = 0;
1664ba76149fSAndrea Arcangeli
1665ba76149fSAndrea Arcangeli spin_lock(&khugepaged_mm_lock);
1666ba76149fSAndrea Arcangeli mm_slot = get_mm_slot(mm);
1667ba76149fSAndrea Arcangeli if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
1668ba76149fSAndrea Arcangeli hlist_del(&mm_slot->hash);
1669ba76149fSAndrea Arcangeli list_del(&mm_slot->mm_node);
1670ba76149fSAndrea Arcangeli free = 1;
1671ba76149fSAndrea Arcangeli }
1672d788e80aSChris Wright spin_unlock(&khugepaged_mm_lock);
1673ba76149fSAndrea Arcangeli
1674ba76149fSAndrea Arcangeli if (free) {
1675ba76149fSAndrea Arcangeli clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1676ba76149fSAndrea Arcangeli free_mm_slot(mm_slot);
1677ba76149fSAndrea Arcangeli mmdrop(mm);
1678ba76149fSAndrea Arcangeli } else if (mm_slot) {
1679ba76149fSAndrea Arcangeli /*
1680ba76149fSAndrea Arcangeli * This is required to serialize against
1681ba76149fSAndrea Arcangeli * khugepaged_test_exit() (which is guaranteed to run
1682ba76149fSAndrea Arcangeli * under mmap sem read mode). Stop here (after we
1683ba76149fSAndrea Arcangeli * return, all pagetables will be destroyed) until
1684ba76149fSAndrea Arcangeli * khugepaged has finished working on the pagetables
1685ba76149fSAndrea Arcangeli * under the mmap_sem.
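 * The empty down_write()/up_write() pair below is that barrier:
 * it can only be acquired once khugepaged drops its read lock,
 * and by then khugepaged_test_exit() will see mm_users == 0.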
1686ba76149fSAndrea Arcangeli */ 1687ba76149fSAndrea Arcangeli down_write(&mm->mmap_sem); 1688ba76149fSAndrea Arcangeli up_write(&mm->mmap_sem); 1689d788e80aSChris Wright } 1690ba76149fSAndrea Arcangeli } 1691ba76149fSAndrea Arcangeli 1692ba76149fSAndrea Arcangeli static void release_pte_page(struct page *page) 1693ba76149fSAndrea Arcangeli { 1694ba76149fSAndrea Arcangeli /* 0 stands for page_is_file_cache(page) == false */ 1695ba76149fSAndrea Arcangeli dec_zone_page_state(page, NR_ISOLATED_ANON + 0); 1696ba76149fSAndrea Arcangeli unlock_page(page); 1697ba76149fSAndrea Arcangeli putback_lru_page(page); 1698ba76149fSAndrea Arcangeli } 1699ba76149fSAndrea Arcangeli 1700ba76149fSAndrea Arcangeli static void release_pte_pages(pte_t *pte, pte_t *_pte) 1701ba76149fSAndrea Arcangeli { 1702ba76149fSAndrea Arcangeli while (--_pte >= pte) { 1703ba76149fSAndrea Arcangeli pte_t pteval = *_pte; 1704ba76149fSAndrea Arcangeli if (!pte_none(pteval)) 1705ba76149fSAndrea Arcangeli release_pte_page(pte_page(pteval)); 1706ba76149fSAndrea Arcangeli } 1707ba76149fSAndrea Arcangeli } 1708ba76149fSAndrea Arcangeli 1709ba76149fSAndrea Arcangeli static void release_all_pte_pages(pte_t *pte) 1710ba76149fSAndrea Arcangeli { 1711ba76149fSAndrea Arcangeli release_pte_pages(pte, pte + HPAGE_PMD_NR); 1712ba76149fSAndrea Arcangeli } 1713ba76149fSAndrea Arcangeli 1714ba76149fSAndrea Arcangeli static int __collapse_huge_page_isolate(struct vm_area_struct *vma, 1715ba76149fSAndrea Arcangeli unsigned long address, 1716ba76149fSAndrea Arcangeli pte_t *pte) 1717ba76149fSAndrea Arcangeli { 1718ba76149fSAndrea Arcangeli struct page *page; 1719ba76149fSAndrea Arcangeli pte_t *_pte; 1720ba76149fSAndrea Arcangeli int referenced = 0, isolated = 0, none = 0; 1721ba76149fSAndrea Arcangeli for (_pte = pte; _pte < pte+HPAGE_PMD_NR; 1722ba76149fSAndrea Arcangeli _pte++, address += PAGE_SIZE) { 1723ba76149fSAndrea Arcangeli pte_t pteval = *_pte; 1724ba76149fSAndrea Arcangeli if (pte_none(pteval)) { 1725ba76149fSAndrea Arcangeli if (++none <= khugepaged_max_ptes_none) 1726ba76149fSAndrea Arcangeli continue; 1727ba76149fSAndrea Arcangeli else { 1728ba76149fSAndrea Arcangeli release_pte_pages(pte, _pte); 1729ba76149fSAndrea Arcangeli goto out; 1730ba76149fSAndrea Arcangeli } 1731ba76149fSAndrea Arcangeli } 1732ba76149fSAndrea Arcangeli if (!pte_present(pteval) || !pte_write(pteval)) { 1733ba76149fSAndrea Arcangeli release_pte_pages(pte, _pte); 1734ba76149fSAndrea Arcangeli goto out; 1735ba76149fSAndrea Arcangeli } 1736ba76149fSAndrea Arcangeli page = vm_normal_page(vma, address, pteval); 1737ba76149fSAndrea Arcangeli if (unlikely(!page)) { 1738ba76149fSAndrea Arcangeli release_pte_pages(pte, _pte); 1739ba76149fSAndrea Arcangeli goto out; 1740ba76149fSAndrea Arcangeli } 1741ba76149fSAndrea Arcangeli VM_BUG_ON(PageCompound(page)); 1742ba76149fSAndrea Arcangeli BUG_ON(!PageAnon(page)); 1743ba76149fSAndrea Arcangeli VM_BUG_ON(!PageSwapBacked(page)); 1744ba76149fSAndrea Arcangeli 1745ba76149fSAndrea Arcangeli /* cannot use mapcount: can't collapse if there's a gup pin */ 1746ba76149fSAndrea Arcangeli if (page_count(page) != 1) { 1747ba76149fSAndrea Arcangeli release_pte_pages(pte, _pte); 1748ba76149fSAndrea Arcangeli goto out; 1749ba76149fSAndrea Arcangeli } 1750ba76149fSAndrea Arcangeli /* 1751ba76149fSAndrea Arcangeli * We can do it before isolate_lru_page because the 1752ba76149fSAndrea Arcangeli * page can't be freed from under us. 
NOTE: PG_lock
1753ba76149fSAndrea Arcangeli * is needed to serialize against split_huge_page
1754ba76149fSAndrea Arcangeli * when invoked from the VM.
1755ba76149fSAndrea Arcangeli */
1756ba76149fSAndrea Arcangeli if (!trylock_page(page)) {
1757ba76149fSAndrea Arcangeli release_pte_pages(pte, _pte);
1758ba76149fSAndrea Arcangeli goto out;
1759ba76149fSAndrea Arcangeli }
1760ba76149fSAndrea Arcangeli /*
1761ba76149fSAndrea Arcangeli * Isolate the page to avoid collapsing a hugepage
1762ba76149fSAndrea Arcangeli * currently in use by the VM.
1763ba76149fSAndrea Arcangeli */
1764ba76149fSAndrea Arcangeli if (isolate_lru_page(page)) {
1765ba76149fSAndrea Arcangeli unlock_page(page);
1766ba76149fSAndrea Arcangeli release_pte_pages(pte, _pte);
1767ba76149fSAndrea Arcangeli goto out;
1768ba76149fSAndrea Arcangeli }
1769ba76149fSAndrea Arcangeli /* 0 stands for page_is_file_cache(page) == false */
1770ba76149fSAndrea Arcangeli inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
1771ba76149fSAndrea Arcangeli VM_BUG_ON(!PageLocked(page));
1772ba76149fSAndrea Arcangeli VM_BUG_ON(PageLRU(page));
1773ba76149fSAndrea Arcangeli
1774ba76149fSAndrea Arcangeli /* If there is no mapped pte young don't collapse the page */
17758ee53820SAndrea Arcangeli if (pte_young(pteval) || PageReferenced(page) ||
17768ee53820SAndrea Arcangeli mmu_notifier_test_young(vma->vm_mm, address))
1777ba76149fSAndrea Arcangeli referenced = 1;
1778ba76149fSAndrea Arcangeli }
1779ba76149fSAndrea Arcangeli if (unlikely(!referenced))
1780ba76149fSAndrea Arcangeli release_all_pte_pages(pte);
1781ba76149fSAndrea Arcangeli else
1782ba76149fSAndrea Arcangeli isolated = 1;
1783ba76149fSAndrea Arcangeli out:
1784ba76149fSAndrea Arcangeli return isolated;
1785ba76149fSAndrea Arcangeli }
1786ba76149fSAndrea Arcangeli
1787ba76149fSAndrea Arcangeli static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
1788ba76149fSAndrea Arcangeli struct vm_area_struct *vma,
1789ba76149fSAndrea Arcangeli unsigned long address,
1790ba76149fSAndrea Arcangeli spinlock_t *ptl)
1791ba76149fSAndrea Arcangeli {
1792ba76149fSAndrea Arcangeli pte_t *_pte;
1793ba76149fSAndrea Arcangeli for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
1794ba76149fSAndrea Arcangeli pte_t pteval = *_pte;
1795ba76149fSAndrea Arcangeli struct page *src_page;
1796ba76149fSAndrea Arcangeli
1797ba76149fSAndrea Arcangeli if (pte_none(pteval)) {
1798ba76149fSAndrea Arcangeli clear_user_highpage(page, address);
1799ba76149fSAndrea Arcangeli add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
1800ba76149fSAndrea Arcangeli } else {
1801ba76149fSAndrea Arcangeli src_page = pte_page(pteval);
1802ba76149fSAndrea Arcangeli copy_user_highpage(page, src_page, address, vma);
1803ba76149fSAndrea Arcangeli VM_BUG_ON(page_mapcount(src_page) != 1);
1804ba76149fSAndrea Arcangeli VM_BUG_ON(page_count(src_page) != 2);
1805ba76149fSAndrea Arcangeli release_pte_page(src_page);
1806ba76149fSAndrea Arcangeli /*
1807ba76149fSAndrea Arcangeli * ptl mostly unnecessary, but preempt has to
1808ba76149fSAndrea Arcangeli * be disabled to update the per-cpu stats
1809ba76149fSAndrea Arcangeli * inside page_remove_rmap().
1810ba76149fSAndrea Arcangeli */
1811ba76149fSAndrea Arcangeli spin_lock(ptl);
1812ba76149fSAndrea Arcangeli /*
1813ba76149fSAndrea Arcangeli * paravirt calls inside pte_clear here are
1814ba76149fSAndrea Arcangeli * superfluous.
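 * (the huge pmd covering these ptes was already cleared and the
 * TLB flushed by pmdp_clear_flush_notify(), so no CPU can still
 * reach them)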
1815ba76149fSAndrea Arcangeli */
1816ba76149fSAndrea Arcangeli pte_clear(vma->vm_mm, address, _pte);
1817ba76149fSAndrea Arcangeli page_remove_rmap(src_page);
1818ba76149fSAndrea Arcangeli spin_unlock(ptl);
1819ba76149fSAndrea Arcangeli free_page_and_swap_cache(src_page);
1820ba76149fSAndrea Arcangeli }
1821ba76149fSAndrea Arcangeli
1822ba76149fSAndrea Arcangeli address += PAGE_SIZE;
1823ba76149fSAndrea Arcangeli page++;
1824ba76149fSAndrea Arcangeli }
1825ba76149fSAndrea Arcangeli }
1826ba76149fSAndrea Arcangeli
1827ba76149fSAndrea Arcangeli static void collapse_huge_page(struct mm_struct *mm,
1828ba76149fSAndrea Arcangeli unsigned long address,
1829ce83d217SAndrea Arcangeli struct page **hpage,
18305c4b4be3SAndi Kleen struct vm_area_struct *vma,
18315c4b4be3SAndi Kleen int node)
1832ba76149fSAndrea Arcangeli {
1833ba76149fSAndrea Arcangeli pgd_t *pgd;
1834ba76149fSAndrea Arcangeli pud_t *pud;
1835ba76149fSAndrea Arcangeli pmd_t *pmd, _pmd;
1836ba76149fSAndrea Arcangeli pte_t *pte;
1837ba76149fSAndrea Arcangeli pgtable_t pgtable;
1838ba76149fSAndrea Arcangeli struct page *new_page;
1839ba76149fSAndrea Arcangeli spinlock_t *ptl;
1840ba76149fSAndrea Arcangeli int isolated;
1841ba76149fSAndrea Arcangeli unsigned long hstart, hend;
1842ba76149fSAndrea Arcangeli
1843ba76149fSAndrea Arcangeli VM_BUG_ON(address & ~HPAGE_PMD_MASK);
18440bbbc0b3SAndrea Arcangeli #ifndef CONFIG_NUMA
1845692e0b35SAndrea Arcangeli up_read(&mm->mmap_sem);
1846ba76149fSAndrea Arcangeli VM_BUG_ON(!*hpage);
1847ce83d217SAndrea Arcangeli new_page = *hpage;
18480bbbc0b3SAndrea Arcangeli #else
18490bbbc0b3SAndrea Arcangeli VM_BUG_ON(*hpage);
1850ce83d217SAndrea Arcangeli /*
1851ce83d217SAndrea Arcangeli * Allocate the page while the vma is still valid and under
1852ce83d217SAndrea Arcangeli * the mmap_sem read mode so there is no memory allocation
1853ce83d217SAndrea Arcangeli * later when we take the mmap_sem in write mode. This is
1854ce83d217SAndrea Arcangeli * friendlier behavior (OTOH it may actually hide bugs) for
1855ce83d217SAndrea Arcangeli * filesystems in userland with daemons allocating memory in
1856ce83d217SAndrea Arcangeli * the userland I/O paths. Allocating memory with the
1857ce83d217SAndrea Arcangeli * mmap_sem in read mode is also a good idea to allow greater
1858ce83d217SAndrea Arcangeli * scalability.
1859ce83d217SAndrea Arcangeli */
18605c4b4be3SAndi Kleen new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
1861cc5d462fSAndi Kleen node, __GFP_OTHER_NODE);
1862692e0b35SAndrea Arcangeli
1863692e0b35SAndrea Arcangeli /*
1864692e0b35SAndrea Arcangeli * After allocating the hugepage, release the mmap_sem read lock in
1865692e0b35SAndrea Arcangeli * preparation for taking it in write mode.
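 * (an rwsem cannot be upgraded from read to write atomically, so
 * after down_write() below the vma has to be revalidated with
 * find_vma())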
1866692e0b35SAndrea Arcangeli */
1867ce83d217SAndrea Arcangeli up_read(&mm->mmap_sem);
1868692e0b35SAndrea Arcangeli if (unlikely(!new_page)) {
186981ab4201SAndi Kleen count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
1870ce83d217SAndrea Arcangeli *hpage = ERR_PTR(-ENOMEM);
1871ce83d217SAndrea Arcangeli return;
1872ce83d217SAndrea Arcangeli }
18732fbfac4eSHugh Dickins #endif
1874ce83d217SAndrea Arcangeli
1875692e0b35SAndrea Arcangeli count_vm_event(THP_COLLAPSE_ALLOC);
1876692e0b35SAndrea Arcangeli if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
1877692e0b35SAndrea Arcangeli #ifdef CONFIG_NUMA
1878692e0b35SAndrea Arcangeli put_page(new_page);
1879692e0b35SAndrea Arcangeli #endif
1880692e0b35SAndrea Arcangeli return;
1881692e0b35SAndrea Arcangeli }
1882ba76149fSAndrea Arcangeli
1883ba76149fSAndrea Arcangeli /*
1884ba76149fSAndrea Arcangeli * Prevent all access to pagetables with the exception of
1885ba76149fSAndrea Arcangeli * gup_fast later handled by the ptep_clear_flush and the VM
1886ba76149fSAndrea Arcangeli * handled by the anon_vma lock + PG_lock.
1887ba76149fSAndrea Arcangeli */
1888ba76149fSAndrea Arcangeli down_write(&mm->mmap_sem);
1889ba76149fSAndrea Arcangeli if (unlikely(khugepaged_test_exit(mm)))
1890ba76149fSAndrea Arcangeli goto out;
1891ba76149fSAndrea Arcangeli
1892ba76149fSAndrea Arcangeli vma = find_vma(mm, address);
1893ba76149fSAndrea Arcangeli hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1894ba76149fSAndrea Arcangeli hend = vma->vm_end & HPAGE_PMD_MASK;
1895ba76149fSAndrea Arcangeli if (address < hstart || address + HPAGE_PMD_SIZE > hend)
1896ba76149fSAndrea Arcangeli goto out;
1897ba76149fSAndrea Arcangeli
189860ab3244SAndrea Arcangeli if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
189960ab3244SAndrea Arcangeli (vma->vm_flags & VM_NOHUGEPAGE))
1900ba76149fSAndrea Arcangeli goto out;
1901ba76149fSAndrea Arcangeli
190278f11a25SAndrea Arcangeli if (!vma->anon_vma || vma->vm_ops)
1903ba76149fSAndrea Arcangeli goto out;
1904a7d6e4ecSAndrea Arcangeli if (is_vma_temporary_stack(vma))
1905a7d6e4ecSAndrea Arcangeli goto out;
190678f11a25SAndrea Arcangeli /*
190778f11a25SAndrea Arcangeli * If is_pfn_mapping() is true is_linear_pfn_mapping() must be
190878f11a25SAndrea Arcangeli * true too, verify it here.
190978f11a25SAndrea Arcangeli */ 191078f11a25SAndrea Arcangeli VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP); 1911ba76149fSAndrea Arcangeli 1912ba76149fSAndrea Arcangeli pgd = pgd_offset(mm, address); 1913ba76149fSAndrea Arcangeli if (!pgd_present(*pgd)) 1914ba76149fSAndrea Arcangeli goto out; 1915ba76149fSAndrea Arcangeli 1916ba76149fSAndrea Arcangeli pud = pud_offset(pgd, address); 1917ba76149fSAndrea Arcangeli if (!pud_present(*pud)) 1918ba76149fSAndrea Arcangeli goto out; 1919ba76149fSAndrea Arcangeli 1920ba76149fSAndrea Arcangeli pmd = pmd_offset(pud, address); 1921ba76149fSAndrea Arcangeli /* pmd can't go away or become huge under us */ 1922ba76149fSAndrea Arcangeli if (!pmd_present(*pmd) || pmd_trans_huge(*pmd)) 1923ba76149fSAndrea Arcangeli goto out; 1924ba76149fSAndrea Arcangeli 1925ba76149fSAndrea Arcangeli anon_vma_lock(vma->anon_vma); 1926ba76149fSAndrea Arcangeli 1927ba76149fSAndrea Arcangeli pte = pte_offset_map(pmd, address); 1928ba76149fSAndrea Arcangeli ptl = pte_lockptr(mm, pmd); 1929ba76149fSAndrea Arcangeli 1930ba76149fSAndrea Arcangeli spin_lock(&mm->page_table_lock); /* probably unnecessary */ 1931ba76149fSAndrea Arcangeli /* 1932ba76149fSAndrea Arcangeli * After this gup_fast can't run anymore. This also removes 1933ba76149fSAndrea Arcangeli * any huge TLB entry from the CPU so we won't allow 1934ba76149fSAndrea Arcangeli * huge and small TLB entries for the same virtual address 1935ba76149fSAndrea Arcangeli * to avoid the risk of CPU bugs in that area. 1936ba76149fSAndrea Arcangeli */ 1937ba76149fSAndrea Arcangeli _pmd = pmdp_clear_flush_notify(vma, address, pmd); 1938ba76149fSAndrea Arcangeli spin_unlock(&mm->page_table_lock); 1939ba76149fSAndrea Arcangeli 1940ba76149fSAndrea Arcangeli spin_lock(ptl); 1941ba76149fSAndrea Arcangeli isolated = __collapse_huge_page_isolate(vma, address, pte); 1942ba76149fSAndrea Arcangeli spin_unlock(ptl); 1943ba76149fSAndrea Arcangeli 1944ba76149fSAndrea Arcangeli if (unlikely(!isolated)) { 1945453c7192SJohannes Weiner pte_unmap(pte); 1946ba76149fSAndrea Arcangeli spin_lock(&mm->page_table_lock); 1947ba76149fSAndrea Arcangeli BUG_ON(!pmd_none(*pmd)); 1948ba76149fSAndrea Arcangeli set_pmd_at(mm, address, pmd, _pmd); 1949ba76149fSAndrea Arcangeli spin_unlock(&mm->page_table_lock); 1950ba76149fSAndrea Arcangeli anon_vma_unlock(vma->anon_vma); 1951ce83d217SAndrea Arcangeli goto out; 1952ba76149fSAndrea Arcangeli } 1953ba76149fSAndrea Arcangeli 1954ba76149fSAndrea Arcangeli /* 1955ba76149fSAndrea Arcangeli * All pages are isolated and locked so anon_vma rmap 1956ba76149fSAndrea Arcangeli * can't run anymore. 
1957ba76149fSAndrea Arcangeli */
1958ba76149fSAndrea Arcangeli anon_vma_unlock(vma->anon_vma);
1959ba76149fSAndrea Arcangeli
1960ba76149fSAndrea Arcangeli __collapse_huge_page_copy(pte, new_page, vma, address, ptl);
1961453c7192SJohannes Weiner pte_unmap(pte);
1962ba76149fSAndrea Arcangeli __SetPageUptodate(new_page);
1963ba76149fSAndrea Arcangeli pgtable = pmd_pgtable(_pmd);
1964ba76149fSAndrea Arcangeli VM_BUG_ON(page_count(pgtable) != 1);
1965ba76149fSAndrea Arcangeli VM_BUG_ON(page_mapcount(pgtable) != 0);
1966ba76149fSAndrea Arcangeli
1967ba76149fSAndrea Arcangeli _pmd = mk_pmd(new_page, vma->vm_page_prot);
1968ba76149fSAndrea Arcangeli _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1969ba76149fSAndrea Arcangeli _pmd = pmd_mkhuge(_pmd);
1970ba76149fSAndrea Arcangeli
1971ba76149fSAndrea Arcangeli /*
1972ba76149fSAndrea Arcangeli * spin_lock() below is not the equivalent of smp_wmb(), so
1973ba76149fSAndrea Arcangeli * this is needed to keep the copy_huge_page writes from
1974ba76149fSAndrea Arcangeli * becoming visible after the set_pmd_at() write.
1975ba76149fSAndrea Arcangeli */
1976ba76149fSAndrea Arcangeli smp_wmb();
1977ba76149fSAndrea Arcangeli
1978ba76149fSAndrea Arcangeli spin_lock(&mm->page_table_lock);
1979ba76149fSAndrea Arcangeli BUG_ON(!pmd_none(*pmd));
1980ba76149fSAndrea Arcangeli page_add_new_anon_rmap(new_page, vma, address);
1981ba76149fSAndrea Arcangeli set_pmd_at(mm, address, pmd, _pmd);
198235d8c7adSHillf Danton update_mmu_cache(vma, address, _pmd);
1983ba76149fSAndrea Arcangeli prepare_pmd_huge_pte(pgtable, mm);
1984ba76149fSAndrea Arcangeli spin_unlock(&mm->page_table_lock);
1985ba76149fSAndrea Arcangeli
19860bbbc0b3SAndrea Arcangeli #ifndef CONFIG_NUMA
1987ba76149fSAndrea Arcangeli *hpage = NULL;
19880bbbc0b3SAndrea Arcangeli #endif
1989ba76149fSAndrea Arcangeli khugepaged_pages_collapsed++;
1990ce83d217SAndrea Arcangeli out_up_write:
1991ba76149fSAndrea Arcangeli up_write(&mm->mmap_sem);
19920bbbc0b3SAndrea Arcangeli return;
19930bbbc0b3SAndrea Arcangeli
1994ce83d217SAndrea Arcangeli out:
1995678ff896SKAMEZAWA Hiroyuki mem_cgroup_uncharge_page(new_page);
19960bbbc0b3SAndrea Arcangeli #ifdef CONFIG_NUMA
19970bbbc0b3SAndrea Arcangeli put_page(new_page);
19980bbbc0b3SAndrea Arcangeli #endif
1999ce83d217SAndrea Arcangeli goto out_up_write;
2000ba76149fSAndrea Arcangeli }
2001ba76149fSAndrea Arcangeli
2002ba76149fSAndrea Arcangeli static int khugepaged_scan_pmd(struct mm_struct *mm,
2003ba76149fSAndrea Arcangeli struct vm_area_struct *vma,
2004ba76149fSAndrea Arcangeli unsigned long address,
2005ba76149fSAndrea Arcangeli struct page **hpage)
2006ba76149fSAndrea Arcangeli {
2007ba76149fSAndrea Arcangeli pgd_t *pgd;
2008ba76149fSAndrea Arcangeli pud_t *pud;
2009ba76149fSAndrea Arcangeli pmd_t *pmd;
2010ba76149fSAndrea Arcangeli pte_t *pte, *_pte;
2011ba76149fSAndrea Arcangeli int ret = 0, referenced = 0, none = 0;
2012ba76149fSAndrea Arcangeli struct page *page;
2013ba76149fSAndrea Arcangeli unsigned long _address;
2014ba76149fSAndrea Arcangeli spinlock_t *ptl;
20155c4b4be3SAndi Kleen int node = -1;
2016ba76149fSAndrea Arcangeli
2017ba76149fSAndrea Arcangeli VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2018ba76149fSAndrea Arcangeli
2019ba76149fSAndrea Arcangeli pgd = pgd_offset(mm, address);
2020ba76149fSAndrea Arcangeli if (!pgd_present(*pgd))
2021ba76149fSAndrea Arcangeli goto out;
2022ba76149fSAndrea Arcangeli
2023ba76149fSAndrea Arcangeli pud = pud_offset(pgd, address);
2024ba76149fSAndrea Arcangeli if (!pud_present(*pud))
2025ba76149fSAndrea Arcangeli goto out;
2026ba76149fSAndrea Arcangeli
2027ba76149fSAndrea Arcangeli pmd = pmd_offset(pud, address);
2028ba76149fSAndrea Arcangeli if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
2029ba76149fSAndrea Arcangeli goto out;
2030ba76149fSAndrea Arcangeli
2031ba76149fSAndrea Arcangeli pte = pte_offset_map_lock(mm, pmd, address, &ptl);
2032ba76149fSAndrea Arcangeli for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
2033ba76149fSAndrea Arcangeli _pte++, _address += PAGE_SIZE) {
2034ba76149fSAndrea Arcangeli pte_t pteval = *_pte;
2035ba76149fSAndrea Arcangeli if (pte_none(pteval)) {
2036ba76149fSAndrea Arcangeli if (++none <= khugepaged_max_ptes_none)
2037ba76149fSAndrea Arcangeli continue;
2038ba76149fSAndrea Arcangeli else
2039ba76149fSAndrea Arcangeli goto out_unmap;
2040ba76149fSAndrea Arcangeli }
2041ba76149fSAndrea Arcangeli if (!pte_present(pteval) || !pte_write(pteval))
2042ba76149fSAndrea Arcangeli goto out_unmap;
2043ba76149fSAndrea Arcangeli page = vm_normal_page(vma, _address, pteval);
2044ba76149fSAndrea Arcangeli if (unlikely(!page))
2045ba76149fSAndrea Arcangeli goto out_unmap;
20465c4b4be3SAndi Kleen /*
20475c4b4be3SAndi Kleen * Choose the node of the first page. This could
20485c4b4be3SAndi Kleen * be more sophisticated and look at more pages,
20495c4b4be3SAndi Kleen * but isn't for now.
20505c4b4be3SAndi Kleen */
20515c4b4be3SAndi Kleen if (node == -1)
20525c4b4be3SAndi Kleen node = page_to_nid(page);
2053ba76149fSAndrea Arcangeli VM_BUG_ON(PageCompound(page));
2054ba76149fSAndrea Arcangeli if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
2055ba76149fSAndrea Arcangeli goto out_unmap;
2056ba76149fSAndrea Arcangeli /* cannot use mapcount: can't collapse if there's a gup pin */
2057ba76149fSAndrea Arcangeli if (page_count(page) != 1)
2058ba76149fSAndrea Arcangeli goto out_unmap;
20598ee53820SAndrea Arcangeli if (pte_young(pteval) || PageReferenced(page) ||
20608ee53820SAndrea Arcangeli mmu_notifier_test_young(vma->vm_mm, address))
2061ba76149fSAndrea Arcangeli referenced = 1;
2062ba76149fSAndrea Arcangeli }
2063ba76149fSAndrea Arcangeli if (referenced)
2064ba76149fSAndrea Arcangeli ret = 1;
2065ba76149fSAndrea Arcangeli out_unmap:
2066ba76149fSAndrea Arcangeli pte_unmap_unlock(pte, ptl);
2067ce83d217SAndrea Arcangeli if (ret)
2068ce83d217SAndrea Arcangeli /* collapse_huge_page will return with the mmap_sem released */
20695c4b4be3SAndi Kleen collapse_huge_page(mm, address, hpage, vma, node);
2070ba76149fSAndrea Arcangeli out:
2071ba76149fSAndrea Arcangeli return ret;
2072ba76149fSAndrea Arcangeli }
2073ba76149fSAndrea Arcangeli
2074ba76149fSAndrea Arcangeli static void collect_mm_slot(struct mm_slot *mm_slot)
2075ba76149fSAndrea Arcangeli {
2076ba76149fSAndrea Arcangeli struct mm_struct *mm = mm_slot->mm;
2077ba76149fSAndrea Arcangeli
2078b9980cdcSHugh Dickins VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2079ba76149fSAndrea Arcangeli
2080ba76149fSAndrea Arcangeli if (khugepaged_test_exit(mm)) {
2081ba76149fSAndrea Arcangeli /* free mm_slot */
2082ba76149fSAndrea Arcangeli hlist_del(&mm_slot->hash);
2083ba76149fSAndrea Arcangeli list_del(&mm_slot->mm_node);
2084ba76149fSAndrea Arcangeli
2085ba76149fSAndrea Arcangeli /*
2086ba76149fSAndrea Arcangeli * Not strictly needed because the mm exited already.
2087ba76149fSAndrea Arcangeli *
2088ba76149fSAndrea Arcangeli * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
2089ba76149fSAndrea Arcangeli */
2090ba76149fSAndrea Arcangeli
2091ba76149fSAndrea Arcangeli /* khugepaged_mm_lock actually not necessary for the below */
2092ba76149fSAndrea Arcangeli free_mm_slot(mm_slot);
2093ba76149fSAndrea Arcangeli mmdrop(mm);
2094ba76149fSAndrea Arcangeli }
2095ba76149fSAndrea Arcangeli }
2096ba76149fSAndrea Arcangeli
2097ba76149fSAndrea Arcangeli static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2098ba76149fSAndrea Arcangeli struct page **hpage)
20992f1da642SH Hartley Sweeten __releases(&khugepaged_mm_lock)
21002f1da642SH Hartley Sweeten __acquires(&khugepaged_mm_lock)
2101ba76149fSAndrea Arcangeli {
2102ba76149fSAndrea Arcangeli struct mm_slot *mm_slot;
2103ba76149fSAndrea Arcangeli struct mm_struct *mm;
2104ba76149fSAndrea Arcangeli struct vm_area_struct *vma;
2105ba76149fSAndrea Arcangeli int progress = 0;
2106ba76149fSAndrea Arcangeli
2107ba76149fSAndrea Arcangeli VM_BUG_ON(!pages);
2108b9980cdcSHugh Dickins VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2109ba76149fSAndrea Arcangeli
2110ba76149fSAndrea Arcangeli if (khugepaged_scan.mm_slot)
2111ba76149fSAndrea Arcangeli mm_slot = khugepaged_scan.mm_slot;
2112ba76149fSAndrea Arcangeli else {
2113ba76149fSAndrea Arcangeli mm_slot = list_entry(khugepaged_scan.mm_head.next,
2114ba76149fSAndrea Arcangeli struct mm_slot, mm_node);
2115ba76149fSAndrea Arcangeli khugepaged_scan.address = 0;
2116ba76149fSAndrea Arcangeli khugepaged_scan.mm_slot = mm_slot;
2117ba76149fSAndrea Arcangeli }
2118ba76149fSAndrea Arcangeli spin_unlock(&khugepaged_mm_lock);
2119ba76149fSAndrea Arcangeli
2120ba76149fSAndrea Arcangeli mm = mm_slot->mm;
2121ba76149fSAndrea Arcangeli down_read(&mm->mmap_sem);
2122ba76149fSAndrea Arcangeli if (unlikely(khugepaged_test_exit(mm)))
2123ba76149fSAndrea Arcangeli vma = NULL;
2124ba76149fSAndrea Arcangeli else
2125ba76149fSAndrea Arcangeli vma = find_vma(mm, khugepaged_scan.address);
2126ba76149fSAndrea Arcangeli
2127ba76149fSAndrea Arcangeli progress++;
2128ba76149fSAndrea Arcangeli for (; vma; vma = vma->vm_next) {
2129ba76149fSAndrea Arcangeli unsigned long hstart, hend;
2130ba76149fSAndrea Arcangeli
2131ba76149fSAndrea Arcangeli cond_resched();
2132ba76149fSAndrea Arcangeli if (unlikely(khugepaged_test_exit(mm))) {
2133ba76149fSAndrea Arcangeli progress++;
2134ba76149fSAndrea Arcangeli break;
2135ba76149fSAndrea Arcangeli }
2136ba76149fSAndrea Arcangeli
213760ab3244SAndrea Arcangeli if ((!(vma->vm_flags & VM_HUGEPAGE) &&
213860ab3244SAndrea Arcangeli !khugepaged_always()) ||
213960ab3244SAndrea Arcangeli (vma->vm_flags & VM_NOHUGEPAGE)) {
2140a7d6e4ecSAndrea Arcangeli skip:
2141ba76149fSAndrea Arcangeli progress++;
2142ba76149fSAndrea Arcangeli continue;
2143ba76149fSAndrea Arcangeli }
214478f11a25SAndrea Arcangeli if (!vma->anon_vma || vma->vm_ops)
2145a7d6e4ecSAndrea Arcangeli goto skip;
2146a7d6e4ecSAndrea Arcangeli if (is_vma_temporary_stack(vma))
2147a7d6e4ecSAndrea Arcangeli goto skip;
214878f11a25SAndrea Arcangeli /*
214978f11a25SAndrea Arcangeli * If is_pfn_mapping() is true is_linear_pfn_mapping()
215078f11a25SAndrea Arcangeli * must be true too, verify it here.
2097ba76149fSAndrea Arcangeli static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2098ba76149fSAndrea Arcangeli 					    struct page **hpage)
20992f1da642SH Hartley Sweeten 	__releases(&khugepaged_mm_lock)
21002f1da642SH Hartley Sweeten 	__acquires(&khugepaged_mm_lock)
2101ba76149fSAndrea Arcangeli {
2102ba76149fSAndrea Arcangeli 	struct mm_slot *mm_slot;
2103ba76149fSAndrea Arcangeli 	struct mm_struct *mm;
2104ba76149fSAndrea Arcangeli 	struct vm_area_struct *vma;
2105ba76149fSAndrea Arcangeli 	int progress = 0;
2106ba76149fSAndrea Arcangeli 
2107ba76149fSAndrea Arcangeli 	VM_BUG_ON(!pages);
2108b9980cdcSHugh Dickins 	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2109ba76149fSAndrea Arcangeli 
2110ba76149fSAndrea Arcangeli 	if (khugepaged_scan.mm_slot)
2111ba76149fSAndrea Arcangeli 		mm_slot = khugepaged_scan.mm_slot;
2112ba76149fSAndrea Arcangeli 	else {
2113ba76149fSAndrea Arcangeli 		mm_slot = list_entry(khugepaged_scan.mm_head.next,
2114ba76149fSAndrea Arcangeli 				     struct mm_slot, mm_node);
2115ba76149fSAndrea Arcangeli 		khugepaged_scan.address = 0;
2116ba76149fSAndrea Arcangeli 		khugepaged_scan.mm_slot = mm_slot;
2117ba76149fSAndrea Arcangeli 	}
2118ba76149fSAndrea Arcangeli 	spin_unlock(&khugepaged_mm_lock);
2119ba76149fSAndrea Arcangeli 
2120ba76149fSAndrea Arcangeli 	mm = mm_slot->mm;
2121ba76149fSAndrea Arcangeli 	down_read(&mm->mmap_sem);
2122ba76149fSAndrea Arcangeli 	if (unlikely(khugepaged_test_exit(mm)))
2123ba76149fSAndrea Arcangeli 		vma = NULL;
2124ba76149fSAndrea Arcangeli 	else
2125ba76149fSAndrea Arcangeli 		vma = find_vma(mm, khugepaged_scan.address);
2126ba76149fSAndrea Arcangeli 
2127ba76149fSAndrea Arcangeli 	progress++;
2128ba76149fSAndrea Arcangeli 	for (; vma; vma = vma->vm_next) {
2129ba76149fSAndrea Arcangeli 		unsigned long hstart, hend;
2130ba76149fSAndrea Arcangeli 
2131ba76149fSAndrea Arcangeli 		cond_resched();
2132ba76149fSAndrea Arcangeli 		if (unlikely(khugepaged_test_exit(mm))) {
2133ba76149fSAndrea Arcangeli 			progress++;
2134ba76149fSAndrea Arcangeli 			break;
2135ba76149fSAndrea Arcangeli 		}
2136ba76149fSAndrea Arcangeli 
213760ab3244SAndrea Arcangeli 		if ((!(vma->vm_flags & VM_HUGEPAGE) &&
213860ab3244SAndrea Arcangeli 		     !khugepaged_always()) ||
213960ab3244SAndrea Arcangeli 		    (vma->vm_flags & VM_NOHUGEPAGE)) {
2140a7d6e4ecSAndrea Arcangeli 		skip:
2141ba76149fSAndrea Arcangeli 			progress++;
2142ba76149fSAndrea Arcangeli 			continue;
2143ba76149fSAndrea Arcangeli 		}
214478f11a25SAndrea Arcangeli 		if (!vma->anon_vma || vma->vm_ops)
2145a7d6e4ecSAndrea Arcangeli 			goto skip;
2146a7d6e4ecSAndrea Arcangeli 		if (is_vma_temporary_stack(vma))
2147a7d6e4ecSAndrea Arcangeli 			goto skip;
214878f11a25SAndrea Arcangeli 		/*
214978f11a25SAndrea Arcangeli 		 * If is_pfn_mapping() is true is_linear_pfn_mapping()
215078f11a25SAndrea Arcangeli 		 * must be true too, verify it here.
215178f11a25SAndrea Arcangeli 		 */
215278f11a25SAndrea Arcangeli 		VM_BUG_ON(is_linear_pfn_mapping(vma) ||
215378f11a25SAndrea Arcangeli 			  vma->vm_flags & VM_NO_THP);
2154ba76149fSAndrea Arcangeli 
2155ba76149fSAndrea Arcangeli 		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2156ba76149fSAndrea Arcangeli 		hend = vma->vm_end & HPAGE_PMD_MASK;
2157a7d6e4ecSAndrea Arcangeli 		if (hstart >= hend)
2158a7d6e4ecSAndrea Arcangeli 			goto skip;
2159a7d6e4ecSAndrea Arcangeli 		if (khugepaged_scan.address > hend)
2160a7d6e4ecSAndrea Arcangeli 			goto skip;
2161ba76149fSAndrea Arcangeli 		if (khugepaged_scan.address < hstart)
2162ba76149fSAndrea Arcangeli 			khugepaged_scan.address = hstart;
2163a7d6e4ecSAndrea Arcangeli 		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2164ba76149fSAndrea Arcangeli 
2165ba76149fSAndrea Arcangeli 		while (khugepaged_scan.address < hend) {
2166ba76149fSAndrea Arcangeli 			int ret;
2167ba76149fSAndrea Arcangeli 			cond_resched();
2168ba76149fSAndrea Arcangeli 			if (unlikely(khugepaged_test_exit(mm)))
2169ba76149fSAndrea Arcangeli 				goto breakouterloop;
2170ba76149fSAndrea Arcangeli 
2171ba76149fSAndrea Arcangeli 			VM_BUG_ON(khugepaged_scan.address < hstart ||
2172ba76149fSAndrea Arcangeli 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
2173ba76149fSAndrea Arcangeli 				  hend);
2174ba76149fSAndrea Arcangeli 			ret = khugepaged_scan_pmd(mm, vma,
2175ba76149fSAndrea Arcangeli 						  khugepaged_scan.address,
2176ba76149fSAndrea Arcangeli 						  hpage);
2177ba76149fSAndrea Arcangeli 			/* move to next address */
2178ba76149fSAndrea Arcangeli 			khugepaged_scan.address += HPAGE_PMD_SIZE;
2179ba76149fSAndrea Arcangeli 			progress += HPAGE_PMD_NR;
2180ba76149fSAndrea Arcangeli 			if (ret)
2181ba76149fSAndrea Arcangeli 				/* we released mmap_sem so break loop */
2182ba76149fSAndrea Arcangeli 				goto breakouterloop_mmap_sem;
2183ba76149fSAndrea Arcangeli 			if (progress >= pages)
2184ba76149fSAndrea Arcangeli 				goto breakouterloop;
2185ba76149fSAndrea Arcangeli 		}
2186ba76149fSAndrea Arcangeli 	}
2187ba76149fSAndrea Arcangeli breakouterloop:
2188ba76149fSAndrea Arcangeli 	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
2189ba76149fSAndrea Arcangeli breakouterloop_mmap_sem:
2190ba76149fSAndrea Arcangeli 
2191ba76149fSAndrea Arcangeli 	spin_lock(&khugepaged_mm_lock);
2192a7d6e4ecSAndrea Arcangeli 	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2193ba76149fSAndrea Arcangeli 	/*
2194ba76149fSAndrea Arcangeli 	 * Release the current mm_slot if this mm is about to die, or
2195ba76149fSAndrea Arcangeli 	 * if we scanned all vmas of this mm.
2196ba76149fSAndrea Arcangeli 	 */
2197ba76149fSAndrea Arcangeli 	if (khugepaged_test_exit(mm) || !vma) {
2198ba76149fSAndrea Arcangeli 		/*
2199ba76149fSAndrea Arcangeli 		 * Make sure that if mm_users is reaching zero while
2200ba76149fSAndrea Arcangeli 		 * khugepaged runs here, khugepaged_exit will find
2201ba76149fSAndrea Arcangeli 		 * mm_slot not pointing to the exiting mm.
2202ba76149fSAndrea Arcangeli 		 */
2203ba76149fSAndrea Arcangeli 		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2204ba76149fSAndrea Arcangeli 			khugepaged_scan.mm_slot = list_entry(
2205ba76149fSAndrea Arcangeli 				mm_slot->mm_node.next,
2206ba76149fSAndrea Arcangeli 				struct mm_slot, mm_node);
2207ba76149fSAndrea Arcangeli 			khugepaged_scan.address = 0;
2208ba76149fSAndrea Arcangeli 		} else {
2209ba76149fSAndrea Arcangeli 			khugepaged_scan.mm_slot = NULL;
2210ba76149fSAndrea Arcangeli 			khugepaged_full_scans++;
2211ba76149fSAndrea Arcangeli 		}
2212ba76149fSAndrea Arcangeli 
2213ba76149fSAndrea Arcangeli 		collect_mm_slot(mm_slot);
2214ba76149fSAndrea Arcangeli 	}
2215ba76149fSAndrea Arcangeli 
2216ba76149fSAndrea Arcangeli 	return progress;
2217ba76149fSAndrea Arcangeli }
2218ba76149fSAndrea Arcangeli 
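/*
 * The cursor and counters maintained above are observable from userspace
 * through sysfs. Editor's sketch of a small reader; the paths are the
 * standard THP khugepaged attributes, assuming CONFIG_TRANSPARENT_HUGEPAGE
 * and sysfs mounted at /sys.
 */
#include <stdio.h>

static void show(const char *name)
{
	char path[256], buf[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/mm/transparent_hugepage/khugepaged/%s", name);
	f = fopen(path, "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("%-20s %s", name, buf);
	if (f)
		fclose(f);
}

int main(void)
{
	show("full_scans");		/* khugepaged_full_scans above */
	show("pages_collapsed");	/* khugepaged_pages_collapsed */
	show("pages_to_scan");		/* per-wakeup scan budget */
	return 0;
}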
2219ba76149fSAndrea Arcangeli static int khugepaged_has_work(void)
2220ba76149fSAndrea Arcangeli {
2221ba76149fSAndrea Arcangeli 	return !list_empty(&khugepaged_scan.mm_head) &&
2222ba76149fSAndrea Arcangeli 		khugepaged_enabled();
2223ba76149fSAndrea Arcangeli }
2224ba76149fSAndrea Arcangeli 
2225ba76149fSAndrea Arcangeli static int khugepaged_wait_event(void)
2226ba76149fSAndrea Arcangeli {
2227ba76149fSAndrea Arcangeli 	return !list_empty(&khugepaged_scan.mm_head) ||
2228ba76149fSAndrea Arcangeli 		!khugepaged_enabled();
2229ba76149fSAndrea Arcangeli }
2230ba76149fSAndrea Arcangeli 
2231ba76149fSAndrea Arcangeli static void khugepaged_do_scan(struct page **hpage)
2232ba76149fSAndrea Arcangeli {
2233ba76149fSAndrea Arcangeli 	unsigned int progress = 0, pass_through_head = 0;
2234ba76149fSAndrea Arcangeli 	unsigned int pages = khugepaged_pages_to_scan;
2235ba76149fSAndrea Arcangeli 
2236ba76149fSAndrea Arcangeli 	barrier(); /* write khugepaged_pages_to_scan to local stack */
2237ba76149fSAndrea Arcangeli 
2238ba76149fSAndrea Arcangeli 	while (progress < pages) {
2239ba76149fSAndrea Arcangeli 		cond_resched();
2240ba76149fSAndrea Arcangeli 
22410bbbc0b3SAndrea Arcangeli #ifndef CONFIG_NUMA
2242ba76149fSAndrea Arcangeli 		if (!*hpage) {
2243ba76149fSAndrea Arcangeli 			*hpage = alloc_hugepage(khugepaged_defrag());
224481ab4201SAndi Kleen 			if (unlikely(!*hpage)) {
224481ab4201SAndi Kleen 				count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2246ba76149fSAndrea Arcangeli 				break;
2247ba76149fSAndrea Arcangeli 			}
224881ab4201SAndi Kleen 			count_vm_event(THP_COLLAPSE_ALLOC);
224981ab4201SAndi Kleen 		}
22500bbbc0b3SAndrea Arcangeli #else
22510bbbc0b3SAndrea Arcangeli 		if (IS_ERR(*hpage))
22520bbbc0b3SAndrea Arcangeli 			break;
22530bbbc0b3SAndrea Arcangeli #endif
2254ba76149fSAndrea Arcangeli 
2255878aee7dSAndrea Arcangeli 		if (unlikely(kthread_should_stop() || freezing(current)))
2256878aee7dSAndrea Arcangeli 			break;
2257878aee7dSAndrea Arcangeli 
2258ba76149fSAndrea Arcangeli 		spin_lock(&khugepaged_mm_lock);
2259ba76149fSAndrea Arcangeli 		if (!khugepaged_scan.mm_slot)
2260ba76149fSAndrea Arcangeli 			pass_through_head++;
2261ba76149fSAndrea Arcangeli 		if (khugepaged_has_work() &&
2262ba76149fSAndrea Arcangeli 		    pass_through_head < 2)
2263ba76149fSAndrea Arcangeli 			progress += khugepaged_scan_mm_slot(pages - progress,
2264ba76149fSAndrea Arcangeli 							    hpage);
2265ba76149fSAndrea Arcangeli 		else
2266ba76149fSAndrea Arcangeli 			progress = pages;
2267ba76149fSAndrea Arcangeli 		spin_unlock(&khugepaged_mm_lock);
2268ba76149fSAndrea Arcangeli 	}
2269ba76149fSAndrea Arcangeli }
2270ba76149fSAndrea Arcangeli 
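/*
 * Editor's sketch of the budget arithmetic in khugepaged_do_scan(): each
 * fully scanned pmd range credits HPAGE_PMD_NR pages of progress, so the
 * default pages_to_scan of 8 * 512 = 4096 allows at most eight pmd ranges
 * per wakeup (and the pass_through_head < 2 test additionally stops the
 * walk once it wraps past the list head twice). Assumes 4K base pages and
 * 2MB huge pages.
 */
#include <assert.h>

#define HPAGE_PMD_NR	512			/* ptes per pmd */
#define PAGES_TO_SCAN	(8 * HPAGE_PMD_NR)	/* default budget */

int main(void)
{
	unsigned int progress = 0, pmds = 0;

	while (progress < PAGES_TO_SCAN) {
		progress += HPAGE_PMD_NR;	/* one khugepaged_scan_pmd() */
		pmds++;
	}
	assert(pmds == 8);
	return 0;
}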
22710bbbc0b3SAndrea Arcangeli static void khugepaged_alloc_sleep(void)
2272ba76149fSAndrea Arcangeli {
22731dfb059bSAndrea Arcangeli 	wait_event_freezable_timeout(khugepaged_wait, false,
22741dfb059bSAndrea Arcangeli 			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
2275ba76149fSAndrea Arcangeli }
22760bbbc0b3SAndrea Arcangeli 
22770bbbc0b3SAndrea Arcangeli #ifndef CONFIG_NUMA
22780bbbc0b3SAndrea Arcangeli static struct page *khugepaged_alloc_hugepage(void)
22790bbbc0b3SAndrea Arcangeli {
22800bbbc0b3SAndrea Arcangeli 	struct page *hpage;
22810bbbc0b3SAndrea Arcangeli 
22820bbbc0b3SAndrea Arcangeli 	do {
22830bbbc0b3SAndrea Arcangeli 		hpage = alloc_hugepage(khugepaged_defrag());
228481ab4201SAndi Kleen 		if (!hpage) {
228581ab4201SAndi Kleen 			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
22860bbbc0b3SAndrea Arcangeli 			khugepaged_alloc_sleep();
228781ab4201SAndi Kleen 		} else
228881ab4201SAndi Kleen 			count_vm_event(THP_COLLAPSE_ALLOC);
2289ba76149fSAndrea Arcangeli 	} while (unlikely(!hpage) &&
2290ba76149fSAndrea Arcangeli 		 likely(khugepaged_enabled()));
2291ba76149fSAndrea Arcangeli 	return hpage;
2292ba76149fSAndrea Arcangeli }
22930bbbc0b3SAndrea Arcangeli #endif
2294ba76149fSAndrea Arcangeli 
2295ba76149fSAndrea Arcangeli static void khugepaged_loop(void)
2296ba76149fSAndrea Arcangeli {
2297ba76149fSAndrea Arcangeli 	struct page *hpage;
2298ba76149fSAndrea Arcangeli 
22990bbbc0b3SAndrea Arcangeli #ifdef CONFIG_NUMA
23000bbbc0b3SAndrea Arcangeli 	hpage = NULL;
23010bbbc0b3SAndrea Arcangeli #endif
2302ba76149fSAndrea Arcangeli 	while (likely(khugepaged_enabled())) {
23030bbbc0b3SAndrea Arcangeli #ifndef CONFIG_NUMA
2304ba76149fSAndrea Arcangeli 		hpage = khugepaged_alloc_hugepage();
2305f300ea49SAndrea Arcangeli 		if (unlikely(!hpage))
2306ba76149fSAndrea Arcangeli 			break;
23070bbbc0b3SAndrea Arcangeli #else
23080bbbc0b3SAndrea Arcangeli 		if (IS_ERR(hpage)) {
23090bbbc0b3SAndrea Arcangeli 			khugepaged_alloc_sleep();
23100bbbc0b3SAndrea Arcangeli 			hpage = NULL;
23110bbbc0b3SAndrea Arcangeli 		}
23120bbbc0b3SAndrea Arcangeli #endif
2313ba76149fSAndrea Arcangeli 
2314ba76149fSAndrea Arcangeli 		khugepaged_do_scan(&hpage);
23150bbbc0b3SAndrea Arcangeli #ifndef CONFIG_NUMA
2316ba76149fSAndrea Arcangeli 		if (hpage)
2317ba76149fSAndrea Arcangeli 			put_page(hpage);
23180bbbc0b3SAndrea Arcangeli #endif
2319878aee7dSAndrea Arcangeli 		try_to_freeze();
2320878aee7dSAndrea Arcangeli 		if (unlikely(kthread_should_stop()))
2321878aee7dSAndrea Arcangeli 			break;
2322ba76149fSAndrea Arcangeli 		if (khugepaged_has_work()) {
2323ba76149fSAndrea Arcangeli 			if (!khugepaged_scan_sleep_millisecs)
2324ba76149fSAndrea Arcangeli 				continue;
23251dfb059bSAndrea Arcangeli 			wait_event_freezable_timeout(khugepaged_wait, false,
23261dfb059bSAndrea Arcangeli 			    msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
2327ba76149fSAndrea Arcangeli 		} else if (khugepaged_enabled())
2328878aee7dSAndrea Arcangeli 			wait_event_freezable(khugepaged_wait,
2329ba76149fSAndrea Arcangeli 					     khugepaged_wait_event());
2330ba76149fSAndrea Arcangeli 	}
2331ba76149fSAndrea Arcangeli }
2332ba76149fSAndrea Arcangeli 
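/*
 * Both sleeps used by the loop above are runtime tunables. Editor's sketch:
 * lowering scan_sleep_millisecs makes khugepaged wake more often between
 * scan passes (standard THP sysfs path; writing requires root). The default
 * is 10000, i.e. ten seconds between passes; alloc_sleep_millisecs next to
 * it controls the backoff after a failed hugepage allocation.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/mm/transparent_hugepage/khugepaged/"
			"scan_sleep_millisecs", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "1000\n");	/* wake every second instead of every 10s */
	fclose(f);
	return 0;
}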
2333ba76149fSAndrea Arcangeli static int khugepaged(void *none)
2334ba76149fSAndrea Arcangeli {
2335ba76149fSAndrea Arcangeli 	struct mm_slot *mm_slot;
2336ba76149fSAndrea Arcangeli 
2337878aee7dSAndrea Arcangeli 	set_freezable();
2338ba76149fSAndrea Arcangeli 	set_user_nice(current, 19);
2339ba76149fSAndrea Arcangeli 
2340ba76149fSAndrea Arcangeli 	/* serialize with start_khugepaged() */
2341ba76149fSAndrea Arcangeli 	mutex_lock(&khugepaged_mutex);
2342ba76149fSAndrea Arcangeli 
2343ba76149fSAndrea Arcangeli 	for (;;) {
2344ba76149fSAndrea Arcangeli 		mutex_unlock(&khugepaged_mutex);
2345a7d6e4ecSAndrea Arcangeli 		VM_BUG_ON(khugepaged_thread != current);
2346ba76149fSAndrea Arcangeli 		khugepaged_loop();
2347a7d6e4ecSAndrea Arcangeli 		VM_BUG_ON(khugepaged_thread != current);
2348ba76149fSAndrea Arcangeli 
2349ba76149fSAndrea Arcangeli 		mutex_lock(&khugepaged_mutex);
2350ba76149fSAndrea Arcangeli 		if (!khugepaged_enabled())
2351ba76149fSAndrea Arcangeli 			break;
2352878aee7dSAndrea Arcangeli 		if (unlikely(kthread_should_stop()))
2353878aee7dSAndrea Arcangeli 			break;
2354ba76149fSAndrea Arcangeli 	}
2355ba76149fSAndrea Arcangeli 
2356ba76149fSAndrea Arcangeli 	spin_lock(&khugepaged_mm_lock);
2357ba76149fSAndrea Arcangeli 	mm_slot = khugepaged_scan.mm_slot;
2358ba76149fSAndrea Arcangeli 	khugepaged_scan.mm_slot = NULL;
2359ba76149fSAndrea Arcangeli 	if (mm_slot)
2360ba76149fSAndrea Arcangeli 		collect_mm_slot(mm_slot);
2361ba76149fSAndrea Arcangeli 	spin_unlock(&khugepaged_mm_lock);
2362ba76149fSAndrea Arcangeli 
2363ba76149fSAndrea Arcangeli 	khugepaged_thread = NULL;
2364ba76149fSAndrea Arcangeli 	mutex_unlock(&khugepaged_mutex);
2365ba76149fSAndrea Arcangeli 
2366ba76149fSAndrea Arcangeli 	return 0;
2367ba76149fSAndrea Arcangeli }
2368ba76149fSAndrea Arcangeli 
236971e3aac0SAndrea Arcangeli void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
237071e3aac0SAndrea Arcangeli {
237171e3aac0SAndrea Arcangeli 	struct page *page;
237271e3aac0SAndrea Arcangeli 
237371e3aac0SAndrea Arcangeli 	spin_lock(&mm->page_table_lock);
237471e3aac0SAndrea Arcangeli 	if (unlikely(!pmd_trans_huge(*pmd))) {
237571e3aac0SAndrea Arcangeli 		spin_unlock(&mm->page_table_lock);
237671e3aac0SAndrea Arcangeli 		return;
237771e3aac0SAndrea Arcangeli 	}
237871e3aac0SAndrea Arcangeli 	page = pmd_page(*pmd);
237971e3aac0SAndrea Arcangeli 	VM_BUG_ON(!page_count(page));
238071e3aac0SAndrea Arcangeli 	get_page(page);
238171e3aac0SAndrea Arcangeli 	spin_unlock(&mm->page_table_lock);
238271e3aac0SAndrea Arcangeli 
238371e3aac0SAndrea Arcangeli 	split_huge_page(page);
238471e3aac0SAndrea Arcangeli 
238571e3aac0SAndrea Arcangeli 	put_page(page);
238671e3aac0SAndrea Arcangeli 	BUG_ON(pmd_trans_huge(*pmd));
238771e3aac0SAndrea Arcangeli }
238894fcc585SAndrea Arcangeli 
238994fcc585SAndrea Arcangeli static void split_huge_page_address(struct mm_struct *mm,
239094fcc585SAndrea Arcangeli 				    unsigned long address)
239194fcc585SAndrea Arcangeli {
239294fcc585SAndrea Arcangeli 	pgd_t *pgd;
239394fcc585SAndrea Arcangeli 	pud_t *pud;
239494fcc585SAndrea Arcangeli 	pmd_t *pmd;
239594fcc585SAndrea Arcangeli 
239694fcc585SAndrea Arcangeli 	VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
239794fcc585SAndrea Arcangeli 
239894fcc585SAndrea Arcangeli 	pgd = pgd_offset(mm, address);
239994fcc585SAndrea Arcangeli 	if (!pgd_present(*pgd))
240094fcc585SAndrea Arcangeli 		return;
240194fcc585SAndrea Arcangeli 
240294fcc585SAndrea Arcangeli 	pud = pud_offset(pgd, address);
240394fcc585SAndrea Arcangeli 	if (!pud_present(*pud))
240494fcc585SAndrea Arcangeli 		return;
240594fcc585SAndrea Arcangeli 
240694fcc585SAndrea Arcangeli 	pmd = pmd_offset(pud, address);
240794fcc585SAndrea Arcangeli 	if (!pmd_present(*pmd))
240894fcc585SAndrea Arcangeli 		return;
240994fcc585SAndrea Arcangeli 	/*
241094fcc585SAndrea Arcangeli 	 * Caller holds the mmap_sem write mode, so a huge pmd cannot
241194fcc585SAndrea Arcangeli 	 * materialize from under us.
241294fcc585SAndrea Arcangeli 	 */
241394fcc585SAndrea Arcangeli 	split_huge_page_pmd(mm, pmd);
241494fcc585SAndrea Arcangeli }
241594fcc585SAndrea Arcangeli 
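/*
 * Editor's userspace sketch of what drives the function below: splitting a
 * vma at a boundary that is not 2MB-aligned (here via mprotect) makes
 * vma_adjust call __vma_adjust_trans_huge(), which must split any huge pmd
 * straddling the new boundary. Assumes x86-64, 2MB huge pages, THP enabled.
 */
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#define HPAGE_SIZE (2UL * 1024 * 1024)

int main(void)
{
	void *buf;

	if (posix_memalign(&buf, HPAGE_SIZE, HPAGE_SIZE))
		return 1;
	madvise(buf, HPAGE_SIZE, MADV_HUGEPAGE);
	memset(buf, 1, HPAGE_SIZE);	/* may fault in one huge pmd */
	/*
	 * New vma boundary at buf + 4K: start & ~HPAGE_PMD_MASK is nonzero
	 * and the containing 2MB page lies inside the vma, so the kernel
	 * splits the huge pmd before adjusting the vmas.
	 */
	if (mprotect((char *)buf + 4096, HPAGE_SIZE - 4096, PROT_READ))
		return 1;
	return 0;
}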
241694fcc585SAndrea Arcangeli void __vma_adjust_trans_huge(struct vm_area_struct *vma,
241794fcc585SAndrea Arcangeli 			     unsigned long start,
241894fcc585SAndrea Arcangeli 			     unsigned long end,
241994fcc585SAndrea Arcangeli 			     long adjust_next)
242094fcc585SAndrea Arcangeli {
242194fcc585SAndrea Arcangeli 	/*
242294fcc585SAndrea Arcangeli 	 * If the new start address isn't hpage aligned and it could
242394fcc585SAndrea Arcangeli 	 * previously contain a hugepage: check if we need to split
242494fcc585SAndrea Arcangeli 	 * a huge pmd.
242594fcc585SAndrea Arcangeli 	 */
242694fcc585SAndrea Arcangeli 	if (start & ~HPAGE_PMD_MASK &&
242794fcc585SAndrea Arcangeli 	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
242894fcc585SAndrea Arcangeli 	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
242994fcc585SAndrea Arcangeli 		split_huge_page_address(vma->vm_mm, start);
243094fcc585SAndrea Arcangeli 
243194fcc585SAndrea Arcangeli 	/*
243294fcc585SAndrea Arcangeli 	 * If the new end address isn't hpage aligned and it could
243394fcc585SAndrea Arcangeli 	 * previously contain a hugepage: check if we need to split
243494fcc585SAndrea Arcangeli 	 * a huge pmd.
243594fcc585SAndrea Arcangeli 	 */
243694fcc585SAndrea Arcangeli 	if (end & ~HPAGE_PMD_MASK &&
243794fcc585SAndrea Arcangeli 	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
243894fcc585SAndrea Arcangeli 	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
243994fcc585SAndrea Arcangeli 		split_huge_page_address(vma->vm_mm, end);
244094fcc585SAndrea Arcangeli 
244194fcc585SAndrea Arcangeli 	/*
244294fcc585SAndrea Arcangeli 	 * If we're also updating the vma->vm_next->vm_start, if the new
244394fcc585SAndrea Arcangeli 	 * vm_next->vm_start isn't hpage aligned and it could previously
244494fcc585SAndrea Arcangeli 	 * contain a hugepage: check if we need to split a huge pmd.
244594fcc585SAndrea Arcangeli 	 */
244694fcc585SAndrea Arcangeli 	if (adjust_next > 0) {
244794fcc585SAndrea Arcangeli 		struct vm_area_struct *next = vma->vm_next;
244894fcc585SAndrea Arcangeli 		unsigned long nstart = next->vm_start;
244994fcc585SAndrea Arcangeli 		nstart += adjust_next << PAGE_SHIFT;
245094fcc585SAndrea Arcangeli 		if (nstart & ~HPAGE_PMD_MASK &&
245194fcc585SAndrea Arcangeli 		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
245294fcc585SAndrea Arcangeli 		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
245394fcc585SAndrea Arcangeli 			split_huge_page_address(next->vm_mm, nstart);
245494fcc585SAndrea Arcangeli 	}
245594fcc585SAndrea Arcangeli }
2456
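/*
 * Editor's sketch of the boundary predicate used three times above, as a
 * standalone helper with a few checks: a split is needed only when the new
 * boundary is not hpage aligned and the huge page that would contain it
 * lies entirely inside the vma. Constants assume 2MB huge pages.
 */
#include <assert.h>

#define HPAGE_PMD_SIZE	(2UL << 20)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

static int boundary_needs_split(unsigned long addr,
				unsigned long vm_start, unsigned long vm_end)
{
	return (addr & ~HPAGE_PMD_MASK) &&
	       (addr & HPAGE_PMD_MASK) >= vm_start &&
	       (addr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vm_end;
}

int main(void)
{
	/* aligned boundary: no huge pmd can straddle it */
	assert(!boundary_needs_split(0x400000, 0x400000, 0x800000));
	/* unaligned boundary inside a fully-covered huge page: split */
	assert(boundary_needs_split(0x401000, 0x400000, 0x800000));
	/* unaligned, but the huge page hangs past vm_end: no huge pmd there */
	assert(!boundary_needs_split(0x401000, 0x400000, 0x500000));
	return 0;
}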