// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default scan 8*512 pte (or vmas) every 30 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse hugepages if at least one pte is mapped, just as
 * would have happened if the vma had been large enough during page
 * fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 * @nr_pte_mapped_thp: number of pte mapped THP
 * @pte_mapped_thp: address array corresponding to pte mapped THP
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};
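
/*
 * The tunables above are exported through the sysfs attributes defined
 * below. With CONFIG_SYSFS enabled they appear under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/ (the attribute group is
 * registered by the transparent hugepage init code). A tuning session from
 * userspace might look like the following; the values are illustrative
 * only, not recommendations:
 *
 *	echo 100 > /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 *	echo 511 > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
 */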

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
		struct kobj_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
		struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR_RW(scan_sleep_millisecs);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
		struct kobj_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
		struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR_RW(alloc_sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
		struct kobj_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
		struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR_RW(pages_to_scan);

static ssize_t pages_collapsed_show(struct kobject *kobj,
		struct kobj_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
		struct kobj_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t defrag_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
			TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
		struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
			TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR_RW(defrag);

/*
 * max_ptes_none controls if khugepaged should collapse hugepages over
 * any unmapped ptes in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
static ssize_t max_ptes_none_show(struct kobject *kobj,
		struct kobj_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t max_ptes_none_store(struct kobject *kobj,
		struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR_RW(max_ptes_none);

static ssize_t max_ptes_swap_show(struct kobject *kobj,
		struct kobj_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t max_ptes_swap_store(struct kobject *kobj,
		struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR_RW(max_ptes_swap);

static ssize_t max_ptes_shared_show(struct kobject *kobj,
		struct kobj_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t max_ptes_shared_store(struct kobject *kobj,
		struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

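/*
 * max_ptes_swap bounds how many swapped-out ptes khugepaged is willing to
 * bring back in for a single collapse, and max_ptes_shared bounds how many
 * ptes may map pages that are also mapped by another process before the
 * collapse is skipped. Defaults for both are set in khugepaged_init()
 * (HPAGE_PMD_NR / 8 and HPAGE_PMD_NR / 2 respectively).
 */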
static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR_RW(max_ptes_shared);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault
		 * that may not happen any time soon.
		 */
		khugepaged_enter_vma(vma, *vm_flags);
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

bool hugepage_vma_check(struct vm_area_struct *vma,
			unsigned long vm_flags)
{
	if (!transhuge_vma_enabled(vma, vm_flags))
		return false;

	if (vm_flags & VM_NO_KHUGEPAGED)
		return false;

	/* Don't run khugepaged against DAX vma */
	if (vma_is_dax(vma))
		return false;

	if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
				vma->vm_pgoff, HPAGE_PMD_NR))
		return false;

	/* Enabled via shmem mount options or sysfs settings. */
	if (shmem_file(vma->vm_file))
		return shmem_huge_enabled(vma);

	/* THP settings require madvise. */
	if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
		return false;

	/* Only regular file is valid */
	if (file_thp_enabled(vma))
		return true;

	if (!vma->anon_vma || !vma_is_anonymous(vma))
		return false;
	if (vma_is_temporary_stack(vma))
		return false;

	return true;
}

void __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);
}

void khugepaged_enter_vma(struct vm_area_struct *vma,
			  unsigned long vm_flags)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
	    khugepaged_enabled() &&
	    (((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
	     (vma->vm_end & HPAGE_PMD_MASK))) {
		if (hugepage_vma_check(vma, vm_flags))
			__khugepaged_enter(vma->vm_mm);
	}
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we
		 * return all pagetables will be destroyed) until
		 * khugepaged has finished working on the pagetables
		 * under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

static void release_pte_page(struct page *page)
{
	mod_node_page_state(page_pgdat(page),
			NR_ISOLATED_ANON + page_is_file_lru(page),
			-compound_nr(page));
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct page *page, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = *_pte;

		page = pte_page(pteval);
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
				!PageCompound(page))
			release_pte_page(page);
	}

	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
		list_del(&page->lru);
		release_pte_page(page);
	}
}

static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}

static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1 &&
				++shared > khugepaged_max_ptes_shared) {
			result = SCAN_EXCEED_SHARED_PTE;
			count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
			goto out;
		}

		if (PageCompound(page)) {
			struct page *p;
			page = compound_head(page);

			/*
			 * Check if we have dealt with the compound page
			 * already
			 */
			list_for_each_entry(p, compound_pagelist, lru) {
				if (page == p)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				compound_nr(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);
next:
		/* There should be enough young pte to collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(!referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
						    referenced, writable, result);
		return 1;
	}
out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}

static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl,
				      struct list_head *compound_pagelist)
{
	struct page *src_page, *tmp;
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
				_pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				ptep_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			ptep_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, vma, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		mod_node_page_state(page_pgdat(src_page),
				    NR_ISOLATED_ANON + page_is_file_lru(src_page),
				    -compound_nr(src_page));
		unlock_page(src_page);
		free_swap_cache(src_page);
		putback_lru_page(src_page);
	}
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

static int khugepaged_node_load[MAX_NUMNODES];

static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balance if several nodes have the same hit record */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
				nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	/*
	 * If the hpage allocated earlier was briefly exposed in page cache
	 * before collapse_file() failed, it is possible that racing lookups
	 * have not yet completed, and would then be unpleasantly surprised by
	 * finding the hpage reused for the same mapping at a different offset.
	 * Just release the previous allocation if there is any danger of that.
	 */
	if (*hpage && page_count(*hpage) > 1) {
		put_page(*hpage);
		*hpage = NULL;
	}

	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif

/*
 * If the mmap_lock was temporarily dropped, revalidate the vma
 * before continuing with the collapse.
 * Return 0 if it succeeds, otherwise return a non-zero
 * value (scan code).
 */

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags))
		return SCAN_VMA_CHECK;
	/* Anon VMA expected */
	if (!vma->anon_vma || !vma_is_anonymous(vma))
		return SCAN_VMA_CHECK;
	return 0;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held.
 * Note that if false is returned, mmap_lock will be released.
 */

static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd,
					int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, haddr),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		vmf.pte = pte_offset_map(pmd, address);
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte)) {
			pte_unmap(vmf.pte);
			continue;
		}
		ret = do_swap_page(&vmf);

		/*
		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
		 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
		 * we do not retry here and swap entry will remain in pagetable
		 * resulting in later failure.
		 */
		if (ret & VM_FAULT_RETRY) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
		if (ret & VM_FAULT_ERROR) {
			mmap_read_unlock(mm);
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
		swapped_in++;
	}

	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
	if (swapped_in)
		lru_add_drain();

	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}

static void collapse_huge_page(struct mm_struct *mm,
			       unsigned long address,
			       struct page **hpage,
			       int node, int referenced, int unmapped)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;
	gfp_t gfp;

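	/*
	 * Rough outline of the collapse, summarizing the code below:
	 *  1. allocate the huge page and charge it, with mmap_lock dropped;
	 *  2. retake mmap_lock for read, revalidate the vma/pmd and swap in
	 *     any missing pages;
	 *  3. retake mmap_lock for write, clear the pmd, then isolate and
	 *     copy the small pages;
	 *  4. install the huge pmd and drop the locks.
	 */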
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}
	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);

	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	/*
	 * __collapse_huge_page_swapin will return with mmap_lock released
	 * when it fails. So we jump out_nolock directly in that case.
	 * Continuing to collapse causes inconsistency.
	 */
	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
						     pmd, referenced)) {
		goto out_nolock;
	}

	mmap_read_unlock(mm);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result)
		goto out_up_write;
	/* check if the pmd is still valid */
	if (mm_find_pmd(mm, address) != pmd)
		goto out_up_write;
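
	/*
	 * The vma and pmd have been revalidated under the exclusive
	 * mmap_lock. Take the anon_vma write lock and start an mmu_notifier
	 * invalidation so rmap walks and secondary MMUs cannot race with the
	 * pmd collapse below.
	 */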
Shutemov 1107b46e756fSKirill A. Shutemov anon_vma_lock_write(vma->anon_vma); 1108b46e756fSKirill A. Shutemov 11097269f999SJérôme Glisse mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, 11106f4f13e8SJérôme Glisse address, address + HPAGE_PMD_SIZE); 1111ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 1112ec649c9dSVille Syrjälä 1113ec649c9dSVille Syrjälä pte = pte_offset_map(pmd, address); 1114ec649c9dSVille Syrjälä pte_ptl = pte_lockptr(mm, pmd); 1115ec649c9dSVille Syrjälä 1116b46e756fSKirill A. Shutemov pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ 1117b46e756fSKirill A. Shutemov /* 1118b46e756fSKirill A. Shutemov * After this gup_fast can't run anymore. This also removes 1119b46e756fSKirill A. Shutemov * any huge TLB entry from the CPU so we won't allow 1120b46e756fSKirill A. Shutemov * huge and small TLB entries for the same virtual address 1121b46e756fSKirill A. Shutemov * to avoid the risk of CPU bugs in that area. 1122b46e756fSKirill A. Shutemov */ 1123b46e756fSKirill A. Shutemov _pmd = pmdp_collapse_flush(vma, address, pmd); 1124b46e756fSKirill A. Shutemov spin_unlock(pmd_ptl); 1125ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_end(&range); 1126b46e756fSKirill A. Shutemov 1127b46e756fSKirill A. Shutemov spin_lock(pte_ptl); 11285503fbf2SKirill A. Shutemov isolated = __collapse_huge_page_isolate(vma, address, pte, 11295503fbf2SKirill A. Shutemov &compound_pagelist); 1130b46e756fSKirill A. Shutemov spin_unlock(pte_ptl); 1131b46e756fSKirill A. Shutemov 1132b46e756fSKirill A. Shutemov if (unlikely(!isolated)) { 1133b46e756fSKirill A. Shutemov pte_unmap(pte); 1134b46e756fSKirill A. Shutemov spin_lock(pmd_ptl); 1135b46e756fSKirill A. Shutemov BUG_ON(!pmd_none(*pmd)); 1136b46e756fSKirill A. Shutemov /* 1137b46e756fSKirill A. Shutemov * We can only use set_pmd_at when establishing 1138b46e756fSKirill A. Shutemov * hugepmds and never for establishing regular pmds that 1139b46e756fSKirill A. Shutemov * points to regular pagetables. Use pmd_populate for that 1140b46e756fSKirill A. Shutemov */ 1141b46e756fSKirill A. Shutemov pmd_populate(mm, pmd, pmd_pgtable(_pmd)); 1142b46e756fSKirill A. Shutemov spin_unlock(pmd_ptl); 1143b46e756fSKirill A. Shutemov anon_vma_unlock_write(vma->anon_vma); 1144b46e756fSKirill A. Shutemov result = SCAN_FAIL; 114518d24a7cSMiaohe Lin goto out_up_write; 1146b46e756fSKirill A. Shutemov } 1147b46e756fSKirill A. Shutemov 1148b46e756fSKirill A. Shutemov /* 1149b46e756fSKirill A. Shutemov * All pages are isolated and locked so anon_vma rmap 1150b46e756fSKirill A. Shutemov * can't run anymore. 1151b46e756fSKirill A. Shutemov */ 1152b46e756fSKirill A. Shutemov anon_vma_unlock_write(vma->anon_vma); 1153b46e756fSKirill A. Shutemov 11545503fbf2SKirill A. Shutemov __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl, 11555503fbf2SKirill A. Shutemov &compound_pagelist); 1156b46e756fSKirill A. Shutemov pte_unmap(pte); 1157588d01f9SMiaohe Lin /* 1158588d01f9SMiaohe Lin * spin_lock() below is not the equivalent of smp_wmb(), but 1159588d01f9SMiaohe Lin * the smp_wmb() inside __SetPageUptodate() can be reused to 1160588d01f9SMiaohe Lin * avoid the copy_huge_page writes to become visible after 1161588d01f9SMiaohe Lin * the set_pmd_at() write. 1162588d01f9SMiaohe Lin */ 1163b46e756fSKirill A. Shutemov __SetPageUptodate(new_page); 1164b46e756fSKirill A. Shutemov pgtable = pmd_pgtable(_pmd); 1165b46e756fSKirill A. Shutemov 1166b46e756fSKirill A. 
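	/*
	 * Build the huge pmd entry for new_page; it is only installed with
	 * set_pmd_at() below, after __SetPageUptodate() has published the
	 * copied data.
	 */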
Shutemov _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); 1167f55e1014SLinus Torvalds _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); 1168b46e756fSKirill A. Shutemov 1169b46e756fSKirill A. Shutemov spin_lock(pmd_ptl); 1170b46e756fSKirill A. Shutemov BUG_ON(!pmd_none(*pmd)); 117140f2bbf7SDavid Hildenbrand page_add_new_anon_rmap(new_page, vma, address); 1172b518154eSJoonsoo Kim lru_cache_add_inactive_or_unevictable(new_page, vma); 1173b46e756fSKirill A. Shutemov pgtable_trans_huge_deposit(mm, pmd, pgtable); 1174b46e756fSKirill A. Shutemov set_pmd_at(mm, address, pmd, _pmd); 1175b46e756fSKirill A. Shutemov update_mmu_cache_pmd(vma, address, pmd); 1176b46e756fSKirill A. Shutemov spin_unlock(pmd_ptl); 1177b46e756fSKirill A. Shutemov 1178b46e756fSKirill A. Shutemov *hpage = NULL; 1179b46e756fSKirill A. Shutemov 1180b46e756fSKirill A. Shutemov khugepaged_pages_collapsed++; 1181b46e756fSKirill A. Shutemov result = SCAN_SUCCEED; 1182b46e756fSKirill A. Shutemov out_up_write: 1183d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 1184b46e756fSKirill A. Shutemov out_nolock: 11859d82c694SJohannes Weiner if (!IS_ERR_OR_NULL(*hpage)) 1186bbc6b703SMatthew Wilcox (Oracle) mem_cgroup_uncharge(page_folio(*hpage)); 1187b46e756fSKirill A. Shutemov trace_mm_collapse_huge_page(mm, isolated, result); 1188b46e756fSKirill A. Shutemov return; 1189b46e756fSKirill A. Shutemov } 1190b46e756fSKirill A. Shutemov 1191b46e756fSKirill A. Shutemov static int khugepaged_scan_pmd(struct mm_struct *mm, 1192b46e756fSKirill A. Shutemov struct vm_area_struct *vma, 1193b46e756fSKirill A. Shutemov unsigned long address, 1194b46e756fSKirill A. Shutemov struct page **hpage) 1195b46e756fSKirill A. Shutemov { 1196b46e756fSKirill A. Shutemov pmd_t *pmd; 1197b46e756fSKirill A. Shutemov pte_t *pte, *_pte; 119871a2c112SKirill A. Shutemov int ret = 0, result = 0, referenced = 0; 119971a2c112SKirill A. Shutemov int none_or_zero = 0, shared = 0; 1200b46e756fSKirill A. Shutemov struct page *page = NULL; 1201b46e756fSKirill A. Shutemov unsigned long _address; 1202b46e756fSKirill A. Shutemov spinlock_t *ptl; 1203b46e756fSKirill A. Shutemov int node = NUMA_NO_NODE, unmapped = 0; 12040db501f7SEbru Akagunduz bool writable = false; 1205b46e756fSKirill A. Shutemov 1206b46e756fSKirill A. Shutemov VM_BUG_ON(address & ~HPAGE_PMD_MASK); 1207b46e756fSKirill A. Shutemov 1208b46e756fSKirill A. Shutemov pmd = mm_find_pmd(mm, address); 1209b46e756fSKirill A. Shutemov if (!pmd) { 1210b46e756fSKirill A. Shutemov result = SCAN_PMD_NULL; 1211b46e756fSKirill A. Shutemov goto out; 1212b46e756fSKirill A. Shutemov } 1213b46e756fSKirill A. Shutemov 1214b46e756fSKirill A. Shutemov memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load)); 1215b46e756fSKirill A. Shutemov pte = pte_offset_map_lock(mm, pmd, address, &ptl); 1216b46e756fSKirill A. Shutemov for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR; 1217b46e756fSKirill A. Shutemov _pte++, _address += PAGE_SIZE) { 1218b46e756fSKirill A. Shutemov pte_t pteval = *_pte; 1219b46e756fSKirill A. Shutemov if (is_swap_pte(pteval)) { 1220b46e756fSKirill A. Shutemov if (++unmapped <= khugepaged_max_ptes_swap) { 1221e1e267c7SPeter Xu /* 1222e1e267c7SPeter Xu * Always be strict with uffd-wp 1223e1e267c7SPeter Xu * enabled swap entries. Please see 1224e1e267c7SPeter Xu * comment below for pte_uffd_wp(). 1225e1e267c7SPeter Xu */ 1226e1e267c7SPeter Xu if (pte_swp_uffd_wp(pteval)) { 1227e1e267c7SPeter Xu result = SCAN_PTE_UFFD_WP; 1228e1e267c7SPeter Xu goto out_unmap; 1229e1e267c7SPeter Xu } 1230b46e756fSKirill A. 
Shutemov continue; 1231b46e756fSKirill A. Shutemov } else { 1232b46e756fSKirill A. Shutemov result = SCAN_EXCEED_SWAP_PTE; 1233e9ea874aSYang Yang count_vm_event(THP_SCAN_EXCEED_SWAP_PTE); 1234b46e756fSKirill A. Shutemov goto out_unmap; 1235b46e756fSKirill A. Shutemov } 1236b46e756fSKirill A. Shutemov } 1237b46e756fSKirill A. Shutemov if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { 1238b46e756fSKirill A. Shutemov if (!userfaultfd_armed(vma) && 1239b46e756fSKirill A. Shutemov ++none_or_zero <= khugepaged_max_ptes_none) { 1240b46e756fSKirill A. Shutemov continue; 1241b46e756fSKirill A. Shutemov } else { 1242b46e756fSKirill A. Shutemov result = SCAN_EXCEED_NONE_PTE; 1243e9ea874aSYang Yang count_vm_event(THP_SCAN_EXCEED_NONE_PTE); 1244b46e756fSKirill A. Shutemov goto out_unmap; 1245b46e756fSKirill A. Shutemov } 1246b46e756fSKirill A. Shutemov } 1247e1e267c7SPeter Xu if (pte_uffd_wp(pteval)) { 1248e1e267c7SPeter Xu /* 1249e1e267c7SPeter Xu * Don't collapse the page if any of the small 1250e1e267c7SPeter Xu * PTEs are armed with uffd write protection. 1251e1e267c7SPeter Xu * Here we can also mark the new huge pmd as 1252e1e267c7SPeter Xu * write protected if any of the small ones is 12538958b249SHaitao Shi * marked, but that could bring unknown 1254e1e267c7SPeter Xu * userfault messages that fall outside of 1255e1e267c7SPeter Xu * the registered range. So, just be simple. 1256e1e267c7SPeter Xu */ 1257e1e267c7SPeter Xu result = SCAN_PTE_UFFD_WP; 1258e1e267c7SPeter Xu goto out_unmap; 1259e1e267c7SPeter Xu } 1260b46e756fSKirill A. Shutemov if (pte_write(pteval)) 1261b46e756fSKirill A. Shutemov writable = true; 1262b46e756fSKirill A. Shutemov 1263b46e756fSKirill A. Shutemov page = vm_normal_page(vma, _address, pteval); 1264*3218f871SAlex Sierra if (unlikely(!page) || unlikely(is_zone_device_page(page))) { 1265b46e756fSKirill A. Shutemov result = SCAN_PAGE_NULL; 1266b46e756fSKirill A. Shutemov goto out_unmap; 1267b46e756fSKirill A. Shutemov } 1268b46e756fSKirill A. Shutemov 126971a2c112SKirill A. Shutemov if (page_mapcount(page) > 1 && 127071a2c112SKirill A. Shutemov ++shared > khugepaged_max_ptes_shared) { 127171a2c112SKirill A. Shutemov result = SCAN_EXCEED_SHARED_PTE; 1272e9ea874aSYang Yang count_vm_event(THP_SCAN_EXCEED_SHARED_PTE); 127371a2c112SKirill A. Shutemov goto out_unmap; 127471a2c112SKirill A. Shutemov } 127571a2c112SKirill A. Shutemov 12765503fbf2SKirill A. Shutemov page = compound_head(page); 1277b46e756fSKirill A. Shutemov 1278b46e756fSKirill A. Shutemov /* 1279b46e756fSKirill A. Shutemov * Record which node the original page is from and save this 1280b46e756fSKirill A. Shutemov * information to khugepaged_node_load[]. 12810b8f0d87SQuanfa Fu * Khugepaged will allocate the hugepage from the node that has 1282b46e756fSKirill A. Shutemov * the max hit record. 1283b46e756fSKirill A. Shutemov */ 1284b46e756fSKirill A. Shutemov node = page_to_nid(page); 1285b46e756fSKirill A. Shutemov if (khugepaged_scan_abort(node)) { 1286b46e756fSKirill A. Shutemov result = SCAN_SCAN_ABORT; 1287b46e756fSKirill A. Shutemov goto out_unmap; 1288b46e756fSKirill A. Shutemov } 1289b46e756fSKirill A. Shutemov khugepaged_node_load[node]++; 1290b46e756fSKirill A. Shutemov if (!PageLRU(page)) { 1291b46e756fSKirill A. Shutemov result = SCAN_PAGE_LRU; 1292b46e756fSKirill A. Shutemov goto out_unmap; 1293b46e756fSKirill A. Shutemov } 1294b46e756fSKirill A. Shutemov if (PageLocked(page)) { 1295b46e756fSKirill A. Shutemov result = SCAN_PAGE_LOCK; 1296b46e756fSKirill A. Shutemov goto out_unmap; 1297b46e756fSKirill A.
Shutemov } 1298b46e756fSKirill A. Shutemov if (!PageAnon(page)) { 1299b46e756fSKirill A. Shutemov result = SCAN_PAGE_ANON; 1300b46e756fSKirill A. Shutemov goto out_unmap; 1301b46e756fSKirill A. Shutemov } 1302b46e756fSKirill A. Shutemov 1303b46e756fSKirill A. Shutemov /* 13049445689fSKirill A. Shutemov * Check if the page has any GUP (or other external) pins. 13059445689fSKirill A. Shutemov * 130636ee2c78SMiaohe Lin * Here the check is racy: it may see total_mapcount > refcount 13079445689fSKirill A. Shutemov * in some cases. 13089445689fSKirill A. Shutemov * For example, one process with one forked child process. 13099445689fSKirill A. Shutemov * The parent has the PMD split due to MADV_DONTNEED, then 13109445689fSKirill A. Shutemov * the child is trying to unmap the whole PMD, but khugepaged 13119445689fSKirill A. Shutemov * may be scanning the parent between the child clearing the 13129445689fSKirill A. Shutemov * PageDoubleMap flag and decrementing the mapcount. So 13139445689fSKirill A. Shutemov * khugepaged may see total_mapcount > refcount. 13149445689fSKirill A. Shutemov * 13159445689fSKirill A. Shutemov * But such a case is ephemeral, so we could always retry collapse 13169445689fSKirill A. Shutemov * later. However, it may report a false positive if the page 13179445689fSKirill A. Shutemov * has excessive GUP pins (i.e. 512). Anyway, the same check 13189445689fSKirill A. Shutemov * will be done again later, so the risk seems low. 1319b46e756fSKirill A. Shutemov */ 13209445689fSKirill A. Shutemov if (!is_refcount_suitable(page)) { 1321b46e756fSKirill A. Shutemov result = SCAN_PAGE_COUNT; 1322b46e756fSKirill A. Shutemov goto out_unmap; 1323b46e756fSKirill A. Shutemov } 1324b46e756fSKirill A. Shutemov if (pte_young(pteval) || 1325b46e756fSKirill A. Shutemov page_is_young(page) || PageReferenced(page) || 1326b46e756fSKirill A. Shutemov mmu_notifier_test_young(vma->vm_mm, address)) 13270db501f7SEbru Akagunduz referenced++; 1328b46e756fSKirill A. Shutemov } 1329ffe945e6SKirill A. Shutemov if (!writable) { 1330ffe945e6SKirill A. Shutemov result = SCAN_PAGE_RO; 1331ffe945e6SKirill A. Shutemov } else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) { 1332ffe945e6SKirill A. Shutemov result = SCAN_LACK_REFERENCED_PAGE; 1333ffe945e6SKirill A. Shutemov } else { 1334b46e756fSKirill A. Shutemov result = SCAN_SUCCEED; 1335b46e756fSKirill A. Shutemov ret = 1; 1336b46e756fSKirill A. Shutemov } 1337b46e756fSKirill A. Shutemov out_unmap: 1338b46e756fSKirill A. Shutemov pte_unmap_unlock(pte, ptl); 1339b46e756fSKirill A. Shutemov if (ret) { 1340b46e756fSKirill A. Shutemov node = khugepaged_find_target_node(); 1341c1e8d7c6SMichel Lespinasse /* collapse_huge_page will return with the mmap_lock released */ 1342ffe945e6SKirill A. Shutemov collapse_huge_page(mm, address, hpage, node, 1343ffe945e6SKirill A. Shutemov referenced, unmapped); 1344b46e756fSKirill A. Shutemov } 1345b46e756fSKirill A. Shutemov out: 1346b46e756fSKirill A. Shutemov trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced, 1347b46e756fSKirill A. Shutemov none_or_zero, result, unmapped); 1348b46e756fSKirill A. Shutemov return ret; 1349b46e756fSKirill A. Shutemov } 1350b46e756fSKirill A. Shutemov 1351b46e756fSKirill A. Shutemov static void collect_mm_slot(struct mm_slot *mm_slot) 1352b46e756fSKirill A. Shutemov { 1353b46e756fSKirill A. Shutemov struct mm_struct *mm = mm_slot->mm; 1354b46e756fSKirill A. Shutemov 135535f3aa39SLance Roy lockdep_assert_held(&khugepaged_mm_lock); 1356b46e756fSKirill A. Shutemov 1357b46e756fSKirill A.
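	/*
	 * Only tear the slot down once the mm has actually exited;
	 * otherwise it stays on the scan list for a later pass.
	 */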
Shutemov if (khugepaged_test_exit(mm)) { 1358b46e756fSKirill A. Shutemov /* free mm_slot */ 1359b46e756fSKirill A. Shutemov hash_del(&mm_slot->hash); 1360b46e756fSKirill A. Shutemov list_del(&mm_slot->mm_node); 1361b46e756fSKirill A. Shutemov 1362b46e756fSKirill A. Shutemov /* 1363b46e756fSKirill A. Shutemov * Not strictly needed because the mm exited already. 1364b46e756fSKirill A. Shutemov * 1365b46e756fSKirill A. Shutemov * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); 1366b46e756fSKirill A. Shutemov */ 1367b46e756fSKirill A. Shutemov 1368b46e756fSKirill A. Shutemov /* khugepaged_mm_lock actually not necessary for the below */ 1369b46e756fSKirill A. Shutemov free_mm_slot(mm_slot); 1370b46e756fSKirill A. Shutemov mmdrop(mm); 1371b46e756fSKirill A. Shutemov } 1372b46e756fSKirill A. Shutemov } 1373b46e756fSKirill A. Shutemov 1374396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_SHMEM 137527e1f827SSong Liu /* 137627e1f827SSong Liu * Notify khugepaged that given addr of the mm is pte-mapped THP. Then 137727e1f827SSong Liu * khugepaged should try to collapse the page table. 137827e1f827SSong Liu */ 1379081c3256SMiaohe Lin static void khugepaged_add_pte_mapped_thp(struct mm_struct *mm, 138027e1f827SSong Liu unsigned long addr) 138127e1f827SSong Liu { 138227e1f827SSong Liu struct mm_slot *mm_slot; 138327e1f827SSong Liu 138427e1f827SSong Liu VM_BUG_ON(addr & ~HPAGE_PMD_MASK); 138527e1f827SSong Liu 138627e1f827SSong Liu spin_lock(&khugepaged_mm_lock); 138727e1f827SSong Liu mm_slot = get_mm_slot(mm); 138827e1f827SSong Liu if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP)) 138927e1f827SSong Liu mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr; 139027e1f827SSong Liu spin_unlock(&khugepaged_mm_lock); 139127e1f827SSong Liu } 139227e1f827SSong Liu 1393e59a47b8SPasha Tatashin static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma, 1394e59a47b8SPasha Tatashin unsigned long addr, pmd_t *pmdp) 1395e59a47b8SPasha Tatashin { 1396e59a47b8SPasha Tatashin spinlock_t *ptl; 1397e59a47b8SPasha Tatashin pmd_t pmd; 1398e59a47b8SPasha Tatashin 139980110bbfSPasha Tatashin mmap_assert_write_locked(mm); 1400e59a47b8SPasha Tatashin ptl = pmd_lock(vma->vm_mm, pmdp); 1401e59a47b8SPasha Tatashin pmd = pmdp_collapse_flush(vma, addr, pmdp); 1402e59a47b8SPasha Tatashin spin_unlock(ptl); 1403e59a47b8SPasha Tatashin mm_dec_nr_ptes(mm); 140480110bbfSPasha Tatashin page_table_check_pte_clear_range(mm, addr, pmd); 1405e59a47b8SPasha Tatashin pte_free(mm, pmd_pgtable(pmd)); 1406e59a47b8SPasha Tatashin } 1407e59a47b8SPasha Tatashin 140827e1f827SSong Liu /** 1409336e6b53SAlex Shi * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at 1410336e6b53SAlex Shi * address haddr. 1411336e6b53SAlex Shi * 1412336e6b53SAlex Shi * @mm: process address space where collapse happens 1413336e6b53SAlex Shi * @addr: THP collapse address 141427e1f827SSong Liu * 141527e1f827SSong Liu * This function checks whether all the PTEs in the PMD are pointing to the 141627e1f827SSong Liu * right THP. If so, retract the page table so the THP can refault in with 141727e1f827SSong Liu * as pmd-mapped. 
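 * The page table is only retracted when every present PTE maps a
 * subpage of that THP; any mismatch aborts without touching the mapping.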
141827e1f827SSong Liu */ 141927e1f827SSong Liu void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) 142027e1f827SSong Liu { 142127e1f827SSong Liu unsigned long haddr = addr & HPAGE_PMD_MASK; 142227e1f827SSong Liu struct vm_area_struct *vma = find_vma(mm, haddr); 1423119a5fc1SHugh Dickins struct page *hpage; 142427e1f827SSong Liu pte_t *start_pte, *pte; 1425e59a47b8SPasha Tatashin pmd_t *pmd; 142627e1f827SSong Liu spinlock_t *ptl; 142727e1f827SSong Liu int count = 0; 142827e1f827SSong Liu int i; 142927e1f827SSong Liu 143027e1f827SSong Liu if (!vma || !vma->vm_file || 1431fef792a4SMiaohe Lin !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE)) 143227e1f827SSong Liu return; 143327e1f827SSong Liu 143427e1f827SSong Liu /* 143527e1f827SSong Liu * This vm_flags may not have VM_HUGEPAGE if the page was not 143627e1f827SSong Liu * collapsed by this mm. But we can still collapse if the page is 143727e1f827SSong Liu * the valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check() 143827e1f827SSong Liu * will not fail the vma for missing VM_HUGEPAGE 143927e1f827SSong Liu */ 144027e1f827SSong Liu if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE)) 144127e1f827SSong Liu return; 144227e1f827SSong Liu 1443deb4c93aSPeter Xu /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */ 1444deb4c93aSPeter Xu if (userfaultfd_wp(vma)) 1445deb4c93aSPeter Xu return; 1446deb4c93aSPeter Xu 1447119a5fc1SHugh Dickins hpage = find_lock_page(vma->vm_file->f_mapping, 1448119a5fc1SHugh Dickins linear_page_index(vma, haddr)); 1449119a5fc1SHugh Dickins if (!hpage) 1450119a5fc1SHugh Dickins return; 1451119a5fc1SHugh Dickins 1452119a5fc1SHugh Dickins if (!PageHead(hpage)) 1453119a5fc1SHugh Dickins goto drop_hpage; 1454119a5fc1SHugh Dickins 145527e1f827SSong Liu pmd = mm_find_pmd(mm, haddr); 145627e1f827SSong Liu if (!pmd) 1457119a5fc1SHugh Dickins goto drop_hpage; 145827e1f827SSong Liu 145927e1f827SSong Liu start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl); 146027e1f827SSong Liu 146127e1f827SSong Liu /* step 1: check all mapped PTEs are to the right huge page */ 146227e1f827SSong Liu for (i = 0, addr = haddr, pte = start_pte; 146327e1f827SSong Liu i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) { 146427e1f827SSong Liu struct page *page; 146527e1f827SSong Liu 146627e1f827SSong Liu /* empty pte, skip */ 146727e1f827SSong Liu if (pte_none(*pte)) 146827e1f827SSong Liu continue; 146927e1f827SSong Liu 147027e1f827SSong Liu /* page swapped out, abort */ 147127e1f827SSong Liu if (!pte_present(*pte)) 147227e1f827SSong Liu goto abort; 147327e1f827SSong Liu 147427e1f827SSong Liu page = vm_normal_page(vma, addr, *pte); 1475*3218f871SAlex Sierra if (WARN_ON_ONCE(page && is_zone_device_page(page))) 1476*3218f871SAlex Sierra page = NULL; 147727e1f827SSong Liu /* 1478119a5fc1SHugh Dickins * Note that uprobe, debugger, or MAP_PRIVATE may change the 1479119a5fc1SHugh Dickins * page table, but the new page will not be a subpage of hpage. 
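		 * Such a page will not match hpage + i below, and the
		 * collapse is aborted.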
148027e1f827SSong Liu */ 1481119a5fc1SHugh Dickins if (hpage + i != page) 148227e1f827SSong Liu goto abort; 148327e1f827SSong Liu count++; 148427e1f827SSong Liu } 148527e1f827SSong Liu 148627e1f827SSong Liu /* step 2: adjust rmap */ 148727e1f827SSong Liu for (i = 0, addr = haddr, pte = start_pte; 148827e1f827SSong Liu i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) { 148927e1f827SSong Liu struct page *page; 149027e1f827SSong Liu 149127e1f827SSong Liu if (pte_none(*pte)) 149227e1f827SSong Liu continue; 149327e1f827SSong Liu page = vm_normal_page(vma, addr, *pte); 1494*3218f871SAlex Sierra if (WARN_ON_ONCE(page && is_zone_device_page(page))) 1495*3218f871SAlex Sierra goto abort; 1496cea86fe2SHugh Dickins page_remove_rmap(page, vma, false); 149727e1f827SSong Liu } 149827e1f827SSong Liu 149927e1f827SSong Liu pte_unmap_unlock(start_pte, ptl); 150027e1f827SSong Liu 150127e1f827SSong Liu /* step 3: set proper refcount and mm_counters. */ 1502119a5fc1SHugh Dickins if (count) { 150327e1f827SSong Liu page_ref_sub(hpage, count); 150427e1f827SSong Liu add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count); 150527e1f827SSong Liu } 150627e1f827SSong Liu 150727e1f827SSong Liu /* step 4: collapse pmd */ 1508e59a47b8SPasha Tatashin collapse_and_free_pmd(mm, vma, haddr, pmd); 1509119a5fc1SHugh Dickins drop_hpage: 1510119a5fc1SHugh Dickins unlock_page(hpage); 1511119a5fc1SHugh Dickins put_page(hpage); 151227e1f827SSong Liu return; 151327e1f827SSong Liu 151427e1f827SSong Liu abort: 151527e1f827SSong Liu pte_unmap_unlock(start_pte, ptl); 1516119a5fc1SHugh Dickins goto drop_hpage; 151727e1f827SSong Liu } 151827e1f827SSong Liu 15190edf61e5SMiaohe Lin static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot) 152027e1f827SSong Liu { 152127e1f827SSong Liu struct mm_struct *mm = mm_slot->mm; 152227e1f827SSong Liu int i; 152327e1f827SSong Liu 152427e1f827SSong Liu if (likely(mm_slot->nr_pte_mapped_thp == 0)) 15250edf61e5SMiaohe Lin return; 152627e1f827SSong Liu 1527d8ed45c5SMichel Lespinasse if (!mmap_write_trylock(mm)) 15280edf61e5SMiaohe Lin return; 152927e1f827SSong Liu 153027e1f827SSong Liu if (unlikely(khugepaged_test_exit(mm))) 153127e1f827SSong Liu goto out; 153227e1f827SSong Liu 153327e1f827SSong Liu for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++) 153427e1f827SSong Liu collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]); 153527e1f827SSong Liu 153627e1f827SSong Liu out: 153727e1f827SSong Liu mm_slot->nr_pte_mapped_thp = 0; 1538d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 153927e1f827SSong Liu } 154027e1f827SSong Liu 1541f3f0e1d2SKirill A. Shutemov static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) 1542f3f0e1d2SKirill A. Shutemov { 1543f3f0e1d2SKirill A. Shutemov struct vm_area_struct *vma; 154418e77600SHugh Dickins struct mm_struct *mm; 1545f3f0e1d2SKirill A. Shutemov unsigned long addr; 1546e59a47b8SPasha Tatashin pmd_t *pmd; 1547f3f0e1d2SKirill A. Shutemov 1548f3f0e1d2SKirill A. Shutemov i_mmap_lock_write(mapping); 1549f3f0e1d2SKirill A. Shutemov vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { 155027e1f827SSong Liu /* 155127e1f827SSong Liu * Check vma->anon_vma to exclude MAP_PRIVATE mappings that 155227e1f827SSong Liu * got written to. These VMAs are likely not worth investing 15533e4e28c5SMichel Lespinasse * mmap_write_lock(mm) as PMD-mapping is likely to be split 155427e1f827SSong Liu * later. 
155527e1f827SSong Liu * 155636ee2c78SMiaohe Lin * Note that vma->anon_vma check is racy: it can be set up after 1557c1e8d7c6SMichel Lespinasse * the check but before we took mmap_lock by the fault path. 155827e1f827SSong Liu * But page lock would prevent establishing any new ptes of the 155927e1f827SSong Liu * page, so we are safe. 156027e1f827SSong Liu * 156127e1f827SSong Liu * An alternative would be drop the check, but check that page 156227e1f827SSong Liu * table is clear before calling pmdp_collapse_flush() under 156327e1f827SSong Liu * ptl. It has higher chance to recover THP for the VMA, but 156427e1f827SSong Liu * has higher cost too. 156527e1f827SSong Liu */ 1566f3f0e1d2SKirill A. Shutemov if (vma->anon_vma) 1567f3f0e1d2SKirill A. Shutemov continue; 1568f3f0e1d2SKirill A. Shutemov addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); 1569f3f0e1d2SKirill A. Shutemov if (addr & ~HPAGE_PMD_MASK) 1570f3f0e1d2SKirill A. Shutemov continue; 1571f3f0e1d2SKirill A. Shutemov if (vma->vm_end < addr + HPAGE_PMD_SIZE) 1572f3f0e1d2SKirill A. Shutemov continue; 157318e77600SHugh Dickins mm = vma->vm_mm; 157418e77600SHugh Dickins pmd = mm_find_pmd(mm, addr); 1575f3f0e1d2SKirill A. Shutemov if (!pmd) 1576f3f0e1d2SKirill A. Shutemov continue; 1577f3f0e1d2SKirill A. Shutemov /* 1578c1e8d7c6SMichel Lespinasse * We need exclusive mmap_lock to retract page table. 157927e1f827SSong Liu * 158027e1f827SSong Liu * We use trylock due to lock inversion: we need to acquire 1581c1e8d7c6SMichel Lespinasse * mmap_lock while holding page lock. Fault path does it in 158227e1f827SSong Liu * reverse order. Trylock is a way to avoid deadlock. 1583f3f0e1d2SKirill A. Shutemov */ 158418e77600SHugh Dickins if (mmap_write_trylock(mm)) { 1585deb4c93aSPeter Xu /* 1586deb4c93aSPeter Xu * When a vma is registered with uffd-wp, we can't 1587deb4c93aSPeter Xu * recycle the pmd pgtable because there can be pte 1588deb4c93aSPeter Xu * markers installed. Skip it only, so the rest mm/vma 1589deb4c93aSPeter Xu * can still have the same file mapped hugely, however 1590deb4c93aSPeter Xu * it'll always mapped in small page size for uffd-wp 1591deb4c93aSPeter Xu * registered ranges. 1592deb4c93aSPeter Xu */ 1593deb4c93aSPeter Xu if (!khugepaged_test_exit(mm) && !userfaultfd_wp(vma)) 1594e59a47b8SPasha Tatashin collapse_and_free_pmd(mm, vma, addr, pmd); 159518e77600SHugh Dickins mmap_write_unlock(mm); 159627e1f827SSong Liu } else { 159727e1f827SSong Liu /* Try again later */ 159818e77600SHugh Dickins khugepaged_add_pte_mapped_thp(mm, addr); 1599f3f0e1d2SKirill A. Shutemov } 1600f3f0e1d2SKirill A. Shutemov } 1601f3f0e1d2SKirill A. Shutemov i_mmap_unlock_write(mapping); 1602f3f0e1d2SKirill A. Shutemov } 1603f3f0e1d2SKirill A. Shutemov 1604f3f0e1d2SKirill A. Shutemov /** 160599cb0dbdSSong Liu * collapse_file - collapse filemap/tmpfs/shmem pages into huge one. 1606f3f0e1d2SKirill A. Shutemov * 1607336e6b53SAlex Shi * @mm: process address space where collapse happens 1608336e6b53SAlex Shi * @file: file that collapse on 1609336e6b53SAlex Shi * @start: collapse start address 1610336e6b53SAlex Shi * @hpage: new allocated huge page for collapse 1611336e6b53SAlex Shi * @node: appointed node the new huge page allocate from 1612336e6b53SAlex Shi * 1613f3f0e1d2SKirill A. 
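 * On success *hpage is cleared and khugepaged_pages_collapsed is bumped;
 * on failure the old pages are put back and the page cache is restored.
 *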
Shutemov * Basic scheme is simple, details are more complex: 161487c460a0SHugh Dickins * - allocate and lock a new huge page; 161577da9389SMatthew Wilcox * - scan page cache replacing old pages with the new one 161699cb0dbdSSong Liu * + swap/gup in pages if necessary; 1617f3f0e1d2SKirill A. Shutemov * + fill in gaps; 161877da9389SMatthew Wilcox * + keep old pages around in case rollback is required; 161977da9389SMatthew Wilcox * - if replacing succeeds: 1620f3f0e1d2SKirill A. Shutemov * + copy data over; 1621f3f0e1d2SKirill A. Shutemov * + free old pages; 162287c460a0SHugh Dickins * + unlock huge page; 1623f3f0e1d2SKirill A. Shutemov * - if replacing failed; 1624f3f0e1d2SKirill A. Shutemov * + put all pages back and unfreeze them; 162577da9389SMatthew Wilcox * + restore gaps in the page cache; 162687c460a0SHugh Dickins * + unlock and free huge page; 1627f3f0e1d2SKirill A. Shutemov */ 1628579c571eSSong Liu static void collapse_file(struct mm_struct *mm, 1629579c571eSSong Liu struct file *file, pgoff_t start, 1630f3f0e1d2SKirill A. Shutemov struct page **hpage, int node) 1631f3f0e1d2SKirill A. Shutemov { 1632579c571eSSong Liu struct address_space *mapping = file->f_mapping; 1633f3f0e1d2SKirill A. Shutemov gfp_t gfp; 163477da9389SMatthew Wilcox struct page *new_page; 1635f3f0e1d2SKirill A. Shutemov pgoff_t index, end = start + HPAGE_PMD_NR; 1636f3f0e1d2SKirill A. Shutemov LIST_HEAD(pagelist); 163777da9389SMatthew Wilcox XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER); 1638f3f0e1d2SKirill A. Shutemov int nr_none = 0, result = SCAN_SUCCEED; 163999cb0dbdSSong Liu bool is_shmem = shmem_file(file); 1640bf9eceadSMuchun Song int nr; 1641f3f0e1d2SKirill A. Shutemov 164299cb0dbdSSong Liu VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem); 1643f3f0e1d2SKirill A. Shutemov VM_BUG_ON(start & (HPAGE_PMD_NR - 1)); 1644f3f0e1d2SKirill A. Shutemov 1645f3f0e1d2SKirill A. Shutemov /* Only allocate from the target node */ 164641b6167eSMichal Hocko gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE; 1647f3f0e1d2SKirill A. Shutemov 1648f3f0e1d2SKirill A. Shutemov new_page = khugepaged_alloc_page(hpage, gfp, node); 1649f3f0e1d2SKirill A. Shutemov if (!new_page) { 1650f3f0e1d2SKirill A. Shutemov result = SCAN_ALLOC_HUGE_PAGE_FAIL; 1651f3f0e1d2SKirill A. Shutemov goto out; 1652f3f0e1d2SKirill A. Shutemov } 1653f3f0e1d2SKirill A. Shutemov 16548f425e4eSMatthew Wilcox (Oracle) if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) { 1655f3f0e1d2SKirill A. Shutemov result = SCAN_CGROUP_CHARGE_FAIL; 1656f3f0e1d2SKirill A. Shutemov goto out; 1657f3f0e1d2SKirill A. Shutemov } 16589d82c694SJohannes Weiner count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC); 1659f3f0e1d2SKirill A. Shutemov 16606b24ca4aSMatthew Wilcox (Oracle) /* 16616b24ca4aSMatthew Wilcox (Oracle) * Ensure we have slots for all the pages in the range. 
This is 16626b24ca4aSMatthew Wilcox (Oracle) * almost certainly a no-op because most of the pages must be present 16636b24ca4aSMatthew Wilcox (Oracle) */ 166495feeabbSHugh Dickins do { 166595feeabbSHugh Dickins xas_lock_irq(&xas); 166695feeabbSHugh Dickins xas_create_range(&xas); 166795feeabbSHugh Dickins if (!xas_error(&xas)) 166895feeabbSHugh Dickins break; 166995feeabbSHugh Dickins xas_unlock_irq(&xas); 167095feeabbSHugh Dickins if (!xas_nomem(&xas, GFP_KERNEL)) { 167195feeabbSHugh Dickins result = SCAN_FAIL; 167295feeabbSHugh Dickins goto out; 167395feeabbSHugh Dickins } 167495feeabbSHugh Dickins } while (1); 167595feeabbSHugh Dickins 1676042a3082SHugh Dickins __SetPageLocked(new_page); 167799cb0dbdSSong Liu if (is_shmem) 1678042a3082SHugh Dickins __SetPageSwapBacked(new_page); 1679f3f0e1d2SKirill A. Shutemov new_page->index = start; 1680f3f0e1d2SKirill A. Shutemov new_page->mapping = mapping; 1681f3f0e1d2SKirill A. Shutemov 1682f3f0e1d2SKirill A. Shutemov /* 168387c460a0SHugh Dickins * At this point the new_page is locked and not up-to-date. 168487c460a0SHugh Dickins * It's safe to insert it into the page cache, because nobody would 168587c460a0SHugh Dickins * be able to map it or use it in another way until we unlock it. 1686f3f0e1d2SKirill A. Shutemov */ 1687f3f0e1d2SKirill A. Shutemov 168877da9389SMatthew Wilcox xas_set(&xas, start); 168977da9389SMatthew Wilcox for (index = start; index < end; index++) { 169077da9389SMatthew Wilcox struct page *page = xas_next(&xas); 169177da9389SMatthew Wilcox 169277da9389SMatthew Wilcox VM_BUG_ON(index != xas.xa_index); 169399cb0dbdSSong Liu if (is_shmem) { 169477da9389SMatthew Wilcox if (!page) { 1695701270faSHugh Dickins /* 169699cb0dbdSSong Liu * Stop if extent has been truncated or 169799cb0dbdSSong Liu * hole-punched, and is now completely 169899cb0dbdSSong Liu * empty. 1699701270faSHugh Dickins */ 1700701270faSHugh Dickins if (index == start) { 1701701270faSHugh Dickins if (!xas_next_entry(&xas, end - 1)) { 1702701270faSHugh Dickins result = SCAN_TRUNCATED; 1703042a3082SHugh Dickins goto xa_locked; 1704701270faSHugh Dickins } 1705701270faSHugh Dickins xas_set(&xas, index); 1706701270faSHugh Dickins } 170777da9389SMatthew Wilcox if (!shmem_charge(mapping->host, 1)) { 1708f3f0e1d2SKirill A. Shutemov result = SCAN_FAIL; 1709042a3082SHugh Dickins goto xa_locked; 1710f3f0e1d2SKirill A. Shutemov } 17114101196bSMatthew Wilcox (Oracle) xas_store(&xas, new_page); 171277da9389SMatthew Wilcox nr_none++; 171377da9389SMatthew Wilcox continue; 1714f3f0e1d2SKirill A. Shutemov } 1715f3f0e1d2SKirill A. Shutemov 17163159f943SMatthew Wilcox if (xa_is_value(page) || !PageUptodate(page)) { 171777da9389SMatthew Wilcox xas_unlock_irq(&xas); 1718f3f0e1d2SKirill A. Shutemov /* swap in or instantiate fallocated page */ 1719f3f0e1d2SKirill A. Shutemov if (shmem_getpage(mapping->host, index, &page, 1720acdd9f8eSHugh Dickins SGP_NOALLOC)) { 1721f3f0e1d2SKirill A. Shutemov result = SCAN_FAIL; 172277da9389SMatthew Wilcox goto xa_unlocked; 1723f3f0e1d2SKirill A. Shutemov } 1724f3f0e1d2SKirill A. Shutemov } else if (trylock_page(page)) { 1725f3f0e1d2SKirill A. Shutemov get_page(page); 1726042a3082SHugh Dickins xas_unlock_irq(&xas); 1727f3f0e1d2SKirill A. Shutemov } else { 1728f3f0e1d2SKirill A. Shutemov result = SCAN_PAGE_LOCK; 1729042a3082SHugh Dickins goto xa_locked; 1730f3f0e1d2SKirill A. 
Shutemov } 173199cb0dbdSSong Liu } else { /* !is_shmem */ 173299cb0dbdSSong Liu if (!page || xa_is_value(page)) { 173399cb0dbdSSong Liu xas_unlock_irq(&xas); 173499cb0dbdSSong Liu page_cache_sync_readahead(mapping, &file->f_ra, 173599cb0dbdSSong Liu file, index, 1736e5a59d30SDavid Howells end - index); 173799cb0dbdSSong Liu /* drain pagevecs to help isolate_lru_page() */ 173899cb0dbdSSong Liu lru_add_drain(); 173999cb0dbdSSong Liu page = find_lock_page(mapping, index); 174099cb0dbdSSong Liu if (unlikely(page == NULL)) { 174199cb0dbdSSong Liu result = SCAN_FAIL; 174299cb0dbdSSong Liu goto xa_unlocked; 174399cb0dbdSSong Liu } 174475f36069SSong Liu } else if (PageDirty(page)) { 174575f36069SSong Liu /* 174675f36069SSong Liu * khugepaged only works on read-only fd, 174775f36069SSong Liu * so this page is dirty because it hasn't 174875f36069SSong Liu * been flushed since first write. There 174975f36069SSong Liu * won't be new dirty pages. 175075f36069SSong Liu * 175175f36069SSong Liu * Trigger async flush here and hope the 175275f36069SSong Liu * writeback is done when khugepaged 175375f36069SSong Liu * revisits this page. 175475f36069SSong Liu * 175575f36069SSong Liu * This is a one-off situation. We are not 175675f36069SSong Liu * forcing writeback in loop. 175775f36069SSong Liu */ 175875f36069SSong Liu xas_unlock_irq(&xas); 175975f36069SSong Liu filemap_flush(mapping); 176075f36069SSong Liu result = SCAN_FAIL; 176175f36069SSong Liu goto xa_unlocked; 176274c42e1bSRongwei Wang } else if (PageWriteback(page)) { 176374c42e1bSRongwei Wang xas_unlock_irq(&xas); 176474c42e1bSRongwei Wang result = SCAN_FAIL; 176574c42e1bSRongwei Wang goto xa_unlocked; 176699cb0dbdSSong Liu } else if (trylock_page(page)) { 176799cb0dbdSSong Liu get_page(page); 176899cb0dbdSSong Liu xas_unlock_irq(&xas); 176999cb0dbdSSong Liu } else { 177099cb0dbdSSong Liu result = SCAN_PAGE_LOCK; 177199cb0dbdSSong Liu goto xa_locked; 177299cb0dbdSSong Liu } 177399cb0dbdSSong Liu } 1774f3f0e1d2SKirill A. Shutemov 1775f3f0e1d2SKirill A. Shutemov /* 1776b93b0163SMatthew Wilcox * The page must be locked, so we can drop the i_pages lock 1777f3f0e1d2SKirill A. Shutemov * without racing with truncate. 1778f3f0e1d2SKirill A. Shutemov */ 1779f3f0e1d2SKirill A. Shutemov VM_BUG_ON_PAGE(!PageLocked(page), page); 17804655e5e5SSong Liu 17814655e5e5SSong Liu /* make sure the page is up to date */ 17824655e5e5SSong Liu if (unlikely(!PageUptodate(page))) { 17834655e5e5SSong Liu result = SCAN_FAIL; 17844655e5e5SSong Liu goto out_unlock; 17854655e5e5SSong Liu } 178606a5e126SHugh Dickins 178706a5e126SHugh Dickins /* 178806a5e126SHugh Dickins * If file was truncated then extended, or hole-punched, before 178906a5e126SHugh Dickins * we locked the first page, then a THP might be there already. 179006a5e126SHugh Dickins */ 179106a5e126SHugh Dickins if (PageTransCompound(page)) { 179206a5e126SHugh Dickins result = SCAN_PAGE_COMPOUND; 179306a5e126SHugh Dickins goto out_unlock; 179406a5e126SHugh Dickins } 1795f3f0e1d2SKirill A. Shutemov 1796f3f0e1d2SKirill A. Shutemov if (page_mapping(page) != mapping) { 1797f3f0e1d2SKirill A. Shutemov result = SCAN_TRUNCATED; 1798f3f0e1d2SKirill A. Shutemov goto out_unlock; 1799f3f0e1d2SKirill A. Shutemov } 1800f3f0e1d2SKirill A. 
Shutemov 180174c42e1bSRongwei Wang if (!is_shmem && (PageDirty(page) || 180274c42e1bSRongwei Wang PageWriteback(page))) { 18034655e5e5SSong Liu /* 18044655e5e5SSong Liu * khugepaged only works on read-only fd, so this 18054655e5e5SSong Liu * page is dirty because it hasn't been flushed 18064655e5e5SSong Liu * since first write. 18074655e5e5SSong Liu */ 18084655e5e5SSong Liu result = SCAN_FAIL; 18094655e5e5SSong Liu goto out_unlock; 18104655e5e5SSong Liu } 18114655e5e5SSong Liu 1812f3f0e1d2SKirill A. Shutemov if (isolate_lru_page(page)) { 1813f3f0e1d2SKirill A. Shutemov result = SCAN_DEL_PAGE_LRU; 1814042a3082SHugh Dickins goto out_unlock; 1815f3f0e1d2SKirill A. Shutemov } 1816f3f0e1d2SKirill A. Shutemov 181799cb0dbdSSong Liu if (page_has_private(page) && 181899cb0dbdSSong Liu !try_to_release_page(page, GFP_KERNEL)) { 181999cb0dbdSSong Liu result = SCAN_PAGE_HAS_PRIVATE; 18202f33a706SHugh Dickins putback_lru_page(page); 182199cb0dbdSSong Liu goto out_unlock; 182299cb0dbdSSong Liu } 182399cb0dbdSSong Liu 1824f3f0e1d2SKirill A. Shutemov if (page_mapped(page)) 1825869f7ee6SMatthew Wilcox (Oracle) try_to_unmap(page_folio(page), 1826869f7ee6SMatthew Wilcox (Oracle) TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH); 1827f3f0e1d2SKirill A. Shutemov 182877da9389SMatthew Wilcox xas_lock_irq(&xas); 182977da9389SMatthew Wilcox xas_set(&xas, index); 1830f3f0e1d2SKirill A. Shutemov 183177da9389SMatthew Wilcox VM_BUG_ON_PAGE(page != xas_load(&xas), page); 1832f3f0e1d2SKirill A. Shutemov 1833f3f0e1d2SKirill A. Shutemov /* 1834f3f0e1d2SKirill A. Shutemov * The page is expected to have page_count() == 3: 1835f3f0e1d2SKirill A. Shutemov * - we hold a pin on it; 183677da9389SMatthew Wilcox * - one reference from page cache; 1837f3f0e1d2SKirill A. Shutemov * - one from isolate_lru_page; 1838f3f0e1d2SKirill A. Shutemov */ 1839f3f0e1d2SKirill A. Shutemov if (!page_ref_freeze(page, 3)) { 1840f3f0e1d2SKirill A. Shutemov result = SCAN_PAGE_COUNT; 1841042a3082SHugh Dickins xas_unlock_irq(&xas); 1842042a3082SHugh Dickins putback_lru_page(page); 1843042a3082SHugh Dickins goto out_unlock; 1844f3f0e1d2SKirill A. Shutemov } 1845f3f0e1d2SKirill A. Shutemov 1846f3f0e1d2SKirill A. Shutemov /* 1847f3f0e1d2SKirill A. Shutemov * Add the page to the list to be able to undo the collapse if 1848f3f0e1d2SKirill A. Shutemov * something go wrong. 1849f3f0e1d2SKirill A. Shutemov */ 1850f3f0e1d2SKirill A. Shutemov list_add_tail(&page->lru, &pagelist); 1851f3f0e1d2SKirill A. Shutemov 1852f3f0e1d2SKirill A. Shutemov /* Finally, replace with the new page. */ 18534101196bSMatthew Wilcox (Oracle) xas_store(&xas, new_page); 1854f3f0e1d2SKirill A. Shutemov continue; 1855f3f0e1d2SKirill A. Shutemov out_unlock: 1856f3f0e1d2SKirill A. Shutemov unlock_page(page); 1857f3f0e1d2SKirill A. Shutemov put_page(page); 1858042a3082SHugh Dickins goto xa_unlocked; 1859f3f0e1d2SKirill A. Shutemov } 1860bf9eceadSMuchun Song nr = thp_nr_pages(new_page); 1861f3f0e1d2SKirill A. Shutemov 186299cb0dbdSSong Liu if (is_shmem) 186357b2847dSMuchun Song __mod_lruvec_page_state(new_page, NR_SHMEM_THPS, nr); 186409d91cdaSSong Liu else { 1865bf9eceadSMuchun Song __mod_lruvec_page_state(new_page, NR_FILE_THPS, nr); 186609d91cdaSSong Liu filemap_nr_thps_inc(mapping); 1867eb6ecbedSCollin Fijalkovich /* 1868eb6ecbedSCollin Fijalkovich * Paired with smp_mb() in do_dentry_open() to ensure 1869eb6ecbedSCollin Fijalkovich * i_writecount is up to date and the update to nr_thps is 1870eb6ecbedSCollin Fijalkovich * visible. 
Ensures the page cache will be truncated if the 1871eb6ecbedSCollin Fijalkovich * file is opened writable. 1872eb6ecbedSCollin Fijalkovich */ 1873eb6ecbedSCollin Fijalkovich smp_mb(); 1874eb6ecbedSCollin Fijalkovich if (inode_is_open_for_write(mapping->host)) { 1875eb6ecbedSCollin Fijalkovich result = SCAN_FAIL; 1876eb6ecbedSCollin Fijalkovich __mod_lruvec_page_state(new_page, NR_FILE_THPS, -nr); 1877eb6ecbedSCollin Fijalkovich filemap_nr_thps_dec(mapping); 1878eb6ecbedSCollin Fijalkovich goto xa_locked; 1879eb6ecbedSCollin Fijalkovich } 188009d91cdaSSong Liu } 188199cb0dbdSSong Liu 1882042a3082SHugh Dickins if (nr_none) { 18839d82c694SJohannes Weiner __mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none); 18842f55f070SMiaohe Lin /* nr_none is always 0 for non-shmem. */ 18859d82c694SJohannes Weiner __mod_lruvec_page_state(new_page, NR_SHMEM, nr_none); 1886042a3082SHugh Dickins } 1887042a3082SHugh Dickins 18886b24ca4aSMatthew Wilcox (Oracle) /* Join all the small entries into a single multi-index entry */ 18896b24ca4aSMatthew Wilcox (Oracle) xas_set_order(&xas, start, HPAGE_PMD_ORDER); 18906b24ca4aSMatthew Wilcox (Oracle) xas_store(&xas, new_page); 1891042a3082SHugh Dickins xa_locked: 1892042a3082SHugh Dickins xas_unlock_irq(&xas); 189377da9389SMatthew Wilcox xa_unlocked: 1894042a3082SHugh Dickins 18956d9df8a5SHugh Dickins /* 18966d9df8a5SHugh Dickins * If collapse is successful, flush must be done now before copying. 18976d9df8a5SHugh Dickins * If collapse is unsuccessful, does flush actually need to be done? 18986d9df8a5SHugh Dickins * Do it anyway, to clear the state. 18996d9df8a5SHugh Dickins */ 19006d9df8a5SHugh Dickins try_to_unmap_flush(); 19016d9df8a5SHugh Dickins 1902f3f0e1d2SKirill A. Shutemov if (result == SCAN_SUCCEED) { 190377da9389SMatthew Wilcox struct page *page, *tmp; 1904f3f0e1d2SKirill A. Shutemov 1905f3f0e1d2SKirill A. Shutemov /* 190677da9389SMatthew Wilcox * Replacing old pages with new one has succeeded, now we 190777da9389SMatthew Wilcox * need to copy the content and free the old pages. 1908f3f0e1d2SKirill A. Shutemov */ 19092af8ff29SHugh Dickins index = start; 1910f3f0e1d2SKirill A. Shutemov list_for_each_entry_safe(page, tmp, &pagelist, lru) { 19112af8ff29SHugh Dickins while (index < page->index) { 19122af8ff29SHugh Dickins clear_highpage(new_page + (index % HPAGE_PMD_NR)); 19132af8ff29SHugh Dickins index++; 19142af8ff29SHugh Dickins } 1915f3f0e1d2SKirill A. Shutemov copy_highpage(new_page + (page->index % HPAGE_PMD_NR), 1916f3f0e1d2SKirill A. Shutemov page); 1917f3f0e1d2SKirill A. Shutemov list_del(&page->lru); 1918f3f0e1d2SKirill A. Shutemov page->mapping = NULL; 1919042a3082SHugh Dickins page_ref_unfreeze(page, 1); 1920f3f0e1d2SKirill A. Shutemov ClearPageActive(page); 1921f3f0e1d2SKirill A. Shutemov ClearPageUnevictable(page); 1922042a3082SHugh Dickins unlock_page(page); 1923f3f0e1d2SKirill A. Shutemov put_page(page); 19242af8ff29SHugh Dickins index++; 19252af8ff29SHugh Dickins } 19262af8ff29SHugh Dickins while (index < end) { 19272af8ff29SHugh Dickins clear_highpage(new_page + (index % HPAGE_PMD_NR)); 19282af8ff29SHugh Dickins index++; 1929f3f0e1d2SKirill A. Shutemov } 1930f3f0e1d2SKirill A. Shutemov 1931f3f0e1d2SKirill A. Shutemov SetPageUptodate(new_page); 193287c460a0SHugh Dickins page_ref_add(new_page, HPAGE_PMD_NR - 1); 19336058eaecSJohannes Weiner if (is_shmem) 193499cb0dbdSSong Liu set_page_dirty(new_page); 19356058eaecSJohannes Weiner lru_cache_add(new_page); 1936f3f0e1d2SKirill A. 
Shutemov 1937042a3082SHugh Dickins /* 1938042a3082SHugh Dickins * Remove pte page tables, so we can re-fault the page as huge. 1939042a3082SHugh Dickins */ 1940042a3082SHugh Dickins retract_page_tables(mapping, start); 1941f3f0e1d2SKirill A. Shutemov *hpage = NULL; 194287aa7529SYang Shi 194387aa7529SYang Shi khugepaged_pages_collapsed++; 1944f3f0e1d2SKirill A. Shutemov } else { 194577da9389SMatthew Wilcox struct page *page; 1946aaa52e34SHugh Dickins 194777da9389SMatthew Wilcox /* Something went wrong: roll back page cache changes */ 194877da9389SMatthew Wilcox xas_lock_irq(&xas); 19492f55f070SMiaohe Lin if (nr_none) { 1950aaa52e34SHugh Dickins mapping->nrpages -= nr_none; 1951aaa52e34SHugh Dickins shmem_uncharge(mapping->host, nr_none); 19522f55f070SMiaohe Lin } 1953aaa52e34SHugh Dickins 195477da9389SMatthew Wilcox xas_set(&xas, start); 195577da9389SMatthew Wilcox xas_for_each(&xas, page, end - 1) { 1956f3f0e1d2SKirill A. Shutemov page = list_first_entry_or_null(&pagelist, 1957f3f0e1d2SKirill A. Shutemov struct page, lru); 195877da9389SMatthew Wilcox if (!page || xas.xa_index < page->index) { 1959f3f0e1d2SKirill A. Shutemov if (!nr_none) 1960f3f0e1d2SKirill A. Shutemov break; 1961f3f0e1d2SKirill A. Shutemov nr_none--; 196259749e6cSJohannes Weiner /* Put holes back where they were */ 196377da9389SMatthew Wilcox xas_store(&xas, NULL); 1964f3f0e1d2SKirill A. Shutemov continue; 1965f3f0e1d2SKirill A. Shutemov } 1966f3f0e1d2SKirill A. Shutemov 196777da9389SMatthew Wilcox VM_BUG_ON_PAGE(page->index != xas.xa_index, page); 1968f3f0e1d2SKirill A. Shutemov 1969f3f0e1d2SKirill A. Shutemov /* Unfreeze the page. */ 1970f3f0e1d2SKirill A. Shutemov list_del(&page->lru); 1971f3f0e1d2SKirill A. Shutemov page_ref_unfreeze(page, 2); 197277da9389SMatthew Wilcox xas_store(&xas, page); 197377da9389SMatthew Wilcox xas_pause(&xas); 197477da9389SMatthew Wilcox xas_unlock_irq(&xas); 1975f3f0e1d2SKirill A. Shutemov unlock_page(page); 1976042a3082SHugh Dickins putback_lru_page(page); 197777da9389SMatthew Wilcox xas_lock_irq(&xas); 1978f3f0e1d2SKirill A. Shutemov } 1979f3f0e1d2SKirill A. Shutemov VM_BUG_ON(nr_none); 198077da9389SMatthew Wilcox xas_unlock_irq(&xas); 1981f3f0e1d2SKirill A. Shutemov 1982f3f0e1d2SKirill A. Shutemov new_page->mapping = NULL; 1983f3f0e1d2SKirill A. Shutemov } 1984042a3082SHugh Dickins 1985042a3082SHugh Dickins unlock_page(new_page); 1986f3f0e1d2SKirill A. Shutemov out: 1987f3f0e1d2SKirill A. Shutemov VM_BUG_ON(!list_empty(&pagelist)); 19889d82c694SJohannes Weiner if (!IS_ERR_OR_NULL(*hpage)) 1989bbc6b703SMatthew Wilcox (Oracle) mem_cgroup_uncharge(page_folio(*hpage)); 1990f3f0e1d2SKirill A. Shutemov /* TODO: tracepoints */ 1991f3f0e1d2SKirill A. Shutemov } 1992f3f0e1d2SKirill A. Shutemov 1993579c571eSSong Liu static void khugepaged_scan_file(struct mm_struct *mm, 1994579c571eSSong Liu struct file *file, pgoff_t start, struct page **hpage) 1995f3f0e1d2SKirill A. Shutemov { 1996f3f0e1d2SKirill A. Shutemov struct page *page = NULL; 1997579c571eSSong Liu struct address_space *mapping = file->f_mapping; 199885b392dbSMatthew Wilcox XA_STATE(xas, &mapping->i_pages, start); 1999f3f0e1d2SKirill A. Shutemov int present, swap; 2000f3f0e1d2SKirill A. Shutemov int node = NUMA_NO_NODE; 2001f3f0e1d2SKirill A. Shutemov int result = SCAN_SUCCEED; 2002f3f0e1d2SKirill A. Shutemov 2003f3f0e1d2SKirill A. Shutemov present = 0; 2004f3f0e1d2SKirill A. Shutemov swap = 0; 2005f3f0e1d2SKirill A. Shutemov memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load)); 2006f3f0e1d2SKirill A. 
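	/*
	 * Walk the page cache for this range under RCU only; any page that
	 * fails the checks below ends the scan with a specific SCAN_* reason,
	 * and collapse_file() is attempted only if the scan stayed clean and
	 * enough pages are present.
	 */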
Shutemov rcu_read_lock(); 200785b392dbSMatthew Wilcox xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) { 200885b392dbSMatthew Wilcox if (xas_retry(&xas, page)) 2009f3f0e1d2SKirill A. Shutemov continue; 2010f3f0e1d2SKirill A. Shutemov 201185b392dbSMatthew Wilcox if (xa_is_value(page)) { 2012f3f0e1d2SKirill A. Shutemov if (++swap > khugepaged_max_ptes_swap) { 2013f3f0e1d2SKirill A. Shutemov result = SCAN_EXCEED_SWAP_PTE; 2014e9ea874aSYang Yang count_vm_event(THP_SCAN_EXCEED_SWAP_PTE); 2015f3f0e1d2SKirill A. Shutemov break; 2016f3f0e1d2SKirill A. Shutemov } 2017f3f0e1d2SKirill A. Shutemov continue; 2018f3f0e1d2SKirill A. Shutemov } 2019f3f0e1d2SKirill A. Shutemov 20206b24ca4aSMatthew Wilcox (Oracle) /* 20216b24ca4aSMatthew Wilcox (Oracle) * XXX: khugepaged should compact smaller compound pages 20226b24ca4aSMatthew Wilcox (Oracle) * into a PMD sized page 20236b24ca4aSMatthew Wilcox (Oracle) */ 2024f3f0e1d2SKirill A. Shutemov if (PageTransCompound(page)) { 2025f3f0e1d2SKirill A. Shutemov result = SCAN_PAGE_COMPOUND; 2026f3f0e1d2SKirill A. Shutemov break; 2027f3f0e1d2SKirill A. Shutemov } 2028f3f0e1d2SKirill A. Shutemov 2029f3f0e1d2SKirill A. Shutemov node = page_to_nid(page); 2030f3f0e1d2SKirill A. Shutemov if (khugepaged_scan_abort(node)) { 2031f3f0e1d2SKirill A. Shutemov result = SCAN_SCAN_ABORT; 2032f3f0e1d2SKirill A. Shutemov break; 2033f3f0e1d2SKirill A. Shutemov } 2034f3f0e1d2SKirill A. Shutemov khugepaged_node_load[node]++; 2035f3f0e1d2SKirill A. Shutemov 2036f3f0e1d2SKirill A. Shutemov if (!PageLRU(page)) { 2037f3f0e1d2SKirill A. Shutemov result = SCAN_PAGE_LRU; 2038f3f0e1d2SKirill A. Shutemov break; 2039f3f0e1d2SKirill A. Shutemov } 2040f3f0e1d2SKirill A. Shutemov 204199cb0dbdSSong Liu if (page_count(page) != 204299cb0dbdSSong Liu 1 + page_mapcount(page) + page_has_private(page)) { 2043f3f0e1d2SKirill A. Shutemov result = SCAN_PAGE_COUNT; 2044f3f0e1d2SKirill A. Shutemov break; 2045f3f0e1d2SKirill A. Shutemov } 2046f3f0e1d2SKirill A. Shutemov 2047f3f0e1d2SKirill A. Shutemov /* 2048f3f0e1d2SKirill A. Shutemov * We probably should check if the page is referenced here, but 2049f3f0e1d2SKirill A. Shutemov * nobody would transfer pte_young() to PageReferenced() for us. 2050f3f0e1d2SKirill A. Shutemov * And rmap walk here is just too costly... 2051f3f0e1d2SKirill A. Shutemov */ 2052f3f0e1d2SKirill A. Shutemov 2053f3f0e1d2SKirill A. Shutemov present++; 2054f3f0e1d2SKirill A. Shutemov 2055f3f0e1d2SKirill A. Shutemov if (need_resched()) { 205685b392dbSMatthew Wilcox xas_pause(&xas); 2057f3f0e1d2SKirill A. Shutemov cond_resched_rcu(); 2058f3f0e1d2SKirill A. Shutemov } 2059f3f0e1d2SKirill A. Shutemov } 2060f3f0e1d2SKirill A. Shutemov rcu_read_unlock(); 2061f3f0e1d2SKirill A. Shutemov 2062f3f0e1d2SKirill A. Shutemov if (result == SCAN_SUCCEED) { 2063f3f0e1d2SKirill A. Shutemov if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) { 2064f3f0e1d2SKirill A. Shutemov result = SCAN_EXCEED_NONE_PTE; 2065e9ea874aSYang Yang count_vm_event(THP_SCAN_EXCEED_NONE_PTE); 2066f3f0e1d2SKirill A. Shutemov } else { 2067f3f0e1d2SKirill A. Shutemov node = khugepaged_find_target_node(); 2068579c571eSSong Liu collapse_file(mm, file, start, hpage, node); 2069f3f0e1d2SKirill A. Shutemov } 2070f3f0e1d2SKirill A. Shutemov } 2071f3f0e1d2SKirill A. Shutemov 2072f3f0e1d2SKirill A. Shutemov /* TODO: tracepoints */ 2073f3f0e1d2SKirill A. Shutemov } 2074f3f0e1d2SKirill A. 
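/*
 * With CONFIG_SHMEM=n there is no file-backed collapse path, so the stubs
 * below are never reached from the scan loop (hence the BUILD_BUG()).
 */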
Shutemov #else 2075579c571eSSong Liu static void khugepaged_scan_file(struct mm_struct *mm, 2076579c571eSSong Liu struct file *file, pgoff_t start, struct page **hpage) 2077f3f0e1d2SKirill A. Shutemov { 2078f3f0e1d2SKirill A. Shutemov BUILD_BUG(); 2079f3f0e1d2SKirill A. Shutemov } 208027e1f827SSong Liu 20810edf61e5SMiaohe Lin static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot) 208227e1f827SSong Liu { 208327e1f827SSong Liu } 2084f3f0e1d2SKirill A. Shutemov #endif 2085f3f0e1d2SKirill A. Shutemov 2086b46e756fSKirill A. Shutemov static unsigned int khugepaged_scan_mm_slot(unsigned int pages, 2087b46e756fSKirill A. Shutemov struct page **hpage) 2088b46e756fSKirill A. Shutemov __releases(&khugepaged_mm_lock) 2089b46e756fSKirill A. Shutemov __acquires(&khugepaged_mm_lock) 2090b46e756fSKirill A. Shutemov { 2091b46e756fSKirill A. Shutemov struct mm_slot *mm_slot; 2092b46e756fSKirill A. Shutemov struct mm_struct *mm; 2093b46e756fSKirill A. Shutemov struct vm_area_struct *vma; 2094b46e756fSKirill A. Shutemov int progress = 0; 2095b46e756fSKirill A. Shutemov 2096b46e756fSKirill A. Shutemov VM_BUG_ON(!pages); 209735f3aa39SLance Roy lockdep_assert_held(&khugepaged_mm_lock); 2098b46e756fSKirill A. Shutemov 2099b46e756fSKirill A. Shutemov if (khugepaged_scan.mm_slot) 2100b46e756fSKirill A. Shutemov mm_slot = khugepaged_scan.mm_slot; 2101b46e756fSKirill A. Shutemov else { 2102b46e756fSKirill A. Shutemov mm_slot = list_entry(khugepaged_scan.mm_head.next, 2103b46e756fSKirill A. Shutemov struct mm_slot, mm_node); 2104b46e756fSKirill A. Shutemov khugepaged_scan.address = 0; 2105b46e756fSKirill A. Shutemov khugepaged_scan.mm_slot = mm_slot; 2106b46e756fSKirill A. Shutemov } 2107b46e756fSKirill A. Shutemov spin_unlock(&khugepaged_mm_lock); 210827e1f827SSong Liu khugepaged_collapse_pte_mapped_thps(mm_slot); 2109b46e756fSKirill A. Shutemov 2110b46e756fSKirill A. Shutemov mm = mm_slot->mm; 21113b454ad3SYang Shi /* 21123b454ad3SYang Shi * Don't wait for semaphore (to avoid long wait times). Just move to 21133b454ad3SYang Shi * the next mm on the list. 21143b454ad3SYang Shi */ 2115b46e756fSKirill A. Shutemov vma = NULL; 2116d8ed45c5SMichel Lespinasse if (unlikely(!mmap_read_trylock(mm))) 2117c1e8d7c6SMichel Lespinasse goto breakouterloop_mmap_lock; 21183b454ad3SYang Shi if (likely(!khugepaged_test_exit(mm))) 2119b46e756fSKirill A. Shutemov vma = find_vma(mm, khugepaged_scan.address); 2120b46e756fSKirill A. Shutemov 2121b46e756fSKirill A. Shutemov progress++; 2122b46e756fSKirill A. Shutemov for (; vma; vma = vma->vm_next) { 2123b46e756fSKirill A. Shutemov unsigned long hstart, hend; 2124b46e756fSKirill A. Shutemov 2125b46e756fSKirill A. Shutemov cond_resched(); 2126b46e756fSKirill A. Shutemov if (unlikely(khugepaged_test_exit(mm))) { 2127b46e756fSKirill A. Shutemov progress++; 2128b46e756fSKirill A. Shutemov break; 2129b46e756fSKirill A. Shutemov } 213050f8b92fSSong Liu if (!hugepage_vma_check(vma, vma->vm_flags)) { 2131b46e756fSKirill A. Shutemov skip: 2132b46e756fSKirill A. Shutemov progress++; 2133b46e756fSKirill A. Shutemov continue; 2134b46e756fSKirill A. Shutemov } 2135b46e756fSKirill A. Shutemov hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2136b46e756fSKirill A. Shutemov hend = vma->vm_end & HPAGE_PMD_MASK; 2137b46e756fSKirill A. Shutemov if (hstart >= hend) 2138b46e756fSKirill A. Shutemov goto skip; 2139b46e756fSKirill A. Shutemov if (khugepaged_scan.address > hend) 2140b46e756fSKirill A. Shutemov goto skip; 2141b46e756fSKirill A. 
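		/*
		 * Clamp the cursor to the first aligned address in this vma
		 * before stepping through it in HPAGE_PMD_SIZE chunks.
		 */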
Shutemov if (khugepaged_scan.address < hstart) 2142b46e756fSKirill A. Shutemov khugepaged_scan.address = hstart; 2143b46e756fSKirill A. Shutemov VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); 2144b46e756fSKirill A. Shutemov 2145b46e756fSKirill A. Shutemov while (khugepaged_scan.address < hend) { 2146b46e756fSKirill A. Shutemov int ret; 2147b46e756fSKirill A. Shutemov cond_resched(); 2148b46e756fSKirill A. Shutemov if (unlikely(khugepaged_test_exit(mm))) 2149b46e756fSKirill A. Shutemov goto breakouterloop; 2150b46e756fSKirill A. Shutemov 2151b46e756fSKirill A. Shutemov VM_BUG_ON(khugepaged_scan.address < hstart || 2152b46e756fSKirill A. Shutemov khugepaged_scan.address + HPAGE_PMD_SIZE > 2153b46e756fSKirill A. Shutemov hend); 215499cb0dbdSSong Liu if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) { 2155396bcc52SMatthew Wilcox (Oracle) struct file *file = get_file(vma->vm_file); 2156f3f0e1d2SKirill A. Shutemov pgoff_t pgoff = linear_page_index(vma, 2157f3f0e1d2SKirill A. Shutemov khugepaged_scan.address); 215899cb0dbdSSong Liu 2159d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 2160f3f0e1d2SKirill A. Shutemov ret = 1; 2161579c571eSSong Liu khugepaged_scan_file(mm, file, pgoff, hpage); 2162f3f0e1d2SKirill A. Shutemov fput(file); 2163f3f0e1d2SKirill A. Shutemov } else { 2164b46e756fSKirill A. Shutemov ret = khugepaged_scan_pmd(mm, vma, 2165b46e756fSKirill A. Shutemov khugepaged_scan.address, 2166b46e756fSKirill A. Shutemov hpage); 2167f3f0e1d2SKirill A. Shutemov } 2168b46e756fSKirill A. Shutemov /* move to next address */ 2169b46e756fSKirill A. Shutemov khugepaged_scan.address += HPAGE_PMD_SIZE; 2170b46e756fSKirill A. Shutemov progress += HPAGE_PMD_NR; 2171b46e756fSKirill A. Shutemov if (ret) 2172c1e8d7c6SMichel Lespinasse /* we released mmap_lock so break loop */ 2173c1e8d7c6SMichel Lespinasse goto breakouterloop_mmap_lock; 2174b46e756fSKirill A. Shutemov if (progress >= pages) 2175b46e756fSKirill A. Shutemov goto breakouterloop; 2176b46e756fSKirill A. Shutemov } 2177b46e756fSKirill A. Shutemov } 2178b46e756fSKirill A. Shutemov breakouterloop: 2179d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */ 2180c1e8d7c6SMichel Lespinasse breakouterloop_mmap_lock: 2181b46e756fSKirill A. Shutemov 2182b46e756fSKirill A. Shutemov spin_lock(&khugepaged_mm_lock); 2183b46e756fSKirill A. Shutemov VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); 2184b46e756fSKirill A. Shutemov /* 2185b46e756fSKirill A. Shutemov * Release the current mm_slot if this mm is about to die, or 2186b46e756fSKirill A. Shutemov * if we scanned all vmas of this mm. 2187b46e756fSKirill A. Shutemov */ 2188b46e756fSKirill A. Shutemov if (khugepaged_test_exit(mm) || !vma) { 2189b46e756fSKirill A. Shutemov /* 2190b46e756fSKirill A. Shutemov * Make sure that if mm_users is reaching zero while 2191b46e756fSKirill A. Shutemov * khugepaged runs here, khugepaged_exit will find 2192b46e756fSKirill A. Shutemov * mm_slot not pointing to the exiting mm. 2193b46e756fSKirill A. Shutemov */ 2194b46e756fSKirill A. Shutemov if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) { 2195b46e756fSKirill A. Shutemov khugepaged_scan.mm_slot = list_entry( 2196b46e756fSKirill A. Shutemov mm_slot->mm_node.next, 2197b46e756fSKirill A. Shutemov struct mm_slot, mm_node); 2198b46e756fSKirill A. Shutemov khugepaged_scan.address = 0; 2199b46e756fSKirill A. Shutemov } else { 2200b46e756fSKirill A. Shutemov khugepaged_scan.mm_slot = NULL; 2201b46e756fSKirill A. Shutemov khugepaged_full_scans++; 2202b46e756fSKirill A. 
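			/* Wrapped past the end of the mm list: one full scan done. */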
2203b46e756fSKirill A. Shutemov 
2204b46e756fSKirill A. Shutemov 		collect_mm_slot(mm_slot);
2205b46e756fSKirill A. Shutemov 	}
2206b46e756fSKirill A. Shutemov 
2207b46e756fSKirill A. Shutemov 	return progress;
2208b46e756fSKirill A. Shutemov }
2209b46e756fSKirill A. Shutemov 
2210b46e756fSKirill A. Shutemov static int khugepaged_has_work(void)
2211b46e756fSKirill A. Shutemov {
2212b46e756fSKirill A. Shutemov 	return !list_empty(&khugepaged_scan.mm_head) &&
2213b46e756fSKirill A. Shutemov 		khugepaged_enabled();
2214b46e756fSKirill A. Shutemov }
2215b46e756fSKirill A. Shutemov 
2216b46e756fSKirill A. Shutemov static int khugepaged_wait_event(void)
2217b46e756fSKirill A. Shutemov {
2218b46e756fSKirill A. Shutemov 	return !list_empty(&khugepaged_scan.mm_head) ||
2219b46e756fSKirill A. Shutemov 		kthread_should_stop();
2220b46e756fSKirill A. Shutemov }
2221b46e756fSKirill A. Shutemov 
2222b46e756fSKirill A. Shutemov static void khugepaged_do_scan(void)
2223b46e756fSKirill A. Shutemov {
2224b46e756fSKirill A. Shutemov 	struct page *hpage = NULL;
2225b46e756fSKirill A. Shutemov 	unsigned int progress = 0, pass_through_head = 0;
222689dc6a96SYanfei Xu 	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
2227b46e756fSKirill A. Shutemov 	bool wait = true;
2228b46e756fSKirill A. Shutemov 
2229a980df33SKirill A. Shutemov 	lru_add_drain_all();
2230a980df33SKirill A. Shutemov 
2231b46e756fSKirill A. Shutemov 	while (progress < pages) {
2232b46e756fSKirill A. Shutemov 		if (!khugepaged_prealloc_page(&hpage, &wait))
2233b46e756fSKirill A. Shutemov 			break;
2234b46e756fSKirill A. Shutemov 
2235b46e756fSKirill A. Shutemov 		cond_resched();
2236b46e756fSKirill A. Shutemov 
2237b46e756fSKirill A. Shutemov 		if (unlikely(kthread_should_stop() || try_to_freeze()))
2238b46e756fSKirill A. Shutemov 			break;
2239b46e756fSKirill A. Shutemov 
2240b46e756fSKirill A. Shutemov 		spin_lock(&khugepaged_mm_lock);
2241b46e756fSKirill A. Shutemov 		if (!khugepaged_scan.mm_slot)
2242b46e756fSKirill A. Shutemov 			pass_through_head++;
2243b46e756fSKirill A. Shutemov 		if (khugepaged_has_work() &&
2244b46e756fSKirill A. Shutemov 		    pass_through_head < 2)
2245b46e756fSKirill A. Shutemov 			progress += khugepaged_scan_mm_slot(pages - progress,
2246b46e756fSKirill A. Shutemov 							    &hpage);
2247b46e756fSKirill A. Shutemov 		else
2248b46e756fSKirill A. Shutemov 			progress = pages;
2249b46e756fSKirill A. Shutemov 		spin_unlock(&khugepaged_mm_lock);
2250b46e756fSKirill A. Shutemov 	}
2251b46e756fSKirill A. Shutemov 
2252b46e756fSKirill A. Shutemov 	if (!IS_ERR_OR_NULL(hpage))
2253b46e756fSKirill A. Shutemov 		put_page(hpage);
2254b46e756fSKirill A. Shutemov }
2255b46e756fSKirill A. Shutemov 
2256b46e756fSKirill A. Shutemov static bool khugepaged_should_wakeup(void)
2257b46e756fSKirill A. Shutemov {
2258b46e756fSKirill A. Shutemov 	return kthread_should_stop() ||
2259b46e756fSKirill A. Shutemov 	       time_after_eq(jiffies, khugepaged_sleep_expire);
2260b46e756fSKirill A. Shutemov }
2261b46e756fSKirill A. Shutemov 
2262b46e756fSKirill A. Shutemov static void khugepaged_wait_work(void)
2263b46e756fSKirill A. Shutemov {
2264b46e756fSKirill A. Shutemov 	if (khugepaged_has_work()) {
2265b46e756fSKirill A. Shutemov 		const unsigned long scan_sleep_jiffies =
2266b46e756fSKirill A. Shutemov 			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2267b46e756fSKirill A. Shutemov 
2268b46e756fSKirill A. Shutemov 		if (!scan_sleep_jiffies)
2269b46e756fSKirill A. Shutemov 			return;
2270b46e756fSKirill A. Shutemov 
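		/*
		 * Descriptive note (added): this records when the nap may end;
		 * khugepaged_should_wakeup() compares jiffies against
		 * khugepaged_sleep_expire, so resetting the expiry (e.g. when
		 * the sleep tunables change) cuts the sleep short.
		 */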
2271b46e756fSKirill A. Shutemov 		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2272b46e756fSKirill A. Shutemov 		wait_event_freezable_timeout(khugepaged_wait,
2273b46e756fSKirill A. Shutemov 					     khugepaged_should_wakeup(),
2274b46e756fSKirill A. Shutemov 					     scan_sleep_jiffies);
2275b46e756fSKirill A. Shutemov 		return;
2276b46e756fSKirill A. Shutemov 	}
2277b46e756fSKirill A. Shutemov 
2278b46e756fSKirill A. Shutemov 	if (khugepaged_enabled())
2279b46e756fSKirill A. Shutemov 		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2280b46e756fSKirill A. Shutemov }
2281b46e756fSKirill A. Shutemov 
2282b46e756fSKirill A. Shutemov static int khugepaged(void *none)
2283b46e756fSKirill A. Shutemov {
2284b46e756fSKirill A. Shutemov 	struct mm_slot *mm_slot;
2285b46e756fSKirill A. Shutemov 
2286b46e756fSKirill A. Shutemov 	set_freezable();
2287b46e756fSKirill A. Shutemov 	set_user_nice(current, MAX_NICE);
2288b46e756fSKirill A. Shutemov 
2289b46e756fSKirill A. Shutemov 	while (!kthread_should_stop()) {
2290b46e756fSKirill A. Shutemov 		khugepaged_do_scan();
2291b46e756fSKirill A. Shutemov 		khugepaged_wait_work();
2292b46e756fSKirill A. Shutemov 	}
2293b46e756fSKirill A. Shutemov 
2294b46e756fSKirill A. Shutemov 	spin_lock(&khugepaged_mm_lock);
2295b46e756fSKirill A. Shutemov 	mm_slot = khugepaged_scan.mm_slot;
2296b46e756fSKirill A. Shutemov 	khugepaged_scan.mm_slot = NULL;
2297b46e756fSKirill A. Shutemov 	if (mm_slot)
2298b46e756fSKirill A. Shutemov 		collect_mm_slot(mm_slot);
2299b46e756fSKirill A. Shutemov 	spin_unlock(&khugepaged_mm_lock);
2300b46e756fSKirill A. Shutemov 	return 0;
2301b46e756fSKirill A. Shutemov }
2302b46e756fSKirill A. Shutemov 
2303b46e756fSKirill A. Shutemov static void set_recommended_min_free_kbytes(void)
2304b46e756fSKirill A. Shutemov {
2305b46e756fSKirill A. Shutemov 	struct zone *zone;
2306b46e756fSKirill A. Shutemov 	int nr_zones = 0;
2307b46e756fSKirill A. Shutemov 	unsigned long recommended_min;
2308b46e756fSKirill A. Shutemov 
2309bd3400eaSLiangcai Fan 	if (!khugepaged_enabled()) {
2310bd3400eaSLiangcai Fan 		calculate_min_free_kbytes();
2311bd3400eaSLiangcai Fan 		goto update_wmarks;
2312bd3400eaSLiangcai Fan 	}
2313bd3400eaSLiangcai Fan 
2314b7d349c7SJoonsoo Kim 	for_each_populated_zone(zone) {
2315b7d349c7SJoonsoo Kim 		/*
2316b7d349c7SJoonsoo Kim 		 * We don't need to worry about fragmentation of
2317b7d349c7SJoonsoo Kim 		 * ZONE_MOVABLE since it only has movable pages.
2318b7d349c7SJoonsoo Kim 		 */
2319b7d349c7SJoonsoo Kim 		if (zone_idx(zone) > gfp_zone(GFP_USER))
2320b7d349c7SJoonsoo Kim 			continue;
2321b7d349c7SJoonsoo Kim 
2322b46e756fSKirill A. Shutemov 		nr_zones++;
2323b7d349c7SJoonsoo Kim 	}
2324b46e756fSKirill A. Shutemov 
2325b46e756fSKirill A. Shutemov 	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2326b46e756fSKirill A. Shutemov 	recommended_min = pageblock_nr_pages * nr_zones * 2;
2327b46e756fSKirill A. Shutemov 
2328b46e756fSKirill A. Shutemov 	/*
2329b46e756fSKirill A. Shutemov 	 * Make sure that on average at least two pageblocks are almost free
2330b46e756fSKirill A. Shutemov 	 * of another type, one for a migratetype to fall back to and a
2331b46e756fSKirill A. Shutemov 	 * second to avoid subsequent fallbacks of other types There are 3
2332b46e756fSKirill A. Shutemov 	 * MIGRATE_TYPES we care about.
2333b46e756fSKirill A. Shutemov 	 */
2334b46e756fSKirill A. Shutemov 	recommended_min += pageblock_nr_pages * nr_zones *
2335b46e756fSKirill A. Shutemov 			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2336b46e756fSKirill A. Shutemov 
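	/*
	 * Worked example (added; assumes 4K pages, pageblock_nr_pages == 512,
	 * MIGRATE_PCPTYPES == 3 and two populated non-movable zones): the
	 * base term is 512 * 2 * 2 = 2048 pages and the fallback headroom
	 * adds 512 * 2 * 3 * 3 = 9216 pages, i.e. 11264 pages (44 MiB)
	 * before the 5% lowmem clamp and the shift to KiB below.
	 */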
2337b46e756fSKirill A. Shutemov 	/* don't ever allow to reserve more than 5% of the lowmem */
2338b46e756fSKirill A. Shutemov 	recommended_min = min(recommended_min,
2339b46e756fSKirill A. Shutemov 			      (unsigned long) nr_free_buffer_pages() / 20);
2340b46e756fSKirill A. Shutemov 	recommended_min <<= (PAGE_SHIFT-10);
2341b46e756fSKirill A. Shutemov 
2342b46e756fSKirill A. Shutemov 	if (recommended_min > min_free_kbytes) {
2343b46e756fSKirill A. Shutemov 		if (user_min_free_kbytes >= 0)
2344b46e756fSKirill A. Shutemov 			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2345b46e756fSKirill A. Shutemov 				min_free_kbytes, recommended_min);
2346b46e756fSKirill A. Shutemov 
2347b46e756fSKirill A. Shutemov 		min_free_kbytes = recommended_min;
2348b46e756fSKirill A. Shutemov 	}
2349bd3400eaSLiangcai Fan 
2350bd3400eaSLiangcai Fan update_wmarks:
2351b46e756fSKirill A. Shutemov 	setup_per_zone_wmarks();
2352b46e756fSKirill A. Shutemov }
2353b46e756fSKirill A. Shutemov 
2354b46e756fSKirill A. Shutemov int start_stop_khugepaged(void)
2355b46e756fSKirill A. Shutemov {
2356b46e756fSKirill A. Shutemov 	int err = 0;
2357b46e756fSKirill A. Shutemov 
2358b46e756fSKirill A. Shutemov 	mutex_lock(&khugepaged_mutex);
2359b46e756fSKirill A. Shutemov 	if (khugepaged_enabled()) {
2360b46e756fSKirill A. Shutemov 		if (!khugepaged_thread)
2361b46e756fSKirill A. Shutemov 			khugepaged_thread = kthread_run(khugepaged, NULL,
2362b46e756fSKirill A. Shutemov 							"khugepaged");
2363b46e756fSKirill A. Shutemov 		if (IS_ERR(khugepaged_thread)) {
2364b46e756fSKirill A. Shutemov 			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2365b46e756fSKirill A. Shutemov 			err = PTR_ERR(khugepaged_thread);
2366b46e756fSKirill A. Shutemov 			khugepaged_thread = NULL;
2367b46e756fSKirill A. Shutemov 			goto fail;
2368b46e756fSKirill A. Shutemov 		}
2369b46e756fSKirill A. Shutemov 
2370b46e756fSKirill A. Shutemov 		if (!list_empty(&khugepaged_scan.mm_head))
2371b46e756fSKirill A. Shutemov 			wake_up_interruptible(&khugepaged_wait);
2372b46e756fSKirill A. Shutemov 	} else if (khugepaged_thread) {
2373b46e756fSKirill A. Shutemov 		kthread_stop(khugepaged_thread);
2374b46e756fSKirill A. Shutemov 		khugepaged_thread = NULL;
2375b46e756fSKirill A. Shutemov 	}
2376bd3400eaSLiangcai Fan 	set_recommended_min_free_kbytes();
2377b46e756fSKirill A. Shutemov fail:
2378b46e756fSKirill A. Shutemov 	mutex_unlock(&khugepaged_mutex);
2379b46e756fSKirill A. Shutemov 	return err;
2380b46e756fSKirill A. Shutemov }
23814aab2be0SVijay Balakrishna 
23824aab2be0SVijay Balakrishna void khugepaged_min_free_kbytes_update(void)
23834aab2be0SVijay Balakrishna {
23844aab2be0SVijay Balakrishna 	mutex_lock(&khugepaged_mutex);
23854aab2be0SVijay Balakrishna 	if (khugepaged_enabled() && khugepaged_thread)
23864aab2be0SVijay Balakrishna 		set_recommended_min_free_kbytes();
23874aab2be0SVijay Balakrishna 	mutex_unlock(&khugepaged_mutex);
23884aab2be0SVijay Balakrishna }
2389