// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_SWAP_CACHE_PAGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * default collapse hugepages if there is at least one pte mapped like
 * it would have happened if the vma was large enough during page
 * fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 * @nr_pte_mapped_thp: number of pte mapped THP
 * @pte_mapped_thp: address array corresponding pte mapped THP
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
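/*
 * The tunables below are exported through khugepaged_attr_group (defined
 * at the end of this block); on a typical build they appear under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/.
 */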
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls if khugepaged should collapse hugepages over
 * any unmapped ptes in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
	       khugepaged_max_ptes_swap_store);

static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
					       struct kobj_attribute *attr,
					       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
						struct kobj_attribute *attr,
						const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
	       khugepaged_max_ptes_shared_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault that
		 * may not happen any time soon.
		 */
		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
				khugepaged_enter_vma_merge(vma, *vm_flags))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}
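
/*
 * Decide whether khugepaged may collapse pages in this vma: THP must be
 * enabled for the vma, file-backed vmas must be suitably aligned, shmem
 * follows its mount/sysfs policy, and (with CONFIG_READ_ONLY_THP_FOR_FS)
 * executable file mappings must be read-only regular files.  Anonymous
 * vmas additionally need an anon_vma, no special vm_ops, and must not be
 * a temporary stack or marked VM_NO_KHUGEPAGED.
 */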
static bool hugepage_vma_check(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	if (!transhuge_vma_enabled(vma, vm_flags))
		return false;

	if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
				vma->vm_pgoff, HPAGE_PMD_NR))
		return false;

	/* Enabled via shmem mount options or sysfs settings. */
	if (shmem_file(vma->vm_file))
		return shmem_huge_enabled(vma);

	/* THP settings require madvise. */
	if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
		return false;

	/* Only regular file is valid */
	if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
	    (vm_flags & VM_EXEC)) {
		struct inode *inode = vma->vm_file->f_inode;

		return !inode_is_open_for_write(inode) &&
			S_ISREG(inode->i_mode);
	}

	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (vma_is_temporary_stack(vma))
		return false;
	return !(vm_flags & VM_NO_KHUGEPAGED);
}

int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}

int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;

	/*
	 * khugepaged only supports read-only files for non-shmem files.
	 * khugepaged does not yet work on special mappings. And
	 * file-private shmem THP is not supported.
	 */
	if (!hugepage_vma_check(vma, vm_flags))
		return 0;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma, vm_flags);
	return 0;
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we
		 * return all pagetables will be destroyed) until
		 * khugepaged has finished working on the pagetables
		 * under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

static void release_pte_page(struct page *page)
{
	mod_node_page_state(page_pgdat(page),
			NR_ISOLATED_ANON + page_is_file_lru(page),
			-compound_nr(page));
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct page *page, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = *_pte;

		page = pte_page(pteval);
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
				!PageCompound(page))
			release_pte_page(page);
	}

	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
		list_del(&page->lru);
		release_pte_page(page);
	}
}

static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}

static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1 &&
				++shared > khugepaged_max_ptes_shared) {
			result = SCAN_EXCEED_SHARED_PTE;
			goto out;
		}

		if (PageCompound(page)) {
			struct page *p;
			page = compound_head(page);

			/*
			 * Check if we have dealt with the compound page
			 * already
			 */
			list_for_each_entry(p, compound_pagelist, lru) {
				if (page == p)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}
		if (!pte_write(pteval) && PageSwapCache(page) &&
				!reuse_swap_page(page, NULL)) {
			/*
			 * Page is in the swap cache and cannot be re-used.
			 * It cannot be collapsed into a THP.
			 */
			unlock_page(page);
			result = SCAN_SWAP_CACHE_PAGE;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing an hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				compound_nr(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);
next:
		/* There should be enough young pte to collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(!referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
						    referenced, writable, result);
		return 1;
	}
out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}

static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl,
				      struct list_head *compound_pagelist)
{
	struct page *src_page, *tmp;
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				/*
				 * paravirt calls inside pte_clear here are
				 * superfluous.
				 */
				pte_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		release_pte_page(src_page);
	}
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

static int khugepaged_node_load[MAX_NUMNODES];

static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balance if several nodes have the same hit record */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
				nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}
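
/*
 * Non-NUMA fallback: the huge page is allocated up front by
 * khugepaged_prealloc_page() and kept in *hpage across scan iterations,
 * retrying with khugepaged_alloc_sleep() backoff for as long as
 * khugepaged stays enabled.
 */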
static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	/*
	 * If the hpage allocated earlier was briefly exposed in page cache
	 * before collapse_file() failed, it is possible that racing lookups
	 * have not yet completed, and would then be unpleasantly surprised by
	 * finding the hpage reused for the same mapping at a different offset.
	 * Just release the previous allocation if there is any danger of that.
	 */
	if (*hpage && page_count(*hpage) > 1) {
		put_page(*hpage);
		*hpage = NULL;
	}

	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif

/*
 * If mmap_lock was temporarily dropped, revalidate the vma
 * before taking mmap_lock again.
 * Return 0 on success, otherwise return a non-zero
 * value (scan code).
 */

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags))
		return SCAN_VMA_CHECK;
	/* Anon VMA expected */
	if (!vma->anon_vma || vma->vm_ops)
		return SCAN_VMA_CHECK;
	return 0;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held,
 * but with mmap_lock held to protect against vma changes.
 */

static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd,
					int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, haddr),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		vmf.pte = pte_offset_map(pmd, address);
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte)) {
			pte_unmap(vmf.pte);
			continue;
		}
		swapped_in++;
		ret = do_swap_page(&vmf);

		/* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
		if (ret & VM_FAULT_RETRY) {
			mmap_read_lock(mm);
			if (hugepage_vma_revalidate(mm, haddr, &vma)) {
				/* vma is no longer available, don't continue to swapin */
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
			/* check if the pmd is still valid */
			if (mm_find_pmd(mm, haddr) != pmd) {
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
		}
		if (ret & VM_FAULT_ERROR) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
	}

	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
	if (swapped_in)
		lru_add_drain();

	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}

static void collapse_huge_page(struct mm_struct *mm,
			       unsigned long address,
			       struct page **hpage,
			       int node, int referenced, int unmapped)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;
	gfp_t gfp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}
	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);

	mmap_read_lock(mm);
Shutemov result = hugepage_vma_revalidate(mm, address, &vma); 1101b46e756fSKirill A. Shutemov if (result) { 1102d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 1103b46e756fSKirill A. Shutemov goto out_nolock; 1104b46e756fSKirill A. Shutemov } 1105b46e756fSKirill A. Shutemov 1106b46e756fSKirill A. Shutemov pmd = mm_find_pmd(mm, address); 1107b46e756fSKirill A. Shutemov if (!pmd) { 1108b46e756fSKirill A. Shutemov result = SCAN_PMD_NULL; 1109d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 1110b46e756fSKirill A. Shutemov goto out_nolock; 1111b46e756fSKirill A. Shutemov } 1112b46e756fSKirill A. Shutemov 1113b46e756fSKirill A. Shutemov /* 1114c1e8d7c6SMichel Lespinasse * __collapse_huge_page_swapin always returns with mmap_lock locked. 1115c1e8d7c6SMichel Lespinasse * If it fails, we release mmap_lock and jump out_nolock. 1116b46e756fSKirill A. Shutemov * Continuing to collapse causes inconsistency. 1117b46e756fSKirill A. Shutemov */ 1118ffe945e6SKirill A. Shutemov if (unmapped && !__collapse_huge_page_swapin(mm, vma, address, 1119ffe945e6SKirill A. Shutemov pmd, referenced)) { 1120d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 1121b46e756fSKirill A. Shutemov goto out_nolock; 1122b46e756fSKirill A. Shutemov } 1123b46e756fSKirill A. Shutemov 1124d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 1125b46e756fSKirill A. Shutemov /* 1126b46e756fSKirill A. Shutemov * Prevent all access to pagetables with the exception of 1127b46e756fSKirill A. Shutemov * gup_fast later handled by the ptep_clear_flush and the VM 1128b46e756fSKirill A. Shutemov * handled by the anon_vma lock + PG_lock. 1129b46e756fSKirill A. Shutemov */ 1130d8ed45c5SMichel Lespinasse mmap_write_lock(mm); 1131c131f751SKirill A. Shutemov result = hugepage_vma_revalidate(mm, address, &vma); 1132b46e756fSKirill A. Shutemov if (result) 113318d24a7cSMiaohe Lin goto out_up_write; 1134b46e756fSKirill A. Shutemov /* check if the pmd is still valid */ 1135b46e756fSKirill A. Shutemov if (mm_find_pmd(mm, address) != pmd) 113618d24a7cSMiaohe Lin goto out_up_write; 1137b46e756fSKirill A. Shutemov 1138b46e756fSKirill A. Shutemov anon_vma_lock_write(vma->anon_vma); 1139b46e756fSKirill A. Shutemov 11407269f999SJérôme Glisse mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, 11416f4f13e8SJérôme Glisse address, address + HPAGE_PMD_SIZE); 1142ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 1143ec649c9dSVille Syrjälä 1144ec649c9dSVille Syrjälä pte = pte_offset_map(pmd, address); 1145ec649c9dSVille Syrjälä pte_ptl = pte_lockptr(mm, pmd); 1146ec649c9dSVille Syrjälä 1147b46e756fSKirill A. Shutemov pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ 1148b46e756fSKirill A. Shutemov /* 1149b46e756fSKirill A. Shutemov * After this gup_fast can't run anymore. This also removes 1150b46e756fSKirill A. Shutemov * any huge TLB entry from the CPU so we won't allow 1151b46e756fSKirill A. Shutemov * huge and small TLB entries for the same virtual address 1152b46e756fSKirill A. Shutemov * to avoid the risk of CPU bugs in that area. 1153b46e756fSKirill A. Shutemov */ 1154b46e756fSKirill A. Shutemov _pmd = pmdp_collapse_flush(vma, address, pmd); 1155b46e756fSKirill A. Shutemov spin_unlock(pmd_ptl); 1156ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_end(&range); 1157b46e756fSKirill A. Shutemov 1158b46e756fSKirill A. Shutemov spin_lock(pte_ptl); 11595503fbf2SKirill A. Shutemov isolated = __collapse_huge_page_isolate(vma, address, pte, 11605503fbf2SKirill A. Shutemov &compound_pagelist); 1161b46e756fSKirill A. 
Shutemov spin_unlock(pte_ptl); 1162b46e756fSKirill A. Shutemov 1163b46e756fSKirill A. Shutemov if (unlikely(!isolated)) { 1164b46e756fSKirill A. Shutemov pte_unmap(pte); 1165b46e756fSKirill A. Shutemov spin_lock(pmd_ptl); 1166b46e756fSKirill A. Shutemov BUG_ON(!pmd_none(*pmd)); 1167b46e756fSKirill A. Shutemov /* 1168b46e756fSKirill A. Shutemov * We can only use set_pmd_at when establishing 1169b46e756fSKirill A. Shutemov * hugepmds and never for establishing regular pmds that 1170b46e756fSKirill A. Shutemov * points to regular pagetables. Use pmd_populate for that 1171b46e756fSKirill A. Shutemov */ 1172b46e756fSKirill A. Shutemov pmd_populate(mm, pmd, pmd_pgtable(_pmd)); 1173b46e756fSKirill A. Shutemov spin_unlock(pmd_ptl); 1174b46e756fSKirill A. Shutemov anon_vma_unlock_write(vma->anon_vma); 1175b46e756fSKirill A. Shutemov result = SCAN_FAIL; 117618d24a7cSMiaohe Lin goto out_up_write; 1177b46e756fSKirill A. Shutemov } 1178b46e756fSKirill A. Shutemov 1179b46e756fSKirill A. Shutemov /* 1180b46e756fSKirill A. Shutemov * All pages are isolated and locked so anon_vma rmap 1181b46e756fSKirill A. Shutemov * can't run anymore. 1182b46e756fSKirill A. Shutemov */ 1183b46e756fSKirill A. Shutemov anon_vma_unlock_write(vma->anon_vma); 1184b46e756fSKirill A. Shutemov 11855503fbf2SKirill A. Shutemov __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl, 11865503fbf2SKirill A. Shutemov &compound_pagelist); 1187b46e756fSKirill A. Shutemov pte_unmap(pte); 1188588d01f9SMiaohe Lin /* 1189588d01f9SMiaohe Lin * spin_lock() below is not the equivalent of smp_wmb(), but 1190588d01f9SMiaohe Lin * the smp_wmb() inside __SetPageUptodate() can be reused to 1191588d01f9SMiaohe Lin * avoid the copy_huge_page writes to become visible after 1192588d01f9SMiaohe Lin * the set_pmd_at() write. 1193588d01f9SMiaohe Lin */ 1194b46e756fSKirill A. Shutemov __SetPageUptodate(new_page); 1195b46e756fSKirill A. Shutemov pgtable = pmd_pgtable(_pmd); 1196b46e756fSKirill A. Shutemov 1197b46e756fSKirill A. Shutemov _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); 1198f55e1014SLinus Torvalds _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); 1199b46e756fSKirill A. Shutemov 1200b46e756fSKirill A. Shutemov spin_lock(pmd_ptl); 1201b46e756fSKirill A. Shutemov BUG_ON(!pmd_none(*pmd)); 1202be5d0a74SJohannes Weiner page_add_new_anon_rmap(new_page, vma, address, true); 1203b518154eSJoonsoo Kim lru_cache_add_inactive_or_unevictable(new_page, vma); 1204b46e756fSKirill A. Shutemov pgtable_trans_huge_deposit(mm, pmd, pgtable); 1205b46e756fSKirill A. Shutemov set_pmd_at(mm, address, pmd, _pmd); 1206b46e756fSKirill A. Shutemov update_mmu_cache_pmd(vma, address, pmd); 1207b46e756fSKirill A. Shutemov spin_unlock(pmd_ptl); 1208b46e756fSKirill A. Shutemov 1209b46e756fSKirill A. Shutemov *hpage = NULL; 1210b46e756fSKirill A. Shutemov 1211b46e756fSKirill A. Shutemov khugepaged_pages_collapsed++; 1212b46e756fSKirill A. Shutemov result = SCAN_SUCCEED; 1213b46e756fSKirill A. Shutemov out_up_write: 1214d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 1215b46e756fSKirill A. Shutemov out_nolock: 12169d82c694SJohannes Weiner if (!IS_ERR_OR_NULL(*hpage)) 1217bbc6b703SMatthew Wilcox (Oracle) mem_cgroup_uncharge(page_folio(*hpage)); 1218b46e756fSKirill A. Shutemov trace_mm_collapse_huge_page(mm, isolated, result); 1219b46e756fSKirill A. Shutemov return; 1220b46e756fSKirill A. Shutemov } 1221b46e756fSKirill A. Shutemov 1222b46e756fSKirill A. Shutemov static int khugepaged_scan_pmd(struct mm_struct *mm, 1223b46e756fSKirill A. 
Shutemov struct vm_area_struct *vma, 1224b46e756fSKirill A. Shutemov unsigned long address, 1225b46e756fSKirill A. Shutemov struct page **hpage) 1226b46e756fSKirill A. Shutemov { 1227b46e756fSKirill A. Shutemov pmd_t *pmd; 1228b46e756fSKirill A. Shutemov pte_t *pte, *_pte; 122971a2c112SKirill A. Shutemov int ret = 0, result = 0, referenced = 0; 123071a2c112SKirill A. Shutemov int none_or_zero = 0, shared = 0; 1231b46e756fSKirill A. Shutemov struct page *page = NULL; 1232b46e756fSKirill A. Shutemov unsigned long _address; 1233b46e756fSKirill A. Shutemov spinlock_t *ptl; 1234b46e756fSKirill A. Shutemov int node = NUMA_NO_NODE, unmapped = 0; 12350db501f7SEbru Akagunduz bool writable = false; 1236b46e756fSKirill A. Shutemov 1237b46e756fSKirill A. Shutemov VM_BUG_ON(address & ~HPAGE_PMD_MASK); 1238b46e756fSKirill A. Shutemov 1239b46e756fSKirill A. Shutemov pmd = mm_find_pmd(mm, address); 1240b46e756fSKirill A. Shutemov if (!pmd) { 1241b46e756fSKirill A. Shutemov result = SCAN_PMD_NULL; 1242b46e756fSKirill A. Shutemov goto out; 1243b46e756fSKirill A. Shutemov } 1244b46e756fSKirill A. Shutemov 1245b46e756fSKirill A. Shutemov memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load)); 1246b46e756fSKirill A. Shutemov pte = pte_offset_map_lock(mm, pmd, address, &ptl); 1247b46e756fSKirill A. Shutemov for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR; 1248b46e756fSKirill A. Shutemov _pte++, _address += PAGE_SIZE) { 1249b46e756fSKirill A. Shutemov pte_t pteval = *_pte; 1250b46e756fSKirill A. Shutemov if (is_swap_pte(pteval)) { 1251b46e756fSKirill A. Shutemov if (++unmapped <= khugepaged_max_ptes_swap) { 1252e1e267c7SPeter Xu /* 1253e1e267c7SPeter Xu * Always be strict with uffd-wp 1254e1e267c7SPeter Xu * enabled swap entries. Please see 1255e1e267c7SPeter Xu * comment below for pte_uffd_wp(). 1256e1e267c7SPeter Xu */ 1257e1e267c7SPeter Xu if (pte_swp_uffd_wp(pteval)) { 1258e1e267c7SPeter Xu result = SCAN_PTE_UFFD_WP; 1259e1e267c7SPeter Xu goto out_unmap; 1260e1e267c7SPeter Xu } 1261b46e756fSKirill A. Shutemov continue; 1262b46e756fSKirill A. Shutemov } else { 1263b46e756fSKirill A. Shutemov result = SCAN_EXCEED_SWAP_PTE; 1264b46e756fSKirill A. Shutemov goto out_unmap; 1265b46e756fSKirill A. Shutemov } 1266b46e756fSKirill A. Shutemov } 1267b46e756fSKirill A. Shutemov if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { 1268b46e756fSKirill A. Shutemov if (!userfaultfd_armed(vma) && 1269b46e756fSKirill A. Shutemov ++none_or_zero <= khugepaged_max_ptes_none) { 1270b46e756fSKirill A. Shutemov continue; 1271b46e756fSKirill A. Shutemov } else { 1272b46e756fSKirill A. Shutemov result = SCAN_EXCEED_NONE_PTE; 1273b46e756fSKirill A. Shutemov goto out_unmap; 1274b46e756fSKirill A. Shutemov } 1275b46e756fSKirill A. Shutemov } 1276e1e267c7SPeter Xu if (pte_uffd_wp(pteval)) { 1277e1e267c7SPeter Xu /* 1278e1e267c7SPeter Xu * Don't collapse the page if any of the small 1279e1e267c7SPeter Xu * PTEs are armed with uffd write protection. 1280e1e267c7SPeter Xu * Here we can also mark the new huge pmd as 1281e1e267c7SPeter Xu * write protected if any of the small ones is 12828958b249SHaitao Shi * marked but that could bring unknown 1283e1e267c7SPeter Xu * userfault messages that falls outside of 1284e1e267c7SPeter Xu * the registered range. So, just be simple. 1285e1e267c7SPeter Xu */ 1286e1e267c7SPeter Xu result = SCAN_PTE_UFFD_WP; 1287e1e267c7SPeter Xu goto out_unmap; 1288e1e267c7SPeter Xu } 1289b46e756fSKirill A. Shutemov if (pte_write(pteval)) 1290b46e756fSKirill A. 
Shutemov writable = true; 1291b46e756fSKirill A. Shutemov 1292b46e756fSKirill A. Shutemov page = vm_normal_page(vma, _address, pteval); 1293b46e756fSKirill A. Shutemov if (unlikely(!page)) { 1294b46e756fSKirill A. Shutemov result = SCAN_PAGE_NULL; 1295b46e756fSKirill A. Shutemov goto out_unmap; 1296b46e756fSKirill A. Shutemov } 1297b46e756fSKirill A. Shutemov 129871a2c112SKirill A. Shutemov if (page_mapcount(page) > 1 && 129971a2c112SKirill A. Shutemov ++shared > khugepaged_max_ptes_shared) { 130071a2c112SKirill A. Shutemov result = SCAN_EXCEED_SHARED_PTE; 130171a2c112SKirill A. Shutemov goto out_unmap; 130271a2c112SKirill A. Shutemov } 130371a2c112SKirill A. Shutemov 13045503fbf2SKirill A. Shutemov page = compound_head(page); 1305b46e756fSKirill A. Shutemov 1306b46e756fSKirill A. Shutemov /* 1307b46e756fSKirill A. Shutemov * Record which node the original page is from and save this 1308b46e756fSKirill A. Shutemov * information to khugepaged_node_load[]. 1309b46e756fSKirill A. Shutemov * Khugepaged will allocate the hugepage from the node that has the max 1310b46e756fSKirill A. Shutemov * hit record. 1311b46e756fSKirill A. Shutemov */ 1312b46e756fSKirill A. Shutemov node = page_to_nid(page); 1313b46e756fSKirill A. Shutemov if (khugepaged_scan_abort(node)) { 1314b46e756fSKirill A. Shutemov result = SCAN_SCAN_ABORT; 1315b46e756fSKirill A. Shutemov goto out_unmap; 1316b46e756fSKirill A. Shutemov } 1317b46e756fSKirill A. Shutemov khugepaged_node_load[node]++; 1318b46e756fSKirill A. Shutemov if (!PageLRU(page)) { 1319b46e756fSKirill A. Shutemov result = SCAN_PAGE_LRU; 1320b46e756fSKirill A. Shutemov goto out_unmap; 1321b46e756fSKirill A. Shutemov } 1322b46e756fSKirill A. Shutemov if (PageLocked(page)) { 1323b46e756fSKirill A. Shutemov result = SCAN_PAGE_LOCK; 1324b46e756fSKirill A. Shutemov goto out_unmap; 1325b46e756fSKirill A. Shutemov } 1326b46e756fSKirill A. Shutemov if (!PageAnon(page)) { 1327b46e756fSKirill A. Shutemov result = SCAN_PAGE_ANON; 1328b46e756fSKirill A. Shutemov goto out_unmap; 1329b46e756fSKirill A. Shutemov } 1330b46e756fSKirill A. Shutemov 1331b46e756fSKirill A. Shutemov /* 13329445689fSKirill A. Shutemov * Check if the page has any GUP (or other external) pins. 13339445689fSKirill A. Shutemov * 13349445689fSKirill A. Shutemov * Here the check is racy: it may see total_mapcount > refcount 13359445689fSKirill A. Shutemov * in some cases. 13369445689fSKirill A. Shutemov * For example, one process with one forked child process. 13379445689fSKirill A. Shutemov * The parent has the PMD split due to MADV_DONTNEED, then 13389445689fSKirill A. Shutemov * the child is trying to unmap the whole PMD, but khugepaged 13399445689fSKirill A. Shutemov * may be scanning the parent between the child clearing the 13409445689fSKirill A. Shutemov * PageDoubleMap flag and decrementing the mapcount. So 13419445689fSKirill A. Shutemov * khugepaged may see total_mapcount > refcount. 13429445689fSKirill A. Shutemov * 13439445689fSKirill A. Shutemov * But such a case is ephemeral; we could always retry the collapse 13449445689fSKirill A. Shutemov * later. However it may report a false positive if the page 13459445689fSKirill A. Shutemov * has excessive GUP pins (i.e. 512). Anyway the same check 13469445689fSKirill A. Shutemov * will be done again later, so the risk seems low. 1347b46e756fSKirill A. Shutemov */ 13489445689fSKirill A. Shutemov if (!is_refcount_suitable(page)) { 1349b46e756fSKirill A. Shutemov result = SCAN_PAGE_COUNT; 1350b46e756fSKirill A. Shutemov goto out_unmap; 1351b46e756fSKirill A. Shutemov } 1352b46e756fSKirill A. 
Shutemov if (pte_young(pteval) || 1353b46e756fSKirill A. Shutemov page_is_young(page) || PageReferenced(page) || 1354b46e756fSKirill A. Shutemov mmu_notifier_test_young(vma->vm_mm, address)) 13550db501f7SEbru Akagunduz referenced++; 1356b46e756fSKirill A. Shutemov } 1357ffe945e6SKirill A. Shutemov if (!writable) { 1358ffe945e6SKirill A. Shutemov result = SCAN_PAGE_RO; 1359ffe945e6SKirill A. Shutemov } else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) { 1360ffe945e6SKirill A. Shutemov result = SCAN_LACK_REFERENCED_PAGE; 1361ffe945e6SKirill A. Shutemov } else { 1362b46e756fSKirill A. Shutemov result = SCAN_SUCCEED; 1363b46e756fSKirill A. Shutemov ret = 1; 1364b46e756fSKirill A. Shutemov } 1365b46e756fSKirill A. Shutemov out_unmap: 1366b46e756fSKirill A. Shutemov pte_unmap_unlock(pte, ptl); 1367b46e756fSKirill A. Shutemov if (ret) { 1368b46e756fSKirill A. Shutemov node = khugepaged_find_target_node(); 1369c1e8d7c6SMichel Lespinasse /* collapse_huge_page will return with the mmap_lock released */ 1370ffe945e6SKirill A. Shutemov collapse_huge_page(mm, address, hpage, node, 1371ffe945e6SKirill A. Shutemov referenced, unmapped); 1372b46e756fSKirill A. Shutemov } 1373b46e756fSKirill A. Shutemov out: 1374b46e756fSKirill A. Shutemov trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced, 1375b46e756fSKirill A. Shutemov none_or_zero, result, unmapped); 1376b46e756fSKirill A. Shutemov return ret; 1377b46e756fSKirill A. Shutemov } 1378b46e756fSKirill A. Shutemov 1379b46e756fSKirill A. Shutemov static void collect_mm_slot(struct mm_slot *mm_slot) 1380b46e756fSKirill A. Shutemov { 1381b46e756fSKirill A. Shutemov struct mm_struct *mm = mm_slot->mm; 1382b46e756fSKirill A. Shutemov 138335f3aa39SLance Roy lockdep_assert_held(&khugepaged_mm_lock); 1384b46e756fSKirill A. Shutemov 1385b46e756fSKirill A. Shutemov if (khugepaged_test_exit(mm)) { 1386b46e756fSKirill A. Shutemov /* free mm_slot */ 1387b46e756fSKirill A. Shutemov hash_del(&mm_slot->hash); 1388b46e756fSKirill A. Shutemov list_del(&mm_slot->mm_node); 1389b46e756fSKirill A. Shutemov 1390b46e756fSKirill A. Shutemov /* 1391b46e756fSKirill A. Shutemov * Not strictly needed because the mm exited already. 1392b46e756fSKirill A. Shutemov * 1393b46e756fSKirill A. Shutemov * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); 1394b46e756fSKirill A. Shutemov */ 1395b46e756fSKirill A. Shutemov 1396b46e756fSKirill A. Shutemov /* khugepaged_mm_lock actually not necessary for the below */ 1397b46e756fSKirill A. Shutemov free_mm_slot(mm_slot); 1398b46e756fSKirill A. Shutemov mmdrop(mm); 1399b46e756fSKirill A. Shutemov } 1400b46e756fSKirill A. Shutemov } 1401b46e756fSKirill A. Shutemov 1402396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_SHMEM 140327e1f827SSong Liu /* 140427e1f827SSong Liu * Notify khugepaged that given addr of the mm is pte-mapped THP. Then 140527e1f827SSong Liu * khugepaged should try to collapse the page table. 
140627e1f827SSong Liu */ 140727e1f827SSong Liu static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm, 140827e1f827SSong Liu unsigned long addr) 140927e1f827SSong Liu { 141027e1f827SSong Liu struct mm_slot *mm_slot; 141127e1f827SSong Liu 141227e1f827SSong Liu VM_BUG_ON(addr & ~HPAGE_PMD_MASK); 141327e1f827SSong Liu 141427e1f827SSong Liu spin_lock(&khugepaged_mm_lock); 141527e1f827SSong Liu mm_slot = get_mm_slot(mm); 141627e1f827SSong Liu if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP)) 141727e1f827SSong Liu mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr; 141827e1f827SSong Liu spin_unlock(&khugepaged_mm_lock); 141927e1f827SSong Liu return 0; 142027e1f827SSong Liu } 142127e1f827SSong Liu 142227e1f827SSong Liu /** 1423336e6b53SAlex Shi * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at 1424336e6b53SAlex Shi * address haddr. 1425336e6b53SAlex Shi * 1426336e6b53SAlex Shi * @mm: process address space where collapse happens 1427336e6b53SAlex Shi * @addr: THP collapse address 142827e1f827SSong Liu * 142927e1f827SSong Liu * This function checks whether all the PTEs in the PMD are pointing to the 143027e1f827SSong Liu * right THP. If so, retract the page table so the THP can refault in 143127e1f827SSong Liu * as pmd-mapped. 143227e1f827SSong Liu */ 143327e1f827SSong Liu void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) 143427e1f827SSong Liu { 143527e1f827SSong Liu unsigned long haddr = addr & HPAGE_PMD_MASK; 143627e1f827SSong Liu struct vm_area_struct *vma = find_vma(mm, haddr); 1437119a5fc1SHugh Dickins struct page *hpage; 143827e1f827SSong Liu pte_t *start_pte, *pte; 143927e1f827SSong Liu pmd_t *pmd, _pmd; 144027e1f827SSong Liu spinlock_t *ptl; 144127e1f827SSong Liu int count = 0; 144227e1f827SSong Liu int i; 144327e1f827SSong Liu 144427e1f827SSong Liu if (!vma || !vma->vm_file || 1445fef792a4SMiaohe Lin !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE)) 144627e1f827SSong Liu return; 144727e1f827SSong Liu 144827e1f827SSong Liu /* 144927e1f827SSong Liu * This vm_flags may not have VM_HUGEPAGE if the page was not 145027e1f827SSong Liu * collapsed by this mm. But we can still collapse if the page is 145127e1f827SSong Liu * a valid THP. 
Add extra VM_HUGEPAGE so hugepage_vma_check() 145227e1f827SSong Liu * will not fail the vma for missing VM_HUGEPAGE 145327e1f827SSong Liu */ 145427e1f827SSong Liu if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE)) 145527e1f827SSong Liu return; 145627e1f827SSong Liu 1457119a5fc1SHugh Dickins hpage = find_lock_page(vma->vm_file->f_mapping, 1458119a5fc1SHugh Dickins linear_page_index(vma, haddr)); 1459119a5fc1SHugh Dickins if (!hpage) 1460119a5fc1SHugh Dickins return; 1461119a5fc1SHugh Dickins 1462119a5fc1SHugh Dickins if (!PageHead(hpage)) 1463119a5fc1SHugh Dickins goto drop_hpage; 1464119a5fc1SHugh Dickins 146527e1f827SSong Liu pmd = mm_find_pmd(mm, haddr); 146627e1f827SSong Liu if (!pmd) 1467119a5fc1SHugh Dickins goto drop_hpage; 146827e1f827SSong Liu 146927e1f827SSong Liu start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl); 147027e1f827SSong Liu 147127e1f827SSong Liu /* step 1: check all mapped PTEs are to the right huge page */ 147227e1f827SSong Liu for (i = 0, addr = haddr, pte = start_pte; 147327e1f827SSong Liu i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) { 147427e1f827SSong Liu struct page *page; 147527e1f827SSong Liu 147627e1f827SSong Liu /* empty pte, skip */ 147727e1f827SSong Liu if (pte_none(*pte)) 147827e1f827SSong Liu continue; 147927e1f827SSong Liu 148027e1f827SSong Liu /* page swapped out, abort */ 148127e1f827SSong Liu if (!pte_present(*pte)) 148227e1f827SSong Liu goto abort; 148327e1f827SSong Liu 148427e1f827SSong Liu page = vm_normal_page(vma, addr, *pte); 148527e1f827SSong Liu 148627e1f827SSong Liu /* 1487119a5fc1SHugh Dickins * Note that uprobe, debugger, or MAP_PRIVATE may change the 1488119a5fc1SHugh Dickins * page table, but the new page will not be a subpage of hpage. 148927e1f827SSong Liu */ 1490119a5fc1SHugh Dickins if (hpage + i != page) 149127e1f827SSong Liu goto abort; 149227e1f827SSong Liu count++; 149327e1f827SSong Liu } 149427e1f827SSong Liu 149527e1f827SSong Liu /* step 2: adjust rmap */ 149627e1f827SSong Liu for (i = 0, addr = haddr, pte = start_pte; 149727e1f827SSong Liu i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) { 149827e1f827SSong Liu struct page *page; 149927e1f827SSong Liu 150027e1f827SSong Liu if (pte_none(*pte)) 150127e1f827SSong Liu continue; 150227e1f827SSong Liu page = vm_normal_page(vma, addr, *pte); 150327e1f827SSong Liu page_remove_rmap(page, false); 150427e1f827SSong Liu } 150527e1f827SSong Liu 150627e1f827SSong Liu pte_unmap_unlock(start_pte, ptl); 150727e1f827SSong Liu 150827e1f827SSong Liu /* step 3: set proper refcount and mm_counters. 
*/ 1509119a5fc1SHugh Dickins if (count) { 151027e1f827SSong Liu page_ref_sub(hpage, count); 151127e1f827SSong Liu add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count); 151227e1f827SSong Liu } 151327e1f827SSong Liu 151427e1f827SSong Liu /* step 4: collapse pmd */ 151527e1f827SSong Liu ptl = pmd_lock(vma->vm_mm, pmd); 1516723a80daSHugh Dickins _pmd = pmdp_collapse_flush(vma, haddr, pmd); 151727e1f827SSong Liu spin_unlock(ptl); 151827e1f827SSong Liu mm_dec_nr_ptes(mm); 151927e1f827SSong Liu pte_free(mm, pmd_pgtable(_pmd)); 1520119a5fc1SHugh Dickins 1521119a5fc1SHugh Dickins drop_hpage: 1522119a5fc1SHugh Dickins unlock_page(hpage); 1523119a5fc1SHugh Dickins put_page(hpage); 152427e1f827SSong Liu return; 152527e1f827SSong Liu 152627e1f827SSong Liu abort: 152727e1f827SSong Liu pte_unmap_unlock(start_pte, ptl); 1528119a5fc1SHugh Dickins goto drop_hpage; 152927e1f827SSong Liu } 153027e1f827SSong Liu 15310edf61e5SMiaohe Lin static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot) 153227e1f827SSong Liu { 153327e1f827SSong Liu struct mm_struct *mm = mm_slot->mm; 153427e1f827SSong Liu int i; 153527e1f827SSong Liu 153627e1f827SSong Liu if (likely(mm_slot->nr_pte_mapped_thp == 0)) 15370edf61e5SMiaohe Lin return; 153827e1f827SSong Liu 1539d8ed45c5SMichel Lespinasse if (!mmap_write_trylock(mm)) 15400edf61e5SMiaohe Lin return; 154127e1f827SSong Liu 154227e1f827SSong Liu if (unlikely(khugepaged_test_exit(mm))) 154327e1f827SSong Liu goto out; 154427e1f827SSong Liu 154527e1f827SSong Liu for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++) 154627e1f827SSong Liu collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]); 154727e1f827SSong Liu 154827e1f827SSong Liu out: 154927e1f827SSong Liu mm_slot->nr_pte_mapped_thp = 0; 1550d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 155127e1f827SSong Liu } 155227e1f827SSong Liu 1553f3f0e1d2SKirill A. Shutemov static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) 1554f3f0e1d2SKirill A. Shutemov { 1555f3f0e1d2SKirill A. Shutemov struct vm_area_struct *vma; 155618e77600SHugh Dickins struct mm_struct *mm; 1557f3f0e1d2SKirill A. Shutemov unsigned long addr; 1558f3f0e1d2SKirill A. Shutemov pmd_t *pmd, _pmd; 1559f3f0e1d2SKirill A. Shutemov 1560f3f0e1d2SKirill A. Shutemov i_mmap_lock_write(mapping); 1561f3f0e1d2SKirill A. Shutemov vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { 156227e1f827SSong Liu /* 156327e1f827SSong Liu * Check vma->anon_vma to exclude MAP_PRIVATE mappings that 156427e1f827SSong Liu * got written to. These VMAs are likely not worth the investment of 15653e4e28c5SMichel Lespinasse * mmap_write_lock(mm), as the PMD-mapping is likely to be split 156627e1f827SSong Liu * later. 156727e1f827SSong Liu * 156827e1f827SSong Liu * Note that the vma->anon_vma check is racy: the fault path can set 1569c1e8d7c6SMichel Lespinasse * it up after the check but before we take mmap_lock. 157027e1f827SSong Liu * But the page lock would prevent establishing any new ptes of the 157127e1f827SSong Liu * page, so we are safe. 157227e1f827SSong Liu * 157327e1f827SSong Liu * An alternative would be to drop the check, but to check that the page 157427e1f827SSong Liu * table is clear before calling pmdp_collapse_flush() under 157527e1f827SSong Liu * ptl. It has a higher chance to recover the THP for the VMA, but 157627e1f827SSong Liu * has a higher cost too. 157727e1f827SSong Liu */ 1578f3f0e1d2SKirill A. Shutemov if (vma->anon_vma) 1579f3f0e1d2SKirill A. Shutemov continue; 1580f3f0e1d2SKirill A. 
Shutemov addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); 1581f3f0e1d2SKirill A. Shutemov if (addr & ~HPAGE_PMD_MASK) 1582f3f0e1d2SKirill A. Shutemov continue; 1583f3f0e1d2SKirill A. Shutemov if (vma->vm_end < addr + HPAGE_PMD_SIZE) 1584f3f0e1d2SKirill A. Shutemov continue; 158518e77600SHugh Dickins mm = vma->vm_mm; 158618e77600SHugh Dickins pmd = mm_find_pmd(mm, addr); 1587f3f0e1d2SKirill A. Shutemov if (!pmd) 1588f3f0e1d2SKirill A. Shutemov continue; 1589f3f0e1d2SKirill A. Shutemov /* 1590c1e8d7c6SMichel Lespinasse * We need exclusive mmap_lock to retract page table. 159127e1f827SSong Liu * 159227e1f827SSong Liu * We use trylock due to lock inversion: we need to acquire 1593c1e8d7c6SMichel Lespinasse * mmap_lock while holding page lock. Fault path does it in 159427e1f827SSong Liu * reverse order. Trylock is a way to avoid deadlock. 1595f3f0e1d2SKirill A. Shutemov */ 159618e77600SHugh Dickins if (mmap_write_trylock(mm)) { 159718e77600SHugh Dickins if (!khugepaged_test_exit(mm)) { 159818e77600SHugh Dickins spinlock_t *ptl = pmd_lock(mm, pmd); 1599f3f0e1d2SKirill A. Shutemov /* assume page table is clear */ 1600f3f0e1d2SKirill A. Shutemov _pmd = pmdp_collapse_flush(vma, addr, pmd); 1601f3f0e1d2SKirill A. Shutemov spin_unlock(ptl); 160218e77600SHugh Dickins mm_dec_nr_ptes(mm); 160318e77600SHugh Dickins pte_free(mm, pmd_pgtable(_pmd)); 160418e77600SHugh Dickins } 160518e77600SHugh Dickins mmap_write_unlock(mm); 160627e1f827SSong Liu } else { 160727e1f827SSong Liu /* Try again later */ 160818e77600SHugh Dickins khugepaged_add_pte_mapped_thp(mm, addr); 1609f3f0e1d2SKirill A. Shutemov } 1610f3f0e1d2SKirill A. Shutemov } 1611f3f0e1d2SKirill A. Shutemov i_mmap_unlock_write(mapping); 1612f3f0e1d2SKirill A. Shutemov } 1613f3f0e1d2SKirill A. Shutemov 1614f3f0e1d2SKirill A. Shutemov /** 161599cb0dbdSSong Liu * collapse_file - collapse filemap/tmpfs/shmem pages into huge one. 1616f3f0e1d2SKirill A. Shutemov * 1617336e6b53SAlex Shi * @mm: process address space where collapse happens 1618336e6b53SAlex Shi * @file: file that collapse on 1619336e6b53SAlex Shi * @start: collapse start address 1620336e6b53SAlex Shi * @hpage: new allocated huge page for collapse 1621336e6b53SAlex Shi * @node: appointed node the new huge page allocate from 1622336e6b53SAlex Shi * 1623f3f0e1d2SKirill A. Shutemov * Basic scheme is simple, details are more complex: 162487c460a0SHugh Dickins * - allocate and lock a new huge page; 162577da9389SMatthew Wilcox * - scan page cache replacing old pages with the new one 162699cb0dbdSSong Liu * + swap/gup in pages if necessary; 1627f3f0e1d2SKirill A. Shutemov * + fill in gaps; 162877da9389SMatthew Wilcox * + keep old pages around in case rollback is required; 162977da9389SMatthew Wilcox * - if replacing succeeds: 1630f3f0e1d2SKirill A. Shutemov * + copy data over; 1631f3f0e1d2SKirill A. Shutemov * + free old pages; 163287c460a0SHugh Dickins * + unlock huge page; 1633f3f0e1d2SKirill A. Shutemov * - if replacing failed; 1634f3f0e1d2SKirill A. Shutemov * + put all pages back and unfreeze them; 163577da9389SMatthew Wilcox * + restore gaps in the page cache; 163687c460a0SHugh Dickins * + unlock and free huge page; 1637f3f0e1d2SKirill A. Shutemov */ 1638579c571eSSong Liu static void collapse_file(struct mm_struct *mm, 1639579c571eSSong Liu struct file *file, pgoff_t start, 1640f3f0e1d2SKirill A. Shutemov struct page **hpage, int node) 1641f3f0e1d2SKirill A. Shutemov { 1642579c571eSSong Liu struct address_space *mapping = file->f_mapping; 1643f3f0e1d2SKirill A. 
Shutemov gfp_t gfp; 164477da9389SMatthew Wilcox struct page *new_page; 1645f3f0e1d2SKirill A. Shutemov pgoff_t index, end = start + HPAGE_PMD_NR; 1646f3f0e1d2SKirill A. Shutemov LIST_HEAD(pagelist); 164777da9389SMatthew Wilcox XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER); 1648f3f0e1d2SKirill A. Shutemov int nr_none = 0, result = SCAN_SUCCEED; 164999cb0dbdSSong Liu bool is_shmem = shmem_file(file); 1650bf9eceadSMuchun Song int nr; 1651f3f0e1d2SKirill A. Shutemov 165299cb0dbdSSong Liu VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem); 1653f3f0e1d2SKirill A. Shutemov VM_BUG_ON(start & (HPAGE_PMD_NR - 1)); 1654f3f0e1d2SKirill A. Shutemov 1655f3f0e1d2SKirill A. Shutemov /* Only allocate from the target node */ 165641b6167eSMichal Hocko gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE; 1657f3f0e1d2SKirill A. Shutemov 1658f3f0e1d2SKirill A. Shutemov new_page = khugepaged_alloc_page(hpage, gfp, node); 1659f3f0e1d2SKirill A. Shutemov if (!new_page) { 1660f3f0e1d2SKirill A. Shutemov result = SCAN_ALLOC_HUGE_PAGE_FAIL; 1661f3f0e1d2SKirill A. Shutemov goto out; 1662f3f0e1d2SKirill A. Shutemov } 1663f3f0e1d2SKirill A. Shutemov 16648f425e4eSMatthew Wilcox (Oracle) if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) { 1665f3f0e1d2SKirill A. Shutemov result = SCAN_CGROUP_CHARGE_FAIL; 1666f3f0e1d2SKirill A. Shutemov goto out; 1667f3f0e1d2SKirill A. Shutemov } 16689d82c694SJohannes Weiner count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC); 1669f3f0e1d2SKirill A. Shutemov 1670*6b24ca4aSMatthew Wilcox (Oracle) /* 1671*6b24ca4aSMatthew Wilcox (Oracle) * Ensure we have slots for all the pages in the range. This is 1672*6b24ca4aSMatthew Wilcox (Oracle) * almost certainly a no-op because most of the pages must be present 1673*6b24ca4aSMatthew Wilcox (Oracle) */ 167495feeabbSHugh Dickins do { 167595feeabbSHugh Dickins xas_lock_irq(&xas); 167695feeabbSHugh Dickins xas_create_range(&xas); 167795feeabbSHugh Dickins if (!xas_error(&xas)) 167895feeabbSHugh Dickins break; 167995feeabbSHugh Dickins xas_unlock_irq(&xas); 168095feeabbSHugh Dickins if (!xas_nomem(&xas, GFP_KERNEL)) { 168195feeabbSHugh Dickins result = SCAN_FAIL; 168295feeabbSHugh Dickins goto out; 168395feeabbSHugh Dickins } 168495feeabbSHugh Dickins } while (1); 168595feeabbSHugh Dickins 1686042a3082SHugh Dickins __SetPageLocked(new_page); 168799cb0dbdSSong Liu if (is_shmem) 1688042a3082SHugh Dickins __SetPageSwapBacked(new_page); 1689f3f0e1d2SKirill A. Shutemov new_page->index = start; 1690f3f0e1d2SKirill A. Shutemov new_page->mapping = mapping; 1691f3f0e1d2SKirill A. Shutemov 1692f3f0e1d2SKirill A. Shutemov /* 169387c460a0SHugh Dickins * At this point the new_page is locked and not up-to-date. 169487c460a0SHugh Dickins * It's safe to insert it into the page cache, because nobody would 169587c460a0SHugh Dickins * be able to map it or use it in another way until we unlock it. 1696f3f0e1d2SKirill A. Shutemov */ 1697f3f0e1d2SKirill A. Shutemov 169877da9389SMatthew Wilcox xas_set(&xas, start); 169977da9389SMatthew Wilcox for (index = start; index < end; index++) { 170077da9389SMatthew Wilcox struct page *page = xas_next(&xas); 170177da9389SMatthew Wilcox 170277da9389SMatthew Wilcox VM_BUG_ON(index != xas.xa_index); 170399cb0dbdSSong Liu if (is_shmem) { 170477da9389SMatthew Wilcox if (!page) { 1705701270faSHugh Dickins /* 170699cb0dbdSSong Liu * Stop if extent has been truncated or 170799cb0dbdSSong Liu * hole-punched, and is now completely 170899cb0dbdSSong Liu * empty. 
1709701270faSHugh Dickins */ 1710701270faSHugh Dickins if (index == start) { 1711701270faSHugh Dickins if (!xas_next_entry(&xas, end - 1)) { 1712701270faSHugh Dickins result = SCAN_TRUNCATED; 1713042a3082SHugh Dickins goto xa_locked; 1714701270faSHugh Dickins } 1715701270faSHugh Dickins xas_set(&xas, index); 1716701270faSHugh Dickins } 171777da9389SMatthew Wilcox if (!shmem_charge(mapping->host, 1)) { 1718f3f0e1d2SKirill A. Shutemov result = SCAN_FAIL; 1719042a3082SHugh Dickins goto xa_locked; 1720f3f0e1d2SKirill A. Shutemov } 17214101196bSMatthew Wilcox (Oracle) xas_store(&xas, new_page); 172277da9389SMatthew Wilcox nr_none++; 172377da9389SMatthew Wilcox continue; 1724f3f0e1d2SKirill A. Shutemov } 1725f3f0e1d2SKirill A. Shutemov 17263159f943SMatthew Wilcox if (xa_is_value(page) || !PageUptodate(page)) { 172777da9389SMatthew Wilcox xas_unlock_irq(&xas); 1728f3f0e1d2SKirill A. Shutemov /* swap in or instantiate fallocated page */ 1729f3f0e1d2SKirill A. Shutemov if (shmem_getpage(mapping->host, index, &page, 1730acdd9f8eSHugh Dickins SGP_NOALLOC)) { 1731f3f0e1d2SKirill A. Shutemov result = SCAN_FAIL; 173277da9389SMatthew Wilcox goto xa_unlocked; 1733f3f0e1d2SKirill A. Shutemov } 1734f3f0e1d2SKirill A. Shutemov } else if (trylock_page(page)) { 1735f3f0e1d2SKirill A. Shutemov get_page(page); 1736042a3082SHugh Dickins xas_unlock_irq(&xas); 1737f3f0e1d2SKirill A. Shutemov } else { 1738f3f0e1d2SKirill A. Shutemov result = SCAN_PAGE_LOCK; 1739042a3082SHugh Dickins goto xa_locked; 1740f3f0e1d2SKirill A. Shutemov } 174199cb0dbdSSong Liu } else { /* !is_shmem */ 174299cb0dbdSSong Liu if (!page || xa_is_value(page)) { 174399cb0dbdSSong Liu xas_unlock_irq(&xas); 174499cb0dbdSSong Liu page_cache_sync_readahead(mapping, &file->f_ra, 174599cb0dbdSSong Liu file, index, 1746e5a59d30SDavid Howells end - index); 174799cb0dbdSSong Liu /* drain pagevecs to help isolate_lru_page() */ 174899cb0dbdSSong Liu lru_add_drain(); 174999cb0dbdSSong Liu page = find_lock_page(mapping, index); 175099cb0dbdSSong Liu if (unlikely(page == NULL)) { 175199cb0dbdSSong Liu result = SCAN_FAIL; 175299cb0dbdSSong Liu goto xa_unlocked; 175399cb0dbdSSong Liu } 175475f36069SSong Liu } else if (PageDirty(page)) { 175575f36069SSong Liu /* 175675f36069SSong Liu * khugepaged only works on read-only fd, 175775f36069SSong Liu * so this page is dirty because it hasn't 175875f36069SSong Liu * been flushed since first write. There 175975f36069SSong Liu * won't be new dirty pages. 176075f36069SSong Liu * 176175f36069SSong Liu * Trigger async flush here and hope the 176275f36069SSong Liu * writeback is done when khugepaged 176375f36069SSong Liu * revisits this page. 176475f36069SSong Liu * 176575f36069SSong Liu * This is a one-off situation. We are not 176675f36069SSong Liu * forcing writeback in loop. 176775f36069SSong Liu */ 176875f36069SSong Liu xas_unlock_irq(&xas); 176975f36069SSong Liu filemap_flush(mapping); 177075f36069SSong Liu result = SCAN_FAIL; 177175f36069SSong Liu goto xa_unlocked; 177274c42e1bSRongwei Wang } else if (PageWriteback(page)) { 177374c42e1bSRongwei Wang xas_unlock_irq(&xas); 177474c42e1bSRongwei Wang result = SCAN_FAIL; 177574c42e1bSRongwei Wang goto xa_unlocked; 177699cb0dbdSSong Liu } else if (trylock_page(page)) { 177799cb0dbdSSong Liu get_page(page); 177899cb0dbdSSong Liu xas_unlock_irq(&xas); 177999cb0dbdSSong Liu } else { 178099cb0dbdSSong Liu result = SCAN_PAGE_LOCK; 178199cb0dbdSSong Liu goto xa_locked; 178299cb0dbdSSong Liu } 178399cb0dbdSSong Liu } 1784f3f0e1d2SKirill A. Shutemov 1785f3f0e1d2SKirill A. 
Shutemov /* 1786b93b0163SMatthew Wilcox * The page must be locked, so we can drop the i_pages lock 1787f3f0e1d2SKirill A. Shutemov * without racing with truncate. 1788f3f0e1d2SKirill A. Shutemov */ 1789f3f0e1d2SKirill A. Shutemov VM_BUG_ON_PAGE(!PageLocked(page), page); 17904655e5e5SSong Liu 17914655e5e5SSong Liu /* make sure the page is up to date */ 17924655e5e5SSong Liu if (unlikely(!PageUptodate(page))) { 17934655e5e5SSong Liu result = SCAN_FAIL; 17944655e5e5SSong Liu goto out_unlock; 17954655e5e5SSong Liu } 179606a5e126SHugh Dickins 179706a5e126SHugh Dickins /* 179806a5e126SHugh Dickins * If file was truncated then extended, or hole-punched, before 179906a5e126SHugh Dickins * we locked the first page, then a THP might be there already. 180006a5e126SHugh Dickins */ 180106a5e126SHugh Dickins if (PageTransCompound(page)) { 180206a5e126SHugh Dickins result = SCAN_PAGE_COMPOUND; 180306a5e126SHugh Dickins goto out_unlock; 180406a5e126SHugh Dickins } 1805f3f0e1d2SKirill A. Shutemov 1806f3f0e1d2SKirill A. Shutemov if (page_mapping(page) != mapping) { 1807f3f0e1d2SKirill A. Shutemov result = SCAN_TRUNCATED; 1808f3f0e1d2SKirill A. Shutemov goto out_unlock; 1809f3f0e1d2SKirill A. Shutemov } 1810f3f0e1d2SKirill A. Shutemov 181174c42e1bSRongwei Wang if (!is_shmem && (PageDirty(page) || 181274c42e1bSRongwei Wang PageWriteback(page))) { 18134655e5e5SSong Liu /* 18144655e5e5SSong Liu * khugepaged only works on read-only fd, so this 18154655e5e5SSong Liu * page is dirty because it hasn't been flushed 18164655e5e5SSong Liu * since first write. 18174655e5e5SSong Liu */ 18184655e5e5SSong Liu result = SCAN_FAIL; 18194655e5e5SSong Liu goto out_unlock; 18204655e5e5SSong Liu } 18214655e5e5SSong Liu 1822f3f0e1d2SKirill A. Shutemov if (isolate_lru_page(page)) { 1823f3f0e1d2SKirill A. Shutemov result = SCAN_DEL_PAGE_LRU; 1824042a3082SHugh Dickins goto out_unlock; 1825f3f0e1d2SKirill A. Shutemov } 1826f3f0e1d2SKirill A. Shutemov 182799cb0dbdSSong Liu if (page_has_private(page) && 182899cb0dbdSSong Liu !try_to_release_page(page, GFP_KERNEL)) { 182999cb0dbdSSong Liu result = SCAN_PAGE_HAS_PRIVATE; 18302f33a706SHugh Dickins putback_lru_page(page); 183199cb0dbdSSong Liu goto out_unlock; 183299cb0dbdSSong Liu } 183399cb0dbdSSong Liu 1834f3f0e1d2SKirill A. Shutemov if (page_mapped(page)) 1835977fbdcdSMatthew Wilcox unmap_mapping_pages(mapping, index, 1, false); 1836f3f0e1d2SKirill A. Shutemov 183777da9389SMatthew Wilcox xas_lock_irq(&xas); 183877da9389SMatthew Wilcox xas_set(&xas, index); 1839f3f0e1d2SKirill A. Shutemov 184077da9389SMatthew Wilcox VM_BUG_ON_PAGE(page != xas_load(&xas), page); 1841f3f0e1d2SKirill A. Shutemov VM_BUG_ON_PAGE(page_mapped(page), page); 1842f3f0e1d2SKirill A. Shutemov 1843f3f0e1d2SKirill A. Shutemov /* 1844f3f0e1d2SKirill A. Shutemov * The page is expected to have page_count() == 3: 1845f3f0e1d2SKirill A. Shutemov * - we hold a pin on it; 184677da9389SMatthew Wilcox * - one reference from page cache; 1847f3f0e1d2SKirill A. Shutemov * - one from isolate_lru_page; 1848f3f0e1d2SKirill A. Shutemov */ 1849f3f0e1d2SKirill A. Shutemov if (!page_ref_freeze(page, 3)) { 1850f3f0e1d2SKirill A. Shutemov result = SCAN_PAGE_COUNT; 1851042a3082SHugh Dickins xas_unlock_irq(&xas); 1852042a3082SHugh Dickins putback_lru_page(page); 1853042a3082SHugh Dickins goto out_unlock; 1854f3f0e1d2SKirill A. Shutemov } 1855f3f0e1d2SKirill A. Shutemov 1856f3f0e1d2SKirill A. Shutemov /* 1857f3f0e1d2SKirill A. Shutemov * Add the page to the list to be able to undo the collapse if 1858f3f0e1d2SKirill A. 
Shutemov * something go wrong. 1859f3f0e1d2SKirill A. Shutemov */ 1860f3f0e1d2SKirill A. Shutemov list_add_tail(&page->lru, &pagelist); 1861f3f0e1d2SKirill A. Shutemov 1862f3f0e1d2SKirill A. Shutemov /* Finally, replace with the new page. */ 18634101196bSMatthew Wilcox (Oracle) xas_store(&xas, new_page); 1864f3f0e1d2SKirill A. Shutemov continue; 1865f3f0e1d2SKirill A. Shutemov out_unlock: 1866f3f0e1d2SKirill A. Shutemov unlock_page(page); 1867f3f0e1d2SKirill A. Shutemov put_page(page); 1868042a3082SHugh Dickins goto xa_unlocked; 1869f3f0e1d2SKirill A. Shutemov } 1870bf9eceadSMuchun Song nr = thp_nr_pages(new_page); 1871f3f0e1d2SKirill A. Shutemov 187299cb0dbdSSong Liu if (is_shmem) 187357b2847dSMuchun Song __mod_lruvec_page_state(new_page, NR_SHMEM_THPS, nr); 187409d91cdaSSong Liu else { 1875bf9eceadSMuchun Song __mod_lruvec_page_state(new_page, NR_FILE_THPS, nr); 187609d91cdaSSong Liu filemap_nr_thps_inc(mapping); 1877eb6ecbedSCollin Fijalkovich /* 1878eb6ecbedSCollin Fijalkovich * Paired with smp_mb() in do_dentry_open() to ensure 1879eb6ecbedSCollin Fijalkovich * i_writecount is up to date and the update to nr_thps is 1880eb6ecbedSCollin Fijalkovich * visible. Ensures the page cache will be truncated if the 1881eb6ecbedSCollin Fijalkovich * file is opened writable. 1882eb6ecbedSCollin Fijalkovich */ 1883eb6ecbedSCollin Fijalkovich smp_mb(); 1884eb6ecbedSCollin Fijalkovich if (inode_is_open_for_write(mapping->host)) { 1885eb6ecbedSCollin Fijalkovich result = SCAN_FAIL; 1886eb6ecbedSCollin Fijalkovich __mod_lruvec_page_state(new_page, NR_FILE_THPS, -nr); 1887eb6ecbedSCollin Fijalkovich filemap_nr_thps_dec(mapping); 1888eb6ecbedSCollin Fijalkovich goto xa_locked; 1889eb6ecbedSCollin Fijalkovich } 189009d91cdaSSong Liu } 189199cb0dbdSSong Liu 1892042a3082SHugh Dickins if (nr_none) { 18939d82c694SJohannes Weiner __mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none); 189499cb0dbdSSong Liu if (is_shmem) 18959d82c694SJohannes Weiner __mod_lruvec_page_state(new_page, NR_SHMEM, nr_none); 1896042a3082SHugh Dickins } 1897042a3082SHugh Dickins 1898*6b24ca4aSMatthew Wilcox (Oracle) /* Join all the small entries into a single multi-index entry */ 1899*6b24ca4aSMatthew Wilcox (Oracle) xas_set_order(&xas, start, HPAGE_PMD_ORDER); 1900*6b24ca4aSMatthew Wilcox (Oracle) xas_store(&xas, new_page); 1901042a3082SHugh Dickins xa_locked: 1902042a3082SHugh Dickins xas_unlock_irq(&xas); 190377da9389SMatthew Wilcox xa_unlocked: 1904042a3082SHugh Dickins 1905f3f0e1d2SKirill A. Shutemov if (result == SCAN_SUCCEED) { 190677da9389SMatthew Wilcox struct page *page, *tmp; 1907f3f0e1d2SKirill A. Shutemov 1908f3f0e1d2SKirill A. Shutemov /* 190977da9389SMatthew Wilcox * Replacing old pages with new one has succeeded, now we 191077da9389SMatthew Wilcox * need to copy the content and free the old pages. 1911f3f0e1d2SKirill A. Shutemov */ 19122af8ff29SHugh Dickins index = start; 1913f3f0e1d2SKirill A. Shutemov list_for_each_entry_safe(page, tmp, &pagelist, lru) { 19142af8ff29SHugh Dickins while (index < page->index) { 19152af8ff29SHugh Dickins clear_highpage(new_page + (index % HPAGE_PMD_NR)); 19162af8ff29SHugh Dickins index++; 19172af8ff29SHugh Dickins } 1918f3f0e1d2SKirill A. Shutemov copy_highpage(new_page + (page->index % HPAGE_PMD_NR), 1919f3f0e1d2SKirill A. Shutemov page); 1920f3f0e1d2SKirill A. Shutemov list_del(&page->lru); 1921f3f0e1d2SKirill A. Shutemov page->mapping = NULL; 1922042a3082SHugh Dickins page_ref_unfreeze(page, 1); 1923f3f0e1d2SKirill A. Shutemov ClearPageActive(page); 1924f3f0e1d2SKirill A. 
Shutemov ClearPageUnevictable(page); 1925042a3082SHugh Dickins unlock_page(page); 1926f3f0e1d2SKirill A. Shutemov put_page(page); 19272af8ff29SHugh Dickins index++; 19282af8ff29SHugh Dickins } 19292af8ff29SHugh Dickins while (index < end) { 19302af8ff29SHugh Dickins clear_highpage(new_page + (index % HPAGE_PMD_NR)); 19312af8ff29SHugh Dickins index++; 1932f3f0e1d2SKirill A. Shutemov } 1933f3f0e1d2SKirill A. Shutemov 1934f3f0e1d2SKirill A. Shutemov SetPageUptodate(new_page); 193587c460a0SHugh Dickins page_ref_add(new_page, HPAGE_PMD_NR - 1); 19366058eaecSJohannes Weiner if (is_shmem) 193799cb0dbdSSong Liu set_page_dirty(new_page); 19386058eaecSJohannes Weiner lru_cache_add(new_page); 1939f3f0e1d2SKirill A. Shutemov 1940042a3082SHugh Dickins /* 1941042a3082SHugh Dickins * Remove pte page tables, so we can re-fault the page as huge. 1942042a3082SHugh Dickins */ 1943042a3082SHugh Dickins retract_page_tables(mapping, start); 1944f3f0e1d2SKirill A. Shutemov *hpage = NULL; 194587aa7529SYang Shi 194687aa7529SYang Shi khugepaged_pages_collapsed++; 1947f3f0e1d2SKirill A. Shutemov } else { 194877da9389SMatthew Wilcox struct page *page; 1949aaa52e34SHugh Dickins 195077da9389SMatthew Wilcox /* Something went wrong: roll back page cache changes */ 195177da9389SMatthew Wilcox xas_lock_irq(&xas); 1952aaa52e34SHugh Dickins mapping->nrpages -= nr_none; 195399cb0dbdSSong Liu 195499cb0dbdSSong Liu if (is_shmem) 1955aaa52e34SHugh Dickins shmem_uncharge(mapping->host, nr_none); 1956aaa52e34SHugh Dickins 195777da9389SMatthew Wilcox xas_set(&xas, start); 195877da9389SMatthew Wilcox xas_for_each(&xas, page, end - 1) { 1959f3f0e1d2SKirill A. Shutemov page = list_first_entry_or_null(&pagelist, 1960f3f0e1d2SKirill A. Shutemov struct page, lru); 196177da9389SMatthew Wilcox if (!page || xas.xa_index < page->index) { 1962f3f0e1d2SKirill A. Shutemov if (!nr_none) 1963f3f0e1d2SKirill A. Shutemov break; 1964f3f0e1d2SKirill A. Shutemov nr_none--; 196559749e6cSJohannes Weiner /* Put holes back where they were */ 196677da9389SMatthew Wilcox xas_store(&xas, NULL); 1967f3f0e1d2SKirill A. Shutemov continue; 1968f3f0e1d2SKirill A. Shutemov } 1969f3f0e1d2SKirill A. Shutemov 197077da9389SMatthew Wilcox VM_BUG_ON_PAGE(page->index != xas.xa_index, page); 1971f3f0e1d2SKirill A. Shutemov 1972f3f0e1d2SKirill A. Shutemov /* Unfreeze the page. */ 1973f3f0e1d2SKirill A. Shutemov list_del(&page->lru); 1974f3f0e1d2SKirill A. Shutemov page_ref_unfreeze(page, 2); 197577da9389SMatthew Wilcox xas_store(&xas, page); 197677da9389SMatthew Wilcox xas_pause(&xas); 197777da9389SMatthew Wilcox xas_unlock_irq(&xas); 1978f3f0e1d2SKirill A. Shutemov unlock_page(page); 1979042a3082SHugh Dickins putback_lru_page(page); 198077da9389SMatthew Wilcox xas_lock_irq(&xas); 1981f3f0e1d2SKirill A. Shutemov } 1982f3f0e1d2SKirill A. Shutemov VM_BUG_ON(nr_none); 198377da9389SMatthew Wilcox xas_unlock_irq(&xas); 1984f3f0e1d2SKirill A. Shutemov 1985f3f0e1d2SKirill A. Shutemov new_page->mapping = NULL; 1986f3f0e1d2SKirill A. Shutemov } 1987042a3082SHugh Dickins 1988042a3082SHugh Dickins unlock_page(new_page); 1989f3f0e1d2SKirill A. Shutemov out: 1990f3f0e1d2SKirill A. Shutemov VM_BUG_ON(!list_empty(&pagelist)); 19919d82c694SJohannes Weiner if (!IS_ERR_OR_NULL(*hpage)) 1992bbc6b703SMatthew Wilcox (Oracle) mem_cgroup_uncharge(page_folio(*hpage)); 1993f3f0e1d2SKirill A. Shutemov /* TODO: tracepoints */ 1994f3f0e1d2SKirill A. Shutemov } 1995f3f0e1d2SKirill A. 
Shutemov 1996579c571eSSong Liu static void khugepaged_scan_file(struct mm_struct *mm, 1997579c571eSSong Liu struct file *file, pgoff_t start, struct page **hpage) 1998f3f0e1d2SKirill A. Shutemov { 1999f3f0e1d2SKirill A. Shutemov struct page *page = NULL; 2000579c571eSSong Liu struct address_space *mapping = file->f_mapping; 200185b392dbSMatthew Wilcox XA_STATE(xas, &mapping->i_pages, start); 2002f3f0e1d2SKirill A. Shutemov int present, swap; 2003f3f0e1d2SKirill A. Shutemov int node = NUMA_NO_NODE; 2004f3f0e1d2SKirill A. Shutemov int result = SCAN_SUCCEED; 2005f3f0e1d2SKirill A. Shutemov 2006f3f0e1d2SKirill A. Shutemov present = 0; 2007f3f0e1d2SKirill A. Shutemov swap = 0; 2008f3f0e1d2SKirill A. Shutemov memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load)); 2009f3f0e1d2SKirill A. Shutemov rcu_read_lock(); 201085b392dbSMatthew Wilcox xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) { 201185b392dbSMatthew Wilcox if (xas_retry(&xas, page)) 2012f3f0e1d2SKirill A. Shutemov continue; 2013f3f0e1d2SKirill A. Shutemov 201485b392dbSMatthew Wilcox if (xa_is_value(page)) { 2015f3f0e1d2SKirill A. Shutemov if (++swap > khugepaged_max_ptes_swap) { 2016f3f0e1d2SKirill A. Shutemov result = SCAN_EXCEED_SWAP_PTE; 2017f3f0e1d2SKirill A. Shutemov break; 2018f3f0e1d2SKirill A. Shutemov } 2019f3f0e1d2SKirill A. Shutemov continue; 2020f3f0e1d2SKirill A. Shutemov } 2021f3f0e1d2SKirill A. Shutemov 2022*6b24ca4aSMatthew Wilcox (Oracle) /* 2023*6b24ca4aSMatthew Wilcox (Oracle) * XXX: khugepaged should compact smaller compound pages 2024*6b24ca4aSMatthew Wilcox (Oracle) * into a PMD sized page 2025*6b24ca4aSMatthew Wilcox (Oracle) */ 2026f3f0e1d2SKirill A. Shutemov if (PageTransCompound(page)) { 2027f3f0e1d2SKirill A. Shutemov result = SCAN_PAGE_COMPOUND; 2028f3f0e1d2SKirill A. Shutemov break; 2029f3f0e1d2SKirill A. Shutemov } 2030f3f0e1d2SKirill A. Shutemov 2031f3f0e1d2SKirill A. Shutemov node = page_to_nid(page); 2032f3f0e1d2SKirill A. Shutemov if (khugepaged_scan_abort(node)) { 2033f3f0e1d2SKirill A. Shutemov result = SCAN_SCAN_ABORT; 2034f3f0e1d2SKirill A. Shutemov break; 2035f3f0e1d2SKirill A. Shutemov } 2036f3f0e1d2SKirill A. Shutemov khugepaged_node_load[node]++; 2037f3f0e1d2SKirill A. Shutemov 2038f3f0e1d2SKirill A. Shutemov if (!PageLRU(page)) { 2039f3f0e1d2SKirill A. Shutemov result = SCAN_PAGE_LRU; 2040f3f0e1d2SKirill A. Shutemov break; 2041f3f0e1d2SKirill A. Shutemov } 2042f3f0e1d2SKirill A. Shutemov 204399cb0dbdSSong Liu if (page_count(page) != 204499cb0dbdSSong Liu 1 + page_mapcount(page) + page_has_private(page)) { 2045f3f0e1d2SKirill A. Shutemov result = SCAN_PAGE_COUNT; 2046f3f0e1d2SKirill A. Shutemov break; 2047f3f0e1d2SKirill A. Shutemov } 2048f3f0e1d2SKirill A. Shutemov 2049f3f0e1d2SKirill A. Shutemov /* 2050f3f0e1d2SKirill A. Shutemov * We probably should check if the page is referenced here, but 2051f3f0e1d2SKirill A. Shutemov * nobody would transfer pte_young() to PageReferenced() for us. 2052f3f0e1d2SKirill A. Shutemov * And rmap walk here is just too costly... 2053f3f0e1d2SKirill A. Shutemov */ 2054f3f0e1d2SKirill A. Shutemov 2055f3f0e1d2SKirill A. Shutemov present++; 2056f3f0e1d2SKirill A. Shutemov 2057f3f0e1d2SKirill A. Shutemov if (need_resched()) { 205885b392dbSMatthew Wilcox xas_pause(&xas); 2059f3f0e1d2SKirill A. Shutemov cond_resched_rcu(); 2060f3f0e1d2SKirill A. Shutemov } 2061f3f0e1d2SKirill A. Shutemov } 2062f3f0e1d2SKirill A. Shutemov rcu_read_unlock(); 2063f3f0e1d2SKirill A. Shutemov 2064f3f0e1d2SKirill A. Shutemov if (result == SCAN_SUCCEED) { 2065f3f0e1d2SKirill A. 
Shutemov if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) { 2066f3f0e1d2SKirill A. Shutemov result = SCAN_EXCEED_NONE_PTE; 2067f3f0e1d2SKirill A. Shutemov } else { 2068f3f0e1d2SKirill A. Shutemov node = khugepaged_find_target_node(); 2069579c571eSSong Liu collapse_file(mm, file, start, hpage, node); 2070f3f0e1d2SKirill A. Shutemov } 2071f3f0e1d2SKirill A. Shutemov } 2072f3f0e1d2SKirill A. Shutemov 2073f3f0e1d2SKirill A. Shutemov /* TODO: tracepoints */ 2074f3f0e1d2SKirill A. Shutemov } 2075f3f0e1d2SKirill A. Shutemov #else 2076579c571eSSong Liu static void khugepaged_scan_file(struct mm_struct *mm, 2077579c571eSSong Liu struct file *file, pgoff_t start, struct page **hpage) 2078f3f0e1d2SKirill A. Shutemov { 2079f3f0e1d2SKirill A. Shutemov BUILD_BUG(); 2080f3f0e1d2SKirill A. Shutemov } 208127e1f827SSong Liu 20820edf61e5SMiaohe Lin static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot) 208327e1f827SSong Liu { 208427e1f827SSong Liu } 2085f3f0e1d2SKirill A. Shutemov #endif 2086f3f0e1d2SKirill A. Shutemov 2087b46e756fSKirill A. Shutemov static unsigned int khugepaged_scan_mm_slot(unsigned int pages, 2088b46e756fSKirill A. Shutemov struct page **hpage) 2089b46e756fSKirill A. Shutemov __releases(&khugepaged_mm_lock) 2090b46e756fSKirill A. Shutemov __acquires(&khugepaged_mm_lock) 2091b46e756fSKirill A. Shutemov { 2092b46e756fSKirill A. Shutemov struct mm_slot *mm_slot; 2093b46e756fSKirill A. Shutemov struct mm_struct *mm; 2094b46e756fSKirill A. Shutemov struct vm_area_struct *vma; 2095b46e756fSKirill A. Shutemov int progress = 0; 2096b46e756fSKirill A. Shutemov 2097b46e756fSKirill A. Shutemov VM_BUG_ON(!pages); 209835f3aa39SLance Roy lockdep_assert_held(&khugepaged_mm_lock); 2099b46e756fSKirill A. Shutemov 2100b46e756fSKirill A. Shutemov if (khugepaged_scan.mm_slot) 2101b46e756fSKirill A. Shutemov mm_slot = khugepaged_scan.mm_slot; 2102b46e756fSKirill A. Shutemov else { 2103b46e756fSKirill A. Shutemov mm_slot = list_entry(khugepaged_scan.mm_head.next, 2104b46e756fSKirill A. Shutemov struct mm_slot, mm_node); 2105b46e756fSKirill A. Shutemov khugepaged_scan.address = 0; 2106b46e756fSKirill A. Shutemov khugepaged_scan.mm_slot = mm_slot; 2107b46e756fSKirill A. Shutemov } 2108b46e756fSKirill A. Shutemov spin_unlock(&khugepaged_mm_lock); 210927e1f827SSong Liu khugepaged_collapse_pte_mapped_thps(mm_slot); 2110b46e756fSKirill A. Shutemov 2111b46e756fSKirill A. Shutemov mm = mm_slot->mm; 21123b454ad3SYang Shi /* 21133b454ad3SYang Shi * Don't wait for semaphore (to avoid long wait times). Just move to 21143b454ad3SYang Shi * the next mm on the list. 21153b454ad3SYang Shi */ 2116b46e756fSKirill A. Shutemov vma = NULL; 2117d8ed45c5SMichel Lespinasse if (unlikely(!mmap_read_trylock(mm))) 2118c1e8d7c6SMichel Lespinasse goto breakouterloop_mmap_lock; 21193b454ad3SYang Shi if (likely(!khugepaged_test_exit(mm))) 2120b46e756fSKirill A. Shutemov vma = find_vma(mm, khugepaged_scan.address); 2121b46e756fSKirill A. Shutemov 2122b46e756fSKirill A. Shutemov progress++; 2123b46e756fSKirill A. Shutemov for (; vma; vma = vma->vm_next) { 2124b46e756fSKirill A. Shutemov unsigned long hstart, hend; 2125b46e756fSKirill A. Shutemov 2126b46e756fSKirill A. Shutemov cond_resched(); 2127b46e756fSKirill A. Shutemov if (unlikely(khugepaged_test_exit(mm))) { 2128b46e756fSKirill A. Shutemov progress++; 2129b46e756fSKirill A. Shutemov break; 2130b46e756fSKirill A. Shutemov } 213150f8b92fSSong Liu if (!hugepage_vma_check(vma, vma->vm_flags)) { 2132b46e756fSKirill A. Shutemov skip: 2133b46e756fSKirill A. 
Shutemov progress++; 2134b46e756fSKirill A. Shutemov continue; 2135b46e756fSKirill A. Shutemov } 2136b46e756fSKirill A. Shutemov hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2137b46e756fSKirill A. Shutemov hend = vma->vm_end & HPAGE_PMD_MASK; 2138b46e756fSKirill A. Shutemov if (hstart >= hend) 2139b46e756fSKirill A. Shutemov goto skip; 2140b46e756fSKirill A. Shutemov if (khugepaged_scan.address > hend) 2141b46e756fSKirill A. Shutemov goto skip; 2142b46e756fSKirill A. Shutemov if (khugepaged_scan.address < hstart) 2143b46e756fSKirill A. Shutemov khugepaged_scan.address = hstart; 2144b46e756fSKirill A. Shutemov VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); 2145396bcc52SMatthew Wilcox (Oracle) if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma)) 2146396bcc52SMatthew Wilcox (Oracle) goto skip; 2147b46e756fSKirill A. Shutemov 2148b46e756fSKirill A. Shutemov while (khugepaged_scan.address < hend) { 2149b46e756fSKirill A. Shutemov int ret; 2150b46e756fSKirill A. Shutemov cond_resched(); 2151b46e756fSKirill A. Shutemov if (unlikely(khugepaged_test_exit(mm))) 2152b46e756fSKirill A. Shutemov goto breakouterloop; 2153b46e756fSKirill A. Shutemov 2154b46e756fSKirill A. Shutemov VM_BUG_ON(khugepaged_scan.address < hstart || 2155b46e756fSKirill A. Shutemov khugepaged_scan.address + HPAGE_PMD_SIZE > 2156b46e756fSKirill A. Shutemov hend); 215799cb0dbdSSong Liu if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) { 2158396bcc52SMatthew Wilcox (Oracle) struct file *file = get_file(vma->vm_file); 2159f3f0e1d2SKirill A. Shutemov pgoff_t pgoff = linear_page_index(vma, 2160f3f0e1d2SKirill A. Shutemov khugepaged_scan.address); 216199cb0dbdSSong Liu 2162d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 2163f3f0e1d2SKirill A. Shutemov ret = 1; 2164579c571eSSong Liu khugepaged_scan_file(mm, file, pgoff, hpage); 2165f3f0e1d2SKirill A. Shutemov fput(file); 2166f3f0e1d2SKirill A. Shutemov } else { 2167b46e756fSKirill A. Shutemov ret = khugepaged_scan_pmd(mm, vma, 2168b46e756fSKirill A. Shutemov khugepaged_scan.address, 2169b46e756fSKirill A. Shutemov hpage); 2170f3f0e1d2SKirill A. Shutemov } 2171b46e756fSKirill A. Shutemov /* move to next address */ 2172b46e756fSKirill A. Shutemov khugepaged_scan.address += HPAGE_PMD_SIZE; 2173b46e756fSKirill A. Shutemov progress += HPAGE_PMD_NR; 2174b46e756fSKirill A. Shutemov if (ret) 2175c1e8d7c6SMichel Lespinasse /* we released mmap_lock so break loop */ 2176c1e8d7c6SMichel Lespinasse goto breakouterloop_mmap_lock; 2177b46e756fSKirill A. Shutemov if (progress >= pages) 2178b46e756fSKirill A. Shutemov goto breakouterloop; 2179b46e756fSKirill A. Shutemov } 2180b46e756fSKirill A. Shutemov } 2181b46e756fSKirill A. Shutemov breakouterloop: 2182d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */ 2183c1e8d7c6SMichel Lespinasse breakouterloop_mmap_lock: 2184b46e756fSKirill A. Shutemov 2185b46e756fSKirill A. Shutemov spin_lock(&khugepaged_mm_lock); 2186b46e756fSKirill A. Shutemov VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); 2187b46e756fSKirill A. Shutemov /* 2188b46e756fSKirill A. Shutemov * Release the current mm_slot if this mm is about to die, or 2189b46e756fSKirill A. Shutemov * if we scanned all vmas of this mm. 2190b46e756fSKirill A. Shutemov */ 2191b46e756fSKirill A. Shutemov if (khugepaged_test_exit(mm) || !vma) { 2192b46e756fSKirill A. Shutemov /* 2193b46e756fSKirill A. Shutemov * Make sure that if mm_users is reaching zero while 2194b46e756fSKirill A. 
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}

static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}

static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
	bool wait = true;

	lru_add_drain_all();

	while (progress < pages) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;

		cond_resched();

		if (unlikely(kthread_should_stop() || try_to_freeze()))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}

	if (!IS_ERR_OR_NULL(hpage))
		put_page(hpage);
}

static bool khugepaged_should_wakeup(void)
{
	return kthread_should_stop() ||
	       time_after_eq(jiffies, khugepaged_sleep_expire);
}

static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		const unsigned long scan_sleep_jiffies =
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);

		if (!scan_sleep_jiffies)
			return;

		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
		wait_event_freezable_timeout(khugepaged_wait,
					     khugepaged_should_wakeup(),
					     scan_sleep_jiffies);
		return;
	}

	if (khugepaged_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}

static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan();
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}

static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	if (!khugepaged_enabled()) {
		calculate_min_free_kbytes();
		goto update_wmarks;
	}

	for_each_populated_zone(zone) {
		/*
		 * We don't need to worry about fragmentation of
		 * ZONE_MOVABLE since it only has movable pages.
		 */
		if (zone_idx(zone) > gfp_zone(GFP_USER))
			continue;

		nr_zones++;
	}

	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* don't ever allow reserving more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}

update_wmarks:
	setup_per_zone_wmarks();
}

int start_stop_khugepaged(void)
{
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
	set_recommended_min_free_kbytes();
fail:
	mutex_unlock(&khugepaged_mutex);
	return err;
}

void khugepaged_min_free_kbytes_update(void)
{
	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled() && khugepaged_thread)
		set_recommended_min_free_kbytes();
	mutex_unlock(&khugepaged_mutex);
}
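/*
 * Illustrative userspace sketch (not part of this file): with THP set to
 * "always" or "madvise", madvise(MADV_HUGEPAGE) registers the calling mm
 * with khugepaged, and the scan loop above may later collapse the range
 * into a huge page. The scan rate is governed by the sysfs knobs backing
 * khugepaged_pages_to_scan and khugepaged_scan_sleep_millisecs under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/. The 2MB size below is
 * only an assumption matching the usual PMD size on x86-64.
 *
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 2UL << 20;	// one PMD-sized, PMD-aligned range
 *		void *buf;
 *
 *		if (posix_memalign(&buf, len, len))
 *			return 1;
 *		if (madvise(buf, len, MADV_HUGEPAGE))	// register mm with khugepaged
 *			return 1;
 *		memset(buf, 0, len);	// fault in ptes for khugepaged to scan
 *		pause();		// give the scanner time to run
 *		return 0;
 *	}
 */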