// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "mm_slot.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_PMD_NONE,
	SCAN_PMD_MAPPED,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PTE_MAPPED_HUGEPAGE,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);
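/*
 * The tunables and statistics below are exposed through the sysfs
 * attributes defined later in this file; with the standard THP sysfs
 * layout they appear under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/.
 */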
/* default scan 8*512 pte (or vmas) every 30 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse hugepages if there is at least one pte mapped the
 * way it would have been mapped if the vma had been large enough during
 * page fault.
 *
 * Note that these are only respected if collapse was initiated by khugepaged.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

struct collapse_control {
	bool is_khugepaged;

	/* Num pages scanned per node */
	u32 node_load[MAX_NUMNODES];

	/* nodemask for allocation fallback */
	nodemask_t alloc_nmask;
};

/**
 * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
 * @slot: hash lookup from mm to mm_slot
 * @nr_pte_mapped_thp: number of pte-mapped THPs
 * @pte_mapped_thp: array of addresses of the pte-mapped THPs
 */
struct khugepaged_mm_slot {
	struct mm_slot slot;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct khugepaged_mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR_RW(scan_sleep_millisecs);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR_RW(alloc_sleep_millisecs);
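/*
 * Example (shell, illustrative): with the standard THP sysfs layout the
 * attributes in this section are writable at runtime, e.g. to make
 * khugepaged wake up every second instead of the default 10 seconds:
 *
 *	echo 1000 > /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 */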
static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR_RW(pages_to_scan);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR_RW(defrag);
/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over any unmapped ptes, in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
static ssize_t max_ptes_none_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t max_ptes_none_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR_RW(max_ptes_none);

static ssize_t max_ptes_swap_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t max_ptes_swap_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR_RW(max_ptes_swap);

static ssize_t max_ptes_shared_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t max_ptes_shared_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR_RW(max_ptes_shared);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes eligible for khugepaged to scan,
		 * register it here without waiting for a page fault that
		 * may not happen any time soon.
		 */
		khugepaged_enter_vma(vma, *vm_flags);
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}
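/*
 * Example (userspace, illustrative): hugepage_madvise() above is the
 * kernel side of an madvise(2) call such as:
 *
 *	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(buf, len, MADV_HUGEPAGE);
 *
 * which opts the mapping in to khugepaged scanning even when THP is in
 * "madvise" mode.
 */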
int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct khugepaged_mm_slot),
					  __alignof__(struct khugepaged_mm_slot),
					  0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline int hpage_collapse_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

void __khugepaged_enter(struct mm_struct *mm)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	int wakeup;

	mm_slot = mm_slot_alloc(mm_slot_cache);
	if (!mm_slot)
		return;

	slot = &mm_slot->slot;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		mm_slot_free(mm_slot_cache, mm_slot);
		return;
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot_insert(mm_slots_hash, mm, slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);
}
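/*
 * Note: __khugepaged_enter() above pins the mm with mmgrab() for as long
 * as it sits on the scan list; the matching mmdrop() is visible in the
 * mm_slot teardown path of __khugepaged_exit() below.
 */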
void khugepaged_enter_vma(struct vm_area_struct *vma,
			  unsigned long vm_flags)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
	    hugepage_flags_enabled()) {
		if (hugepage_vma_check(vma, vm_flags, false, false, true))
			__khugepaged_enter(vma->vm_mm);
	}
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	slot = mm_slot_lookup(mm_slots_hash, mm);
	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&slot->hash);
		list_del(&slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		mm_slot_free(mm_slot_cache, mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * hpage_collapse_test_exit() (which is guaranteed to run
		 * under mmap_lock read mode). Stop here (after we return all
		 * pagetables will be destroyed) until khugepaged has finished
		 * working on the pagetables under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

static void release_pte_folio(struct folio *folio)
{
	node_stat_mod_folio(folio,
			NR_ISOLATED_ANON + folio_is_file_lru(folio),
			-folio_nr_pages(folio));
	folio_unlock(folio);
	folio_putback_lru(folio);
}

static void release_pte_page(struct page *page)
{
	release_pte_folio(page_folio(page));
}
static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct folio *folio, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		unsigned long pfn;

		if (pte_none(pteval))
			continue;
		pfn = pte_pfn(pteval);
		if (is_zero_pfn(pfn))
			continue;
		folio = pfn_folio(pfn);
		if (folio_test_large(folio))
			continue;
		release_pte_folio(folio);
	}

	list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) {
		list_del(&folio->lru);
		release_pte_folio(folio);
	}
}
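/*
 * A page is only safe to collapse if the sole references to it are its
 * mapcounts plus, if it sits in the swap cache, the swap cache's own
 * per-subpage references (hence the compound_nr() term below). Any
 * reference beyond that indicates an extra pin, e.g. from GUP, and the
 * collapse must be refused.
 */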
static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}

static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct collapse_control *cc,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			++none_or_zero;
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1) {
			++shared;
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
				goto out;
			}
		}

		if (PageCompound(page)) {
			struct page *p;
			page = compound_head(page);

			/*
			 * Check if we have dealt with the compound page
			 * already
			 */
			list_for_each_entry(p, compound_pagelist, lru) {
				if (page == p)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (!isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				compound_nr(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);
		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);
next:
		/*
		 * If collapse was initiated by khugepaged, check that there
		 * are enough young ptes to justify collapsing the page
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || page_is_young(page) ||
		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
								     address)))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(cc->is_khugepaged && !referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
						    referenced, writable, result);
		return result;
	}
out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return result;
}
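/*
 * Copy the contents of the isolated small pages into the new huge page
 * and tear down the old mappings: none/zero ptes have their destination
 * subpage zero-filled, while mapped pages are copied, unmapped and then
 * released back to the LRU.
 */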
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl,
				      struct list_head *compound_pagelist)
{
	struct page *src_page, *tmp;
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				ptep_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			ptep_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, vma, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		mod_node_page_state(page_pgdat(src_page),
				    NR_ISOLATED_ANON + page_is_file_lru(src_page),
				    -compound_nr(src_page));
		unlock_page(src_page);
		free_swap_cache(src_page);
		putback_lru_page(src_page);
	}
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
	schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

struct collapse_control khugepaged_collapse_control = {
	.is_khugepaged = true,
};
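/*
 * Decide whether to abort the current scan on NUMA placement grounds:
 * if an earlier pte in this scan hit node A (node_load[A] != 0) and the
 * candidate node is further than node_reclaim_distance from A,
 * collapsing would force a costly remote hugepage allocation, so the
 * scan is abandoned instead.
 */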
static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (cc->node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!cc->node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (cc->node_load[nid] > max_value) {
			max_value = cc->node_load[nid];
			target_node = nid;
		}

	for_each_online_node(nid) {
		if (max_value == cc->node_load[nid])
			node_set(nid, cc->alloc_nmask);
	}

	return target_node;
}
#else
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	return 0;
}
#endif

static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
				      nodemask_t *nmask)
{
	*hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		return false;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return true;
}

/*
 * If the mmap_lock was temporarily dropped, revalidate the vma before
 * taking the mmap_lock again.
 * Returns an enum scan_result value.
 */

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
				   bool expect_anon,
				   struct vm_area_struct **vmap,
				   struct collapse_control *cc)
{
	struct vm_area_struct *vma;

	if (unlikely(hpage_collapse_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	if (!transhuge_vma_suitable(vma, address))
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags, false, false,
				cc->is_khugepaged))
		return SCAN_VMA_CHECK;
	/*
	 * Anon VMA expected, the address may be unmapped then
	 * remapped to file after khugepaged reacquired the mmap_lock.
	 *
	 * hugepage_vma_check may return true for qualified file
	 * vmas.
	 */
	if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
		return SCAN_PAGE_ANON;
	return SCAN_SUCCEED;
}

/*
 * See pmd_trans_unstable() for how the result may change out from
 * underneath us, even if we hold mmap_lock in read.
 */
static int find_pmd_or_thp_or_none(struct mm_struct *mm,
				   unsigned long address,
				   pmd_t **pmd)
{
	pmd_t pmde;

	*pmd = mm_find_pmd(mm, address);
	if (!*pmd)
		return SCAN_PMD_NULL;

	pmde = pmdp_get_lockless(*pmd);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/* See comments in pmd_none_or_trans_huge_or_clear_bad() */
	barrier();
#endif
	if (pmd_none(pmde))
		return SCAN_PMD_NONE;
	if (!pmd_present(pmde))
		return SCAN_PMD_NULL;
	if (pmd_trans_huge(pmde))
		return SCAN_PMD_MAPPED;
	if (pmd_devmap(pmde))
		return SCAN_PMD_NULL;
	if (pmd_bad(pmde))
		return SCAN_PMD_NULL;
	return SCAN_SUCCEED;
}

static int check_pmd_still_valid(struct mm_struct *mm,
				 unsigned long address,
				 pmd_t *pmd)
{
	pmd_t *new_pmd;
	int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);

	if (result != SCAN_SUCCEED)
		return result;
	if (new_pmd != pmd)
		return SCAN_FAIL;
	return SCAN_SUCCEED;
}
/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held.
 * Note that on failure the mmap_lock will be released.
 */

static int __collapse_huge_page_swapin(struct mm_struct *mm,
				       struct vm_area_struct *vma,
				       unsigned long haddr, pmd_t *pmd,
				       int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, haddr),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		vmf.pte = pte_offset_map(pmd, address);
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte)) {
			pte_unmap(vmf.pte);
			continue;
		}
		ret = do_swap_page(&vmf);

		/*
		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
		 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
		 * we do not retry here and the swap entry will remain in the
		 * pagetable, resulting in later failure.
		 */
		if (ret & VM_FAULT_RETRY) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			/* Likely, but not guaranteed, that page lock failed */
			return SCAN_PAGE_LOCK;
		}
		if (ret & VM_FAULT_ERROR) {
			mmap_read_unlock(mm);
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return SCAN_FAIL;
		}
		swapped_in++;
	}

	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
	if (swapped_in)
		lru_add_drain();

	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return SCAN_SUCCEED;
}
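/*
 * Allocate the new huge page and charge it to @mm's memcg up front, so
 * that the expensive parts of collapse only run against an already
 * accounted page. Failure is reported as SCAN_ALLOC_HUGE_PAGE_FAIL or
 * SCAN_CGROUP_CHARGE_FAIL respectively.
 */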
static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
			      struct collapse_control *cc)
{
	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
		     GFP_TRANSHUGE);
	int node = hpage_collapse_find_target_node(cc);
	struct folio *folio;

	if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
		return SCAN_ALLOC_HUGE_PAGE_FAIL;

	folio = page_folio(*hpage);
	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
		folio_put(folio);
		*hpage = NULL;
		return SCAN_CGROUP_CHARGE_FAIL;
	}
	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);

	return SCAN_SUCCEED;
}

static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
			      int referenced, int unmapped,
			      struct collapse_control *cc)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *hpage;
	spinlock_t *pmd_ptl, *pte_ptl;
	int result = SCAN_FAIL;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);

	result = alloc_charge_hpage(&hpage, mm, cc);
	if (result != SCAN_SUCCEED)
		goto out_nolock;

	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	result = find_pmd_or_thp_or_none(mm, address, &pmd);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	if (unmapped) {
		/*
		 * __collapse_huge_page_swapin will return with mmap_lock
		 * released when it fails. So we jump to out_nolock directly
		 * in that case. Continuing to collapse causes inconsistency.
		 */
		result = __collapse_huge_page_swapin(mm, vma, address, pmd,
						     referenced);
		if (result != SCAN_SUCCEED)
			goto out_nolock;
	}

	mmap_read_unlock(mm);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED)
		goto out_up_write;
	/* check if the pmd is still valid */
	result = check_pmd_still_valid(mm, address, pmd);
	if (result != SCAN_SUCCEED)
		goto out_up_write;

	vma_start_write(vma);
	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
				address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * This removes any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address to
	 * avoid the risk of CPU bugs in that area.
	 *
	 * Parallel fast GUP is fine since fast GUP will back off when
	 * it detects PMD is changed.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);
	tlb_remove_table_sync_one();

	spin_lock(pte_ptl);
	result = __collapse_huge_page_isolate(vma, address, pte, cc,
					      &compound_pagelist);
	spin_unlock(pte_ptl);

	if (unlikely(result != SCAN_SUCCEED)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * point to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
Shutemov spin_unlock(pmd_ptl); 1099b46e756fSKirill A. Shutemov anon_vma_unlock_write(vma->anon_vma); 110018d24a7cSMiaohe Lin goto out_up_write; 1101b46e756fSKirill A. Shutemov } 1102b46e756fSKirill A. Shutemov 1103b46e756fSKirill A. Shutemov /* 1104b46e756fSKirill A. Shutemov * All pages are isolated and locked so anon_vma rmap 1105b46e756fSKirill A. Shutemov * can't run anymore. 1106b46e756fSKirill A. Shutemov */ 1107b46e756fSKirill A. Shutemov anon_vma_unlock_write(vma->anon_vma); 1108b46e756fSKirill A. Shutemov 110950ad2f24SZach O'Keefe __collapse_huge_page_copy(pte, hpage, vma, address, pte_ptl, 11105503fbf2SKirill A. Shutemov &compound_pagelist); 1111b46e756fSKirill A. Shutemov pte_unmap(pte); 1112588d01f9SMiaohe Lin /* 1113588d01f9SMiaohe Lin * spin_lock() below is not the equivalent of smp_wmb(), but 1114588d01f9SMiaohe Lin * the smp_wmb() inside __SetPageUptodate() can be reused to 1115588d01f9SMiaohe Lin * avoid the copy_huge_page writes to become visible after 1116588d01f9SMiaohe Lin * the set_pmd_at() write. 1117588d01f9SMiaohe Lin */ 111850ad2f24SZach O'Keefe __SetPageUptodate(hpage); 1119b46e756fSKirill A. Shutemov pgtable = pmd_pgtable(_pmd); 1120b46e756fSKirill A. Shutemov 112150ad2f24SZach O'Keefe _pmd = mk_huge_pmd(hpage, vma->vm_page_prot); 1122f55e1014SLinus Torvalds _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); 1123b46e756fSKirill A. Shutemov 1124b46e756fSKirill A. Shutemov spin_lock(pmd_ptl); 1125b46e756fSKirill A. Shutemov BUG_ON(!pmd_none(*pmd)); 112650ad2f24SZach O'Keefe page_add_new_anon_rmap(hpage, vma, address); 112750ad2f24SZach O'Keefe lru_cache_add_inactive_or_unevictable(hpage, vma); 1128b46e756fSKirill A. Shutemov pgtable_trans_huge_deposit(mm, pmd, pgtable); 1129b46e756fSKirill A. Shutemov set_pmd_at(mm, address, pmd, _pmd); 1130b46e756fSKirill A. Shutemov update_mmu_cache_pmd(vma, address, pmd); 1131b46e756fSKirill A. Shutemov spin_unlock(pmd_ptl); 1132b46e756fSKirill A. Shutemov 113350ad2f24SZach O'Keefe hpage = NULL; 1134b46e756fSKirill A. Shutemov 1135b46e756fSKirill A. Shutemov result = SCAN_SUCCEED; 1136b46e756fSKirill A. Shutemov out_up_write: 1137d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 1138b46e756fSKirill A. Shutemov out_nolock: 11397cb1d7efSPeter Xu if (hpage) 114050ad2f24SZach O'Keefe put_page(hpage); 114150ad2f24SZach O'Keefe trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result); 114250ad2f24SZach O'Keefe return result; 1143b46e756fSKirill A. Shutemov } 1144b46e756fSKirill A. Shutemov 11457d2c4385SZach O'Keefe static int hpage_collapse_scan_pmd(struct mm_struct *mm, 1146b46e756fSKirill A. Shutemov struct vm_area_struct *vma, 114750ad2f24SZach O'Keefe unsigned long address, bool *mmap_locked, 114834d6b470SZach O'Keefe struct collapse_control *cc) 1149b46e756fSKirill A. Shutemov { 1150b46e756fSKirill A. Shutemov pmd_t *pmd; 1151b46e756fSKirill A. Shutemov pte_t *pte, *_pte; 115250ad2f24SZach O'Keefe int result = SCAN_FAIL, referenced = 0; 115371a2c112SKirill A. Shutemov int none_or_zero = 0, shared = 0; 1154b46e756fSKirill A. Shutemov struct page *page = NULL; 1155b46e756fSKirill A. Shutemov unsigned long _address; 1156b46e756fSKirill A. Shutemov spinlock_t *ptl; 1157b46e756fSKirill A. Shutemov int node = NUMA_NO_NODE, unmapped = 0; 11580db501f7SEbru Akagunduz bool writable = false; 1159b46e756fSKirill A. Shutemov 1160b46e756fSKirill A. Shutemov VM_BUG_ON(address & ~HPAGE_PMD_MASK); 1161b46e756fSKirill A. 
Shutemov 116250722804SZach O'Keefe result = find_pmd_or_thp_or_none(mm, address, &pmd); 116350722804SZach O'Keefe if (result != SCAN_SUCCEED) 1164b46e756fSKirill A. Shutemov goto out; 1165b46e756fSKirill A. Shutemov 116634d6b470SZach O'Keefe memset(cc->node_load, 0, sizeof(cc->node_load)); 1167e031ff96SYang Shi nodes_clear(cc->alloc_nmask); 1168b46e756fSKirill A. Shutemov pte = pte_offset_map_lock(mm, pmd, address, &ptl); 1169b46e756fSKirill A. Shutemov for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR; 1170b46e756fSKirill A. Shutemov _pte++, _address += PAGE_SIZE) { 1171b46e756fSKirill A. Shutemov pte_t pteval = *_pte; 1172b46e756fSKirill A. Shutemov if (is_swap_pte(pteval)) { 1173d8ea7cc8SZach O'Keefe ++unmapped; 1174d8ea7cc8SZach O'Keefe if (!cc->is_khugepaged || 1175d8ea7cc8SZach O'Keefe unmapped <= khugepaged_max_ptes_swap) { 1176e1e267c7SPeter Xu /* 1177e1e267c7SPeter Xu * Always be strict with uffd-wp 1178e1e267c7SPeter Xu * enabled swap entries. Please see 1179e1e267c7SPeter Xu * comment below for pte_uffd_wp(). 1180e1e267c7SPeter Xu */ 11812bad466cSPeter Xu if (pte_swp_uffd_wp_any(pteval)) { 1182e1e267c7SPeter Xu result = SCAN_PTE_UFFD_WP; 1183e1e267c7SPeter Xu goto out_unmap; 1184e1e267c7SPeter Xu } 1185b46e756fSKirill A. Shutemov continue; 1186b46e756fSKirill A. Shutemov } else { 1187b46e756fSKirill A. Shutemov result = SCAN_EXCEED_SWAP_PTE; 1188e9ea874aSYang Yang count_vm_event(THP_SCAN_EXCEED_SWAP_PTE); 1189b46e756fSKirill A. Shutemov goto out_unmap; 1190b46e756fSKirill A. Shutemov } 1191b46e756fSKirill A. Shutemov } 1192b46e756fSKirill A. Shutemov if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { 1193d8ea7cc8SZach O'Keefe ++none_or_zero; 1194b46e756fSKirill A. Shutemov if (!userfaultfd_armed(vma) && 1195d8ea7cc8SZach O'Keefe (!cc->is_khugepaged || 1196d8ea7cc8SZach O'Keefe none_or_zero <= khugepaged_max_ptes_none)) { 1197b46e756fSKirill A. Shutemov continue; 1198b46e756fSKirill A. Shutemov } else { 1199b46e756fSKirill A. Shutemov result = SCAN_EXCEED_NONE_PTE; 1200e9ea874aSYang Yang count_vm_event(THP_SCAN_EXCEED_NONE_PTE); 1201b46e756fSKirill A. Shutemov goto out_unmap; 1202b46e756fSKirill A. Shutemov } 1203b46e756fSKirill A. Shutemov } 1204e1e267c7SPeter Xu if (pte_uffd_wp(pteval)) { 1205e1e267c7SPeter Xu /* 1206e1e267c7SPeter Xu * Don't collapse the page if any of the small 1207e1e267c7SPeter Xu * PTEs are armed with uffd write protection. 1208e1e267c7SPeter Xu * Here we can also mark the new huge pmd as 1209e1e267c7SPeter Xu * write protected if any of the small ones is 12108958b249SHaitao Shi * marked but that could bring unknown 1211e1e267c7SPeter Xu * userfault messages that falls outside of 1212e1e267c7SPeter Xu * the registered range. So, just be simple. 1213e1e267c7SPeter Xu */ 1214e1e267c7SPeter Xu result = SCAN_PTE_UFFD_WP; 1215e1e267c7SPeter Xu goto out_unmap; 1216e1e267c7SPeter Xu } 1217b46e756fSKirill A. Shutemov if (pte_write(pteval)) 1218b46e756fSKirill A. Shutemov writable = true; 1219b46e756fSKirill A. Shutemov 1220b46e756fSKirill A. Shutemov page = vm_normal_page(vma, _address, pteval); 12213218f871SAlex Sierra if (unlikely(!page) || unlikely(is_zone_device_page(page))) { 1222b46e756fSKirill A. Shutemov result = SCAN_PAGE_NULL; 1223b46e756fSKirill A. Shutemov goto out_unmap; 1224b46e756fSKirill A. Shutemov } 1225b46e756fSKirill A. 
Shutemov 1226d8ea7cc8SZach O'Keefe if (page_mapcount(page) > 1) { 1227d8ea7cc8SZach O'Keefe ++shared; 1228d8ea7cc8SZach O'Keefe if (cc->is_khugepaged && 1229d8ea7cc8SZach O'Keefe shared > khugepaged_max_ptes_shared) { 123071a2c112SKirill A. Shutemov result = SCAN_EXCEED_SHARED_PTE; 1231e9ea874aSYang Yang count_vm_event(THP_SCAN_EXCEED_SHARED_PTE); 123271a2c112SKirill A. Shutemov goto out_unmap; 123371a2c112SKirill A. Shutemov } 1234d8ea7cc8SZach O'Keefe } 123571a2c112SKirill A. Shutemov 12365503fbf2SKirill A. Shutemov page = compound_head(page); 1237b46e756fSKirill A. Shutemov 1238b46e756fSKirill A. Shutemov /* 1239b46e756fSKirill A. Shutemov * Record which node the original page is from and save this 124034d6b470SZach O'Keefe * information to cc->node_load[]. 12410b8f0d87SQuanfa Fu * Khugepaged will allocate a hugepage from the node that has 1242b46e756fSKirill A. Shutemov * the max hit record. 1243b46e756fSKirill A. Shutemov */ 1244b46e756fSKirill A. Shutemov node = page_to_nid(page); 12457d2c4385SZach O'Keefe if (hpage_collapse_scan_abort(node, cc)) { 1246b46e756fSKirill A. Shutemov result = SCAN_SCAN_ABORT; 1247b46e756fSKirill A. Shutemov goto out_unmap; 1248b46e756fSKirill A. Shutemov } 124934d6b470SZach O'Keefe cc->node_load[node]++; 1250b46e756fSKirill A. Shutemov if (!PageLRU(page)) { 1251b46e756fSKirill A. Shutemov result = SCAN_PAGE_LRU; 1252b46e756fSKirill A. Shutemov goto out_unmap; 1253b46e756fSKirill A. Shutemov } 1254b46e756fSKirill A. Shutemov if (PageLocked(page)) { 1255b46e756fSKirill A. Shutemov result = SCAN_PAGE_LOCK; 1256b46e756fSKirill A. Shutemov goto out_unmap; 1257b46e756fSKirill A. Shutemov } 1258b46e756fSKirill A. Shutemov if (!PageAnon(page)) { 1259b46e756fSKirill A. Shutemov result = SCAN_PAGE_ANON; 1260b46e756fSKirill A. Shutemov goto out_unmap; 1261b46e756fSKirill A. Shutemov } 1262b46e756fSKirill A. Shutemov 1263b46e756fSKirill A. Shutemov /* 12649445689fSKirill A. Shutemov * Check if the page has any GUP (or other external) pins. 12659445689fSKirill A. Shutemov * 1266cb67f428SHugh Dickins * Here the check may be racy: 1267cb67f428SHugh Dickins * it may see total_mapcount > refcount in some cases. 12689445689fSKirill A. Shutemov * But such cases are ephemeral, so we can always retry the 12699445689fSKirill A. Shutemov * collapse later. However, it may report a false positive if 12709445689fSKirill A. Shutemov * the page has excessive GUP pins (i.e. 512). Anyway, the same 12719445689fSKirill A. Shutemov * check will be done again later, so the risk seems low. 1272b46e756fSKirill A. Shutemov */ 12739445689fSKirill A. Shutemov if (!is_refcount_suitable(page)) { 1274b46e756fSKirill A. Shutemov result = SCAN_PAGE_COUNT; 1275b46e756fSKirill A. Shutemov goto out_unmap; 1276b46e756fSKirill A. Shutemov } 1277d8ea7cc8SZach O'Keefe 1278d8ea7cc8SZach O'Keefe /* 1279d8ea7cc8SZach O'Keefe * If collapse was initiated by khugepaged, check that there are 1280d8ea7cc8SZach O'Keefe * enough young ptes to justify collapsing the page. 1281d8ea7cc8SZach O'Keefe */ 1282d8ea7cc8SZach O'Keefe if (cc->is_khugepaged && 1283d8ea7cc8SZach O'Keefe (pte_young(pteval) || page_is_young(page) || 1284d8ea7cc8SZach O'Keefe PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, 1285d8ea7cc8SZach O'Keefe address))) 12860db501f7SEbru Akagunduz referenced++; 1287b46e756fSKirill A. Shutemov } 1288ffe945e6SKirill A. Shutemov if (!writable) { 1289ffe945e6SKirill A.
Shutemov result = SCAN_PAGE_RO; 1290d8ea7cc8SZach O'Keefe } else if (cc->is_khugepaged && 1291d8ea7cc8SZach O'Keefe (!referenced || 1292d8ea7cc8SZach O'Keefe (unmapped && referenced < HPAGE_PMD_NR / 2))) { 1293ffe945e6SKirill A. Shutemov result = SCAN_LACK_REFERENCED_PAGE; 1294ffe945e6SKirill A. Shutemov } else { 1295b46e756fSKirill A. Shutemov result = SCAN_SUCCEED; 1296b46e756fSKirill A. Shutemov } 1297b46e756fSKirill A. Shutemov out_unmap: 1298b46e756fSKirill A. Shutemov pte_unmap_unlock(pte, ptl); 129950ad2f24SZach O'Keefe if (result == SCAN_SUCCEED) { 130050ad2f24SZach O'Keefe result = collapse_huge_page(mm, address, referenced, 130150ad2f24SZach O'Keefe unmapped, cc); 1302c1e8d7c6SMichel Lespinasse /* collapse_huge_page will return with the mmap_lock released */ 130350ad2f24SZach O'Keefe *mmap_locked = false; 1304b46e756fSKirill A. Shutemov } 1305b46e756fSKirill A. Shutemov out: 1306b46e756fSKirill A. Shutemov trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced, 1307b46e756fSKirill A. Shutemov none_or_zero, result, unmapped); 130850ad2f24SZach O'Keefe return result; 1309b46e756fSKirill A. Shutemov } 1310b46e756fSKirill A. Shutemov 1311b26e2701SQi Zheng static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot) 1312b46e756fSKirill A. Shutemov { 1313b26e2701SQi Zheng struct mm_slot *slot = &mm_slot->slot; 1314b26e2701SQi Zheng struct mm_struct *mm = slot->mm; 1315b46e756fSKirill A. Shutemov 131635f3aa39SLance Roy lockdep_assert_held(&khugepaged_mm_lock); 1317b46e756fSKirill A. Shutemov 13187d2c4385SZach O'Keefe if (hpage_collapse_test_exit(mm)) { 1319b46e756fSKirill A. Shutemov /* free mm_slot */ 1320b26e2701SQi Zheng hash_del(&slot->hash); 1321b26e2701SQi Zheng list_del(&slot->mm_node); 1322b46e756fSKirill A. Shutemov 1323b46e756fSKirill A. Shutemov /* 1324b46e756fSKirill A. Shutemov * Not strictly needed because the mm exited already. 1325b46e756fSKirill A. Shutemov * 1326b46e756fSKirill A. Shutemov * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); 1327b46e756fSKirill A. Shutemov */ 1328b46e756fSKirill A. Shutemov 1329b46e756fSKirill A. Shutemov /* khugepaged_mm_lock is actually not necessary for the below */ 1330b26e2701SQi Zheng mm_slot_free(mm_slot_cache, mm_slot); 1331b46e756fSKirill A. Shutemov mmdrop(mm); 1332b46e756fSKirill A. Shutemov } 1333b46e756fSKirill A. Shutemov } 1334b46e756fSKirill A. Shutemov 1335396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_SHMEM 133627e1f827SSong Liu /* 133727e1f827SSong Liu * Notify khugepaged that a given addr of the mm is a pte-mapped THP. Then 133827e1f827SSong Liu * khugepaged should try to collapse the page table. 133934488399SZach O'Keefe * 134034488399SZach O'Keefe * Note that the following race exists: 134134488399SZach O'Keefe * (1) khugepaged calls khugepaged_collapse_pte_mapped_thps() for mm_struct A, 134234488399SZach O'Keefe * emptying A's ->pte_mapped_thp[] array. 134334488399SZach O'Keefe * (2) MADV_COLLAPSE collapses some file extent with target mm_struct B, and 134434488399SZach O'Keefe * retract_page_tables() finds a VMA in mm_struct A mapping the same extent 134534488399SZach O'Keefe * (at virtual address X) and adds an entry (for X) into mm_struct A's 134634488399SZach O'Keefe * ->pte_mapped_thp[] array. 134734488399SZach O'Keefe * (3) khugepaged calls hpage_collapse_scan_file() for mm_struct A at X, 134834488399SZach O'Keefe * sees a pte-mapped THP (SCAN_PTE_MAPPED_HUGEPAGE) and adds an entry 134934488399SZach O'Keefe * (for X) into mm_struct A's ->pte_mapped_thp[] array.
135034488399SZach O'Keefe * Thus, it's possible the same address is added multiple times for the same 135134488399SZach O'Keefe * mm_struct. Should this happen, we'll simply attempt 135234488399SZach O'Keefe * collapse_pte_mapped_thp() multiple times for the same address, under the same 135334488399SZach O'Keefe * exclusive mmap_lock, and assuming the first call is successful, subsequent 135434488399SZach O'Keefe * attempts will return quickly (without grabbing any additional locks) when 135534488399SZach O'Keefe * a huge pmd is found in find_pmd_or_thp_or_none(). Since this is a cheap 135634488399SZach O'Keefe * check, and since this is a rare occurrence, the cost of preventing this 135734488399SZach O'Keefe * "multiple-add" is thought to be more expensive than just handling it, should 135834488399SZach O'Keefe * it occur. 135927e1f827SSong Liu */ 136058ac9a89SZach O'Keefe static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm, 136127e1f827SSong Liu unsigned long addr) 136227e1f827SSong Liu { 1363b26e2701SQi Zheng struct khugepaged_mm_slot *mm_slot; 1364b26e2701SQi Zheng struct mm_slot *slot; 136558ac9a89SZach O'Keefe bool ret = false; 136627e1f827SSong Liu 136727e1f827SSong Liu VM_BUG_ON(addr & ~HPAGE_PMD_MASK); 136827e1f827SSong Liu 136927e1f827SSong Liu spin_lock(&khugepaged_mm_lock); 1370b26e2701SQi Zheng slot = mm_slot_lookup(mm_slots_hash, mm); 1371b26e2701SQi Zheng mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot); 137258ac9a89SZach O'Keefe if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP)) { 137327e1f827SSong Liu mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr; 137458ac9a89SZach O'Keefe ret = true; 137558ac9a89SZach O'Keefe } 137627e1f827SSong Liu spin_unlock(&khugepaged_mm_lock); 137758ac9a89SZach O'Keefe return ret; 137827e1f827SSong Liu } 137927e1f827SSong Liu 138034488399SZach O'Keefe /* hpage must be locked, and mmap_lock must be held in write */ 138134488399SZach O'Keefe static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr, 138234488399SZach O'Keefe pmd_t *pmdp, struct page *hpage) 138334488399SZach O'Keefe { 138434488399SZach O'Keefe struct vm_fault vmf = { 138534488399SZach O'Keefe .vma = vma, 138634488399SZach O'Keefe .address = addr, 138734488399SZach O'Keefe .flags = 0, 138834488399SZach O'Keefe .pmd = pmdp, 138934488399SZach O'Keefe }; 139034488399SZach O'Keefe 139134488399SZach O'Keefe VM_BUG_ON(!PageTransHuge(hpage)); 139234488399SZach O'Keefe mmap_assert_write_locked(vma->vm_mm); 139334488399SZach O'Keefe 139434488399SZach O'Keefe if (do_set_pmd(&vmf, hpage)) 139534488399SZach O'Keefe return SCAN_FAIL; 139634488399SZach O'Keefe 139734488399SZach O'Keefe get_page(hpage); 139834488399SZach O'Keefe return SCAN_SUCCEED; 139927e1f827SSong Liu } 140027e1f827SSong Liu 14018d3c106eSJann Horn /* 14028d3c106eSJann Horn * A note about locking: 14038d3c106eSJann Horn * Trying to take the page table spinlocks would be useless here because those 14048d3c106eSJann Horn * are only used to synchronize: 14058d3c106eSJann Horn * 14068d3c106eSJann Horn * - modifying terminal entries (ones that point to a data page, not to another 14078d3c106eSJann Horn * page table) 14088d3c106eSJann Horn * - installing *new* non-terminal entries 14098d3c106eSJann Horn * 14108d3c106eSJann Horn * Instead, we need roughly the same kind of protection as free_pgtables() or 14118d3c106eSJann Horn * mm_take_all_locks() (but only for a single VMA): 14128d3c106eSJann Horn * The mmap lock together with this VMA's rmap locks covers all 
paths towards 14138d3c106eSJann Horn * the page table entries we're messing with here, except for hardware page 14148d3c106eSJann Horn * table walks and lockless_pages_from_mm(). 14158d3c106eSJann Horn */ 1416e59a47b8SPasha Tatashin static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma, 1417e59a47b8SPasha Tatashin unsigned long addr, pmd_t *pmdp) 1418e59a47b8SPasha Tatashin { 1419e59a47b8SPasha Tatashin pmd_t pmd; 1420f268f6cfSJann Horn struct mmu_notifier_range range; 1421e59a47b8SPasha Tatashin 142280110bbfSPasha Tatashin mmap_assert_write_locked(mm); 14238d3c106eSJann Horn if (vma->vm_file) 14248d3c106eSJann Horn lockdep_assert_held_write(&vma->vm_file->f_mapping->i_mmap_rwsem); 14258d3c106eSJann Horn /* 14268d3c106eSJann Horn * All anon_vmas attached to the VMA have the same root and are 14278d3c106eSJann Horn * therefore locked by the same lock. 14288d3c106eSJann Horn */ 14298d3c106eSJann Horn if (vma->anon_vma) 14308d3c106eSJann Horn lockdep_assert_held_write(&vma->anon_vma->root->rwsem); 14318d3c106eSJann Horn 14327d4a8be0SAlistair Popple mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr, 1433f268f6cfSJann Horn addr + HPAGE_PMD_SIZE); 1434f268f6cfSJann Horn mmu_notifier_invalidate_range_start(&range); 1435e59a47b8SPasha Tatashin pmd = pmdp_collapse_flush(vma, addr, pmdp); 14362ba99c5eSJann Horn tlb_remove_table_sync_one(); 1437f268f6cfSJann Horn mmu_notifier_invalidate_range_end(&range); 1438e59a47b8SPasha Tatashin mm_dec_nr_ptes(mm); 143980110bbfSPasha Tatashin page_table_check_pte_clear_range(mm, addr, pmd); 1440e59a47b8SPasha Tatashin pte_free(mm, pmd_pgtable(pmd)); 1441e59a47b8SPasha Tatashin } 1442e59a47b8SPasha Tatashin 144327e1f827SSong Liu /** 1444336e6b53SAlex Shi * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at 1445336e6b53SAlex Shi * address haddr. 1446336e6b53SAlex Shi * 1447336e6b53SAlex Shi * @mm: process address space where collapse happens 1448336e6b53SAlex Shi * @addr: THP collapse address 144934488399SZach O'Keefe * @install_pmd: If a huge PMD should be installed 145027e1f827SSong Liu * 145127e1f827SSong Liu * This function checks whether all the PTEs in the PMD are pointing to the 145227e1f827SSong Liu * right THP. If so, retract the page table so the THP can refault in with 145334488399SZach O'Keefe * as pmd-mapped. Possibly install a huge PMD mapping the THP. 145427e1f827SSong Liu */ 145534488399SZach O'Keefe int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, 145634488399SZach O'Keefe bool install_pmd) 145727e1f827SSong Liu { 145827e1f827SSong Liu unsigned long haddr = addr & HPAGE_PMD_MASK; 145994d815b2SLiam R. 
Howlett struct vm_area_struct *vma = vma_lookup(mm, haddr); 1460119a5fc1SHugh Dickins struct page *hpage; 146127e1f827SSong Liu pte_t *start_pte, *pte; 1462e59a47b8SPasha Tatashin pmd_t *pmd; 146327e1f827SSong Liu spinlock_t *ptl; 146458ac9a89SZach O'Keefe int count = 0, result = SCAN_FAIL; 146527e1f827SSong Liu int i; 146627e1f827SSong Liu 146758ac9a89SZach O'Keefe mmap_assert_write_locked(mm); 146858ac9a89SZach O'Keefe 146934488399SZach O'Keefe /* Fast check before locking page if already PMD-mapped */ 147058ac9a89SZach O'Keefe result = find_pmd_or_thp_or_none(mm, haddr, &pmd); 147134488399SZach O'Keefe if (result == SCAN_PMD_MAPPED) 147234488399SZach O'Keefe return result; 147358ac9a89SZach O'Keefe 147427e1f827SSong Liu if (!vma || !vma->vm_file || 1475fef792a4SMiaohe Lin !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE)) 147634488399SZach O'Keefe return SCAN_VMA_CHECK; 147727e1f827SSong Liu 147827e1f827SSong Liu /* 1479a7f4e6e4SZach O'Keefe * If we are here, we've succeeded in replacing all the native pages 1480a7f4e6e4SZach O'Keefe * in the page cache with a single hugepage. If a mm were to fault-in 1481a7f4e6e4SZach O'Keefe * this memory (mapped by a suitably aligned VMA), we'd get the hugepage 1482a7f4e6e4SZach O'Keefe * and map it by a PMD, regardless of sysfs THP settings. As such, let's 1483a7f4e6e4SZach O'Keefe * analogously elide sysfs THP settings here. 148427e1f827SSong Liu */ 1485a7f4e6e4SZach O'Keefe if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false)) 148634488399SZach O'Keefe return SCAN_VMA_CHECK; 148727e1f827SSong Liu 1488deb4c93aSPeter Xu /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */ 1489deb4c93aSPeter Xu if (userfaultfd_wp(vma)) 149034488399SZach O'Keefe return SCAN_PTE_UFFD_WP; 1491deb4c93aSPeter Xu 1492119a5fc1SHugh Dickins hpage = find_lock_page(vma->vm_file->f_mapping, 1493119a5fc1SHugh Dickins linear_page_index(vma, haddr)); 1494119a5fc1SHugh Dickins if (!hpage) 149534488399SZach O'Keefe return SCAN_PAGE_NULL; 1496119a5fc1SHugh Dickins 149734488399SZach O'Keefe if (!PageHead(hpage)) { 149834488399SZach O'Keefe result = SCAN_FAIL; 1499119a5fc1SHugh Dickins goto drop_hpage; 150034488399SZach O'Keefe } 1501119a5fc1SHugh Dickins 150234488399SZach O'Keefe if (compound_order(hpage) != HPAGE_PMD_ORDER) { 150334488399SZach O'Keefe result = SCAN_PAGE_COMPOUND; 1504119a5fc1SHugh Dickins goto drop_hpage; 150534488399SZach O'Keefe } 1506780a4b6fSZach O'Keefe 150734488399SZach O'Keefe switch (result) { 150834488399SZach O'Keefe case SCAN_SUCCEED: 150934488399SZach O'Keefe break; 151034488399SZach O'Keefe case SCAN_PMD_NONE: 151134488399SZach O'Keefe /* 151234488399SZach O'Keefe * In MADV_COLLAPSE path, possible race with khugepaged where 151334488399SZach O'Keefe * all pte entries have been removed and pmd cleared. If so, 151434488399SZach O'Keefe * skip all the pte checks and just update the pmd mapping. 151534488399SZach O'Keefe */ 151634488399SZach O'Keefe goto maybe_install_pmd; 151734488399SZach O'Keefe default: 151827e1f827SSong Liu goto drop_hpage; 151934488399SZach O'Keefe } 152027e1f827SSong Liu 1521*55fd6fccSSuren Baghdasaryan /* Lock the vma before taking i_mmap and page table locks */ 1522*55fd6fccSSuren Baghdasaryan vma_start_write(vma); 1523*55fd6fccSSuren Baghdasaryan 15248d3c106eSJann Horn /* 15258d3c106eSJann Horn * We need to lock the mapping so that from here on, only GUP-fast and 15268d3c106eSJann Horn * hardware page walks can access the parts of the page tables that 15278d3c106eSJann Horn * we're operating on. 
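 *
 * Roughly, the lock nesting used on this path is (outermost first,
 * matching the calls made in this function):
 *
 *	mmap_lock (write, asserted above)
 *	  vma_start_write(vma)
 *	    i_mmap_lock_write(vma->vm_file->f_mapping)
 *	      pte_offset_map_lock(mm, pmd, haddr, &ptl)
 *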
15288d3c106eSJann Horn * See collapse_and_free_pmd(). 15298d3c106eSJann Horn */ 15308d3c106eSJann Horn i_mmap_lock_write(vma->vm_file->f_mapping); 15318d3c106eSJann Horn 15328d3c106eSJann Horn /* 15338d3c106eSJann Horn * This spinlock should be unnecessary: Nobody else should be accessing 15348d3c106eSJann Horn * the page tables under spinlock protection here, only 15358d3c106eSJann Horn * lockless_pages_from_mm() and the hardware page walker can access page 15368d3c106eSJann Horn * tables while all the high-level locks are held in write mode. 15378d3c106eSJann Horn */ 153827e1f827SSong Liu start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl); 153934488399SZach O'Keefe result = SCAN_FAIL; 154027e1f827SSong Liu 154127e1f827SSong Liu /* step 1: check all mapped PTEs are to the right huge page */ 154227e1f827SSong Liu for (i = 0, addr = haddr, pte = start_pte; 154327e1f827SSong Liu i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) { 154427e1f827SSong Liu struct page *page; 154527e1f827SSong Liu 154627e1f827SSong Liu /* empty pte, skip */ 154727e1f827SSong Liu if (pte_none(*pte)) 154827e1f827SSong Liu continue; 154927e1f827SSong Liu 155027e1f827SSong Liu /* page swapped out, abort */ 155134488399SZach O'Keefe if (!pte_present(*pte)) { 155234488399SZach O'Keefe result = SCAN_PTE_NON_PRESENT; 155327e1f827SSong Liu goto abort; 155434488399SZach O'Keefe } 155527e1f827SSong Liu 155627e1f827SSong Liu page = vm_normal_page(vma, addr, *pte); 15573218f871SAlex Sierra if (WARN_ON_ONCE(page && is_zone_device_page(page))) 15583218f871SAlex Sierra page = NULL; 155927e1f827SSong Liu /* 1560119a5fc1SHugh Dickins * Note that uprobe, debugger, or MAP_PRIVATE may change the 1561119a5fc1SHugh Dickins * page table, but the new page will not be a subpage of hpage. 156227e1f827SSong Liu */ 1563119a5fc1SHugh Dickins if (hpage + i != page) 156427e1f827SSong Liu goto abort; 156527e1f827SSong Liu count++; 156627e1f827SSong Liu } 156727e1f827SSong Liu 156827e1f827SSong Liu /* step 2: adjust rmap */ 156927e1f827SSong Liu for (i = 0, addr = haddr, pte = start_pte; 157027e1f827SSong Liu i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) { 157127e1f827SSong Liu struct page *page; 157227e1f827SSong Liu 157327e1f827SSong Liu if (pte_none(*pte)) 157427e1f827SSong Liu continue; 157527e1f827SSong Liu page = vm_normal_page(vma, addr, *pte); 15763218f871SAlex Sierra if (WARN_ON_ONCE(page && is_zone_device_page(page))) 15773218f871SAlex Sierra goto abort; 1578cea86fe2SHugh Dickins page_remove_rmap(page, vma, false); 157927e1f827SSong Liu } 158027e1f827SSong Liu 158127e1f827SSong Liu pte_unmap_unlock(start_pte, ptl); 158227e1f827SSong Liu 158327e1f827SSong Liu /* step 3: set proper refcount and mm_counters. 
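 * (Each small PTE that mapped a subpage of hpage held one page
 *  reference; those mappings are gone now, so drop 'count' references
 *  and lower the file-backed mm counters by the same amount.)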
*/ 1584119a5fc1SHugh Dickins if (count) { 158527e1f827SSong Liu page_ref_sub(hpage, count); 158627e1f827SSong Liu add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count); 158727e1f827SSong Liu } 158827e1f827SSong Liu 158934488399SZach O'Keefe /* step 4: remove pte entries */ 1590ab0c3f12SHugh Dickins /* we make no change to anon, but protect concurrent anon page lookup */ 1591ab0c3f12SHugh Dickins if (vma->anon_vma) 1592ab0c3f12SHugh Dickins anon_vma_lock_write(vma->anon_vma); 1593ab0c3f12SHugh Dickins 1594e59a47b8SPasha Tatashin collapse_and_free_pmd(mm, vma, haddr, pmd); 159534488399SZach O'Keefe 1596ab0c3f12SHugh Dickins if (vma->anon_vma) 1597ab0c3f12SHugh Dickins anon_vma_unlock_write(vma->anon_vma); 15988d3c106eSJann Horn i_mmap_unlock_write(vma->vm_file->f_mapping); 15998d3c106eSJann Horn 160034488399SZach O'Keefe maybe_install_pmd: 160134488399SZach O'Keefe /* step 5: install pmd entry */ 160234488399SZach O'Keefe result = install_pmd 160334488399SZach O'Keefe ? set_huge_pmd(vma, haddr, pmd, hpage) 160434488399SZach O'Keefe : SCAN_SUCCEED; 160534488399SZach O'Keefe 1606119a5fc1SHugh Dickins drop_hpage: 1607119a5fc1SHugh Dickins unlock_page(hpage); 1608119a5fc1SHugh Dickins put_page(hpage); 160934488399SZach O'Keefe return result; 161027e1f827SSong Liu 161127e1f827SSong Liu abort: 161227e1f827SSong Liu pte_unmap_unlock(start_pte, ptl); 16138d3c106eSJann Horn i_mmap_unlock_write(vma->vm_file->f_mapping); 1614119a5fc1SHugh Dickins goto drop_hpage; 161527e1f827SSong Liu } 161627e1f827SSong Liu 1617b26e2701SQi Zheng static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot) 161827e1f827SSong Liu { 1619b26e2701SQi Zheng struct mm_slot *slot = &mm_slot->slot; 1620b26e2701SQi Zheng struct mm_struct *mm = slot->mm; 162127e1f827SSong Liu int i; 162227e1f827SSong Liu 162327e1f827SSong Liu if (likely(mm_slot->nr_pte_mapped_thp == 0)) 16240edf61e5SMiaohe Lin return; 162527e1f827SSong Liu 1626d8ed45c5SMichel Lespinasse if (!mmap_write_trylock(mm)) 16270edf61e5SMiaohe Lin return; 162827e1f827SSong Liu 16297d2c4385SZach O'Keefe if (unlikely(hpage_collapse_test_exit(mm))) 163027e1f827SSong Liu goto out; 163127e1f827SSong Liu 163227e1f827SSong Liu for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++) 163334488399SZach O'Keefe collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i], false); 163427e1f827SSong Liu 163527e1f827SSong Liu out: 163627e1f827SSong Liu mm_slot->nr_pte_mapped_thp = 0; 1637d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 163827e1f827SSong Liu } 163927e1f827SSong Liu 164034488399SZach O'Keefe static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff, 164134488399SZach O'Keefe struct mm_struct *target_mm, 164234488399SZach O'Keefe unsigned long target_addr, struct page *hpage, 164334488399SZach O'Keefe struct collapse_control *cc) 1644f3f0e1d2SKirill A. Shutemov { 1645f3f0e1d2SKirill A. Shutemov struct vm_area_struct *vma; 164634488399SZach O'Keefe int target_result = SCAN_FAIL; 1647f3f0e1d2SKirill A. Shutemov 1648f3f0e1d2SKirill A. Shutemov i_mmap_lock_write(mapping); 1649f3f0e1d2SKirill A. 
Shutemov vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { 165034488399SZach O'Keefe int result = SCAN_FAIL; 165134488399SZach O'Keefe struct mm_struct *mm = NULL; 165234488399SZach O'Keefe unsigned long addr = 0; 165334488399SZach O'Keefe pmd_t *pmd; 165434488399SZach O'Keefe bool is_target = false; 165534488399SZach O'Keefe 165627e1f827SSong Liu /* 165727e1f827SSong Liu * Check vma->anon_vma to exclude MAP_PRIVATE mappings that 165827e1f827SSong Liu * got written to. These VMAs are likely not worth investing 16593e4e28c5SMichel Lespinasse * mmap_write_lock(mm) as PMD-mapping is likely to be split 166027e1f827SSong Liu * later. 166127e1f827SSong Liu * 166236ee2c78SMiaohe Lin * Note that vma->anon_vma check is racy: it can be set up after 1663c1e8d7c6SMichel Lespinasse * the check but before we took mmap_lock by the fault path. 166427e1f827SSong Liu * But page lock would prevent establishing any new ptes of the 166527e1f827SSong Liu * page, so we are safe. 166627e1f827SSong Liu * 166727e1f827SSong Liu * An alternative would be drop the check, but check that page 166827e1f827SSong Liu * table is clear before calling pmdp_collapse_flush() under 166927e1f827SSong Liu * ptl. It has higher chance to recover THP for the VMA, but 16708d3c106eSJann Horn * has higher cost too. It would also probably require locking 16718d3c106eSJann Horn * the anon_vma. 167227e1f827SSong Liu */ 1673023f47a8SJann Horn if (READ_ONCE(vma->anon_vma)) { 167434488399SZach O'Keefe result = SCAN_PAGE_ANON; 167534488399SZach O'Keefe goto next; 167634488399SZach O'Keefe } 1677f3f0e1d2SKirill A. Shutemov addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); 167834488399SZach O'Keefe if (addr & ~HPAGE_PMD_MASK || 167934488399SZach O'Keefe vma->vm_end < addr + HPAGE_PMD_SIZE) { 168034488399SZach O'Keefe result = SCAN_VMA_CHECK; 168134488399SZach O'Keefe goto next; 168234488399SZach O'Keefe } 168318e77600SHugh Dickins mm = vma->vm_mm; 168434488399SZach O'Keefe is_target = mm == target_mm && addr == target_addr; 168534488399SZach O'Keefe result = find_pmd_or_thp_or_none(mm, addr, &pmd); 168634488399SZach O'Keefe if (result != SCAN_SUCCEED) 168734488399SZach O'Keefe goto next; 1688f3f0e1d2SKirill A. Shutemov /* 1689c1e8d7c6SMichel Lespinasse * We need exclusive mmap_lock to retract page table. 169027e1f827SSong Liu * 169127e1f827SSong Liu * We use trylock due to lock inversion: we need to acquire 1692c1e8d7c6SMichel Lespinasse * mmap_lock while holding page lock. Fault path does it in 169327e1f827SSong Liu * reverse order. Trylock is a way to avoid deadlock. 169434488399SZach O'Keefe * 169534488399SZach O'Keefe * Also, it's not MADV_COLLAPSE's job to collapse other 169634488399SZach O'Keefe * mappings - let khugepaged take care of them later. 1697f3f0e1d2SKirill A. Shutemov */ 169834488399SZach O'Keefe result = SCAN_PTE_MAPPED_HUGEPAGE; 169934488399SZach O'Keefe if ((cc->is_khugepaged || is_target) && 170034488399SZach O'Keefe mmap_write_trylock(mm)) { 1701*55fd6fccSSuren Baghdasaryan /* trylock for the same lock inversion as above */ 1702*55fd6fccSSuren Baghdasaryan if (!vma_try_start_write(vma)) 1703*55fd6fccSSuren Baghdasaryan goto unlock_next; 1704*55fd6fccSSuren Baghdasaryan 1705deb4c93aSPeter Xu /* 1706023f47a8SJann Horn * Re-check whether we have an ->anon_vma, because 1707023f47a8SJann Horn * collapse_and_free_pmd() requires that either no 1708023f47a8SJann Horn * ->anon_vma exists or the anon_vma is locked. 
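 * (Since mmap_write_trylock() succeeded above, a page fault can no
 *  longer populate ->anon_vma behind our back once this re-check
 *  passes.)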
1709023f47a8SJann Horn * We already checked ->anon_vma above, but that check 1710023f47a8SJann Horn * is racy because ->anon_vma can be populated under the 1711023f47a8SJann Horn * mmap lock in read mode. 1712023f47a8SJann Horn */ 1713023f47a8SJann Horn if (vma->anon_vma) { 1714023f47a8SJann Horn result = SCAN_PAGE_ANON; 1715023f47a8SJann Horn goto unlock_next; 1716023f47a8SJann Horn } 1717023f47a8SJann Horn /* 1718deb4c93aSPeter Xu * When a vma is registered with uffd-wp, we can't 1719deb4c93aSPeter Xu * recycle the pmd pgtable because there can be pte 1720deb4c93aSPeter Xu * markers installed. Only skip it, so the rest of the 1721deb4c93aSPeter Xu * mm/vma can still have the same file mapped hugely; 1722deb4c93aSPeter Xu * however, it'll always be mapped at small page size 1723deb4c93aSPeter Xu * for uffd-wp registered ranges. 1724deb4c93aSPeter Xu */ 172534488399SZach O'Keefe if (hpage_collapse_test_exit(mm)) { 172634488399SZach O'Keefe result = SCAN_ANY_PROCESS; 172734488399SZach O'Keefe goto unlock_next; 1728f3f0e1d2SKirill A. Shutemov } 172934488399SZach O'Keefe if (userfaultfd_wp(vma)) { 173034488399SZach O'Keefe result = SCAN_PTE_UFFD_WP; 173134488399SZach O'Keefe goto unlock_next; 173234488399SZach O'Keefe } 173334488399SZach O'Keefe collapse_and_free_pmd(mm, vma, addr, pmd); 173434488399SZach O'Keefe if (!cc->is_khugepaged && is_target) 173534488399SZach O'Keefe result = set_huge_pmd(vma, addr, pmd, hpage); 173634488399SZach O'Keefe else 173734488399SZach O'Keefe result = SCAN_SUCCEED; 173834488399SZach O'Keefe 173934488399SZach O'Keefe unlock_next: 174034488399SZach O'Keefe mmap_write_unlock(mm); 174134488399SZach O'Keefe goto next; 174234488399SZach O'Keefe } 174334488399SZach O'Keefe /* 174434488399SZach O'Keefe * Calling context will handle target mm/addr. Otherwise, let 174534488399SZach O'Keefe * khugepaged try again later. 174634488399SZach O'Keefe */ 174734488399SZach O'Keefe if (!is_target) { 174834488399SZach O'Keefe khugepaged_add_pte_mapped_thp(mm, addr); 174934488399SZach O'Keefe continue; 175034488399SZach O'Keefe } 175134488399SZach O'Keefe next: 175234488399SZach O'Keefe if (is_target) 175334488399SZach O'Keefe target_result = result; 1754f3f0e1d2SKirill A. Shutemov } 1755f3f0e1d2SKirill A. Shutemov i_mmap_unlock_write(mapping); 175634488399SZach O'Keefe return target_result; 1757f3f0e1d2SKirill A. Shutemov } 1758f3f0e1d2SKirill A. Shutemov 1759f3f0e1d2SKirill A. Shutemov /** 176099cb0dbdSSong Liu * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one. 1761f3f0e1d2SKirill A. Shutemov * 1762336e6b53SAlex Shi * @mm: process address space where collapse happens 176334488399SZach O'Keefe * @addr: virtual collapse start address 1764336e6b53SAlex Shi * @file: file that the collapse operates on 1765336e6b53SAlex Shi * @start: collapse start page cache index 17669710a78aSZach O'Keefe * @cc: collapse context and scratchpad 1767336e6b53SAlex Shi * 1768f3f0e1d2SKirill A. Shutemov * The basic scheme is simple; the details are more complex: 176987c460a0SHugh Dickins * - allocate and lock a new huge page; 177077da9389SMatthew Wilcox * - scan the page cache, replacing old pages with the new one 177199cb0dbdSSong Liu * + swap/gup in pages if necessary; 1772f3f0e1d2SKirill A. Shutemov * + fill in gaps; 177377da9389SMatthew Wilcox * + keep old pages around in case rollback is required; 177477da9389SMatthew Wilcox * - if replacing succeeds: 1775f3f0e1d2SKirill A. Shutemov * + copy data over; 1776f3f0e1d2SKirill A. Shutemov * + free old pages; 177787c460a0SHugh Dickins * + unlock huge page; 1778f3f0e1d2SKirill A.
Shutemov * - if replacing failed; 1779f3f0e1d2SKirill A. Shutemov * + put all pages back and unfreeze them; 178077da9389SMatthew Wilcox * + restore gaps in the page cache; 178187c460a0SHugh Dickins * + unlock and free huge page; 1782f3f0e1d2SKirill A. Shutemov */ 178334488399SZach O'Keefe static int collapse_file(struct mm_struct *mm, unsigned long addr, 1784579c571eSSong Liu struct file *file, pgoff_t start, 178534488399SZach O'Keefe struct collapse_control *cc) 1786f3f0e1d2SKirill A. Shutemov { 1787579c571eSSong Liu struct address_space *mapping = file->f_mapping; 178850ad2f24SZach O'Keefe struct page *hpage; 17894c9473e8SGautam Menghani pgoff_t index = 0, end = start + HPAGE_PMD_NR; 1790f3f0e1d2SKirill A. Shutemov LIST_HEAD(pagelist); 179177da9389SMatthew Wilcox XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER); 1792f3f0e1d2SKirill A. Shutemov int nr_none = 0, result = SCAN_SUCCEED; 179399cb0dbdSSong Liu bool is_shmem = shmem_file(file); 17944c9473e8SGautam Menghani int nr = 0; 1795f3f0e1d2SKirill A. Shutemov 179699cb0dbdSSong Liu VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem); 1797f3f0e1d2SKirill A. Shutemov VM_BUG_ON(start & (HPAGE_PMD_NR - 1)); 1798f3f0e1d2SKirill A. Shutemov 179950ad2f24SZach O'Keefe result = alloc_charge_hpage(&hpage, mm, cc); 18009710a78aSZach O'Keefe if (result != SCAN_SUCCEED) 1801f3f0e1d2SKirill A. Shutemov goto out; 1802f3f0e1d2SKirill A. Shutemov 18036b24ca4aSMatthew Wilcox (Oracle) /* 18046b24ca4aSMatthew Wilcox (Oracle) * Ensure we have slots for all the pages in the range. This is 18056b24ca4aSMatthew Wilcox (Oracle) * almost certainly a no-op because most of the pages must be present 18066b24ca4aSMatthew Wilcox (Oracle) */ 180795feeabbSHugh Dickins do { 180895feeabbSHugh Dickins xas_lock_irq(&xas); 180995feeabbSHugh Dickins xas_create_range(&xas); 181095feeabbSHugh Dickins if (!xas_error(&xas)) 181195feeabbSHugh Dickins break; 181295feeabbSHugh Dickins xas_unlock_irq(&xas); 181395feeabbSHugh Dickins if (!xas_nomem(&xas, GFP_KERNEL)) { 181495feeabbSHugh Dickins result = SCAN_FAIL; 181595feeabbSHugh Dickins goto out; 181695feeabbSHugh Dickins } 181795feeabbSHugh Dickins } while (1); 181895feeabbSHugh Dickins 181950ad2f24SZach O'Keefe __SetPageLocked(hpage); 182099cb0dbdSSong Liu if (is_shmem) 182150ad2f24SZach O'Keefe __SetPageSwapBacked(hpage); 182250ad2f24SZach O'Keefe hpage->index = start; 182350ad2f24SZach O'Keefe hpage->mapping = mapping; 1824f3f0e1d2SKirill A. Shutemov 1825f3f0e1d2SKirill A. Shutemov /* 182650ad2f24SZach O'Keefe * At this point the hpage is locked and not up-to-date. 182787c460a0SHugh Dickins * It's safe to insert it into the page cache, because nobody would 182887c460a0SHugh Dickins * be able to map it or use it in another way until we unlock it. 1829f3f0e1d2SKirill A. Shutemov */ 1830f3f0e1d2SKirill A. Shutemov 183177da9389SMatthew Wilcox xas_set(&xas, start); 183277da9389SMatthew Wilcox for (index = start; index < end; index++) { 183377da9389SMatthew Wilcox struct page *page = xas_next(&xas); 183464ab3195SVishal Moola (Oracle) struct folio *folio; 183577da9389SMatthew Wilcox 183677da9389SMatthew Wilcox VM_BUG_ON(index != xas.xa_index); 183799cb0dbdSSong Liu if (is_shmem) { 183877da9389SMatthew Wilcox if (!page) { 1839701270faSHugh Dickins /* 184099cb0dbdSSong Liu * Stop if extent has been truncated or 184199cb0dbdSSong Liu * hole-punched, and is now completely 184299cb0dbdSSong Liu * empty. 
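 * (xas_next_entry() below looks for any entry remaining in
 *  [start, end); if none is found we bail out with SCAN_TRUNCATED
 *  rather than collapse an empty extent.)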
1843701270faSHugh Dickins */ 1844701270faSHugh Dickins if (index == start) { 1845701270faSHugh Dickins if (!xas_next_entry(&xas, end - 1)) { 1846701270faSHugh Dickins result = SCAN_TRUNCATED; 1847042a3082SHugh Dickins goto xa_locked; 1848701270faSHugh Dickins } 1849701270faSHugh Dickins xas_set(&xas, index); 1850701270faSHugh Dickins } 185177da9389SMatthew Wilcox if (!shmem_charge(mapping->host, 1)) { 1852f3f0e1d2SKirill A. Shutemov result = SCAN_FAIL; 1853042a3082SHugh Dickins goto xa_locked; 1854f3f0e1d2SKirill A. Shutemov } 185550ad2f24SZach O'Keefe xas_store(&xas, hpage); 185677da9389SMatthew Wilcox nr_none++; 185777da9389SMatthew Wilcox continue; 1858f3f0e1d2SKirill A. Shutemov } 1859f3f0e1d2SKirill A. Shutemov 18603159f943SMatthew Wilcox if (xa_is_value(page) || !PageUptodate(page)) { 186177da9389SMatthew Wilcox xas_unlock_irq(&xas); 1862f3f0e1d2SKirill A. Shutemov /* swap in or instantiate fallocated page */ 18637459c149SMatthew Wilcox (Oracle) if (shmem_get_folio(mapping->host, index, 18647459c149SMatthew Wilcox (Oracle) &folio, SGP_NOALLOC)) { 1865f3f0e1d2SKirill A. Shutemov result = SCAN_FAIL; 186677da9389SMatthew Wilcox goto xa_unlocked; 1867f3f0e1d2SKirill A. Shutemov } 18687459c149SMatthew Wilcox (Oracle) page = folio_file_page(folio, index); 1869f3f0e1d2SKirill A. Shutemov } else if (trylock_page(page)) { 1870f3f0e1d2SKirill A. Shutemov get_page(page); 1871042a3082SHugh Dickins xas_unlock_irq(&xas); 1872f3f0e1d2SKirill A. Shutemov } else { 1873f3f0e1d2SKirill A. Shutemov result = SCAN_PAGE_LOCK; 1874042a3082SHugh Dickins goto xa_locked; 1875f3f0e1d2SKirill A. Shutemov } 187699cb0dbdSSong Liu } else { /* !is_shmem */ 187799cb0dbdSSong Liu if (!page || xa_is_value(page)) { 187899cb0dbdSSong Liu xas_unlock_irq(&xas); 187999cb0dbdSSong Liu page_cache_sync_readahead(mapping, &file->f_ra, 188099cb0dbdSSong Liu file, index, 1881e5a59d30SDavid Howells end - index); 188299cb0dbdSSong Liu /* drain pagevecs to help isolate_lru_page() */ 188399cb0dbdSSong Liu lru_add_drain(); 188499cb0dbdSSong Liu page = find_lock_page(mapping, index); 188599cb0dbdSSong Liu if (unlikely(page == NULL)) { 188699cb0dbdSSong Liu result = SCAN_FAIL; 188799cb0dbdSSong Liu goto xa_unlocked; 188899cb0dbdSSong Liu } 188975f36069SSong Liu } else if (PageDirty(page)) { 189075f36069SSong Liu /* 189175f36069SSong Liu * khugepaged only works on read-only fd, 189275f36069SSong Liu * so this page is dirty because it hasn't 189375f36069SSong Liu * been flushed since first write. There 189475f36069SSong Liu * won't be new dirty pages. 189575f36069SSong Liu * 189675f36069SSong Liu * Trigger async flush here and hope the 189775f36069SSong Liu * writeback is done when khugepaged 189875f36069SSong Liu * revisits this page. 189975f36069SSong Liu * 190075f36069SSong Liu * This is a one-off situation. We are not 190175f36069SSong Liu * forcing writeback in loop. 
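 * (filemap_flush() only kicks off asynchronous writeback on the
 *  mapping; this attempt still fails with SCAN_FAIL, and a later
 *  scan may find the page clean.)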
190275f36069SSong Liu */ 190375f36069SSong Liu xas_unlock_irq(&xas); 190475f36069SSong Liu filemap_flush(mapping); 190575f36069SSong Liu result = SCAN_FAIL; 190675f36069SSong Liu goto xa_unlocked; 190774c42e1bSRongwei Wang } else if (PageWriteback(page)) { 190874c42e1bSRongwei Wang xas_unlock_irq(&xas); 190974c42e1bSRongwei Wang result = SCAN_FAIL; 191074c42e1bSRongwei Wang goto xa_unlocked; 191199cb0dbdSSong Liu } else if (trylock_page(page)) { 191299cb0dbdSSong Liu get_page(page); 191399cb0dbdSSong Liu xas_unlock_irq(&xas); 191499cb0dbdSSong Liu } else { 191599cb0dbdSSong Liu result = SCAN_PAGE_LOCK; 191699cb0dbdSSong Liu goto xa_locked; 191799cb0dbdSSong Liu } 191899cb0dbdSSong Liu } 1919f3f0e1d2SKirill A. Shutemov 1920f3f0e1d2SKirill A. Shutemov /* 1921b93b0163SMatthew Wilcox * The page must be locked, so we can drop the i_pages lock 1922f3f0e1d2SKirill A. Shutemov * without racing with truncate. 1923f3f0e1d2SKirill A. Shutemov */ 1924f3f0e1d2SKirill A. Shutemov VM_BUG_ON_PAGE(!PageLocked(page), page); 19254655e5e5SSong Liu 19264655e5e5SSong Liu /* make sure the page is up to date */ 19274655e5e5SSong Liu if (unlikely(!PageUptodate(page))) { 19284655e5e5SSong Liu result = SCAN_FAIL; 19294655e5e5SSong Liu goto out_unlock; 19304655e5e5SSong Liu } 193106a5e126SHugh Dickins 193206a5e126SHugh Dickins /* 193306a5e126SHugh Dickins * If file was truncated then extended, or hole-punched, before 193406a5e126SHugh Dickins * we locked the first page, then a THP might be there already. 193558ac9a89SZach O'Keefe * This will be discovered on the first iteration. 193606a5e126SHugh Dickins */ 193706a5e126SHugh Dickins if (PageTransCompound(page)) { 193858ac9a89SZach O'Keefe struct page *head = compound_head(page); 193958ac9a89SZach O'Keefe 194058ac9a89SZach O'Keefe result = compound_order(head) == HPAGE_PMD_ORDER && 194158ac9a89SZach O'Keefe head->index == start 194258ac9a89SZach O'Keefe /* Maybe PMD-mapped */ 194358ac9a89SZach O'Keefe ? SCAN_PTE_MAPPED_HUGEPAGE 194458ac9a89SZach O'Keefe : SCAN_PAGE_COMPOUND; 194506a5e126SHugh Dickins goto out_unlock; 194606a5e126SHugh Dickins } 1947f3f0e1d2SKirill A. Shutemov 194864ab3195SVishal Moola (Oracle) folio = page_folio(page); 194964ab3195SVishal Moola (Oracle) 195064ab3195SVishal Moola (Oracle) if (folio_mapping(folio) != mapping) { 1951f3f0e1d2SKirill A. Shutemov result = SCAN_TRUNCATED; 1952f3f0e1d2SKirill A. Shutemov goto out_unlock; 1953f3f0e1d2SKirill A. Shutemov } 1954f3f0e1d2SKirill A. Shutemov 195564ab3195SVishal Moola (Oracle) if (!is_shmem && (folio_test_dirty(folio) || 195664ab3195SVishal Moola (Oracle) folio_test_writeback(folio))) { 19574655e5e5SSong Liu /* 19584655e5e5SSong Liu * khugepaged only works on read-only fd, so this 19594655e5e5SSong Liu * page is dirty because it hasn't been flushed 19604655e5e5SSong Liu * since first write. 19614655e5e5SSong Liu */ 19624655e5e5SSong Liu result = SCAN_FAIL; 19634655e5e5SSong Liu goto out_unlock; 19644655e5e5SSong Liu } 19654655e5e5SSong Liu 1966be2d5756SBaolin Wang if (!folio_isolate_lru(folio)) { 1967f3f0e1d2SKirill A. Shutemov result = SCAN_DEL_PAGE_LRU; 1968042a3082SHugh Dickins goto out_unlock; 1969f3f0e1d2SKirill A. Shutemov } 1970f3f0e1d2SKirill A. 
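 * (If inode_is_open_for_write() does observe a writer below, the
 *  NR_FILE_THPS accounting is rolled back and the collapse fails
 *  with SCAN_FAIL.)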
Shutemov 197164ab3195SVishal Moola (Oracle) if (folio_has_private(folio) && 197264ab3195SVishal Moola (Oracle) !filemap_release_folio(folio, GFP_KERNEL)) { 197399cb0dbdSSong Liu result = SCAN_PAGE_HAS_PRIVATE; 197464ab3195SVishal Moola (Oracle) folio_putback_lru(folio); 197599cb0dbdSSong Liu goto out_unlock; 197699cb0dbdSSong Liu } 197799cb0dbdSSong Liu 197864ab3195SVishal Moola (Oracle) if (folio_mapped(folio)) 197964ab3195SVishal Moola (Oracle) try_to_unmap(folio, 1980869f7ee6SMatthew Wilcox (Oracle) TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH); 1981f3f0e1d2SKirill A. Shutemov 198277da9389SMatthew Wilcox xas_lock_irq(&xas); 198377da9389SMatthew Wilcox xas_set(&xas, index); 1984f3f0e1d2SKirill A. Shutemov 198577da9389SMatthew Wilcox VM_BUG_ON_PAGE(page != xas_load(&xas), page); 1986f3f0e1d2SKirill A. Shutemov 1987f3f0e1d2SKirill A. Shutemov /* 1988f3f0e1d2SKirill A. Shutemov * The page is expected to have page_count() == 3: 1989f3f0e1d2SKirill A. Shutemov * - we hold a pin on it; 199077da9389SMatthew Wilcox * - one reference from page cache; 1991f3f0e1d2SKirill A. Shutemov * - one from isolate_lru_page; 1992f3f0e1d2SKirill A. Shutemov */ 1993f3f0e1d2SKirill A. Shutemov if (!page_ref_freeze(page, 3)) { 1994f3f0e1d2SKirill A. Shutemov result = SCAN_PAGE_COUNT; 1995042a3082SHugh Dickins xas_unlock_irq(&xas); 1996042a3082SHugh Dickins putback_lru_page(page); 1997042a3082SHugh Dickins goto out_unlock; 1998f3f0e1d2SKirill A. Shutemov } 1999f3f0e1d2SKirill A. Shutemov 2000f3f0e1d2SKirill A. Shutemov /* 2001f3f0e1d2SKirill A. Shutemov * Add the page to the list to be able to undo the collapse if 2002f3f0e1d2SKirill A. Shutemov * something go wrong. 2003f3f0e1d2SKirill A. Shutemov */ 2004f3f0e1d2SKirill A. Shutemov list_add_tail(&page->lru, &pagelist); 2005f3f0e1d2SKirill A. Shutemov 2006f3f0e1d2SKirill A. Shutemov /* Finally, replace with the new page. */ 200750ad2f24SZach O'Keefe xas_store(&xas, hpage); 2008f3f0e1d2SKirill A. Shutemov continue; 2009f3f0e1d2SKirill A. Shutemov out_unlock: 2010f3f0e1d2SKirill A. Shutemov unlock_page(page); 2011f3f0e1d2SKirill A. Shutemov put_page(page); 2012042a3082SHugh Dickins goto xa_unlocked; 2013f3f0e1d2SKirill A. Shutemov } 201450ad2f24SZach O'Keefe nr = thp_nr_pages(hpage); 2015f3f0e1d2SKirill A. Shutemov 201699cb0dbdSSong Liu if (is_shmem) 201750ad2f24SZach O'Keefe __mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr); 201809d91cdaSSong Liu else { 201950ad2f24SZach O'Keefe __mod_lruvec_page_state(hpage, NR_FILE_THPS, nr); 202009d91cdaSSong Liu filemap_nr_thps_inc(mapping); 2021eb6ecbedSCollin Fijalkovich /* 2022eb6ecbedSCollin Fijalkovich * Paired with smp_mb() in do_dentry_open() to ensure 2023eb6ecbedSCollin Fijalkovich * i_writecount is up to date and the update to nr_thps is 2024eb6ecbedSCollin Fijalkovich * visible. Ensures the page cache will be truncated if the 2025eb6ecbedSCollin Fijalkovich * file is opened writable. 
2026eb6ecbedSCollin Fijalkovich */ 2027eb6ecbedSCollin Fijalkovich smp_mb(); 2028eb6ecbedSCollin Fijalkovich if (inode_is_open_for_write(mapping->host)) { 2029eb6ecbedSCollin Fijalkovich result = SCAN_FAIL; 203050ad2f24SZach O'Keefe __mod_lruvec_page_state(hpage, NR_FILE_THPS, -nr); 2031eb6ecbedSCollin Fijalkovich filemap_nr_thps_dec(mapping); 2032eb6ecbedSCollin Fijalkovich goto xa_locked; 2033eb6ecbedSCollin Fijalkovich } 203409d91cdaSSong Liu } 203599cb0dbdSSong Liu 2036042a3082SHugh Dickins if (nr_none) { 203750ad2f24SZach O'Keefe __mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none); 20382f55f070SMiaohe Lin /* nr_none is always 0 for non-shmem. */ 203950ad2f24SZach O'Keefe __mod_lruvec_page_state(hpage, NR_SHMEM, nr_none); 2040042a3082SHugh Dickins } 2041042a3082SHugh Dickins 20426b24ca4aSMatthew Wilcox (Oracle) /* Join all the small entries into a single multi-index entry */ 20436b24ca4aSMatthew Wilcox (Oracle) xas_set_order(&xas, start, HPAGE_PMD_ORDER); 204450ad2f24SZach O'Keefe xas_store(&xas, hpage); 2045042a3082SHugh Dickins xa_locked: 2046042a3082SHugh Dickins xas_unlock_irq(&xas); 204777da9389SMatthew Wilcox xa_unlocked: 2048042a3082SHugh Dickins 20496d9df8a5SHugh Dickins /* 20506d9df8a5SHugh Dickins * If collapse is successful, flush must be done now before copying. 20516d9df8a5SHugh Dickins * If collapse is unsuccessful, does flush actually need to be done? 20526d9df8a5SHugh Dickins * Do it anyway, to clear the state. 20536d9df8a5SHugh Dickins */ 20546d9df8a5SHugh Dickins try_to_unmap_flush(); 20556d9df8a5SHugh Dickins 2056f3f0e1d2SKirill A. Shutemov if (result == SCAN_SUCCEED) { 205777da9389SMatthew Wilcox struct page *page, *tmp; 2058284a344eSVishal Moola (Oracle) struct folio *folio; 2059f3f0e1d2SKirill A. Shutemov 2060f3f0e1d2SKirill A. Shutemov /* 206177da9389SMatthew Wilcox * Replacing old pages with new one has succeeded, now we 206277da9389SMatthew Wilcox * need to copy the content and free the old pages. 2063f3f0e1d2SKirill A. Shutemov */ 20642af8ff29SHugh Dickins index = start; 2065f3f0e1d2SKirill A. Shutemov list_for_each_entry_safe(page, tmp, &pagelist, lru) { 20662af8ff29SHugh Dickins while (index < page->index) { 206750ad2f24SZach O'Keefe clear_highpage(hpage + (index % HPAGE_PMD_NR)); 20682af8ff29SHugh Dickins index++; 20692af8ff29SHugh Dickins } 207050ad2f24SZach O'Keefe copy_highpage(hpage + (page->index % HPAGE_PMD_NR), 2071f3f0e1d2SKirill A. Shutemov page); 2072f3f0e1d2SKirill A. Shutemov list_del(&page->lru); 2073f3f0e1d2SKirill A. Shutemov page->mapping = NULL; 2074042a3082SHugh Dickins page_ref_unfreeze(page, 1); 2075f3f0e1d2SKirill A. Shutemov ClearPageActive(page); 2076f3f0e1d2SKirill A. Shutemov ClearPageUnevictable(page); 2077042a3082SHugh Dickins unlock_page(page); 2078f3f0e1d2SKirill A. Shutemov put_page(page); 20792af8ff29SHugh Dickins index++; 20802af8ff29SHugh Dickins } 20812af8ff29SHugh Dickins while (index < end) { 208250ad2f24SZach O'Keefe clear_highpage(hpage + (index % HPAGE_PMD_NR)); 20832af8ff29SHugh Dickins index++; 2084f3f0e1d2SKirill A. Shutemov } 2085f3f0e1d2SKirill A. Shutemov 2086284a344eSVishal Moola (Oracle) folio = page_folio(hpage); 2087284a344eSVishal Moola (Oracle) folio_mark_uptodate(folio); 2088284a344eSVishal Moola (Oracle) folio_ref_add(folio, HPAGE_PMD_NR - 1); 2089284a344eSVishal Moola (Oracle) 20906058eaecSJohannes Weiner if (is_shmem) 2091284a344eSVishal Moola (Oracle) folio_mark_dirty(folio); 2092284a344eSVishal Moola (Oracle) folio_add_lru(folio); 2093f3f0e1d2SKirill A. 
Shutemov 2094042a3082SHugh Dickins /* 2095042a3082SHugh Dickins * Remove pte page tables, so we can re-fault the page as huge. 2096042a3082SHugh Dickins */ 209734488399SZach O'Keefe result = retract_page_tables(mapping, start, mm, addr, hpage, 209834488399SZach O'Keefe cc); 209950ad2f24SZach O'Keefe unlock_page(hpage); 210050ad2f24SZach O'Keefe hpage = NULL; 2101f3f0e1d2SKirill A. Shutemov } else { 210277da9389SMatthew Wilcox struct page *page; 2103aaa52e34SHugh Dickins 210477da9389SMatthew Wilcox /* Something went wrong: roll back page cache changes */ 210577da9389SMatthew Wilcox xas_lock_irq(&xas); 21062f55f070SMiaohe Lin if (nr_none) { 2107aaa52e34SHugh Dickins mapping->nrpages -= nr_none; 2108aaa52e34SHugh Dickins shmem_uncharge(mapping->host, nr_none); 21092f55f070SMiaohe Lin } 2110aaa52e34SHugh Dickins 211177da9389SMatthew Wilcox xas_set(&xas, start); 211277da9389SMatthew Wilcox xas_for_each(&xas, page, end - 1) { 2113f3f0e1d2SKirill A. Shutemov page = list_first_entry_or_null(&pagelist, 2114f3f0e1d2SKirill A. Shutemov struct page, lru); 211577da9389SMatthew Wilcox if (!page || xas.xa_index < page->index) { 2116f3f0e1d2SKirill A. Shutemov if (!nr_none) 2117f3f0e1d2SKirill A. Shutemov break; 2118f3f0e1d2SKirill A. Shutemov nr_none--; 211959749e6cSJohannes Weiner /* Put holes back where they were */ 212077da9389SMatthew Wilcox xas_store(&xas, NULL); 2121f3f0e1d2SKirill A. Shutemov continue; 2122f3f0e1d2SKirill A. Shutemov } 2123f3f0e1d2SKirill A. Shutemov 212477da9389SMatthew Wilcox VM_BUG_ON_PAGE(page->index != xas.xa_index, page); 2125f3f0e1d2SKirill A. Shutemov 2126f3f0e1d2SKirill A. Shutemov /* Unfreeze the page. */ 2127f3f0e1d2SKirill A. Shutemov list_del(&page->lru); 2128f3f0e1d2SKirill A. Shutemov page_ref_unfreeze(page, 2); 212977da9389SMatthew Wilcox xas_store(&xas, page); 213077da9389SMatthew Wilcox xas_pause(&xas); 213177da9389SMatthew Wilcox xas_unlock_irq(&xas); 2132f3f0e1d2SKirill A. Shutemov unlock_page(page); 2133042a3082SHugh Dickins putback_lru_page(page); 213477da9389SMatthew Wilcox xas_lock_irq(&xas); 2135f3f0e1d2SKirill A. Shutemov } 2136f3f0e1d2SKirill A. Shutemov VM_BUG_ON(nr_none); 213777da9389SMatthew Wilcox xas_unlock_irq(&xas); 2138f3f0e1d2SKirill A. Shutemov 213950ad2f24SZach O'Keefe hpage->mapping = NULL; 2140f3f0e1d2SKirill A. Shutemov } 2141042a3082SHugh Dickins 214250ad2f24SZach O'Keefe if (hpage) 214350ad2f24SZach O'Keefe unlock_page(hpage); 2144f3f0e1d2SKirill A. Shutemov out: 2145f3f0e1d2SKirill A. Shutemov VM_BUG_ON(!list_empty(&pagelist)); 21467cb1d7efSPeter Xu if (hpage) 214750ad2f24SZach O'Keefe put_page(hpage); 21484c9473e8SGautam Menghani 21494c9473e8SGautam Menghani trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result); 215050ad2f24SZach O'Keefe return result; 2151f3f0e1d2SKirill A. Shutemov } 2152f3f0e1d2SKirill A. Shutemov 215334488399SZach O'Keefe static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, 215434488399SZach O'Keefe struct file *file, pgoff_t start, 215534488399SZach O'Keefe struct collapse_control *cc) 2156f3f0e1d2SKirill A. Shutemov { 2157f3f0e1d2SKirill A. Shutemov struct page *page = NULL; 2158579c571eSSong Liu struct address_space *mapping = file->f_mapping; 215985b392dbSMatthew Wilcox XA_STATE(xas, &mapping->i_pages, start); 2160f3f0e1d2SKirill A. Shutemov int present, swap; 2161f3f0e1d2SKirill A. Shutemov int node = NUMA_NO_NODE; 2162f3f0e1d2SKirill A. Shutemov int result = SCAN_SUCCEED; 2163f3f0e1d2SKirill A. Shutemov 2164f3f0e1d2SKirill A. 
Shutemov present = 0; 2165f3f0e1d2SKirill A. Shutemov swap = 0; 216634d6b470SZach O'Keefe memset(cc->node_load, 0, sizeof(cc->node_load)); 2167e031ff96SYang Shi nodes_clear(cc->alloc_nmask); 2168f3f0e1d2SKirill A. Shutemov rcu_read_lock(); 216985b392dbSMatthew Wilcox xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) { 217085b392dbSMatthew Wilcox if (xas_retry(&xas, page)) 2171f3f0e1d2SKirill A. Shutemov continue; 2172f3f0e1d2SKirill A. Shutemov 217385b392dbSMatthew Wilcox if (xa_is_value(page)) { 2174d8ea7cc8SZach O'Keefe ++swap; 2175d8ea7cc8SZach O'Keefe if (cc->is_khugepaged && 2176d8ea7cc8SZach O'Keefe swap > khugepaged_max_ptes_swap) { 2177f3f0e1d2SKirill A. Shutemov result = SCAN_EXCEED_SWAP_PTE; 2178e9ea874aSYang Yang count_vm_event(THP_SCAN_EXCEED_SWAP_PTE); 2179f3f0e1d2SKirill A. Shutemov break; 2180f3f0e1d2SKirill A. Shutemov } 2181f3f0e1d2SKirill A. Shutemov continue; 2182f3f0e1d2SKirill A. Shutemov } 2183f3f0e1d2SKirill A. Shutemov 21846b24ca4aSMatthew Wilcox (Oracle) /* 218558ac9a89SZach O'Keefe * TODO: khugepaged should compact smaller compound pages 21866b24ca4aSMatthew Wilcox (Oracle) * into a PMD sized page 21876b24ca4aSMatthew Wilcox (Oracle) */ 2188f3f0e1d2SKirill A. Shutemov if (PageTransCompound(page)) { 218958ac9a89SZach O'Keefe struct page *head = compound_head(page); 219058ac9a89SZach O'Keefe 219158ac9a89SZach O'Keefe result = compound_order(head) == HPAGE_PMD_ORDER && 219258ac9a89SZach O'Keefe head->index == start 219358ac9a89SZach O'Keefe /* Maybe PMD-mapped */ 219458ac9a89SZach O'Keefe ? SCAN_PTE_MAPPED_HUGEPAGE 219558ac9a89SZach O'Keefe : SCAN_PAGE_COMPOUND; 219658ac9a89SZach O'Keefe /* 219758ac9a89SZach O'Keefe * For SCAN_PTE_MAPPED_HUGEPAGE, further processing 219858ac9a89SZach O'Keefe * by the caller won't touch the page cache, and so 219958ac9a89SZach O'Keefe * it's safe to skip LRU and refcount checks before 220058ac9a89SZach O'Keefe * returning. 220158ac9a89SZach O'Keefe */ 2202f3f0e1d2SKirill A. Shutemov break; 2203f3f0e1d2SKirill A. Shutemov } 2204f3f0e1d2SKirill A. Shutemov 2205f3f0e1d2SKirill A. Shutemov node = page_to_nid(page); 22067d2c4385SZach O'Keefe if (hpage_collapse_scan_abort(node, cc)) { 2207f3f0e1d2SKirill A. Shutemov result = SCAN_SCAN_ABORT; 2208f3f0e1d2SKirill A. Shutemov break; 2209f3f0e1d2SKirill A. Shutemov } 221034d6b470SZach O'Keefe cc->node_load[node]++; 2211f3f0e1d2SKirill A. Shutemov 2212f3f0e1d2SKirill A. Shutemov if (!PageLRU(page)) { 2213f3f0e1d2SKirill A. Shutemov result = SCAN_PAGE_LRU; 2214f3f0e1d2SKirill A. Shutemov break; 2215f3f0e1d2SKirill A. Shutemov } 2216f3f0e1d2SKirill A. Shutemov 221799cb0dbdSSong Liu if (page_count(page) != 221899cb0dbdSSong Liu 1 + page_mapcount(page) + page_has_private(page)) { 2219f3f0e1d2SKirill A. Shutemov result = SCAN_PAGE_COUNT; 2220f3f0e1d2SKirill A. Shutemov break; 2221f3f0e1d2SKirill A. Shutemov } 2222f3f0e1d2SKirill A. Shutemov 2223f3f0e1d2SKirill A. Shutemov /* 2224f3f0e1d2SKirill A. Shutemov * We probably should check if the page is referenced here, but 2225f3f0e1d2SKirill A. Shutemov * nobody would transfer pte_young() to PageReferenced() for us. 2226f3f0e1d2SKirill A. Shutemov * And rmap walk here is just too costly... 2227f3f0e1d2SKirill A. Shutemov */ 2228f3f0e1d2SKirill A. Shutemov 2229f3f0e1d2SKirill A. Shutemov present++; 2230f3f0e1d2SKirill A. Shutemov 2231f3f0e1d2SKirill A. Shutemov if (need_resched()) { 223285b392dbSMatthew Wilcox xas_pause(&xas); 2233f3f0e1d2SKirill A. Shutemov cond_resched_rcu(); 2234f3f0e1d2SKirill A. Shutemov } 2235f3f0e1d2SKirill A. 
2236f3f0e1d2SKirill A. Shutemov 	rcu_read_unlock();
2237f3f0e1d2SKirill A. Shutemov 
2238f3f0e1d2SKirill A. Shutemov 	if (result == SCAN_SUCCEED) {
2239d8ea7cc8SZach O'Keefe 		if (cc->is_khugepaged &&
2240d8ea7cc8SZach O'Keefe 		    present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2241f3f0e1d2SKirill A. Shutemov 			result = SCAN_EXCEED_NONE_PTE;
2242e9ea874aSYang Yang 			count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
2243f3f0e1d2SKirill A. Shutemov 		} else {
224434488399SZach O'Keefe 			result = collapse_file(mm, addr, file, start, cc);
2245f3f0e1d2SKirill A. Shutemov 		}
2246f3f0e1d2SKirill A. Shutemov 	}
2247f3f0e1d2SKirill A. Shutemov 
2248045634ffSGautam Menghani 	trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
224950ad2f24SZach O'Keefe 	return result;
2250f3f0e1d2SKirill A. Shutemov }
2251f3f0e1d2SKirill A. Shutemov #else
225234488399SZach O'Keefe static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
225334488399SZach O'Keefe 				    struct file *file, pgoff_t start,
225434488399SZach O'Keefe 				    struct collapse_control *cc)
2255f3f0e1d2SKirill A. Shutemov {
2256f3f0e1d2SKirill A. Shutemov 	BUILD_BUG();
2257f3f0e1d2SKirill A. Shutemov }
225827e1f827SSong Liu 
2259b26e2701SQi Zheng static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
226027e1f827SSong Liu {
226127e1f827SSong Liu }
226258ac9a89SZach O'Keefe 
226358ac9a89SZach O'Keefe static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
226458ac9a89SZach O'Keefe 					  unsigned long addr)
226558ac9a89SZach O'Keefe {
226658ac9a89SZach O'Keefe 	return false;
226758ac9a89SZach O'Keefe }
2268f3f0e1d2SKirill A. Shutemov #endif
2269f3f0e1d2SKirill A. Shutemov 
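The page_count() test in hpage_collapse_scan_file() above encodes an exact reference budget: one reference held by the page cache, one per PTE mapping, and one more if private data (e.g. buffer heads) is attached. Any extra, unexpected reference aborts the scan with SCAN_PAGE_COUNT. A self-contained sketch with concrete numbers (the helper is illustrative only, not kernel API):

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative reference budget for a file page the scan may collapse:
 * 1 (page cache) + mapcount (PTE mappings) + 1 if private data is
 * attached. Anything beyond that - say a transient GUP pin - means the
 * page cannot be safely frozen, so the scan bails out.
 */
static bool refs_expected(int refcount, int mapcount, bool has_private)
{
	return refcount == 1 + mapcount + (has_private ? 1 : 0);
}

int main(void)
{
	/* mapped by two processes, with buffers attached: 1 + 2 + 1 = 4 */
	printf("%d\n", refs_expected(4, 2, true));	/* 1: collapsible */
	/* same page while someone holds one extra pin */
	printf("%d\n", refs_expected(5, 2, true));	/* 0: SCAN_PAGE_COUNT */
	return 0;
}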
227050ad2f24SZach O'Keefe static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
227134d6b470SZach O'Keefe 					    struct collapse_control *cc)
2272b46e756fSKirill A. Shutemov 	__releases(&khugepaged_mm_lock)
2273b46e756fSKirill A. Shutemov 	__acquires(&khugepaged_mm_lock)
2274b46e756fSKirill A. Shutemov {
227568540502SMatthew Wilcox (Oracle) 	struct vma_iterator vmi;
2276b26e2701SQi Zheng 	struct khugepaged_mm_slot *mm_slot;
2277b26e2701SQi Zheng 	struct mm_slot *slot;
2278b46e756fSKirill A. Shutemov 	struct mm_struct *mm;
2279b46e756fSKirill A. Shutemov 	struct vm_area_struct *vma;
2280b46e756fSKirill A. Shutemov 	int progress = 0;
2281b46e756fSKirill A. Shutemov 
2282b46e756fSKirill A. Shutemov 	VM_BUG_ON(!pages);
228335f3aa39SLance Roy 	lockdep_assert_held(&khugepaged_mm_lock);
228450ad2f24SZach O'Keefe 	*result = SCAN_FAIL;
2285b46e756fSKirill A. Shutemov 
2286b26e2701SQi Zheng 	if (khugepaged_scan.mm_slot) {
2287b46e756fSKirill A. Shutemov 		mm_slot = khugepaged_scan.mm_slot;
2288b26e2701SQi Zheng 		slot = &mm_slot->slot;
2289b26e2701SQi Zheng 	} else {
2290b26e2701SQi Zheng 		slot = list_entry(khugepaged_scan.mm_head.next,
2291b46e756fSKirill A. Shutemov 				  struct mm_slot, mm_node);
2292b26e2701SQi Zheng 		mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2293b46e756fSKirill A. Shutemov 		khugepaged_scan.address = 0;
2294b46e756fSKirill A. Shutemov 		khugepaged_scan.mm_slot = mm_slot;
2295b46e756fSKirill A. Shutemov 	}
2296b46e756fSKirill A. Shutemov 	spin_unlock(&khugepaged_mm_lock);
229727e1f827SSong Liu 	khugepaged_collapse_pte_mapped_thps(mm_slot);
2298b46e756fSKirill A. Shutemov 
2299b26e2701SQi Zheng 	mm = slot->mm;
23003b454ad3SYang Shi 	/*
23013b454ad3SYang Shi 	 * Don't wait for semaphore (to avoid long wait times). Just move to
23023b454ad3SYang Shi 	 * the next mm on the list.
23033b454ad3SYang Shi 	 */
2304b46e756fSKirill A. Shutemov 	vma = NULL;
2305d8ed45c5SMichel Lespinasse 	if (unlikely(!mmap_read_trylock(mm)))
2306c1e8d7c6SMichel Lespinasse 		goto breakouterloop_mmap_lock;
2307b46e756fSKirill A. Shutemov 
2308b46e756fSKirill A. Shutemov 	progress++;
230968540502SMatthew Wilcox (Oracle) 	if (unlikely(hpage_collapse_test_exit(mm)))
231068540502SMatthew Wilcox (Oracle) 		goto breakouterloop;
231168540502SMatthew Wilcox (Oracle) 
231268540502SMatthew Wilcox (Oracle) 	vma_iter_init(&vmi, mm, khugepaged_scan.address);
231368540502SMatthew Wilcox (Oracle) 	for_each_vma(vmi, vma) {
2314b46e756fSKirill A. Shutemov 		unsigned long hstart, hend;
2315b46e756fSKirill A. Shutemov 
2316b46e756fSKirill A. Shutemov 		cond_resched();
23177d2c4385SZach O'Keefe 		if (unlikely(hpage_collapse_test_exit(mm))) {
2318b46e756fSKirill A. Shutemov 			progress++;
2319b46e756fSKirill A. Shutemov 			break;
2320b46e756fSKirill A. Shutemov 		}
2321a7f4e6e4SZach O'Keefe 		if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) {
2322b46e756fSKirill A. Shutemov skip:
2323b46e756fSKirill A. Shutemov 			progress++;
2324b46e756fSKirill A. Shutemov 			continue;
2325b46e756fSKirill A. Shutemov 		}
23264fa6893fSYang Shi 		hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
23274fa6893fSYang Shi 		hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
2328b46e756fSKirill A. Shutemov 		if (khugepaged_scan.address > hend)
2329b46e756fSKirill A. Shutemov 			goto skip;
2330b46e756fSKirill A. Shutemov 		if (khugepaged_scan.address < hstart)
2331b46e756fSKirill A. Shutemov 			khugepaged_scan.address = hstart;
2332b46e756fSKirill A. Shutemov 		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2333b46e756fSKirill A. Shutemov 
2334b46e756fSKirill A. Shutemov 		while (khugepaged_scan.address < hend) {
233550ad2f24SZach O'Keefe 			bool mmap_locked = true;
233650ad2f24SZach O'Keefe 
2337b46e756fSKirill A. Shutemov 			cond_resched();
23387d2c4385SZach O'Keefe 			if (unlikely(hpage_collapse_test_exit(mm)))
2339b46e756fSKirill A. Shutemov 				goto breakouterloop;
2340b46e756fSKirill A. Shutemov 
2341b46e756fSKirill A. Shutemov 			VM_BUG_ON(khugepaged_scan.address < hstart ||
2342b46e756fSKirill A. Shutemov 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
2343b46e756fSKirill A. Shutemov 				  hend);
234499cb0dbdSSong Liu 			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2345396bcc52SMatthew Wilcox (Oracle) 				struct file *file = get_file(vma->vm_file);
2346f3f0e1d2SKirill A. Shutemov 				pgoff_t pgoff = linear_page_index(vma,
2347f3f0e1d2SKirill A. Shutemov 						khugepaged_scan.address);
234899cb0dbdSSong Liu 
2349d8ed45c5SMichel Lespinasse 				mmap_read_unlock(mm);
235034488399SZach O'Keefe 				*result = hpage_collapse_scan_file(mm,
235134488399SZach O'Keefe 						khugepaged_scan.address,
235234488399SZach O'Keefe 						file, pgoff, cc);
235350ad2f24SZach O'Keefe 				mmap_locked = false;
2354f3f0e1d2SKirill A. Shutemov 				fput(file);
2355f3f0e1d2SKirill A. Shutemov 			} else {
23567d2c4385SZach O'Keefe 				*result = hpage_collapse_scan_pmd(mm, vma,
2357b46e756fSKirill A. Shutemov 						khugepaged_scan.address,
23587d2c4385SZach O'Keefe 						&mmap_locked,
23597d2c4385SZach O'Keefe 						cc);
2360f3f0e1d2SKirill A. Shutemov 			}
236158ac9a89SZach O'Keefe 			switch (*result) {
236258ac9a89SZach O'Keefe 			case SCAN_PTE_MAPPED_HUGEPAGE: {
236358ac9a89SZach O'Keefe 				pmd_t *pmd;
236458ac9a89SZach O'Keefe 
236558ac9a89SZach O'Keefe 				*result = find_pmd_or_thp_or_none(mm,
236658ac9a89SZach O'Keefe 						khugepaged_scan.address,
236758ac9a89SZach O'Keefe 						&pmd);
236858ac9a89SZach O'Keefe 				if (*result != SCAN_SUCCEED)
236958ac9a89SZach O'Keefe 					break;
237058ac9a89SZach O'Keefe 				if (!khugepaged_add_pte_mapped_thp(mm,
237158ac9a89SZach O'Keefe 						khugepaged_scan.address))
237258ac9a89SZach O'Keefe 					break;
237358ac9a89SZach O'Keefe 			} fallthrough;
237458ac9a89SZach O'Keefe 			case SCAN_SUCCEED:
237550ad2f24SZach O'Keefe 				++khugepaged_pages_collapsed;
237658ac9a89SZach O'Keefe 				break;
237758ac9a89SZach O'Keefe 			default:
237858ac9a89SZach O'Keefe 				break;
237958ac9a89SZach O'Keefe 			}
238058ac9a89SZach O'Keefe 
2381b46e756fSKirill A. Shutemov 			/* move to next address */
2382b46e756fSKirill A. Shutemov 			khugepaged_scan.address += HPAGE_PMD_SIZE;
2383b46e756fSKirill A. Shutemov 			progress += HPAGE_PMD_NR;
238450ad2f24SZach O'Keefe 			if (!mmap_locked)
238550ad2f24SZach O'Keefe 				/*
238650ad2f24SZach O'Keefe 				 * We released mmap_lock, so break the loop.
238750ad2f24SZach O'Keefe 				 * Note that we drop mmap_lock before all
238850ad2f24SZach O'Keefe 				 * hugepage allocations, so if allocation fails,
238950ad2f24SZach O'Keefe 				 * we are guaranteed to break here and report
239050ad2f24SZach O'Keefe 				 * the correct result back to the caller.
239150ad2f24SZach O'Keefe 				 */
2392c1e8d7c6SMichel Lespinasse 				goto breakouterloop_mmap_lock;
2393b46e756fSKirill A. Shutemov 			if (progress >= pages)
2394b46e756fSKirill A. Shutemov 				goto breakouterloop;
2395b46e756fSKirill A. Shutemov 		}
2396b46e756fSKirill A. Shutemov 	}
2397b46e756fSKirill A. Shutemov breakouterloop:
2398d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2399c1e8d7c6SMichel Lespinasse breakouterloop_mmap_lock:
2400b46e756fSKirill A. Shutemov 
2401b46e756fSKirill A. Shutemov 	spin_lock(&khugepaged_mm_lock);
2402b46e756fSKirill A. Shutemov 	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2403b46e756fSKirill A. Shutemov 	/*
2404b46e756fSKirill A. Shutemov 	 * Release the current mm_slot if this mm is about to die, or
2405b46e756fSKirill A. Shutemov 	 * if we scanned all vmas of this mm.
2406b46e756fSKirill A. Shutemov 	 */
24077d2c4385SZach O'Keefe 	if (hpage_collapse_test_exit(mm) || !vma) {
2408b46e756fSKirill A. Shutemov 		/*
2409b46e756fSKirill A. Shutemov 		 * Make sure that if mm_users is reaching zero while
2410b46e756fSKirill A. Shutemov 		 * khugepaged runs here, khugepaged_exit will find
2411b46e756fSKirill A. Shutemov 		 * mm_slot not pointing to the exiting mm.
2412b46e756fSKirill A. Shutemov 		 */
2413b26e2701SQi Zheng 		if (slot->mm_node.next != &khugepaged_scan.mm_head) {
2414b26e2701SQi Zheng 			slot = list_entry(slot->mm_node.next,
2415b46e756fSKirill A. Shutemov 					  struct mm_slot, mm_node);
2416b26e2701SQi Zheng 			khugepaged_scan.mm_slot =
2417b26e2701SQi Zheng 				mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2418b46e756fSKirill A. Shutemov 			khugepaged_scan.address = 0;
2419b46e756fSKirill A. Shutemov 		} else {
2420b46e756fSKirill A. Shutemov 			khugepaged_scan.mm_slot = NULL;
2421b46e756fSKirill A. Shutemov 			khugepaged_full_scans++;
2422b46e756fSKirill A. Shutemov 		}
2423b46e756fSKirill A. Shutemov 
2424b46e756fSKirill A. Shutemov 		collect_mm_slot(mm_slot);
2425b46e756fSKirill A. Shutemov 	}
2426b46e756fSKirill A. Shutemov 
2427b46e756fSKirill A. Shutemov 	return progress;
2428b46e756fSKirill A. Shutemov }
2429b46e756fSKirill A. Shutemov 
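khugepaged_scan_mm_slot() is a resumable cursor: it performs at most `pages` worth of scanning, records its position in the global khugepaged_scan (mm_slot plus address), and the next invocation resumes from exactly there, wrapping to the head of the list after a full pass. A distilled, self-contained sketch of that pattern, with an int array standing in for the mm list (nothing below is kernel API):

#include <stddef.h>

/*
 * Resumable-cursor sketch: do at most `budget` units of work, remember
 * where we stopped, and pick up from there on the next call.
 */
struct cursor {
	size_t next;		/* index of the next unit to scan */
	unsigned full_passes;	/* completed passes over the whole set */
};

static unsigned scan_some(struct cursor *cur, const unsigned *units,
			  size_t n, unsigned budget)
{
	unsigned progress = 0;

	while (progress < budget) {
		if (cur->next == n) {	/* wrapped around: one full pass */
			cur->next = 0;
			cur->full_passes++;
			break;
		}
		progress += units[cur->next++];	/* "scan" one unit */
	}
	return progress;
}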
2430b46e756fSKirill A. Shutemov static int khugepaged_has_work(void)
2431b46e756fSKirill A. Shutemov {
2432b46e756fSKirill A. Shutemov 	return !list_empty(&khugepaged_scan.mm_head) &&
24331064026bSYang Shi 		hugepage_flags_enabled();
2434b46e756fSKirill A. Shutemov }
2435b46e756fSKirill A. Shutemov 
2436b46e756fSKirill A. Shutemov static int khugepaged_wait_event(void)
2437b46e756fSKirill A. Shutemov {
2438b46e756fSKirill A. Shutemov 	return !list_empty(&khugepaged_scan.mm_head) ||
2439b46e756fSKirill A. Shutemov 		kthread_should_stop();
2440b46e756fSKirill A. Shutemov }
2441b46e756fSKirill A. Shutemov 
244234d6b470SZach O'Keefe static void khugepaged_do_scan(struct collapse_control *cc)
2443b46e756fSKirill A. Shutemov {
2444b46e756fSKirill A. Shutemov 	unsigned int progress = 0, pass_through_head = 0;
244589dc6a96SYanfei Xu 	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
2446b46e756fSKirill A. Shutemov 	bool wait = true;
244750ad2f24SZach O'Keefe 	int result = SCAN_SUCCEED;
2448b46e756fSKirill A. Shutemov 
2449a980df33SKirill A. Shutemov 	lru_add_drain_all();
2450a980df33SKirill A. Shutemov 
2451c6a7f445SYang Shi 	while (true) {
2452b46e756fSKirill A. Shutemov 		cond_resched();
2453b46e756fSKirill A. Shutemov 
2454b46e756fSKirill A. Shutemov 		if (unlikely(kthread_should_stop() || try_to_freeze()))
2455b46e756fSKirill A. Shutemov 			break;
2456b46e756fSKirill A. Shutemov 
2457b46e756fSKirill A. Shutemov 		spin_lock(&khugepaged_mm_lock);
2458b46e756fSKirill A. Shutemov 		if (!khugepaged_scan.mm_slot)
2459b46e756fSKirill A. Shutemov 			pass_through_head++;
2460b46e756fSKirill A. Shutemov 		if (khugepaged_has_work() &&
2461b46e756fSKirill A. Shutemov 		    pass_through_head < 2)
2462b46e756fSKirill A. Shutemov 			progress += khugepaged_scan_mm_slot(pages - progress,
246350ad2f24SZach O'Keefe 							    &result, cc);
2464b46e756fSKirill A. Shutemov 		else
2465b46e756fSKirill A. Shutemov 			progress = pages;
2466b46e756fSKirill A. Shutemov 		spin_unlock(&khugepaged_mm_lock);
2467b46e756fSKirill A. Shutemov 
2468c6a7f445SYang Shi 		if (progress >= pages)
2469c6a7f445SYang Shi 			break;
2470c6a7f445SYang Shi 
247150ad2f24SZach O'Keefe 		if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
2472c6a7f445SYang Shi 			/*
2473c6a7f445SYang Shi 			 * If we fail to allocate the first time, try to sleep
2474c6a7f445SYang Shi 			 * for a while. When we hit the failure again, cancel the scan.
2475c6a7f445SYang Shi 			 */
2476c6a7f445SYang Shi 			if (!wait)
2477c6a7f445SYang Shi 				break;
2478c6a7f445SYang Shi 			wait = false;
2479c6a7f445SYang Shi 			khugepaged_alloc_sleep();
2480c6a7f445SYang Shi 		}
2481c6a7f445SYang Shi 	}
2482b46e756fSKirill A. Shutemov }
2483b46e756fSKirill A. Shutemov 
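The knobs and counters that drive this loop (khugepaged_pages_to_scan, the sleep intervals, khugepaged_pages_collapsed, khugepaged_full_scans) are exported through sysfs on kernels with CONFIG_TRANSPARENT_HUGEPAGE. A sketch that dumps them, assuming the standard layout under /sys/kernel/mm/transparent_hugepage/khugepaged/:

#include <stddef.h>
#include <stdio.h>

/* Sketch only: read back khugepaged's sysfs knobs and counters. */
int main(void)
{
	const char *base = "/sys/kernel/mm/transparent_hugepage/khugepaged/";
	const char *files[] = { "pages_to_scan", "scan_sleep_millisecs",
				"alloc_sleep_millisecs", "pages_collapsed",
				"full_scans" };
	char path[256], buf[64];

	for (size_t i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
		snprintf(path, sizeof(path), "%s%s", base, files[i]);
		FILE *f = fopen(path, "r");

		if (!f)
			continue;	/* THP or khugepaged not available */
		if (fgets(buf, sizeof(buf), f))
			printf("%s: %s", files[i], buf);
		fclose(f);
	}
	return 0;
}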
2484b46e756fSKirill A. Shutemov static bool khugepaged_should_wakeup(void)
2485b46e756fSKirill A. Shutemov {
2486b46e756fSKirill A. Shutemov 	return kthread_should_stop() ||
2487b46e756fSKirill A. Shutemov 	       time_after_eq(jiffies, khugepaged_sleep_expire);
2488b46e756fSKirill A. Shutemov }
2489b46e756fSKirill A. Shutemov 
2490b46e756fSKirill A. Shutemov static void khugepaged_wait_work(void)
2491b46e756fSKirill A. Shutemov {
2492b46e756fSKirill A. Shutemov 	if (khugepaged_has_work()) {
2493b46e756fSKirill A. Shutemov 		const unsigned long scan_sleep_jiffies =
2494b46e756fSKirill A. Shutemov 			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2495b46e756fSKirill A. Shutemov 
2496b46e756fSKirill A. Shutemov 		if (!scan_sleep_jiffies)
2497b46e756fSKirill A. Shutemov 			return;
2498b46e756fSKirill A. Shutemov 
2499b46e756fSKirill A. Shutemov 		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2500b46e756fSKirill A. Shutemov 		wait_event_freezable_timeout(khugepaged_wait,
2501b46e756fSKirill A. Shutemov 					     khugepaged_should_wakeup(),
2502b46e756fSKirill A. Shutemov 					     scan_sleep_jiffies);
2503b46e756fSKirill A. Shutemov 		return;
2504b46e756fSKirill A. Shutemov 	}
2505b46e756fSKirill A. Shutemov 
25061064026bSYang Shi 	if (hugepage_flags_enabled())
2507b46e756fSKirill A. Shutemov 		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2508b46e756fSKirill A. Shutemov }
2509b46e756fSKirill A. Shutemov 
2510b46e756fSKirill A. Shutemov static int khugepaged(void *none)
2511b46e756fSKirill A. Shutemov {
2512b26e2701SQi Zheng 	struct khugepaged_mm_slot *mm_slot;
2513b46e756fSKirill A. Shutemov 
2514b46e756fSKirill A. Shutemov 	set_freezable();
2515b46e756fSKirill A. Shutemov 	set_user_nice(current, MAX_NICE);
2516b46e756fSKirill A. Shutemov 
2517b46e756fSKirill A. Shutemov 	while (!kthread_should_stop()) {
251834d6b470SZach O'Keefe 		khugepaged_do_scan(&khugepaged_collapse_control);
2519b46e756fSKirill A. Shutemov 		khugepaged_wait_work();
2520b46e756fSKirill A. Shutemov 	}
2521b46e756fSKirill A. Shutemov 
2522b46e756fSKirill A. Shutemov 	spin_lock(&khugepaged_mm_lock);
2523b46e756fSKirill A. Shutemov 	mm_slot = khugepaged_scan.mm_slot;
2524b46e756fSKirill A. Shutemov 	khugepaged_scan.mm_slot = NULL;
2525b46e756fSKirill A. Shutemov 	if (mm_slot)
2526b46e756fSKirill A. Shutemov 		collect_mm_slot(mm_slot);
2527b46e756fSKirill A. Shutemov 	spin_unlock(&khugepaged_mm_lock);
2528b46e756fSKirill A. Shutemov 	return 0;
2529b46e756fSKirill A. Shutemov }
2530b46e756fSKirill A. Shutemov 
2531b46e756fSKirill A. Shutemov static void set_recommended_min_free_kbytes(void)
2532b46e756fSKirill A. Shutemov {
2533b46e756fSKirill A. Shutemov 	struct zone *zone;
2534b46e756fSKirill A. Shutemov 	int nr_zones = 0;
2535b46e756fSKirill A. Shutemov 	unsigned long recommended_min;
2536b46e756fSKirill A. Shutemov 
25371064026bSYang Shi 	if (!hugepage_flags_enabled()) {
2538bd3400eaSLiangcai Fan 		calculate_min_free_kbytes();
2539bd3400eaSLiangcai Fan 		goto update_wmarks;
2540bd3400eaSLiangcai Fan 	}
2541bd3400eaSLiangcai Fan 
2542b7d349c7SJoonsoo Kim 	for_each_populated_zone(zone) {
2543b7d349c7SJoonsoo Kim 		/*
2544b7d349c7SJoonsoo Kim 		 * We don't need to worry about fragmentation of
2545b7d349c7SJoonsoo Kim 		 * ZONE_MOVABLE since it only has movable pages.
2546b7d349c7SJoonsoo Kim 		 */
2547b7d349c7SJoonsoo Kim 		if (zone_idx(zone) > gfp_zone(GFP_USER))
2548b7d349c7SJoonsoo Kim 			continue;
2549b7d349c7SJoonsoo Kim 
2550b46e756fSKirill A. Shutemov 		nr_zones++;
2551b7d349c7SJoonsoo Kim 	}
2552b46e756fSKirill A. Shutemov 
2553b46e756fSKirill A. Shutemov 	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2554b46e756fSKirill A. Shutemov 	recommended_min = pageblock_nr_pages * nr_zones * 2;
2555b46e756fSKirill A. Shutemov 
2556b46e756fSKirill A. Shutemov 	/*
2557b46e756fSKirill A. Shutemov 	 * Make sure that on average at least two pageblocks are almost free
2558b46e756fSKirill A. Shutemov 	 * of another type, one for a migratetype to fall back to and a
2559b46e756fSKirill A. Shutemov 	 * second to avoid subsequent fallbacks of other types. There are 3
2560b46e756fSKirill A. Shutemov 	 * MIGRATE_TYPES we care about.
2561b46e756fSKirill A. Shutemov 	 */
2562b46e756fSKirill A. Shutemov 	recommended_min += pageblock_nr_pages * nr_zones *
2563b46e756fSKirill A. Shutemov 			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2564b46e756fSKirill A. Shutemov 
2565b46e756fSKirill A. Shutemov 	/* never allow reserving more than 5% of lowmem */
2566b46e756fSKirill A. Shutemov 	recommended_min = min(recommended_min,
2567b46e756fSKirill A. Shutemov 			      (unsigned long) nr_free_buffer_pages() / 20);
2568b46e756fSKirill A. Shutemov 	recommended_min <<= (PAGE_SHIFT-10);
2569b46e756fSKirill A. Shutemov 
2570b46e756fSKirill A. Shutemov 	if (recommended_min > min_free_kbytes) {
2571b46e756fSKirill A. Shutemov 		if (user_min_free_kbytes >= 0)
2572b46e756fSKirill A. Shutemov 			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2573b46e756fSKirill A. Shutemov 				min_free_kbytes, recommended_min);
2574b46e756fSKirill A. Shutemov 
2575b46e756fSKirill A. Shutemov 		min_free_kbytes = recommended_min;
2576b46e756fSKirill A. Shutemov 	}
2577bd3400eaSLiangcai Fan 
2578bd3400eaSLiangcai Fan update_wmarks:
2579b46e756fSKirill A. Shutemov 	setup_per_zone_wmarks();
2580b46e756fSKirill A. Shutemov }
2581b46e756fSKirill A. Shutemov 
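As a worked example of the calculation above, under assumed x86-64 defaults (4 KiB base pages, 2 MiB pageblocks so pageblock_nr_pages = 512, MIGRATE_PCPTYPES = 3) with two populated zones below ZONE_MOVABLE:

#include <stdio.h>

/*
 * Assumption-laden sketch, not kernel code: reproduces the arithmetic of
 * set_recommended_min_free_kbytes() with concrete example values.
 */
int main(void)
{
	const unsigned long pageblock_nr_pages = 512;	/* 2 MiB / 4 KiB */
	const unsigned long nr_zones = 2;
	const unsigned long migrate_pcptypes = 3;
	unsigned long min = pageblock_nr_pages * nr_zones * 2;	/* 2048 pages */

	min += pageblock_nr_pages * nr_zones *
	       migrate_pcptypes * migrate_pcptypes;	/* + 9216 = 11264 pages */
	min <<= 12 - 10;		/* pages -> KiB: 45056 (~44 MiB) */
	printf("recommended_min = %lu KiB\n", min);	/* before the 5%-of-lowmem clamp */
	return 0;
}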
2582b46e756fSKirill A. Shutemov int start_stop_khugepaged(void)
2583b46e756fSKirill A. Shutemov {
2584b46e756fSKirill A. Shutemov 	int err = 0;
2585b46e756fSKirill A. Shutemov 
2586b46e756fSKirill A. Shutemov 	mutex_lock(&khugepaged_mutex);
25871064026bSYang Shi 	if (hugepage_flags_enabled()) {
2588b46e756fSKirill A. Shutemov 		if (!khugepaged_thread)
2589b46e756fSKirill A. Shutemov 			khugepaged_thread = kthread_run(khugepaged, NULL,
2590b46e756fSKirill A. Shutemov 							"khugepaged");
2591b46e756fSKirill A. Shutemov 		if (IS_ERR(khugepaged_thread)) {
2592b46e756fSKirill A. Shutemov 			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2593b46e756fSKirill A. Shutemov 			err = PTR_ERR(khugepaged_thread);
2594b46e756fSKirill A. Shutemov 			khugepaged_thread = NULL;
2595b46e756fSKirill A. Shutemov 			goto fail;
2596b46e756fSKirill A. Shutemov 		}
2597b46e756fSKirill A. Shutemov 
2598b46e756fSKirill A. Shutemov 		if (!list_empty(&khugepaged_scan.mm_head))
2599b46e756fSKirill A. Shutemov 			wake_up_interruptible(&khugepaged_wait);
2600b46e756fSKirill A. Shutemov 	} else if (khugepaged_thread) {
2601b46e756fSKirill A. Shutemov 		kthread_stop(khugepaged_thread);
2602b46e756fSKirill A. Shutemov 		khugepaged_thread = NULL;
2603b46e756fSKirill A. Shutemov 	}
2604bd3400eaSLiangcai Fan 	set_recommended_min_free_kbytes();
2605b46e756fSKirill A. Shutemov fail:
2606b46e756fSKirill A. Shutemov 	mutex_unlock(&khugepaged_mutex);
2607b46e756fSKirill A. Shutemov 	return err;
2608b46e756fSKirill A. Shutemov }
26094aab2be0SVijay Balakrishna 
26104aab2be0SVijay Balakrishna void khugepaged_min_free_kbytes_update(void)
26114aab2be0SVijay Balakrishna {
26124aab2be0SVijay Balakrishna 	mutex_lock(&khugepaged_mutex);
26131064026bSYang Shi 	if (hugepage_flags_enabled() && khugepaged_thread)
26144aab2be0SVijay Balakrishna 		set_recommended_min_free_kbytes();
26154aab2be0SVijay Balakrishna 	mutex_unlock(&khugepaged_mutex);
26164aab2be0SVijay Balakrishna }
26177d8faaf1SZach O'Keefe 
261857e9cc50SJohannes Weiner bool current_is_khugepaged(void)
261957e9cc50SJohannes Weiner {
262057e9cc50SJohannes Weiner 	return kthread_func(current) == khugepaged;
262157e9cc50SJohannes Weiner }
262257e9cc50SJohannes Weiner 
26237d8faaf1SZach O'Keefe static int madvise_collapse_errno(enum scan_result r)
26247d8faaf1SZach O'Keefe {
26257d8faaf1SZach O'Keefe 	/*
26267d8faaf1SZach O'Keefe 	 * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
26277d8faaf1SZach O'Keefe 	 * actionable feedback to the caller, so they may take an appropriate
26287d8faaf1SZach O'Keefe 	 * fallback measure depending on the nature of the failure.
26297d8faaf1SZach O'Keefe 	 */
26307d8faaf1SZach O'Keefe 	switch (r) {
26317d8faaf1SZach O'Keefe 	case SCAN_ALLOC_HUGE_PAGE_FAIL:
26327d8faaf1SZach O'Keefe 		return -ENOMEM;
26337d8faaf1SZach O'Keefe 	case SCAN_CGROUP_CHARGE_FAIL:
26347d8faaf1SZach O'Keefe 		return -EBUSY;
26357d8faaf1SZach O'Keefe 	/* Resource temporarily unavailable - trying again might succeed */
2636ae63c898SZach O'Keefe 	case SCAN_PAGE_COUNT:
26377d8faaf1SZach O'Keefe 	case SCAN_PAGE_LOCK:
26387d8faaf1SZach O'Keefe 	case SCAN_PAGE_LRU:
26390f3e2a2cSZach O'Keefe 	case SCAN_DEL_PAGE_LRU:
26407d8faaf1SZach O'Keefe 		return -EAGAIN;
26417d8faaf1SZach O'Keefe 	/*
26427d8faaf1SZach O'Keefe 	 * Other: trying again is unlikely to succeed, or the error is
26437d8faaf1SZach O'Keefe 	 * intrinsic to the specified memory range. khugepaged likely won't be
26447d8faaf1SZach O'Keefe 	 * able to collapse either.
26457d8faaf1SZach O'Keefe 	 */
26467d8faaf1SZach O'Keefe 	default:
26477d8faaf1SZach O'Keefe 		return -EINVAL;
26487d8faaf1SZach O'Keefe 	}
26497d8faaf1SZach O'Keefe }
26507d8faaf1SZach O'Keefe 
26517d8faaf1SZach O'Keefe int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
26527d8faaf1SZach O'Keefe 		     unsigned long start, unsigned long end)
26537d8faaf1SZach O'Keefe {
26547d8faaf1SZach O'Keefe 	struct collapse_control *cc;
26557d8faaf1SZach O'Keefe 	struct mm_struct *mm = vma->vm_mm;
26567d8faaf1SZach O'Keefe 	unsigned long hstart, hend, addr;
26577d8faaf1SZach O'Keefe 	int thps = 0, last_fail = SCAN_FAIL;
26587d8faaf1SZach O'Keefe 	bool mmap_locked = true;
26597d8faaf1SZach O'Keefe 
26607d8faaf1SZach O'Keefe 	BUG_ON(vma->vm_start > start);
26617d8faaf1SZach O'Keefe 	BUG_ON(vma->vm_end < end);
26627d8faaf1SZach O'Keefe 
26637d8faaf1SZach O'Keefe 	*prev = vma;
26647d8faaf1SZach O'Keefe 
26657d8faaf1SZach O'Keefe 	if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
26667d8faaf1SZach O'Keefe 		return -EINVAL;
26677d8faaf1SZach O'Keefe 
26687d8faaf1SZach O'Keefe 	cc = kmalloc(sizeof(*cc), GFP_KERNEL);
26697d8faaf1SZach O'Keefe 	if (!cc)
26707d8faaf1SZach O'Keefe 		return -ENOMEM;
26717d8faaf1SZach O'Keefe 	cc->is_khugepaged = false;
26727d8faaf1SZach O'Keefe 
26737d8faaf1SZach O'Keefe 	mmgrab(mm);
26747d8faaf1SZach O'Keefe 	lru_add_drain_all();
26757d8faaf1SZach O'Keefe 
26767d8faaf1SZach O'Keefe 	hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
26777d8faaf1SZach O'Keefe 	hend = end & HPAGE_PMD_MASK;
26787d8faaf1SZach O'Keefe 
26797d8faaf1SZach O'Keefe 	for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
26807d8faaf1SZach O'Keefe 		int result = SCAN_FAIL;
26817d8faaf1SZach O'Keefe 
26827d8faaf1SZach O'Keefe 		if (!mmap_locked) {
26837d8faaf1SZach O'Keefe 			cond_resched();
26847d8faaf1SZach O'Keefe 			mmap_read_lock(mm);
26857d8faaf1SZach O'Keefe 			mmap_locked = true;
268634488399SZach O'Keefe 			result = hugepage_vma_revalidate(mm, addr, false, &vma,
268734488399SZach O'Keefe 							 cc);
26887d8faaf1SZach O'Keefe 			if (result  != SCAN_SUCCEED) {
26897d8faaf1SZach O'Keefe 				last_fail = result;
26907d8faaf1SZach O'Keefe 				goto out_nolock;
26917d8faaf1SZach O'Keefe 			}
26924d24de94SYang Shi 
269352dc0310SZach O'Keefe 			hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
26947d8faaf1SZach O'Keefe 		}
26957d8faaf1SZach O'Keefe 		mmap_assert_locked(mm);
26967d8faaf1SZach O'Keefe 		memset(cc->node_load, 0, sizeof(cc->node_load));
2697e031ff96SYang Shi 		nodes_clear(cc->alloc_nmask);
269834488399SZach O'Keefe 		if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
269934488399SZach O'Keefe 			struct file *file = get_file(vma->vm_file);
270034488399SZach O'Keefe 			pgoff_t pgoff = linear_page_index(vma, addr);
270134488399SZach O'Keefe 
270234488399SZach O'Keefe 			mmap_read_unlock(mm);
270334488399SZach O'Keefe 			mmap_locked = false;
270434488399SZach O'Keefe 			result = hpage_collapse_scan_file(mm, addr, file, pgoff,
27057d2c4385SZach O'Keefe 							  cc);
270634488399SZach O'Keefe 			fput(file);
270734488399SZach O'Keefe 		} else {
270834488399SZach O'Keefe 			result = hpage_collapse_scan_pmd(mm, vma, addr,
270934488399SZach O'Keefe 							 &mmap_locked, cc);
271034488399SZach O'Keefe 		}
27117d8faaf1SZach O'Keefe 		if (!mmap_locked)
27127d8faaf1SZach O'Keefe 			*prev = NULL;  /* Tell caller we dropped mmap_lock */
27137d8faaf1SZach O'Keefe 
271434488399SZach O'Keefe handle_result:
27157d8faaf1SZach O'Keefe 		switch (result) {
27167d8faaf1SZach O'Keefe 		case SCAN_SUCCEED:
27177d8faaf1SZach O'Keefe 		case SCAN_PMD_MAPPED:
27187d8faaf1SZach O'Keefe 			++thps;
27197d8faaf1SZach O'Keefe 			break;
272034488399SZach O'Keefe 		case SCAN_PTE_MAPPED_HUGEPAGE:
272134488399SZach O'Keefe 			BUG_ON(mmap_locked);
272234488399SZach O'Keefe 			BUG_ON(*prev);
272334488399SZach O'Keefe 			mmap_write_lock(mm);
272434488399SZach O'Keefe 			result = collapse_pte_mapped_thp(mm, addr, true);
272534488399SZach O'Keefe 			mmap_write_unlock(mm);
272634488399SZach O'Keefe 			goto handle_result;
27277d8faaf1SZach O'Keefe 		/* Whitelisted set of results where continuing is OK */
27287d8faaf1SZach O'Keefe 		case SCAN_PMD_NULL:
27297d8faaf1SZach O'Keefe 		case SCAN_PTE_NON_PRESENT:
27307d8faaf1SZach O'Keefe 		case SCAN_PTE_UFFD_WP:
27317d8faaf1SZach O'Keefe 		case SCAN_PAGE_RO:
27327d8faaf1SZach O'Keefe 		case SCAN_LACK_REFERENCED_PAGE:
27337d8faaf1SZach O'Keefe 		case SCAN_PAGE_NULL:
27347d8faaf1SZach O'Keefe 		case SCAN_PAGE_COUNT:
27357d8faaf1SZach O'Keefe 		case SCAN_PAGE_LOCK:
27367d8faaf1SZach O'Keefe 		case SCAN_PAGE_COMPOUND:
27377d8faaf1SZach O'Keefe 		case SCAN_PAGE_LRU:
27380f3e2a2cSZach O'Keefe 		case SCAN_DEL_PAGE_LRU:
27397d8faaf1SZach O'Keefe 			last_fail = result;
27407d8faaf1SZach O'Keefe 			break;
27417d8faaf1SZach O'Keefe 		default:
27427d8faaf1SZach O'Keefe 			last_fail = result;
27437d8faaf1SZach O'Keefe 			/* Other error, exit */
27447d8faaf1SZach O'Keefe 			goto out_maybelock;
27457d8faaf1SZach O'Keefe 		}
27467d8faaf1SZach O'Keefe 	}
27477d8faaf1SZach O'Keefe 
27487d8faaf1SZach O'Keefe out_maybelock:
27497d8faaf1SZach O'Keefe 	/* Caller expects us to hold mmap_lock on return */
27507d8faaf1SZach O'Keefe 	if (!mmap_locked)
27517d8faaf1SZach O'Keefe 		mmap_read_lock(mm);
27527d8faaf1SZach O'Keefe out_nolock:
27537d8faaf1SZach O'Keefe 	mmap_assert_locked(mm);
27547d8faaf1SZach O'Keefe 	mmdrop(mm);
27557d8faaf1SZach O'Keefe 	kfree(cc);
27567d8faaf1SZach O'Keefe 
27577d8faaf1SZach O'Keefe 	return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
27587d8faaf1SZach O'Keefe 			: madvise_collapse_errno(last_fail);
27597d8faaf1SZach O'Keefe }
2760
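From user space, this path is reached via madvise(2) with MADV_COLLAPSE (Linux 6.1+); the call returns 0 only if every PMD-sized stride in the range ends up backed by a huge page, otherwise the errno chosen by madvise_collapse_errno() above. A minimal sketch that honors that contract, retrying once on EAGAIN (the 2 MiB PMD size and the fallback define are assumptions for x86-64):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_COLLAPSE
#define MADV_COLLAPSE 25	/* uapi value; in <linux/mman.h> since 6.1 */
#endif

/* Retry once on EAGAIN; treat every other errno as final. */
static int try_collapse(void *addr, size_t len)
{
	for (int attempt = 0; attempt < 2; attempt++) {
		if (!madvise(addr, len, MADV_COLLAPSE))
			return 0;
		if (errno != EAGAIN)
			break;	/* EINVAL/ENOMEM/EBUSY: don't retry */
	}
	fprintf(stderr, "MADV_COLLAPSE: %s\n", strerror(errno));
	return -1;
}

int main(void)
{
	const size_t pmd = 2UL << 20;	/* assumed PMD size */
	char *raw = mmap(NULL, 2 * pmd, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *aligned;

	if (raw == MAP_FAILED)
		return 1;
	/* Align up so the request covers one whole PMD-sized stride. */
	aligned = (char *)(((uintptr_t)raw + pmd - 1) & ~(pmd - 1));
	memset(aligned, 1, pmd);	/* populate: collapse needs present pages */
	return try_collapse(aligned, pmd) ? 1 : 0;
}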