17a338472SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 2f8af4da3SHugh Dickins /* 331dbd01fSIzik Eidus * Memory merging support. 431dbd01fSIzik Eidus * 531dbd01fSIzik Eidus * This code enables dynamic sharing of identical pages found in different 631dbd01fSIzik Eidus * memory areas, even if they are not shared by fork() 731dbd01fSIzik Eidus * 836b2528dSIzik Eidus * Copyright (C) 2008-2009 Red Hat, Inc. 931dbd01fSIzik Eidus * Authors: 1031dbd01fSIzik Eidus * Izik Eidus 1131dbd01fSIzik Eidus * Andrea Arcangeli 1231dbd01fSIzik Eidus * Chris Wright 1336b2528dSIzik Eidus * Hugh Dickins 14f8af4da3SHugh Dickins */ 15f8af4da3SHugh Dickins 16f8af4da3SHugh Dickins #include <linux/errno.h> 1731dbd01fSIzik Eidus #include <linux/mm.h> 1836090defSArnd Bergmann #include <linux/mm_inline.h> 1931dbd01fSIzik Eidus #include <linux/fs.h> 20f8af4da3SHugh Dickins #include <linux/mman.h> 2131dbd01fSIzik Eidus #include <linux/sched.h> 226e84f315SIngo Molnar #include <linux/sched/mm.h> 23f7ccbae4SIngo Molnar #include <linux/sched/coredump.h> 2431dbd01fSIzik Eidus #include <linux/rwsem.h> 2531dbd01fSIzik Eidus #include <linux/pagemap.h> 2631dbd01fSIzik Eidus #include <linux/rmap.h> 2731dbd01fSIzik Eidus #include <linux/spinlock.h> 2859e1a2f4STimofey Titovets #include <linux/xxhash.h> 2931dbd01fSIzik Eidus #include <linux/delay.h> 3031dbd01fSIzik Eidus #include <linux/kthread.h> 3131dbd01fSIzik Eidus #include <linux/wait.h> 3231dbd01fSIzik Eidus #include <linux/slab.h> 3331dbd01fSIzik Eidus #include <linux/rbtree.h> 3462b61f61SHugh Dickins #include <linux/memory.h> 3531dbd01fSIzik Eidus #include <linux/mmu_notifier.h> 362c6854fdSIzik Eidus #include <linux/swap.h> 37f8af4da3SHugh Dickins #include <linux/ksm.h> 384ca3a69bSSasha Levin #include <linux/hashtable.h> 39878aee7dSAndrea Arcangeli #include <linux/freezer.h> 4072788c38SDavid Rientjes #include <linux/oom.h> 4190bd6fd3SPetr Holasek #include <linux/numa.h> 42d7c0e68dSDavid Hildenbrand #include <linux/pagewalk.h> 43f8af4da3SHugh Dickins 4431dbd01fSIzik Eidus #include <asm/tlbflush.h> 4573848b46SHugh Dickins #include "internal.h" 4658730ab6SQi Zheng #include "mm_slot.h" 4731dbd01fSIzik Eidus 48739100c8SStefan Roesch #define CREATE_TRACE_POINTS 49739100c8SStefan Roesch #include <trace/events/ksm.h> 50739100c8SStefan Roesch 51e850dcf5SHugh Dickins #ifdef CONFIG_NUMA 52e850dcf5SHugh Dickins #define NUMA(x) (x) 53e850dcf5SHugh Dickins #define DO_NUMA(x) do { (x); } while (0) 54e850dcf5SHugh Dickins #else 55e850dcf5SHugh Dickins #define NUMA(x) (0) 56e850dcf5SHugh Dickins #define DO_NUMA(x) do { } while (0) 57e850dcf5SHugh Dickins #endif 58e850dcf5SHugh Dickins 595a2ca3efSMike Rapoport /** 605a2ca3efSMike Rapoport * DOC: Overview 615a2ca3efSMike Rapoport * 6231dbd01fSIzik Eidus * A few notes about the KSM scanning process, 6331dbd01fSIzik Eidus * to make it easier to understand the data structures below: 6431dbd01fSIzik Eidus * 6531dbd01fSIzik Eidus * In order to reduce excessive scanning, KSM sorts the memory pages by their 6631dbd01fSIzik Eidus * contents into a data structure that holds pointers to the pages' locations. 6731dbd01fSIzik Eidus * 6831dbd01fSIzik Eidus * Since the contents of the pages may change at any moment, KSM cannot just 6931dbd01fSIzik Eidus * insert the pages into a normal sorted tree and expect it to find anything. 7031dbd01fSIzik Eidus * Therefore KSM uses two data structures - the stable and the unstable tree. 7131dbd01fSIzik Eidus * 7231dbd01fSIzik Eidus * The stable tree holds pointers to all the merged pages (ksm pages), sorted 7331dbd01fSIzik Eidus * by their contents. Because each such page is write-protected, searching on 7431dbd01fSIzik Eidus * this tree is fully assured to be working (except when pages are unmapped), 7531dbd01fSIzik Eidus * and therefore this tree is called the stable tree. 7631dbd01fSIzik Eidus * 775a2ca3efSMike Rapoport * The stable tree node includes information required for reverse 785a2ca3efSMike Rapoport * mapping from a KSM page to virtual addresses that map this page. 795a2ca3efSMike Rapoport * 805a2ca3efSMike Rapoport * In order to avoid large latencies of the rmap walks on KSM pages, 815a2ca3efSMike Rapoport * KSM maintains two types of nodes in the stable tree: 825a2ca3efSMike Rapoport * 835a2ca3efSMike Rapoport * * the regular nodes that keep the reverse mapping structures in a 845a2ca3efSMike Rapoport * linked list 855a2ca3efSMike Rapoport * * the "chains" that link nodes ("dups") that represent the same 865a2ca3efSMike Rapoport * write protected memory content, but each "dup" corresponds to a 875a2ca3efSMike Rapoport * different KSM page copy of that content 885a2ca3efSMike Rapoport * 895a2ca3efSMike Rapoport * Internally, the regular nodes, "dups" and "chains" are represented 9021fbd591SQi Zheng * using the same struct ksm_stable_node structure. 915a2ca3efSMike Rapoport * 9231dbd01fSIzik Eidus * In addition to the stable tree, KSM uses a second data structure called the 9331dbd01fSIzik Eidus * unstable tree: this tree holds pointers to pages which have been found to 9431dbd01fSIzik Eidus * be "unchanged for a period of time". The unstable tree sorts these pages 9531dbd01fSIzik Eidus * by their contents, but since they are not write-protected, KSM cannot rely 9631dbd01fSIzik Eidus * upon the unstable tree to work correctly - the unstable tree is liable to 9731dbd01fSIzik Eidus * be corrupted as its contents are modified, and so it is called unstable. 9831dbd01fSIzik Eidus * 9931dbd01fSIzik Eidus * KSM solves this problem by several techniques: 10031dbd01fSIzik Eidus * 10131dbd01fSIzik Eidus * 1) The unstable tree is flushed every time KSM completes scanning all 10231dbd01fSIzik Eidus * memory areas, and then the tree is rebuilt again from the beginning. 10331dbd01fSIzik Eidus * 2) KSM will only insert into the unstable tree, pages whose hash value 10431dbd01fSIzik Eidus * has not changed since the previous scan of all memory areas. 10531dbd01fSIzik Eidus * 3) The unstable tree is a RedBlack Tree - so its balancing is based on the 10631dbd01fSIzik Eidus * colors of the nodes and not on their contents, assuring that even when 10731dbd01fSIzik Eidus * the tree gets "corrupted" it won't get out of balance, so scanning time 10831dbd01fSIzik Eidus * remains the same (also, searching and inserting nodes in an rbtree uses 10931dbd01fSIzik Eidus * the same algorithm, so we have no overhead when we flush and rebuild). 11031dbd01fSIzik Eidus * 4) KSM never flushes the stable tree, which means that even if it were to 11131dbd01fSIzik Eidus * take 10 attempts to find a page in the unstable tree, once it is found, 11231dbd01fSIzik Eidus * it is secured in the stable tree. (When we scan a new page, we first 11331dbd01fSIzik Eidus * compare it against the stable tree, and then against the unstable tree.) 1148fdb3dbfSHugh Dickins * 1158fdb3dbfSHugh Dickins * If the merge_across_nodes tunable is unset, then KSM maintains multiple 1168fdb3dbfSHugh Dickins * stable trees and multiple unstable trees: one of each for each NUMA node. 11731dbd01fSIzik Eidus */ 11831dbd01fSIzik Eidus 11931dbd01fSIzik Eidus /** 12021fbd591SQi Zheng * struct ksm_mm_slot - ksm information per mm that is being scanned 12158730ab6SQi Zheng * @slot: hash lookup from mm to mm_slot 1226514d511SHugh Dickins * @rmap_list: head for this mm_slot's singly-linked list of rmap_items 12331dbd01fSIzik Eidus */ 12421fbd591SQi Zheng struct ksm_mm_slot { 12558730ab6SQi Zheng struct mm_slot slot; 12621fbd591SQi Zheng struct ksm_rmap_item *rmap_list; 12731dbd01fSIzik Eidus }; 12831dbd01fSIzik Eidus 12931dbd01fSIzik Eidus /** 13031dbd01fSIzik Eidus * struct ksm_scan - cursor for scanning 13131dbd01fSIzik Eidus * @mm_slot: the current mm_slot we are scanning 13231dbd01fSIzik Eidus * @address: the next address inside that to be scanned 1336514d511SHugh Dickins * @rmap_list: link to the next rmap to be scanned in the rmap_list 13431dbd01fSIzik Eidus * @seqnr: count of completed full scans (needed when removing unstable node) 13531dbd01fSIzik Eidus * 13631dbd01fSIzik Eidus * There is only the one ksm_scan instance of this cursor structure. 13731dbd01fSIzik Eidus */ 13831dbd01fSIzik Eidus struct ksm_scan { 13921fbd591SQi Zheng struct ksm_mm_slot *mm_slot; 14031dbd01fSIzik Eidus unsigned long address; 14121fbd591SQi Zheng struct ksm_rmap_item **rmap_list; 14231dbd01fSIzik Eidus unsigned long seqnr; 14331dbd01fSIzik Eidus }; 14431dbd01fSIzik Eidus 14531dbd01fSIzik Eidus /** 14621fbd591SQi Zheng * struct ksm_stable_node - node of the stable rbtree 1477b6ba2c7SHugh Dickins * @node: rb node of this ksm page in the stable tree 1484146d2d6SHugh Dickins * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list 1492c653d0eSAndrea Arcangeli * @hlist_dup: linked into the stable_node->hlist with a stable_node chain 1504146d2d6SHugh Dickins * @list: linked into migrate_nodes, pending placement in the proper node tree 1517b6ba2c7SHugh Dickins * @hlist: hlist head of rmap_items using this ksm page 1524146d2d6SHugh Dickins * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid) 1532c653d0eSAndrea Arcangeli * @chain_prune_time: time of the last full garbage collection 1542c653d0eSAndrea Arcangeli * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN 1554146d2d6SHugh Dickins * @nid: NUMA node id of stable tree in which linked (may not match kpfn) 1567b6ba2c7SHugh Dickins */ 15721fbd591SQi Zheng struct ksm_stable_node { 1584146d2d6SHugh Dickins union { 1594146d2d6SHugh Dickins struct rb_node node; /* when node of stable tree */ 1604146d2d6SHugh Dickins struct { /* when listed for migration */ 1614146d2d6SHugh Dickins struct list_head *head; 1622c653d0eSAndrea Arcangeli struct { 1632c653d0eSAndrea Arcangeli struct hlist_node hlist_dup; 1644146d2d6SHugh Dickins struct list_head list; 1654146d2d6SHugh Dickins }; 1664146d2d6SHugh Dickins }; 1672c653d0eSAndrea Arcangeli }; 1687b6ba2c7SHugh Dickins struct hlist_head hlist; 1692c653d0eSAndrea Arcangeli union { 17062b61f61SHugh Dickins unsigned long kpfn; 1712c653d0eSAndrea Arcangeli unsigned long chain_prune_time; 1722c653d0eSAndrea Arcangeli }; 1732c653d0eSAndrea Arcangeli /* 1742c653d0eSAndrea Arcangeli * STABLE_NODE_CHAIN can be any negative number in 1752c653d0eSAndrea Arcangeli * rmap_hlist_len negative range, but better not -1 to be able 1762c653d0eSAndrea Arcangeli * to reliably detect underflows. 1772c653d0eSAndrea Arcangeli */ 1782c653d0eSAndrea Arcangeli #define STABLE_NODE_CHAIN -1024 1792c653d0eSAndrea Arcangeli int rmap_hlist_len; 1804146d2d6SHugh Dickins #ifdef CONFIG_NUMA 1814146d2d6SHugh Dickins int nid; 1824146d2d6SHugh Dickins #endif 1837b6ba2c7SHugh Dickins }; 1847b6ba2c7SHugh Dickins 1857b6ba2c7SHugh Dickins /** 18621fbd591SQi Zheng * struct ksm_rmap_item - reverse mapping item for virtual addresses 1876514d511SHugh Dickins * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list 188db114b83SHugh Dickins * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree 189bc56620bSHugh Dickins * @nid: NUMA node id of unstable tree in which linked (may not match page) 19031dbd01fSIzik Eidus * @mm: the memory structure this rmap_item is pointing into 19131dbd01fSIzik Eidus * @address: the virtual address this rmap_item tracks (+ flags in low bits) 19231dbd01fSIzik Eidus * @oldchecksum: previous checksum of the page at that virtual address 1937b6ba2c7SHugh Dickins * @node: rb node of this rmap_item in the unstable tree 1947b6ba2c7SHugh Dickins * @head: pointer to stable_node heading this list in the stable tree 1957b6ba2c7SHugh Dickins * @hlist: link into hlist of rmap_items hanging off that stable_node 19631dbd01fSIzik Eidus */ 19721fbd591SQi Zheng struct ksm_rmap_item { 19821fbd591SQi Zheng struct ksm_rmap_item *rmap_list; 199bc56620bSHugh Dickins union { 200db114b83SHugh Dickins struct anon_vma *anon_vma; /* when stable */ 201bc56620bSHugh Dickins #ifdef CONFIG_NUMA 202bc56620bSHugh Dickins int nid; /* when node of unstable tree */ 203bc56620bSHugh Dickins #endif 204bc56620bSHugh Dickins }; 20531dbd01fSIzik Eidus struct mm_struct *mm; 20631dbd01fSIzik Eidus unsigned long address; /* + low bits used for flags below */ 20731dbd01fSIzik Eidus unsigned int oldchecksum; /* when unstable */ 20831dbd01fSIzik Eidus union { 2097b6ba2c7SHugh Dickins struct rb_node node; /* when node of unstable tree */ 2107b6ba2c7SHugh Dickins struct { /* when listed from stable tree */ 21121fbd591SQi Zheng struct ksm_stable_node *head; 2127b6ba2c7SHugh Dickins struct hlist_node hlist; 2137b6ba2c7SHugh Dickins }; 21431dbd01fSIzik Eidus }; 21531dbd01fSIzik Eidus }; 21631dbd01fSIzik Eidus 21731dbd01fSIzik Eidus #define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */ 2187b6ba2c7SHugh Dickins #define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */ 2197b6ba2c7SHugh Dickins #define STABLE_FLAG 0x200 /* is listed from the stable tree */ 22031dbd01fSIzik Eidus 22131dbd01fSIzik Eidus /* The stable and unstable tree heads */ 222ef53d16cSHugh Dickins static struct rb_root one_stable_tree[1] = { RB_ROOT }; 223ef53d16cSHugh Dickins static struct rb_root one_unstable_tree[1] = { RB_ROOT }; 224ef53d16cSHugh Dickins static struct rb_root *root_stable_tree = one_stable_tree; 225ef53d16cSHugh Dickins static struct rb_root *root_unstable_tree = one_unstable_tree; 22631dbd01fSIzik Eidus 2274146d2d6SHugh Dickins /* Recently migrated nodes of stable tree, pending proper placement */ 2284146d2d6SHugh Dickins static LIST_HEAD(migrate_nodes); 2292c653d0eSAndrea Arcangeli #define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev) 2304146d2d6SHugh Dickins 2314ca3a69bSSasha Levin #define MM_SLOTS_HASH_BITS 10 2324ca3a69bSSasha Levin static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS); 23331dbd01fSIzik Eidus 23421fbd591SQi Zheng static struct ksm_mm_slot ksm_mm_head = { 23558730ab6SQi Zheng .slot.mm_node = LIST_HEAD_INIT(ksm_mm_head.slot.mm_node), 23631dbd01fSIzik Eidus }; 23731dbd01fSIzik Eidus static struct ksm_scan ksm_scan = { 23831dbd01fSIzik Eidus .mm_slot = &ksm_mm_head, 23931dbd01fSIzik Eidus }; 24031dbd01fSIzik Eidus 24131dbd01fSIzik Eidus static struct kmem_cache *rmap_item_cache; 2427b6ba2c7SHugh Dickins static struct kmem_cache *stable_node_cache; 24331dbd01fSIzik Eidus static struct kmem_cache *mm_slot_cache; 24431dbd01fSIzik Eidus 24531dbd01fSIzik Eidus /* The number of nodes in the stable tree */ 246b4028260SHugh Dickins static unsigned long ksm_pages_shared; 24731dbd01fSIzik Eidus 248e178dfdeSHugh Dickins /* The number of page slots additionally sharing those nodes */ 249b4028260SHugh Dickins static unsigned long ksm_pages_sharing; 25031dbd01fSIzik Eidus 251473b0ce4SHugh Dickins /* The number of nodes in the unstable tree */ 252473b0ce4SHugh Dickins static unsigned long ksm_pages_unshared; 253473b0ce4SHugh Dickins 254473b0ce4SHugh Dickins /* The number of rmap_items in use: to calculate pages_volatile */ 255473b0ce4SHugh Dickins static unsigned long ksm_rmap_items; 256473b0ce4SHugh Dickins 2572c653d0eSAndrea Arcangeli /* The number of stable_node chains */ 2582c653d0eSAndrea Arcangeli static unsigned long ksm_stable_node_chains; 2592c653d0eSAndrea Arcangeli 2602c653d0eSAndrea Arcangeli /* The number of stable_node dups linked to the stable_node chains */ 2612c653d0eSAndrea Arcangeli static unsigned long ksm_stable_node_dups; 2622c653d0eSAndrea Arcangeli 2632c653d0eSAndrea Arcangeli /* Delay in pruning stale stable_node_dups in the stable_node_chains */ 264584ff0dfSZhansaya Bagdauletkyzy static unsigned int ksm_stable_node_chains_prune_millisecs = 2000; 2652c653d0eSAndrea Arcangeli 2662c653d0eSAndrea Arcangeli /* Maximum number of page slots sharing a stable node */ 2672c653d0eSAndrea Arcangeli static int ksm_max_page_sharing = 256; 2682c653d0eSAndrea Arcangeli 26931dbd01fSIzik Eidus /* Number of pages ksmd should scan in one batch */ 2702c6854fdSIzik Eidus static unsigned int ksm_thread_pages_to_scan = 100; 27131dbd01fSIzik Eidus 27231dbd01fSIzik Eidus /* Milliseconds ksmd should sleep between batches */ 2732ffd8679SHugh Dickins static unsigned int ksm_thread_sleep_millisecs = 20; 27431dbd01fSIzik Eidus 275e86c59b1SClaudio Imbrenda /* Checksum of an empty (zeroed) page */ 276e86c59b1SClaudio Imbrenda static unsigned int zero_checksum __read_mostly; 277e86c59b1SClaudio Imbrenda 278e86c59b1SClaudio Imbrenda /* Whether to merge empty (zeroed) pages with actual zero pages */ 279e86c59b1SClaudio Imbrenda static bool ksm_use_zero_pages __read_mostly; 280e86c59b1SClaudio Imbrenda 281*e2942062Sxu xin /* The number of zero pages which is placed by KSM */ 282*e2942062Sxu xin unsigned long ksm_zero_pages; 283*e2942062Sxu xin 284e850dcf5SHugh Dickins #ifdef CONFIG_NUMA 28590bd6fd3SPetr Holasek /* Zeroed when merging across nodes is not allowed */ 28690bd6fd3SPetr Holasek static unsigned int ksm_merge_across_nodes = 1; 287ef53d16cSHugh Dickins static int ksm_nr_node_ids = 1; 288e850dcf5SHugh Dickins #else 289e850dcf5SHugh Dickins #define ksm_merge_across_nodes 1U 290ef53d16cSHugh Dickins #define ksm_nr_node_ids 1 291e850dcf5SHugh Dickins #endif 29290bd6fd3SPetr Holasek 29331dbd01fSIzik Eidus #define KSM_RUN_STOP 0 29431dbd01fSIzik Eidus #define KSM_RUN_MERGE 1 29531dbd01fSIzik Eidus #define KSM_RUN_UNMERGE 2 296ef4d43a8SHugh Dickins #define KSM_RUN_OFFLINE 4 297ef4d43a8SHugh Dickins static unsigned long ksm_run = KSM_RUN_STOP; 298ef4d43a8SHugh Dickins static void wait_while_offlining(void); 29931dbd01fSIzik Eidus 30031dbd01fSIzik Eidus static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait); 301fcf9a0efSKirill Tkhai static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait); 30231dbd01fSIzik Eidus static DEFINE_MUTEX(ksm_thread_mutex); 30331dbd01fSIzik Eidus static DEFINE_SPINLOCK(ksm_mmlist_lock); 30431dbd01fSIzik Eidus 30521fbd591SQi Zheng #define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\ 30631dbd01fSIzik Eidus sizeof(struct __struct), __alignof__(struct __struct),\ 30731dbd01fSIzik Eidus (__flags), NULL) 30831dbd01fSIzik Eidus 30931dbd01fSIzik Eidus static int __init ksm_slab_init(void) 31031dbd01fSIzik Eidus { 31121fbd591SQi Zheng rmap_item_cache = KSM_KMEM_CACHE(ksm_rmap_item, 0); 31231dbd01fSIzik Eidus if (!rmap_item_cache) 31331dbd01fSIzik Eidus goto out; 31431dbd01fSIzik Eidus 31521fbd591SQi Zheng stable_node_cache = KSM_KMEM_CACHE(ksm_stable_node, 0); 3167b6ba2c7SHugh Dickins if (!stable_node_cache) 3177b6ba2c7SHugh Dickins goto out_free1; 3187b6ba2c7SHugh Dickins 31921fbd591SQi Zheng mm_slot_cache = KSM_KMEM_CACHE(ksm_mm_slot, 0); 32031dbd01fSIzik Eidus if (!mm_slot_cache) 3217b6ba2c7SHugh Dickins goto out_free2; 32231dbd01fSIzik Eidus 32331dbd01fSIzik Eidus return 0; 32431dbd01fSIzik Eidus 3257b6ba2c7SHugh Dickins out_free2: 3267b6ba2c7SHugh Dickins kmem_cache_destroy(stable_node_cache); 3277b6ba2c7SHugh Dickins out_free1: 32831dbd01fSIzik Eidus kmem_cache_destroy(rmap_item_cache); 32931dbd01fSIzik Eidus out: 33031dbd01fSIzik Eidus return -ENOMEM; 33131dbd01fSIzik Eidus } 33231dbd01fSIzik Eidus 33331dbd01fSIzik Eidus static void __init ksm_slab_free(void) 33431dbd01fSIzik Eidus { 33531dbd01fSIzik Eidus kmem_cache_destroy(mm_slot_cache); 3367b6ba2c7SHugh Dickins kmem_cache_destroy(stable_node_cache); 33731dbd01fSIzik Eidus kmem_cache_destroy(rmap_item_cache); 33831dbd01fSIzik Eidus mm_slot_cache = NULL; 33931dbd01fSIzik Eidus } 34031dbd01fSIzik Eidus 34121fbd591SQi Zheng static __always_inline bool is_stable_node_chain(struct ksm_stable_node *chain) 3422c653d0eSAndrea Arcangeli { 3432c653d0eSAndrea Arcangeli return chain->rmap_hlist_len == STABLE_NODE_CHAIN; 3442c653d0eSAndrea Arcangeli } 3452c653d0eSAndrea Arcangeli 34621fbd591SQi Zheng static __always_inline bool is_stable_node_dup(struct ksm_stable_node *dup) 3472c653d0eSAndrea Arcangeli { 3482c653d0eSAndrea Arcangeli return dup->head == STABLE_NODE_DUP_HEAD; 3492c653d0eSAndrea Arcangeli } 3502c653d0eSAndrea Arcangeli 35121fbd591SQi Zheng static inline void stable_node_chain_add_dup(struct ksm_stable_node *dup, 35221fbd591SQi Zheng struct ksm_stable_node *chain) 3532c653d0eSAndrea Arcangeli { 3542c653d0eSAndrea Arcangeli VM_BUG_ON(is_stable_node_dup(dup)); 3552c653d0eSAndrea Arcangeli dup->head = STABLE_NODE_DUP_HEAD; 3562c653d0eSAndrea Arcangeli VM_BUG_ON(!is_stable_node_chain(chain)); 3572c653d0eSAndrea Arcangeli hlist_add_head(&dup->hlist_dup, &chain->hlist); 3582c653d0eSAndrea Arcangeli ksm_stable_node_dups++; 3592c653d0eSAndrea Arcangeli } 3602c653d0eSAndrea Arcangeli 36121fbd591SQi Zheng static inline void __stable_node_dup_del(struct ksm_stable_node *dup) 3622c653d0eSAndrea Arcangeli { 363b4fecc67SAndrea Arcangeli VM_BUG_ON(!is_stable_node_dup(dup)); 3642c653d0eSAndrea Arcangeli hlist_del(&dup->hlist_dup); 3652c653d0eSAndrea Arcangeli ksm_stable_node_dups--; 3662c653d0eSAndrea Arcangeli } 3672c653d0eSAndrea Arcangeli 36821fbd591SQi Zheng static inline void stable_node_dup_del(struct ksm_stable_node *dup) 3692c653d0eSAndrea Arcangeli { 3702c653d0eSAndrea Arcangeli VM_BUG_ON(is_stable_node_chain(dup)); 3712c653d0eSAndrea Arcangeli if (is_stable_node_dup(dup)) 3722c653d0eSAndrea Arcangeli __stable_node_dup_del(dup); 3732c653d0eSAndrea Arcangeli else 3742c653d0eSAndrea Arcangeli rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid)); 3752c653d0eSAndrea Arcangeli #ifdef CONFIG_DEBUG_VM 3762c653d0eSAndrea Arcangeli dup->head = NULL; 3772c653d0eSAndrea Arcangeli #endif 3782c653d0eSAndrea Arcangeli } 3792c653d0eSAndrea Arcangeli 38021fbd591SQi Zheng static inline struct ksm_rmap_item *alloc_rmap_item(void) 38131dbd01fSIzik Eidus { 38221fbd591SQi Zheng struct ksm_rmap_item *rmap_item; 383473b0ce4SHugh Dickins 3845b398e41Szhong jiang rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL | 3855b398e41Szhong jiang __GFP_NORETRY | __GFP_NOWARN); 386473b0ce4SHugh Dickins if (rmap_item) 387473b0ce4SHugh Dickins ksm_rmap_items++; 388473b0ce4SHugh Dickins return rmap_item; 38931dbd01fSIzik Eidus } 39031dbd01fSIzik Eidus 39121fbd591SQi Zheng static inline void free_rmap_item(struct ksm_rmap_item *rmap_item) 39231dbd01fSIzik Eidus { 393473b0ce4SHugh Dickins ksm_rmap_items--; 394cb4df4caSxu xin rmap_item->mm->ksm_rmap_items--; 39531dbd01fSIzik Eidus rmap_item->mm = NULL; /* debug safety */ 39631dbd01fSIzik Eidus kmem_cache_free(rmap_item_cache, rmap_item); 39731dbd01fSIzik Eidus } 39831dbd01fSIzik Eidus 39921fbd591SQi Zheng static inline struct ksm_stable_node *alloc_stable_node(void) 4007b6ba2c7SHugh Dickins { 4016213055fSzhong jiang /* 4026213055fSzhong jiang * The allocation can take too long with GFP_KERNEL when memory is under 4036213055fSzhong jiang * pressure, which may lead to hung task warnings. Adding __GFP_HIGH 4046213055fSzhong jiang * grants access to memory reserves, helping to avoid this problem. 4056213055fSzhong jiang */ 4066213055fSzhong jiang return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH); 4077b6ba2c7SHugh Dickins } 4087b6ba2c7SHugh Dickins 40921fbd591SQi Zheng static inline void free_stable_node(struct ksm_stable_node *stable_node) 4107b6ba2c7SHugh Dickins { 4112c653d0eSAndrea Arcangeli VM_BUG_ON(stable_node->rmap_hlist_len && 4122c653d0eSAndrea Arcangeli !is_stable_node_chain(stable_node)); 4137b6ba2c7SHugh Dickins kmem_cache_free(stable_node_cache, stable_node); 4147b6ba2c7SHugh Dickins } 4157b6ba2c7SHugh Dickins 41631dbd01fSIzik Eidus /* 417a913e182SHugh Dickins * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's 418a913e182SHugh Dickins * page tables after it has passed through ksm_exit() - which, if necessary, 419c1e8d7c6SMichel Lespinasse * takes mmap_lock briefly to serialize against them. ksm_exit() does not set 420a913e182SHugh Dickins * a special flag: they can just back out as soon as mm_users goes to zero. 421a913e182SHugh Dickins * ksm_test_exit() is used throughout to make this test for exit: in some 422a913e182SHugh Dickins * places for correctness, in some places just to avoid unnecessary work. 423a913e182SHugh Dickins */ 424a913e182SHugh Dickins static inline bool ksm_test_exit(struct mm_struct *mm) 425a913e182SHugh Dickins { 426a913e182SHugh Dickins return atomic_read(&mm->mm_users) == 0; 427a913e182SHugh Dickins } 428a913e182SHugh Dickins 429d7c0e68dSDavid Hildenbrand static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next, 430d7c0e68dSDavid Hildenbrand struct mm_walk *walk) 431d7c0e68dSDavid Hildenbrand { 432d7c0e68dSDavid Hildenbrand struct page *page = NULL; 433d7c0e68dSDavid Hildenbrand spinlock_t *ptl; 434d7c0e68dSDavid Hildenbrand pte_t *pte; 435c33c7948SRyan Roberts pte_t ptent; 436d7c0e68dSDavid Hildenbrand int ret; 437d7c0e68dSDavid Hildenbrand 438d7c0e68dSDavid Hildenbrand pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); 43904dee9e8SHugh Dickins if (!pte) 44004dee9e8SHugh Dickins return 0; 441c33c7948SRyan Roberts ptent = ptep_get(pte); 442c33c7948SRyan Roberts if (pte_present(ptent)) { 443c33c7948SRyan Roberts page = vm_normal_page(walk->vma, addr, ptent); 444c33c7948SRyan Roberts } else if (!pte_none(ptent)) { 445c33c7948SRyan Roberts swp_entry_t entry = pte_to_swp_entry(ptent); 446d7c0e68dSDavid Hildenbrand 447d7c0e68dSDavid Hildenbrand /* 448d7c0e68dSDavid Hildenbrand * As KSM pages remain KSM pages until freed, no need to wait 449d7c0e68dSDavid Hildenbrand * here for migration to end. 450d7c0e68dSDavid Hildenbrand */ 451d7c0e68dSDavid Hildenbrand if (is_migration_entry(entry)) 452d7c0e68dSDavid Hildenbrand page = pfn_swap_entry_to_page(entry); 453d7c0e68dSDavid Hildenbrand } 45479271476Sxu xin /* return 1 if the page is an normal ksm page or KSM-placed zero page */ 45579271476Sxu xin ret = (page && PageKsm(page)) || is_ksm_zero_pte(*pte); 456d7c0e68dSDavid Hildenbrand pte_unmap_unlock(pte, ptl); 457d7c0e68dSDavid Hildenbrand return ret; 458d7c0e68dSDavid Hildenbrand } 459d7c0e68dSDavid Hildenbrand 460d7c0e68dSDavid Hildenbrand static const struct mm_walk_ops break_ksm_ops = { 461d7c0e68dSDavid Hildenbrand .pmd_entry = break_ksm_pmd_entry, 462d7c0e68dSDavid Hildenbrand }; 463d7c0e68dSDavid Hildenbrand 464a913e182SHugh Dickins /* 4656cce3314SDavid Hildenbrand * We use break_ksm to break COW on a ksm page by triggering unsharing, 4666cce3314SDavid Hildenbrand * such that the ksm page will get replaced by an exclusive anonymous page. 46731dbd01fSIzik Eidus * 4686cce3314SDavid Hildenbrand * We take great care only to touch a ksm page, in a VM_MERGEABLE vma, 46931dbd01fSIzik Eidus * in case the application has unmapped and remapped mm,addr meanwhile. 47031dbd01fSIzik Eidus * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP 471bbcd53c9SDavid Hildenbrand * mmap of /dev/mem, where we would not want to touch it. 4721b2ee126SDave Hansen * 4736cce3314SDavid Hildenbrand * FAULT_FLAG_REMOTE/FOLL_REMOTE are because we do this outside the context 4741b2ee126SDave Hansen * of the process that owns 'vma'. We also do not want to enforce 4751b2ee126SDave Hansen * protection keys here anyway. 47631dbd01fSIzik Eidus */ 477d952b791SHugh Dickins static int break_ksm(struct vm_area_struct *vma, unsigned long addr) 47831dbd01fSIzik Eidus { 47950a7ca3cSSouptick Joarder vm_fault_t ret = 0; 48031dbd01fSIzik Eidus 48131dbd01fSIzik Eidus do { 482d7c0e68dSDavid Hildenbrand int ksm_page; 48358f595c6SDavid Hildenbrand 48431dbd01fSIzik Eidus cond_resched(); 485d7c0e68dSDavid Hildenbrand ksm_page = walk_page_range_vma(vma, addr, addr + 1, 486d7c0e68dSDavid Hildenbrand &break_ksm_ops, NULL); 487d7c0e68dSDavid Hildenbrand if (WARN_ON_ONCE(ksm_page < 0)) 488d7c0e68dSDavid Hildenbrand return ksm_page; 48958f595c6SDavid Hildenbrand if (!ksm_page) 49058f595c6SDavid Hildenbrand return 0; 491dcddffd4SKirill A. Shutemov ret = handle_mm_fault(vma, addr, 4926cce3314SDavid Hildenbrand FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE, 493bce617edSPeter Xu NULL); 49458f595c6SDavid Hildenbrand } while (!(ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM))); 495d952b791SHugh Dickins /* 49658f595c6SDavid Hildenbrand * We must loop until we no longer find a KSM page because 49758f595c6SDavid Hildenbrand * handle_mm_fault() may back out if there's any difficulty e.g. if 49858f595c6SDavid Hildenbrand * pte accessed bit gets updated concurrently. 499d952b791SHugh Dickins * 500d952b791SHugh Dickins * VM_FAULT_SIGBUS could occur if we race with truncation of the 501d952b791SHugh Dickins * backing file, which also invalidates anonymous pages: that's 502d952b791SHugh Dickins * okay, that truncation will have unmapped the PageKsm for us. 503d952b791SHugh Dickins * 504d952b791SHugh Dickins * VM_FAULT_OOM: at the time of writing (late July 2009), setting 505d952b791SHugh Dickins * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the 506d952b791SHugh Dickins * current task has TIF_MEMDIE set, and will be OOM killed on return 507d952b791SHugh Dickins * to user; and ksmd, having no mm, would never be chosen for that. 508d952b791SHugh Dickins * 509d952b791SHugh Dickins * But if the mm is in a limited mem_cgroup, then the fault may fail 510d952b791SHugh Dickins * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and 511d952b791SHugh Dickins * even ksmd can fail in this way - though it's usually breaking ksm 512d952b791SHugh Dickins * just to undo a merge it made a moment before, so unlikely to oom. 513d952b791SHugh Dickins * 514d952b791SHugh Dickins * That's a pity: we might therefore have more kernel pages allocated 515d952b791SHugh Dickins * than we're counting as nodes in the stable tree; but ksm_do_scan 516d952b791SHugh Dickins * will retry to break_cow on each pass, so should recover the page 517d952b791SHugh Dickins * in due course. The important thing is to not let VM_MERGEABLE 518d952b791SHugh Dickins * be cleared while any such pages might remain in the area. 519d952b791SHugh Dickins */ 520d952b791SHugh Dickins return (ret & VM_FAULT_OOM) ? -ENOMEM : 0; 52131dbd01fSIzik Eidus } 52231dbd01fSIzik Eidus 523d7597f59SStefan Roesch static bool vma_ksm_compatible(struct vm_area_struct *vma) 524d7597f59SStefan Roesch { 525d7597f59SStefan Roesch if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE | VM_PFNMAP | 526d7597f59SStefan Roesch VM_IO | VM_DONTEXPAND | VM_HUGETLB | 527d7597f59SStefan Roesch VM_MIXEDMAP)) 528d7597f59SStefan Roesch return false; /* just ignore the advice */ 529d7597f59SStefan Roesch 530d7597f59SStefan Roesch if (vma_is_dax(vma)) 531d7597f59SStefan Roesch return false; 532d7597f59SStefan Roesch 533d7597f59SStefan Roesch #ifdef VM_SAO 534d7597f59SStefan Roesch if (vma->vm_flags & VM_SAO) 535d7597f59SStefan Roesch return false; 536d7597f59SStefan Roesch #endif 537d7597f59SStefan Roesch #ifdef VM_SPARC_ADI 538d7597f59SStefan Roesch if (vma->vm_flags & VM_SPARC_ADI) 539d7597f59SStefan Roesch return false; 540d7597f59SStefan Roesch #endif 541d7597f59SStefan Roesch 542d7597f59SStefan Roesch return true; 543d7597f59SStefan Roesch } 544d7597f59SStefan Roesch 545ef694222SBob Liu static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm, 546ef694222SBob Liu unsigned long addr) 547ef694222SBob Liu { 548ef694222SBob Liu struct vm_area_struct *vma; 549ef694222SBob Liu if (ksm_test_exit(mm)) 550ef694222SBob Liu return NULL; 551ff69fb81SLiam Howlett vma = vma_lookup(mm, addr); 552ff69fb81SLiam Howlett if (!vma || !(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) 553ef694222SBob Liu return NULL; 554ef694222SBob Liu return vma; 555ef694222SBob Liu } 556ef694222SBob Liu 55721fbd591SQi Zheng static void break_cow(struct ksm_rmap_item *rmap_item) 55831dbd01fSIzik Eidus { 5598dd3557aSHugh Dickins struct mm_struct *mm = rmap_item->mm; 5608dd3557aSHugh Dickins unsigned long addr = rmap_item->address; 56131dbd01fSIzik Eidus struct vm_area_struct *vma; 56231dbd01fSIzik Eidus 5634035c07aSHugh Dickins /* 5644035c07aSHugh Dickins * It is not an accident that whenever we want to break COW 5654035c07aSHugh Dickins * to undo, we also need to drop a reference to the anon_vma. 5664035c07aSHugh Dickins */ 5679e60109fSPeter Zijlstra put_anon_vma(rmap_item->anon_vma); 5684035c07aSHugh Dickins 569d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 570ef694222SBob Liu vma = find_mergeable_vma(mm, addr); 571ef694222SBob Liu if (vma) 57231dbd01fSIzik Eidus break_ksm(vma, addr); 573d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 57431dbd01fSIzik Eidus } 57531dbd01fSIzik Eidus 57621fbd591SQi Zheng static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item) 57731dbd01fSIzik Eidus { 57831dbd01fSIzik Eidus struct mm_struct *mm = rmap_item->mm; 57931dbd01fSIzik Eidus unsigned long addr = rmap_item->address; 58031dbd01fSIzik Eidus struct vm_area_struct *vma; 58131dbd01fSIzik Eidus struct page *page; 58231dbd01fSIzik Eidus 583d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 584ef694222SBob Liu vma = find_mergeable_vma(mm, addr); 585ef694222SBob Liu if (!vma) 58631dbd01fSIzik Eidus goto out; 58731dbd01fSIzik Eidus 58831dbd01fSIzik Eidus page = follow_page(vma, addr, FOLL_GET); 589f7091ed6SHaiyue Wang if (IS_ERR_OR_NULL(page)) 59031dbd01fSIzik Eidus goto out; 591f7091ed6SHaiyue Wang if (is_zone_device_page(page)) 592f7091ed6SHaiyue Wang goto out_putpage; 593f765f540SKirill A. Shutemov if (PageAnon(page)) { 59431dbd01fSIzik Eidus flush_anon_page(vma, page, addr); 59531dbd01fSIzik Eidus flush_dcache_page(page); 59631dbd01fSIzik Eidus } else { 597f7091ed6SHaiyue Wang out_putpage: 59831dbd01fSIzik Eidus put_page(page); 599c8f95ed1SAndrea Arcangeli out: 600c8f95ed1SAndrea Arcangeli page = NULL; 60131dbd01fSIzik Eidus } 602d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 60331dbd01fSIzik Eidus return page; 60431dbd01fSIzik Eidus } 60531dbd01fSIzik Eidus 60690bd6fd3SPetr Holasek /* 60790bd6fd3SPetr Holasek * This helper is used for getting right index into array of tree roots. 60890bd6fd3SPetr Holasek * When merge_across_nodes knob is set to 1, there are only two rb-trees for 60990bd6fd3SPetr Holasek * stable and unstable pages from all nodes with roots in index 0. Otherwise, 61090bd6fd3SPetr Holasek * every node has its own stable and unstable tree. 61190bd6fd3SPetr Holasek */ 61290bd6fd3SPetr Holasek static inline int get_kpfn_nid(unsigned long kpfn) 61390bd6fd3SPetr Holasek { 614d8fc16a8SHugh Dickins return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn)); 61590bd6fd3SPetr Holasek } 61690bd6fd3SPetr Holasek 61721fbd591SQi Zheng static struct ksm_stable_node *alloc_stable_node_chain(struct ksm_stable_node *dup, 6182c653d0eSAndrea Arcangeli struct rb_root *root) 6192c653d0eSAndrea Arcangeli { 62021fbd591SQi Zheng struct ksm_stable_node *chain = alloc_stable_node(); 6212c653d0eSAndrea Arcangeli VM_BUG_ON(is_stable_node_chain(dup)); 6222c653d0eSAndrea Arcangeli if (likely(chain)) { 6232c653d0eSAndrea Arcangeli INIT_HLIST_HEAD(&chain->hlist); 6242c653d0eSAndrea Arcangeli chain->chain_prune_time = jiffies; 6252c653d0eSAndrea Arcangeli chain->rmap_hlist_len = STABLE_NODE_CHAIN; 6262c653d0eSAndrea Arcangeli #if defined (CONFIG_DEBUG_VM) && defined(CONFIG_NUMA) 62798fa15f3SAnshuman Khandual chain->nid = NUMA_NO_NODE; /* debug */ 6282c653d0eSAndrea Arcangeli #endif 6292c653d0eSAndrea Arcangeli ksm_stable_node_chains++; 6302c653d0eSAndrea Arcangeli 6312c653d0eSAndrea Arcangeli /* 6322c653d0eSAndrea Arcangeli * Put the stable node chain in the first dimension of 6332c653d0eSAndrea Arcangeli * the stable tree and at the same time remove the old 6342c653d0eSAndrea Arcangeli * stable node. 6352c653d0eSAndrea Arcangeli */ 6362c653d0eSAndrea Arcangeli rb_replace_node(&dup->node, &chain->node, root); 6372c653d0eSAndrea Arcangeli 6382c653d0eSAndrea Arcangeli /* 6392c653d0eSAndrea Arcangeli * Move the old stable node to the second dimension 6402c653d0eSAndrea Arcangeli * queued in the hlist_dup. The invariant is that all 6412c653d0eSAndrea Arcangeli * dup stable_nodes in the chain->hlist point to pages 642457aef94SEthon Paul * that are write protected and have the exact same 6432c653d0eSAndrea Arcangeli * content. 6442c653d0eSAndrea Arcangeli */ 6452c653d0eSAndrea Arcangeli stable_node_chain_add_dup(dup, chain); 6462c653d0eSAndrea Arcangeli } 6472c653d0eSAndrea Arcangeli return chain; 6482c653d0eSAndrea Arcangeli } 6492c653d0eSAndrea Arcangeli 65021fbd591SQi Zheng static inline void free_stable_node_chain(struct ksm_stable_node *chain, 6512c653d0eSAndrea Arcangeli struct rb_root *root) 6522c653d0eSAndrea Arcangeli { 6532c653d0eSAndrea Arcangeli rb_erase(&chain->node, root); 6542c653d0eSAndrea Arcangeli free_stable_node(chain); 6552c653d0eSAndrea Arcangeli ksm_stable_node_chains--; 6562c653d0eSAndrea Arcangeli } 6572c653d0eSAndrea Arcangeli 65821fbd591SQi Zheng static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node) 6594035c07aSHugh Dickins { 66021fbd591SQi Zheng struct ksm_rmap_item *rmap_item; 6614035c07aSHugh Dickins 6622c653d0eSAndrea Arcangeli /* check it's not STABLE_NODE_CHAIN or negative */ 6632c653d0eSAndrea Arcangeli BUG_ON(stable_node->rmap_hlist_len < 0); 6642c653d0eSAndrea Arcangeli 665b67bfe0dSSasha Levin hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { 666739100c8SStefan Roesch if (rmap_item->hlist.next) { 6674035c07aSHugh Dickins ksm_pages_sharing--; 668739100c8SStefan Roesch trace_ksm_remove_rmap_item(stable_node->kpfn, rmap_item, rmap_item->mm); 669739100c8SStefan Roesch } else { 6704035c07aSHugh Dickins ksm_pages_shared--; 671739100c8SStefan Roesch } 67276093853Sxu xin 67376093853Sxu xin rmap_item->mm->ksm_merging_pages--; 67476093853Sxu xin 6752c653d0eSAndrea Arcangeli VM_BUG_ON(stable_node->rmap_hlist_len <= 0); 6762c653d0eSAndrea Arcangeli stable_node->rmap_hlist_len--; 6779e60109fSPeter Zijlstra put_anon_vma(rmap_item->anon_vma); 6784035c07aSHugh Dickins rmap_item->address &= PAGE_MASK; 6794035c07aSHugh Dickins cond_resched(); 6804035c07aSHugh Dickins } 6814035c07aSHugh Dickins 6822c653d0eSAndrea Arcangeli /* 6832c653d0eSAndrea Arcangeli * We need the second aligned pointer of the migrate_nodes 6842c653d0eSAndrea Arcangeli * list_head to stay clear from the rb_parent_color union 6852c653d0eSAndrea Arcangeli * (aligned and different than any node) and also different 6862c653d0eSAndrea Arcangeli * from &migrate_nodes. This will verify that future list.h changes 687815f0ddbSNick Desaulniers * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it. 6882c653d0eSAndrea Arcangeli */ 6892c653d0eSAndrea Arcangeli BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes); 6902c653d0eSAndrea Arcangeli BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1); 6912c653d0eSAndrea Arcangeli 692739100c8SStefan Roesch trace_ksm_remove_ksm_page(stable_node->kpfn); 6934146d2d6SHugh Dickins if (stable_node->head == &migrate_nodes) 6944146d2d6SHugh Dickins list_del(&stable_node->list); 6954146d2d6SHugh Dickins else 6962c653d0eSAndrea Arcangeli stable_node_dup_del(stable_node); 6974035c07aSHugh Dickins free_stable_node(stable_node); 6984035c07aSHugh Dickins } 6994035c07aSHugh Dickins 7002cee57d1SYang Shi enum get_ksm_page_flags { 7012cee57d1SYang Shi GET_KSM_PAGE_NOLOCK, 7022cee57d1SYang Shi GET_KSM_PAGE_LOCK, 7032cee57d1SYang Shi GET_KSM_PAGE_TRYLOCK 7042cee57d1SYang Shi }; 7052cee57d1SYang Shi 7064035c07aSHugh Dickins /* 7074035c07aSHugh Dickins * get_ksm_page: checks if the page indicated by the stable node 7084035c07aSHugh Dickins * is still its ksm page, despite having held no reference to it. 7094035c07aSHugh Dickins * In which case we can trust the content of the page, and it 7104035c07aSHugh Dickins * returns the gotten page; but if the page has now been zapped, 7114035c07aSHugh Dickins * remove the stale node from the stable tree and return NULL. 712c8d6553bSHugh Dickins * But beware, the stable node's page might be being migrated. 7134035c07aSHugh Dickins * 7144035c07aSHugh Dickins * You would expect the stable_node to hold a reference to the ksm page. 7154035c07aSHugh Dickins * But if it increments the page's count, swapping out has to wait for 7164035c07aSHugh Dickins * ksmd to come around again before it can free the page, which may take 7174035c07aSHugh Dickins * seconds or even minutes: much too unresponsive. So instead we use a 7184035c07aSHugh Dickins * "keyhole reference": access to the ksm page from the stable node peeps 7194035c07aSHugh Dickins * out through its keyhole to see if that page still holds the right key, 7204035c07aSHugh Dickins * pointing back to this stable node. This relies on freeing a PageAnon 7214035c07aSHugh Dickins * page to reset its page->mapping to NULL, and relies on no other use of 7224035c07aSHugh Dickins * a page to put something that might look like our key in page->mapping. 7234035c07aSHugh Dickins * is on its way to being freed; but it is an anomaly to bear in mind. 7244035c07aSHugh Dickins */ 72521fbd591SQi Zheng static struct page *get_ksm_page(struct ksm_stable_node *stable_node, 7262cee57d1SYang Shi enum get_ksm_page_flags flags) 7274035c07aSHugh Dickins { 7284035c07aSHugh Dickins struct page *page; 7294035c07aSHugh Dickins void *expected_mapping; 730c8d6553bSHugh Dickins unsigned long kpfn; 7314035c07aSHugh Dickins 732bda807d4SMinchan Kim expected_mapping = (void *)((unsigned long)stable_node | 733bda807d4SMinchan Kim PAGE_MAPPING_KSM); 734c8d6553bSHugh Dickins again: 73508df4774SPaul E. McKenney kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */ 736c8d6553bSHugh Dickins page = pfn_to_page(kpfn); 7374db0c3c2SJason Low if (READ_ONCE(page->mapping) != expected_mapping) 7384035c07aSHugh Dickins goto stale; 739c8d6553bSHugh Dickins 740c8d6553bSHugh Dickins /* 741c8d6553bSHugh Dickins * We cannot do anything with the page while its refcount is 0. 742c8d6553bSHugh Dickins * Usually 0 means free, or tail of a higher-order page: in which 743c8d6553bSHugh Dickins * case this node is no longer referenced, and should be freed; 7441c4c3b99SJiang Biao * however, it might mean that the page is under page_ref_freeze(). 745c8d6553bSHugh Dickins * The __remove_mapping() case is easy, again the node is now stale; 74652d1e606SKirill Tkhai * the same is in reuse_ksm_page() case; but if page is swapcache 7479800562fSMatthew Wilcox (Oracle) * in folio_migrate_mapping(), it might still be our page, 74852d1e606SKirill Tkhai * in which case it's essential to keep the node. 749c8d6553bSHugh Dickins */ 750c8d6553bSHugh Dickins while (!get_page_unless_zero(page)) { 751c8d6553bSHugh Dickins /* 752c8d6553bSHugh Dickins * Another check for page->mapping != expected_mapping would 753c8d6553bSHugh Dickins * work here too. We have chosen the !PageSwapCache test to 754c8d6553bSHugh Dickins * optimize the common case, when the page is or is about to 755c8d6553bSHugh Dickins * be freed: PageSwapCache is cleared (under spin_lock_irq) 7561c4c3b99SJiang Biao * in the ref_freeze section of __remove_mapping(); but Anon 757c8d6553bSHugh Dickins * page->mapping reset to NULL later, in free_pages_prepare(). 758c8d6553bSHugh Dickins */ 759c8d6553bSHugh Dickins if (!PageSwapCache(page)) 7604035c07aSHugh Dickins goto stale; 761c8d6553bSHugh Dickins cpu_relax(); 762c8d6553bSHugh Dickins } 763c8d6553bSHugh Dickins 7644db0c3c2SJason Low if (READ_ONCE(page->mapping) != expected_mapping) { 7654035c07aSHugh Dickins put_page(page); 7664035c07aSHugh Dickins goto stale; 7674035c07aSHugh Dickins } 768c8d6553bSHugh Dickins 7692cee57d1SYang Shi if (flags == GET_KSM_PAGE_TRYLOCK) { 7702cee57d1SYang Shi if (!trylock_page(page)) { 7712cee57d1SYang Shi put_page(page); 7722cee57d1SYang Shi return ERR_PTR(-EBUSY); 7732cee57d1SYang Shi } 7742cee57d1SYang Shi } else if (flags == GET_KSM_PAGE_LOCK) 7758aafa6a4SHugh Dickins lock_page(page); 7762cee57d1SYang Shi 7772cee57d1SYang Shi if (flags != GET_KSM_PAGE_NOLOCK) { 7784db0c3c2SJason Low if (READ_ONCE(page->mapping) != expected_mapping) { 7798aafa6a4SHugh Dickins unlock_page(page); 7808aafa6a4SHugh Dickins put_page(page); 7818aafa6a4SHugh Dickins goto stale; 7828aafa6a4SHugh Dickins } 7838aafa6a4SHugh Dickins } 7844035c07aSHugh Dickins return page; 785c8d6553bSHugh Dickins 7864035c07aSHugh Dickins stale: 787c8d6553bSHugh Dickins /* 788c8d6553bSHugh Dickins * We come here from above when page->mapping or !PageSwapCache 789c8d6553bSHugh Dickins * suggests that the node is stale; but it might be under migration. 79019138349SMatthew Wilcox (Oracle) * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(), 791c8d6553bSHugh Dickins * before checking whether node->kpfn has been changed. 792c8d6553bSHugh Dickins */ 793c8d6553bSHugh Dickins smp_rmb(); 7944db0c3c2SJason Low if (READ_ONCE(stable_node->kpfn) != kpfn) 795c8d6553bSHugh Dickins goto again; 7964035c07aSHugh Dickins remove_node_from_stable_tree(stable_node); 7974035c07aSHugh Dickins return NULL; 7984035c07aSHugh Dickins } 7994035c07aSHugh Dickins 80031dbd01fSIzik Eidus /* 80131dbd01fSIzik Eidus * Removing rmap_item from stable or unstable tree. 80231dbd01fSIzik Eidus * This function will clean the information from the stable/unstable tree. 80331dbd01fSIzik Eidus */ 80421fbd591SQi Zheng static void remove_rmap_item_from_tree(struct ksm_rmap_item *rmap_item) 80531dbd01fSIzik Eidus { 8067b6ba2c7SHugh Dickins if (rmap_item->address & STABLE_FLAG) { 80721fbd591SQi Zheng struct ksm_stable_node *stable_node; 8085ad64688SHugh Dickins struct page *page; 80931dbd01fSIzik Eidus 8107b6ba2c7SHugh Dickins stable_node = rmap_item->head; 81162862290SHugh Dickins page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK); 8124035c07aSHugh Dickins if (!page) 8134035c07aSHugh Dickins goto out; 8145ad64688SHugh Dickins 8157b6ba2c7SHugh Dickins hlist_del(&rmap_item->hlist); 81662862290SHugh Dickins unlock_page(page); 8175ad64688SHugh Dickins put_page(page); 81808beca44SHugh Dickins 81998666f8aSAndrea Arcangeli if (!hlist_empty(&stable_node->hlist)) 8204035c07aSHugh Dickins ksm_pages_sharing--; 8214035c07aSHugh Dickins else 822b4028260SHugh Dickins ksm_pages_shared--; 82376093853Sxu xin 82476093853Sxu xin rmap_item->mm->ksm_merging_pages--; 82576093853Sxu xin 8262c653d0eSAndrea Arcangeli VM_BUG_ON(stable_node->rmap_hlist_len <= 0); 8272c653d0eSAndrea Arcangeli stable_node->rmap_hlist_len--; 82831dbd01fSIzik Eidus 8299e60109fSPeter Zijlstra put_anon_vma(rmap_item->anon_vma); 830c89a384eSMiaohe Lin rmap_item->head = NULL; 83193d17715SHugh Dickins rmap_item->address &= PAGE_MASK; 83231dbd01fSIzik Eidus 8337b6ba2c7SHugh Dickins } else if (rmap_item->address & UNSTABLE_FLAG) { 83431dbd01fSIzik Eidus unsigned char age; 83531dbd01fSIzik Eidus /* 8369ba69294SHugh Dickins * Usually ksmd can and must skip the rb_erase, because 83731dbd01fSIzik Eidus * root_unstable_tree was already reset to RB_ROOT. 8389ba69294SHugh Dickins * But be careful when an mm is exiting: do the rb_erase 8399ba69294SHugh Dickins * if this rmap_item was inserted by this scan, rather 8409ba69294SHugh Dickins * than left over from before. 84131dbd01fSIzik Eidus */ 84231dbd01fSIzik Eidus age = (unsigned char)(ksm_scan.seqnr - rmap_item->address); 843cd551f97SHugh Dickins BUG_ON(age > 1); 84431dbd01fSIzik Eidus if (!age) 84590bd6fd3SPetr Holasek rb_erase(&rmap_item->node, 846ef53d16cSHugh Dickins root_unstable_tree + NUMA(rmap_item->nid)); 84793d17715SHugh Dickins ksm_pages_unshared--; 84831dbd01fSIzik Eidus rmap_item->address &= PAGE_MASK; 84993d17715SHugh Dickins } 8504035c07aSHugh Dickins out: 85131dbd01fSIzik Eidus cond_resched(); /* we're called from many long loops */ 85231dbd01fSIzik Eidus } 85331dbd01fSIzik Eidus 85421fbd591SQi Zheng static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list) 85531dbd01fSIzik Eidus { 8566514d511SHugh Dickins while (*rmap_list) { 85721fbd591SQi Zheng struct ksm_rmap_item *rmap_item = *rmap_list; 8586514d511SHugh Dickins *rmap_list = rmap_item->rmap_list; 85931dbd01fSIzik Eidus remove_rmap_item_from_tree(rmap_item); 86031dbd01fSIzik Eidus free_rmap_item(rmap_item); 86131dbd01fSIzik Eidus } 86231dbd01fSIzik Eidus } 86331dbd01fSIzik Eidus 86431dbd01fSIzik Eidus /* 865e850dcf5SHugh Dickins * Though it's very tempting to unmerge rmap_items from stable tree rather 86631dbd01fSIzik Eidus * than check every pte of a given vma, the locking doesn't quite work for 86731dbd01fSIzik Eidus * that - an rmap_item is assigned to the stable tree after inserting ksm 868c1e8d7c6SMichel Lespinasse * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing 86931dbd01fSIzik Eidus * rmap_items from parent to child at fork time (so as not to waste time 87031dbd01fSIzik Eidus * if exit comes before the next scan reaches it). 87181464e30SHugh Dickins * 87281464e30SHugh Dickins * Similarly, although we'd like to remove rmap_items (so updating counts 87381464e30SHugh Dickins * and freeing memory) when unmerging an area, it's easier to leave that 87481464e30SHugh Dickins * to the next pass of ksmd - consider, for example, how ksmd might be 87581464e30SHugh Dickins * in cmp_and_merge_page on one of the rmap_items we would be removing. 87631dbd01fSIzik Eidus */ 877d952b791SHugh Dickins static int unmerge_ksm_pages(struct vm_area_struct *vma, 87831dbd01fSIzik Eidus unsigned long start, unsigned long end) 87931dbd01fSIzik Eidus { 88031dbd01fSIzik Eidus unsigned long addr; 881d952b791SHugh Dickins int err = 0; 88231dbd01fSIzik Eidus 883d952b791SHugh Dickins for (addr = start; addr < end && !err; addr += PAGE_SIZE) { 8849ba69294SHugh Dickins if (ksm_test_exit(vma->vm_mm)) 8859ba69294SHugh Dickins break; 886d952b791SHugh Dickins if (signal_pending(current)) 887d952b791SHugh Dickins err = -ERESTARTSYS; 888d952b791SHugh Dickins else 889d952b791SHugh Dickins err = break_ksm(vma, addr); 890d952b791SHugh Dickins } 891d952b791SHugh Dickins return err; 89231dbd01fSIzik Eidus } 89331dbd01fSIzik Eidus 89421fbd591SQi Zheng static inline struct ksm_stable_node *folio_stable_node(struct folio *folio) 89519138349SMatthew Wilcox (Oracle) { 89619138349SMatthew Wilcox (Oracle) return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL; 89719138349SMatthew Wilcox (Oracle) } 89819138349SMatthew Wilcox (Oracle) 89921fbd591SQi Zheng static inline struct ksm_stable_node *page_stable_node(struct page *page) 90088484826SMike Rapoport { 90119138349SMatthew Wilcox (Oracle) return folio_stable_node(page_folio(page)); 90288484826SMike Rapoport } 90388484826SMike Rapoport 90488484826SMike Rapoport static inline void set_page_stable_node(struct page *page, 90521fbd591SQi Zheng struct ksm_stable_node *stable_node) 90688484826SMike Rapoport { 9076c287605SDavid Hildenbrand VM_BUG_ON_PAGE(PageAnon(page) && PageAnonExclusive(page), page); 90888484826SMike Rapoport page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM); 90988484826SMike Rapoport } 91088484826SMike Rapoport 9112ffd8679SHugh Dickins #ifdef CONFIG_SYSFS 9122ffd8679SHugh Dickins /* 9132ffd8679SHugh Dickins * Only called through the sysfs control interface: 9142ffd8679SHugh Dickins */ 91521fbd591SQi Zheng static int remove_stable_node(struct ksm_stable_node *stable_node) 916cbf86cfeSHugh Dickins { 917cbf86cfeSHugh Dickins struct page *page; 918cbf86cfeSHugh Dickins int err; 919cbf86cfeSHugh Dickins 9202cee57d1SYang Shi page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK); 921cbf86cfeSHugh Dickins if (!page) { 922cbf86cfeSHugh Dickins /* 923cbf86cfeSHugh Dickins * get_ksm_page did remove_node_from_stable_tree itself. 924cbf86cfeSHugh Dickins */ 925cbf86cfeSHugh Dickins return 0; 926cbf86cfeSHugh Dickins } 927cbf86cfeSHugh Dickins 928cbf86cfeSHugh Dickins /* 9299a63236fSAndrey Ryabinin * Page could be still mapped if this races with __mmput() running in 9309a63236fSAndrey Ryabinin * between ksm_exit() and exit_mmap(). Just refuse to let 9319a63236fSAndrey Ryabinin * merge_across_nodes/max_page_sharing be switched. 9328fdb3dbfSHugh Dickins */ 9338fdb3dbfSHugh Dickins err = -EBUSY; 9349a63236fSAndrey Ryabinin if (!page_mapped(page)) { 9358fdb3dbfSHugh Dickins /* 9368fdb3dbfSHugh Dickins * The stable node did not yet appear stale to get_ksm_page(), 9378fdb3dbfSHugh Dickins * since that allows for an unmapped ksm page to be recognized 9388fdb3dbfSHugh Dickins * right up until it is freed; but the node is safe to remove. 9391fec6890SMatthew Wilcox (Oracle) * This page might be in an LRU cache waiting to be freed, 940cbf86cfeSHugh Dickins * or it might be PageSwapCache (perhaps under writeback), 941cbf86cfeSHugh Dickins * or it might have been removed from swapcache a moment ago. 942cbf86cfeSHugh Dickins */ 943cbf86cfeSHugh Dickins set_page_stable_node(page, NULL); 944cbf86cfeSHugh Dickins remove_node_from_stable_tree(stable_node); 945cbf86cfeSHugh Dickins err = 0; 946cbf86cfeSHugh Dickins } 947cbf86cfeSHugh Dickins 948cbf86cfeSHugh Dickins unlock_page(page); 949cbf86cfeSHugh Dickins put_page(page); 950cbf86cfeSHugh Dickins return err; 951cbf86cfeSHugh Dickins } 952cbf86cfeSHugh Dickins 95321fbd591SQi Zheng static int remove_stable_node_chain(struct ksm_stable_node *stable_node, 9542c653d0eSAndrea Arcangeli struct rb_root *root) 9552c653d0eSAndrea Arcangeli { 95621fbd591SQi Zheng struct ksm_stable_node *dup; 9572c653d0eSAndrea Arcangeli struct hlist_node *hlist_safe; 9582c653d0eSAndrea Arcangeli 9592c653d0eSAndrea Arcangeli if (!is_stable_node_chain(stable_node)) { 9602c653d0eSAndrea Arcangeli VM_BUG_ON(is_stable_node_dup(stable_node)); 9612c653d0eSAndrea Arcangeli if (remove_stable_node(stable_node)) 9622c653d0eSAndrea Arcangeli return true; 9632c653d0eSAndrea Arcangeli else 9642c653d0eSAndrea Arcangeli return false; 9652c653d0eSAndrea Arcangeli } 9662c653d0eSAndrea Arcangeli 9672c653d0eSAndrea Arcangeli hlist_for_each_entry_safe(dup, hlist_safe, 9682c653d0eSAndrea Arcangeli &stable_node->hlist, hlist_dup) { 9692c653d0eSAndrea Arcangeli VM_BUG_ON(!is_stable_node_dup(dup)); 9702c653d0eSAndrea Arcangeli if (remove_stable_node(dup)) 9712c653d0eSAndrea Arcangeli return true; 9722c653d0eSAndrea Arcangeli } 9732c653d0eSAndrea Arcangeli BUG_ON(!hlist_empty(&stable_node->hlist)); 9742c653d0eSAndrea Arcangeli free_stable_node_chain(stable_node, root); 9752c653d0eSAndrea Arcangeli return false; 9762c653d0eSAndrea Arcangeli } 9772c653d0eSAndrea Arcangeli 978cbf86cfeSHugh Dickins static int remove_all_stable_nodes(void) 979cbf86cfeSHugh Dickins { 98021fbd591SQi Zheng struct ksm_stable_node *stable_node, *next; 981cbf86cfeSHugh Dickins int nid; 982cbf86cfeSHugh Dickins int err = 0; 983cbf86cfeSHugh Dickins 984ef53d16cSHugh Dickins for (nid = 0; nid < ksm_nr_node_ids; nid++) { 985cbf86cfeSHugh Dickins while (root_stable_tree[nid].rb_node) { 986cbf86cfeSHugh Dickins stable_node = rb_entry(root_stable_tree[nid].rb_node, 98721fbd591SQi Zheng struct ksm_stable_node, node); 9882c653d0eSAndrea Arcangeli if (remove_stable_node_chain(stable_node, 9892c653d0eSAndrea Arcangeli root_stable_tree + nid)) { 990cbf86cfeSHugh Dickins err = -EBUSY; 991cbf86cfeSHugh Dickins break; /* proceed to next nid */ 992cbf86cfeSHugh Dickins } 993cbf86cfeSHugh Dickins cond_resched(); 994cbf86cfeSHugh Dickins } 995cbf86cfeSHugh Dickins } 99603640418SGeliang Tang list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) { 9974146d2d6SHugh Dickins if (remove_stable_node(stable_node)) 9984146d2d6SHugh Dickins err = -EBUSY; 9994146d2d6SHugh Dickins cond_resched(); 10004146d2d6SHugh Dickins } 1001cbf86cfeSHugh Dickins return err; 1002cbf86cfeSHugh Dickins } 1003cbf86cfeSHugh Dickins 1004d952b791SHugh Dickins static int unmerge_and_remove_all_rmap_items(void) 100531dbd01fSIzik Eidus { 100621fbd591SQi Zheng struct ksm_mm_slot *mm_slot; 100758730ab6SQi Zheng struct mm_slot *slot; 100831dbd01fSIzik Eidus struct mm_struct *mm; 100931dbd01fSIzik Eidus struct vm_area_struct *vma; 1010d952b791SHugh Dickins int err = 0; 101131dbd01fSIzik Eidus 1012d952b791SHugh Dickins spin_lock(&ksm_mmlist_lock); 101358730ab6SQi Zheng slot = list_entry(ksm_mm_head.slot.mm_node.next, 101458730ab6SQi Zheng struct mm_slot, mm_node); 101558730ab6SQi Zheng ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); 1016d952b791SHugh Dickins spin_unlock(&ksm_mmlist_lock); 1017d952b791SHugh Dickins 1018a5f18ba0SMatthew Wilcox (Oracle) for (mm_slot = ksm_scan.mm_slot; mm_slot != &ksm_mm_head; 1019a5f18ba0SMatthew Wilcox (Oracle) mm_slot = ksm_scan.mm_slot) { 102058730ab6SQi Zheng VMA_ITERATOR(vmi, mm_slot->slot.mm, 0); 1021a5f18ba0SMatthew Wilcox (Oracle) 102258730ab6SQi Zheng mm = mm_slot->slot.mm; 1023d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 10246db504ceSLiam R. Howlett 10256db504ceSLiam R. Howlett /* 10266db504ceSLiam R. Howlett * Exit right away if mm is exiting to avoid lockdep issue in 10276db504ceSLiam R. Howlett * the maple tree 10286db504ceSLiam R. Howlett */ 10299ba69294SHugh Dickins if (ksm_test_exit(mm)) 10306db504ceSLiam R. Howlett goto mm_exiting; 10316db504ceSLiam R. Howlett 10326db504ceSLiam R. Howlett for_each_vma(vmi, vma) { 103331dbd01fSIzik Eidus if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) 103431dbd01fSIzik Eidus continue; 1035d952b791SHugh Dickins err = unmerge_ksm_pages(vma, 1036d952b791SHugh Dickins vma->vm_start, vma->vm_end); 10379ba69294SHugh Dickins if (err) 10389ba69294SHugh Dickins goto error; 1039d952b791SHugh Dickins } 10409ba69294SHugh Dickins 10416db504ceSLiam R. Howlett mm_exiting: 1042420be4edSChengyang Fan remove_trailing_rmap_items(&mm_slot->rmap_list); 1043d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 104431dbd01fSIzik Eidus 104531dbd01fSIzik Eidus spin_lock(&ksm_mmlist_lock); 104658730ab6SQi Zheng slot = list_entry(mm_slot->slot.mm_node.next, 104758730ab6SQi Zheng struct mm_slot, mm_node); 104858730ab6SQi Zheng ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); 10499ba69294SHugh Dickins if (ksm_test_exit(mm)) { 105058730ab6SQi Zheng hash_del(&mm_slot->slot.hash); 105158730ab6SQi Zheng list_del(&mm_slot->slot.mm_node); 105231dbd01fSIzik Eidus spin_unlock(&ksm_mmlist_lock); 10539ba69294SHugh Dickins 105458730ab6SQi Zheng mm_slot_free(mm_slot_cache, mm_slot); 10559ba69294SHugh Dickins clear_bit(MMF_VM_MERGEABLE, &mm->flags); 1056d7597f59SStefan Roesch clear_bit(MMF_VM_MERGE_ANY, &mm->flags); 10579ba69294SHugh Dickins mmdrop(mm); 10587496fea9SZhou Chengming } else 10599ba69294SHugh Dickins spin_unlock(&ksm_mmlist_lock); 106031dbd01fSIzik Eidus } 106131dbd01fSIzik Eidus 1062cbf86cfeSHugh Dickins /* Clean up stable nodes, but don't worry if some are still busy */ 1063cbf86cfeSHugh Dickins remove_all_stable_nodes(); 1064d952b791SHugh Dickins ksm_scan.seqnr = 0; 10659ba69294SHugh Dickins return 0; 10669ba69294SHugh Dickins 10679ba69294SHugh Dickins error: 1068d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 1069d952b791SHugh Dickins spin_lock(&ksm_mmlist_lock); 1070d952b791SHugh Dickins ksm_scan.mm_slot = &ksm_mm_head; 1071d952b791SHugh Dickins spin_unlock(&ksm_mmlist_lock); 1072d952b791SHugh Dickins return err; 1073d952b791SHugh Dickins } 10742ffd8679SHugh Dickins #endif /* CONFIG_SYSFS */ 1075d952b791SHugh Dickins 107631dbd01fSIzik Eidus static u32 calc_checksum(struct page *page) 107731dbd01fSIzik Eidus { 107831dbd01fSIzik Eidus u32 checksum; 10799b04c5feSCong Wang void *addr = kmap_atomic(page); 108059e1a2f4STimofey Titovets checksum = xxhash(addr, PAGE_SIZE, 0); 10819b04c5feSCong Wang kunmap_atomic(addr); 108231dbd01fSIzik Eidus return checksum; 108331dbd01fSIzik Eidus } 108431dbd01fSIzik Eidus 108531dbd01fSIzik Eidus static int write_protect_page(struct vm_area_struct *vma, struct page *page, 108631dbd01fSIzik Eidus pte_t *orig_pte) 108731dbd01fSIzik Eidus { 108831dbd01fSIzik Eidus struct mm_struct *mm = vma->vm_mm; 1089eed05e54SMatthew Wilcox (Oracle) DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0); 109031dbd01fSIzik Eidus int swapped; 109131dbd01fSIzik Eidus int err = -EFAULT; 1092ac46d4f3SJérôme Glisse struct mmu_notifier_range range; 10936c287605SDavid Hildenbrand bool anon_exclusive; 1094c33c7948SRyan Roberts pte_t entry; 109531dbd01fSIzik Eidus 109636eaff33SKirill A. Shutemov pvmw.address = page_address_in_vma(page, vma); 109736eaff33SKirill A. Shutemov if (pvmw.address == -EFAULT) 109831dbd01fSIzik Eidus goto out; 109931dbd01fSIzik Eidus 110029ad768cSAndrea Arcangeli BUG_ON(PageTransCompound(page)); 11016bdb913fSHaggai Eran 11027d4a8be0SAlistair Popple mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address, 1103ac46d4f3SJérôme Glisse pvmw.address + PAGE_SIZE); 1104ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 11056bdb913fSHaggai Eran 110636eaff33SKirill A. Shutemov if (!page_vma_mapped_walk(&pvmw)) 11076bdb913fSHaggai Eran goto out_mn; 110836eaff33SKirill A. Shutemov if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?")) 110936eaff33SKirill A. Shutemov goto out_unlock; 111031dbd01fSIzik Eidus 11116c287605SDavid Hildenbrand anon_exclusive = PageAnonExclusive(page); 1112c33c7948SRyan Roberts entry = ptep_get(pvmw.pte); 1113c33c7948SRyan Roberts if (pte_write(entry) || pte_dirty(entry) || 11146c287605SDavid Hildenbrand anon_exclusive || mm_tlb_flush_pending(mm)) { 111531dbd01fSIzik Eidus swapped = PageSwapCache(page); 111636eaff33SKirill A. Shutemov flush_cache_page(vma, pvmw.address, page_to_pfn(page)); 111731dbd01fSIzik Eidus /* 111825985edcSLucas De Marchi * Ok this is tricky, when get_user_pages_fast() run it doesn't 111931dbd01fSIzik Eidus * take any lock, therefore the check that we are going to make 1120f0953a1bSIngo Molnar * with the pagecount against the mapcount is racy and 112131dbd01fSIzik Eidus * O_DIRECT can happen right after the check. 112231dbd01fSIzik Eidus * So we clear the pte and flush the tlb before the check 112331dbd01fSIzik Eidus * this assure us that no O_DIRECT can happen after the check 112431dbd01fSIzik Eidus * or in the middle of the check. 11250f10851eSJérôme Glisse * 11260f10851eSJérôme Glisse * No need to notify as we are downgrading page table to read 11270f10851eSJérôme Glisse * only not changing it to point to a new page. 11280f10851eSJérôme Glisse * 1129ee65728eSMike Rapoport * See Documentation/mm/mmu_notifier.rst 113031dbd01fSIzik Eidus */ 11310f10851eSJérôme Glisse entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte); 113231dbd01fSIzik Eidus /* 113331dbd01fSIzik Eidus * Check that no O_DIRECT or similar I/O is in progress on the 113431dbd01fSIzik Eidus * page 113531dbd01fSIzik Eidus */ 113631e855eaSHugh Dickins if (page_mapcount(page) + 1 + swapped != page_count(page)) { 113736eaff33SKirill A. Shutemov set_pte_at(mm, pvmw.address, pvmw.pte, entry); 113831dbd01fSIzik Eidus goto out_unlock; 113931dbd01fSIzik Eidus } 11406c287605SDavid Hildenbrand 1141088b8aa5SDavid Hildenbrand /* See page_try_share_anon_rmap(): clear PTE first. */ 11426c287605SDavid Hildenbrand if (anon_exclusive && page_try_share_anon_rmap(page)) { 11436c287605SDavid Hildenbrand set_pte_at(mm, pvmw.address, pvmw.pte, entry); 11446c287605SDavid Hildenbrand goto out_unlock; 11456c287605SDavid Hildenbrand } 11466c287605SDavid Hildenbrand 11474e31635cSHugh Dickins if (pte_dirty(entry)) 11484e31635cSHugh Dickins set_page_dirty(page); 11496a56ccbcSDavid Hildenbrand entry = pte_mkclean(entry); 1150595cd8f2SAneesh Kumar K.V 11516a56ccbcSDavid Hildenbrand if (pte_write(entry)) 11526a56ccbcSDavid Hildenbrand entry = pte_wrprotect(entry); 11536a56ccbcSDavid Hildenbrand 115436eaff33SKirill A. Shutemov set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry); 115531dbd01fSIzik Eidus } 1156c33c7948SRyan Roberts *orig_pte = entry; 115731dbd01fSIzik Eidus err = 0; 115831dbd01fSIzik Eidus 115931dbd01fSIzik Eidus out_unlock: 116036eaff33SKirill A. Shutemov page_vma_mapped_walk_done(&pvmw); 11616bdb913fSHaggai Eran out_mn: 1162ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_end(&range); 116331dbd01fSIzik Eidus out: 116431dbd01fSIzik Eidus return err; 116531dbd01fSIzik Eidus } 116631dbd01fSIzik Eidus 116731dbd01fSIzik Eidus /** 116831dbd01fSIzik Eidus * replace_page - replace page in vma by new ksm page 11698dd3557aSHugh Dickins * @vma: vma that holds the pte pointing to page 11708dd3557aSHugh Dickins * @page: the page we are replacing by kpage 11718dd3557aSHugh Dickins * @kpage: the ksm page we replace page by 117231dbd01fSIzik Eidus * @orig_pte: the original value of the pte 117331dbd01fSIzik Eidus * 117431dbd01fSIzik Eidus * Returns 0 on success, -EFAULT on failure. 117531dbd01fSIzik Eidus */ 11768dd3557aSHugh Dickins static int replace_page(struct vm_area_struct *vma, struct page *page, 11778dd3557aSHugh Dickins struct page *kpage, pte_t orig_pte) 117831dbd01fSIzik Eidus { 117931dbd01fSIzik Eidus struct mm_struct *mm = vma->vm_mm; 1180b4e6f66eSMatthew Wilcox (Oracle) struct folio *folio; 118131dbd01fSIzik Eidus pmd_t *pmd; 118250722804SZach O'Keefe pmd_t pmde; 118331dbd01fSIzik Eidus pte_t *ptep; 1184e86c59b1SClaudio Imbrenda pte_t newpte; 118531dbd01fSIzik Eidus spinlock_t *ptl; 118631dbd01fSIzik Eidus unsigned long addr; 118731dbd01fSIzik Eidus int err = -EFAULT; 1188ac46d4f3SJérôme Glisse struct mmu_notifier_range range; 118931dbd01fSIzik Eidus 11908dd3557aSHugh Dickins addr = page_address_in_vma(page, vma); 119131dbd01fSIzik Eidus if (addr == -EFAULT) 119231dbd01fSIzik Eidus goto out; 119331dbd01fSIzik Eidus 11946219049aSBob Liu pmd = mm_find_pmd(mm, addr); 11956219049aSBob Liu if (!pmd) 119631dbd01fSIzik Eidus goto out; 119750722804SZach O'Keefe /* 119850722804SZach O'Keefe * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at() 119950722804SZach O'Keefe * without holding anon_vma lock for write. So when looking for a 120050722804SZach O'Keefe * genuine pmde (in which to find pte), test present and !THP together. 120150722804SZach O'Keefe */ 120226e1a0c3SHugh Dickins pmde = pmdp_get_lockless(pmd); 120350722804SZach O'Keefe if (!pmd_present(pmde) || pmd_trans_huge(pmde)) 120450722804SZach O'Keefe goto out; 120531dbd01fSIzik Eidus 12067d4a8be0SAlistair Popple mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr, 12076f4f13e8SJérôme Glisse addr + PAGE_SIZE); 1208ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 12096bdb913fSHaggai Eran 121031dbd01fSIzik Eidus ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); 121104dee9e8SHugh Dickins if (!ptep) 121204dee9e8SHugh Dickins goto out_mn; 1213c33c7948SRyan Roberts if (!pte_same(ptep_get(ptep), orig_pte)) { 121431dbd01fSIzik Eidus pte_unmap_unlock(ptep, ptl); 12156bdb913fSHaggai Eran goto out_mn; 121631dbd01fSIzik Eidus } 12176c287605SDavid Hildenbrand VM_BUG_ON_PAGE(PageAnonExclusive(page), page); 12186c287605SDavid Hildenbrand VM_BUG_ON_PAGE(PageAnon(kpage) && PageAnonExclusive(kpage), kpage); 121931dbd01fSIzik Eidus 1220e86c59b1SClaudio Imbrenda /* 1221e86c59b1SClaudio Imbrenda * No need to check ksm_use_zero_pages here: we can only have a 1222457aef94SEthon Paul * zero_page here if ksm_use_zero_pages was enabled already. 1223e86c59b1SClaudio Imbrenda */ 1224e86c59b1SClaudio Imbrenda if (!is_zero_pfn(page_to_pfn(kpage))) { 12258dd3557aSHugh Dickins get_page(kpage); 1226f1e2db12SDavid Hildenbrand page_add_anon_rmap(kpage, vma, addr, RMAP_NONE); 1227e86c59b1SClaudio Imbrenda newpte = mk_pte(kpage, vma->vm_page_prot); 1228e86c59b1SClaudio Imbrenda } else { 122979271476Sxu xin /* 123079271476Sxu xin * Use pte_mkdirty to mark the zero page mapped by KSM, and then 123179271476Sxu xin * we can easily track all KSM-placed zero pages by checking if 123279271476Sxu xin * the dirty bit in zero page's PTE is set. 123379271476Sxu xin */ 123479271476Sxu xin newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot))); 1235*e2942062Sxu xin ksm_zero_pages++; 1236a38c015fSClaudio Imbrenda /* 1237a38c015fSClaudio Imbrenda * We're replacing an anonymous page with a zero page, which is 1238a38c015fSClaudio Imbrenda * not anonymous. We need to do proper accounting otherwise we 1239a38c015fSClaudio Imbrenda * will get wrong values in /proc, and a BUG message in dmesg 1240a38c015fSClaudio Imbrenda * when tearing down the mm. 1241a38c015fSClaudio Imbrenda */ 1242a38c015fSClaudio Imbrenda dec_mm_counter(mm, MM_ANONPAGES); 1243e86c59b1SClaudio Imbrenda } 124431dbd01fSIzik Eidus 1245c33c7948SRyan Roberts flush_cache_page(vma, addr, pte_pfn(ptep_get(ptep))); 12460f10851eSJérôme Glisse /* 12470f10851eSJérôme Glisse * No need to notify as we are replacing a read only page with another 12480f10851eSJérôme Glisse * read only page with the same content. 12490f10851eSJérôme Glisse * 1250ee65728eSMike Rapoport * See Documentation/mm/mmu_notifier.rst 12510f10851eSJérôme Glisse */ 12520f10851eSJérôme Glisse ptep_clear_flush(vma, addr, ptep); 1253e86c59b1SClaudio Imbrenda set_pte_at_notify(mm, addr, ptep, newpte); 125431dbd01fSIzik Eidus 1255b4e6f66eSMatthew Wilcox (Oracle) folio = page_folio(page); 1256cea86fe2SHugh Dickins page_remove_rmap(page, vma, false); 1257b4e6f66eSMatthew Wilcox (Oracle) if (!folio_mapped(folio)) 1258b4e6f66eSMatthew Wilcox (Oracle) folio_free_swap(folio); 1259b4e6f66eSMatthew Wilcox (Oracle) folio_put(folio); 126031dbd01fSIzik Eidus 126131dbd01fSIzik Eidus pte_unmap_unlock(ptep, ptl); 126231dbd01fSIzik Eidus err = 0; 12636bdb913fSHaggai Eran out_mn: 1264ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_end(&range); 126531dbd01fSIzik Eidus out: 126631dbd01fSIzik Eidus return err; 126731dbd01fSIzik Eidus } 126831dbd01fSIzik Eidus 126931dbd01fSIzik Eidus /* 127031dbd01fSIzik Eidus * try_to_merge_one_page - take two pages and merge them into one 12718dd3557aSHugh Dickins * @vma: the vma that holds the pte pointing to page 12728dd3557aSHugh Dickins * @page: the PageAnon page that we want to replace with kpage 127380e14822SHugh Dickins * @kpage: the PageKsm page that we want to map instead of page, 127480e14822SHugh Dickins * or NULL the first time when we want to use page as kpage. 127531dbd01fSIzik Eidus * 127631dbd01fSIzik Eidus * This function returns 0 if the pages were merged, -EFAULT otherwise. 127731dbd01fSIzik Eidus */ 127831dbd01fSIzik Eidus static int try_to_merge_one_page(struct vm_area_struct *vma, 12798dd3557aSHugh Dickins struct page *page, struct page *kpage) 128031dbd01fSIzik Eidus { 128131dbd01fSIzik Eidus pte_t orig_pte = __pte(0); 128231dbd01fSIzik Eidus int err = -EFAULT; 128331dbd01fSIzik Eidus 1284db114b83SHugh Dickins if (page == kpage) /* ksm page forked */ 1285db114b83SHugh Dickins return 0; 1286db114b83SHugh Dickins 12878dd3557aSHugh Dickins if (!PageAnon(page)) 128831dbd01fSIzik Eidus goto out; 128931dbd01fSIzik Eidus 129031dbd01fSIzik Eidus /* 129131dbd01fSIzik Eidus * We need the page lock to read a stable PageSwapCache in 129231dbd01fSIzik Eidus * write_protect_page(). We use trylock_page() instead of 129331dbd01fSIzik Eidus * lock_page() because we don't want to wait here - we 129431dbd01fSIzik Eidus * prefer to continue scanning and merging different pages, 129531dbd01fSIzik Eidus * then come back to this page when it is unlocked. 129631dbd01fSIzik Eidus */ 12978dd3557aSHugh Dickins if (!trylock_page(page)) 129831e855eaSHugh Dickins goto out; 1299f765f540SKirill A. Shutemov 1300f765f540SKirill A. Shutemov if (PageTransCompound(page)) { 1301a7306c34SAndrea Arcangeli if (split_huge_page(page)) 1302f765f540SKirill A. Shutemov goto out_unlock; 1303f765f540SKirill A. Shutemov } 1304f765f540SKirill A. Shutemov 130531dbd01fSIzik Eidus /* 130631dbd01fSIzik Eidus * If this anonymous page is mapped only here, its pte may need 130731dbd01fSIzik Eidus * to be write-protected. If it's mapped elsewhere, all of its 130831dbd01fSIzik Eidus * ptes are necessarily already write-protected. But in either 130931dbd01fSIzik Eidus * case, we need to lock and check page_count is not raised. 131031dbd01fSIzik Eidus */ 131180e14822SHugh Dickins if (write_protect_page(vma, page, &orig_pte) == 0) { 131280e14822SHugh Dickins if (!kpage) { 131380e14822SHugh Dickins /* 131480e14822SHugh Dickins * While we hold page lock, upgrade page from 131580e14822SHugh Dickins * PageAnon+anon_vma to PageKsm+NULL stable_node: 131680e14822SHugh Dickins * stable_tree_insert() will update stable_node. 131780e14822SHugh Dickins */ 131880e14822SHugh Dickins set_page_stable_node(page, NULL); 131980e14822SHugh Dickins mark_page_accessed(page); 1320337ed7ebSMinchan Kim /* 1321337ed7ebSMinchan Kim * Page reclaim just frees a clean page with no dirty 1322337ed7ebSMinchan Kim * ptes: make sure that the ksm page would be swapped. 1323337ed7ebSMinchan Kim */ 1324337ed7ebSMinchan Kim if (!PageDirty(page)) 1325337ed7ebSMinchan Kim SetPageDirty(page); 132680e14822SHugh Dickins err = 0; 132780e14822SHugh Dickins } else if (pages_identical(page, kpage)) 13288dd3557aSHugh Dickins err = replace_page(vma, page, kpage, orig_pte); 132980e14822SHugh Dickins } 133031dbd01fSIzik Eidus 1331f765f540SKirill A. Shutemov out_unlock: 13328dd3557aSHugh Dickins unlock_page(page); 133331dbd01fSIzik Eidus out: 133431dbd01fSIzik Eidus return err; 133531dbd01fSIzik Eidus } 133631dbd01fSIzik Eidus 133731dbd01fSIzik Eidus /* 133881464e30SHugh Dickins * try_to_merge_with_ksm_page - like try_to_merge_two_pages, 133981464e30SHugh Dickins * but no new kernel page is allocated: kpage must already be a ksm page. 13408dd3557aSHugh Dickins * 13418dd3557aSHugh Dickins * This function returns 0 if the pages were merged, -EFAULT otherwise. 134281464e30SHugh Dickins */ 134321fbd591SQi Zheng static int try_to_merge_with_ksm_page(struct ksm_rmap_item *rmap_item, 13448dd3557aSHugh Dickins struct page *page, struct page *kpage) 134581464e30SHugh Dickins { 13468dd3557aSHugh Dickins struct mm_struct *mm = rmap_item->mm; 134781464e30SHugh Dickins struct vm_area_struct *vma; 134881464e30SHugh Dickins int err = -EFAULT; 134981464e30SHugh Dickins 1350d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 135185c6e8ddSAndrea Arcangeli vma = find_mergeable_vma(mm, rmap_item->address); 135285c6e8ddSAndrea Arcangeli if (!vma) 13539ba69294SHugh Dickins goto out; 13549ba69294SHugh Dickins 13558dd3557aSHugh Dickins err = try_to_merge_one_page(vma, page, kpage); 1356db114b83SHugh Dickins if (err) 1357db114b83SHugh Dickins goto out; 1358db114b83SHugh Dickins 1359bc56620bSHugh Dickins /* Unstable nid is in union with stable anon_vma: remove first */ 1360bc56620bSHugh Dickins remove_rmap_item_from_tree(rmap_item); 1361bc56620bSHugh Dickins 1362c1e8d7c6SMichel Lespinasse /* Must get reference to anon_vma while still holding mmap_lock */ 13639e60109fSPeter Zijlstra rmap_item->anon_vma = vma->anon_vma; 13649e60109fSPeter Zijlstra get_anon_vma(vma->anon_vma); 136581464e30SHugh Dickins out: 1366d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 1367739100c8SStefan Roesch trace_ksm_merge_with_ksm_page(kpage, page_to_pfn(kpage ? kpage : page), 1368739100c8SStefan Roesch rmap_item, mm, err); 136981464e30SHugh Dickins return err; 137081464e30SHugh Dickins } 137181464e30SHugh Dickins 137281464e30SHugh Dickins /* 137331dbd01fSIzik Eidus * try_to_merge_two_pages - take two identical pages and prepare them 137431dbd01fSIzik Eidus * to be merged into one page. 137531dbd01fSIzik Eidus * 13768dd3557aSHugh Dickins * This function returns the kpage if we successfully merged two identical 13778dd3557aSHugh Dickins * pages into one ksm page, NULL otherwise. 137831dbd01fSIzik Eidus * 137980e14822SHugh Dickins * Note that this function upgrades page to ksm page: if one of the pages 138031dbd01fSIzik Eidus * is already a ksm page, try_to_merge_with_ksm_page should be used. 138131dbd01fSIzik Eidus */ 138221fbd591SQi Zheng static struct page *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item, 13838dd3557aSHugh Dickins struct page *page, 138421fbd591SQi Zheng struct ksm_rmap_item *tree_rmap_item, 13858dd3557aSHugh Dickins struct page *tree_page) 138631dbd01fSIzik Eidus { 138780e14822SHugh Dickins int err; 138831dbd01fSIzik Eidus 138980e14822SHugh Dickins err = try_to_merge_with_ksm_page(rmap_item, page, NULL); 139031dbd01fSIzik Eidus if (!err) { 13918dd3557aSHugh Dickins err = try_to_merge_with_ksm_page(tree_rmap_item, 139280e14822SHugh Dickins tree_page, page); 139331dbd01fSIzik Eidus /* 139481464e30SHugh Dickins * If that fails, we have a ksm page with only one pte 139581464e30SHugh Dickins * pointing to it: so break it. 139631dbd01fSIzik Eidus */ 13974035c07aSHugh Dickins if (err) 13988dd3557aSHugh Dickins break_cow(rmap_item); 139931dbd01fSIzik Eidus } 140080e14822SHugh Dickins return err ? NULL : page; 140131dbd01fSIzik Eidus } 140231dbd01fSIzik Eidus 14032c653d0eSAndrea Arcangeli static __always_inline 140421fbd591SQi Zheng bool __is_page_sharing_candidate(struct ksm_stable_node *stable_node, int offset) 14052c653d0eSAndrea Arcangeli { 14062c653d0eSAndrea Arcangeli VM_BUG_ON(stable_node->rmap_hlist_len < 0); 14072c653d0eSAndrea Arcangeli /* 14082c653d0eSAndrea Arcangeli * Check that at least one mapping still exists, otherwise 14092c653d0eSAndrea Arcangeli * there's no much point to merge and share with this 14102c653d0eSAndrea Arcangeli * stable_node, as the underlying tree_page of the other 14112c653d0eSAndrea Arcangeli * sharer is going to be freed soon. 14122c653d0eSAndrea Arcangeli */ 14132c653d0eSAndrea Arcangeli return stable_node->rmap_hlist_len && 14142c653d0eSAndrea Arcangeli stable_node->rmap_hlist_len + offset < ksm_max_page_sharing; 14152c653d0eSAndrea Arcangeli } 14162c653d0eSAndrea Arcangeli 14172c653d0eSAndrea Arcangeli static __always_inline 141821fbd591SQi Zheng bool is_page_sharing_candidate(struct ksm_stable_node *stable_node) 14192c653d0eSAndrea Arcangeli { 14202c653d0eSAndrea Arcangeli return __is_page_sharing_candidate(stable_node, 0); 14212c653d0eSAndrea Arcangeli } 14222c653d0eSAndrea Arcangeli 142321fbd591SQi Zheng static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup, 142421fbd591SQi Zheng struct ksm_stable_node **_stable_node, 14252c653d0eSAndrea Arcangeli struct rb_root *root, 14262c653d0eSAndrea Arcangeli bool prune_stale_stable_nodes) 14272c653d0eSAndrea Arcangeli { 142821fbd591SQi Zheng struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node; 14292c653d0eSAndrea Arcangeli struct hlist_node *hlist_safe; 14308dc5ffcdSAndrea Arcangeli struct page *_tree_page, *tree_page = NULL; 14312c653d0eSAndrea Arcangeli int nr = 0; 14322c653d0eSAndrea Arcangeli int found_rmap_hlist_len; 14332c653d0eSAndrea Arcangeli 14342c653d0eSAndrea Arcangeli if (!prune_stale_stable_nodes || 14352c653d0eSAndrea Arcangeli time_before(jiffies, stable_node->chain_prune_time + 14362c653d0eSAndrea Arcangeli msecs_to_jiffies( 14372c653d0eSAndrea Arcangeli ksm_stable_node_chains_prune_millisecs))) 14382c653d0eSAndrea Arcangeli prune_stale_stable_nodes = false; 14392c653d0eSAndrea Arcangeli else 14402c653d0eSAndrea Arcangeli stable_node->chain_prune_time = jiffies; 14412c653d0eSAndrea Arcangeli 14422c653d0eSAndrea Arcangeli hlist_for_each_entry_safe(dup, hlist_safe, 14432c653d0eSAndrea Arcangeli &stable_node->hlist, hlist_dup) { 14442c653d0eSAndrea Arcangeli cond_resched(); 14452c653d0eSAndrea Arcangeli /* 14462c653d0eSAndrea Arcangeli * We must walk all stable_node_dup to prune the stale 14472c653d0eSAndrea Arcangeli * stable nodes during lookup. 14482c653d0eSAndrea Arcangeli * 14492c653d0eSAndrea Arcangeli * get_ksm_page can drop the nodes from the 14502c653d0eSAndrea Arcangeli * stable_node->hlist if they point to freed pages 14512c653d0eSAndrea Arcangeli * (that's why we do a _safe walk). The "dup" 14522c653d0eSAndrea Arcangeli * stable_node parameter itself will be freed from 14532c653d0eSAndrea Arcangeli * under us if it returns NULL. 14542c653d0eSAndrea Arcangeli */ 14552cee57d1SYang Shi _tree_page = get_ksm_page(dup, GET_KSM_PAGE_NOLOCK); 14562c653d0eSAndrea Arcangeli if (!_tree_page) 14572c653d0eSAndrea Arcangeli continue; 14582c653d0eSAndrea Arcangeli nr += 1; 14592c653d0eSAndrea Arcangeli if (is_page_sharing_candidate(dup)) { 14602c653d0eSAndrea Arcangeli if (!found || 14612c653d0eSAndrea Arcangeli dup->rmap_hlist_len > found_rmap_hlist_len) { 14622c653d0eSAndrea Arcangeli if (found) 14638dc5ffcdSAndrea Arcangeli put_page(tree_page); 14642c653d0eSAndrea Arcangeli found = dup; 14652c653d0eSAndrea Arcangeli found_rmap_hlist_len = found->rmap_hlist_len; 14668dc5ffcdSAndrea Arcangeli tree_page = _tree_page; 14672c653d0eSAndrea Arcangeli 14688dc5ffcdSAndrea Arcangeli /* skip put_page for found dup */ 14692c653d0eSAndrea Arcangeli if (!prune_stale_stable_nodes) 14702c653d0eSAndrea Arcangeli break; 14712c653d0eSAndrea Arcangeli continue; 14722c653d0eSAndrea Arcangeli } 14732c653d0eSAndrea Arcangeli } 14742c653d0eSAndrea Arcangeli put_page(_tree_page); 14752c653d0eSAndrea Arcangeli } 14762c653d0eSAndrea Arcangeli 147780b18dfaSAndrea Arcangeli if (found) { 14782c653d0eSAndrea Arcangeli /* 147980b18dfaSAndrea Arcangeli * nr is counting all dups in the chain only if 148080b18dfaSAndrea Arcangeli * prune_stale_stable_nodes is true, otherwise we may 148180b18dfaSAndrea Arcangeli * break the loop at nr == 1 even if there are 148280b18dfaSAndrea Arcangeli * multiple entries. 14832c653d0eSAndrea Arcangeli */ 148480b18dfaSAndrea Arcangeli if (prune_stale_stable_nodes && nr == 1) { 14852c653d0eSAndrea Arcangeli /* 14862c653d0eSAndrea Arcangeli * If there's not just one entry it would 14872c653d0eSAndrea Arcangeli * corrupt memory, better BUG_ON. In KSM 14882c653d0eSAndrea Arcangeli * context with no lock held it's not even 14892c653d0eSAndrea Arcangeli * fatal. 14902c653d0eSAndrea Arcangeli */ 14912c653d0eSAndrea Arcangeli BUG_ON(stable_node->hlist.first->next); 14922c653d0eSAndrea Arcangeli 14932c653d0eSAndrea Arcangeli /* 14942c653d0eSAndrea Arcangeli * There's just one entry and it is below the 14952c653d0eSAndrea Arcangeli * deduplication limit so drop the chain. 14962c653d0eSAndrea Arcangeli */ 14972c653d0eSAndrea Arcangeli rb_replace_node(&stable_node->node, &found->node, 14982c653d0eSAndrea Arcangeli root); 14992c653d0eSAndrea Arcangeli free_stable_node(stable_node); 15002c653d0eSAndrea Arcangeli ksm_stable_node_chains--; 15012c653d0eSAndrea Arcangeli ksm_stable_node_dups--; 1502b4fecc67SAndrea Arcangeli /* 15030ba1d0f7SAndrea Arcangeli * NOTE: the caller depends on the stable_node 15040ba1d0f7SAndrea Arcangeli * to be equal to stable_node_dup if the chain 15050ba1d0f7SAndrea Arcangeli * was collapsed. 1506b4fecc67SAndrea Arcangeli */ 15070ba1d0f7SAndrea Arcangeli *_stable_node = found; 15080ba1d0f7SAndrea Arcangeli /* 1509f0953a1bSIngo Molnar * Just for robustness, as stable_node is 15100ba1d0f7SAndrea Arcangeli * otherwise left as a stable pointer, the 15110ba1d0f7SAndrea Arcangeli * compiler shall optimize it away at build 15120ba1d0f7SAndrea Arcangeli * time. 15130ba1d0f7SAndrea Arcangeli */ 15140ba1d0f7SAndrea Arcangeli stable_node = NULL; 151580b18dfaSAndrea Arcangeli } else if (stable_node->hlist.first != &found->hlist_dup && 151680b18dfaSAndrea Arcangeli __is_page_sharing_candidate(found, 1)) { 15172c653d0eSAndrea Arcangeli /* 151880b18dfaSAndrea Arcangeli * If the found stable_node dup can accept one 151980b18dfaSAndrea Arcangeli * more future merge (in addition to the one 152080b18dfaSAndrea Arcangeli * that is underway) and is not at the head of 152180b18dfaSAndrea Arcangeli * the chain, put it there so next search will 152280b18dfaSAndrea Arcangeli * be quicker in the !prune_stale_stable_nodes 152380b18dfaSAndrea Arcangeli * case. 152480b18dfaSAndrea Arcangeli * 152580b18dfaSAndrea Arcangeli * NOTE: it would be inaccurate to use nr > 1 152680b18dfaSAndrea Arcangeli * instead of checking the hlist.first pointer 152780b18dfaSAndrea Arcangeli * directly, because in the 152880b18dfaSAndrea Arcangeli * prune_stale_stable_nodes case "nr" isn't 152980b18dfaSAndrea Arcangeli * the position of the found dup in the chain, 153080b18dfaSAndrea Arcangeli * but the total number of dups in the chain. 15312c653d0eSAndrea Arcangeli */ 15322c653d0eSAndrea Arcangeli hlist_del(&found->hlist_dup); 15332c653d0eSAndrea Arcangeli hlist_add_head(&found->hlist_dup, 15342c653d0eSAndrea Arcangeli &stable_node->hlist); 15352c653d0eSAndrea Arcangeli } 15362c653d0eSAndrea Arcangeli } 15372c653d0eSAndrea Arcangeli 15388dc5ffcdSAndrea Arcangeli *_stable_node_dup = found; 15398dc5ffcdSAndrea Arcangeli return tree_page; 15402c653d0eSAndrea Arcangeli } 15412c653d0eSAndrea Arcangeli 154221fbd591SQi Zheng static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stable_node, 15432c653d0eSAndrea Arcangeli struct rb_root *root) 15442c653d0eSAndrea Arcangeli { 15452c653d0eSAndrea Arcangeli if (!is_stable_node_chain(stable_node)) 15462c653d0eSAndrea Arcangeli return stable_node; 15472c653d0eSAndrea Arcangeli if (hlist_empty(&stable_node->hlist)) { 15482c653d0eSAndrea Arcangeli free_stable_node_chain(stable_node, root); 15492c653d0eSAndrea Arcangeli return NULL; 15502c653d0eSAndrea Arcangeli } 15512c653d0eSAndrea Arcangeli return hlist_entry(stable_node->hlist.first, 15522c653d0eSAndrea Arcangeli typeof(*stable_node), hlist_dup); 15532c653d0eSAndrea Arcangeli } 15542c653d0eSAndrea Arcangeli 15558dc5ffcdSAndrea Arcangeli /* 15568dc5ffcdSAndrea Arcangeli * Like for get_ksm_page, this function can free the *_stable_node and 15578dc5ffcdSAndrea Arcangeli * *_stable_node_dup if the returned tree_page is NULL. 15588dc5ffcdSAndrea Arcangeli * 15598dc5ffcdSAndrea Arcangeli * It can also free and overwrite *_stable_node with the found 15608dc5ffcdSAndrea Arcangeli * stable_node_dup if the chain is collapsed (in which case 15618dc5ffcdSAndrea Arcangeli * *_stable_node will be equal to *_stable_node_dup like if the chain 15628dc5ffcdSAndrea Arcangeli * never existed). It's up to the caller to verify tree_page is not 15638dc5ffcdSAndrea Arcangeli * NULL before dereferencing *_stable_node or *_stable_node_dup. 15648dc5ffcdSAndrea Arcangeli * 15658dc5ffcdSAndrea Arcangeli * *_stable_node_dup is really a second output parameter of this 15668dc5ffcdSAndrea Arcangeli * function and will be overwritten in all cases, the caller doesn't 15678dc5ffcdSAndrea Arcangeli * need to initialize it. 15688dc5ffcdSAndrea Arcangeli */ 156921fbd591SQi Zheng static struct page *__stable_node_chain(struct ksm_stable_node **_stable_node_dup, 157021fbd591SQi Zheng struct ksm_stable_node **_stable_node, 15712c653d0eSAndrea Arcangeli struct rb_root *root, 15722c653d0eSAndrea Arcangeli bool prune_stale_stable_nodes) 15732c653d0eSAndrea Arcangeli { 157421fbd591SQi Zheng struct ksm_stable_node *stable_node = *_stable_node; 15752c653d0eSAndrea Arcangeli if (!is_stable_node_chain(stable_node)) { 15762c653d0eSAndrea Arcangeli if (is_page_sharing_candidate(stable_node)) { 15778dc5ffcdSAndrea Arcangeli *_stable_node_dup = stable_node; 15782cee57d1SYang Shi return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK); 15792c653d0eSAndrea Arcangeli } 15808dc5ffcdSAndrea Arcangeli /* 15818dc5ffcdSAndrea Arcangeli * _stable_node_dup set to NULL means the stable_node 15828dc5ffcdSAndrea Arcangeli * reached the ksm_max_page_sharing limit. 15838dc5ffcdSAndrea Arcangeli */ 15848dc5ffcdSAndrea Arcangeli *_stable_node_dup = NULL; 15852c653d0eSAndrea Arcangeli return NULL; 15862c653d0eSAndrea Arcangeli } 15878dc5ffcdSAndrea Arcangeli return stable_node_dup(_stable_node_dup, _stable_node, root, 15882c653d0eSAndrea Arcangeli prune_stale_stable_nodes); 15892c653d0eSAndrea Arcangeli } 15902c653d0eSAndrea Arcangeli 159121fbd591SQi Zheng static __always_inline struct page *chain_prune(struct ksm_stable_node **s_n_d, 159221fbd591SQi Zheng struct ksm_stable_node **s_n, 15932c653d0eSAndrea Arcangeli struct rb_root *root) 15942c653d0eSAndrea Arcangeli { 15958dc5ffcdSAndrea Arcangeli return __stable_node_chain(s_n_d, s_n, root, true); 15962c653d0eSAndrea Arcangeli } 15972c653d0eSAndrea Arcangeli 159821fbd591SQi Zheng static __always_inline struct page *chain(struct ksm_stable_node **s_n_d, 159921fbd591SQi Zheng struct ksm_stable_node *s_n, 16002c653d0eSAndrea Arcangeli struct rb_root *root) 16012c653d0eSAndrea Arcangeli { 160221fbd591SQi Zheng struct ksm_stable_node *old_stable_node = s_n; 16038dc5ffcdSAndrea Arcangeli struct page *tree_page; 16048dc5ffcdSAndrea Arcangeli 16058dc5ffcdSAndrea Arcangeli tree_page = __stable_node_chain(s_n_d, &s_n, root, false); 16068dc5ffcdSAndrea Arcangeli /* not pruning dups so s_n cannot have changed */ 16078dc5ffcdSAndrea Arcangeli VM_BUG_ON(s_n != old_stable_node); 16088dc5ffcdSAndrea Arcangeli return tree_page; 16092c653d0eSAndrea Arcangeli } 16102c653d0eSAndrea Arcangeli 161131dbd01fSIzik Eidus /* 16128dd3557aSHugh Dickins * stable_tree_search - search for page inside the stable tree 161331dbd01fSIzik Eidus * 161431dbd01fSIzik Eidus * This function checks if there is a page inside the stable tree 161531dbd01fSIzik Eidus * with identical content to the page that we are scanning right now. 161631dbd01fSIzik Eidus * 16177b6ba2c7SHugh Dickins * This function returns the stable tree node of identical content if found, 161831dbd01fSIzik Eidus * NULL otherwise. 161931dbd01fSIzik Eidus */ 162062b61f61SHugh Dickins static struct page *stable_tree_search(struct page *page) 162131dbd01fSIzik Eidus { 162290bd6fd3SPetr Holasek int nid; 1623ef53d16cSHugh Dickins struct rb_root *root; 16244146d2d6SHugh Dickins struct rb_node **new; 16254146d2d6SHugh Dickins struct rb_node *parent; 162621fbd591SQi Zheng struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any; 162721fbd591SQi Zheng struct ksm_stable_node *page_node; 162831dbd01fSIzik Eidus 16294146d2d6SHugh Dickins page_node = page_stable_node(page); 16304146d2d6SHugh Dickins if (page_node && page_node->head != &migrate_nodes) { 16314146d2d6SHugh Dickins /* ksm page forked */ 163208beca44SHugh Dickins get_page(page); 163362b61f61SHugh Dickins return page; 163408beca44SHugh Dickins } 163508beca44SHugh Dickins 163690bd6fd3SPetr Holasek nid = get_kpfn_nid(page_to_pfn(page)); 1637ef53d16cSHugh Dickins root = root_stable_tree + nid; 16384146d2d6SHugh Dickins again: 1639ef53d16cSHugh Dickins new = &root->rb_node; 16404146d2d6SHugh Dickins parent = NULL; 164190bd6fd3SPetr Holasek 16424146d2d6SHugh Dickins while (*new) { 16434035c07aSHugh Dickins struct page *tree_page; 164431dbd01fSIzik Eidus int ret; 164531dbd01fSIzik Eidus 164631dbd01fSIzik Eidus cond_resched(); 164721fbd591SQi Zheng stable_node = rb_entry(*new, struct ksm_stable_node, node); 16482c653d0eSAndrea Arcangeli stable_node_any = NULL; 16498dc5ffcdSAndrea Arcangeli tree_page = chain_prune(&stable_node_dup, &stable_node, root); 1650b4fecc67SAndrea Arcangeli /* 1651b4fecc67SAndrea Arcangeli * NOTE: stable_node may have been freed by 1652b4fecc67SAndrea Arcangeli * chain_prune() if the returned stable_node_dup is 1653b4fecc67SAndrea Arcangeli * not NULL. stable_node_dup may have been inserted in 1654b4fecc67SAndrea Arcangeli * the rbtree instead as a regular stable_node (in 1655b4fecc67SAndrea Arcangeli * order to collapse the stable_node chain if a single 16560ba1d0f7SAndrea Arcangeli * stable_node dup was found in it). In such case the 16573413b2c8SJulia Lawall * stable_node is overwritten by the callee to point 16580ba1d0f7SAndrea Arcangeli * to the stable_node_dup that was collapsed in the 16590ba1d0f7SAndrea Arcangeli * stable rbtree and stable_node will be equal to 16600ba1d0f7SAndrea Arcangeli * stable_node_dup like if the chain never existed. 1661b4fecc67SAndrea Arcangeli */ 16622c653d0eSAndrea Arcangeli if (!stable_node_dup) { 16632c653d0eSAndrea Arcangeli /* 16642c653d0eSAndrea Arcangeli * Either all stable_node dups were full in 16652c653d0eSAndrea Arcangeli * this stable_node chain, or this chain was 16662c653d0eSAndrea Arcangeli * empty and should be rb_erased. 16672c653d0eSAndrea Arcangeli */ 16682c653d0eSAndrea Arcangeli stable_node_any = stable_node_dup_any(stable_node, 16692c653d0eSAndrea Arcangeli root); 16702c653d0eSAndrea Arcangeli if (!stable_node_any) { 16712c653d0eSAndrea Arcangeli /* rb_erase just run */ 16722c653d0eSAndrea Arcangeli goto again; 16732c653d0eSAndrea Arcangeli } 16742c653d0eSAndrea Arcangeli /* 16752c653d0eSAndrea Arcangeli * Take any of the stable_node dups page of 16762c653d0eSAndrea Arcangeli * this stable_node chain to let the tree walk 16772c653d0eSAndrea Arcangeli * continue. All KSM pages belonging to the 16782c653d0eSAndrea Arcangeli * stable_node dups in a stable_node chain 16792c653d0eSAndrea Arcangeli * have the same content and they're 1680457aef94SEthon Paul * write protected at all times. Any will work 16812c653d0eSAndrea Arcangeli * fine to continue the walk. 16822c653d0eSAndrea Arcangeli */ 16832cee57d1SYang Shi tree_page = get_ksm_page(stable_node_any, 16842cee57d1SYang Shi GET_KSM_PAGE_NOLOCK); 16852c653d0eSAndrea Arcangeli } 16862c653d0eSAndrea Arcangeli VM_BUG_ON(!stable_node_dup ^ !!stable_node_any); 1687f2e5ff85SAndrea Arcangeli if (!tree_page) { 1688f2e5ff85SAndrea Arcangeli /* 1689f2e5ff85SAndrea Arcangeli * If we walked over a stale stable_node, 1690f2e5ff85SAndrea Arcangeli * get_ksm_page() will call rb_erase() and it 1691f2e5ff85SAndrea Arcangeli * may rebalance the tree from under us. So 1692f2e5ff85SAndrea Arcangeli * restart the search from scratch. Returning 1693f2e5ff85SAndrea Arcangeli * NULL would be safe too, but we'd generate 1694f2e5ff85SAndrea Arcangeli * false negative insertions just because some 1695f2e5ff85SAndrea Arcangeli * stable_node was stale. 1696f2e5ff85SAndrea Arcangeli */ 1697f2e5ff85SAndrea Arcangeli goto again; 1698f2e5ff85SAndrea Arcangeli } 169931dbd01fSIzik Eidus 17004035c07aSHugh Dickins ret = memcmp_pages(page, tree_page); 1701c8d6553bSHugh Dickins put_page(tree_page); 170231dbd01fSIzik Eidus 17034146d2d6SHugh Dickins parent = *new; 1704c8d6553bSHugh Dickins if (ret < 0) 17054146d2d6SHugh Dickins new = &parent->rb_left; 1706c8d6553bSHugh Dickins else if (ret > 0) 17074146d2d6SHugh Dickins new = &parent->rb_right; 1708c8d6553bSHugh Dickins else { 17092c653d0eSAndrea Arcangeli if (page_node) { 17102c653d0eSAndrea Arcangeli VM_BUG_ON(page_node->head != &migrate_nodes); 17112c653d0eSAndrea Arcangeli /* 17122c653d0eSAndrea Arcangeli * Test if the migrated page should be merged 17132c653d0eSAndrea Arcangeli * into a stable node dup. If the mapcount is 17142c653d0eSAndrea Arcangeli * 1 we can migrate it with another KSM page 17152c653d0eSAndrea Arcangeli * without adding it to the chain. 17162c653d0eSAndrea Arcangeli */ 17172c653d0eSAndrea Arcangeli if (page_mapcount(page) > 1) 17182c653d0eSAndrea Arcangeli goto chain_append; 17192c653d0eSAndrea Arcangeli } 17202c653d0eSAndrea Arcangeli 17212c653d0eSAndrea Arcangeli if (!stable_node_dup) { 17222c653d0eSAndrea Arcangeli /* 17232c653d0eSAndrea Arcangeli * If the stable_node is a chain and 17242c653d0eSAndrea Arcangeli * we got a payload match in memcmp 17252c653d0eSAndrea Arcangeli * but we cannot merge the scanned 17262c653d0eSAndrea Arcangeli * page in any of the existing 17272c653d0eSAndrea Arcangeli * stable_node dups because they're 17282c653d0eSAndrea Arcangeli * all full, we need to wait the 17292c653d0eSAndrea Arcangeli * scanned page to find itself a match 17302c653d0eSAndrea Arcangeli * in the unstable tree to create a 17312c653d0eSAndrea Arcangeli * brand new KSM page to add later to 17322c653d0eSAndrea Arcangeli * the dups of this stable_node. 17332c653d0eSAndrea Arcangeli */ 17342c653d0eSAndrea Arcangeli return NULL; 17352c653d0eSAndrea Arcangeli } 17362c653d0eSAndrea Arcangeli 1737c8d6553bSHugh Dickins /* 1738c8d6553bSHugh Dickins * Lock and unlock the stable_node's page (which 1739c8d6553bSHugh Dickins * might already have been migrated) so that page 1740c8d6553bSHugh Dickins * migration is sure to notice its raised count. 1741c8d6553bSHugh Dickins * It would be more elegant to return stable_node 1742c8d6553bSHugh Dickins * than kpage, but that involves more changes. 1743c8d6553bSHugh Dickins */ 17442cee57d1SYang Shi tree_page = get_ksm_page(stable_node_dup, 17452cee57d1SYang Shi GET_KSM_PAGE_TRYLOCK); 17462cee57d1SYang Shi 17472cee57d1SYang Shi if (PTR_ERR(tree_page) == -EBUSY) 17482cee57d1SYang Shi return ERR_PTR(-EBUSY); 17492cee57d1SYang Shi 17502c653d0eSAndrea Arcangeli if (unlikely(!tree_page)) 17512c653d0eSAndrea Arcangeli /* 17522c653d0eSAndrea Arcangeli * The tree may have been rebalanced, 17532c653d0eSAndrea Arcangeli * so re-evaluate parent and new. 17542c653d0eSAndrea Arcangeli */ 17552c653d0eSAndrea Arcangeli goto again; 1756c8d6553bSHugh Dickins unlock_page(tree_page); 17572c653d0eSAndrea Arcangeli 17582c653d0eSAndrea Arcangeli if (get_kpfn_nid(stable_node_dup->kpfn) != 17592c653d0eSAndrea Arcangeli NUMA(stable_node_dup->nid)) { 17604146d2d6SHugh Dickins put_page(tree_page); 17614146d2d6SHugh Dickins goto replace; 17624146d2d6SHugh Dickins } 176362b61f61SHugh Dickins return tree_page; 176431dbd01fSIzik Eidus } 1765c8d6553bSHugh Dickins } 176631dbd01fSIzik Eidus 17674146d2d6SHugh Dickins if (!page_node) 176831dbd01fSIzik Eidus return NULL; 17694146d2d6SHugh Dickins 17704146d2d6SHugh Dickins list_del(&page_node->list); 17714146d2d6SHugh Dickins DO_NUMA(page_node->nid = nid); 17724146d2d6SHugh Dickins rb_link_node(&page_node->node, parent, new); 1773ef53d16cSHugh Dickins rb_insert_color(&page_node->node, root); 17742c653d0eSAndrea Arcangeli out: 17752c653d0eSAndrea Arcangeli if (is_page_sharing_candidate(page_node)) { 17764146d2d6SHugh Dickins get_page(page); 17774146d2d6SHugh Dickins return page; 17782c653d0eSAndrea Arcangeli } else 17792c653d0eSAndrea Arcangeli return NULL; 17804146d2d6SHugh Dickins 17814146d2d6SHugh Dickins replace: 1782b4fecc67SAndrea Arcangeli /* 1783b4fecc67SAndrea Arcangeli * If stable_node was a chain and chain_prune collapsed it, 17840ba1d0f7SAndrea Arcangeli * stable_node has been updated to be the new regular 17850ba1d0f7SAndrea Arcangeli * stable_node. A collapse of the chain is indistinguishable 17860ba1d0f7SAndrea Arcangeli * from the case there was no chain in the stable 17870ba1d0f7SAndrea Arcangeli * rbtree. Otherwise stable_node is the chain and 17880ba1d0f7SAndrea Arcangeli * stable_node_dup is the dup to replace. 1789b4fecc67SAndrea Arcangeli */ 17900ba1d0f7SAndrea Arcangeli if (stable_node_dup == stable_node) { 1791b4fecc67SAndrea Arcangeli VM_BUG_ON(is_stable_node_chain(stable_node_dup)); 1792b4fecc67SAndrea Arcangeli VM_BUG_ON(is_stable_node_dup(stable_node_dup)); 17932c653d0eSAndrea Arcangeli /* there is no chain */ 17944146d2d6SHugh Dickins if (page_node) { 17952c653d0eSAndrea Arcangeli VM_BUG_ON(page_node->head != &migrate_nodes); 17964146d2d6SHugh Dickins list_del(&page_node->list); 17974146d2d6SHugh Dickins DO_NUMA(page_node->nid = nid); 1798b4fecc67SAndrea Arcangeli rb_replace_node(&stable_node_dup->node, 1799b4fecc67SAndrea Arcangeli &page_node->node, 18002c653d0eSAndrea Arcangeli root); 18012c653d0eSAndrea Arcangeli if (is_page_sharing_candidate(page_node)) 18024146d2d6SHugh Dickins get_page(page); 18032c653d0eSAndrea Arcangeli else 18042c653d0eSAndrea Arcangeli page = NULL; 18054146d2d6SHugh Dickins } else { 1806b4fecc67SAndrea Arcangeli rb_erase(&stable_node_dup->node, root); 18074146d2d6SHugh Dickins page = NULL; 18084146d2d6SHugh Dickins } 18092c653d0eSAndrea Arcangeli } else { 18102c653d0eSAndrea Arcangeli VM_BUG_ON(!is_stable_node_chain(stable_node)); 18112c653d0eSAndrea Arcangeli __stable_node_dup_del(stable_node_dup); 18122c653d0eSAndrea Arcangeli if (page_node) { 18132c653d0eSAndrea Arcangeli VM_BUG_ON(page_node->head != &migrate_nodes); 18142c653d0eSAndrea Arcangeli list_del(&page_node->list); 18152c653d0eSAndrea Arcangeli DO_NUMA(page_node->nid = nid); 18162c653d0eSAndrea Arcangeli stable_node_chain_add_dup(page_node, stable_node); 18172c653d0eSAndrea Arcangeli if (is_page_sharing_candidate(page_node)) 18182c653d0eSAndrea Arcangeli get_page(page); 18192c653d0eSAndrea Arcangeli else 18202c653d0eSAndrea Arcangeli page = NULL; 18212c653d0eSAndrea Arcangeli } else { 18222c653d0eSAndrea Arcangeli page = NULL; 18232c653d0eSAndrea Arcangeli } 18242c653d0eSAndrea Arcangeli } 18252c653d0eSAndrea Arcangeli stable_node_dup->head = &migrate_nodes; 18262c653d0eSAndrea Arcangeli list_add(&stable_node_dup->list, stable_node_dup->head); 18274146d2d6SHugh Dickins return page; 18282c653d0eSAndrea Arcangeli 18292c653d0eSAndrea Arcangeli chain_append: 18302c653d0eSAndrea Arcangeli /* stable_node_dup could be null if it reached the limit */ 18312c653d0eSAndrea Arcangeli if (!stable_node_dup) 18322c653d0eSAndrea Arcangeli stable_node_dup = stable_node_any; 1833b4fecc67SAndrea Arcangeli /* 1834b4fecc67SAndrea Arcangeli * If stable_node was a chain and chain_prune collapsed it, 18350ba1d0f7SAndrea Arcangeli * stable_node has been updated to be the new regular 18360ba1d0f7SAndrea Arcangeli * stable_node. A collapse of the chain is indistinguishable 18370ba1d0f7SAndrea Arcangeli * from the case there was no chain in the stable 18380ba1d0f7SAndrea Arcangeli * rbtree. Otherwise stable_node is the chain and 18390ba1d0f7SAndrea Arcangeli * stable_node_dup is the dup to replace. 1840b4fecc67SAndrea Arcangeli */ 18410ba1d0f7SAndrea Arcangeli if (stable_node_dup == stable_node) { 1842b4fecc67SAndrea Arcangeli VM_BUG_ON(is_stable_node_dup(stable_node_dup)); 18432c653d0eSAndrea Arcangeli /* chain is missing so create it */ 18442c653d0eSAndrea Arcangeli stable_node = alloc_stable_node_chain(stable_node_dup, 18452c653d0eSAndrea Arcangeli root); 18462c653d0eSAndrea Arcangeli if (!stable_node) 18472c653d0eSAndrea Arcangeli return NULL; 18482c653d0eSAndrea Arcangeli } 18492c653d0eSAndrea Arcangeli /* 18502c653d0eSAndrea Arcangeli * Add this stable_node dup that was 18512c653d0eSAndrea Arcangeli * migrated to the stable_node chain 18522c653d0eSAndrea Arcangeli * of the current nid for this page 18532c653d0eSAndrea Arcangeli * content. 18542c653d0eSAndrea Arcangeli */ 1855b4fecc67SAndrea Arcangeli VM_BUG_ON(!is_stable_node_dup(stable_node_dup)); 18562c653d0eSAndrea Arcangeli VM_BUG_ON(page_node->head != &migrate_nodes); 18572c653d0eSAndrea Arcangeli list_del(&page_node->list); 18582c653d0eSAndrea Arcangeli DO_NUMA(page_node->nid = nid); 18592c653d0eSAndrea Arcangeli stable_node_chain_add_dup(page_node, stable_node); 18602c653d0eSAndrea Arcangeli goto out; 186131dbd01fSIzik Eidus } 186231dbd01fSIzik Eidus 186331dbd01fSIzik Eidus /* 1864e850dcf5SHugh Dickins * stable_tree_insert - insert stable tree node pointing to new ksm page 186531dbd01fSIzik Eidus * into the stable tree. 186631dbd01fSIzik Eidus * 18677b6ba2c7SHugh Dickins * This function returns the stable tree node just allocated on success, 18687b6ba2c7SHugh Dickins * NULL otherwise. 186931dbd01fSIzik Eidus */ 187021fbd591SQi Zheng static struct ksm_stable_node *stable_tree_insert(struct page *kpage) 187131dbd01fSIzik Eidus { 187290bd6fd3SPetr Holasek int nid; 187390bd6fd3SPetr Holasek unsigned long kpfn; 1874ef53d16cSHugh Dickins struct rb_root *root; 187590bd6fd3SPetr Holasek struct rb_node **new; 1876f2e5ff85SAndrea Arcangeli struct rb_node *parent; 187721fbd591SQi Zheng struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any; 18782c653d0eSAndrea Arcangeli bool need_chain = false; 187931dbd01fSIzik Eidus 188090bd6fd3SPetr Holasek kpfn = page_to_pfn(kpage); 188190bd6fd3SPetr Holasek nid = get_kpfn_nid(kpfn); 1882ef53d16cSHugh Dickins root = root_stable_tree + nid; 1883f2e5ff85SAndrea Arcangeli again: 1884f2e5ff85SAndrea Arcangeli parent = NULL; 1885ef53d16cSHugh Dickins new = &root->rb_node; 188690bd6fd3SPetr Holasek 188731dbd01fSIzik Eidus while (*new) { 18884035c07aSHugh Dickins struct page *tree_page; 188931dbd01fSIzik Eidus int ret; 189031dbd01fSIzik Eidus 189131dbd01fSIzik Eidus cond_resched(); 189221fbd591SQi Zheng stable_node = rb_entry(*new, struct ksm_stable_node, node); 18932c653d0eSAndrea Arcangeli stable_node_any = NULL; 18948dc5ffcdSAndrea Arcangeli tree_page = chain(&stable_node_dup, stable_node, root); 18952c653d0eSAndrea Arcangeli if (!stable_node_dup) { 18962c653d0eSAndrea Arcangeli /* 18972c653d0eSAndrea Arcangeli * Either all stable_node dups were full in 18982c653d0eSAndrea Arcangeli * this stable_node chain, or this chain was 18992c653d0eSAndrea Arcangeli * empty and should be rb_erased. 19002c653d0eSAndrea Arcangeli */ 19012c653d0eSAndrea Arcangeli stable_node_any = stable_node_dup_any(stable_node, 19022c653d0eSAndrea Arcangeli root); 19032c653d0eSAndrea Arcangeli if (!stable_node_any) { 19042c653d0eSAndrea Arcangeli /* rb_erase just run */ 19052c653d0eSAndrea Arcangeli goto again; 19062c653d0eSAndrea Arcangeli } 19072c653d0eSAndrea Arcangeli /* 19082c653d0eSAndrea Arcangeli * Take any of the stable_node dups page of 19092c653d0eSAndrea Arcangeli * this stable_node chain to let the tree walk 19102c653d0eSAndrea Arcangeli * continue. All KSM pages belonging to the 19112c653d0eSAndrea Arcangeli * stable_node dups in a stable_node chain 19122c653d0eSAndrea Arcangeli * have the same content and they're 1913457aef94SEthon Paul * write protected at all times. Any will work 19142c653d0eSAndrea Arcangeli * fine to continue the walk. 19152c653d0eSAndrea Arcangeli */ 19162cee57d1SYang Shi tree_page = get_ksm_page(stable_node_any, 19172cee57d1SYang Shi GET_KSM_PAGE_NOLOCK); 19182c653d0eSAndrea Arcangeli } 19192c653d0eSAndrea Arcangeli VM_BUG_ON(!stable_node_dup ^ !!stable_node_any); 1920f2e5ff85SAndrea Arcangeli if (!tree_page) { 1921f2e5ff85SAndrea Arcangeli /* 1922f2e5ff85SAndrea Arcangeli * If we walked over a stale stable_node, 1923f2e5ff85SAndrea Arcangeli * get_ksm_page() will call rb_erase() and it 1924f2e5ff85SAndrea Arcangeli * may rebalance the tree from under us. So 1925f2e5ff85SAndrea Arcangeli * restart the search from scratch. Returning 1926f2e5ff85SAndrea Arcangeli * NULL would be safe too, but we'd generate 1927f2e5ff85SAndrea Arcangeli * false negative insertions just because some 1928f2e5ff85SAndrea Arcangeli * stable_node was stale. 1929f2e5ff85SAndrea Arcangeli */ 1930f2e5ff85SAndrea Arcangeli goto again; 1931f2e5ff85SAndrea Arcangeli } 193231dbd01fSIzik Eidus 19334035c07aSHugh Dickins ret = memcmp_pages(kpage, tree_page); 19344035c07aSHugh Dickins put_page(tree_page); 193531dbd01fSIzik Eidus 193631dbd01fSIzik Eidus parent = *new; 193731dbd01fSIzik Eidus if (ret < 0) 193831dbd01fSIzik Eidus new = &parent->rb_left; 193931dbd01fSIzik Eidus else if (ret > 0) 194031dbd01fSIzik Eidus new = &parent->rb_right; 194131dbd01fSIzik Eidus else { 19422c653d0eSAndrea Arcangeli need_chain = true; 19432c653d0eSAndrea Arcangeli break; 194431dbd01fSIzik Eidus } 194531dbd01fSIzik Eidus } 194631dbd01fSIzik Eidus 19472c653d0eSAndrea Arcangeli stable_node_dup = alloc_stable_node(); 19482c653d0eSAndrea Arcangeli if (!stable_node_dup) 19497b6ba2c7SHugh Dickins return NULL; 195031dbd01fSIzik Eidus 19512c653d0eSAndrea Arcangeli INIT_HLIST_HEAD(&stable_node_dup->hlist); 19522c653d0eSAndrea Arcangeli stable_node_dup->kpfn = kpfn; 19532c653d0eSAndrea Arcangeli set_page_stable_node(kpage, stable_node_dup); 19542c653d0eSAndrea Arcangeli stable_node_dup->rmap_hlist_len = 0; 19552c653d0eSAndrea Arcangeli DO_NUMA(stable_node_dup->nid = nid); 19562c653d0eSAndrea Arcangeli if (!need_chain) { 19572c653d0eSAndrea Arcangeli rb_link_node(&stable_node_dup->node, parent, new); 19582c653d0eSAndrea Arcangeli rb_insert_color(&stable_node_dup->node, root); 19592c653d0eSAndrea Arcangeli } else { 19602c653d0eSAndrea Arcangeli if (!is_stable_node_chain(stable_node)) { 196121fbd591SQi Zheng struct ksm_stable_node *orig = stable_node; 19622c653d0eSAndrea Arcangeli /* chain is missing so create it */ 19632c653d0eSAndrea Arcangeli stable_node = alloc_stable_node_chain(orig, root); 19642c653d0eSAndrea Arcangeli if (!stable_node) { 19652c653d0eSAndrea Arcangeli free_stable_node(stable_node_dup); 19662c653d0eSAndrea Arcangeli return NULL; 19672c653d0eSAndrea Arcangeli } 19682c653d0eSAndrea Arcangeli } 19692c653d0eSAndrea Arcangeli stable_node_chain_add_dup(stable_node_dup, stable_node); 19702c653d0eSAndrea Arcangeli } 197108beca44SHugh Dickins 19722c653d0eSAndrea Arcangeli return stable_node_dup; 197331dbd01fSIzik Eidus } 197431dbd01fSIzik Eidus 197531dbd01fSIzik Eidus /* 19768dd3557aSHugh Dickins * unstable_tree_search_insert - search for identical page, 19778dd3557aSHugh Dickins * else insert rmap_item into the unstable tree. 197831dbd01fSIzik Eidus * 197931dbd01fSIzik Eidus * This function searches for a page in the unstable tree identical to the 198031dbd01fSIzik Eidus * page currently being scanned; and if no identical page is found in the 198131dbd01fSIzik Eidus * tree, we insert rmap_item as a new object into the unstable tree. 198231dbd01fSIzik Eidus * 198331dbd01fSIzik Eidus * This function returns pointer to rmap_item found to be identical 198431dbd01fSIzik Eidus * to the currently scanned page, NULL otherwise. 198531dbd01fSIzik Eidus * 198631dbd01fSIzik Eidus * This function does both searching and inserting, because they share 198731dbd01fSIzik Eidus * the same walking algorithm in an rbtree. 198831dbd01fSIzik Eidus */ 19898dd3557aSHugh Dickins static 199021fbd591SQi Zheng struct ksm_rmap_item *unstable_tree_search_insert(struct ksm_rmap_item *rmap_item, 19918dd3557aSHugh Dickins struct page *page, 19928dd3557aSHugh Dickins struct page **tree_pagep) 199331dbd01fSIzik Eidus { 199490bd6fd3SPetr Holasek struct rb_node **new; 199590bd6fd3SPetr Holasek struct rb_root *root; 199631dbd01fSIzik Eidus struct rb_node *parent = NULL; 199790bd6fd3SPetr Holasek int nid; 199890bd6fd3SPetr Holasek 199990bd6fd3SPetr Holasek nid = get_kpfn_nid(page_to_pfn(page)); 2000ef53d16cSHugh Dickins root = root_unstable_tree + nid; 200190bd6fd3SPetr Holasek new = &root->rb_node; 200231dbd01fSIzik Eidus 200331dbd01fSIzik Eidus while (*new) { 200421fbd591SQi Zheng struct ksm_rmap_item *tree_rmap_item; 20058dd3557aSHugh Dickins struct page *tree_page; 200631dbd01fSIzik Eidus int ret; 200731dbd01fSIzik Eidus 2008d178f27fSHugh Dickins cond_resched(); 200921fbd591SQi Zheng tree_rmap_item = rb_entry(*new, struct ksm_rmap_item, node); 20108dd3557aSHugh Dickins tree_page = get_mergeable_page(tree_rmap_item); 2011c8f95ed1SAndrea Arcangeli if (!tree_page) 201231dbd01fSIzik Eidus return NULL; 201331dbd01fSIzik Eidus 201431dbd01fSIzik Eidus /* 20158dd3557aSHugh Dickins * Don't substitute a ksm page for a forked page. 201631dbd01fSIzik Eidus */ 20178dd3557aSHugh Dickins if (page == tree_page) { 20188dd3557aSHugh Dickins put_page(tree_page); 201931dbd01fSIzik Eidus return NULL; 202031dbd01fSIzik Eidus } 202131dbd01fSIzik Eidus 20228dd3557aSHugh Dickins ret = memcmp_pages(page, tree_page); 202331dbd01fSIzik Eidus 202431dbd01fSIzik Eidus parent = *new; 202531dbd01fSIzik Eidus if (ret < 0) { 20268dd3557aSHugh Dickins put_page(tree_page); 202731dbd01fSIzik Eidus new = &parent->rb_left; 202831dbd01fSIzik Eidus } else if (ret > 0) { 20298dd3557aSHugh Dickins put_page(tree_page); 203031dbd01fSIzik Eidus new = &parent->rb_right; 2031b599cbdfSHugh Dickins } else if (!ksm_merge_across_nodes && 2032b599cbdfSHugh Dickins page_to_nid(tree_page) != nid) { 2033b599cbdfSHugh Dickins /* 2034b599cbdfSHugh Dickins * If tree_page has been migrated to another NUMA node, 2035b599cbdfSHugh Dickins * it will be flushed out and put in the right unstable 2036b599cbdfSHugh Dickins * tree next time: only merge with it when across_nodes. 2037b599cbdfSHugh Dickins */ 2038b599cbdfSHugh Dickins put_page(tree_page); 2039b599cbdfSHugh Dickins return NULL; 204031dbd01fSIzik Eidus } else { 20418dd3557aSHugh Dickins *tree_pagep = tree_page; 204231dbd01fSIzik Eidus return tree_rmap_item; 204331dbd01fSIzik Eidus } 204431dbd01fSIzik Eidus } 204531dbd01fSIzik Eidus 20467b6ba2c7SHugh Dickins rmap_item->address |= UNSTABLE_FLAG; 204731dbd01fSIzik Eidus rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); 2048e850dcf5SHugh Dickins DO_NUMA(rmap_item->nid = nid); 204931dbd01fSIzik Eidus rb_link_node(&rmap_item->node, parent, new); 205090bd6fd3SPetr Holasek rb_insert_color(&rmap_item->node, root); 205131dbd01fSIzik Eidus 2052473b0ce4SHugh Dickins ksm_pages_unshared++; 205331dbd01fSIzik Eidus return NULL; 205431dbd01fSIzik Eidus } 205531dbd01fSIzik Eidus 205631dbd01fSIzik Eidus /* 205731dbd01fSIzik Eidus * stable_tree_append - add another rmap_item to the linked list of 205831dbd01fSIzik Eidus * rmap_items hanging off a given node of the stable tree, all sharing 205931dbd01fSIzik Eidus * the same ksm page. 206031dbd01fSIzik Eidus */ 206121fbd591SQi Zheng static void stable_tree_append(struct ksm_rmap_item *rmap_item, 206221fbd591SQi Zheng struct ksm_stable_node *stable_node, 20632c653d0eSAndrea Arcangeli bool max_page_sharing_bypass) 206431dbd01fSIzik Eidus { 20652c653d0eSAndrea Arcangeli /* 20662c653d0eSAndrea Arcangeli * rmap won't find this mapping if we don't insert the 20672c653d0eSAndrea Arcangeli * rmap_item in the right stable_node 20682c653d0eSAndrea Arcangeli * duplicate. page_migration could break later if rmap breaks, 20692c653d0eSAndrea Arcangeli * so we can as well crash here. We really need to check for 20702c653d0eSAndrea Arcangeli * rmap_hlist_len == STABLE_NODE_CHAIN, but we can as well check 2071457aef94SEthon Paul * for other negative values as an underflow if detected here 20722c653d0eSAndrea Arcangeli * for the first time (and not when decreasing rmap_hlist_len) 20732c653d0eSAndrea Arcangeli * would be sign of memory corruption in the stable_node. 20742c653d0eSAndrea Arcangeli */ 20752c653d0eSAndrea Arcangeli BUG_ON(stable_node->rmap_hlist_len < 0); 20762c653d0eSAndrea Arcangeli 20772c653d0eSAndrea Arcangeli stable_node->rmap_hlist_len++; 20782c653d0eSAndrea Arcangeli if (!max_page_sharing_bypass) 20792c653d0eSAndrea Arcangeli /* possibly non fatal but unexpected overflow, only warn */ 20802c653d0eSAndrea Arcangeli WARN_ON_ONCE(stable_node->rmap_hlist_len > 20812c653d0eSAndrea Arcangeli ksm_max_page_sharing); 20822c653d0eSAndrea Arcangeli 20837b6ba2c7SHugh Dickins rmap_item->head = stable_node; 208431dbd01fSIzik Eidus rmap_item->address |= STABLE_FLAG; 20857b6ba2c7SHugh Dickins hlist_add_head(&rmap_item->hlist, &stable_node->hlist); 2086e178dfdeSHugh Dickins 20877b6ba2c7SHugh Dickins if (rmap_item->hlist.next) 2088e178dfdeSHugh Dickins ksm_pages_sharing++; 20897b6ba2c7SHugh Dickins else 20907b6ba2c7SHugh Dickins ksm_pages_shared++; 209176093853Sxu xin 209276093853Sxu xin rmap_item->mm->ksm_merging_pages++; 209331dbd01fSIzik Eidus } 209431dbd01fSIzik Eidus 209531dbd01fSIzik Eidus /* 209681464e30SHugh Dickins * cmp_and_merge_page - first see if page can be merged into the stable tree; 209781464e30SHugh Dickins * if not, compare checksum to previous and if it's the same, see if page can 209881464e30SHugh Dickins * be inserted into the unstable tree, or merged with a page already there and 209981464e30SHugh Dickins * both transferred to the stable tree. 210031dbd01fSIzik Eidus * 210131dbd01fSIzik Eidus * @page: the page that we are searching identical page to. 210231dbd01fSIzik Eidus * @rmap_item: the reverse mapping into the virtual address of this page 210331dbd01fSIzik Eidus */ 210421fbd591SQi Zheng static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_item) 210531dbd01fSIzik Eidus { 21064b22927fSKirill Tkhai struct mm_struct *mm = rmap_item->mm; 210721fbd591SQi Zheng struct ksm_rmap_item *tree_rmap_item; 21088dd3557aSHugh Dickins struct page *tree_page = NULL; 210921fbd591SQi Zheng struct ksm_stable_node *stable_node; 21108dd3557aSHugh Dickins struct page *kpage; 211131dbd01fSIzik Eidus unsigned int checksum; 211231dbd01fSIzik Eidus int err; 21132c653d0eSAndrea Arcangeli bool max_page_sharing_bypass = false; 211431dbd01fSIzik Eidus 21154146d2d6SHugh Dickins stable_node = page_stable_node(page); 21164146d2d6SHugh Dickins if (stable_node) { 21174146d2d6SHugh Dickins if (stable_node->head != &migrate_nodes && 21182c653d0eSAndrea Arcangeli get_kpfn_nid(READ_ONCE(stable_node->kpfn)) != 21192c653d0eSAndrea Arcangeli NUMA(stable_node->nid)) { 21202c653d0eSAndrea Arcangeli stable_node_dup_del(stable_node); 21214146d2d6SHugh Dickins stable_node->head = &migrate_nodes; 21224146d2d6SHugh Dickins list_add(&stable_node->list, stable_node->head); 21234146d2d6SHugh Dickins } 21244146d2d6SHugh Dickins if (stable_node->head != &migrate_nodes && 21254146d2d6SHugh Dickins rmap_item->head == stable_node) 21264146d2d6SHugh Dickins return; 21272c653d0eSAndrea Arcangeli /* 21282c653d0eSAndrea Arcangeli * If it's a KSM fork, allow it to go over the sharing limit 21292c653d0eSAndrea Arcangeli * without warnings. 21302c653d0eSAndrea Arcangeli */ 21312c653d0eSAndrea Arcangeli if (!is_page_sharing_candidate(stable_node)) 21322c653d0eSAndrea Arcangeli max_page_sharing_bypass = true; 21334146d2d6SHugh Dickins } 213431dbd01fSIzik Eidus 213531dbd01fSIzik Eidus /* We first start with searching the page inside the stable tree */ 213662b61f61SHugh Dickins kpage = stable_tree_search(page); 21374146d2d6SHugh Dickins if (kpage == page && rmap_item->head == stable_node) { 21384146d2d6SHugh Dickins put_page(kpage); 21394146d2d6SHugh Dickins return; 21404146d2d6SHugh Dickins } 21414146d2d6SHugh Dickins 21424146d2d6SHugh Dickins remove_rmap_item_from_tree(rmap_item); 21434146d2d6SHugh Dickins 214462b61f61SHugh Dickins if (kpage) { 21452cee57d1SYang Shi if (PTR_ERR(kpage) == -EBUSY) 21462cee57d1SYang Shi return; 21472cee57d1SYang Shi 214808beca44SHugh Dickins err = try_to_merge_with_ksm_page(rmap_item, page, kpage); 214931dbd01fSIzik Eidus if (!err) { 215031dbd01fSIzik Eidus /* 215131dbd01fSIzik Eidus * The page was successfully merged: 215231dbd01fSIzik Eidus * add its rmap_item to the stable tree. 215331dbd01fSIzik Eidus */ 21545ad64688SHugh Dickins lock_page(kpage); 21552c653d0eSAndrea Arcangeli stable_tree_append(rmap_item, page_stable_node(kpage), 21562c653d0eSAndrea Arcangeli max_page_sharing_bypass); 21575ad64688SHugh Dickins unlock_page(kpage); 215831dbd01fSIzik Eidus } 21598dd3557aSHugh Dickins put_page(kpage); 216031dbd01fSIzik Eidus return; 216131dbd01fSIzik Eidus } 216231dbd01fSIzik Eidus 216331dbd01fSIzik Eidus /* 21644035c07aSHugh Dickins * If the hash value of the page has changed from the last time 21654035c07aSHugh Dickins * we calculated it, this page is changing frequently: therefore we 21664035c07aSHugh Dickins * don't want to insert it in the unstable tree, and we don't want 21674035c07aSHugh Dickins * to waste our time searching for something identical to it there. 216831dbd01fSIzik Eidus */ 216931dbd01fSIzik Eidus checksum = calc_checksum(page); 217031dbd01fSIzik Eidus if (rmap_item->oldchecksum != checksum) { 217131dbd01fSIzik Eidus rmap_item->oldchecksum = checksum; 217231dbd01fSIzik Eidus return; 217331dbd01fSIzik Eidus } 217431dbd01fSIzik Eidus 2175e86c59b1SClaudio Imbrenda /* 2176e86c59b1SClaudio Imbrenda * Same checksum as an empty page. We attempt to merge it with the 2177e86c59b1SClaudio Imbrenda * appropriate zero page if the user enabled this via sysfs. 2178e86c59b1SClaudio Imbrenda */ 2179e86c59b1SClaudio Imbrenda if (ksm_use_zero_pages && (checksum == zero_checksum)) { 2180e86c59b1SClaudio Imbrenda struct vm_area_struct *vma; 2181e86c59b1SClaudio Imbrenda 2182d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 21834b22927fSKirill Tkhai vma = find_mergeable_vma(mm, rmap_item->address); 218456df70a6SMuchun Song if (vma) { 2185e86c59b1SClaudio Imbrenda err = try_to_merge_one_page(vma, page, 2186e86c59b1SClaudio Imbrenda ZERO_PAGE(rmap_item->address)); 2187739100c8SStefan Roesch trace_ksm_merge_one_page( 2188739100c8SStefan Roesch page_to_pfn(ZERO_PAGE(rmap_item->address)), 2189739100c8SStefan Roesch rmap_item, mm, err); 219056df70a6SMuchun Song } else { 219156df70a6SMuchun Song /* 219256df70a6SMuchun Song * If the vma is out of date, we do not need to 219356df70a6SMuchun Song * continue. 219456df70a6SMuchun Song */ 219556df70a6SMuchun Song err = 0; 219656df70a6SMuchun Song } 2197d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 2198e86c59b1SClaudio Imbrenda /* 2199e86c59b1SClaudio Imbrenda * In case of failure, the page was not really empty, so we 2200e86c59b1SClaudio Imbrenda * need to continue. Otherwise we're done. 2201e86c59b1SClaudio Imbrenda */ 2202e86c59b1SClaudio Imbrenda if (!err) 2203e86c59b1SClaudio Imbrenda return; 2204e86c59b1SClaudio Imbrenda } 22058dd3557aSHugh Dickins tree_rmap_item = 22068dd3557aSHugh Dickins unstable_tree_search_insert(rmap_item, page, &tree_page); 220731dbd01fSIzik Eidus if (tree_rmap_item) { 220877da2ba0SClaudio Imbrenda bool split; 220977da2ba0SClaudio Imbrenda 22108dd3557aSHugh Dickins kpage = try_to_merge_two_pages(rmap_item, page, 22118dd3557aSHugh Dickins tree_rmap_item, tree_page); 221277da2ba0SClaudio Imbrenda /* 221377da2ba0SClaudio Imbrenda * If both pages we tried to merge belong to the same compound 221477da2ba0SClaudio Imbrenda * page, then we actually ended up increasing the reference 221577da2ba0SClaudio Imbrenda * count of the same compound page twice, and split_huge_page 221677da2ba0SClaudio Imbrenda * failed. 221777da2ba0SClaudio Imbrenda * Here we set a flag if that happened, and we use it later to 221877da2ba0SClaudio Imbrenda * try split_huge_page again. Since we call put_page right 221977da2ba0SClaudio Imbrenda * afterwards, the reference count will be correct and 222077da2ba0SClaudio Imbrenda * split_huge_page should succeed. 222177da2ba0SClaudio Imbrenda */ 222277da2ba0SClaudio Imbrenda split = PageTransCompound(page) 222377da2ba0SClaudio Imbrenda && compound_head(page) == compound_head(tree_page); 22248dd3557aSHugh Dickins put_page(tree_page); 22258dd3557aSHugh Dickins if (kpage) { 2226bc56620bSHugh Dickins /* 2227bc56620bSHugh Dickins * The pages were successfully merged: insert new 2228bc56620bSHugh Dickins * node in the stable tree and add both rmap_items. 2229bc56620bSHugh Dickins */ 22305ad64688SHugh Dickins lock_page(kpage); 22317b6ba2c7SHugh Dickins stable_node = stable_tree_insert(kpage); 22327b6ba2c7SHugh Dickins if (stable_node) { 22332c653d0eSAndrea Arcangeli stable_tree_append(tree_rmap_item, stable_node, 22342c653d0eSAndrea Arcangeli false); 22352c653d0eSAndrea Arcangeli stable_tree_append(rmap_item, stable_node, 22362c653d0eSAndrea Arcangeli false); 22377b6ba2c7SHugh Dickins } 22385ad64688SHugh Dickins unlock_page(kpage); 22397b6ba2c7SHugh Dickins 224031dbd01fSIzik Eidus /* 224131dbd01fSIzik Eidus * If we fail to insert the page into the stable tree, 224231dbd01fSIzik Eidus * we will have 2 virtual addresses that are pointing 224331dbd01fSIzik Eidus * to a ksm page left outside the stable tree, 224431dbd01fSIzik Eidus * in which case we need to break_cow on both. 224531dbd01fSIzik Eidus */ 22467b6ba2c7SHugh Dickins if (!stable_node) { 22478dd3557aSHugh Dickins break_cow(tree_rmap_item); 22488dd3557aSHugh Dickins break_cow(rmap_item); 224931dbd01fSIzik Eidus } 225077da2ba0SClaudio Imbrenda } else if (split) { 225177da2ba0SClaudio Imbrenda /* 225277da2ba0SClaudio Imbrenda * We are here if we tried to merge two pages and 225377da2ba0SClaudio Imbrenda * failed because they both belonged to the same 225477da2ba0SClaudio Imbrenda * compound page. We will split the page now, but no 225577da2ba0SClaudio Imbrenda * merging will take place. 225677da2ba0SClaudio Imbrenda * We do not want to add the cost of a full lock; if 225777da2ba0SClaudio Imbrenda * the page is locked, it is better to skip it and 225877da2ba0SClaudio Imbrenda * perhaps try again later. 225977da2ba0SClaudio Imbrenda */ 226077da2ba0SClaudio Imbrenda if (!trylock_page(page)) 226177da2ba0SClaudio Imbrenda return; 226277da2ba0SClaudio Imbrenda split_huge_page(page); 226377da2ba0SClaudio Imbrenda unlock_page(page); 226431dbd01fSIzik Eidus } 226531dbd01fSIzik Eidus } 226631dbd01fSIzik Eidus } 226731dbd01fSIzik Eidus 226821fbd591SQi Zheng static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot, 226921fbd591SQi Zheng struct ksm_rmap_item **rmap_list, 227031dbd01fSIzik Eidus unsigned long addr) 227131dbd01fSIzik Eidus { 227221fbd591SQi Zheng struct ksm_rmap_item *rmap_item; 227331dbd01fSIzik Eidus 22746514d511SHugh Dickins while (*rmap_list) { 22756514d511SHugh Dickins rmap_item = *rmap_list; 227693d17715SHugh Dickins if ((rmap_item->address & PAGE_MASK) == addr) 227731dbd01fSIzik Eidus return rmap_item; 227831dbd01fSIzik Eidus if (rmap_item->address > addr) 227931dbd01fSIzik Eidus break; 22806514d511SHugh Dickins *rmap_list = rmap_item->rmap_list; 228131dbd01fSIzik Eidus remove_rmap_item_from_tree(rmap_item); 228231dbd01fSIzik Eidus free_rmap_item(rmap_item); 228331dbd01fSIzik Eidus } 228431dbd01fSIzik Eidus 228531dbd01fSIzik Eidus rmap_item = alloc_rmap_item(); 228631dbd01fSIzik Eidus if (rmap_item) { 228731dbd01fSIzik Eidus /* It has already been zeroed */ 228858730ab6SQi Zheng rmap_item->mm = mm_slot->slot.mm; 2289cb4df4caSxu xin rmap_item->mm->ksm_rmap_items++; 229031dbd01fSIzik Eidus rmap_item->address = addr; 22916514d511SHugh Dickins rmap_item->rmap_list = *rmap_list; 22926514d511SHugh Dickins *rmap_list = rmap_item; 229331dbd01fSIzik Eidus } 229431dbd01fSIzik Eidus return rmap_item; 229531dbd01fSIzik Eidus } 229631dbd01fSIzik Eidus 229721fbd591SQi Zheng static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page) 229831dbd01fSIzik Eidus { 229931dbd01fSIzik Eidus struct mm_struct *mm; 230058730ab6SQi Zheng struct ksm_mm_slot *mm_slot; 230158730ab6SQi Zheng struct mm_slot *slot; 230231dbd01fSIzik Eidus struct vm_area_struct *vma; 230321fbd591SQi Zheng struct ksm_rmap_item *rmap_item; 2304a5f18ba0SMatthew Wilcox (Oracle) struct vma_iterator vmi; 230590bd6fd3SPetr Holasek int nid; 230631dbd01fSIzik Eidus 230758730ab6SQi Zheng if (list_empty(&ksm_mm_head.slot.mm_node)) 230831dbd01fSIzik Eidus return NULL; 230931dbd01fSIzik Eidus 231058730ab6SQi Zheng mm_slot = ksm_scan.mm_slot; 231158730ab6SQi Zheng if (mm_slot == &ksm_mm_head) { 2312739100c8SStefan Roesch trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items); 2313739100c8SStefan Roesch 23142919bfd0SHugh Dickins /* 23151fec6890SMatthew Wilcox (Oracle) * A number of pages can hang around indefinitely in per-cpu 23161fec6890SMatthew Wilcox (Oracle) * LRU cache, raised page count preventing write_protect_page 23172919bfd0SHugh Dickins * from merging them. Though it doesn't really matter much, 23182919bfd0SHugh Dickins * it is puzzling to see some stuck in pages_volatile until 23192919bfd0SHugh Dickins * other activity jostles them out, and they also prevented 23202919bfd0SHugh Dickins * LTP's KSM test from succeeding deterministically; so drain 23212919bfd0SHugh Dickins * them here (here rather than on entry to ksm_do_scan(), 23222919bfd0SHugh Dickins * so we don't IPI too often when pages_to_scan is set low). 23232919bfd0SHugh Dickins */ 23242919bfd0SHugh Dickins lru_add_drain_all(); 23252919bfd0SHugh Dickins 23264146d2d6SHugh Dickins /* 23274146d2d6SHugh Dickins * Whereas stale stable_nodes on the stable_tree itself 23284146d2d6SHugh Dickins * get pruned in the regular course of stable_tree_search(), 23294146d2d6SHugh Dickins * those moved out to the migrate_nodes list can accumulate: 23304146d2d6SHugh Dickins * so prune them once before each full scan. 23314146d2d6SHugh Dickins */ 23324146d2d6SHugh Dickins if (!ksm_merge_across_nodes) { 233321fbd591SQi Zheng struct ksm_stable_node *stable_node, *next; 23344146d2d6SHugh Dickins struct page *page; 23354146d2d6SHugh Dickins 233603640418SGeliang Tang list_for_each_entry_safe(stable_node, next, 233703640418SGeliang Tang &migrate_nodes, list) { 23382cee57d1SYang Shi page = get_ksm_page(stable_node, 23392cee57d1SYang Shi GET_KSM_PAGE_NOLOCK); 23404146d2d6SHugh Dickins if (page) 23414146d2d6SHugh Dickins put_page(page); 23424146d2d6SHugh Dickins cond_resched(); 23434146d2d6SHugh Dickins } 23444146d2d6SHugh Dickins } 23454146d2d6SHugh Dickins 2346ef53d16cSHugh Dickins for (nid = 0; nid < ksm_nr_node_ids; nid++) 234790bd6fd3SPetr Holasek root_unstable_tree[nid] = RB_ROOT; 234831dbd01fSIzik Eidus 234931dbd01fSIzik Eidus spin_lock(&ksm_mmlist_lock); 235058730ab6SQi Zheng slot = list_entry(mm_slot->slot.mm_node.next, 235158730ab6SQi Zheng struct mm_slot, mm_node); 235258730ab6SQi Zheng mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); 235358730ab6SQi Zheng ksm_scan.mm_slot = mm_slot; 235431dbd01fSIzik Eidus spin_unlock(&ksm_mmlist_lock); 23552b472611SHugh Dickins /* 23562b472611SHugh Dickins * Although we tested list_empty() above, a racing __ksm_exit 23572b472611SHugh Dickins * of the last mm on the list may have removed it since then. 23582b472611SHugh Dickins */ 235958730ab6SQi Zheng if (mm_slot == &ksm_mm_head) 23602b472611SHugh Dickins return NULL; 236131dbd01fSIzik Eidus next_mm: 236231dbd01fSIzik Eidus ksm_scan.address = 0; 236358730ab6SQi Zheng ksm_scan.rmap_list = &mm_slot->rmap_list; 236431dbd01fSIzik Eidus } 236531dbd01fSIzik Eidus 236658730ab6SQi Zheng slot = &mm_slot->slot; 236731dbd01fSIzik Eidus mm = slot->mm; 2368a5f18ba0SMatthew Wilcox (Oracle) vma_iter_init(&vmi, mm, ksm_scan.address); 2369a5f18ba0SMatthew Wilcox (Oracle) 2370d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 23719ba69294SHugh Dickins if (ksm_test_exit(mm)) 2372a5f18ba0SMatthew Wilcox (Oracle) goto no_vmas; 23739ba69294SHugh Dickins 2374a5f18ba0SMatthew Wilcox (Oracle) for_each_vma(vmi, vma) { 237531dbd01fSIzik Eidus if (!(vma->vm_flags & VM_MERGEABLE)) 237631dbd01fSIzik Eidus continue; 237731dbd01fSIzik Eidus if (ksm_scan.address < vma->vm_start) 237831dbd01fSIzik Eidus ksm_scan.address = vma->vm_start; 237931dbd01fSIzik Eidus if (!vma->anon_vma) 238031dbd01fSIzik Eidus ksm_scan.address = vma->vm_end; 238131dbd01fSIzik Eidus 238231dbd01fSIzik Eidus while (ksm_scan.address < vma->vm_end) { 23839ba69294SHugh Dickins if (ksm_test_exit(mm)) 23849ba69294SHugh Dickins break; 238531dbd01fSIzik Eidus *page = follow_page(vma, ksm_scan.address, FOLL_GET); 2386f7091ed6SHaiyue Wang if (IS_ERR_OR_NULL(*page)) { 238721ae5b01SAndrea Arcangeli ksm_scan.address += PAGE_SIZE; 238821ae5b01SAndrea Arcangeli cond_resched(); 238921ae5b01SAndrea Arcangeli continue; 239021ae5b01SAndrea Arcangeli } 2391f7091ed6SHaiyue Wang if (is_zone_device_page(*page)) 2392f7091ed6SHaiyue Wang goto next_page; 2393f765f540SKirill A. Shutemov if (PageAnon(*page)) { 239431dbd01fSIzik Eidus flush_anon_page(vma, *page, ksm_scan.address); 239531dbd01fSIzik Eidus flush_dcache_page(*page); 239658730ab6SQi Zheng rmap_item = get_next_rmap_item(mm_slot, 23976514d511SHugh Dickins ksm_scan.rmap_list, ksm_scan.address); 239831dbd01fSIzik Eidus if (rmap_item) { 23996514d511SHugh Dickins ksm_scan.rmap_list = 24006514d511SHugh Dickins &rmap_item->rmap_list; 240131dbd01fSIzik Eidus ksm_scan.address += PAGE_SIZE; 240231dbd01fSIzik Eidus } else 240331dbd01fSIzik Eidus put_page(*page); 2404d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 240531dbd01fSIzik Eidus return rmap_item; 240631dbd01fSIzik Eidus } 2407f7091ed6SHaiyue Wang next_page: 240831dbd01fSIzik Eidus put_page(*page); 240931dbd01fSIzik Eidus ksm_scan.address += PAGE_SIZE; 241031dbd01fSIzik Eidus cond_resched(); 241131dbd01fSIzik Eidus } 241231dbd01fSIzik Eidus } 241331dbd01fSIzik Eidus 24149ba69294SHugh Dickins if (ksm_test_exit(mm)) { 2415a5f18ba0SMatthew Wilcox (Oracle) no_vmas: 24169ba69294SHugh Dickins ksm_scan.address = 0; 241758730ab6SQi Zheng ksm_scan.rmap_list = &mm_slot->rmap_list; 24189ba69294SHugh Dickins } 241931dbd01fSIzik Eidus /* 242031dbd01fSIzik Eidus * Nuke all the rmap_items that are above this current rmap: 242131dbd01fSIzik Eidus * because there were no VM_MERGEABLE vmas with such addresses. 242231dbd01fSIzik Eidus */ 2423420be4edSChengyang Fan remove_trailing_rmap_items(ksm_scan.rmap_list); 242431dbd01fSIzik Eidus 242531dbd01fSIzik Eidus spin_lock(&ksm_mmlist_lock); 242658730ab6SQi Zheng slot = list_entry(mm_slot->slot.mm_node.next, 242758730ab6SQi Zheng struct mm_slot, mm_node); 242858730ab6SQi Zheng ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); 2429cd551f97SHugh Dickins if (ksm_scan.address == 0) { 2430cd551f97SHugh Dickins /* 2431c1e8d7c6SMichel Lespinasse * We've completed a full scan of all vmas, holding mmap_lock 2432cd551f97SHugh Dickins * throughout, and found no VM_MERGEABLE: so do the same as 2433cd551f97SHugh Dickins * __ksm_exit does to remove this mm from all our lists now. 24349ba69294SHugh Dickins * This applies either when cleaning up after __ksm_exit 24359ba69294SHugh Dickins * (but beware: we can reach here even before __ksm_exit), 24369ba69294SHugh Dickins * or when all VM_MERGEABLE areas have been unmapped (and 2437c1e8d7c6SMichel Lespinasse * mmap_lock then protects against race with MADV_MERGEABLE). 2438cd551f97SHugh Dickins */ 243958730ab6SQi Zheng hash_del(&mm_slot->slot.hash); 244058730ab6SQi Zheng list_del(&mm_slot->slot.mm_node); 24419ba69294SHugh Dickins spin_unlock(&ksm_mmlist_lock); 24429ba69294SHugh Dickins 244358730ab6SQi Zheng mm_slot_free(mm_slot_cache, mm_slot); 2444cd551f97SHugh Dickins clear_bit(MMF_VM_MERGEABLE, &mm->flags); 2445d7597f59SStefan Roesch clear_bit(MMF_VM_MERGE_ANY, &mm->flags); 2446d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 24479ba69294SHugh Dickins mmdrop(mm); 24489ba69294SHugh Dickins } else { 2449d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 24507496fea9SZhou Chengming /* 24513e4e28c5SMichel Lespinasse * mmap_read_unlock(mm) first because after 24527496fea9SZhou Chengming * spin_unlock(&ksm_mmlist_lock) run, the "mm" may 24537496fea9SZhou Chengming * already have been freed under us by __ksm_exit() 24547496fea9SZhou Chengming * because the "mm_slot" is still hashed and 24557496fea9SZhou Chengming * ksm_scan.mm_slot doesn't point to it anymore. 24567496fea9SZhou Chengming */ 24577496fea9SZhou Chengming spin_unlock(&ksm_mmlist_lock); 24589ba69294SHugh Dickins } 245931dbd01fSIzik Eidus 246031dbd01fSIzik Eidus /* Repeat until we've completed scanning the whole list */ 246158730ab6SQi Zheng mm_slot = ksm_scan.mm_slot; 246258730ab6SQi Zheng if (mm_slot != &ksm_mm_head) 246331dbd01fSIzik Eidus goto next_mm; 246431dbd01fSIzik Eidus 2465739100c8SStefan Roesch trace_ksm_stop_scan(ksm_scan.seqnr, ksm_rmap_items); 246631dbd01fSIzik Eidus ksm_scan.seqnr++; 246731dbd01fSIzik Eidus return NULL; 246831dbd01fSIzik Eidus } 246931dbd01fSIzik Eidus 247031dbd01fSIzik Eidus /** 247131dbd01fSIzik Eidus * ksm_do_scan - the ksm scanner main worker function. 2472b7701a5fSMike Rapoport * @scan_npages: number of pages we want to scan before we return. 247331dbd01fSIzik Eidus */ 247431dbd01fSIzik Eidus static void ksm_do_scan(unsigned int scan_npages) 247531dbd01fSIzik Eidus { 247621fbd591SQi Zheng struct ksm_rmap_item *rmap_item; 24773f649ab7SKees Cook struct page *page; 247831dbd01fSIzik Eidus 2479878aee7dSAndrea Arcangeli while (scan_npages-- && likely(!freezing(current))) { 248031dbd01fSIzik Eidus cond_resched(); 248131dbd01fSIzik Eidus rmap_item = scan_get_next_rmap_item(&page); 248231dbd01fSIzik Eidus if (!rmap_item) 248331dbd01fSIzik Eidus return; 248431dbd01fSIzik Eidus cmp_and_merge_page(page, rmap_item); 248531dbd01fSIzik Eidus put_page(page); 248631dbd01fSIzik Eidus } 248731dbd01fSIzik Eidus } 248831dbd01fSIzik Eidus 24896e158384SHugh Dickins static int ksmd_should_run(void) 24906e158384SHugh Dickins { 249158730ab6SQi Zheng return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.slot.mm_node); 24926e158384SHugh Dickins } 24936e158384SHugh Dickins 249431dbd01fSIzik Eidus static int ksm_scan_thread(void *nothing) 249531dbd01fSIzik Eidus { 2496fcf9a0efSKirill Tkhai unsigned int sleep_ms; 2497fcf9a0efSKirill Tkhai 2498878aee7dSAndrea Arcangeli set_freezable(); 2499339aa624SIzik Eidus set_user_nice(current, 5); 250031dbd01fSIzik Eidus 250131dbd01fSIzik Eidus while (!kthread_should_stop()) { 250231dbd01fSIzik Eidus mutex_lock(&ksm_thread_mutex); 2503ef4d43a8SHugh Dickins wait_while_offlining(); 25046e158384SHugh Dickins if (ksmd_should_run()) 250531dbd01fSIzik Eidus ksm_do_scan(ksm_thread_pages_to_scan); 250631dbd01fSIzik Eidus mutex_unlock(&ksm_thread_mutex); 25076e158384SHugh Dickins 2508878aee7dSAndrea Arcangeli try_to_freeze(); 2509878aee7dSAndrea Arcangeli 25106e158384SHugh Dickins if (ksmd_should_run()) { 2511fcf9a0efSKirill Tkhai sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs); 2512fcf9a0efSKirill Tkhai wait_event_interruptible_timeout(ksm_iter_wait, 2513fcf9a0efSKirill Tkhai sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs), 2514fcf9a0efSKirill Tkhai msecs_to_jiffies(sleep_ms)); 251531dbd01fSIzik Eidus } else { 2516878aee7dSAndrea Arcangeli wait_event_freezable(ksm_thread_wait, 25176e158384SHugh Dickins ksmd_should_run() || kthread_should_stop()); 251831dbd01fSIzik Eidus } 251931dbd01fSIzik Eidus } 252031dbd01fSIzik Eidus return 0; 252131dbd01fSIzik Eidus } 252231dbd01fSIzik Eidus 2523d7597f59SStefan Roesch static void __ksm_add_vma(struct vm_area_struct *vma) 2524d7597f59SStefan Roesch { 2525d7597f59SStefan Roesch unsigned long vm_flags = vma->vm_flags; 2526d7597f59SStefan Roesch 2527d7597f59SStefan Roesch if (vm_flags & VM_MERGEABLE) 2528d7597f59SStefan Roesch return; 2529d7597f59SStefan Roesch 2530d7597f59SStefan Roesch if (vma_ksm_compatible(vma)) 2531d7597f59SStefan Roesch vm_flags_set(vma, VM_MERGEABLE); 2532d7597f59SStefan Roesch } 2533d7597f59SStefan Roesch 253424139c07SDavid Hildenbrand static int __ksm_del_vma(struct vm_area_struct *vma) 253524139c07SDavid Hildenbrand { 253624139c07SDavid Hildenbrand int err; 253724139c07SDavid Hildenbrand 253824139c07SDavid Hildenbrand if (!(vma->vm_flags & VM_MERGEABLE)) 253924139c07SDavid Hildenbrand return 0; 254024139c07SDavid Hildenbrand 254124139c07SDavid Hildenbrand if (vma->anon_vma) { 254224139c07SDavid Hildenbrand err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end); 254324139c07SDavid Hildenbrand if (err) 254424139c07SDavid Hildenbrand return err; 254524139c07SDavid Hildenbrand } 254624139c07SDavid Hildenbrand 254724139c07SDavid Hildenbrand vm_flags_clear(vma, VM_MERGEABLE); 254824139c07SDavid Hildenbrand return 0; 254924139c07SDavid Hildenbrand } 2550d7597f59SStefan Roesch /** 2551d7597f59SStefan Roesch * ksm_add_vma - Mark vma as mergeable if compatible 2552d7597f59SStefan Roesch * 2553d7597f59SStefan Roesch * @vma: Pointer to vma 2554d7597f59SStefan Roesch */ 2555d7597f59SStefan Roesch void ksm_add_vma(struct vm_area_struct *vma) 2556d7597f59SStefan Roesch { 2557d7597f59SStefan Roesch struct mm_struct *mm = vma->vm_mm; 2558d7597f59SStefan Roesch 2559d7597f59SStefan Roesch if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) 2560d7597f59SStefan Roesch __ksm_add_vma(vma); 2561d7597f59SStefan Roesch } 2562d7597f59SStefan Roesch 2563d7597f59SStefan Roesch static void ksm_add_vmas(struct mm_struct *mm) 2564d7597f59SStefan Roesch { 2565d7597f59SStefan Roesch struct vm_area_struct *vma; 2566d7597f59SStefan Roesch 2567d7597f59SStefan Roesch VMA_ITERATOR(vmi, mm, 0); 2568d7597f59SStefan Roesch for_each_vma(vmi, vma) 2569d7597f59SStefan Roesch __ksm_add_vma(vma); 2570d7597f59SStefan Roesch } 2571d7597f59SStefan Roesch 257224139c07SDavid Hildenbrand static int ksm_del_vmas(struct mm_struct *mm) 257324139c07SDavid Hildenbrand { 257424139c07SDavid Hildenbrand struct vm_area_struct *vma; 257524139c07SDavid Hildenbrand int err; 257624139c07SDavid Hildenbrand 257724139c07SDavid Hildenbrand VMA_ITERATOR(vmi, mm, 0); 257824139c07SDavid Hildenbrand for_each_vma(vmi, vma) { 257924139c07SDavid Hildenbrand err = __ksm_del_vma(vma); 258024139c07SDavid Hildenbrand if (err) 258124139c07SDavid Hildenbrand return err; 258224139c07SDavid Hildenbrand } 258324139c07SDavid Hildenbrand return 0; 258424139c07SDavid Hildenbrand } 258524139c07SDavid Hildenbrand 2586d7597f59SStefan Roesch /** 2587d7597f59SStefan Roesch * ksm_enable_merge_any - Add mm to mm ksm list and enable merging on all 2588d7597f59SStefan Roesch * compatible VMA's 2589d7597f59SStefan Roesch * 2590d7597f59SStefan Roesch * @mm: Pointer to mm 2591d7597f59SStefan Roesch * 2592d7597f59SStefan Roesch * Returns 0 on success, otherwise error code 2593d7597f59SStefan Roesch */ 2594d7597f59SStefan Roesch int ksm_enable_merge_any(struct mm_struct *mm) 2595d7597f59SStefan Roesch { 2596d7597f59SStefan Roesch int err; 2597d7597f59SStefan Roesch 2598d7597f59SStefan Roesch if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) 2599d7597f59SStefan Roesch return 0; 2600d7597f59SStefan Roesch 2601d7597f59SStefan Roesch if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { 2602d7597f59SStefan Roesch err = __ksm_enter(mm); 2603d7597f59SStefan Roesch if (err) 2604d7597f59SStefan Roesch return err; 2605d7597f59SStefan Roesch } 2606d7597f59SStefan Roesch 2607d7597f59SStefan Roesch set_bit(MMF_VM_MERGE_ANY, &mm->flags); 2608d7597f59SStefan Roesch ksm_add_vmas(mm); 2609d7597f59SStefan Roesch 2610d7597f59SStefan Roesch return 0; 2611d7597f59SStefan Roesch } 2612d7597f59SStefan Roesch 261324139c07SDavid Hildenbrand /** 261424139c07SDavid Hildenbrand * ksm_disable_merge_any - Disable merging on all compatible VMA's of the mm, 261524139c07SDavid Hildenbrand * previously enabled via ksm_enable_merge_any(). 261624139c07SDavid Hildenbrand * 261724139c07SDavid Hildenbrand * Disabling merging implies unmerging any merged pages, like setting 261824139c07SDavid Hildenbrand * MADV_UNMERGEABLE would. If unmerging fails, the whole operation fails and 261924139c07SDavid Hildenbrand * merging on all compatible VMA's remains enabled. 262024139c07SDavid Hildenbrand * 262124139c07SDavid Hildenbrand * @mm: Pointer to mm 262224139c07SDavid Hildenbrand * 262324139c07SDavid Hildenbrand * Returns 0 on success, otherwise error code 262424139c07SDavid Hildenbrand */ 262524139c07SDavid Hildenbrand int ksm_disable_merge_any(struct mm_struct *mm) 262624139c07SDavid Hildenbrand { 262724139c07SDavid Hildenbrand int err; 262824139c07SDavid Hildenbrand 262924139c07SDavid Hildenbrand if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags)) 263024139c07SDavid Hildenbrand return 0; 263124139c07SDavid Hildenbrand 263224139c07SDavid Hildenbrand err = ksm_del_vmas(mm); 263324139c07SDavid Hildenbrand if (err) { 263424139c07SDavid Hildenbrand ksm_add_vmas(mm); 263524139c07SDavid Hildenbrand return err; 263624139c07SDavid Hildenbrand } 263724139c07SDavid Hildenbrand 263824139c07SDavid Hildenbrand clear_bit(MMF_VM_MERGE_ANY, &mm->flags); 263924139c07SDavid Hildenbrand return 0; 264024139c07SDavid Hildenbrand } 264124139c07SDavid Hildenbrand 26422c281f54SDavid Hildenbrand int ksm_disable(struct mm_struct *mm) 26432c281f54SDavid Hildenbrand { 26442c281f54SDavid Hildenbrand mmap_assert_write_locked(mm); 26452c281f54SDavid Hildenbrand 26462c281f54SDavid Hildenbrand if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) 26472c281f54SDavid Hildenbrand return 0; 26482c281f54SDavid Hildenbrand if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) 26492c281f54SDavid Hildenbrand return ksm_disable_merge_any(mm); 26502c281f54SDavid Hildenbrand return ksm_del_vmas(mm); 26512c281f54SDavid Hildenbrand } 26522c281f54SDavid Hildenbrand 2653f8af4da3SHugh Dickins int ksm_madvise(struct vm_area_struct *vma, unsigned long start, 2654f8af4da3SHugh Dickins unsigned long end, int advice, unsigned long *vm_flags) 2655f8af4da3SHugh Dickins { 2656f8af4da3SHugh Dickins struct mm_struct *mm = vma->vm_mm; 2657d952b791SHugh Dickins int err; 2658f8af4da3SHugh Dickins 2659f8af4da3SHugh Dickins switch (advice) { 2660f8af4da3SHugh Dickins case MADV_MERGEABLE: 2661d7597f59SStefan Roesch if (vma->vm_flags & VM_MERGEABLE) 2662e1fb4a08SDave Jiang return 0; 2663d7597f59SStefan Roesch if (!vma_ksm_compatible(vma)) 266412564485SShawn Anastasio return 0; 2665cc2383ecSKonstantin Khlebnikov 2666d952b791SHugh Dickins if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { 2667d952b791SHugh Dickins err = __ksm_enter(mm); 2668d952b791SHugh Dickins if (err) 2669d952b791SHugh Dickins return err; 2670d952b791SHugh Dickins } 2671f8af4da3SHugh Dickins 2672f8af4da3SHugh Dickins *vm_flags |= VM_MERGEABLE; 2673f8af4da3SHugh Dickins break; 2674f8af4da3SHugh Dickins 2675f8af4da3SHugh Dickins case MADV_UNMERGEABLE: 2676f8af4da3SHugh Dickins if (!(*vm_flags & VM_MERGEABLE)) 2677f8af4da3SHugh Dickins return 0; /* just ignore the advice */ 2678f8af4da3SHugh Dickins 2679d952b791SHugh Dickins if (vma->anon_vma) { 2680d952b791SHugh Dickins err = unmerge_ksm_pages(vma, start, end); 2681d952b791SHugh Dickins if (err) 2682d952b791SHugh Dickins return err; 2683d952b791SHugh Dickins } 2684f8af4da3SHugh Dickins 2685f8af4da3SHugh Dickins *vm_flags &= ~VM_MERGEABLE; 2686f8af4da3SHugh Dickins break; 2687f8af4da3SHugh Dickins } 2688f8af4da3SHugh Dickins 2689f8af4da3SHugh Dickins return 0; 2690f8af4da3SHugh Dickins } 269133cf1707SBharata B Rao EXPORT_SYMBOL_GPL(ksm_madvise); 2692f8af4da3SHugh Dickins 2693f8af4da3SHugh Dickins int __ksm_enter(struct mm_struct *mm) 2694f8af4da3SHugh Dickins { 269521fbd591SQi Zheng struct ksm_mm_slot *mm_slot; 269658730ab6SQi Zheng struct mm_slot *slot; 26976e158384SHugh Dickins int needs_wakeup; 26986e158384SHugh Dickins 269958730ab6SQi Zheng mm_slot = mm_slot_alloc(mm_slot_cache); 270031dbd01fSIzik Eidus if (!mm_slot) 270131dbd01fSIzik Eidus return -ENOMEM; 270231dbd01fSIzik Eidus 270358730ab6SQi Zheng slot = &mm_slot->slot; 270458730ab6SQi Zheng 27056e158384SHugh Dickins /* Check ksm_run too? Would need tighter locking */ 270658730ab6SQi Zheng needs_wakeup = list_empty(&ksm_mm_head.slot.mm_node); 27076e158384SHugh Dickins 270831dbd01fSIzik Eidus spin_lock(&ksm_mmlist_lock); 270958730ab6SQi Zheng mm_slot_insert(mm_slots_hash, mm, slot); 271031dbd01fSIzik Eidus /* 2711cbf86cfeSHugh Dickins * When KSM_RUN_MERGE (or KSM_RUN_STOP), 2712cbf86cfeSHugh Dickins * insert just behind the scanning cursor, to let the area settle 271331dbd01fSIzik Eidus * down a little; when fork is followed by immediate exec, we don't 271431dbd01fSIzik Eidus * want ksmd to waste time setting up and tearing down an rmap_list. 2715cbf86cfeSHugh Dickins * 2716cbf86cfeSHugh Dickins * But when KSM_RUN_UNMERGE, it's important to insert ahead of its 2717cbf86cfeSHugh Dickins * scanning cursor, otherwise KSM pages in newly forked mms will be 2718cbf86cfeSHugh Dickins * missed: then we might as well insert at the end of the list. 271931dbd01fSIzik Eidus */ 2720cbf86cfeSHugh Dickins if (ksm_run & KSM_RUN_UNMERGE) 272158730ab6SQi Zheng list_add_tail(&slot->mm_node, &ksm_mm_head.slot.mm_node); 2722cbf86cfeSHugh Dickins else 272358730ab6SQi Zheng list_add_tail(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node); 272431dbd01fSIzik Eidus spin_unlock(&ksm_mmlist_lock); 272531dbd01fSIzik Eidus 2726f8af4da3SHugh Dickins set_bit(MMF_VM_MERGEABLE, &mm->flags); 2727f1f10076SVegard Nossum mmgrab(mm); 27286e158384SHugh Dickins 27296e158384SHugh Dickins if (needs_wakeup) 27306e158384SHugh Dickins wake_up_interruptible(&ksm_thread_wait); 27316e158384SHugh Dickins 2732739100c8SStefan Roesch trace_ksm_enter(mm); 2733f8af4da3SHugh Dickins return 0; 2734f8af4da3SHugh Dickins } 2735f8af4da3SHugh Dickins 27361c2fb7a4SAndrea Arcangeli void __ksm_exit(struct mm_struct *mm) 2737f8af4da3SHugh Dickins { 273821fbd591SQi Zheng struct ksm_mm_slot *mm_slot; 273958730ab6SQi Zheng struct mm_slot *slot; 27409ba69294SHugh Dickins int easy_to_free = 0; 2741cd551f97SHugh Dickins 274231dbd01fSIzik Eidus /* 27439ba69294SHugh Dickins * This process is exiting: if it's straightforward (as is the 27449ba69294SHugh Dickins * case when ksmd was never running), free mm_slot immediately. 27459ba69294SHugh Dickins * But if it's at the cursor or has rmap_items linked to it, use 2746c1e8d7c6SMichel Lespinasse * mmap_lock to synchronize with any break_cows before pagetables 27479ba69294SHugh Dickins * are freed, and leave the mm_slot on the list for ksmd to free. 27489ba69294SHugh Dickins * Beware: ksm may already have noticed it exiting and freed the slot. 274931dbd01fSIzik Eidus */ 27509ba69294SHugh Dickins 2751cd551f97SHugh Dickins spin_lock(&ksm_mmlist_lock); 275258730ab6SQi Zheng slot = mm_slot_lookup(mm_slots_hash, mm); 275358730ab6SQi Zheng mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); 27549ba69294SHugh Dickins if (mm_slot && ksm_scan.mm_slot != mm_slot) { 27556514d511SHugh Dickins if (!mm_slot->rmap_list) { 275658730ab6SQi Zheng hash_del(&slot->hash); 275758730ab6SQi Zheng list_del(&slot->mm_node); 27589ba69294SHugh Dickins easy_to_free = 1; 27599ba69294SHugh Dickins } else { 276058730ab6SQi Zheng list_move(&slot->mm_node, 276158730ab6SQi Zheng &ksm_scan.mm_slot->slot.mm_node); 27629ba69294SHugh Dickins } 27639ba69294SHugh Dickins } 2764cd551f97SHugh Dickins spin_unlock(&ksm_mmlist_lock); 2765cd551f97SHugh Dickins 27669ba69294SHugh Dickins if (easy_to_free) { 276758730ab6SQi Zheng mm_slot_free(mm_slot_cache, mm_slot); 2768d7597f59SStefan Roesch clear_bit(MMF_VM_MERGE_ANY, &mm->flags); 2769cd551f97SHugh Dickins clear_bit(MMF_VM_MERGEABLE, &mm->flags); 27709ba69294SHugh Dickins mmdrop(mm); 27719ba69294SHugh Dickins } else if (mm_slot) { 2772d8ed45c5SMichel Lespinasse mmap_write_lock(mm); 2773d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 27749ba69294SHugh Dickins } 2775739100c8SStefan Roesch 2776739100c8SStefan Roesch trace_ksm_exit(mm); 2777f8af4da3SHugh Dickins } 277831dbd01fSIzik Eidus 2779cbf86cfeSHugh Dickins struct page *ksm_might_need_to_copy(struct page *page, 27805ad64688SHugh Dickins struct vm_area_struct *vma, unsigned long address) 27815ad64688SHugh Dickins { 2782e05b3453SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 2783e05b3453SMatthew Wilcox (Oracle) struct anon_vma *anon_vma = folio_anon_vma(folio); 27845ad64688SHugh Dickins struct page *new_page; 27855ad64688SHugh Dickins 2786cbf86cfeSHugh Dickins if (PageKsm(page)) { 2787cbf86cfeSHugh Dickins if (page_stable_node(page) && 2788cbf86cfeSHugh Dickins !(ksm_run & KSM_RUN_UNMERGE)) 2789cbf86cfeSHugh Dickins return page; /* no need to copy it */ 2790cbf86cfeSHugh Dickins } else if (!anon_vma) { 2791cbf86cfeSHugh Dickins return page; /* no need to copy it */ 2792e1c63e11SNanyong Sun } else if (page->index == linear_page_index(vma, address) && 2793e1c63e11SNanyong Sun anon_vma->root == vma->anon_vma->root) { 2794cbf86cfeSHugh Dickins return page; /* still no need to copy it */ 2795cbf86cfeSHugh Dickins } 2796cbf86cfeSHugh Dickins if (!PageUptodate(page)) 2797cbf86cfeSHugh Dickins return page; /* let do_swap_page report the error */ 2798cbf86cfeSHugh Dickins 27995ad64688SHugh Dickins new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); 28008f425e4eSMatthew Wilcox (Oracle) if (new_page && 28018f425e4eSMatthew Wilcox (Oracle) mem_cgroup_charge(page_folio(new_page), vma->vm_mm, GFP_KERNEL)) { 280262fdb163SHugh Dickins put_page(new_page); 280362fdb163SHugh Dickins new_page = NULL; 280462fdb163SHugh Dickins } 28055ad64688SHugh Dickins if (new_page) { 28066b970599SKefeng Wang if (copy_mc_user_highpage(new_page, page, address, vma)) { 28076b970599SKefeng Wang put_page(new_page); 28086b970599SKefeng Wang memory_failure_queue(page_to_pfn(page), 0); 28096b970599SKefeng Wang return ERR_PTR(-EHWPOISON); 28106b970599SKefeng Wang } 28115ad64688SHugh Dickins SetPageDirty(new_page); 28125ad64688SHugh Dickins __SetPageUptodate(new_page); 281348c935adSKirill A. Shutemov __SetPageLocked(new_page); 28144d45c3afSYang Yang #ifdef CONFIG_SWAP 28154d45c3afSYang Yang count_vm_event(KSM_SWPIN_COPY); 28164d45c3afSYang Yang #endif 28175ad64688SHugh Dickins } 28185ad64688SHugh Dickins 28195ad64688SHugh Dickins return new_page; 28205ad64688SHugh Dickins } 28215ad64688SHugh Dickins 28226d4675e6SMinchan Kim void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc) 2823e9995ef9SHugh Dickins { 282421fbd591SQi Zheng struct ksm_stable_node *stable_node; 282521fbd591SQi Zheng struct ksm_rmap_item *rmap_item; 2826e9995ef9SHugh Dickins int search_new_forks = 0; 2827e9995ef9SHugh Dickins 28282f031c6fSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio); 28299f32624bSJoonsoo Kim 28309f32624bSJoonsoo Kim /* 28319f32624bSJoonsoo Kim * Rely on the page lock to protect against concurrent modifications 28329f32624bSJoonsoo Kim * to that page's node of the stable tree. 28339f32624bSJoonsoo Kim */ 28342f031c6fSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 2835e9995ef9SHugh Dickins 28362f031c6fSMatthew Wilcox (Oracle) stable_node = folio_stable_node(folio); 2837e9995ef9SHugh Dickins if (!stable_node) 28381df631aeSMinchan Kim return; 2839e9995ef9SHugh Dickins again: 2840b67bfe0dSSasha Levin hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { 2841e9995ef9SHugh Dickins struct anon_vma *anon_vma = rmap_item->anon_vma; 28425beb4930SRik van Riel struct anon_vma_chain *vmac; 2843e9995ef9SHugh Dickins struct vm_area_struct *vma; 2844e9995ef9SHugh Dickins 2845ad12695fSAndrea Arcangeli cond_resched(); 28466d4675e6SMinchan Kim if (!anon_vma_trylock_read(anon_vma)) { 28476d4675e6SMinchan Kim if (rwc->try_lock) { 28486d4675e6SMinchan Kim rwc->contended = true; 28496d4675e6SMinchan Kim return; 28506d4675e6SMinchan Kim } 2851b6b19f25SHugh Dickins anon_vma_lock_read(anon_vma); 28526d4675e6SMinchan Kim } 2853bf181b9fSMichel Lespinasse anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, 2854bf181b9fSMichel Lespinasse 0, ULONG_MAX) { 28551105a2fcSJia He unsigned long addr; 28561105a2fcSJia He 2857ad12695fSAndrea Arcangeli cond_resched(); 28585beb4930SRik van Riel vma = vmac->vma; 28591105a2fcSJia He 28601105a2fcSJia He /* Ignore the stable/unstable/sqnr flags */ 2861cd7fae26SMiaohe Lin addr = rmap_item->address & PAGE_MASK; 28621105a2fcSJia He 28631105a2fcSJia He if (addr < vma->vm_start || addr >= vma->vm_end) 2864e9995ef9SHugh Dickins continue; 2865e9995ef9SHugh Dickins /* 2866e9995ef9SHugh Dickins * Initially we examine only the vma which covers this 2867e9995ef9SHugh Dickins * rmap_item; but later, if there is still work to do, 2868e9995ef9SHugh Dickins * we examine covering vmas in other mms: in case they 2869e9995ef9SHugh Dickins * were forked from the original since ksmd passed. 2870e9995ef9SHugh Dickins */ 2871e9995ef9SHugh Dickins if ((rmap_item->mm == vma->vm_mm) == search_new_forks) 2872e9995ef9SHugh Dickins continue; 2873e9995ef9SHugh Dickins 28740dd1c7bbSJoonsoo Kim if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 28750dd1c7bbSJoonsoo Kim continue; 28760dd1c7bbSJoonsoo Kim 28772f031c6fSMatthew Wilcox (Oracle) if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) { 2878b6b19f25SHugh Dickins anon_vma_unlock_read(anon_vma); 28791df631aeSMinchan Kim return; 2880e9995ef9SHugh Dickins } 28812f031c6fSMatthew Wilcox (Oracle) if (rwc->done && rwc->done(folio)) { 28820dd1c7bbSJoonsoo Kim anon_vma_unlock_read(anon_vma); 28831df631aeSMinchan Kim return; 28840dd1c7bbSJoonsoo Kim } 2885e9995ef9SHugh Dickins } 2886b6b19f25SHugh Dickins anon_vma_unlock_read(anon_vma); 2887e9995ef9SHugh Dickins } 2888e9995ef9SHugh Dickins if (!search_new_forks++) 2889e9995ef9SHugh Dickins goto again; 2890e9995ef9SHugh Dickins } 2891e9995ef9SHugh Dickins 28924248d008SLonglong Xia #ifdef CONFIG_MEMORY_FAILURE 28934248d008SLonglong Xia /* 28944248d008SLonglong Xia * Collect processes when the error hit an ksm page. 28954248d008SLonglong Xia */ 28964248d008SLonglong Xia void collect_procs_ksm(struct page *page, struct list_head *to_kill, 28974248d008SLonglong Xia int force_early) 28984248d008SLonglong Xia { 28994248d008SLonglong Xia struct ksm_stable_node *stable_node; 29004248d008SLonglong Xia struct ksm_rmap_item *rmap_item; 29014248d008SLonglong Xia struct folio *folio = page_folio(page); 29024248d008SLonglong Xia struct vm_area_struct *vma; 29034248d008SLonglong Xia struct task_struct *tsk; 29044248d008SLonglong Xia 29054248d008SLonglong Xia stable_node = folio_stable_node(folio); 29064248d008SLonglong Xia if (!stable_node) 29074248d008SLonglong Xia return; 29084248d008SLonglong Xia hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { 29094248d008SLonglong Xia struct anon_vma *av = rmap_item->anon_vma; 29104248d008SLonglong Xia 29114248d008SLonglong Xia anon_vma_lock_read(av); 29124248d008SLonglong Xia read_lock(&tasklist_lock); 29134248d008SLonglong Xia for_each_process(tsk) { 29144248d008SLonglong Xia struct anon_vma_chain *vmac; 29154248d008SLonglong Xia unsigned long addr; 29164248d008SLonglong Xia struct task_struct *t = 29174248d008SLonglong Xia task_early_kill(tsk, force_early); 29184248d008SLonglong Xia if (!t) 29194248d008SLonglong Xia continue; 29204248d008SLonglong Xia anon_vma_interval_tree_foreach(vmac, &av->rb_root, 0, 29214248d008SLonglong Xia ULONG_MAX) 29224248d008SLonglong Xia { 29234248d008SLonglong Xia vma = vmac->vma; 29244248d008SLonglong Xia if (vma->vm_mm == t->mm) { 29254248d008SLonglong Xia addr = rmap_item->address & PAGE_MASK; 29264248d008SLonglong Xia add_to_kill_ksm(t, page, vma, to_kill, 29274248d008SLonglong Xia addr); 29284248d008SLonglong Xia } 29294248d008SLonglong Xia } 29304248d008SLonglong Xia } 29314248d008SLonglong Xia read_unlock(&tasklist_lock); 29324248d008SLonglong Xia anon_vma_unlock_read(av); 29334248d008SLonglong Xia } 29344248d008SLonglong Xia } 29354248d008SLonglong Xia #endif 29364248d008SLonglong Xia 293752629506SJoonsoo Kim #ifdef CONFIG_MIGRATION 293819138349SMatthew Wilcox (Oracle) void folio_migrate_ksm(struct folio *newfolio, struct folio *folio) 2939e9995ef9SHugh Dickins { 294021fbd591SQi Zheng struct ksm_stable_node *stable_node; 2941e9995ef9SHugh Dickins 294219138349SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 294319138349SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_locked(newfolio), newfolio); 294419138349SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(newfolio->mapping != folio->mapping, newfolio); 2945e9995ef9SHugh Dickins 294619138349SMatthew Wilcox (Oracle) stable_node = folio_stable_node(folio); 2947e9995ef9SHugh Dickins if (stable_node) { 294819138349SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(stable_node->kpfn != folio_pfn(folio), folio); 294919138349SMatthew Wilcox (Oracle) stable_node->kpfn = folio_pfn(newfolio); 2950c8d6553bSHugh Dickins /* 295119138349SMatthew Wilcox (Oracle) * newfolio->mapping was set in advance; now we need smp_wmb() 2952c8d6553bSHugh Dickins * to make sure that the new stable_node->kpfn is visible 295319138349SMatthew Wilcox (Oracle) * to get_ksm_page() before it can see that folio->mapping 295419138349SMatthew Wilcox (Oracle) * has gone stale (or that folio_test_swapcache has been cleared). 2955c8d6553bSHugh Dickins */ 2956c8d6553bSHugh Dickins smp_wmb(); 295719138349SMatthew Wilcox (Oracle) set_page_stable_node(&folio->page, NULL); 2958e9995ef9SHugh Dickins } 2959e9995ef9SHugh Dickins } 2960e9995ef9SHugh Dickins #endif /* CONFIG_MIGRATION */ 2961e9995ef9SHugh Dickins 296262b61f61SHugh Dickins #ifdef CONFIG_MEMORY_HOTREMOVE 2963ef4d43a8SHugh Dickins static void wait_while_offlining(void) 2964ef4d43a8SHugh Dickins { 2965ef4d43a8SHugh Dickins while (ksm_run & KSM_RUN_OFFLINE) { 2966ef4d43a8SHugh Dickins mutex_unlock(&ksm_thread_mutex); 2967ef4d43a8SHugh Dickins wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE), 296874316201SNeilBrown TASK_UNINTERRUPTIBLE); 2969ef4d43a8SHugh Dickins mutex_lock(&ksm_thread_mutex); 2970ef4d43a8SHugh Dickins } 2971ef4d43a8SHugh Dickins } 2972ef4d43a8SHugh Dickins 297321fbd591SQi Zheng static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node, 29742c653d0eSAndrea Arcangeli unsigned long start_pfn, 29752c653d0eSAndrea Arcangeli unsigned long end_pfn) 29762c653d0eSAndrea Arcangeli { 29772c653d0eSAndrea Arcangeli if (stable_node->kpfn >= start_pfn && 29782c653d0eSAndrea Arcangeli stable_node->kpfn < end_pfn) { 29792c653d0eSAndrea Arcangeli /* 29802c653d0eSAndrea Arcangeli * Don't get_ksm_page, page has already gone: 29812c653d0eSAndrea Arcangeli * which is why we keep kpfn instead of page* 29822c653d0eSAndrea Arcangeli */ 29832c653d0eSAndrea Arcangeli remove_node_from_stable_tree(stable_node); 29842c653d0eSAndrea Arcangeli return true; 29852c653d0eSAndrea Arcangeli } 29862c653d0eSAndrea Arcangeli return false; 29872c653d0eSAndrea Arcangeli } 29882c653d0eSAndrea Arcangeli 298921fbd591SQi Zheng static bool stable_node_chain_remove_range(struct ksm_stable_node *stable_node, 29902c653d0eSAndrea Arcangeli unsigned long start_pfn, 29912c653d0eSAndrea Arcangeli unsigned long end_pfn, 29922c653d0eSAndrea Arcangeli struct rb_root *root) 29932c653d0eSAndrea Arcangeli { 299421fbd591SQi Zheng struct ksm_stable_node *dup; 29952c653d0eSAndrea Arcangeli struct hlist_node *hlist_safe; 29962c653d0eSAndrea Arcangeli 29972c653d0eSAndrea Arcangeli if (!is_stable_node_chain(stable_node)) { 29982c653d0eSAndrea Arcangeli VM_BUG_ON(is_stable_node_dup(stable_node)); 29992c653d0eSAndrea Arcangeli return stable_node_dup_remove_range(stable_node, start_pfn, 30002c653d0eSAndrea Arcangeli end_pfn); 30012c653d0eSAndrea Arcangeli } 30022c653d0eSAndrea Arcangeli 30032c653d0eSAndrea Arcangeli hlist_for_each_entry_safe(dup, hlist_safe, 30042c653d0eSAndrea Arcangeli &stable_node->hlist, hlist_dup) { 30052c653d0eSAndrea Arcangeli VM_BUG_ON(!is_stable_node_dup(dup)); 30062c653d0eSAndrea Arcangeli stable_node_dup_remove_range(dup, start_pfn, end_pfn); 30072c653d0eSAndrea Arcangeli } 30082c653d0eSAndrea Arcangeli if (hlist_empty(&stable_node->hlist)) { 30092c653d0eSAndrea Arcangeli free_stable_node_chain(stable_node, root); 30102c653d0eSAndrea Arcangeli return true; /* notify caller that tree was rebalanced */ 30112c653d0eSAndrea Arcangeli } else 30122c653d0eSAndrea Arcangeli return false; 30132c653d0eSAndrea Arcangeli } 30142c653d0eSAndrea Arcangeli 3015ee0ea59cSHugh Dickins static void ksm_check_stable_tree(unsigned long start_pfn, 301662b61f61SHugh Dickins unsigned long end_pfn) 301762b61f61SHugh Dickins { 301821fbd591SQi Zheng struct ksm_stable_node *stable_node, *next; 301962b61f61SHugh Dickins struct rb_node *node; 302090bd6fd3SPetr Holasek int nid; 302162b61f61SHugh Dickins 3022ef53d16cSHugh Dickins for (nid = 0; nid < ksm_nr_node_ids; nid++) { 3023ef53d16cSHugh Dickins node = rb_first(root_stable_tree + nid); 3024ee0ea59cSHugh Dickins while (node) { 302521fbd591SQi Zheng stable_node = rb_entry(node, struct ksm_stable_node, node); 30262c653d0eSAndrea Arcangeli if (stable_node_chain_remove_range(stable_node, 30272c653d0eSAndrea Arcangeli start_pfn, end_pfn, 30282c653d0eSAndrea Arcangeli root_stable_tree + 30292c653d0eSAndrea Arcangeli nid)) 3030ef53d16cSHugh Dickins node = rb_first(root_stable_tree + nid); 30312c653d0eSAndrea Arcangeli else 3032ee0ea59cSHugh Dickins node = rb_next(node); 3033ee0ea59cSHugh Dickins cond_resched(); 303462b61f61SHugh Dickins } 3035ee0ea59cSHugh Dickins } 303603640418SGeliang Tang list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) { 30374146d2d6SHugh Dickins if (stable_node->kpfn >= start_pfn && 30384146d2d6SHugh Dickins stable_node->kpfn < end_pfn) 30394146d2d6SHugh Dickins remove_node_from_stable_tree(stable_node); 30404146d2d6SHugh Dickins cond_resched(); 30414146d2d6SHugh Dickins } 304262b61f61SHugh Dickins } 304362b61f61SHugh Dickins 304462b61f61SHugh Dickins static int ksm_memory_callback(struct notifier_block *self, 304562b61f61SHugh Dickins unsigned long action, void *arg) 304662b61f61SHugh Dickins { 304762b61f61SHugh Dickins struct memory_notify *mn = arg; 304862b61f61SHugh Dickins 304962b61f61SHugh Dickins switch (action) { 305062b61f61SHugh Dickins case MEM_GOING_OFFLINE: 305162b61f61SHugh Dickins /* 3052ef4d43a8SHugh Dickins * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items() 3053ef4d43a8SHugh Dickins * and remove_all_stable_nodes() while memory is going offline: 3054ef4d43a8SHugh Dickins * it is unsafe for them to touch the stable tree at this time. 3055ef4d43a8SHugh Dickins * But unmerge_ksm_pages(), rmap lookups and other entry points 3056ef4d43a8SHugh Dickins * which do not need the ksm_thread_mutex are all safe. 305762b61f61SHugh Dickins */ 3058ef4d43a8SHugh Dickins mutex_lock(&ksm_thread_mutex); 3059ef4d43a8SHugh Dickins ksm_run |= KSM_RUN_OFFLINE; 3060ef4d43a8SHugh Dickins mutex_unlock(&ksm_thread_mutex); 306162b61f61SHugh Dickins break; 306262b61f61SHugh Dickins 306362b61f61SHugh Dickins case MEM_OFFLINE: 306462b61f61SHugh Dickins /* 306562b61f61SHugh Dickins * Most of the work is done by page migration; but there might 306662b61f61SHugh Dickins * be a few stable_nodes left over, still pointing to struct 3067ee0ea59cSHugh Dickins * pages which have been offlined: prune those from the tree, 3068ee0ea59cSHugh Dickins * otherwise get_ksm_page() might later try to access a 3069ee0ea59cSHugh Dickins * non-existent struct page. 307062b61f61SHugh Dickins */ 3071ee0ea59cSHugh Dickins ksm_check_stable_tree(mn->start_pfn, 3072ee0ea59cSHugh Dickins mn->start_pfn + mn->nr_pages); 3073e4a9bc58SJoe Perches fallthrough; 307462b61f61SHugh Dickins case MEM_CANCEL_OFFLINE: 3075ef4d43a8SHugh Dickins mutex_lock(&ksm_thread_mutex); 3076ef4d43a8SHugh Dickins ksm_run &= ~KSM_RUN_OFFLINE; 307762b61f61SHugh Dickins mutex_unlock(&ksm_thread_mutex); 3078ef4d43a8SHugh Dickins 3079ef4d43a8SHugh Dickins smp_mb(); /* wake_up_bit advises this */ 3080ef4d43a8SHugh Dickins wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE)); 308162b61f61SHugh Dickins break; 308262b61f61SHugh Dickins } 308362b61f61SHugh Dickins return NOTIFY_OK; 308462b61f61SHugh Dickins } 3085ef4d43a8SHugh Dickins #else 3086ef4d43a8SHugh Dickins static void wait_while_offlining(void) 3087ef4d43a8SHugh Dickins { 3088ef4d43a8SHugh Dickins } 308962b61f61SHugh Dickins #endif /* CONFIG_MEMORY_HOTREMOVE */ 309062b61f61SHugh Dickins 3091d21077fbSStefan Roesch #ifdef CONFIG_PROC_FS 3092d21077fbSStefan Roesch long ksm_process_profit(struct mm_struct *mm) 3093d21077fbSStefan Roesch { 3094d21077fbSStefan Roesch return mm->ksm_merging_pages * PAGE_SIZE - 3095d21077fbSStefan Roesch mm->ksm_rmap_items * sizeof(struct ksm_rmap_item); 3096d21077fbSStefan Roesch } 3097d21077fbSStefan Roesch #endif /* CONFIG_PROC_FS */ 3098d21077fbSStefan Roesch 30992ffd8679SHugh Dickins #ifdef CONFIG_SYSFS 31002ffd8679SHugh Dickins /* 31012ffd8679SHugh Dickins * This all compiles without CONFIG_SYSFS, but is a waste of space. 31022ffd8679SHugh Dickins */ 31032ffd8679SHugh Dickins 310431dbd01fSIzik Eidus #define KSM_ATTR_RO(_name) \ 310531dbd01fSIzik Eidus static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 310631dbd01fSIzik Eidus #define KSM_ATTR(_name) \ 31071bad2e5cSMiaohe Lin static struct kobj_attribute _name##_attr = __ATTR_RW(_name) 310831dbd01fSIzik Eidus 310931dbd01fSIzik Eidus static ssize_t sleep_millisecs_show(struct kobject *kobj, 311031dbd01fSIzik Eidus struct kobj_attribute *attr, char *buf) 311131dbd01fSIzik Eidus { 3112ae7a927dSJoe Perches return sysfs_emit(buf, "%u\n", ksm_thread_sleep_millisecs); 311331dbd01fSIzik Eidus } 311431dbd01fSIzik Eidus 311531dbd01fSIzik Eidus static ssize_t sleep_millisecs_store(struct kobject *kobj, 311631dbd01fSIzik Eidus struct kobj_attribute *attr, 311731dbd01fSIzik Eidus const char *buf, size_t count) 311831dbd01fSIzik Eidus { 3119dfefd226SAlexey Dobriyan unsigned int msecs; 312031dbd01fSIzik Eidus int err; 312131dbd01fSIzik Eidus 3122dfefd226SAlexey Dobriyan err = kstrtouint(buf, 10, &msecs); 3123dfefd226SAlexey Dobriyan if (err) 312431dbd01fSIzik Eidus return -EINVAL; 312531dbd01fSIzik Eidus 312631dbd01fSIzik Eidus ksm_thread_sleep_millisecs = msecs; 3127fcf9a0efSKirill Tkhai wake_up_interruptible(&ksm_iter_wait); 312831dbd01fSIzik Eidus 312931dbd01fSIzik Eidus return count; 313031dbd01fSIzik Eidus } 313131dbd01fSIzik Eidus KSM_ATTR(sleep_millisecs); 313231dbd01fSIzik Eidus 313331dbd01fSIzik Eidus static ssize_t pages_to_scan_show(struct kobject *kobj, 313431dbd01fSIzik Eidus struct kobj_attribute *attr, char *buf) 313531dbd01fSIzik Eidus { 3136ae7a927dSJoe Perches return sysfs_emit(buf, "%u\n", ksm_thread_pages_to_scan); 313731dbd01fSIzik Eidus } 313831dbd01fSIzik Eidus 313931dbd01fSIzik Eidus static ssize_t pages_to_scan_store(struct kobject *kobj, 314031dbd01fSIzik Eidus struct kobj_attribute *attr, 314131dbd01fSIzik Eidus const char *buf, size_t count) 314231dbd01fSIzik Eidus { 3143dfefd226SAlexey Dobriyan unsigned int nr_pages; 314431dbd01fSIzik Eidus int err; 314531dbd01fSIzik Eidus 3146dfefd226SAlexey Dobriyan err = kstrtouint(buf, 10, &nr_pages); 3147dfefd226SAlexey Dobriyan if (err) 314831dbd01fSIzik Eidus return -EINVAL; 314931dbd01fSIzik Eidus 315031dbd01fSIzik Eidus ksm_thread_pages_to_scan = nr_pages; 315131dbd01fSIzik Eidus 315231dbd01fSIzik Eidus return count; 315331dbd01fSIzik Eidus } 315431dbd01fSIzik Eidus KSM_ATTR(pages_to_scan); 315531dbd01fSIzik Eidus 315631dbd01fSIzik Eidus static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr, 315731dbd01fSIzik Eidus char *buf) 315831dbd01fSIzik Eidus { 3159ae7a927dSJoe Perches return sysfs_emit(buf, "%lu\n", ksm_run); 316031dbd01fSIzik Eidus } 316131dbd01fSIzik Eidus 316231dbd01fSIzik Eidus static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, 316331dbd01fSIzik Eidus const char *buf, size_t count) 316431dbd01fSIzik Eidus { 3165dfefd226SAlexey Dobriyan unsigned int flags; 316631dbd01fSIzik Eidus int err; 316731dbd01fSIzik Eidus 3168dfefd226SAlexey Dobriyan err = kstrtouint(buf, 10, &flags); 3169dfefd226SAlexey Dobriyan if (err) 317031dbd01fSIzik Eidus return -EINVAL; 317131dbd01fSIzik Eidus if (flags > KSM_RUN_UNMERGE) 317231dbd01fSIzik Eidus return -EINVAL; 317331dbd01fSIzik Eidus 317431dbd01fSIzik Eidus /* 317531dbd01fSIzik Eidus * KSM_RUN_MERGE sets ksmd running, and 0 stops it running. 317631dbd01fSIzik Eidus * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items, 3177d0f209f6SHugh Dickins * breaking COW to free the pages_shared (but leaves mm_slots 3178d0f209f6SHugh Dickins * on the list for when ksmd may be set running again). 317931dbd01fSIzik Eidus */ 318031dbd01fSIzik Eidus 318131dbd01fSIzik Eidus mutex_lock(&ksm_thread_mutex); 3182ef4d43a8SHugh Dickins wait_while_offlining(); 318331dbd01fSIzik Eidus if (ksm_run != flags) { 318431dbd01fSIzik Eidus ksm_run = flags; 3185d952b791SHugh Dickins if (flags & KSM_RUN_UNMERGE) { 3186e1e12d2fSDavid Rientjes set_current_oom_origin(); 3187d952b791SHugh Dickins err = unmerge_and_remove_all_rmap_items(); 3188e1e12d2fSDavid Rientjes clear_current_oom_origin(); 3189d952b791SHugh Dickins if (err) { 3190d952b791SHugh Dickins ksm_run = KSM_RUN_STOP; 3191d952b791SHugh Dickins count = err; 3192d952b791SHugh Dickins } 3193d952b791SHugh Dickins } 319431dbd01fSIzik Eidus } 319531dbd01fSIzik Eidus mutex_unlock(&ksm_thread_mutex); 319631dbd01fSIzik Eidus 319731dbd01fSIzik Eidus if (flags & KSM_RUN_MERGE) 319831dbd01fSIzik Eidus wake_up_interruptible(&ksm_thread_wait); 319931dbd01fSIzik Eidus 320031dbd01fSIzik Eidus return count; 320131dbd01fSIzik Eidus } 320231dbd01fSIzik Eidus KSM_ATTR(run); 320331dbd01fSIzik Eidus 320490bd6fd3SPetr Holasek #ifdef CONFIG_NUMA 320590bd6fd3SPetr Holasek static ssize_t merge_across_nodes_show(struct kobject *kobj, 320690bd6fd3SPetr Holasek struct kobj_attribute *attr, char *buf) 320790bd6fd3SPetr Holasek { 3208ae7a927dSJoe Perches return sysfs_emit(buf, "%u\n", ksm_merge_across_nodes); 320990bd6fd3SPetr Holasek } 321090bd6fd3SPetr Holasek 321190bd6fd3SPetr Holasek static ssize_t merge_across_nodes_store(struct kobject *kobj, 321290bd6fd3SPetr Holasek struct kobj_attribute *attr, 321390bd6fd3SPetr Holasek const char *buf, size_t count) 321490bd6fd3SPetr Holasek { 321590bd6fd3SPetr Holasek int err; 321690bd6fd3SPetr Holasek unsigned long knob; 321790bd6fd3SPetr Holasek 321890bd6fd3SPetr Holasek err = kstrtoul(buf, 10, &knob); 321990bd6fd3SPetr Holasek if (err) 322090bd6fd3SPetr Holasek return err; 322190bd6fd3SPetr Holasek if (knob > 1) 322290bd6fd3SPetr Holasek return -EINVAL; 322390bd6fd3SPetr Holasek 322490bd6fd3SPetr Holasek mutex_lock(&ksm_thread_mutex); 3225ef4d43a8SHugh Dickins wait_while_offlining(); 322690bd6fd3SPetr Holasek if (ksm_merge_across_nodes != knob) { 3227cbf86cfeSHugh Dickins if (ksm_pages_shared || remove_all_stable_nodes()) 322890bd6fd3SPetr Holasek err = -EBUSY; 3229ef53d16cSHugh Dickins else if (root_stable_tree == one_stable_tree) { 3230ef53d16cSHugh Dickins struct rb_root *buf; 3231ef53d16cSHugh Dickins /* 3232ef53d16cSHugh Dickins * This is the first time that we switch away from the 3233ef53d16cSHugh Dickins * default of merging across nodes: must now allocate 3234ef53d16cSHugh Dickins * a buffer to hold as many roots as may be needed. 3235ef53d16cSHugh Dickins * Allocate stable and unstable together: 3236ef53d16cSHugh Dickins * MAXSMP NODES_SHIFT 10 will use 16kB. 3237ef53d16cSHugh Dickins */ 3238bafe1e14SJoe Perches buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf), 3239bafe1e14SJoe Perches GFP_KERNEL); 3240ef53d16cSHugh Dickins /* Let us assume that RB_ROOT is NULL is zero */ 3241ef53d16cSHugh Dickins if (!buf) 3242ef53d16cSHugh Dickins err = -ENOMEM; 3243ef53d16cSHugh Dickins else { 3244ef53d16cSHugh Dickins root_stable_tree = buf; 3245ef53d16cSHugh Dickins root_unstable_tree = buf + nr_node_ids; 3246ef53d16cSHugh Dickins /* Stable tree is empty but not the unstable */ 3247ef53d16cSHugh Dickins root_unstable_tree[0] = one_unstable_tree[0]; 3248ef53d16cSHugh Dickins } 3249ef53d16cSHugh Dickins } 3250ef53d16cSHugh Dickins if (!err) { 325190bd6fd3SPetr Holasek ksm_merge_across_nodes = knob; 3252ef53d16cSHugh Dickins ksm_nr_node_ids = knob ? 1 : nr_node_ids; 3253ef53d16cSHugh Dickins } 325490bd6fd3SPetr Holasek } 325590bd6fd3SPetr Holasek mutex_unlock(&ksm_thread_mutex); 325690bd6fd3SPetr Holasek 325790bd6fd3SPetr Holasek return err ? err : count; 325890bd6fd3SPetr Holasek } 325990bd6fd3SPetr Holasek KSM_ATTR(merge_across_nodes); 326090bd6fd3SPetr Holasek #endif 326190bd6fd3SPetr Holasek 3262e86c59b1SClaudio Imbrenda static ssize_t use_zero_pages_show(struct kobject *kobj, 3263e86c59b1SClaudio Imbrenda struct kobj_attribute *attr, char *buf) 3264e86c59b1SClaudio Imbrenda { 3265ae7a927dSJoe Perches return sysfs_emit(buf, "%u\n", ksm_use_zero_pages); 3266e86c59b1SClaudio Imbrenda } 3267e86c59b1SClaudio Imbrenda static ssize_t use_zero_pages_store(struct kobject *kobj, 3268e86c59b1SClaudio Imbrenda struct kobj_attribute *attr, 3269e86c59b1SClaudio Imbrenda const char *buf, size_t count) 3270e86c59b1SClaudio Imbrenda { 3271e86c59b1SClaudio Imbrenda int err; 3272e86c59b1SClaudio Imbrenda bool value; 3273e86c59b1SClaudio Imbrenda 3274e86c59b1SClaudio Imbrenda err = kstrtobool(buf, &value); 3275e86c59b1SClaudio Imbrenda if (err) 3276e86c59b1SClaudio Imbrenda return -EINVAL; 3277e86c59b1SClaudio Imbrenda 3278e86c59b1SClaudio Imbrenda ksm_use_zero_pages = value; 3279e86c59b1SClaudio Imbrenda 3280e86c59b1SClaudio Imbrenda return count; 3281e86c59b1SClaudio Imbrenda } 3282e86c59b1SClaudio Imbrenda KSM_ATTR(use_zero_pages); 3283e86c59b1SClaudio Imbrenda 32842c653d0eSAndrea Arcangeli static ssize_t max_page_sharing_show(struct kobject *kobj, 32852c653d0eSAndrea Arcangeli struct kobj_attribute *attr, char *buf) 32862c653d0eSAndrea Arcangeli { 3287ae7a927dSJoe Perches return sysfs_emit(buf, "%u\n", ksm_max_page_sharing); 32882c653d0eSAndrea Arcangeli } 32892c653d0eSAndrea Arcangeli 32902c653d0eSAndrea Arcangeli static ssize_t max_page_sharing_store(struct kobject *kobj, 32912c653d0eSAndrea Arcangeli struct kobj_attribute *attr, 32922c653d0eSAndrea Arcangeli const char *buf, size_t count) 32932c653d0eSAndrea Arcangeli { 32942c653d0eSAndrea Arcangeli int err; 32952c653d0eSAndrea Arcangeli int knob; 32962c653d0eSAndrea Arcangeli 32972c653d0eSAndrea Arcangeli err = kstrtoint(buf, 10, &knob); 32982c653d0eSAndrea Arcangeli if (err) 32992c653d0eSAndrea Arcangeli return err; 33002c653d0eSAndrea Arcangeli /* 33012c653d0eSAndrea Arcangeli * When a KSM page is created it is shared by 2 mappings. This 33022c653d0eSAndrea Arcangeli * being a signed comparison, it implicitly verifies it's not 33032c653d0eSAndrea Arcangeli * negative. 33042c653d0eSAndrea Arcangeli */ 33052c653d0eSAndrea Arcangeli if (knob < 2) 33062c653d0eSAndrea Arcangeli return -EINVAL; 33072c653d0eSAndrea Arcangeli 33082c653d0eSAndrea Arcangeli if (READ_ONCE(ksm_max_page_sharing) == knob) 33092c653d0eSAndrea Arcangeli return count; 33102c653d0eSAndrea Arcangeli 33112c653d0eSAndrea Arcangeli mutex_lock(&ksm_thread_mutex); 33122c653d0eSAndrea Arcangeli wait_while_offlining(); 33132c653d0eSAndrea Arcangeli if (ksm_max_page_sharing != knob) { 33142c653d0eSAndrea Arcangeli if (ksm_pages_shared || remove_all_stable_nodes()) 33152c653d0eSAndrea Arcangeli err = -EBUSY; 33162c653d0eSAndrea Arcangeli else 33172c653d0eSAndrea Arcangeli ksm_max_page_sharing = knob; 33182c653d0eSAndrea Arcangeli } 33192c653d0eSAndrea Arcangeli mutex_unlock(&ksm_thread_mutex); 33202c653d0eSAndrea Arcangeli 33212c653d0eSAndrea Arcangeli return err ? err : count; 33222c653d0eSAndrea Arcangeli } 33232c653d0eSAndrea Arcangeli KSM_ATTR(max_page_sharing); 33242c653d0eSAndrea Arcangeli 3325b4028260SHugh Dickins static ssize_t pages_shared_show(struct kobject *kobj, 3326b4028260SHugh Dickins struct kobj_attribute *attr, char *buf) 3327b4028260SHugh Dickins { 3328ae7a927dSJoe Perches return sysfs_emit(buf, "%lu\n", ksm_pages_shared); 3329b4028260SHugh Dickins } 3330b4028260SHugh Dickins KSM_ATTR_RO(pages_shared); 3331b4028260SHugh Dickins 3332b4028260SHugh Dickins static ssize_t pages_sharing_show(struct kobject *kobj, 3333b4028260SHugh Dickins struct kobj_attribute *attr, char *buf) 3334b4028260SHugh Dickins { 3335ae7a927dSJoe Perches return sysfs_emit(buf, "%lu\n", ksm_pages_sharing); 3336b4028260SHugh Dickins } 3337b4028260SHugh Dickins KSM_ATTR_RO(pages_sharing); 3338b4028260SHugh Dickins 3339473b0ce4SHugh Dickins static ssize_t pages_unshared_show(struct kobject *kobj, 3340473b0ce4SHugh Dickins struct kobj_attribute *attr, char *buf) 3341473b0ce4SHugh Dickins { 3342ae7a927dSJoe Perches return sysfs_emit(buf, "%lu\n", ksm_pages_unshared); 3343473b0ce4SHugh Dickins } 3344473b0ce4SHugh Dickins KSM_ATTR_RO(pages_unshared); 3345473b0ce4SHugh Dickins 3346473b0ce4SHugh Dickins static ssize_t pages_volatile_show(struct kobject *kobj, 3347473b0ce4SHugh Dickins struct kobj_attribute *attr, char *buf) 3348473b0ce4SHugh Dickins { 3349473b0ce4SHugh Dickins long ksm_pages_volatile; 3350473b0ce4SHugh Dickins 3351473b0ce4SHugh Dickins ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared 3352473b0ce4SHugh Dickins - ksm_pages_sharing - ksm_pages_unshared; 3353473b0ce4SHugh Dickins /* 3354473b0ce4SHugh Dickins * It was not worth any locking to calculate that statistic, 3355473b0ce4SHugh Dickins * but it might therefore sometimes be negative: conceal that. 3356473b0ce4SHugh Dickins */ 3357473b0ce4SHugh Dickins if (ksm_pages_volatile < 0) 3358473b0ce4SHugh Dickins ksm_pages_volatile = 0; 3359ae7a927dSJoe Perches return sysfs_emit(buf, "%ld\n", ksm_pages_volatile); 3360473b0ce4SHugh Dickins } 3361473b0ce4SHugh Dickins KSM_ATTR_RO(pages_volatile); 3362473b0ce4SHugh Dickins 3363*e2942062Sxu xin static ssize_t ksm_zero_pages_show(struct kobject *kobj, 3364*e2942062Sxu xin struct kobj_attribute *attr, char *buf) 3365*e2942062Sxu xin { 3366*e2942062Sxu xin return sysfs_emit(buf, "%ld\n", ksm_zero_pages); 3367*e2942062Sxu xin } 3368*e2942062Sxu xin KSM_ATTR_RO(ksm_zero_pages); 3369*e2942062Sxu xin 3370d21077fbSStefan Roesch static ssize_t general_profit_show(struct kobject *kobj, 3371d21077fbSStefan Roesch struct kobj_attribute *attr, char *buf) 3372d21077fbSStefan Roesch { 3373d21077fbSStefan Roesch long general_profit; 3374d21077fbSStefan Roesch 3375d21077fbSStefan Roesch general_profit = ksm_pages_sharing * PAGE_SIZE - 3376d21077fbSStefan Roesch ksm_rmap_items * sizeof(struct ksm_rmap_item); 3377d21077fbSStefan Roesch 3378d21077fbSStefan Roesch return sysfs_emit(buf, "%ld\n", general_profit); 3379d21077fbSStefan Roesch } 3380d21077fbSStefan Roesch KSM_ATTR_RO(general_profit); 3381d21077fbSStefan Roesch 33822c653d0eSAndrea Arcangeli static ssize_t stable_node_dups_show(struct kobject *kobj, 33832c653d0eSAndrea Arcangeli struct kobj_attribute *attr, char *buf) 33842c653d0eSAndrea Arcangeli { 3385ae7a927dSJoe Perches return sysfs_emit(buf, "%lu\n", ksm_stable_node_dups); 33862c653d0eSAndrea Arcangeli } 33872c653d0eSAndrea Arcangeli KSM_ATTR_RO(stable_node_dups); 33882c653d0eSAndrea Arcangeli 33892c653d0eSAndrea Arcangeli static ssize_t stable_node_chains_show(struct kobject *kobj, 33902c653d0eSAndrea Arcangeli struct kobj_attribute *attr, char *buf) 33912c653d0eSAndrea Arcangeli { 3392ae7a927dSJoe Perches return sysfs_emit(buf, "%lu\n", ksm_stable_node_chains); 33932c653d0eSAndrea Arcangeli } 33942c653d0eSAndrea Arcangeli KSM_ATTR_RO(stable_node_chains); 33952c653d0eSAndrea Arcangeli 33962c653d0eSAndrea Arcangeli static ssize_t 33972c653d0eSAndrea Arcangeli stable_node_chains_prune_millisecs_show(struct kobject *kobj, 33982c653d0eSAndrea Arcangeli struct kobj_attribute *attr, 33992c653d0eSAndrea Arcangeli char *buf) 34002c653d0eSAndrea Arcangeli { 3401ae7a927dSJoe Perches return sysfs_emit(buf, "%u\n", ksm_stable_node_chains_prune_millisecs); 34022c653d0eSAndrea Arcangeli } 34032c653d0eSAndrea Arcangeli 34042c653d0eSAndrea Arcangeli static ssize_t 34052c653d0eSAndrea Arcangeli stable_node_chains_prune_millisecs_store(struct kobject *kobj, 34062c653d0eSAndrea Arcangeli struct kobj_attribute *attr, 34072c653d0eSAndrea Arcangeli const char *buf, size_t count) 34082c653d0eSAndrea Arcangeli { 3409584ff0dfSZhansaya Bagdauletkyzy unsigned int msecs; 34102c653d0eSAndrea Arcangeli int err; 34112c653d0eSAndrea Arcangeli 3412584ff0dfSZhansaya Bagdauletkyzy err = kstrtouint(buf, 10, &msecs); 3413584ff0dfSZhansaya Bagdauletkyzy if (err) 34142c653d0eSAndrea Arcangeli return -EINVAL; 34152c653d0eSAndrea Arcangeli 34162c653d0eSAndrea Arcangeli ksm_stable_node_chains_prune_millisecs = msecs; 34172c653d0eSAndrea Arcangeli 34182c653d0eSAndrea Arcangeli return count; 34192c653d0eSAndrea Arcangeli } 34202c653d0eSAndrea Arcangeli KSM_ATTR(stable_node_chains_prune_millisecs); 34212c653d0eSAndrea Arcangeli 3422473b0ce4SHugh Dickins static ssize_t full_scans_show(struct kobject *kobj, 3423473b0ce4SHugh Dickins struct kobj_attribute *attr, char *buf) 3424473b0ce4SHugh Dickins { 3425ae7a927dSJoe Perches return sysfs_emit(buf, "%lu\n", ksm_scan.seqnr); 3426473b0ce4SHugh Dickins } 3427473b0ce4SHugh Dickins KSM_ATTR_RO(full_scans); 3428473b0ce4SHugh Dickins 342931dbd01fSIzik Eidus static struct attribute *ksm_attrs[] = { 343031dbd01fSIzik Eidus &sleep_millisecs_attr.attr, 343131dbd01fSIzik Eidus &pages_to_scan_attr.attr, 343231dbd01fSIzik Eidus &run_attr.attr, 3433b4028260SHugh Dickins &pages_shared_attr.attr, 3434b4028260SHugh Dickins &pages_sharing_attr.attr, 3435473b0ce4SHugh Dickins &pages_unshared_attr.attr, 3436473b0ce4SHugh Dickins &pages_volatile_attr.attr, 3437*e2942062Sxu xin &ksm_zero_pages_attr.attr, 3438473b0ce4SHugh Dickins &full_scans_attr.attr, 343990bd6fd3SPetr Holasek #ifdef CONFIG_NUMA 344090bd6fd3SPetr Holasek &merge_across_nodes_attr.attr, 344190bd6fd3SPetr Holasek #endif 34422c653d0eSAndrea Arcangeli &max_page_sharing_attr.attr, 34432c653d0eSAndrea Arcangeli &stable_node_chains_attr.attr, 34442c653d0eSAndrea Arcangeli &stable_node_dups_attr.attr, 34452c653d0eSAndrea Arcangeli &stable_node_chains_prune_millisecs_attr.attr, 3446e86c59b1SClaudio Imbrenda &use_zero_pages_attr.attr, 3447d21077fbSStefan Roesch &general_profit_attr.attr, 344831dbd01fSIzik Eidus NULL, 344931dbd01fSIzik Eidus }; 345031dbd01fSIzik Eidus 3451f907c26aSArvind Yadav static const struct attribute_group ksm_attr_group = { 345231dbd01fSIzik Eidus .attrs = ksm_attrs, 345331dbd01fSIzik Eidus .name = "ksm", 345431dbd01fSIzik Eidus }; 34552ffd8679SHugh Dickins #endif /* CONFIG_SYSFS */ 345631dbd01fSIzik Eidus 345731dbd01fSIzik Eidus static int __init ksm_init(void) 345831dbd01fSIzik Eidus { 345931dbd01fSIzik Eidus struct task_struct *ksm_thread; 346031dbd01fSIzik Eidus int err; 346131dbd01fSIzik Eidus 3462e86c59b1SClaudio Imbrenda /* The correct value depends on page size and endianness */ 3463e86c59b1SClaudio Imbrenda zero_checksum = calc_checksum(ZERO_PAGE(0)); 3464e86c59b1SClaudio Imbrenda /* Default to false for backwards compatibility */ 3465e86c59b1SClaudio Imbrenda ksm_use_zero_pages = false; 3466e86c59b1SClaudio Imbrenda 346731dbd01fSIzik Eidus err = ksm_slab_init(); 346831dbd01fSIzik Eidus if (err) 346931dbd01fSIzik Eidus goto out; 347031dbd01fSIzik Eidus 347131dbd01fSIzik Eidus ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd"); 347231dbd01fSIzik Eidus if (IS_ERR(ksm_thread)) { 347325acde31SPaul McQuade pr_err("ksm: creating kthread failed\n"); 347431dbd01fSIzik Eidus err = PTR_ERR(ksm_thread); 3475d9f8984cSLai Jiangshan goto out_free; 347631dbd01fSIzik Eidus } 347731dbd01fSIzik Eidus 34782ffd8679SHugh Dickins #ifdef CONFIG_SYSFS 347931dbd01fSIzik Eidus err = sysfs_create_group(mm_kobj, &ksm_attr_group); 348031dbd01fSIzik Eidus if (err) { 348125acde31SPaul McQuade pr_err("ksm: register sysfs failed\n"); 34822ffd8679SHugh Dickins kthread_stop(ksm_thread); 3483d9f8984cSLai Jiangshan goto out_free; 348431dbd01fSIzik Eidus } 3485c73602adSHugh Dickins #else 3486c73602adSHugh Dickins ksm_run = KSM_RUN_MERGE; /* no way for user to start it */ 3487c73602adSHugh Dickins 34882ffd8679SHugh Dickins #endif /* CONFIG_SYSFS */ 348931dbd01fSIzik Eidus 349062b61f61SHugh Dickins #ifdef CONFIG_MEMORY_HOTREMOVE 3491ef4d43a8SHugh Dickins /* There is no significance to this priority 100 */ 34921eeaa4fdSLiu Shixin hotplug_memory_notifier(ksm_memory_callback, KSM_CALLBACK_PRI); 349362b61f61SHugh Dickins #endif 349431dbd01fSIzik Eidus return 0; 349531dbd01fSIzik Eidus 3496d9f8984cSLai Jiangshan out_free: 349731dbd01fSIzik Eidus ksm_slab_free(); 349831dbd01fSIzik Eidus out: 349931dbd01fSIzik Eidus return err; 350031dbd01fSIzik Eidus } 3501a64fb3cdSPaul Gortmaker subsys_initcall(ksm_init); 3502