// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork()
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Authors:
 *	Izik Eidus
 *	Andrea Arcangeli
 *	Chris Wright
 *	Hugh Dickins
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/xxhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>
#include <linux/hashtable.h>
#include <linux/freezer.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/pagewalk.h>

#include <asm/tlbflush.h>
#include "internal.h"
#include "mm_slot.h"

#ifdef CONFIG_NUMA
#define NUMA(x)		(x)
#define DO_NUMA(x)	do { (x); } while (0)
#else
#define NUMA(x)		(0)
#define DO_NUMA(x)	do { } while (0)
#endif

/**
 * DOC: Overview
 *
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents.  Because each such page is write-protected, searching on
 * this tree is fully assured to be working (except when pages are unmapped),
 * and therefore this tree is called the stable tree.
 *
 * The stable tree node includes information required for reverse
 * mapping from a KSM page to virtual addresses that map this page.
 *
 * In order to avoid large latencies of the rmap walks on KSM pages,
 * KSM maintains two types of nodes in the stable tree:
 *
 * * the regular nodes that keep the reverse mapping structures in a
 *   linked list
 * * the "chains" that link nodes ("dups") that represent the same
 *   write protected memory content, but each "dup" corresponds to a
 *   different KSM page copy of that content
 *
 * Internally, the regular nodes, "dups" and "chains" are represented
 * using the same struct ksm_stable_node structure.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time".  The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a red-black tree - so its balancing is based on the
 *    colors of the nodes and not on their contents, assuring that even when
 *    the tree gets "corrupted" it won't get out of balance, so scanning time
 *    remains the same (also, searching and inserting nodes in an rbtree uses
 *    the same algorithm, so we have no overhead when we flush and rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree.  (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
 *
 * If the merge_across_nodes tunable is unset, then KSM maintains multiple
 * stable trees and multiple unstable trees: one of each for each NUMA node.
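 *
 * For context: pages only become candidates for any of the above after
 * userspace has opted a region in, typically with
 *
 *	madvise(addr, length, MADV_MERGEABLE);
 *
 * and ksmd has been started via the sysfs "run" knob (see
 * Documentation/admin-guide/mm/ksm.rst for the tunables).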
 */

/**
 * struct ksm_mm_slot - ksm information per mm that is being scanned
 * @slot: hash lookup from mm to mm_slot
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 */
struct ksm_mm_slot {
	struct mm_slot slot;
	struct ksm_rmap_item *rmap_list;
};

/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
	struct ksm_mm_slot *mm_slot;
	unsigned long address;
	struct ksm_rmap_item **rmap_list;
	unsigned long seqnr;
};

/**
 * struct ksm_stable_node - node of the stable rbtree
 * @node: rb node of this ksm page in the stable tree
 * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
 * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
 * @list: linked into migrate_nodes, pending placement in the proper node tree
 * @hlist: hlist head of rmap_items using this ksm page
 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
 * @chain_prune_time: time of the last full garbage collection
 * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN
 * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
 */
struct ksm_stable_node {
	union {
		struct rb_node node;	/* when node of stable tree */
		struct {		/* when listed for migration */
			struct list_head *head;
			struct {
				struct hlist_node hlist_dup;
				struct list_head list;
			};
		};
	};
	struct hlist_head hlist;
	union {
		unsigned long kpfn;
		unsigned long chain_prune_time;
	};
	/*
	 * STABLE_NODE_CHAIN can be any negative number in
	 * rmap_hlist_len negative range, but better not -1 to be able
	 * to reliably detect underflows.
	 */
#define STABLE_NODE_CHAIN -1024
	int rmap_hlist_len;
#ifdef CONFIG_NUMA
	int nid;
#endif
};

/**
 * struct ksm_rmap_item - reverse mapping item for virtual addresses
 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
 * @nid: NUMA node id of unstable tree in which linked (may not match page)
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @node: rb node of this rmap_item in the unstable tree
 * @head: pointer to stable_node heading this list in the stable tree
 * @hlist: link into hlist of rmap_items hanging off that stable_node
 */
struct ksm_rmap_item {
	struct ksm_rmap_item *rmap_list;
	union {
		struct anon_vma *anon_vma;	/* when stable */
#ifdef CONFIG_NUMA
		int nid;		/* when node of unstable tree */
#endif
	};
	struct mm_struct *mm;
	unsigned long address;		/* + low bits used for flags below */
	unsigned int oldchecksum;	/* when unstable */
	union {
		struct rb_node node;	/* when node of unstable tree */
		struct {		/* when listed from stable tree */
			struct ksm_stable_node *head;
			struct hlist_node hlist;
		};
	};
};

#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
#define STABLE_FLAG	0x200	/* is listed from the stable tree */

/* The stable and unstable tree heads */
static struct rb_root one_stable_tree[1] = { RB_ROOT };
static struct rb_root one_unstable_tree[1] = { RB_ROOT };
static struct rb_root *root_stable_tree = one_stable_tree;
static struct rb_root *root_unstable_tree = one_unstable_tree;

/* Recently migrated nodes of stable tree, pending proper placement */
static LIST_HEAD(migrate_nodes);
#define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)

#define MM_SLOTS_HASH_BITS 10
static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct ksm_mm_slot ksm_mm_head = {
	.slot.mm_node = LIST_HEAD_INIT(ksm_mm_head.slot.mm_node),
};
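
/*
 * ksm_mm_head anchors the circular list of all mm_slots registered for
 * scanning; the single ksm_scan cursor below walks that list round-robin.
 */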
static struct ksm_scan ksm_scan = {
	.mm_slot = &ksm_mm_head,
};

static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* The number of stable_node chains */
static unsigned long ksm_stable_node_chains;

/* The number of stable_node dups linked to the stable_node chains */
static unsigned long ksm_stable_node_dups;

/* Delay in pruning stale stable_node_dups in the stable_node_chains */
static unsigned int ksm_stable_node_chains_prune_millisecs = 2000;

/* Maximum number of page slots sharing a stable node */
static int ksm_max_page_sharing = 256;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = 100;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;

/* Checksum of an empty (zeroed) page */
static unsigned int zero_checksum __read_mostly;

/* Whether to merge empty (zeroed) pages with actual zero pages */
static bool ksm_use_zero_pages __read_mostly;

#ifdef CONFIG_NUMA
/* Zeroed when merging across nodes is not allowed */
static unsigned int ksm_merge_across_nodes = 1;
static int ksm_nr_node_ids = 1;
#else
#define ksm_merge_across_nodes	1U
#define ksm_nr_node_ids		1
#endif

#define KSM_RUN_STOP	0
#define KSM_RUN_MERGE	1
#define KSM_RUN_UNMERGE	2
#define KSM_RUN_OFFLINE	4
static unsigned long ksm_run = KSM_RUN_STOP;
static void wait_while_offlining(void);

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);

#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)

static int __init ksm_slab_init(void)
{
	rmap_item_cache = KSM_KMEM_CACHE(ksm_rmap_item, 0);
	if (!rmap_item_cache)
		goto out;

	stable_node_cache = KSM_KMEM_CACHE(ksm_stable_node, 0);
	if (!stable_node_cache)
		goto out_free1;

	mm_slot_cache = KSM_KMEM_CACHE(ksm_mm_slot, 0);
	if (!mm_slot_cache)
		goto out_free2;

	return 0;

out_free2:
	kmem_cache_destroy(stable_node_cache);
out_free1:
	kmem_cache_destroy(rmap_item_cache);
out:
	return -ENOMEM;
}

static void __init ksm_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	kmem_cache_destroy(stable_node_cache);
	kmem_cache_destroy(rmap_item_cache);
	mm_slot_cache = NULL;
}

static __always_inline bool is_stable_node_chain(struct ksm_stable_node *chain)
{
	return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
}

static __always_inline bool is_stable_node_dup(struct ksm_stable_node *dup)
{
	return dup->head == STABLE_NODE_DUP_HEAD;
}

static inline void stable_node_chain_add_dup(struct ksm_stable_node *dup,
					     struct ksm_stable_node *chain)
{
	VM_BUG_ON(is_stable_node_dup(dup));
	dup->head = STABLE_NODE_DUP_HEAD;
	VM_BUG_ON(!is_stable_node_chain(chain));
	hlist_add_head(&dup->hlist_dup, &chain->hlist);
	ksm_stable_node_dups++;
}

static inline void __stable_node_dup_del(struct ksm_stable_node *dup)
{
	VM_BUG_ON(!is_stable_node_dup(dup));
	hlist_del(&dup->hlist_dup);
	ksm_stable_node_dups--;
}

static inline void stable_node_dup_del(struct ksm_stable_node *dup)
{
	VM_BUG_ON(is_stable_node_chain(dup));
	if (is_stable_node_dup(dup))
		__stable_node_dup_del(dup);
	else
		rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));
#ifdef CONFIG_DEBUG_VM
	dup->head = NULL;
#endif
}

static inline struct ksm_rmap_item *alloc_rmap_item(void)
{
	struct ksm_rmap_item *rmap_item;

	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
						__GFP_NORETRY | __GFP_NOWARN);
	if (rmap_item)
		ksm_rmap_items++;
	return rmap_item;
}

static inline void free_rmap_item(struct ksm_rmap_item *rmap_item)
{
	ksm_rmap_items--;
	rmap_item->mm->ksm_rmap_items--;
	rmap_item->mm = NULL;	/* debug safety */
	kmem_cache_free(rmap_item_cache, rmap_item);
}

static inline struct ksm_stable_node *alloc_stable_node(void)
{
	/*
	 * The allocation can take too long with GFP_KERNEL when memory is under
	 * pressure, which may lead to hung task warnings.  Adding __GFP_HIGH
	 * grants access to memory reserves, helping to avoid this problem.
	 */
	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
}

static inline void free_stable_node(struct ksm_stable_node *stable_node)
{
	VM_BUG_ON(stable_node->rmap_hlist_len &&
		  !is_stable_node_chain(stable_node));
	kmem_cache_free(stable_node_cache, stable_node);
}

/*
 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
 * page tables after it has passed through ksm_exit() - which, if necessary,
 * takes mmap_lock briefly to serialize against them.  ksm_exit() does not set
 * a special flag: they can just back out as soon as mm_users goes to zero.
 * ksm_test_exit() is used throughout to make this test for exit: in some
 * places for correctness, in some places just to avoid unnecessary work.
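 * (mm_users hitting zero is the trigger: ksm_exit() is called from
 * __mmput() only after the last reference to mm_users has been dropped.)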
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next,
			struct mm_walk *walk)
{
	struct page *page = NULL;
	spinlock_t *ptl;
	pte_t *pte;
	int ret;

	if (pmd_leaf(*pmd) || !pmd_present(*pmd))
		return 0;

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (pte_present(*pte)) {
		page = vm_normal_page(walk->vma, addr, *pte);
	} else if (!pte_none(*pte)) {
		swp_entry_t entry = pte_to_swp_entry(*pte);

		/*
		 * As KSM pages remain KSM pages until freed, no need to wait
		 * here for migration to end.
		 */
		if (is_migration_entry(entry))
			page = pfn_swap_entry_to_page(entry);
	}
	ret = page && PageKsm(page);
	pte_unmap_unlock(pte, ptl);
	return ret;
}

static const struct mm_walk_ops break_ksm_ops = {
	.pmd_entry = break_ksm_pmd_entry,
};

/*
 * We use break_ksm to break COW on a ksm page by triggering unsharing,
 * such that the ksm page will get replaced by an exclusive anonymous page.
 *
 * We take great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem, where we would not want to touch it.
 *
 * FAULT_FLAG_REMOTE/FOLL_REMOTE are because we do this outside the context
 * of the process that owns 'vma'.  We also do not want to enforce
 * protection keys here anyway.
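 *
 * Note that break_ksm() unshares just one page per call; callers that need
 * a whole range unmerged loop over it, as unmerge_ksm_pages() does below.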
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
{
	vm_fault_t ret = 0;

	do {
		int ksm_page;

		cond_resched();
		ksm_page = walk_page_range_vma(vma, addr, addr + 1,
					       &break_ksm_ops, NULL);
		if (WARN_ON_ONCE(ksm_page < 0))
			return ksm_page;
		if (!ksm_page)
			return 0;
		ret = handle_mm_fault(vma, addr,
				      FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE,
				      NULL);
	} while (!(ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
	/*
	 * We must loop until we no longer find a KSM page because
	 * handle_mm_fault() may back out if there's any difficulty e.g. if
	 * pte accessed bit gets updated concurrently.
	 *
	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
	 * backing file, which also invalidates anonymous pages: that's
	 * okay, that truncation will have unmapped the PageKsm for us.
	 *
	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
	 * current task has TIF_MEMDIE set, and will be OOM killed on return
	 * to user; and ksmd, having no mm, would never be chosen for that.
	 *
	 * But if the mm is in a limited mem_cgroup, then the fault may fail
	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
	 * even ksmd can fail in this way - though it's usually breaking ksm
	 * just to undo a merge it made a moment before, so unlikely to oom.
	 *
	 * That's a pity: we might therefore have more kernel pages allocated
	 * than we're counting as nodes in the stable tree; but ksm_do_scan
	 * will retry to break_cow on each pass, so should recover the page
	 * in due course.  The important thing is to not let VM_MERGEABLE
	 * be cleared while any such pages might remain in the area.
	 */
	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}
static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
		unsigned long addr)
{
	struct vm_area_struct *vma;
	if (ksm_test_exit(mm))
		return NULL;
	vma = vma_lookup(mm, addr);
	if (!vma || !(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		return NULL;
	return vma;
}

static void break_cow(struct ksm_rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;

	/*
	 * It is not an accident that whenever we want to break COW
	 * to undo, we also need to drop a reference to the anon_vma.
	 */
	put_anon_vma(rmap_item->anon_vma);

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, addr);
	if (vma)
		break_ksm(vma, addr);
	mmap_read_unlock(mm);
}

static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;
	struct page *page;

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, addr);
	if (!vma)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	if (is_zone_device_page(page))
		goto out_putpage;
	if (PageAnon(page)) {
		flush_anon_page(vma, page, addr);
		flush_dcache_page(page);
	} else {
out_putpage:
		put_page(page);
out:
		page = NULL;
	}
	mmap_read_unlock(mm);
	return page;
}

/*
 * This helper is used for getting the right index into the array of tree
 * roots.  When the merge_across_nodes knob is set to 1, there are only two
 * rb-trees for stable and unstable pages from all nodes, with roots in
 * index 0.  Otherwise, every node has its own stable and unstable tree.
 */
static inline int get_kpfn_nid(unsigned long kpfn)
{
	return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
}
static struct ksm_stable_node *alloc_stable_node_chain(struct ksm_stable_node *dup,
						       struct rb_root *root)
{
	struct ksm_stable_node *chain = alloc_stable_node();
	VM_BUG_ON(is_stable_node_chain(dup));
	if (likely(chain)) {
		INIT_HLIST_HEAD(&chain->hlist);
		chain->chain_prune_time = jiffies;
		chain->rmap_hlist_len = STABLE_NODE_CHAIN;
#if defined(CONFIG_DEBUG_VM) && defined(CONFIG_NUMA)
		chain->nid = NUMA_NO_NODE; /* debug */
#endif
		ksm_stable_node_chains++;

		/*
		 * Put the stable node chain in the first dimension of
		 * the stable tree and at the same time remove the old
		 * stable node.
		 */
		rb_replace_node(&dup->node, &chain->node, root);

		/*
		 * Move the old stable node to the second dimension
		 * queued in the hlist_dup. The invariant is that all
		 * dup stable_nodes in the chain->hlist point to pages
		 * that are write protected and have the exact same
		 * content.
		 */
		stable_node_chain_add_dup(dup, chain);
	}
	return chain;
}

static inline void free_stable_node_chain(struct ksm_stable_node *chain,
					  struct rb_root *root)
{
	rb_erase(&chain->node, root);
	free_stable_node(chain);
	ksm_stable_node_chains--;
}

static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node)
{
	struct ksm_rmap_item *rmap_item;

	/* check it's not STABLE_NODE_CHAIN or negative */
	BUG_ON(stable_node->rmap_hlist_len < 0);

	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		if (rmap_item->hlist.next)
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;

		rmap_item->mm->ksm_merging_pages--;

		VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
		stable_node->rmap_hlist_len--;
		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;
		cond_resched();
	}

	/*
	 * We need the second aligned pointer of the migrate_nodes
	 * list_head to stay clear from the rb_parent_color union
	 * (aligned and different than any node) and also different
	 * from &migrate_nodes. This will verify that future list.h changes
	 * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it.
	 */
	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);

	if (stable_node->head == &migrate_nodes)
		list_del(&stable_node->list);
	else
		stable_node_dup_del(stable_node);
	free_stable_node(stable_node);
}

enum get_ksm_page_flags {
	GET_KSM_PAGE_NOLOCK,
	GET_KSM_PAGE_LOCK,
	GET_KSM_PAGE_TRYLOCK
};

/*
 * get_ksm_page: checks if the page indicated by the stable node
 * is still its ksm page, despite having held no reference to it.
 * In which case we can trust the content of the page, and it
 * returns the gotten page; but if the page has now been zapped,
 * remove the stale node from the stable tree and return NULL.
 * But beware, the stable node's page might be being migrated.
 *
 * You would expect the stable_node to hold a reference to the ksm page.
 * But if it increments the page's count, swapping out has to wait for
 * ksmd to come around again before it can free the page, which may take
 * seconds or even minutes: much too unresponsive.  So instead we use a
 * "keyhole reference": access to the ksm page from the stable node peeps
 * out through its keyhole to see if that page still holds the right key,
 * pointing back to this stable node.  This relies on freeing a PageAnon
 * page to reset its page->mapping to NULL, and relies on no other use of
 * a page to put something that might look like our key in page->mapping
 * while the page is on its way to being freed; but it is an anomaly to
 * bear in mind.
 */
static struct page *get_ksm_page(struct ksm_stable_node *stable_node,
				 enum get_ksm_page_flags flags)
{
	struct page *page;
	void *expected_mapping;
	unsigned long kpfn;

	expected_mapping = (void *)((unsigned long)stable_node |
					PAGE_MAPPING_KSM);
again:
	kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
	page = pfn_to_page(kpfn);
	if (READ_ONCE(page->mapping) != expected_mapping)
		goto stale;

	/*
	 * We cannot do anything with the page while its refcount is 0.
	 * Usually 0 means free, or tail of a higher-order page: in which
	 * case this node is no longer referenced, and should be freed;
	 * however, it might mean that the page is under page_ref_freeze().
	 * The __remove_mapping() case is easy, again the node is now stale;
	 * the same is true in the reuse_ksm_page() case; but if page is
	 * swapcache in folio_migrate_mapping(), it might still be our page,
	 * in which case it's essential to keep the node.
	 */
	while (!get_page_unless_zero(page)) {
		/*
		 * Another check for page->mapping != expected_mapping would
		 * work here too.  We have chosen the !PageSwapCache test to
		 * optimize the common case, when the page is or is about to
		 * be freed: PageSwapCache is cleared (under spin_lock_irq)
		 * in the ref_freeze section of __remove_mapping(); but Anon
		 * page->mapping reset to NULL later, in free_pages_prepare().
		 */
		if (!PageSwapCache(page))
			goto stale;
		cpu_relax();
	}

	if (READ_ONCE(page->mapping) != expected_mapping) {
		put_page(page);
		goto stale;
	}

	if (flags == GET_KSM_PAGE_TRYLOCK) {
		if (!trylock_page(page)) {
			put_page(page);
			return ERR_PTR(-EBUSY);
		}
	} else if (flags == GET_KSM_PAGE_LOCK)
		lock_page(page);

	if (flags != GET_KSM_PAGE_NOLOCK) {
		if (READ_ONCE(page->mapping) != expected_mapping) {
			unlock_page(page);
			put_page(page);
			goto stale;
		}
	}
	return page;

stale:
	/*
	 * We come here from above when page->mapping or !PageSwapCache
	 * suggests that the node is stale; but it might be under migration.
	 * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(),
	 * before checking whether node->kpfn has been changed.
	 */
	smp_rmb();
	if (READ_ONCE(stable_node->kpfn) != kpfn)
		goto again;
	remove_node_from_stable_tree(stable_node);
	return NULL;
}

/*
 * Removing rmap_item from stable or unstable tree.
 * This function will clean the information from the stable/unstable tree.
 */
static void remove_rmap_item_from_tree(struct ksm_rmap_item *rmap_item)
{
	if (rmap_item->address & STABLE_FLAG) {
		struct ksm_stable_node *stable_node;
		struct page *page;

		stable_node = rmap_item->head;
		page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
		if (!page)
			goto out;

		hlist_del(&rmap_item->hlist);
		unlock_page(page);
		put_page(page);

		if (!hlist_empty(&stable_node->hlist))
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;

		rmap_item->mm->ksm_merging_pages--;

		VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
		stable_node->rmap_hlist_len--;

		put_anon_vma(rmap_item->anon_vma);
		rmap_item->head = NULL;
		rmap_item->address &= PAGE_MASK;

	} else if (rmap_item->address & UNSTABLE_FLAG) {
		unsigned char age;
		/*
		 * Usually ksmd can and must skip the rb_erase, because
		 * root_unstable_tree was already reset to RB_ROOT.
		 * But be careful when an mm is exiting: do the rb_erase
		 * if this rmap_item was inserted by this scan, rather
		 * than left over from before.
		 */
		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
		BUG_ON(age > 1);
		if (!age)
			rb_erase(&rmap_item->node,
				 root_unstable_tree + NUMA(rmap_item->nid));
		ksm_pages_unshared--;
		rmap_item->address &= PAGE_MASK;
	}
out:
	cond_resched();		/* we're called from many long loops */
}

static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list)
{
	while (*rmap_list) {
		struct ksm_rmap_item *rmap_item = *rmap_list;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}
}

/*
 * Though it's very tempting to unmerge rmap_items from the stable tree rather
 * than check every pte of a given vma, the locking doesn't quite work for
 * that - an rmap_item is assigned to the stable tree after inserting ksm
 * page and upping mmap_lock.  Nor does it fit with the way we skip dup'ing
 * rmap_items from parent to child at fork time (so as not to waste time
 * if exit comes before the next scan reaches it).
 *
 * Similarly, although we'd like to remove rmap_items (so updating counts
 * and freeing memory) when unmerging an area, it's easier to leave that
 * to the next pass of ksmd - consider, for example, how ksmd might be
 * in cmp_and_merge_page on one of the rmap_items we would be removing.
 */
static int unmerge_ksm_pages(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
		if (ksm_test_exit(vma->vm_mm))
			break;
		if (signal_pending(current))
			err = -ERESTARTSYS;
		else
			err = break_ksm(vma, addr);
	}
	return err;
}

static inline struct ksm_stable_node *folio_stable_node(struct folio *folio)
{
	return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL;
}

static inline struct ksm_stable_node *page_stable_node(struct page *page)
{
	return folio_stable_node(page_folio(page));
}

static inline void set_page_stable_node(struct page *page,
					struct ksm_stable_node *stable_node)
{
	VM_BUG_ON_PAGE(PageAnon(page) && PageAnonExclusive(page), page);
	page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
}

#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */
static int remove_stable_node(struct ksm_stable_node *stable_node)
{
	struct page *page;
	int err;

	page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
	if (!page) {
		/*
		 * get_ksm_page did remove_node_from_stable_tree itself.
		 */
		return 0;
	}

	/*
	 * Page could be still mapped if this races with __mmput() running in
	 * between ksm_exit() and exit_mmap(). Just refuse to let
	 * merge_across_nodes/max_page_sharing be switched.
	 */
	err = -EBUSY;
	if (!page_mapped(page)) {
		/*
		 * The stable node did not yet appear stale to get_ksm_page(),
		 * since that allows for an unmapped ksm page to be recognized
		 * right up until it is freed; but the node is safe to remove.
		 * This page might be in a pagevec waiting to be freed,
		 * or it might be PageSwapCache (perhaps under writeback),
		 * or it might have been removed from swapcache a moment ago.
		 */
		set_page_stable_node(page, NULL);
		remove_node_from_stable_tree(stable_node);
		err = 0;
	}

	unlock_page(page);
	put_page(page);
	return err;
}

static int remove_stable_node_chain(struct ksm_stable_node *stable_node,
				    struct rb_root *root)
{
	struct ksm_stable_node *dup;
	struct hlist_node *hlist_safe;

	if (!is_stable_node_chain(stable_node)) {
		VM_BUG_ON(is_stable_node_dup(stable_node));
		if (remove_stable_node(stable_node))
			return true;
		else
			return false;
	}

	hlist_for_each_entry_safe(dup, hlist_safe,
				  &stable_node->hlist, hlist_dup) {
		VM_BUG_ON(!is_stable_node_dup(dup));
		if (remove_stable_node(dup))
			return true;
	}
	BUG_ON(!hlist_empty(&stable_node->hlist));
	free_stable_node_chain(stable_node, root);
	return false;
}

static int remove_all_stable_nodes(void)
{
	struct ksm_stable_node *stable_node, *next;
	int nid;
	int err = 0;

	for (nid = 0; nid < ksm_nr_node_ids; nid++) {
		while (root_stable_tree[nid].rb_node) {
			stable_node = rb_entry(root_stable_tree[nid].rb_node,
					       struct ksm_stable_node, node);
			if (remove_stable_node_chain(stable_node,
						     root_stable_tree + nid)) {
				err = -EBUSY;
				break;	/* proceed to next nid */
			}
			cond_resched();
		}
	}
	list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
		if (remove_stable_node(stable_node))
			err = -EBUSY;
		cond_resched();
	}
	return err;
}
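
/*
 * Unmerge everything: walk every registered mm, break COW on each merged
 * page in its mergeable VMAs, drop the rmap_items, then prune the stable
 * tree.  This is what a write of KSM_RUN_UNMERGE to the sysfs "run" file
 * ends up calling.
 */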
static int unmerge_and_remove_all_rmap_items(void)
{
	struct ksm_mm_slot *mm_slot;
	struct mm_slot *slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int err = 0;

	spin_lock(&ksm_mmlist_lock);
	slot = list_entry(ksm_mm_head.slot.mm_node.next,
			  struct mm_slot, mm_node);
	ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
	spin_unlock(&ksm_mmlist_lock);

	for (mm_slot = ksm_scan.mm_slot; mm_slot != &ksm_mm_head;
	     mm_slot = ksm_scan.mm_slot) {
		VMA_ITERATOR(vmi, mm_slot->slot.mm, 0);

		mm = mm_slot->slot.mm;
		mmap_read_lock(mm);
		for_each_vma(vmi, vma) {
			if (ksm_test_exit(mm))
				break;
			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
				continue;
			err = unmerge_ksm_pages(vma,
						vma->vm_start, vma->vm_end);
			if (err)
				goto error;
		}

		remove_trailing_rmap_items(&mm_slot->rmap_list);
		mmap_read_unlock(mm);

		spin_lock(&ksm_mmlist_lock);
		slot = list_entry(mm_slot->slot.mm_node.next,
				  struct mm_slot, mm_node);
		ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
		if (ksm_test_exit(mm)) {
			hash_del(&mm_slot->slot.hash);
			list_del(&mm_slot->slot.mm_node);
			spin_unlock(&ksm_mmlist_lock);

			mm_slot_free(mm_slot_cache, mm_slot);
			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
			mmdrop(mm);
		} else
			spin_unlock(&ksm_mmlist_lock);
	}

	/* Clean up stable nodes, but don't worry if some are still busy */
	remove_all_stable_nodes();
	ksm_scan.seqnr = 0;
	return 0;

error:
	mmap_read_unlock(mm);
	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = &ksm_mm_head;
	spin_unlock(&ksm_mmlist_lock);
	return err;
}
#endif /* CONFIG_SYSFS */

static u32 calc_checksum(struct page *page)
{
	u32 checksum;
	void *addr = kmap_atomic(page);
	checksum = xxhash(addr, PAGE_SIZE, 0);
	kunmap_atomic(addr);
	return checksum;
}

Eidus
104431dbd01fSIzik Eidus static int write_protect_page(struct vm_area_struct *vma, struct page *page,
104531dbd01fSIzik Eidus pte_t *orig_pte)
104631dbd01fSIzik Eidus {
104731dbd01fSIzik Eidus struct mm_struct *mm = vma->vm_mm;
1048eed05e54SMatthew Wilcox (Oracle) DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0);
104931dbd01fSIzik Eidus int swapped;
105031dbd01fSIzik Eidus int err = -EFAULT;
1051ac46d4f3SJérôme Glisse struct mmu_notifier_range range;
10526c287605SDavid Hildenbrand bool anon_exclusive;
105331dbd01fSIzik Eidus
105436eaff33SKirill A. Shutemov pvmw.address = page_address_in_vma(page, vma);
105536eaff33SKirill A. Shutemov if (pvmw.address == -EFAULT)
105631dbd01fSIzik Eidus goto out;
105731dbd01fSIzik Eidus
105829ad768cSAndrea Arcangeli BUG_ON(PageTransCompound(page));
10596bdb913fSHaggai Eran
1060*7d4a8be0SAlistair Popple mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address,
1061ac46d4f3SJérôme Glisse pvmw.address + PAGE_SIZE);
1062ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range);
10636bdb913fSHaggai Eran
106436eaff33SKirill A. Shutemov if (!page_vma_mapped_walk(&pvmw))
10656bdb913fSHaggai Eran goto out_mn;
106636eaff33SKirill A. Shutemov if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
106736eaff33SKirill A. Shutemov goto out_unlock;
106831dbd01fSIzik Eidus
10696c287605SDavid Hildenbrand anon_exclusive = PageAnonExclusive(page);
1070595cd8f2SAneesh Kumar K.V if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
10716c287605SDavid Hildenbrand anon_exclusive || mm_tlb_flush_pending(mm)) {
107231dbd01fSIzik Eidus pte_t entry;
107331dbd01fSIzik Eidus
107431dbd01fSIzik Eidus swapped = PageSwapCache(page);
107536eaff33SKirill A. Shutemov flush_cache_page(vma, pvmw.address, page_to_pfn(page));
107631dbd01fSIzik Eidus /*
107725985edcSLucas De Marchi * Ok this is tricky, when get_user_pages_fast() runs it doesn't
107831dbd01fSIzik Eidus * take any lock, therefore the check that we are going to make
1079f0953a1bSIngo Molnar * with the pagecount against the mapcount is racy and
108031dbd01fSIzik Eidus * O_DIRECT can happen right after the check.
108131dbd01fSIzik Eidus * So we clear the pte and flush the tlb before the check;
108231dbd01fSIzik Eidus * this assures us that no O_DIRECT can happen after the check
108331dbd01fSIzik Eidus * or in the middle of the check.
10840f10851eSJérôme Glisse *
10850f10851eSJérôme Glisse * No need to notify as we are downgrading page table to read
10860f10851eSJérôme Glisse * only, not changing it to point to a new page.
10870f10851eSJérôme Glisse *
1088ee65728eSMike Rapoport * See Documentation/mm/mmu_notifier.rst
108931dbd01fSIzik Eidus */
10900f10851eSJérôme Glisse entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
109131dbd01fSIzik Eidus /*
109231dbd01fSIzik Eidus * Check that no O_DIRECT or similar I/O is in progress on the
109331dbd01fSIzik Eidus * page.
109431dbd01fSIzik Eidus */
109531e855eaSHugh Dickins if (page_mapcount(page) + 1 + swapped != page_count(page)) {
109636eaff33SKirill A. Shutemov set_pte_at(mm, pvmw.address, pvmw.pte, entry);
109731dbd01fSIzik Eidus goto out_unlock;
109831dbd01fSIzik Eidus }
10996c287605SDavid Hildenbrand
1100088b8aa5SDavid Hildenbrand /* See page_try_share_anon_rmap(): clear PTE first.
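 * The pte was already cleared and the tlb flushed above, so a racing
 * GUP-fast can no longer take a new reference; if the page may still be
 * pinned, page_try_share_anon_rmap() fails and we restore the pte below.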
*/ 11016c287605SDavid Hildenbrand if (anon_exclusive && page_try_share_anon_rmap(page)) { 11026c287605SDavid Hildenbrand set_pte_at(mm, pvmw.address, pvmw.pte, entry); 11036c287605SDavid Hildenbrand goto out_unlock; 11046c287605SDavid Hildenbrand } 11056c287605SDavid Hildenbrand 11064e31635cSHugh Dickins if (pte_dirty(entry)) 11074e31635cSHugh Dickins set_page_dirty(page); 11086a56ccbcSDavid Hildenbrand entry = pte_mkclean(entry); 1109595cd8f2SAneesh Kumar K.V 11106a56ccbcSDavid Hildenbrand if (pte_write(entry)) 11116a56ccbcSDavid Hildenbrand entry = pte_wrprotect(entry); 11126a56ccbcSDavid Hildenbrand 111336eaff33SKirill A. Shutemov set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry); 111431dbd01fSIzik Eidus } 111536eaff33SKirill A. Shutemov *orig_pte = *pvmw.pte; 111631dbd01fSIzik Eidus err = 0; 111731dbd01fSIzik Eidus 111831dbd01fSIzik Eidus out_unlock: 111936eaff33SKirill A. Shutemov page_vma_mapped_walk_done(&pvmw); 11206bdb913fSHaggai Eran out_mn: 1121ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_end(&range); 112231dbd01fSIzik Eidus out: 112331dbd01fSIzik Eidus return err; 112431dbd01fSIzik Eidus } 112531dbd01fSIzik Eidus 112631dbd01fSIzik Eidus /** 112731dbd01fSIzik Eidus * replace_page - replace page in vma by new ksm page 11288dd3557aSHugh Dickins * @vma: vma that holds the pte pointing to page 11298dd3557aSHugh Dickins * @page: the page we are replacing by kpage 11308dd3557aSHugh Dickins * @kpage: the ksm page we replace page by 113131dbd01fSIzik Eidus * @orig_pte: the original value of the pte 113231dbd01fSIzik Eidus * 113331dbd01fSIzik Eidus * Returns 0 on success, -EFAULT on failure. 113431dbd01fSIzik Eidus */ 11358dd3557aSHugh Dickins static int replace_page(struct vm_area_struct *vma, struct page *page, 11368dd3557aSHugh Dickins struct page *kpage, pte_t orig_pte) 113731dbd01fSIzik Eidus { 113831dbd01fSIzik Eidus struct mm_struct *mm = vma->vm_mm; 1139b4e6f66eSMatthew Wilcox (Oracle) struct folio *folio; 114031dbd01fSIzik Eidus pmd_t *pmd; 114150722804SZach O'Keefe pmd_t pmde; 114231dbd01fSIzik Eidus pte_t *ptep; 1143e86c59b1SClaudio Imbrenda pte_t newpte; 114431dbd01fSIzik Eidus spinlock_t *ptl; 114531dbd01fSIzik Eidus unsigned long addr; 114631dbd01fSIzik Eidus int err = -EFAULT; 1147ac46d4f3SJérôme Glisse struct mmu_notifier_range range; 114831dbd01fSIzik Eidus 11498dd3557aSHugh Dickins addr = page_address_in_vma(page, vma); 115031dbd01fSIzik Eidus if (addr == -EFAULT) 115131dbd01fSIzik Eidus goto out; 115231dbd01fSIzik Eidus 11536219049aSBob Liu pmd = mm_find_pmd(mm, addr); 11546219049aSBob Liu if (!pmd) 115531dbd01fSIzik Eidus goto out; 115650722804SZach O'Keefe /* 115750722804SZach O'Keefe * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at() 115850722804SZach O'Keefe * without holding anon_vma lock for write. So when looking for a 115950722804SZach O'Keefe * genuine pmde (in which to find pte), test present and !THP together. 
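 * The barrier() below keeps the compiler from re-reading *pmd, so the
 * present and trans_huge tests are both made on one snapshot (pmde).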
116050722804SZach O'Keefe */ 116150722804SZach O'Keefe pmde = *pmd; 116250722804SZach O'Keefe barrier(); 116350722804SZach O'Keefe if (!pmd_present(pmde) || pmd_trans_huge(pmde)) 116450722804SZach O'Keefe goto out; 116531dbd01fSIzik Eidus 1166*7d4a8be0SAlistair Popple mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr, 11676f4f13e8SJérôme Glisse addr + PAGE_SIZE); 1168ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 11696bdb913fSHaggai Eran 117031dbd01fSIzik Eidus ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); 117131dbd01fSIzik Eidus if (!pte_same(*ptep, orig_pte)) { 117231dbd01fSIzik Eidus pte_unmap_unlock(ptep, ptl); 11736bdb913fSHaggai Eran goto out_mn; 117431dbd01fSIzik Eidus } 11756c287605SDavid Hildenbrand VM_BUG_ON_PAGE(PageAnonExclusive(page), page); 11766c287605SDavid Hildenbrand VM_BUG_ON_PAGE(PageAnon(kpage) && PageAnonExclusive(kpage), kpage); 117731dbd01fSIzik Eidus 1178e86c59b1SClaudio Imbrenda /* 1179e86c59b1SClaudio Imbrenda * No need to check ksm_use_zero_pages here: we can only have a 1180457aef94SEthon Paul * zero_page here if ksm_use_zero_pages was enabled already. 1181e86c59b1SClaudio Imbrenda */ 1182e86c59b1SClaudio Imbrenda if (!is_zero_pfn(page_to_pfn(kpage))) { 11838dd3557aSHugh Dickins get_page(kpage); 1184f1e2db12SDavid Hildenbrand page_add_anon_rmap(kpage, vma, addr, RMAP_NONE); 1185e86c59b1SClaudio Imbrenda newpte = mk_pte(kpage, vma->vm_page_prot); 1186e86c59b1SClaudio Imbrenda } else { 1187e86c59b1SClaudio Imbrenda newpte = pte_mkspecial(pfn_pte(page_to_pfn(kpage), 1188e86c59b1SClaudio Imbrenda vma->vm_page_prot)); 1189a38c015fSClaudio Imbrenda /* 1190a38c015fSClaudio Imbrenda * We're replacing an anonymous page with a zero page, which is 1191a38c015fSClaudio Imbrenda * not anonymous. We need to do proper accounting otherwise we 1192a38c015fSClaudio Imbrenda * will get wrong values in /proc, and a BUG message in dmesg 1193a38c015fSClaudio Imbrenda * when tearing down the mm. 1194a38c015fSClaudio Imbrenda */ 1195a38c015fSClaudio Imbrenda dec_mm_counter(mm, MM_ANONPAGES); 1196e86c59b1SClaudio Imbrenda } 119731dbd01fSIzik Eidus 119831dbd01fSIzik Eidus flush_cache_page(vma, addr, pte_pfn(*ptep)); 11990f10851eSJérôme Glisse /* 12000f10851eSJérôme Glisse * No need to notify as we are replacing a read only page with another 12010f10851eSJérôme Glisse * read only page with the same content. 
12020f10851eSJérôme Glisse * 1203ee65728eSMike Rapoport * See Documentation/mm/mmu_notifier.rst 12040f10851eSJérôme Glisse */ 12050f10851eSJérôme Glisse ptep_clear_flush(vma, addr, ptep); 1206e86c59b1SClaudio Imbrenda set_pte_at_notify(mm, addr, ptep, newpte); 120731dbd01fSIzik Eidus 1208b4e6f66eSMatthew Wilcox (Oracle) folio = page_folio(page); 1209cea86fe2SHugh Dickins page_remove_rmap(page, vma, false); 1210b4e6f66eSMatthew Wilcox (Oracle) if (!folio_mapped(folio)) 1211b4e6f66eSMatthew Wilcox (Oracle) folio_free_swap(folio); 1212b4e6f66eSMatthew Wilcox (Oracle) folio_put(folio); 121331dbd01fSIzik Eidus 121431dbd01fSIzik Eidus pte_unmap_unlock(ptep, ptl); 121531dbd01fSIzik Eidus err = 0; 12166bdb913fSHaggai Eran out_mn: 1217ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_end(&range); 121831dbd01fSIzik Eidus out: 121931dbd01fSIzik Eidus return err; 122031dbd01fSIzik Eidus } 122131dbd01fSIzik Eidus 122231dbd01fSIzik Eidus /* 122331dbd01fSIzik Eidus * try_to_merge_one_page - take two pages and merge them into one 12248dd3557aSHugh Dickins * @vma: the vma that holds the pte pointing to page 12258dd3557aSHugh Dickins * @page: the PageAnon page that we want to replace with kpage 122680e14822SHugh Dickins * @kpage: the PageKsm page that we want to map instead of page, 122780e14822SHugh Dickins * or NULL the first time when we want to use page as kpage. 122831dbd01fSIzik Eidus * 122931dbd01fSIzik Eidus * This function returns 0 if the pages were merged, -EFAULT otherwise. 123031dbd01fSIzik Eidus */ 123131dbd01fSIzik Eidus static int try_to_merge_one_page(struct vm_area_struct *vma, 12328dd3557aSHugh Dickins struct page *page, struct page *kpage) 123331dbd01fSIzik Eidus { 123431dbd01fSIzik Eidus pte_t orig_pte = __pte(0); 123531dbd01fSIzik Eidus int err = -EFAULT; 123631dbd01fSIzik Eidus 1237db114b83SHugh Dickins if (page == kpage) /* ksm page forked */ 1238db114b83SHugh Dickins return 0; 1239db114b83SHugh Dickins 12408dd3557aSHugh Dickins if (!PageAnon(page)) 124131dbd01fSIzik Eidus goto out; 124231dbd01fSIzik Eidus 124331dbd01fSIzik Eidus /* 124431dbd01fSIzik Eidus * We need the page lock to read a stable PageSwapCache in 124531dbd01fSIzik Eidus * write_protect_page(). We use trylock_page() instead of 124631dbd01fSIzik Eidus * lock_page() because we don't want to wait here - we 124731dbd01fSIzik Eidus * prefer to continue scanning and merging different pages, 124831dbd01fSIzik Eidus * then come back to this page when it is unlocked. 124931dbd01fSIzik Eidus */ 12508dd3557aSHugh Dickins if (!trylock_page(page)) 125131e855eaSHugh Dickins goto out; 1252f765f540SKirill A. Shutemov 1253f765f540SKirill A. Shutemov if (PageTransCompound(page)) { 1254a7306c34SAndrea Arcangeli if (split_huge_page(page)) 1255f765f540SKirill A. Shutemov goto out_unlock; 1256f765f540SKirill A. Shutemov } 1257f765f540SKirill A. Shutemov 125831dbd01fSIzik Eidus /* 125931dbd01fSIzik Eidus * If this anonymous page is mapped only here, its pte may need 126031dbd01fSIzik Eidus * to be write-protected. If it's mapped elsewhere, all of its 126131dbd01fSIzik Eidus * ptes are necessarily already write-protected. But in either 126231dbd01fSIzik Eidus * case, we need to lock and check page_count is not raised. 
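 * (The page lock is already held from the trylock above; the raised
 * page_count check itself is done under the pte lock inside
 * write_protect_page().)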
126331dbd01fSIzik Eidus */ 126480e14822SHugh Dickins if (write_protect_page(vma, page, &orig_pte) == 0) { 126580e14822SHugh Dickins if (!kpage) { 126680e14822SHugh Dickins /* 126780e14822SHugh Dickins * While we hold page lock, upgrade page from 126880e14822SHugh Dickins * PageAnon+anon_vma to PageKsm+NULL stable_node: 126980e14822SHugh Dickins * stable_tree_insert() will update stable_node. 127080e14822SHugh Dickins */ 127180e14822SHugh Dickins set_page_stable_node(page, NULL); 127280e14822SHugh Dickins mark_page_accessed(page); 1273337ed7ebSMinchan Kim /* 1274337ed7ebSMinchan Kim * Page reclaim just frees a clean page with no dirty 1275337ed7ebSMinchan Kim * ptes: make sure that the ksm page would be swapped. 1276337ed7ebSMinchan Kim */ 1277337ed7ebSMinchan Kim if (!PageDirty(page)) 1278337ed7ebSMinchan Kim SetPageDirty(page); 127980e14822SHugh Dickins err = 0; 128080e14822SHugh Dickins } else if (pages_identical(page, kpage)) 12818dd3557aSHugh Dickins err = replace_page(vma, page, kpage, orig_pte); 128280e14822SHugh Dickins } 128331dbd01fSIzik Eidus 1284f765f540SKirill A. Shutemov out_unlock: 12858dd3557aSHugh Dickins unlock_page(page); 128631dbd01fSIzik Eidus out: 128731dbd01fSIzik Eidus return err; 128831dbd01fSIzik Eidus } 128931dbd01fSIzik Eidus 129031dbd01fSIzik Eidus /* 129181464e30SHugh Dickins * try_to_merge_with_ksm_page - like try_to_merge_two_pages, 129281464e30SHugh Dickins * but no new kernel page is allocated: kpage must already be a ksm page. 12938dd3557aSHugh Dickins * 12948dd3557aSHugh Dickins * This function returns 0 if the pages were merged, -EFAULT otherwise. 129581464e30SHugh Dickins */ 129621fbd591SQi Zheng static int try_to_merge_with_ksm_page(struct ksm_rmap_item *rmap_item, 12978dd3557aSHugh Dickins struct page *page, struct page *kpage) 129881464e30SHugh Dickins { 12998dd3557aSHugh Dickins struct mm_struct *mm = rmap_item->mm; 130081464e30SHugh Dickins struct vm_area_struct *vma; 130181464e30SHugh Dickins int err = -EFAULT; 130281464e30SHugh Dickins 1303d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 130485c6e8ddSAndrea Arcangeli vma = find_mergeable_vma(mm, rmap_item->address); 130585c6e8ddSAndrea Arcangeli if (!vma) 13069ba69294SHugh Dickins goto out; 13079ba69294SHugh Dickins 13088dd3557aSHugh Dickins err = try_to_merge_one_page(vma, page, kpage); 1309db114b83SHugh Dickins if (err) 1310db114b83SHugh Dickins goto out; 1311db114b83SHugh Dickins 1312bc56620bSHugh Dickins /* Unstable nid is in union with stable anon_vma: remove first */ 1313bc56620bSHugh Dickins remove_rmap_item_from_tree(rmap_item); 1314bc56620bSHugh Dickins 1315c1e8d7c6SMichel Lespinasse /* Must get reference to anon_vma while still holding mmap_lock */ 13169e60109fSPeter Zijlstra rmap_item->anon_vma = vma->anon_vma; 13179e60109fSPeter Zijlstra get_anon_vma(vma->anon_vma); 131881464e30SHugh Dickins out: 1319d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 132081464e30SHugh Dickins return err; 132181464e30SHugh Dickins } 132281464e30SHugh Dickins 132381464e30SHugh Dickins /* 132431dbd01fSIzik Eidus * try_to_merge_two_pages - take two identical pages and prepare them 132531dbd01fSIzik Eidus * to be merged into one page. 132631dbd01fSIzik Eidus * 13278dd3557aSHugh Dickins * This function returns the kpage if we successfully merged two identical 13288dd3557aSHugh Dickins * pages into one ksm page, NULL otherwise. 
132931dbd01fSIzik Eidus *
133080e14822SHugh Dickins * Note that this function upgrades page to ksm page: if one of the pages
133131dbd01fSIzik Eidus * is already a ksm page, try_to_merge_with_ksm_page should be used.
133231dbd01fSIzik Eidus */
133321fbd591SQi Zheng static struct page *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item,
13348dd3557aSHugh Dickins struct page *page,
133521fbd591SQi Zheng struct ksm_rmap_item *tree_rmap_item,
13368dd3557aSHugh Dickins struct page *tree_page)
133731dbd01fSIzik Eidus {
133880e14822SHugh Dickins int err;
133931dbd01fSIzik Eidus
134080e14822SHugh Dickins err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
134131dbd01fSIzik Eidus if (!err) {
13428dd3557aSHugh Dickins err = try_to_merge_with_ksm_page(tree_rmap_item,
134380e14822SHugh Dickins tree_page, page);
134431dbd01fSIzik Eidus /*
134581464e30SHugh Dickins * If that fails, we have a ksm page with only one pte
134681464e30SHugh Dickins * pointing to it: so break it.
134731dbd01fSIzik Eidus */
13484035c07aSHugh Dickins if (err)
13498dd3557aSHugh Dickins break_cow(rmap_item);
135031dbd01fSIzik Eidus }
135180e14822SHugh Dickins return err ? NULL : page;
135231dbd01fSIzik Eidus }
135331dbd01fSIzik Eidus
13542c653d0eSAndrea Arcangeli static __always_inline
135521fbd591SQi Zheng bool __is_page_sharing_candidate(struct ksm_stable_node *stable_node, int offset)
13562c653d0eSAndrea Arcangeli {
13572c653d0eSAndrea Arcangeli VM_BUG_ON(stable_node->rmap_hlist_len < 0);
13582c653d0eSAndrea Arcangeli /*
13592c653d0eSAndrea Arcangeli * Check that at least one mapping still exists, otherwise
13602c653d0eSAndrea Arcangeli * there's not much point in merging and sharing with this
13612c653d0eSAndrea Arcangeli * stable_node, as the underlying tree_page of the other
13622c653d0eSAndrea Arcangeli * sharer is going to be freed soon.
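 * The offset argument counts mappings the caller is about to add:
 * __is_page_sharing_candidate(node, 1) asks whether the dup can still
 * accept one more sharer under the ksm_max_page_sharing limit.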
13632c653d0eSAndrea Arcangeli */ 13642c653d0eSAndrea Arcangeli return stable_node->rmap_hlist_len && 13652c653d0eSAndrea Arcangeli stable_node->rmap_hlist_len + offset < ksm_max_page_sharing; 13662c653d0eSAndrea Arcangeli } 13672c653d0eSAndrea Arcangeli 13682c653d0eSAndrea Arcangeli static __always_inline 136921fbd591SQi Zheng bool is_page_sharing_candidate(struct ksm_stable_node *stable_node) 13702c653d0eSAndrea Arcangeli { 13712c653d0eSAndrea Arcangeli return __is_page_sharing_candidate(stable_node, 0); 13722c653d0eSAndrea Arcangeli } 13732c653d0eSAndrea Arcangeli 137421fbd591SQi Zheng static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup, 137521fbd591SQi Zheng struct ksm_stable_node **_stable_node, 13762c653d0eSAndrea Arcangeli struct rb_root *root, 13772c653d0eSAndrea Arcangeli bool prune_stale_stable_nodes) 13782c653d0eSAndrea Arcangeli { 137921fbd591SQi Zheng struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node; 13802c653d0eSAndrea Arcangeli struct hlist_node *hlist_safe; 13818dc5ffcdSAndrea Arcangeli struct page *_tree_page, *tree_page = NULL; 13822c653d0eSAndrea Arcangeli int nr = 0; 13832c653d0eSAndrea Arcangeli int found_rmap_hlist_len; 13842c653d0eSAndrea Arcangeli 13852c653d0eSAndrea Arcangeli if (!prune_stale_stable_nodes || 13862c653d0eSAndrea Arcangeli time_before(jiffies, stable_node->chain_prune_time + 13872c653d0eSAndrea Arcangeli msecs_to_jiffies( 13882c653d0eSAndrea Arcangeli ksm_stable_node_chains_prune_millisecs))) 13892c653d0eSAndrea Arcangeli prune_stale_stable_nodes = false; 13902c653d0eSAndrea Arcangeli else 13912c653d0eSAndrea Arcangeli stable_node->chain_prune_time = jiffies; 13922c653d0eSAndrea Arcangeli 13932c653d0eSAndrea Arcangeli hlist_for_each_entry_safe(dup, hlist_safe, 13942c653d0eSAndrea Arcangeli &stable_node->hlist, hlist_dup) { 13952c653d0eSAndrea Arcangeli cond_resched(); 13962c653d0eSAndrea Arcangeli /* 13972c653d0eSAndrea Arcangeli * We must walk all stable_node_dup to prune the stale 13982c653d0eSAndrea Arcangeli * stable nodes during lookup. 13992c653d0eSAndrea Arcangeli * 14002c653d0eSAndrea Arcangeli * get_ksm_page can drop the nodes from the 14012c653d0eSAndrea Arcangeli * stable_node->hlist if they point to freed pages 14022c653d0eSAndrea Arcangeli * (that's why we do a _safe walk). The "dup" 14032c653d0eSAndrea Arcangeli * stable_node parameter itself will be freed from 14042c653d0eSAndrea Arcangeli * under us if it returns NULL. 
14052c653d0eSAndrea Arcangeli */ 14062cee57d1SYang Shi _tree_page = get_ksm_page(dup, GET_KSM_PAGE_NOLOCK); 14072c653d0eSAndrea Arcangeli if (!_tree_page) 14082c653d0eSAndrea Arcangeli continue; 14092c653d0eSAndrea Arcangeli nr += 1; 14102c653d0eSAndrea Arcangeli if (is_page_sharing_candidate(dup)) { 14112c653d0eSAndrea Arcangeli if (!found || 14122c653d0eSAndrea Arcangeli dup->rmap_hlist_len > found_rmap_hlist_len) { 14132c653d0eSAndrea Arcangeli if (found) 14148dc5ffcdSAndrea Arcangeli put_page(tree_page); 14152c653d0eSAndrea Arcangeli found = dup; 14162c653d0eSAndrea Arcangeli found_rmap_hlist_len = found->rmap_hlist_len; 14178dc5ffcdSAndrea Arcangeli tree_page = _tree_page; 14182c653d0eSAndrea Arcangeli 14198dc5ffcdSAndrea Arcangeli /* skip put_page for found dup */ 14202c653d0eSAndrea Arcangeli if (!prune_stale_stable_nodes) 14212c653d0eSAndrea Arcangeli break; 14222c653d0eSAndrea Arcangeli continue; 14232c653d0eSAndrea Arcangeli } 14242c653d0eSAndrea Arcangeli } 14252c653d0eSAndrea Arcangeli put_page(_tree_page); 14262c653d0eSAndrea Arcangeli } 14272c653d0eSAndrea Arcangeli 142880b18dfaSAndrea Arcangeli if (found) { 14292c653d0eSAndrea Arcangeli /* 143080b18dfaSAndrea Arcangeli * nr is counting all dups in the chain only if 143180b18dfaSAndrea Arcangeli * prune_stale_stable_nodes is true, otherwise we may 143280b18dfaSAndrea Arcangeli * break the loop at nr == 1 even if there are 143380b18dfaSAndrea Arcangeli * multiple entries. 14342c653d0eSAndrea Arcangeli */ 143580b18dfaSAndrea Arcangeli if (prune_stale_stable_nodes && nr == 1) { 14362c653d0eSAndrea Arcangeli /* 14372c653d0eSAndrea Arcangeli * If there's not just one entry it would 14382c653d0eSAndrea Arcangeli * corrupt memory, better BUG_ON. In KSM 14392c653d0eSAndrea Arcangeli * context with no lock held it's not even 14402c653d0eSAndrea Arcangeli * fatal. 14412c653d0eSAndrea Arcangeli */ 14422c653d0eSAndrea Arcangeli BUG_ON(stable_node->hlist.first->next); 14432c653d0eSAndrea Arcangeli 14442c653d0eSAndrea Arcangeli /* 14452c653d0eSAndrea Arcangeli * There's just one entry and it is below the 14462c653d0eSAndrea Arcangeli * deduplication limit so drop the chain. 14472c653d0eSAndrea Arcangeli */ 14482c653d0eSAndrea Arcangeli rb_replace_node(&stable_node->node, &found->node, 14492c653d0eSAndrea Arcangeli root); 14502c653d0eSAndrea Arcangeli free_stable_node(stable_node); 14512c653d0eSAndrea Arcangeli ksm_stable_node_chains--; 14522c653d0eSAndrea Arcangeli ksm_stable_node_dups--; 1453b4fecc67SAndrea Arcangeli /* 14540ba1d0f7SAndrea Arcangeli * NOTE: the caller depends on the stable_node 14550ba1d0f7SAndrea Arcangeli * to be equal to stable_node_dup if the chain 14560ba1d0f7SAndrea Arcangeli * was collapsed. 1457b4fecc67SAndrea Arcangeli */ 14580ba1d0f7SAndrea Arcangeli *_stable_node = found; 14590ba1d0f7SAndrea Arcangeli /* 1460f0953a1bSIngo Molnar * Just for robustness, as stable_node is 14610ba1d0f7SAndrea Arcangeli * otherwise left as a stable pointer, the 14620ba1d0f7SAndrea Arcangeli * compiler shall optimize it away at build 14630ba1d0f7SAndrea Arcangeli * time. 
14640ba1d0f7SAndrea Arcangeli */ 14650ba1d0f7SAndrea Arcangeli stable_node = NULL; 146680b18dfaSAndrea Arcangeli } else if (stable_node->hlist.first != &found->hlist_dup && 146780b18dfaSAndrea Arcangeli __is_page_sharing_candidate(found, 1)) { 14682c653d0eSAndrea Arcangeli /* 146980b18dfaSAndrea Arcangeli * If the found stable_node dup can accept one 147080b18dfaSAndrea Arcangeli * more future merge (in addition to the one 147180b18dfaSAndrea Arcangeli * that is underway) and is not at the head of 147280b18dfaSAndrea Arcangeli * the chain, put it there so next search will 147380b18dfaSAndrea Arcangeli * be quicker in the !prune_stale_stable_nodes 147480b18dfaSAndrea Arcangeli * case. 147580b18dfaSAndrea Arcangeli * 147680b18dfaSAndrea Arcangeli * NOTE: it would be inaccurate to use nr > 1 147780b18dfaSAndrea Arcangeli * instead of checking the hlist.first pointer 147880b18dfaSAndrea Arcangeli * directly, because in the 147980b18dfaSAndrea Arcangeli * prune_stale_stable_nodes case "nr" isn't 148080b18dfaSAndrea Arcangeli * the position of the found dup in the chain, 148180b18dfaSAndrea Arcangeli * but the total number of dups in the chain. 14822c653d0eSAndrea Arcangeli */ 14832c653d0eSAndrea Arcangeli hlist_del(&found->hlist_dup); 14842c653d0eSAndrea Arcangeli hlist_add_head(&found->hlist_dup, 14852c653d0eSAndrea Arcangeli &stable_node->hlist); 14862c653d0eSAndrea Arcangeli } 14872c653d0eSAndrea Arcangeli } 14882c653d0eSAndrea Arcangeli 14898dc5ffcdSAndrea Arcangeli *_stable_node_dup = found; 14908dc5ffcdSAndrea Arcangeli return tree_page; 14912c653d0eSAndrea Arcangeli } 14922c653d0eSAndrea Arcangeli 149321fbd591SQi Zheng static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stable_node, 14942c653d0eSAndrea Arcangeli struct rb_root *root) 14952c653d0eSAndrea Arcangeli { 14962c653d0eSAndrea Arcangeli if (!is_stable_node_chain(stable_node)) 14972c653d0eSAndrea Arcangeli return stable_node; 14982c653d0eSAndrea Arcangeli if (hlist_empty(&stable_node->hlist)) { 14992c653d0eSAndrea Arcangeli free_stable_node_chain(stable_node, root); 15002c653d0eSAndrea Arcangeli return NULL; 15012c653d0eSAndrea Arcangeli } 15022c653d0eSAndrea Arcangeli return hlist_entry(stable_node->hlist.first, 15032c653d0eSAndrea Arcangeli typeof(*stable_node), hlist_dup); 15042c653d0eSAndrea Arcangeli } 15052c653d0eSAndrea Arcangeli 15068dc5ffcdSAndrea Arcangeli /* 15078dc5ffcdSAndrea Arcangeli * Like for get_ksm_page, this function can free the *_stable_node and 15088dc5ffcdSAndrea Arcangeli * *_stable_node_dup if the returned tree_page is NULL. 15098dc5ffcdSAndrea Arcangeli * 15108dc5ffcdSAndrea Arcangeli * It can also free and overwrite *_stable_node with the found 15118dc5ffcdSAndrea Arcangeli * stable_node_dup if the chain is collapsed (in which case 15128dc5ffcdSAndrea Arcangeli * *_stable_node will be equal to *_stable_node_dup like if the chain 15138dc5ffcdSAndrea Arcangeli * never existed). It's up to the caller to verify tree_page is not 15148dc5ffcdSAndrea Arcangeli * NULL before dereferencing *_stable_node or *_stable_node_dup. 15158dc5ffcdSAndrea Arcangeli * 15168dc5ffcdSAndrea Arcangeli * *_stable_node_dup is really a second output parameter of this 15178dc5ffcdSAndrea Arcangeli * function and will be overwritten in all cases, the caller doesn't 15188dc5ffcdSAndrea Arcangeli * need to initialize it. 
15198dc5ffcdSAndrea Arcangeli */ 152021fbd591SQi Zheng static struct page *__stable_node_chain(struct ksm_stable_node **_stable_node_dup, 152121fbd591SQi Zheng struct ksm_stable_node **_stable_node, 15222c653d0eSAndrea Arcangeli struct rb_root *root, 15232c653d0eSAndrea Arcangeli bool prune_stale_stable_nodes) 15242c653d0eSAndrea Arcangeli { 152521fbd591SQi Zheng struct ksm_stable_node *stable_node = *_stable_node; 15262c653d0eSAndrea Arcangeli if (!is_stable_node_chain(stable_node)) { 15272c653d0eSAndrea Arcangeli if (is_page_sharing_candidate(stable_node)) { 15288dc5ffcdSAndrea Arcangeli *_stable_node_dup = stable_node; 15292cee57d1SYang Shi return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK); 15302c653d0eSAndrea Arcangeli } 15318dc5ffcdSAndrea Arcangeli /* 15328dc5ffcdSAndrea Arcangeli * _stable_node_dup set to NULL means the stable_node 15338dc5ffcdSAndrea Arcangeli * reached the ksm_max_page_sharing limit. 15348dc5ffcdSAndrea Arcangeli */ 15358dc5ffcdSAndrea Arcangeli *_stable_node_dup = NULL; 15362c653d0eSAndrea Arcangeli return NULL; 15372c653d0eSAndrea Arcangeli } 15388dc5ffcdSAndrea Arcangeli return stable_node_dup(_stable_node_dup, _stable_node, root, 15392c653d0eSAndrea Arcangeli prune_stale_stable_nodes); 15402c653d0eSAndrea Arcangeli } 15412c653d0eSAndrea Arcangeli 154221fbd591SQi Zheng static __always_inline struct page *chain_prune(struct ksm_stable_node **s_n_d, 154321fbd591SQi Zheng struct ksm_stable_node **s_n, 15442c653d0eSAndrea Arcangeli struct rb_root *root) 15452c653d0eSAndrea Arcangeli { 15468dc5ffcdSAndrea Arcangeli return __stable_node_chain(s_n_d, s_n, root, true); 15472c653d0eSAndrea Arcangeli } 15482c653d0eSAndrea Arcangeli 154921fbd591SQi Zheng static __always_inline struct page *chain(struct ksm_stable_node **s_n_d, 155021fbd591SQi Zheng struct ksm_stable_node *s_n, 15512c653d0eSAndrea Arcangeli struct rb_root *root) 15522c653d0eSAndrea Arcangeli { 155321fbd591SQi Zheng struct ksm_stable_node *old_stable_node = s_n; 15548dc5ffcdSAndrea Arcangeli struct page *tree_page; 15558dc5ffcdSAndrea Arcangeli 15568dc5ffcdSAndrea Arcangeli tree_page = __stable_node_chain(s_n_d, &s_n, root, false); 15578dc5ffcdSAndrea Arcangeli /* not pruning dups so s_n cannot have changed */ 15588dc5ffcdSAndrea Arcangeli VM_BUG_ON(s_n != old_stable_node); 15598dc5ffcdSAndrea Arcangeli return tree_page; 15602c653d0eSAndrea Arcangeli } 15612c653d0eSAndrea Arcangeli 156231dbd01fSIzik Eidus /* 15638dd3557aSHugh Dickins * stable_tree_search - search for page inside the stable tree 156431dbd01fSIzik Eidus * 156531dbd01fSIzik Eidus * This function checks if there is a page inside the stable tree 156631dbd01fSIzik Eidus * with identical content to the page that we are scanning right now. 156731dbd01fSIzik Eidus * 15687b6ba2c7SHugh Dickins * This function returns the stable tree node of identical content if found, 156931dbd01fSIzik Eidus * NULL otherwise. 
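 * (What is actually returned is the ksm page itself, with a reference
 * held, or ERR_PTR(-EBUSY) if its page lock could not be taken:
 * callers check for that with PTR_ERR() before use.)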
157031dbd01fSIzik Eidus */ 157162b61f61SHugh Dickins static struct page *stable_tree_search(struct page *page) 157231dbd01fSIzik Eidus { 157390bd6fd3SPetr Holasek int nid; 1574ef53d16cSHugh Dickins struct rb_root *root; 15754146d2d6SHugh Dickins struct rb_node **new; 15764146d2d6SHugh Dickins struct rb_node *parent; 157721fbd591SQi Zheng struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any; 157821fbd591SQi Zheng struct ksm_stable_node *page_node; 157931dbd01fSIzik Eidus 15804146d2d6SHugh Dickins page_node = page_stable_node(page); 15814146d2d6SHugh Dickins if (page_node && page_node->head != &migrate_nodes) { 15824146d2d6SHugh Dickins /* ksm page forked */ 158308beca44SHugh Dickins get_page(page); 158462b61f61SHugh Dickins return page; 158508beca44SHugh Dickins } 158608beca44SHugh Dickins 158790bd6fd3SPetr Holasek nid = get_kpfn_nid(page_to_pfn(page)); 1588ef53d16cSHugh Dickins root = root_stable_tree + nid; 15894146d2d6SHugh Dickins again: 1590ef53d16cSHugh Dickins new = &root->rb_node; 15914146d2d6SHugh Dickins parent = NULL; 159290bd6fd3SPetr Holasek 15934146d2d6SHugh Dickins while (*new) { 15944035c07aSHugh Dickins struct page *tree_page; 159531dbd01fSIzik Eidus int ret; 159631dbd01fSIzik Eidus 159731dbd01fSIzik Eidus cond_resched(); 159821fbd591SQi Zheng stable_node = rb_entry(*new, struct ksm_stable_node, node); 15992c653d0eSAndrea Arcangeli stable_node_any = NULL; 16008dc5ffcdSAndrea Arcangeli tree_page = chain_prune(&stable_node_dup, &stable_node, root); 1601b4fecc67SAndrea Arcangeli /* 1602b4fecc67SAndrea Arcangeli * NOTE: stable_node may have been freed by 1603b4fecc67SAndrea Arcangeli * chain_prune() if the returned stable_node_dup is 1604b4fecc67SAndrea Arcangeli * not NULL. stable_node_dup may have been inserted in 1605b4fecc67SAndrea Arcangeli * the rbtree instead as a regular stable_node (in 1606b4fecc67SAndrea Arcangeli * order to collapse the stable_node chain if a single 16070ba1d0f7SAndrea Arcangeli * stable_node dup was found in it). In such case the 16083413b2c8SJulia Lawall * stable_node is overwritten by the callee to point 16090ba1d0f7SAndrea Arcangeli * to the stable_node_dup that was collapsed in the 16100ba1d0f7SAndrea Arcangeli * stable rbtree and stable_node will be equal to 16110ba1d0f7SAndrea Arcangeli * stable_node_dup like if the chain never existed. 1612b4fecc67SAndrea Arcangeli */ 16132c653d0eSAndrea Arcangeli if (!stable_node_dup) { 16142c653d0eSAndrea Arcangeli /* 16152c653d0eSAndrea Arcangeli * Either all stable_node dups were full in 16162c653d0eSAndrea Arcangeli * this stable_node chain, or this chain was 16172c653d0eSAndrea Arcangeli * empty and should be rb_erased. 16182c653d0eSAndrea Arcangeli */ 16192c653d0eSAndrea Arcangeli stable_node_any = stable_node_dup_any(stable_node, 16202c653d0eSAndrea Arcangeli root); 16212c653d0eSAndrea Arcangeli if (!stable_node_any) { 16222c653d0eSAndrea Arcangeli /* rb_erase just run */ 16232c653d0eSAndrea Arcangeli goto again; 16242c653d0eSAndrea Arcangeli } 16252c653d0eSAndrea Arcangeli /* 16262c653d0eSAndrea Arcangeli * Take any of the stable_node dups page of 16272c653d0eSAndrea Arcangeli * this stable_node chain to let the tree walk 16282c653d0eSAndrea Arcangeli * continue. All KSM pages belonging to the 16292c653d0eSAndrea Arcangeli * stable_node dups in a stable_node chain 16302c653d0eSAndrea Arcangeli * have the same content and they're 1631457aef94SEthon Paul * write protected at all times. Any will work 16322c653d0eSAndrea Arcangeli * fine to continue the walk. 
16332c653d0eSAndrea Arcangeli */ 16342cee57d1SYang Shi tree_page = get_ksm_page(stable_node_any, 16352cee57d1SYang Shi GET_KSM_PAGE_NOLOCK); 16362c653d0eSAndrea Arcangeli } 16372c653d0eSAndrea Arcangeli VM_BUG_ON(!stable_node_dup ^ !!stable_node_any); 1638f2e5ff85SAndrea Arcangeli if (!tree_page) { 1639f2e5ff85SAndrea Arcangeli /* 1640f2e5ff85SAndrea Arcangeli * If we walked over a stale stable_node, 1641f2e5ff85SAndrea Arcangeli * get_ksm_page() will call rb_erase() and it 1642f2e5ff85SAndrea Arcangeli * may rebalance the tree from under us. So 1643f2e5ff85SAndrea Arcangeli * restart the search from scratch. Returning 1644f2e5ff85SAndrea Arcangeli * NULL would be safe too, but we'd generate 1645f2e5ff85SAndrea Arcangeli * false negative insertions just because some 1646f2e5ff85SAndrea Arcangeli * stable_node was stale. 1647f2e5ff85SAndrea Arcangeli */ 1648f2e5ff85SAndrea Arcangeli goto again; 1649f2e5ff85SAndrea Arcangeli } 165031dbd01fSIzik Eidus 16514035c07aSHugh Dickins ret = memcmp_pages(page, tree_page); 1652c8d6553bSHugh Dickins put_page(tree_page); 165331dbd01fSIzik Eidus 16544146d2d6SHugh Dickins parent = *new; 1655c8d6553bSHugh Dickins if (ret < 0) 16564146d2d6SHugh Dickins new = &parent->rb_left; 1657c8d6553bSHugh Dickins else if (ret > 0) 16584146d2d6SHugh Dickins new = &parent->rb_right; 1659c8d6553bSHugh Dickins else { 16602c653d0eSAndrea Arcangeli if (page_node) { 16612c653d0eSAndrea Arcangeli VM_BUG_ON(page_node->head != &migrate_nodes); 16622c653d0eSAndrea Arcangeli /* 16632c653d0eSAndrea Arcangeli * Test if the migrated page should be merged 16642c653d0eSAndrea Arcangeli * into a stable node dup. If the mapcount is 16652c653d0eSAndrea Arcangeli * 1 we can migrate it with another KSM page 16662c653d0eSAndrea Arcangeli * without adding it to the chain. 16672c653d0eSAndrea Arcangeli */ 16682c653d0eSAndrea Arcangeli if (page_mapcount(page) > 1) 16692c653d0eSAndrea Arcangeli goto chain_append; 16702c653d0eSAndrea Arcangeli } 16712c653d0eSAndrea Arcangeli 16722c653d0eSAndrea Arcangeli if (!stable_node_dup) { 16732c653d0eSAndrea Arcangeli /* 16742c653d0eSAndrea Arcangeli * If the stable_node is a chain and 16752c653d0eSAndrea Arcangeli * we got a payload match in memcmp 16762c653d0eSAndrea Arcangeli * but we cannot merge the scanned 16772c653d0eSAndrea Arcangeli * page in any of the existing 16782c653d0eSAndrea Arcangeli * stable_node dups because they're 16792c653d0eSAndrea Arcangeli * all full, we need to wait the 16802c653d0eSAndrea Arcangeli * scanned page to find itself a match 16812c653d0eSAndrea Arcangeli * in the unstable tree to create a 16822c653d0eSAndrea Arcangeli * brand new KSM page to add later to 16832c653d0eSAndrea Arcangeli * the dups of this stable_node. 16842c653d0eSAndrea Arcangeli */ 16852c653d0eSAndrea Arcangeli return NULL; 16862c653d0eSAndrea Arcangeli } 16872c653d0eSAndrea Arcangeli 1688c8d6553bSHugh Dickins /* 1689c8d6553bSHugh Dickins * Lock and unlock the stable_node's page (which 1690c8d6553bSHugh Dickins * might already have been migrated) so that page 1691c8d6553bSHugh Dickins * migration is sure to notice its raised count. 1692c8d6553bSHugh Dickins * It would be more elegant to return stable_node 1693c8d6553bSHugh Dickins * than kpage, but that involves more changes. 
1694c8d6553bSHugh Dickins */ 16952cee57d1SYang Shi tree_page = get_ksm_page(stable_node_dup, 16962cee57d1SYang Shi GET_KSM_PAGE_TRYLOCK); 16972cee57d1SYang Shi 16982cee57d1SYang Shi if (PTR_ERR(tree_page) == -EBUSY) 16992cee57d1SYang Shi return ERR_PTR(-EBUSY); 17002cee57d1SYang Shi 17012c653d0eSAndrea Arcangeli if (unlikely(!tree_page)) 17022c653d0eSAndrea Arcangeli /* 17032c653d0eSAndrea Arcangeli * The tree may have been rebalanced, 17042c653d0eSAndrea Arcangeli * so re-evaluate parent and new. 17052c653d0eSAndrea Arcangeli */ 17062c653d0eSAndrea Arcangeli goto again; 1707c8d6553bSHugh Dickins unlock_page(tree_page); 17082c653d0eSAndrea Arcangeli 17092c653d0eSAndrea Arcangeli if (get_kpfn_nid(stable_node_dup->kpfn) != 17102c653d0eSAndrea Arcangeli NUMA(stable_node_dup->nid)) { 17114146d2d6SHugh Dickins put_page(tree_page); 17124146d2d6SHugh Dickins goto replace; 17134146d2d6SHugh Dickins } 171462b61f61SHugh Dickins return tree_page; 171531dbd01fSIzik Eidus } 1716c8d6553bSHugh Dickins } 171731dbd01fSIzik Eidus 17184146d2d6SHugh Dickins if (!page_node) 171931dbd01fSIzik Eidus return NULL; 17204146d2d6SHugh Dickins 17214146d2d6SHugh Dickins list_del(&page_node->list); 17224146d2d6SHugh Dickins DO_NUMA(page_node->nid = nid); 17234146d2d6SHugh Dickins rb_link_node(&page_node->node, parent, new); 1724ef53d16cSHugh Dickins rb_insert_color(&page_node->node, root); 17252c653d0eSAndrea Arcangeli out: 17262c653d0eSAndrea Arcangeli if (is_page_sharing_candidate(page_node)) { 17274146d2d6SHugh Dickins get_page(page); 17284146d2d6SHugh Dickins return page; 17292c653d0eSAndrea Arcangeli } else 17302c653d0eSAndrea Arcangeli return NULL; 17314146d2d6SHugh Dickins 17324146d2d6SHugh Dickins replace: 1733b4fecc67SAndrea Arcangeli /* 1734b4fecc67SAndrea Arcangeli * If stable_node was a chain and chain_prune collapsed it, 17350ba1d0f7SAndrea Arcangeli * stable_node has been updated to be the new regular 17360ba1d0f7SAndrea Arcangeli * stable_node. A collapse of the chain is indistinguishable 17370ba1d0f7SAndrea Arcangeli * from the case there was no chain in the stable 17380ba1d0f7SAndrea Arcangeli * rbtree. Otherwise stable_node is the chain and 17390ba1d0f7SAndrea Arcangeli * stable_node_dup is the dup to replace. 
1740b4fecc67SAndrea Arcangeli */ 17410ba1d0f7SAndrea Arcangeli if (stable_node_dup == stable_node) { 1742b4fecc67SAndrea Arcangeli VM_BUG_ON(is_stable_node_chain(stable_node_dup)); 1743b4fecc67SAndrea Arcangeli VM_BUG_ON(is_stable_node_dup(stable_node_dup)); 17442c653d0eSAndrea Arcangeli /* there is no chain */ 17454146d2d6SHugh Dickins if (page_node) { 17462c653d0eSAndrea Arcangeli VM_BUG_ON(page_node->head != &migrate_nodes); 17474146d2d6SHugh Dickins list_del(&page_node->list); 17484146d2d6SHugh Dickins DO_NUMA(page_node->nid = nid); 1749b4fecc67SAndrea Arcangeli rb_replace_node(&stable_node_dup->node, 1750b4fecc67SAndrea Arcangeli &page_node->node, 17512c653d0eSAndrea Arcangeli root); 17522c653d0eSAndrea Arcangeli if (is_page_sharing_candidate(page_node)) 17534146d2d6SHugh Dickins get_page(page); 17542c653d0eSAndrea Arcangeli else 17552c653d0eSAndrea Arcangeli page = NULL; 17564146d2d6SHugh Dickins } else { 1757b4fecc67SAndrea Arcangeli rb_erase(&stable_node_dup->node, root); 17584146d2d6SHugh Dickins page = NULL; 17594146d2d6SHugh Dickins } 17602c653d0eSAndrea Arcangeli } else { 17612c653d0eSAndrea Arcangeli VM_BUG_ON(!is_stable_node_chain(stable_node)); 17622c653d0eSAndrea Arcangeli __stable_node_dup_del(stable_node_dup); 17632c653d0eSAndrea Arcangeli if (page_node) { 17642c653d0eSAndrea Arcangeli VM_BUG_ON(page_node->head != &migrate_nodes); 17652c653d0eSAndrea Arcangeli list_del(&page_node->list); 17662c653d0eSAndrea Arcangeli DO_NUMA(page_node->nid = nid); 17672c653d0eSAndrea Arcangeli stable_node_chain_add_dup(page_node, stable_node); 17682c653d0eSAndrea Arcangeli if (is_page_sharing_candidate(page_node)) 17692c653d0eSAndrea Arcangeli get_page(page); 17702c653d0eSAndrea Arcangeli else 17712c653d0eSAndrea Arcangeli page = NULL; 17722c653d0eSAndrea Arcangeli } else { 17732c653d0eSAndrea Arcangeli page = NULL; 17742c653d0eSAndrea Arcangeli } 17752c653d0eSAndrea Arcangeli } 17762c653d0eSAndrea Arcangeli stable_node_dup->head = &migrate_nodes; 17772c653d0eSAndrea Arcangeli list_add(&stable_node_dup->list, stable_node_dup->head); 17784146d2d6SHugh Dickins return page; 17792c653d0eSAndrea Arcangeli 17802c653d0eSAndrea Arcangeli chain_append: 17812c653d0eSAndrea Arcangeli /* stable_node_dup could be null if it reached the limit */ 17822c653d0eSAndrea Arcangeli if (!stable_node_dup) 17832c653d0eSAndrea Arcangeli stable_node_dup = stable_node_any; 1784b4fecc67SAndrea Arcangeli /* 1785b4fecc67SAndrea Arcangeli * If stable_node was a chain and chain_prune collapsed it, 17860ba1d0f7SAndrea Arcangeli * stable_node has been updated to be the new regular 17870ba1d0f7SAndrea Arcangeli * stable_node. A collapse of the chain is indistinguishable 17880ba1d0f7SAndrea Arcangeli * from the case there was no chain in the stable 17890ba1d0f7SAndrea Arcangeli * rbtree. Otherwise stable_node is the chain and 17900ba1d0f7SAndrea Arcangeli * stable_node_dup is the dup to replace. 
1791b4fecc67SAndrea Arcangeli */ 17920ba1d0f7SAndrea Arcangeli if (stable_node_dup == stable_node) { 1793b4fecc67SAndrea Arcangeli VM_BUG_ON(is_stable_node_dup(stable_node_dup)); 17942c653d0eSAndrea Arcangeli /* chain is missing so create it */ 17952c653d0eSAndrea Arcangeli stable_node = alloc_stable_node_chain(stable_node_dup, 17962c653d0eSAndrea Arcangeli root); 17972c653d0eSAndrea Arcangeli if (!stable_node) 17982c653d0eSAndrea Arcangeli return NULL; 17992c653d0eSAndrea Arcangeli } 18002c653d0eSAndrea Arcangeli /* 18012c653d0eSAndrea Arcangeli * Add this stable_node dup that was 18022c653d0eSAndrea Arcangeli * migrated to the stable_node chain 18032c653d0eSAndrea Arcangeli * of the current nid for this page 18042c653d0eSAndrea Arcangeli * content. 18052c653d0eSAndrea Arcangeli */ 1806b4fecc67SAndrea Arcangeli VM_BUG_ON(!is_stable_node_dup(stable_node_dup)); 18072c653d0eSAndrea Arcangeli VM_BUG_ON(page_node->head != &migrate_nodes); 18082c653d0eSAndrea Arcangeli list_del(&page_node->list); 18092c653d0eSAndrea Arcangeli DO_NUMA(page_node->nid = nid); 18102c653d0eSAndrea Arcangeli stable_node_chain_add_dup(page_node, stable_node); 18112c653d0eSAndrea Arcangeli goto out; 181231dbd01fSIzik Eidus } 181331dbd01fSIzik Eidus 181431dbd01fSIzik Eidus /* 1815e850dcf5SHugh Dickins * stable_tree_insert - insert stable tree node pointing to new ksm page 181631dbd01fSIzik Eidus * into the stable tree. 181731dbd01fSIzik Eidus * 18187b6ba2c7SHugh Dickins * This function returns the stable tree node just allocated on success, 18197b6ba2c7SHugh Dickins * NULL otherwise. 182031dbd01fSIzik Eidus */ 182121fbd591SQi Zheng static struct ksm_stable_node *stable_tree_insert(struct page *kpage) 182231dbd01fSIzik Eidus { 182390bd6fd3SPetr Holasek int nid; 182490bd6fd3SPetr Holasek unsigned long kpfn; 1825ef53d16cSHugh Dickins struct rb_root *root; 182690bd6fd3SPetr Holasek struct rb_node **new; 1827f2e5ff85SAndrea Arcangeli struct rb_node *parent; 182821fbd591SQi Zheng struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any; 18292c653d0eSAndrea Arcangeli bool need_chain = false; 183031dbd01fSIzik Eidus 183190bd6fd3SPetr Holasek kpfn = page_to_pfn(kpage); 183290bd6fd3SPetr Holasek nid = get_kpfn_nid(kpfn); 1833ef53d16cSHugh Dickins root = root_stable_tree + nid; 1834f2e5ff85SAndrea Arcangeli again: 1835f2e5ff85SAndrea Arcangeli parent = NULL; 1836ef53d16cSHugh Dickins new = &root->rb_node; 183790bd6fd3SPetr Holasek 183831dbd01fSIzik Eidus while (*new) { 18394035c07aSHugh Dickins struct page *tree_page; 184031dbd01fSIzik Eidus int ret; 184131dbd01fSIzik Eidus 184231dbd01fSIzik Eidus cond_resched(); 184321fbd591SQi Zheng stable_node = rb_entry(*new, struct ksm_stable_node, node); 18442c653d0eSAndrea Arcangeli stable_node_any = NULL; 18458dc5ffcdSAndrea Arcangeli tree_page = chain(&stable_node_dup, stable_node, root); 18462c653d0eSAndrea Arcangeli if (!stable_node_dup) { 18472c653d0eSAndrea Arcangeli /* 18482c653d0eSAndrea Arcangeli * Either all stable_node dups were full in 18492c653d0eSAndrea Arcangeli * this stable_node chain, or this chain was 18502c653d0eSAndrea Arcangeli * empty and should be rb_erased. 
18512c653d0eSAndrea Arcangeli */ 18522c653d0eSAndrea Arcangeli stable_node_any = stable_node_dup_any(stable_node, 18532c653d0eSAndrea Arcangeli root); 18542c653d0eSAndrea Arcangeli if (!stable_node_any) { 18552c653d0eSAndrea Arcangeli /* rb_erase just run */ 18562c653d0eSAndrea Arcangeli goto again; 18572c653d0eSAndrea Arcangeli } 18582c653d0eSAndrea Arcangeli /* 18592c653d0eSAndrea Arcangeli * Take any of the stable_node dups page of 18602c653d0eSAndrea Arcangeli * this stable_node chain to let the tree walk 18612c653d0eSAndrea Arcangeli * continue. All KSM pages belonging to the 18622c653d0eSAndrea Arcangeli * stable_node dups in a stable_node chain 18632c653d0eSAndrea Arcangeli * have the same content and they're 1864457aef94SEthon Paul * write protected at all times. Any will work 18652c653d0eSAndrea Arcangeli * fine to continue the walk. 18662c653d0eSAndrea Arcangeli */ 18672cee57d1SYang Shi tree_page = get_ksm_page(stable_node_any, 18682cee57d1SYang Shi GET_KSM_PAGE_NOLOCK); 18692c653d0eSAndrea Arcangeli } 18702c653d0eSAndrea Arcangeli VM_BUG_ON(!stable_node_dup ^ !!stable_node_any); 1871f2e5ff85SAndrea Arcangeli if (!tree_page) { 1872f2e5ff85SAndrea Arcangeli /* 1873f2e5ff85SAndrea Arcangeli * If we walked over a stale stable_node, 1874f2e5ff85SAndrea Arcangeli * get_ksm_page() will call rb_erase() and it 1875f2e5ff85SAndrea Arcangeli * may rebalance the tree from under us. So 1876f2e5ff85SAndrea Arcangeli * restart the search from scratch. Returning 1877f2e5ff85SAndrea Arcangeli * NULL would be safe too, but we'd generate 1878f2e5ff85SAndrea Arcangeli * false negative insertions just because some 1879f2e5ff85SAndrea Arcangeli * stable_node was stale. 1880f2e5ff85SAndrea Arcangeli */ 1881f2e5ff85SAndrea Arcangeli goto again; 1882f2e5ff85SAndrea Arcangeli } 188331dbd01fSIzik Eidus 18844035c07aSHugh Dickins ret = memcmp_pages(kpage, tree_page); 18854035c07aSHugh Dickins put_page(tree_page); 188631dbd01fSIzik Eidus 188731dbd01fSIzik Eidus parent = *new; 188831dbd01fSIzik Eidus if (ret < 0) 188931dbd01fSIzik Eidus new = &parent->rb_left; 189031dbd01fSIzik Eidus else if (ret > 0) 189131dbd01fSIzik Eidus new = &parent->rb_right; 189231dbd01fSIzik Eidus else { 18932c653d0eSAndrea Arcangeli need_chain = true; 18942c653d0eSAndrea Arcangeli break; 189531dbd01fSIzik Eidus } 189631dbd01fSIzik Eidus } 189731dbd01fSIzik Eidus 18982c653d0eSAndrea Arcangeli stable_node_dup = alloc_stable_node(); 18992c653d0eSAndrea Arcangeli if (!stable_node_dup) 19007b6ba2c7SHugh Dickins return NULL; 190131dbd01fSIzik Eidus 19022c653d0eSAndrea Arcangeli INIT_HLIST_HEAD(&stable_node_dup->hlist); 19032c653d0eSAndrea Arcangeli stable_node_dup->kpfn = kpfn; 19042c653d0eSAndrea Arcangeli set_page_stable_node(kpage, stable_node_dup); 19052c653d0eSAndrea Arcangeli stable_node_dup->rmap_hlist_len = 0; 19062c653d0eSAndrea Arcangeli DO_NUMA(stable_node_dup->nid = nid); 19072c653d0eSAndrea Arcangeli if (!need_chain) { 19082c653d0eSAndrea Arcangeli rb_link_node(&stable_node_dup->node, parent, new); 19092c653d0eSAndrea Arcangeli rb_insert_color(&stable_node_dup->node, root); 19102c653d0eSAndrea Arcangeli } else { 19112c653d0eSAndrea Arcangeli if (!is_stable_node_chain(stable_node)) { 191221fbd591SQi Zheng struct ksm_stable_node *orig = stable_node; 19132c653d0eSAndrea Arcangeli /* chain is missing so create it */ 19142c653d0eSAndrea Arcangeli stable_node = alloc_stable_node_chain(orig, root); 19152c653d0eSAndrea Arcangeli if (!stable_node) { 19162c653d0eSAndrea Arcangeli free_stable_node(stable_node_dup); 19172c653d0eSAndrea 
Arcangeli return NULL; 19182c653d0eSAndrea Arcangeli } 19192c653d0eSAndrea Arcangeli } 19202c653d0eSAndrea Arcangeli stable_node_chain_add_dup(stable_node_dup, stable_node); 19212c653d0eSAndrea Arcangeli } 192208beca44SHugh Dickins 19232c653d0eSAndrea Arcangeli return stable_node_dup; 192431dbd01fSIzik Eidus } 192531dbd01fSIzik Eidus 192631dbd01fSIzik Eidus /* 19278dd3557aSHugh Dickins * unstable_tree_search_insert - search for identical page, 19288dd3557aSHugh Dickins * else insert rmap_item into the unstable tree. 192931dbd01fSIzik Eidus * 193031dbd01fSIzik Eidus * This function searches for a page in the unstable tree identical to the 193131dbd01fSIzik Eidus * page currently being scanned; and if no identical page is found in the 193231dbd01fSIzik Eidus * tree, we insert rmap_item as a new object into the unstable tree. 193331dbd01fSIzik Eidus * 193431dbd01fSIzik Eidus * This function returns pointer to rmap_item found to be identical 193531dbd01fSIzik Eidus * to the currently scanned page, NULL otherwise. 193631dbd01fSIzik Eidus * 193731dbd01fSIzik Eidus * This function does both searching and inserting, because they share 193831dbd01fSIzik Eidus * the same walking algorithm in an rbtree. 193931dbd01fSIzik Eidus */ 19408dd3557aSHugh Dickins static 194121fbd591SQi Zheng struct ksm_rmap_item *unstable_tree_search_insert(struct ksm_rmap_item *rmap_item, 19428dd3557aSHugh Dickins struct page *page, 19438dd3557aSHugh Dickins struct page **tree_pagep) 194431dbd01fSIzik Eidus { 194590bd6fd3SPetr Holasek struct rb_node **new; 194690bd6fd3SPetr Holasek struct rb_root *root; 194731dbd01fSIzik Eidus struct rb_node *parent = NULL; 194890bd6fd3SPetr Holasek int nid; 194990bd6fd3SPetr Holasek 195090bd6fd3SPetr Holasek nid = get_kpfn_nid(page_to_pfn(page)); 1951ef53d16cSHugh Dickins root = root_unstable_tree + nid; 195290bd6fd3SPetr Holasek new = &root->rb_node; 195331dbd01fSIzik Eidus 195431dbd01fSIzik Eidus while (*new) { 195521fbd591SQi Zheng struct ksm_rmap_item *tree_rmap_item; 19568dd3557aSHugh Dickins struct page *tree_page; 195731dbd01fSIzik Eidus int ret; 195831dbd01fSIzik Eidus 1959d178f27fSHugh Dickins cond_resched(); 196021fbd591SQi Zheng tree_rmap_item = rb_entry(*new, struct ksm_rmap_item, node); 19618dd3557aSHugh Dickins tree_page = get_mergeable_page(tree_rmap_item); 1962c8f95ed1SAndrea Arcangeli if (!tree_page) 196331dbd01fSIzik Eidus return NULL; 196431dbd01fSIzik Eidus 196531dbd01fSIzik Eidus /* 19668dd3557aSHugh Dickins * Don't substitute a ksm page for a forked page. 196731dbd01fSIzik Eidus */ 19688dd3557aSHugh Dickins if (page == tree_page) { 19698dd3557aSHugh Dickins put_page(tree_page); 197031dbd01fSIzik Eidus return NULL; 197131dbd01fSIzik Eidus } 197231dbd01fSIzik Eidus 19738dd3557aSHugh Dickins ret = memcmp_pages(page, tree_page); 197431dbd01fSIzik Eidus 197531dbd01fSIzik Eidus parent = *new; 197631dbd01fSIzik Eidus if (ret < 0) { 19778dd3557aSHugh Dickins put_page(tree_page); 197831dbd01fSIzik Eidus new = &parent->rb_left; 197931dbd01fSIzik Eidus } else if (ret > 0) { 19808dd3557aSHugh Dickins put_page(tree_page); 198131dbd01fSIzik Eidus new = &parent->rb_right; 1982b599cbdfSHugh Dickins } else if (!ksm_merge_across_nodes && 1983b599cbdfSHugh Dickins page_to_nid(tree_page) != nid) { 1984b599cbdfSHugh Dickins /* 1985b599cbdfSHugh Dickins * If tree_page has been migrated to another NUMA node, 1986b599cbdfSHugh Dickins * it will be flushed out and put in the right unstable 1987b599cbdfSHugh Dickins * tree next time: only merge with it when across_nodes. 
1988b599cbdfSHugh Dickins */ 1989b599cbdfSHugh Dickins put_page(tree_page); 1990b599cbdfSHugh Dickins return NULL; 199131dbd01fSIzik Eidus } else { 19928dd3557aSHugh Dickins *tree_pagep = tree_page; 199331dbd01fSIzik Eidus return tree_rmap_item; 199431dbd01fSIzik Eidus } 199531dbd01fSIzik Eidus } 199631dbd01fSIzik Eidus 19977b6ba2c7SHugh Dickins rmap_item->address |= UNSTABLE_FLAG; 199831dbd01fSIzik Eidus rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); 1999e850dcf5SHugh Dickins DO_NUMA(rmap_item->nid = nid); 200031dbd01fSIzik Eidus rb_link_node(&rmap_item->node, parent, new); 200190bd6fd3SPetr Holasek rb_insert_color(&rmap_item->node, root); 200231dbd01fSIzik Eidus 2003473b0ce4SHugh Dickins ksm_pages_unshared++; 200431dbd01fSIzik Eidus return NULL; 200531dbd01fSIzik Eidus } 200631dbd01fSIzik Eidus 200731dbd01fSIzik Eidus /* 200831dbd01fSIzik Eidus * stable_tree_append - add another rmap_item to the linked list of 200931dbd01fSIzik Eidus * rmap_items hanging off a given node of the stable tree, all sharing 201031dbd01fSIzik Eidus * the same ksm page. 201131dbd01fSIzik Eidus */ 201221fbd591SQi Zheng static void stable_tree_append(struct ksm_rmap_item *rmap_item, 201321fbd591SQi Zheng struct ksm_stable_node *stable_node, 20142c653d0eSAndrea Arcangeli bool max_page_sharing_bypass) 201531dbd01fSIzik Eidus { 20162c653d0eSAndrea Arcangeli /* 20172c653d0eSAndrea Arcangeli * rmap won't find this mapping if we don't insert the 20182c653d0eSAndrea Arcangeli * rmap_item in the right stable_node 20192c653d0eSAndrea Arcangeli * duplicate. page_migration could break later if rmap breaks, 20202c653d0eSAndrea Arcangeli * so we can as well crash here. We really need to check for 20212c653d0eSAndrea Arcangeli * rmap_hlist_len == STABLE_NODE_CHAIN, but we can as well check 2022457aef94SEthon Paul * for other negative values as an underflow if detected here 20232c653d0eSAndrea Arcangeli * for the first time (and not when decreasing rmap_hlist_len) 20242c653d0eSAndrea Arcangeli * would be sign of memory corruption in the stable_node. 20252c653d0eSAndrea Arcangeli */ 20262c653d0eSAndrea Arcangeli BUG_ON(stable_node->rmap_hlist_len < 0); 20272c653d0eSAndrea Arcangeli 20282c653d0eSAndrea Arcangeli stable_node->rmap_hlist_len++; 20292c653d0eSAndrea Arcangeli if (!max_page_sharing_bypass) 20302c653d0eSAndrea Arcangeli /* possibly non fatal but unexpected overflow, only warn */ 20312c653d0eSAndrea Arcangeli WARN_ON_ONCE(stable_node->rmap_hlist_len > 20322c653d0eSAndrea Arcangeli ksm_max_page_sharing); 20332c653d0eSAndrea Arcangeli 20347b6ba2c7SHugh Dickins rmap_item->head = stable_node; 203531dbd01fSIzik Eidus rmap_item->address |= STABLE_FLAG; 20367b6ba2c7SHugh Dickins hlist_add_head(&rmap_item->hlist, &stable_node->hlist); 2037e178dfdeSHugh Dickins 20387b6ba2c7SHugh Dickins if (rmap_item->hlist.next) 2039e178dfdeSHugh Dickins ksm_pages_sharing++; 20407b6ba2c7SHugh Dickins else 20417b6ba2c7SHugh Dickins ksm_pages_shared++; 204276093853Sxu xin 204376093853Sxu xin rmap_item->mm->ksm_merging_pages++; 204431dbd01fSIzik Eidus } 204531dbd01fSIzik Eidus 204631dbd01fSIzik Eidus /* 204781464e30SHugh Dickins * cmp_and_merge_page - first see if page can be merged into the stable tree; 204881464e30SHugh Dickins * if not, compare checksum to previous and if it's the same, see if page can 204981464e30SHugh Dickins * be inserted into the unstable tree, or merged with a page already there and 205081464e30SHugh Dickins * both transferred to the stable tree. 
204631dbd01fSIzik Eidus /*
204781464e30SHugh Dickins  * cmp_and_merge_page - first see if page can be merged into the stable tree;
204881464e30SHugh Dickins  * if not, compare checksum to previous and if it's the same, see if page can
204981464e30SHugh Dickins  * be inserted into the unstable tree, or merged with a page already there and
205081464e30SHugh Dickins  * both transferred to the stable tree.
205131dbd01fSIzik Eidus  *
205231dbd01fSIzik Eidus  * @page: the page that we are searching an identical page for.
205331dbd01fSIzik Eidus  * @rmap_item: the reverse mapping into the virtual address of this page
205431dbd01fSIzik Eidus  */
205521fbd591SQi Zheng static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_item)
205631dbd01fSIzik Eidus {
20574b22927fSKirill Tkhai 	struct mm_struct *mm = rmap_item->mm;
205821fbd591SQi Zheng 	struct ksm_rmap_item *tree_rmap_item;
20598dd3557aSHugh Dickins 	struct page *tree_page = NULL;
206021fbd591SQi Zheng 	struct ksm_stable_node *stable_node;
20618dd3557aSHugh Dickins 	struct page *kpage;
206231dbd01fSIzik Eidus 	unsigned int checksum;
206331dbd01fSIzik Eidus 	int err;
20642c653d0eSAndrea Arcangeli 	bool max_page_sharing_bypass = false;
206531dbd01fSIzik Eidus 
20664146d2d6SHugh Dickins 	stable_node = page_stable_node(page);
20674146d2d6SHugh Dickins 	if (stable_node) {
20684146d2d6SHugh Dickins 		if (stable_node->head != &migrate_nodes &&
20692c653d0eSAndrea Arcangeli 		    get_kpfn_nid(READ_ONCE(stable_node->kpfn)) !=
20702c653d0eSAndrea Arcangeli 		    NUMA(stable_node->nid)) {
20712c653d0eSAndrea Arcangeli 			stable_node_dup_del(stable_node);
20724146d2d6SHugh Dickins 			stable_node->head = &migrate_nodes;
20734146d2d6SHugh Dickins 			list_add(&stable_node->list, stable_node->head);
20744146d2d6SHugh Dickins 		}
20754146d2d6SHugh Dickins 		if (stable_node->head != &migrate_nodes &&
20764146d2d6SHugh Dickins 		    rmap_item->head == stable_node)
20774146d2d6SHugh Dickins 			return;
20782c653d0eSAndrea Arcangeli 		/*
20792c653d0eSAndrea Arcangeli 		 * If it's a KSM fork, allow it to go over the sharing limit
20802c653d0eSAndrea Arcangeli 		 * without warnings.
20812c653d0eSAndrea Arcangeli 		 */
20822c653d0eSAndrea Arcangeli 		if (!is_page_sharing_candidate(stable_node))
20832c653d0eSAndrea Arcangeli 			max_page_sharing_bypass = true;
20844146d2d6SHugh Dickins 	}
208531dbd01fSIzik Eidus 
208631dbd01fSIzik Eidus 	/* We first start with searching the page inside the stable tree */
208762b61f61SHugh Dickins 	kpage = stable_tree_search(page);
20884146d2d6SHugh Dickins 	if (kpage == page && rmap_item->head == stable_node) {
20894146d2d6SHugh Dickins 		put_page(kpage);
20904146d2d6SHugh Dickins 		return;
20914146d2d6SHugh Dickins 	}
20924146d2d6SHugh Dickins 
20934146d2d6SHugh Dickins 	remove_rmap_item_from_tree(rmap_item);
20944146d2d6SHugh Dickins 
209562b61f61SHugh Dickins 	if (kpage) {
20962cee57d1SYang Shi 		if (PTR_ERR(kpage) == -EBUSY)
20972cee57d1SYang Shi 			return;
20982cee57d1SYang Shi 
209908beca44SHugh Dickins 		err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
210031dbd01fSIzik Eidus 		if (!err) {
210131dbd01fSIzik Eidus 			/*
210231dbd01fSIzik Eidus 			 * The page was successfully merged:
210331dbd01fSIzik Eidus 			 * add its rmap_item to the stable tree.
210431dbd01fSIzik Eidus 			 */
21055ad64688SHugh Dickins 			lock_page(kpage);
21062c653d0eSAndrea Arcangeli 			stable_tree_append(rmap_item, page_stable_node(kpage),
21072c653d0eSAndrea Arcangeli 					   max_page_sharing_bypass);
21085ad64688SHugh Dickins 			unlock_page(kpage);
210931dbd01fSIzik Eidus 		}
21108dd3557aSHugh Dickins 		put_page(kpage);
211131dbd01fSIzik Eidus 		return;
211231dbd01fSIzik Eidus 	}
211331dbd01fSIzik Eidus 
211431dbd01fSIzik Eidus 	/*
21154035c07aSHugh Dickins 	 * If the hash value of the page has changed from the last time
21164035c07aSHugh Dickins 	 * we calculated it, this page is changing frequently: therefore we
21174035c07aSHugh Dickins 	 * don't want to insert it in the unstable tree, and we don't want
21184035c07aSHugh Dickins 	 * to waste our time searching for something identical to it there.
211931dbd01fSIzik Eidus 	 */
212031dbd01fSIzik Eidus 	checksum = calc_checksum(page);
212131dbd01fSIzik Eidus 	if (rmap_item->oldchecksum != checksum) {
212231dbd01fSIzik Eidus 		rmap_item->oldchecksum = checksum;
212331dbd01fSIzik Eidus 		return;
212431dbd01fSIzik Eidus 	}
212531dbd01fSIzik Eidus 
2126e86c59b1SClaudio Imbrenda 	/*
2127e86c59b1SClaudio Imbrenda 	 * Same checksum as an empty page. We attempt to merge it with the
2128e86c59b1SClaudio Imbrenda 	 * appropriate zero page if the user enabled this via sysfs.
2129e86c59b1SClaudio Imbrenda 	 */
2130e86c59b1SClaudio Imbrenda 	if (ksm_use_zero_pages && (checksum == zero_checksum)) {
2131e86c59b1SClaudio Imbrenda 		struct vm_area_struct *vma;
2132e86c59b1SClaudio Imbrenda 
2133d8ed45c5SMichel Lespinasse 		mmap_read_lock(mm);
21344b22927fSKirill Tkhai 		vma = find_mergeable_vma(mm, rmap_item->address);
213556df70a6SMuchun Song 		if (vma) {
2136e86c59b1SClaudio Imbrenda 			err = try_to_merge_one_page(vma, page,
2137e86c59b1SClaudio Imbrenda 					ZERO_PAGE(rmap_item->address));
213856df70a6SMuchun Song 		} else {
213956df70a6SMuchun Song 			/*
214056df70a6SMuchun Song 			 * If the vma is out of date, we do not need to
214156df70a6SMuchun Song 			 * continue.
214256df70a6SMuchun Song 			 */
214356df70a6SMuchun Song 			err = 0;
214456df70a6SMuchun Song 		}
2145d8ed45c5SMichel Lespinasse 		mmap_read_unlock(mm);
2146e86c59b1SClaudio Imbrenda 		/*
2147e86c59b1SClaudio Imbrenda 		 * In case of failure, the page was not really empty, so we
2148e86c59b1SClaudio Imbrenda 		 * need to continue. Otherwise we're done.
2149e86c59b1SClaudio Imbrenda 		 */
2150e86c59b1SClaudio Imbrenda 		if (!err)
2151e86c59b1SClaudio Imbrenda 			return;
2152e86c59b1SClaudio Imbrenda 	}
21538dd3557aSHugh Dickins 	tree_rmap_item =
21548dd3557aSHugh Dickins 		unstable_tree_search_insert(rmap_item, page, &tree_page);
215531dbd01fSIzik Eidus 	if (tree_rmap_item) {
215677da2ba0SClaudio Imbrenda 		bool split;
215777da2ba0SClaudio Imbrenda 
21588dd3557aSHugh Dickins 		kpage = try_to_merge_two_pages(rmap_item, page,
21598dd3557aSHugh Dickins 					       tree_rmap_item, tree_page);
216077da2ba0SClaudio Imbrenda 		/*
216177da2ba0SClaudio Imbrenda 		 * If both pages we tried to merge belong to the same compound
216277da2ba0SClaudio Imbrenda 		 * page, then we actually ended up increasing the reference
216377da2ba0SClaudio Imbrenda 		 * count of the same compound page twice, and split_huge_page
216477da2ba0SClaudio Imbrenda 		 * failed.
216577da2ba0SClaudio Imbrenda 		 * Here we set a flag if that happened, and we use it later to
216677da2ba0SClaudio Imbrenda 		 * try split_huge_page again. Since we call put_page right
216777da2ba0SClaudio Imbrenda 		 * afterwards, the reference count will be correct and
216877da2ba0SClaudio Imbrenda 		 * split_huge_page should succeed.
216977da2ba0SClaudio Imbrenda 		 */
217077da2ba0SClaudio Imbrenda 		split = PageTransCompound(page)
217177da2ba0SClaudio Imbrenda 			&& compound_head(page) == compound_head(tree_page);
21728dd3557aSHugh Dickins 		put_page(tree_page);
21738dd3557aSHugh Dickins 		if (kpage) {
2174bc56620bSHugh Dickins 			/*
2175bc56620bSHugh Dickins 			 * The pages were successfully merged: insert new
2176bc56620bSHugh Dickins 			 * node in the stable tree and add both rmap_items.
2177bc56620bSHugh Dickins 			 */
21785ad64688SHugh Dickins 			lock_page(kpage);
21797b6ba2c7SHugh Dickins 			stable_node = stable_tree_insert(kpage);
21807b6ba2c7SHugh Dickins 			if (stable_node) {
21812c653d0eSAndrea Arcangeli 				stable_tree_append(tree_rmap_item, stable_node,
21822c653d0eSAndrea Arcangeli 						   false);
21832c653d0eSAndrea Arcangeli 				stable_tree_append(rmap_item, stable_node,
21842c653d0eSAndrea Arcangeli 						   false);
21857b6ba2c7SHugh Dickins 			}
21865ad64688SHugh Dickins 			unlock_page(kpage);
21877b6ba2c7SHugh Dickins 
218831dbd01fSIzik Eidus 			/*
218931dbd01fSIzik Eidus 			 * If we fail to insert the page into the stable tree,
219031dbd01fSIzik Eidus 			 * we will have 2 virtual addresses that are pointing
219131dbd01fSIzik Eidus 			 * to a ksm page left outside the stable tree,
219231dbd01fSIzik Eidus 			 * in which case we need to break_cow on both.
219331dbd01fSIzik Eidus 			 */
21947b6ba2c7SHugh Dickins 			if (!stable_node) {
21958dd3557aSHugh Dickins 				break_cow(tree_rmap_item);
21968dd3557aSHugh Dickins 				break_cow(rmap_item);
219731dbd01fSIzik Eidus 			}
219877da2ba0SClaudio Imbrenda 		} else if (split) {
219977da2ba0SClaudio Imbrenda 			/*
220077da2ba0SClaudio Imbrenda 			 * We are here if we tried to merge two pages and
220177da2ba0SClaudio Imbrenda 			 * failed because they both belonged to the same
220277da2ba0SClaudio Imbrenda 			 * compound page. We will split the page now, but no
220377da2ba0SClaudio Imbrenda 			 * merging will take place.
220477da2ba0SClaudio Imbrenda 			 * We do not want to add the cost of a full lock; if
220577da2ba0SClaudio Imbrenda 			 * the page is locked, it is better to skip it and
220677da2ba0SClaudio Imbrenda 			 * perhaps try again later.
220777da2ba0SClaudio Imbrenda 			 */
220877da2ba0SClaudio Imbrenda 			if (!trylock_page(page))
220977da2ba0SClaudio Imbrenda 				return;
221077da2ba0SClaudio Imbrenda 			split_huge_page(page);
221177da2ba0SClaudio Imbrenda 			unlock_page(page);
221231dbd01fSIzik Eidus 		}
221331dbd01fSIzik Eidus 	}
221431dbd01fSIzik Eidus }
221531dbd01fSIzik Eidus 
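/*
 * In outline, cmp_and_merge_page() tries, in order:
 *  1. stable_tree_search(): merge with an existing KSM page;
 *  2. checksum comparison: skip pages whose contents are still changing;
 *  3. an optional merge with the zero page (ksm_use_zero_pages);
 *  4. unstable_tree_search_insert(): pair up with another candidate and
 *     promote both into the stable tree via stable_tree_insert().
 * A consequence worth spelling out: an unmerged page needs at least two
 * scan passes with an unchanged checksum before it can reach the stable
 * tree, since the first sighting only records oldchecksum.
 */
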
221621fbd591SQi Zheng static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot,
221721fbd591SQi Zheng 					    struct ksm_rmap_item **rmap_list,
221831dbd01fSIzik Eidus 					    unsigned long addr)
221931dbd01fSIzik Eidus {
222021fbd591SQi Zheng 	struct ksm_rmap_item *rmap_item;
222131dbd01fSIzik Eidus 
22226514d511SHugh Dickins 	while (*rmap_list) {
22236514d511SHugh Dickins 		rmap_item = *rmap_list;
222493d17715SHugh Dickins 		if ((rmap_item->address & PAGE_MASK) == addr)
222531dbd01fSIzik Eidus 			return rmap_item;
222631dbd01fSIzik Eidus 		if (rmap_item->address > addr)
222731dbd01fSIzik Eidus 			break;
22286514d511SHugh Dickins 		*rmap_list = rmap_item->rmap_list;
222931dbd01fSIzik Eidus 		remove_rmap_item_from_tree(rmap_item);
223031dbd01fSIzik Eidus 		free_rmap_item(rmap_item);
223131dbd01fSIzik Eidus 	}
223231dbd01fSIzik Eidus 
223331dbd01fSIzik Eidus 	rmap_item = alloc_rmap_item();
223431dbd01fSIzik Eidus 	if (rmap_item) {
223531dbd01fSIzik Eidus 		/* It has already been zeroed */
223658730ab6SQi Zheng 		rmap_item->mm = mm_slot->slot.mm;
2237cb4df4caSxu xin 		rmap_item->mm->ksm_rmap_items++;
223831dbd01fSIzik Eidus 		rmap_item->address = addr;
22396514d511SHugh Dickins 		rmap_item->rmap_list = *rmap_list;
22406514d511SHugh Dickins 		*rmap_list = rmap_item;
224131dbd01fSIzik Eidus 	}
224231dbd01fSIzik Eidus 	return rmap_item;
224331dbd01fSIzik Eidus }
224431dbd01fSIzik Eidus 
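/*
 * The rmap_list mirrors the address order in which the scanner walks
 * the mm, so the loop above is, in effect, a merge of two sorted
 * sequences: entries below @addr no longer correspond to a scanned
 * page (unmapped, or their vma lost VM_MERGEABLE) and are freed in
 * passing, while a rescan of an unchanged range reuses the existing
 * rmap_items without new allocations.
 */
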
224521fbd591SQi Zheng static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
224631dbd01fSIzik Eidus {
224731dbd01fSIzik Eidus 	struct mm_struct *mm;
224858730ab6SQi Zheng 	struct ksm_mm_slot *mm_slot;
224958730ab6SQi Zheng 	struct mm_slot *slot;
225031dbd01fSIzik Eidus 	struct vm_area_struct *vma;
225121fbd591SQi Zheng 	struct ksm_rmap_item *rmap_item;
2252a5f18ba0SMatthew Wilcox (Oracle) 	struct vma_iterator vmi;
225390bd6fd3SPetr Holasek 	int nid;
225431dbd01fSIzik Eidus 
225558730ab6SQi Zheng 	if (list_empty(&ksm_mm_head.slot.mm_node))
225631dbd01fSIzik Eidus 		return NULL;
225731dbd01fSIzik Eidus 
225858730ab6SQi Zheng 	mm_slot = ksm_scan.mm_slot;
225958730ab6SQi Zheng 	if (mm_slot == &ksm_mm_head) {
22602919bfd0SHugh Dickins 		/*
22612919bfd0SHugh Dickins 		 * A number of pages can hang around indefinitely on per-cpu
22622919bfd0SHugh Dickins 		 * pagevecs, raised page count preventing write_protect_page
22632919bfd0SHugh Dickins 		 * from merging them.  Though it doesn't really matter much,
22642919bfd0SHugh Dickins 		 * it is puzzling to see some stuck in pages_volatile until
22652919bfd0SHugh Dickins 		 * other activity jostles them out, and they also prevented
22662919bfd0SHugh Dickins 		 * LTP's KSM test from succeeding deterministically; so drain
22672919bfd0SHugh Dickins 		 * them here (here rather than on entry to ksm_do_scan(),
22682919bfd0SHugh Dickins 		 * so we don't IPI too often when pages_to_scan is set low).
22692919bfd0SHugh Dickins 		 */
22702919bfd0SHugh Dickins 		lru_add_drain_all();
22712919bfd0SHugh Dickins 
22724146d2d6SHugh Dickins 		/*
22734146d2d6SHugh Dickins 		 * Whereas stale stable_nodes on the stable_tree itself
22744146d2d6SHugh Dickins 		 * get pruned in the regular course of stable_tree_search(),
22754146d2d6SHugh Dickins 		 * those moved out to the migrate_nodes list can accumulate:
22764146d2d6SHugh Dickins 		 * so prune them once before each full scan.
22774146d2d6SHugh Dickins 		 */
22784146d2d6SHugh Dickins 		if (!ksm_merge_across_nodes) {
227921fbd591SQi Zheng 			struct ksm_stable_node *stable_node, *next;
22804146d2d6SHugh Dickins 			struct page *page;
22814146d2d6SHugh Dickins 
228203640418SGeliang Tang 			list_for_each_entry_safe(stable_node, next,
228303640418SGeliang Tang 						 &migrate_nodes, list) {
22842cee57d1SYang Shi 				page = get_ksm_page(stable_node,
22852cee57d1SYang Shi 						    GET_KSM_PAGE_NOLOCK);
22864146d2d6SHugh Dickins 				if (page)
22874146d2d6SHugh Dickins 					put_page(page);
22884146d2d6SHugh Dickins 				cond_resched();
22894146d2d6SHugh Dickins 			}
22904146d2d6SHugh Dickins 		}
22914146d2d6SHugh Dickins 
2292ef53d16cSHugh Dickins 		for (nid = 0; nid < ksm_nr_node_ids; nid++)
229390bd6fd3SPetr Holasek 			root_unstable_tree[nid] = RB_ROOT;
229431dbd01fSIzik Eidus 
229531dbd01fSIzik Eidus 		spin_lock(&ksm_mmlist_lock);
229658730ab6SQi Zheng 		slot = list_entry(mm_slot->slot.mm_node.next,
229758730ab6SQi Zheng 				  struct mm_slot, mm_node);
229858730ab6SQi Zheng 		mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
229958730ab6SQi Zheng 		ksm_scan.mm_slot = mm_slot;
230031dbd01fSIzik Eidus 		spin_unlock(&ksm_mmlist_lock);
23012b472611SHugh Dickins 		/*
23022b472611SHugh Dickins 		 * Although we tested list_empty() above, a racing __ksm_exit
23032b472611SHugh Dickins 		 * of the last mm on the list may have removed it since then.
23042b472611SHugh Dickins 		 */
230558730ab6SQi Zheng 		if (mm_slot == &ksm_mm_head)
23062b472611SHugh Dickins 			return NULL;
230731dbd01fSIzik Eidus next_mm:
230831dbd01fSIzik Eidus 		ksm_scan.address = 0;
230958730ab6SQi Zheng 		ksm_scan.rmap_list = &mm_slot->rmap_list;
231031dbd01fSIzik Eidus 	}
231131dbd01fSIzik Eidus 
231258730ab6SQi Zheng 	slot = &mm_slot->slot;
231331dbd01fSIzik Eidus 	mm = slot->mm;
2314a5f18ba0SMatthew Wilcox (Oracle) 	vma_iter_init(&vmi, mm, ksm_scan.address);
2315a5f18ba0SMatthew Wilcox (Oracle) 
2316d8ed45c5SMichel Lespinasse 	mmap_read_lock(mm);
23179ba69294SHugh Dickins 	if (ksm_test_exit(mm))
2318a5f18ba0SMatthew Wilcox (Oracle) 		goto no_vmas;
23199ba69294SHugh Dickins 
2320a5f18ba0SMatthew Wilcox (Oracle) 	for_each_vma(vmi, vma) {
232131dbd01fSIzik Eidus 		if (!(vma->vm_flags & VM_MERGEABLE))
232231dbd01fSIzik Eidus 			continue;
232331dbd01fSIzik Eidus 		if (ksm_scan.address < vma->vm_start)
232431dbd01fSIzik Eidus 			ksm_scan.address = vma->vm_start;
232531dbd01fSIzik Eidus 		if (!vma->anon_vma)
232631dbd01fSIzik Eidus 			ksm_scan.address = vma->vm_end;
232731dbd01fSIzik Eidus 
232831dbd01fSIzik Eidus 		while (ksm_scan.address < vma->vm_end) {
23299ba69294SHugh Dickins 			if (ksm_test_exit(mm))
23309ba69294SHugh Dickins 				break;
233131dbd01fSIzik Eidus 			*page = follow_page(vma, ksm_scan.address, FOLL_GET);
2332f7091ed6SHaiyue Wang 			if (IS_ERR_OR_NULL(*page)) {
233321ae5b01SAndrea Arcangeli 				ksm_scan.address += PAGE_SIZE;
233421ae5b01SAndrea Arcangeli 				cond_resched();
233521ae5b01SAndrea Arcangeli 				continue;
233621ae5b01SAndrea Arcangeli 			}
2337f7091ed6SHaiyue Wang 			if (is_zone_device_page(*page))
2338f7091ed6SHaiyue Wang 				goto next_page;
2339f765f540SKirill A. Shutemov 			if (PageAnon(*page)) {
234031dbd01fSIzik Eidus 				flush_anon_page(vma, *page, ksm_scan.address);
234131dbd01fSIzik Eidus 				flush_dcache_page(*page);
234258730ab6SQi Zheng 				rmap_item = get_next_rmap_item(mm_slot,
23436514d511SHugh Dickins 					ksm_scan.rmap_list, ksm_scan.address);
234431dbd01fSIzik Eidus 				if (rmap_item) {
23456514d511SHugh Dickins 					ksm_scan.rmap_list =
23466514d511SHugh Dickins 							&rmap_item->rmap_list;
234731dbd01fSIzik Eidus 					ksm_scan.address += PAGE_SIZE;
234831dbd01fSIzik Eidus 				} else
234931dbd01fSIzik Eidus 					put_page(*page);
2350d8ed45c5SMichel Lespinasse 				mmap_read_unlock(mm);
235131dbd01fSIzik Eidus 				return rmap_item;
235231dbd01fSIzik Eidus 			}
2353f7091ed6SHaiyue Wang next_page:
235431dbd01fSIzik Eidus 			put_page(*page);
235531dbd01fSIzik Eidus 			ksm_scan.address += PAGE_SIZE;
235631dbd01fSIzik Eidus 			cond_resched();
235731dbd01fSIzik Eidus 		}
235831dbd01fSIzik Eidus 	}
235931dbd01fSIzik Eidus 
23609ba69294SHugh Dickins 	if (ksm_test_exit(mm)) {
2361a5f18ba0SMatthew Wilcox (Oracle) no_vmas:
23629ba69294SHugh Dickins 		ksm_scan.address = 0;
236358730ab6SQi Zheng 		ksm_scan.rmap_list = &mm_slot->rmap_list;
23649ba69294SHugh Dickins 	}
236531dbd01fSIzik Eidus 	/*
236631dbd01fSIzik Eidus 	 * Nuke all the rmap_items that are above this current rmap:
236731dbd01fSIzik Eidus 	 * because there were no VM_MERGEABLE vmas with such addresses.
236831dbd01fSIzik Eidus 	 */
2369420be4edSChengyang Fan 	remove_trailing_rmap_items(ksm_scan.rmap_list);
237031dbd01fSIzik Eidus 
237131dbd01fSIzik Eidus 	spin_lock(&ksm_mmlist_lock);
237258730ab6SQi Zheng 	slot = list_entry(mm_slot->slot.mm_node.next,
237358730ab6SQi Zheng 			  struct mm_slot, mm_node);
237458730ab6SQi Zheng 	ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
2375cd551f97SHugh Dickins 	if (ksm_scan.address == 0) {
2376cd551f97SHugh Dickins 		/*
2377c1e8d7c6SMichel Lespinasse 		 * We've completed a full scan of all vmas, holding mmap_lock
2378cd551f97SHugh Dickins 		 * throughout, and found no VM_MERGEABLE: so do the same as
2379cd551f97SHugh Dickins 		 * __ksm_exit does to remove this mm from all our lists now.
23809ba69294SHugh Dickins 		 * This applies either when cleaning up after __ksm_exit
23819ba69294SHugh Dickins 		 * (but beware: we can reach here even before __ksm_exit),
23829ba69294SHugh Dickins 		 * or when all VM_MERGEABLE areas have been unmapped (and
2383c1e8d7c6SMichel Lespinasse 		 * mmap_lock then protects against race with MADV_MERGEABLE).
2384cd551f97SHugh Dickins 		 */
238558730ab6SQi Zheng 		hash_del(&mm_slot->slot.hash);
238658730ab6SQi Zheng 		list_del(&mm_slot->slot.mm_node);
23879ba69294SHugh Dickins 		spin_unlock(&ksm_mmlist_lock);
23889ba69294SHugh Dickins 
238958730ab6SQi Zheng 		mm_slot_free(mm_slot_cache, mm_slot);
2390cd551f97SHugh Dickins 		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
2391d8ed45c5SMichel Lespinasse 		mmap_read_unlock(mm);
23929ba69294SHugh Dickins 		mmdrop(mm);
23939ba69294SHugh Dickins 	} else {
2394d8ed45c5SMichel Lespinasse 		mmap_read_unlock(mm);
23957496fea9SZhou Chengming 		/*
23963e4e28c5SMichel Lespinasse 		 * mmap_read_unlock(mm) first because after
23977496fea9SZhou Chengming 		 * spin_unlock(&ksm_mmlist_lock) run, the "mm" may
23987496fea9SZhou Chengming 		 * already have been freed under us by __ksm_exit()
23997496fea9SZhou Chengming 		 * because the "mm_slot" is still hashed and
24007496fea9SZhou Chengming 		 * ksm_scan.mm_slot doesn't point to it anymore.
24017496fea9SZhou Chengming 		 */
24027496fea9SZhou Chengming 		spin_unlock(&ksm_mmlist_lock);
24039ba69294SHugh Dickins 	}
240431dbd01fSIzik Eidus 
240531dbd01fSIzik Eidus 	/* Repeat until we've completed scanning the whole list */
240658730ab6SQi Zheng 	mm_slot = ksm_scan.mm_slot;
240758730ab6SQi Zheng 	if (mm_slot != &ksm_mm_head)
240831dbd01fSIzik Eidus 		goto next_mm;
240931dbd01fSIzik Eidus 
241031dbd01fSIzik Eidus 	ksm_scan.seqnr++;
241131dbd01fSIzik Eidus 	return NULL;
241231dbd01fSIzik Eidus }
241331dbd01fSIzik Eidus 
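/*
 * ksm_scan is, in effect, a resumable cursor: mm_slot identifies the mm
 * being walked, address the next virtual address to examine, and
 * rmap_list the insertion point in that mm's list of rmap_items.  Each
 * call returns one anonymous page (referenced, with *page set) or NULL
 * once the tail of the mm list is reached, at which point seqnr records
 * the completion of another full scan.
 */
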
241431dbd01fSIzik Eidus /**
241531dbd01fSIzik Eidus  * ksm_do_scan - the ksm scanner main worker function.
2416b7701a5fSMike Rapoport  * @scan_npages: number of pages we want to scan before we return.
241731dbd01fSIzik Eidus  */
241831dbd01fSIzik Eidus static void ksm_do_scan(unsigned int scan_npages)
241931dbd01fSIzik Eidus {
242021fbd591SQi Zheng 	struct ksm_rmap_item *rmap_item;
24213f649ab7SKees Cook 	struct page *page;
242231dbd01fSIzik Eidus 
2423878aee7dSAndrea Arcangeli 	while (scan_npages-- && likely(!freezing(current))) {
242431dbd01fSIzik Eidus 		cond_resched();
242531dbd01fSIzik Eidus 		rmap_item = scan_get_next_rmap_item(&page);
242631dbd01fSIzik Eidus 		if (!rmap_item)
242731dbd01fSIzik Eidus 			return;
242831dbd01fSIzik Eidus 		cmp_and_merge_page(page, rmap_item);
242931dbd01fSIzik Eidus 		put_page(page);
243031dbd01fSIzik Eidus 	}
243131dbd01fSIzik Eidus }
243231dbd01fSIzik Eidus 
24336e158384SHugh Dickins static int ksmd_should_run(void)
24346e158384SHugh Dickins {
243558730ab6SQi Zheng 	return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.slot.mm_node);
24366e158384SHugh Dickins }
24376e158384SHugh Dickins 
243831dbd01fSIzik Eidus static int ksm_scan_thread(void *nothing)
243931dbd01fSIzik Eidus {
2440fcf9a0efSKirill Tkhai 	unsigned int sleep_ms;
2441fcf9a0efSKirill Tkhai 
2442878aee7dSAndrea Arcangeli 	set_freezable();
2443339aa624SIzik Eidus 	set_user_nice(current, 5);
244431dbd01fSIzik Eidus 
244531dbd01fSIzik Eidus 	while (!kthread_should_stop()) {
244631dbd01fSIzik Eidus 		mutex_lock(&ksm_thread_mutex);
2447ef4d43a8SHugh Dickins 		wait_while_offlining();
24486e158384SHugh Dickins 		if (ksmd_should_run())
244931dbd01fSIzik Eidus 			ksm_do_scan(ksm_thread_pages_to_scan);
245031dbd01fSIzik Eidus 		mutex_unlock(&ksm_thread_mutex);
24516e158384SHugh Dickins 
2452878aee7dSAndrea Arcangeli 		try_to_freeze();
2453878aee7dSAndrea Arcangeli 
24546e158384SHugh Dickins 		if (ksmd_should_run()) {
2455fcf9a0efSKirill Tkhai 			sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs);
2456fcf9a0efSKirill Tkhai 			wait_event_interruptible_timeout(ksm_iter_wait,
2457fcf9a0efSKirill Tkhai 				sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs),
2458fcf9a0efSKirill Tkhai 				msecs_to_jiffies(sleep_ms));
245931dbd01fSIzik Eidus 		} else {
2460878aee7dSAndrea Arcangeli 			wait_event_freezable(ksm_thread_wait,
24616e158384SHugh Dickins 				ksmd_should_run() || kthread_should_stop());
246231dbd01fSIzik Eidus 		}
246331dbd01fSIzik Eidus 	}
246431dbd01fSIzik Eidus 	return 0;
246531dbd01fSIzik Eidus }
246631dbd01fSIzik Eidus 
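/*
 * Back-of-the-envelope scan rate (illustrative numbers only): with the
 * default pages_to_scan = 100 and sleep_millisecs = 20, ksmd examines
 * at most 100 pages per ~20ms batch, i.e. roughly 5000 pages (~20MB
 * with 4K pages) per second of scanning.
 */
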
2467f8af4da3SHugh Dickins int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
2468f8af4da3SHugh Dickins 		unsigned long end, int advice, unsigned long *vm_flags)
2469f8af4da3SHugh Dickins {
2470f8af4da3SHugh Dickins 	struct mm_struct *mm = vma->vm_mm;
2471d952b791SHugh Dickins 	int err;
2472f8af4da3SHugh Dickins 
2473f8af4da3SHugh Dickins 	switch (advice) {
2474f8af4da3SHugh Dickins 	case MADV_MERGEABLE:
2475f8af4da3SHugh Dickins 		/*
2476f8af4da3SHugh Dickins 		 * Be somewhat over-protective for now!
2477f8af4da3SHugh Dickins 		 */
2478f8af4da3SHugh Dickins 		if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
2479f8af4da3SHugh Dickins 				 VM_PFNMAP | VM_IO | VM_DONTEXPAND |
24800661a336SKirill A. Shutemov 				 VM_HUGETLB | VM_MIXEDMAP))
2481f8af4da3SHugh Dickins 			return 0;		/* just ignore the advice */
2482f8af4da3SHugh Dickins 
2483e1fb4a08SDave Jiang 		if (vma_is_dax(vma))
2484e1fb4a08SDave Jiang 			return 0;
2485e1fb4a08SDave Jiang 
248612564485SShawn Anastasio #ifdef VM_SAO
248712564485SShawn Anastasio 		if (*vm_flags & VM_SAO)
248812564485SShawn Anastasio 			return 0;
248912564485SShawn Anastasio #endif
249074a04967SKhalid Aziz #ifdef VM_SPARC_ADI
249174a04967SKhalid Aziz 		if (*vm_flags & VM_SPARC_ADI)
249274a04967SKhalid Aziz 			return 0;
249374a04967SKhalid Aziz #endif
2494cc2383ecSKonstantin Khlebnikov 
2495d952b791SHugh Dickins 		if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
2496d952b791SHugh Dickins 			err = __ksm_enter(mm);
2497d952b791SHugh Dickins 			if (err)
2498d952b791SHugh Dickins 				return err;
2499d952b791SHugh Dickins 		}
2500f8af4da3SHugh Dickins 
2501f8af4da3SHugh Dickins 		*vm_flags |= VM_MERGEABLE;
2502f8af4da3SHugh Dickins 		break;
2503f8af4da3SHugh Dickins 
2504f8af4da3SHugh Dickins 	case MADV_UNMERGEABLE:
2505f8af4da3SHugh Dickins 		if (!(*vm_flags & VM_MERGEABLE))
2506f8af4da3SHugh Dickins 			return 0;		/* just ignore the advice */
2507f8af4da3SHugh Dickins 
2508d952b791SHugh Dickins 		if (vma->anon_vma) {
2509d952b791SHugh Dickins 			err = unmerge_ksm_pages(vma, start, end);
2510d952b791SHugh Dickins 			if (err)
2511d952b791SHugh Dickins 				return err;
2512d952b791SHugh Dickins 		}
2513f8af4da3SHugh Dickins 
2514f8af4da3SHugh Dickins 		*vm_flags &= ~VM_MERGEABLE;
2515f8af4da3SHugh Dickins 		break;
2516f8af4da3SHugh Dickins 	}
2517f8af4da3SHugh Dickins 
2518f8af4da3SHugh Dickins 	return 0;
2519f8af4da3SHugh Dickins }
252033cf1707SBharata B Rao EXPORT_SYMBOL_GPL(ksm_madvise);
2521f8af4da3SHugh Dickins 
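/*
 * A minimal userspace sketch of opting memory into KSM (the buffer and
 * its length here are hypothetical; error handling trimmed):
 *
 *	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (madvise(buf, len, MADV_MERGEABLE))
 *		perror("madvise");
 *
 * For the VMA types excluded above the advice is simply ignored
 * (returning 0); MADV_UNMERGEABLE undoes it, breaking COW on any pages
 * already merged from the range.
 */
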
2522f8af4da3SHugh Dickins int __ksm_enter(struct mm_struct *mm)
2523f8af4da3SHugh Dickins {
252421fbd591SQi Zheng 	struct ksm_mm_slot *mm_slot;
252558730ab6SQi Zheng 	struct mm_slot *slot;
25266e158384SHugh Dickins 	int needs_wakeup;
25276e158384SHugh Dickins 
252858730ab6SQi Zheng 	mm_slot = mm_slot_alloc(mm_slot_cache);
252931dbd01fSIzik Eidus 	if (!mm_slot)
253031dbd01fSIzik Eidus 		return -ENOMEM;
253131dbd01fSIzik Eidus 
253258730ab6SQi Zheng 	slot = &mm_slot->slot;
253358730ab6SQi Zheng 
25346e158384SHugh Dickins 	/* Check ksm_run too?  Would need tighter locking */
253558730ab6SQi Zheng 	needs_wakeup = list_empty(&ksm_mm_head.slot.mm_node);
25366e158384SHugh Dickins 
253731dbd01fSIzik Eidus 	spin_lock(&ksm_mmlist_lock);
253858730ab6SQi Zheng 	mm_slot_insert(mm_slots_hash, mm, slot);
253931dbd01fSIzik Eidus 	/*
2540cbf86cfeSHugh Dickins 	 * When KSM_RUN_MERGE (or KSM_RUN_STOP),
2541cbf86cfeSHugh Dickins 	 * insert just behind the scanning cursor, to let the area settle
254231dbd01fSIzik Eidus 	 * down a little; when fork is followed by immediate exec, we don't
254331dbd01fSIzik Eidus 	 * want ksmd to waste time setting up and tearing down an rmap_list.
2544cbf86cfeSHugh Dickins 	 *
2545cbf86cfeSHugh Dickins 	 * But when KSM_RUN_UNMERGE, it's important to insert ahead of its
2546cbf86cfeSHugh Dickins 	 * scanning cursor, otherwise KSM pages in newly forked mms will be
2547cbf86cfeSHugh Dickins 	 * missed: then we might as well insert at the end of the list.
254831dbd01fSIzik Eidus 	 */
2549cbf86cfeSHugh Dickins 	if (ksm_run & KSM_RUN_UNMERGE)
255058730ab6SQi Zheng 		list_add_tail(&slot->mm_node, &ksm_mm_head.slot.mm_node);
2551cbf86cfeSHugh Dickins 	else
255258730ab6SQi Zheng 		list_add_tail(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node);
255331dbd01fSIzik Eidus 	spin_unlock(&ksm_mmlist_lock);
255431dbd01fSIzik Eidus 
2555f8af4da3SHugh Dickins 	set_bit(MMF_VM_MERGEABLE, &mm->flags);
2556f1f10076SVegard Nossum 	mmgrab(mm);
25576e158384SHugh Dickins 
25586e158384SHugh Dickins 	if (needs_wakeup)
25596e158384SHugh Dickins 		wake_up_interruptible(&ksm_thread_wait);
25606e158384SHugh Dickins 
2561f8af4da3SHugh Dickins 	return 0;
2562f8af4da3SHugh Dickins }
2563f8af4da3SHugh Dickins 
25641c2fb7a4SAndrea Arcangeli void __ksm_exit(struct mm_struct *mm)
2565f8af4da3SHugh Dickins {
256621fbd591SQi Zheng 	struct ksm_mm_slot *mm_slot;
256758730ab6SQi Zheng 	struct mm_slot *slot;
25689ba69294SHugh Dickins 	int easy_to_free = 0;
2569cd551f97SHugh Dickins 
257031dbd01fSIzik Eidus 	/*
25719ba69294SHugh Dickins 	 * This process is exiting: if it's straightforward (as is the
25729ba69294SHugh Dickins 	 * case when ksmd was never running), free mm_slot immediately.
25739ba69294SHugh Dickins 	 * But if it's at the cursor or has rmap_items linked to it, use
2574c1e8d7c6SMichel Lespinasse 	 * mmap_lock to synchronize with any break_cows before pagetables
25759ba69294SHugh Dickins 	 * are freed, and leave the mm_slot on the list for ksmd to free.
25769ba69294SHugh Dickins 	 * Beware: ksm may already have noticed it exiting and freed the slot.
257731dbd01fSIzik Eidus 	 */
25789ba69294SHugh Dickins 
2579cd551f97SHugh Dickins 	spin_lock(&ksm_mmlist_lock);
258058730ab6SQi Zheng 	slot = mm_slot_lookup(mm_slots_hash, mm);
258158730ab6SQi Zheng 	mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
25829ba69294SHugh Dickins 	if (mm_slot && ksm_scan.mm_slot != mm_slot) {
25836514d511SHugh Dickins 		if (!mm_slot->rmap_list) {
258458730ab6SQi Zheng 			hash_del(&slot->hash);
258558730ab6SQi Zheng 			list_del(&slot->mm_node);
25869ba69294SHugh Dickins 			easy_to_free = 1;
25879ba69294SHugh Dickins 		} else {
258858730ab6SQi Zheng 			list_move(&slot->mm_node,
258958730ab6SQi Zheng 				  &ksm_scan.mm_slot->slot.mm_node);
25909ba69294SHugh Dickins 		}
25919ba69294SHugh Dickins 	}
2592cd551f97SHugh Dickins 	spin_unlock(&ksm_mmlist_lock);
2593cd551f97SHugh Dickins 
25949ba69294SHugh Dickins 	if (easy_to_free) {
259558730ab6SQi Zheng 		mm_slot_free(mm_slot_cache, mm_slot);
2596cd551f97SHugh Dickins 		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
25979ba69294SHugh Dickins 		mmdrop(mm);
25989ba69294SHugh Dickins 	} else if (mm_slot) {
2599d8ed45c5SMichel Lespinasse 		mmap_write_lock(mm);
2600d8ed45c5SMichel Lespinasse 		mmap_write_unlock(mm);
26019ba69294SHugh Dickins 	}
2602f8af4da3SHugh Dickins }
260331dbd01fSIzik Eidus 
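/*
 * The empty mmap_write_lock()/mmap_write_unlock() pair above is a
 * barrier, not an oversight: it makes the exiting task wait until any
 * concurrent break_cow()/unmerge work holding mmap_lock for read has
 * drained, before its page tables are torn down and ksmd later frees
 * the slot.
 */
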
2604cbf86cfeSHugh Dickins struct page *ksm_might_need_to_copy(struct page *page,
26055ad64688SHugh Dickins 			struct vm_area_struct *vma, unsigned long address)
26065ad64688SHugh Dickins {
2607e05b3453SMatthew Wilcox (Oracle) 	struct folio *folio = page_folio(page);
2608e05b3453SMatthew Wilcox (Oracle) 	struct anon_vma *anon_vma = folio_anon_vma(folio);
26095ad64688SHugh Dickins 	struct page *new_page;
26105ad64688SHugh Dickins 
2611cbf86cfeSHugh Dickins 	if (PageKsm(page)) {
2612cbf86cfeSHugh Dickins 		if (page_stable_node(page) &&
2613cbf86cfeSHugh Dickins 		    !(ksm_run & KSM_RUN_UNMERGE))
2614cbf86cfeSHugh Dickins 			return page;	/* no need to copy it */
2615cbf86cfeSHugh Dickins 	} else if (!anon_vma) {
2616cbf86cfeSHugh Dickins 		return page;		/* no need to copy it */
2617e1c63e11SNanyong Sun 	} else if (page->index == linear_page_index(vma, address) &&
2618e1c63e11SNanyong Sun 			anon_vma->root == vma->anon_vma->root) {
2619cbf86cfeSHugh Dickins 		return page;		/* still no need to copy it */
2620cbf86cfeSHugh Dickins 	}
2621cbf86cfeSHugh Dickins 	if (!PageUptodate(page))
2622cbf86cfeSHugh Dickins 		return page;		/* let do_swap_page report the error */
2623cbf86cfeSHugh Dickins 
26245ad64688SHugh Dickins 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
26258f425e4eSMatthew Wilcox (Oracle) 	if (new_page &&
26268f425e4eSMatthew Wilcox (Oracle) 	    mem_cgroup_charge(page_folio(new_page), vma->vm_mm, GFP_KERNEL)) {
262762fdb163SHugh Dickins 		put_page(new_page);
262862fdb163SHugh Dickins 		new_page = NULL;
262962fdb163SHugh Dickins 	}
26305ad64688SHugh Dickins 	if (new_page) {
26315ad64688SHugh Dickins 		copy_user_highpage(new_page, page, address, vma);
26325ad64688SHugh Dickins 
26335ad64688SHugh Dickins 		SetPageDirty(new_page);
26345ad64688SHugh Dickins 		__SetPageUptodate(new_page);
263548c935adSKirill A. Shutemov 		__SetPageLocked(new_page);
26364d45c3afSYang Yang #ifdef CONFIG_SWAP
26374d45c3afSYang Yang 		count_vm_event(KSM_SWPIN_COPY);
26384d45c3afSYang Yang #endif
26395ad64688SHugh Dickins 	}
26405ad64688SHugh Dickins 
26415ad64688SHugh Dickins 	return new_page;
26425ad64688SHugh Dickins }
26435ad64688SHugh Dickins 
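/*
 * This runs on swap-in paths such as do_swap_page(): a page read back
 * from swap may still be a KSM page, or an anon page whose anon_vma no
 * longer matches the faulting vma (KSM_RUN_UNMERGE in progress, or the
 * mapping moved).  Rather than wire a possibly-shared page into the
 * wrong mm, a private copy is made and handed back in its place.
 */
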
26446d4675e6SMinchan Kim void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
2645e9995ef9SHugh Dickins {
264621fbd591SQi Zheng 	struct ksm_stable_node *stable_node;
264721fbd591SQi Zheng 	struct ksm_rmap_item *rmap_item;
2648e9995ef9SHugh Dickins 	int search_new_forks = 0;
2649e9995ef9SHugh Dickins 
26502f031c6fSMatthew Wilcox (Oracle) 	VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio);
26519f32624bSJoonsoo Kim 
26529f32624bSJoonsoo Kim 	/*
26539f32624bSJoonsoo Kim 	 * Rely on the page lock to protect against concurrent modifications
26549f32624bSJoonsoo Kim 	 * to that page's node of the stable tree.
26559f32624bSJoonsoo Kim 	 */
26562f031c6fSMatthew Wilcox (Oracle) 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2657e9995ef9SHugh Dickins 
26582f031c6fSMatthew Wilcox (Oracle) 	stable_node = folio_stable_node(folio);
2659e9995ef9SHugh Dickins 	if (!stable_node)
26601df631aeSMinchan Kim 		return;
2661e9995ef9SHugh Dickins again:
2662b67bfe0dSSasha Levin 	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
2663e9995ef9SHugh Dickins 		struct anon_vma *anon_vma = rmap_item->anon_vma;
26645beb4930SRik van Riel 		struct anon_vma_chain *vmac;
2665e9995ef9SHugh Dickins 		struct vm_area_struct *vma;
2666e9995ef9SHugh Dickins 
2667ad12695fSAndrea Arcangeli 		cond_resched();
26686d4675e6SMinchan Kim 		if (!anon_vma_trylock_read(anon_vma)) {
26696d4675e6SMinchan Kim 			if (rwc->try_lock) {
26706d4675e6SMinchan Kim 				rwc->contended = true;
26716d4675e6SMinchan Kim 				return;
26726d4675e6SMinchan Kim 			}
2673b6b19f25SHugh Dickins 			anon_vma_lock_read(anon_vma);
26746d4675e6SMinchan Kim 		}
2675bf181b9fSMichel Lespinasse 		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
2676bf181b9fSMichel Lespinasse 					       0, ULONG_MAX) {
26771105a2fcSJia He 			unsigned long addr;
26781105a2fcSJia He 
2679ad12695fSAndrea Arcangeli 			cond_resched();
26805beb4930SRik van Riel 			vma = vmac->vma;
26811105a2fcSJia He 
26821105a2fcSJia He 			/* Ignore the stable/unstable/sqnr flags */
2683cd7fae26SMiaohe Lin 			addr = rmap_item->address & PAGE_MASK;
26841105a2fcSJia He 
26851105a2fcSJia He 			if (addr < vma->vm_start || addr >= vma->vm_end)
2686e9995ef9SHugh Dickins 				continue;
2687e9995ef9SHugh Dickins 			/*
2688e9995ef9SHugh Dickins 			 * Initially we examine only the vma which covers this
2689e9995ef9SHugh Dickins 			 * rmap_item; but later, if there is still work to do,
2690e9995ef9SHugh Dickins 			 * we examine covering vmas in other mms: in case they
2691e9995ef9SHugh Dickins 			 * were forked from the original since ksmd passed.
2692e9995ef9SHugh Dickins 			 */
2693e9995ef9SHugh Dickins 			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
2694e9995ef9SHugh Dickins 				continue;
2695e9995ef9SHugh Dickins 
26960dd1c7bbSJoonsoo Kim 			if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
26970dd1c7bbSJoonsoo Kim 				continue;
26980dd1c7bbSJoonsoo Kim 
26992f031c6fSMatthew Wilcox (Oracle) 			if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) {
2700b6b19f25SHugh Dickins 				anon_vma_unlock_read(anon_vma);
27011df631aeSMinchan Kim 				return;
2702e9995ef9SHugh Dickins 			}
27032f031c6fSMatthew Wilcox (Oracle) 			if (rwc->done && rwc->done(folio)) {
27040dd1c7bbSJoonsoo Kim 				anon_vma_unlock_read(anon_vma);
27051df631aeSMinchan Kim 				return;
27060dd1c7bbSJoonsoo Kim 			}
2707e9995ef9SHugh Dickins 		}
2708b6b19f25SHugh Dickins 		anon_vma_unlock_read(anon_vma);
2709e9995ef9SHugh Dickins 	}
2710e9995ef9SHugh Dickins 	if (!search_new_forks++)
2711e9995ef9SHugh Dickins 		goto again;
2712e9995ef9SHugh Dickins }
2713e9995ef9SHugh Dickins 
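/*
 * The walk is two-pass by design: pass one (search_new_forks == 0)
 * visits only the vma each rmap_item was created against; pass two
 * visits covering vmas of *other* mms sharing the anon_vma, picking up
 * mappings forked after ksmd recorded the item.  A parent's merged
 * page inherited by a child over fork(), for instance, is only reached
 * on the second pass.
 */
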
271452629506SJoonsoo Kim #ifdef CONFIG_MIGRATION
271519138349SMatthew Wilcox (Oracle) void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
2716e9995ef9SHugh Dickins {
271721fbd591SQi Zheng 	struct ksm_stable_node *stable_node;
2718e9995ef9SHugh Dickins 
271919138349SMatthew Wilcox (Oracle) 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
272019138349SMatthew Wilcox (Oracle) 	VM_BUG_ON_FOLIO(!folio_test_locked(newfolio), newfolio);
272119138349SMatthew Wilcox (Oracle) 	VM_BUG_ON_FOLIO(newfolio->mapping != folio->mapping, newfolio);
2722e9995ef9SHugh Dickins 
272319138349SMatthew Wilcox (Oracle) 	stable_node = folio_stable_node(folio);
2724e9995ef9SHugh Dickins 	if (stable_node) {
272519138349SMatthew Wilcox (Oracle) 		VM_BUG_ON_FOLIO(stable_node->kpfn != folio_pfn(folio), folio);
272619138349SMatthew Wilcox (Oracle) 		stable_node->kpfn = folio_pfn(newfolio);
2727c8d6553bSHugh Dickins 		/*
272819138349SMatthew Wilcox (Oracle) 		 * newfolio->mapping was set in advance; now we need smp_wmb()
2729c8d6553bSHugh Dickins 		 * to make sure that the new stable_node->kpfn is visible
273019138349SMatthew Wilcox (Oracle) 		 * to get_ksm_page() before it can see that folio->mapping
273119138349SMatthew Wilcox (Oracle) 		 * has gone stale (or that folio_test_swapcache has been cleared).
2732c8d6553bSHugh Dickins 		 */
2733c8d6553bSHugh Dickins 		smp_wmb();
273419138349SMatthew Wilcox (Oracle) 		set_page_stable_node(&folio->page, NULL);
2735e9995ef9SHugh Dickins 	}
2736e9995ef9SHugh Dickins }
2737e9995ef9SHugh Dickins #endif /* CONFIG_MIGRATION */
2738e9995ef9SHugh Dickins 
273962b61f61SHugh Dickins #ifdef CONFIG_MEMORY_HOTREMOVE
2740ef4d43a8SHugh Dickins static void wait_while_offlining(void)
2741ef4d43a8SHugh Dickins {
2742ef4d43a8SHugh Dickins 	while (ksm_run & KSM_RUN_OFFLINE) {
2743ef4d43a8SHugh Dickins 		mutex_unlock(&ksm_thread_mutex);
2744ef4d43a8SHugh Dickins 		wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE),
274574316201SNeilBrown 			    TASK_UNINTERRUPTIBLE);
2746ef4d43a8SHugh Dickins 		mutex_lock(&ksm_thread_mutex);
2747ef4d43a8SHugh Dickins 	}
2748ef4d43a8SHugh Dickins }
2749ef4d43a8SHugh Dickins 
275021fbd591SQi Zheng static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node,
27512c653d0eSAndrea Arcangeli 					 unsigned long start_pfn,
27522c653d0eSAndrea Arcangeli 					 unsigned long end_pfn)
27532c653d0eSAndrea Arcangeli {
27542c653d0eSAndrea Arcangeli 	if (stable_node->kpfn >= start_pfn &&
27552c653d0eSAndrea Arcangeli 	    stable_node->kpfn < end_pfn) {
27562c653d0eSAndrea Arcangeli 		/*
27572c653d0eSAndrea Arcangeli 		 * Don't get_ksm_page, page has already gone:
27582c653d0eSAndrea Arcangeli 		 * which is why we keep kpfn instead of page*
27592c653d0eSAndrea Arcangeli 		 */
27602c653d0eSAndrea Arcangeli 		remove_node_from_stable_tree(stable_node);
27612c653d0eSAndrea Arcangeli 		return true;
27622c653d0eSAndrea Arcangeli 	}
27632c653d0eSAndrea Arcangeli 	return false;
27642c653d0eSAndrea Arcangeli }
27652c653d0eSAndrea Arcangeli 
276621fbd591SQi Zheng static bool stable_node_chain_remove_range(struct ksm_stable_node *stable_node,
27672c653d0eSAndrea Arcangeli 					   unsigned long start_pfn,
27682c653d0eSAndrea Arcangeli 					   unsigned long end_pfn,
27692c653d0eSAndrea Arcangeli 					   struct rb_root *root)
27702c653d0eSAndrea Arcangeli {
277121fbd591SQi Zheng 	struct ksm_stable_node *dup;
27722c653d0eSAndrea Arcangeli 	struct hlist_node *hlist_safe;
27732c653d0eSAndrea Arcangeli 
27742c653d0eSAndrea Arcangeli 	if (!is_stable_node_chain(stable_node)) {
27752c653d0eSAndrea Arcangeli 		VM_BUG_ON(is_stable_node_dup(stable_node));
27762c653d0eSAndrea Arcangeli 		return stable_node_dup_remove_range(stable_node, start_pfn,
27772c653d0eSAndrea Arcangeli 						    end_pfn);
27782c653d0eSAndrea Arcangeli 	}
27792c653d0eSAndrea Arcangeli 
27802c653d0eSAndrea Arcangeli 	hlist_for_each_entry_safe(dup, hlist_safe,
27812c653d0eSAndrea Arcangeli 				  &stable_node->hlist, hlist_dup) {
27822c653d0eSAndrea Arcangeli 		VM_BUG_ON(!is_stable_node_dup(dup));
27832c653d0eSAndrea Arcangeli 		stable_node_dup_remove_range(dup, start_pfn, end_pfn);
27842c653d0eSAndrea Arcangeli 	}
27852c653d0eSAndrea Arcangeli 	if (hlist_empty(&stable_node->hlist)) {
27862c653d0eSAndrea Arcangeli 		free_stable_node_chain(stable_node, root);
27872c653d0eSAndrea Arcangeli 		return true; /* notify caller that tree was rebalanced */
27882c653d0eSAndrea Arcangeli 	} else
27892c653d0eSAndrea Arcangeli 		return false;
27902c653d0eSAndrea Arcangeli }
27912c653d0eSAndrea Arcangeli 
2792ee0ea59cSHugh Dickins static void ksm_check_stable_tree(unsigned long start_pfn,
279362b61f61SHugh Dickins 				  unsigned long end_pfn)
279462b61f61SHugh Dickins {
279521fbd591SQi Zheng 	struct ksm_stable_node *stable_node, *next;
279662b61f61SHugh Dickins 	struct rb_node *node;
279790bd6fd3SPetr Holasek 	int nid;
279862b61f61SHugh Dickins 
2799ef53d16cSHugh Dickins 	for (nid = 0; nid < ksm_nr_node_ids; nid++) {
2800ef53d16cSHugh Dickins 		node = rb_first(root_stable_tree + nid);
2801ee0ea59cSHugh Dickins 		while (node) {
280221fbd591SQi Zheng 			stable_node = rb_entry(node, struct ksm_stable_node, node);
28032c653d0eSAndrea Arcangeli 			if (stable_node_chain_remove_range(stable_node,
28042c653d0eSAndrea Arcangeli 							   start_pfn, end_pfn,
28052c653d0eSAndrea Arcangeli 							   root_stable_tree +
28062c653d0eSAndrea Arcangeli 							   nid))
2807ef53d16cSHugh Dickins 				node = rb_first(root_stable_tree + nid);
28082c653d0eSAndrea Arcangeli 			else
2809ee0ea59cSHugh Dickins 				node = rb_next(node);
2810ee0ea59cSHugh Dickins 			cond_resched();
281162b61f61SHugh Dickins 		}
2812ee0ea59cSHugh Dickins 	}
281303640418SGeliang Tang 	list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
28144146d2d6SHugh Dickins 		if (stable_node->kpfn >= start_pfn &&
28154146d2d6SHugh Dickins 		    stable_node->kpfn < end_pfn)
28164146d2d6SHugh Dickins 			remove_node_from_stable_tree(stable_node);
28174146d2d6SHugh Dickins 		cond_resched();
28184146d2d6SHugh Dickins 	}
281962b61f61SHugh Dickins }
282062b61f61SHugh Dickins 
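/*
 * Note the restart from rb_first() whenever a chain removal reports
 * that the tree was rebalanced: rb_next(node) would be unsafe once the
 * node (or its whole chain) has been freed, so the scan conservatively
 * begins that node's tree again.
 */
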
282162b61f61SHugh Dickins static int ksm_memory_callback(struct notifier_block *self,
282262b61f61SHugh Dickins 			       unsigned long action, void *arg)
282362b61f61SHugh Dickins {
282462b61f61SHugh Dickins 	struct memory_notify *mn = arg;
282562b61f61SHugh Dickins 
282662b61f61SHugh Dickins 	switch (action) {
282762b61f61SHugh Dickins 	case MEM_GOING_OFFLINE:
282862b61f61SHugh Dickins 		/*
2829ef4d43a8SHugh Dickins 		 * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items()
2830ef4d43a8SHugh Dickins 		 * and remove_all_stable_nodes() while memory is going offline:
2831ef4d43a8SHugh Dickins 		 * it is unsafe for them to touch the stable tree at this time.
2832ef4d43a8SHugh Dickins 		 * But unmerge_ksm_pages(), rmap lookups and other entry points
2833ef4d43a8SHugh Dickins 		 * which do not need the ksm_thread_mutex are all safe.
283462b61f61SHugh Dickins 		 */
2835ef4d43a8SHugh Dickins 		mutex_lock(&ksm_thread_mutex);
2836ef4d43a8SHugh Dickins 		ksm_run |= KSM_RUN_OFFLINE;
2837ef4d43a8SHugh Dickins 		mutex_unlock(&ksm_thread_mutex);
283862b61f61SHugh Dickins 		break;
283962b61f61SHugh Dickins 
284062b61f61SHugh Dickins 	case MEM_OFFLINE:
284162b61f61SHugh Dickins 		/*
284262b61f61SHugh Dickins 		 * Most of the work is done by page migration; but there might
284362b61f61SHugh Dickins 		 * be a few stable_nodes left over, still pointing to struct
2844ee0ea59cSHugh Dickins 		 * pages which have been offlined: prune those from the tree,
2845ee0ea59cSHugh Dickins 		 * otherwise get_ksm_page() might later try to access a
2846ee0ea59cSHugh Dickins 		 * non-existent struct page.
284762b61f61SHugh Dickins 		 */
2848ee0ea59cSHugh Dickins 		ksm_check_stable_tree(mn->start_pfn,
2849ee0ea59cSHugh Dickins 				      mn->start_pfn + mn->nr_pages);
2850e4a9bc58SJoe Perches 		fallthrough;
285162b61f61SHugh Dickins 	case MEM_CANCEL_OFFLINE:
2852ef4d43a8SHugh Dickins 		mutex_lock(&ksm_thread_mutex);
2853ef4d43a8SHugh Dickins 		ksm_run &= ~KSM_RUN_OFFLINE;
285462b61f61SHugh Dickins 		mutex_unlock(&ksm_thread_mutex);
2855ef4d43a8SHugh Dickins 
2856ef4d43a8SHugh Dickins 		smp_mb();	/* wake_up_bit advises this */
2857ef4d43a8SHugh Dickins 		wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE));
285862b61f61SHugh Dickins 		break;
285962b61f61SHugh Dickins 	}
286062b61f61SHugh Dickins 	return NOTIFY_OK;
286162b61f61SHugh Dickins }
2862ef4d43a8SHugh Dickins #else
2863ef4d43a8SHugh Dickins static void wait_while_offlining(void)
2864ef4d43a8SHugh Dickins {
2865ef4d43a8SHugh Dickins }
286662b61f61SHugh Dickins #endif /* CONFIG_MEMORY_HOTREMOVE */
286762b61f61SHugh Dickins 
28682ffd8679SHugh Dickins #ifdef CONFIG_SYSFS
28692ffd8679SHugh Dickins /*
28702ffd8679SHugh Dickins  * This all compiles without CONFIG_SYSFS, but is a waste of space.
28712ffd8679SHugh Dickins  */
28722ffd8679SHugh Dickins 
287331dbd01fSIzik Eidus #define KSM_ATTR_RO(_name) \
287431dbd01fSIzik Eidus 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
287531dbd01fSIzik Eidus #define KSM_ATTR(_name) \
28761bad2e5cSMiaohe Lin 	static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
287731dbd01fSIzik Eidus 
287831dbd01fSIzik Eidus static ssize_t sleep_millisecs_show(struct kobject *kobj,
287931dbd01fSIzik Eidus 				    struct kobj_attribute *attr, char *buf)
288031dbd01fSIzik Eidus {
2881ae7a927dSJoe Perches 	return sysfs_emit(buf, "%u\n", ksm_thread_sleep_millisecs);
288231dbd01fSIzik Eidus }
288331dbd01fSIzik Eidus 
288431dbd01fSIzik Eidus static ssize_t sleep_millisecs_store(struct kobject *kobj,
288531dbd01fSIzik Eidus 				     struct kobj_attribute *attr,
288631dbd01fSIzik Eidus 				     const char *buf, size_t count)
288731dbd01fSIzik Eidus {
2888dfefd226SAlexey Dobriyan 	unsigned int msecs;
288931dbd01fSIzik Eidus 	int err;
289031dbd01fSIzik Eidus 
2891dfefd226SAlexey Dobriyan 	err = kstrtouint(buf, 10, &msecs);
2892dfefd226SAlexey Dobriyan 	if (err)
289331dbd01fSIzik Eidus 		return -EINVAL;
289431dbd01fSIzik Eidus 
289531dbd01fSIzik Eidus 	ksm_thread_sleep_millisecs = msecs;
2896fcf9a0efSKirill Tkhai 	wake_up_interruptible(&ksm_iter_wait);
289731dbd01fSIzik Eidus 
289831dbd01fSIzik Eidus 	return count;
289931dbd01fSIzik Eidus }
290031dbd01fSIzik Eidus KSM_ATTR(sleep_millisecs);
290131dbd01fSIzik Eidus 
290231dbd01fSIzik Eidus static ssize_t pages_to_scan_show(struct kobject *kobj,
290331dbd01fSIzik Eidus 				  struct kobj_attribute *attr, char *buf)
290431dbd01fSIzik Eidus {
2905ae7a927dSJoe Perches 	return sysfs_emit(buf, "%u\n", ksm_thread_pages_to_scan);
290631dbd01fSIzik Eidus }
290731dbd01fSIzik Eidus 
290831dbd01fSIzik Eidus static ssize_t pages_to_scan_store(struct kobject *kobj,
290931dbd01fSIzik Eidus 				   struct kobj_attribute *attr,
291031dbd01fSIzik Eidus 				   const char *buf, size_t count)
291131dbd01fSIzik Eidus {
2912dfefd226SAlexey Dobriyan 	unsigned int nr_pages;
291331dbd01fSIzik Eidus 	int err;
291431dbd01fSIzik Eidus 
2915dfefd226SAlexey Dobriyan 	err = kstrtouint(buf, 10, &nr_pages);
2916dfefd226SAlexey Dobriyan 	if (err)
291731dbd01fSIzik Eidus 		return -EINVAL;
291831dbd01fSIzik Eidus 
291931dbd01fSIzik Eidus 	ksm_thread_pages_to_scan = nr_pages;
292031dbd01fSIzik Eidus 
292131dbd01fSIzik Eidus 	return count;
292231dbd01fSIzik Eidus }
292331dbd01fSIzik Eidus KSM_ATTR(pages_to_scan);
292431dbd01fSIzik Eidus 
292531dbd01fSIzik Eidus static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
292631dbd01fSIzik Eidus 			char *buf)
292731dbd01fSIzik Eidus {
2928ae7a927dSJoe Perches 	return sysfs_emit(buf, "%lu\n", ksm_run);
292931dbd01fSIzik Eidus }
293031dbd01fSIzik Eidus 
293131dbd01fSIzik Eidus static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
293231dbd01fSIzik Eidus 			 const char *buf, size_t count)
293331dbd01fSIzik Eidus {
2934dfefd226SAlexey Dobriyan 	unsigned int flags;
293531dbd01fSIzik Eidus 	int err;
293631dbd01fSIzik Eidus 
2937dfefd226SAlexey Dobriyan 	err = kstrtouint(buf, 10, &flags);
2938dfefd226SAlexey Dobriyan 	if (err)
293931dbd01fSIzik Eidus 		return -EINVAL;
294031dbd01fSIzik Eidus 	if (flags > KSM_RUN_UNMERGE)
294131dbd01fSIzik Eidus 		return -EINVAL;
294231dbd01fSIzik Eidus 
294331dbd01fSIzik Eidus 	/*
294431dbd01fSIzik Eidus 	 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
294531dbd01fSIzik Eidus 	 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
2946d0f209f6SHugh Dickins 	 * breaking COW to free the pages_shared (but leaves mm_slots
2947d0f209f6SHugh Dickins 	 * on the list for when ksmd may be set running again).
294831dbd01fSIzik Eidus 	 */
294931dbd01fSIzik Eidus 
295031dbd01fSIzik Eidus 	mutex_lock(&ksm_thread_mutex);
2951ef4d43a8SHugh Dickins 	wait_while_offlining();
295231dbd01fSIzik Eidus 	if (ksm_run != flags) {
295331dbd01fSIzik Eidus 		ksm_run = flags;
2954d952b791SHugh Dickins 		if (flags & KSM_RUN_UNMERGE) {
2955e1e12d2fSDavid Rientjes 			set_current_oom_origin();
2956d952b791SHugh Dickins 			err = unmerge_and_remove_all_rmap_items();
2957e1e12d2fSDavid Rientjes 			clear_current_oom_origin();
2958d952b791SHugh Dickins 			if (err) {
2959d952b791SHugh Dickins 				ksm_run = KSM_RUN_STOP;
2960d952b791SHugh Dickins 				count = err;
2961d952b791SHugh Dickins 			}
2962d952b791SHugh Dickins 		}
296331dbd01fSIzik Eidus 	}
296431dbd01fSIzik Eidus 	mutex_unlock(&ksm_thread_mutex);
296531dbd01fSIzik Eidus 
296631dbd01fSIzik Eidus 	if (flags & KSM_RUN_MERGE)
296731dbd01fSIzik Eidus 		wake_up_interruptible(&ksm_thread_wait);
296831dbd01fSIzik Eidus 
296931dbd01fSIzik Eidus 	return count;
297031dbd01fSIzik Eidus }
297131dbd01fSIzik Eidus KSM_ATTR(run);
297231dbd01fSIzik Eidus 
297390bd6fd3SPetr Holasek #ifdef CONFIG_NUMA
297490bd6fd3SPetr Holasek static ssize_t merge_across_nodes_show(struct kobject *kobj,
297590bd6fd3SPetr Holasek 				       struct kobj_attribute *attr, char *buf)
297690bd6fd3SPetr Holasek {
2977ae7a927dSJoe Perches 	return sysfs_emit(buf, "%u\n", ksm_merge_across_nodes);
297890bd6fd3SPetr Holasek }
297990bd6fd3SPetr Holasek 
298090bd6fd3SPetr Holasek static ssize_t merge_across_nodes_store(struct kobject *kobj,
298190bd6fd3SPetr Holasek 				   struct kobj_attribute *attr,
298290bd6fd3SPetr Holasek 				   const char *buf, size_t count)
298390bd6fd3SPetr Holasek {
298490bd6fd3SPetr Holasek 	int err;
298590bd6fd3SPetr Holasek 	unsigned long knob;
298690bd6fd3SPetr Holasek 
298790bd6fd3SPetr Holasek 	err = kstrtoul(buf, 10, &knob);
298890bd6fd3SPetr Holasek 	if (err)
298990bd6fd3SPetr Holasek 		return err;
299090bd6fd3SPetr Holasek 	if (knob > 1)
299190bd6fd3SPetr Holasek 		return -EINVAL;
299290bd6fd3SPetr Holasek 
299390bd6fd3SPetr Holasek 	mutex_lock(&ksm_thread_mutex);
2994ef4d43a8SHugh Dickins 	wait_while_offlining();
299590bd6fd3SPetr Holasek 	if (ksm_merge_across_nodes != knob) {
2996cbf86cfeSHugh Dickins 		if (ksm_pages_shared || remove_all_stable_nodes())
299790bd6fd3SPetr Holasek 			err = -EBUSY;
2998ef53d16cSHugh Dickins 		else if (root_stable_tree == one_stable_tree) {
2999ef53d16cSHugh Dickins 			struct rb_root *buf;
3000ef53d16cSHugh Dickins 			/*
3001ef53d16cSHugh Dickins 			 * This is the first time that we switch away from the
3002ef53d16cSHugh Dickins 			 * default of merging across nodes: must now allocate
3003ef53d16cSHugh Dickins 			 * a buffer to hold as many roots as may be needed.
3004ef53d16cSHugh Dickins 			 * Allocate stable and unstable together:
3005ef53d16cSHugh Dickins 			 * MAXSMP NODES_SHIFT 10 will use 16kB.
3006ef53d16cSHugh Dickins 			 */
3007bafe1e14SJoe Perches 			buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),
3008bafe1e14SJoe Perches 				      GFP_KERNEL);
3009ef53d16cSHugh Dickins 			/* kcalloc zeroes the buffer, and an all-zero rb_root is RB_ROOT, i.e. an empty tree */
3010ef53d16cSHugh Dickins 			if (!buf)
3011ef53d16cSHugh Dickins 				err = -ENOMEM;
3012ef53d16cSHugh Dickins 			else {
3013ef53d16cSHugh Dickins 				root_stable_tree = buf;
3014ef53d16cSHugh Dickins 				root_unstable_tree = buf + nr_node_ids;
3015ef53d16cSHugh Dickins 				/* Stable tree is empty but not the unstable */
3016ef53d16cSHugh Dickins 				root_unstable_tree[0] = one_unstable_tree[0];
3017ef53d16cSHugh Dickins 			}
3018ef53d16cSHugh Dickins 		}
3019ef53d16cSHugh Dickins 		if (!err) {
302090bd6fd3SPetr Holasek 			ksm_merge_across_nodes = knob;
3021ef53d16cSHugh Dickins 			ksm_nr_node_ids = knob ? 1 : nr_node_ids;
3022ef53d16cSHugh Dickins 		}
302390bd6fd3SPetr Holasek 	}
302490bd6fd3SPetr Holasek 	mutex_unlock(&ksm_thread_mutex);
302590bd6fd3SPetr Holasek 
302690bd6fd3SPetr Holasek 	return err ? err : count;
302790bd6fd3SPetr Holasek }
302890bd6fd3SPetr Holasek KSM_ATTR(merge_across_nodes);
302990bd6fd3SPetr Holasek #endif
303090bd6fd3SPetr Holasek 
3031e86c59b1SClaudio Imbrenda static ssize_t use_zero_pages_show(struct kobject *kobj,
3032e86c59b1SClaudio Imbrenda 				   struct kobj_attribute *attr, char *buf)
3033e86c59b1SClaudio Imbrenda {
3034ae7a927dSJoe Perches 	return sysfs_emit(buf, "%u\n", ksm_use_zero_pages);
3035e86c59b1SClaudio Imbrenda }
3036e86c59b1SClaudio Imbrenda static ssize_t use_zero_pages_store(struct kobject *kobj,
3037e86c59b1SClaudio Imbrenda 				    struct kobj_attribute *attr,
3038e86c59b1SClaudio Imbrenda 				    const char *buf, size_t count)
3039e86c59b1SClaudio Imbrenda {
3040e86c59b1SClaudio Imbrenda 	int err;
3041e86c59b1SClaudio Imbrenda 	bool value;
3042e86c59b1SClaudio Imbrenda 
3043e86c59b1SClaudio Imbrenda 	err = kstrtobool(buf, &value);
3044e86c59b1SClaudio Imbrenda 	if (err)
3045e86c59b1SClaudio Imbrenda 		return -EINVAL;
3046e86c59b1SClaudio Imbrenda 
3047e86c59b1SClaudio Imbrenda 	ksm_use_zero_pages = value;
3048e86c59b1SClaudio Imbrenda 
3049e86c59b1SClaudio Imbrenda 	return count;
3050e86c59b1SClaudio Imbrenda }
3051e86c59b1SClaudio Imbrenda KSM_ATTR(use_zero_pages);
3052e86c59b1SClaudio Imbrenda 
30532c653d0eSAndrea Arcangeli static ssize_t max_page_sharing_show(struct kobject *kobj,
30542c653d0eSAndrea Arcangeli 				     struct kobj_attribute *attr, char *buf)
30552c653d0eSAndrea Arcangeli {
3056ae7a927dSJoe Perches 	return sysfs_emit(buf, "%u\n", ksm_max_page_sharing);
30572c653d0eSAndrea Arcangeli }
30582c653d0eSAndrea Arcangeli 
30592c653d0eSAndrea Arcangeli static ssize_t max_page_sharing_store(struct kobject *kobj,
30602c653d0eSAndrea Arcangeli 				      struct kobj_attribute *attr,
30612c653d0eSAndrea Arcangeli 				      const char *buf, size_t count)
30622c653d0eSAndrea Arcangeli {
30632c653d0eSAndrea Arcangeli 	int err;
30642c653d0eSAndrea Arcangeli 	int knob;
30652c653d0eSAndrea Arcangeli 
30662c653d0eSAndrea Arcangeli 	err = kstrtoint(buf, 10, &knob);
30672c653d0eSAndrea Arcangeli 	if (err)
30682c653d0eSAndrea Arcangeli 		return err;
30692c653d0eSAndrea Arcangeli 	/*
30702c653d0eSAndrea Arcangeli 	 * When a KSM page is created it is shared by 2 mappings. This
30712c653d0eSAndrea Arcangeli 	 * being a signed comparison, it implicitly verifies it's not
30722c653d0eSAndrea Arcangeli 	 * negative.
30732c653d0eSAndrea Arcangeli 	 */
30742c653d0eSAndrea Arcangeli 	if (knob < 2)
30752c653d0eSAndrea Arcangeli 		return -EINVAL;
30762c653d0eSAndrea Arcangeli 
30772c653d0eSAndrea Arcangeli 	if (READ_ONCE(ksm_max_page_sharing) == knob)
30782c653d0eSAndrea Arcangeli 		return count;
30792c653d0eSAndrea Arcangeli 
30802c653d0eSAndrea Arcangeli 	mutex_lock(&ksm_thread_mutex);
30812c653d0eSAndrea Arcangeli 	wait_while_offlining();
30822c653d0eSAndrea Arcangeli 	if (ksm_max_page_sharing != knob) {
30832c653d0eSAndrea Arcangeli 		if (ksm_pages_shared || remove_all_stable_nodes())
30842c653d0eSAndrea Arcangeli 			err = -EBUSY;
30852c653d0eSAndrea Arcangeli 		else
30862c653d0eSAndrea Arcangeli 			ksm_max_page_sharing = knob;
30872c653d0eSAndrea Arcangeli 	}
30882c653d0eSAndrea Arcangeli 	mutex_unlock(&ksm_thread_mutex);
30892c653d0eSAndrea Arcangeli 
30902c653d0eSAndrea Arcangeli 	return err ? err : count;
30912c653d0eSAndrea Arcangeli }
30922c653d0eSAndrea Arcangeli KSM_ATTR(max_page_sharing);
30932c653d0eSAndrea Arcangeli 
3094b4028260SHugh Dickins static ssize_t pages_shared_show(struct kobject *kobj,
3095b4028260SHugh Dickins 				 struct kobj_attribute *attr, char *buf)
3096b4028260SHugh Dickins {
3097ae7a927dSJoe Perches 	return sysfs_emit(buf, "%lu\n", ksm_pages_shared);
3098b4028260SHugh Dickins }
3099b4028260SHugh Dickins KSM_ATTR_RO(pages_shared);
3100b4028260SHugh Dickins 
3101b4028260SHugh Dickins static ssize_t pages_sharing_show(struct kobject *kobj,
3102b4028260SHugh Dickins 				  struct kobj_attribute *attr, char *buf)
3103b4028260SHugh Dickins {
3104ae7a927dSJoe Perches 	return sysfs_emit(buf, "%lu\n", ksm_pages_sharing);
3105b4028260SHugh Dickins }
3106b4028260SHugh Dickins KSM_ATTR_RO(pages_sharing);
3107b4028260SHugh Dickins 
3108473b0ce4SHugh Dickins static ssize_t pages_unshared_show(struct kobject *kobj,
3109473b0ce4SHugh Dickins 				   struct kobj_attribute *attr, char *buf)
3110473b0ce4SHugh Dickins {
3111ae7a927dSJoe Perches 	return sysfs_emit(buf, "%lu\n", ksm_pages_unshared);
3112473b0ce4SHugh Dickins }
3113473b0ce4SHugh Dickins KSM_ATTR_RO(pages_unshared);
3114473b0ce4SHugh Dickins 
3115473b0ce4SHugh Dickins static ssize_t pages_volatile_show(struct kobject *kobj,
3116473b0ce4SHugh Dickins 				   struct kobj_attribute *attr, char *buf)
3117473b0ce4SHugh Dickins {
3118473b0ce4SHugh Dickins 	long ksm_pages_volatile;
3119473b0ce4SHugh Dickins 
3120473b0ce4SHugh Dickins 	ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
3121473b0ce4SHugh Dickins 			   - ksm_pages_sharing - ksm_pages_unshared;
3122473b0ce4SHugh Dickins 	/*
3123473b0ce4SHugh Dickins 	 * It was not worth any locking to calculate that statistic,
3124473b0ce4SHugh Dickins 	 * but it might therefore sometimes be negative: conceal that.
3125473b0ce4SHugh Dickins 	 */
3126473b0ce4SHugh Dickins 	if (ksm_pages_volatile < 0)
3127473b0ce4SHugh Dickins 		ksm_pages_volatile = 0;
3128ae7a927dSJoe Perches 	return sysfs_emit(buf, "%ld\n", ksm_pages_volatile);
3129473b0ce4SHugh Dickins }
3130473b0ce4SHugh Dickins KSM_ATTR_RO(pages_volatile);
3131473b0ce4SHugh Dickins 
31322c653d0eSAndrea Arcangeli static ssize_t stable_node_dups_show(struct kobject *kobj,
31332c653d0eSAndrea Arcangeli 				     struct kobj_attribute *attr, char *buf)
31342c653d0eSAndrea Arcangeli {
3135ae7a927dSJoe Perches 	return sysfs_emit(buf, "%lu\n", ksm_stable_node_dups);
31362c653d0eSAndrea Arcangeli }
31372c653d0eSAndrea Arcangeli KSM_ATTR_RO(stable_node_dups);
31382c653d0eSAndrea Arcangeli 
31392c653d0eSAndrea Arcangeli static ssize_t stable_node_chains_show(struct kobject *kobj,
31402c653d0eSAndrea Arcangeli 				       struct kobj_attribute *attr, char *buf)
31412c653d0eSAndrea Arcangeli {
3142ae7a927dSJoe Perches 	return sysfs_emit(buf, "%lu\n", ksm_stable_node_chains);
31432c653d0eSAndrea Arcangeli }
31442c653d0eSAndrea Arcangeli KSM_ATTR_RO(stable_node_chains);
31452c653d0eSAndrea Arcangeli 
31462c653d0eSAndrea Arcangeli static ssize_t
31472c653d0eSAndrea Arcangeli stable_node_chains_prune_millisecs_show(struct kobject *kobj,
31482c653d0eSAndrea Arcangeli 					struct kobj_attribute *attr,
31492c653d0eSAndrea Arcangeli 					char *buf)
31502c653d0eSAndrea Arcangeli {
3151ae7a927dSJoe Perches 	return sysfs_emit(buf, "%u\n", ksm_stable_node_chains_prune_millisecs);
31522c653d0eSAndrea Arcangeli }
31532c653d0eSAndrea Arcangeli 
31542c653d0eSAndrea Arcangeli static ssize_t
31552c653d0eSAndrea Arcangeli stable_node_chains_prune_millisecs_store(struct kobject *kobj,
31562c653d0eSAndrea Arcangeli 					 struct kobj_attribute *attr,
31572c653d0eSAndrea Arcangeli 					 const char *buf, size_t count)
31582c653d0eSAndrea Arcangeli {
3159584ff0dfSZhansaya Bagdauletkyzy 	unsigned int msecs;
31602c653d0eSAndrea Arcangeli 	int err;
31612c653d0eSAndrea Arcangeli 
3162584ff0dfSZhansaya Bagdauletkyzy 	err = kstrtouint(buf, 10, &msecs);
3163584ff0dfSZhansaya Bagdauletkyzy 	if (err)
31642c653d0eSAndrea Arcangeli 		return -EINVAL;
31652c653d0eSAndrea Arcangeli 
31662c653d0eSAndrea Arcangeli 	ksm_stable_node_chains_prune_millisecs = msecs;
31672c653d0eSAndrea Arcangeli 
31682c653d0eSAndrea Arcangeli 	return count;
31692c653d0eSAndrea Arcangeli }
31702c653d0eSAndrea Arcangeli KSM_ATTR(stable_node_chains_prune_millisecs);
31712c653d0eSAndrea Arcangeli 
3172473b0ce4SHugh Dickins static ssize_t full_scans_show(struct kobject *kobj,
3173473b0ce4SHugh Dickins 			       struct kobj_attribute *attr, char *buf)
3174473b0ce4SHugh Dickins {
3175ae7a927dSJoe Perches 	return sysfs_emit(buf, "%lu\n", ksm_scan.seqnr);
3176473b0ce4SHugh Dickins }
3177473b0ce4SHugh Dickins KSM_ATTR_RO(full_scans);
3178473b0ce4SHugh Dickins 
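/*
 * All of the knobs above surface under /sys/kernel/mm/ksm/.  A minimal
 * userspace sketch (error handling omitted):
 *
 *	int fd = open("/sys/kernel/mm/ksm/run", O_WRONLY);
 *	write(fd, "1", 1);	/. start ksmd; "0" stops it, "2" unmerges ./
 *	close(fd);
 *
 * pages_shared versus pages_sharing then give the size of the stable
 * tree versus the number of mappings deduplicated into it.
 */
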
317931dbd01fSIzik Eidus static struct attribute *ksm_attrs[] = {
318031dbd01fSIzik Eidus 	&sleep_millisecs_attr.attr,
318131dbd01fSIzik Eidus 	&pages_to_scan_attr.attr,
318231dbd01fSIzik Eidus 	&run_attr.attr,
3183b4028260SHugh Dickins 	&pages_shared_attr.attr,
3184b4028260SHugh Dickins 	&pages_sharing_attr.attr,
3185473b0ce4SHugh Dickins 	&pages_unshared_attr.attr,
3186473b0ce4SHugh Dickins 	&pages_volatile_attr.attr,
3187473b0ce4SHugh Dickins 	&full_scans_attr.attr,
318890bd6fd3SPetr Holasek #ifdef CONFIG_NUMA
318990bd6fd3SPetr Holasek 	&merge_across_nodes_attr.attr,
319090bd6fd3SPetr Holasek #endif
31912c653d0eSAndrea Arcangeli 	&max_page_sharing_attr.attr,
31922c653d0eSAndrea Arcangeli 	&stable_node_chains_attr.attr,
31932c653d0eSAndrea Arcangeli 	&stable_node_dups_attr.attr,
31942c653d0eSAndrea Arcangeli 	&stable_node_chains_prune_millisecs_attr.attr,
3195e86c59b1SClaudio Imbrenda 	&use_zero_pages_attr.attr,
319631dbd01fSIzik Eidus 	NULL,
319731dbd01fSIzik Eidus };
319831dbd01fSIzik Eidus 
3199f907c26aSArvind Yadav static const struct attribute_group ksm_attr_group = {
320031dbd01fSIzik Eidus 	.attrs = ksm_attrs,
320131dbd01fSIzik Eidus 	.name = "ksm",
320231dbd01fSIzik Eidus };
32032ffd8679SHugh Dickins #endif /* CONFIG_SYSFS */
320431dbd01fSIzik Eidus 
320531dbd01fSIzik Eidus static int __init ksm_init(void)
320631dbd01fSIzik Eidus {
320731dbd01fSIzik Eidus 	struct task_struct *ksm_thread;
320831dbd01fSIzik Eidus 	int err;
320931dbd01fSIzik Eidus 
3210e86c59b1SClaudio Imbrenda 	/* The correct value depends on page size and endianness */
3211e86c59b1SClaudio Imbrenda 	zero_checksum = calc_checksum(ZERO_PAGE(0));
3212e86c59b1SClaudio Imbrenda 	/* Default to false for backwards compatibility */
3213e86c59b1SClaudio Imbrenda 	ksm_use_zero_pages = false;
3214e86c59b1SClaudio Imbrenda 
321531dbd01fSIzik Eidus 	err = ksm_slab_init();
321631dbd01fSIzik Eidus 	if (err)
321731dbd01fSIzik Eidus 		goto out;
321831dbd01fSIzik Eidus 
321931dbd01fSIzik Eidus 	ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
322031dbd01fSIzik Eidus 	if (IS_ERR(ksm_thread)) {
322125acde31SPaul McQuade 		pr_err("ksm: creating kthread failed\n");
322231dbd01fSIzik Eidus 		err = PTR_ERR(ksm_thread);
3223d9f8984cSLai Jiangshan 		goto out_free;
322431dbd01fSIzik Eidus 	}
322531dbd01fSIzik Eidus 
32262ffd8679SHugh Dickins #ifdef CONFIG_SYSFS
322731dbd01fSIzik Eidus 	err = sysfs_create_group(mm_kobj, &ksm_attr_group);
322831dbd01fSIzik Eidus 	if (err) {
322925acde31SPaul McQuade 		pr_err("ksm: register sysfs failed\n");
32302ffd8679SHugh Dickins 		kthread_stop(ksm_thread);
3231d9f8984cSLai Jiangshan 		goto out_free;
323231dbd01fSIzik Eidus 	}
3233c73602adSHugh Dickins #else
3234c73602adSHugh Dickins 	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */
3235c73602adSHugh Dickins 
32362ffd8679SHugh Dickins #endif /* CONFIG_SYSFS */
323731dbd01fSIzik Eidus 
323862b61f61SHugh Dickins #ifdef CONFIG_MEMORY_HOTREMOVE
3239ef4d43a8SHugh Dickins 	/* There is no significance to this priority 100 */
32401eeaa4fdSLiu Shixin 	hotplug_memory_notifier(ksm_memory_callback, KSM_CALLBACK_PRI);
324162b61f61SHugh Dickins #endif
324231dbd01fSIzik Eidus 	return 0;
324331dbd01fSIzik Eidus 
3244d9f8984cSLai Jiangshan out_free:
324531dbd01fSIzik Eidus 	ksm_slab_free();
324631dbd01fSIzik Eidus out:
324731dbd01fSIzik Eidus 	return err;
324831dbd01fSIzik Eidus }
3249a64fb3cdSPaul Gortmaker subsys_initcall(ksm_init);