// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork()
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Authors:
 *	Izik Eidus
 *	Andrea Arcangeli
 *	Chris Wright
 *	Hugh Dickins
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/xxhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>
#include <linux/hashtable.h>
#include <linux/freezer.h>
#include <linux/oom.h>
#include <linux/numa.h>

#include <asm/tlbflush.h>
#include "internal.h"

#ifdef CONFIG_NUMA
#define NUMA(x)		(x)
#define DO_NUMA(x)	do { (x); } while (0)
#else
#define NUMA(x)		(0)
#define DO_NUMA(x)	do { } while (0)
#endif

/**
 * DOC: Overview
 *
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents.  Because each such page is write-protected, searching
 * this tree is assured to work (except when pages are unmapped), and
 * therefore this tree is called the stable tree.
 *
 * The stable tree node includes information required for reverse
 * mapping from a KSM page to virtual addresses that map this page.
 *
 * In order to avoid large latencies of the rmap walks on KSM pages,
 * KSM maintains two types of nodes in the stable tree:
 *
 * * the regular nodes that keep the reverse mapping structures in a
 *   linked list
 * * the "chains" that link nodes ("dups") that represent the same
 *   write protected memory content, but each "dup" corresponds to a
 *   different KSM page copy of that content
 *
 * Internally, the regular nodes, "dups" and "chains" are represented
 * using the same struct stable_node structure.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time".  The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM inserts into the unstable tree only those pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a red-black tree - so its balancing is based on the
 *    colors of the nodes and not on their contents, assuring that even when
 *    the tree gets "corrupted" it won't get out of balance, so scanning time
 *    remains the same (also, searching and inserting nodes in an rbtree uses
 *    the same algorithm, so we have no overhead when we flush and rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree.  (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
 *
 * If the merge_across_nodes tunable is unset, then KSM maintains multiple
 * stable trees and multiple unstable trees: one of each for each NUMA node.
 */
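
/*
 * Illustrative usage (not part of this file's logic): userspace opts a
 * mapping into KSM scanning with madvise(MADV_MERGEABLE); ksmd may then
 * merge identical pages of that area.  A minimal sketch - the mapping
 * size is arbitrary and error handling is reduced to bare exits; it only
 * takes effect on kernels built with CONFIG_KSM:
 *
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 64 * 4096;
 *		char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (buf == MAP_FAILED || madvise(buf, len, MADV_MERGEABLE))
 *			return 1;
 *		return 0;
 *	}
 */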

/**
 * struct mm_slot - ksm information per mm that is being scanned
 * @link: link to the mm_slots hash list
 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node link;
	struct list_head mm_list;
	struct rmap_item *rmap_list;
	struct mm_struct *mm;
};

/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
	struct mm_slot *mm_slot;
	unsigned long address;
	struct rmap_item **rmap_list;
	unsigned long seqnr;
};

/**
 * struct stable_node - node of the stable rbtree
 * @node: rb node of this ksm page in the stable tree
 * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
 * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
 * @list: linked into migrate_nodes, pending placement in the proper node tree
 * @hlist: hlist head of rmap_items using this ksm page
 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
 * @chain_prune_time: time of the last full garbage collection
 * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN
 * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
 */
struct stable_node {
	union {
		struct rb_node node;	/* when node of stable tree */
		struct {		/* when listed for migration */
			struct list_head *head;
			struct {
				struct hlist_node hlist_dup;
				struct list_head list;
			};
		};
	};
	struct hlist_head hlist;
	union {
		unsigned long kpfn;
		unsigned long chain_prune_time;
	};
	/*
	 * STABLE_NODE_CHAIN can be any negative number in
	 * rmap_hlist_len negative range, but better not -1, so that
	 * underflows can be detected reliably.
	 */
#define STABLE_NODE_CHAIN -1024
	int rmap_hlist_len;
#ifdef CONFIG_NUMA
	int nid;
#endif
};

/**
 * struct rmap_item - reverse mapping item for virtual addresses
 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
 * @nid: NUMA node id of unstable tree in which linked (may not match page)
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @node: rb node of this rmap_item in the unstable tree
 * @head: pointer to stable_node heading this list in the stable tree
 * @hlist: link into hlist of rmap_items hanging off that stable_node
 */
struct rmap_item {
	struct rmap_item *rmap_list;
	union {
		struct anon_vma *anon_vma;	/* when stable */
#ifdef CONFIG_NUMA
		int nid;		/* when node of unstable tree */
#endif
	};
	struct mm_struct *mm;
	unsigned long address;		/* + low bits used for flags below */
	unsigned int oldchecksum;	/* when unstable */
	union {
		struct rb_node node;	/* when node of unstable tree */
		struct {		/* when listed from stable tree */
			struct stable_node *head;
			struct hlist_node hlist;
		};
	};
};

#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
#define STABLE_FLAG	0x200	/* is listed from the stable tree */
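
/*
 * Illustration (an assumed example, not used by the code): the low bits of
 * rmap_item->address double as state.  An item inserted into the unstable
 * tree during full scan number 5, for a page mapped at 0x7f1234560000,
 * would hold
 *
 *	address == 0x7f1234560000 | UNSTABLE_FLAG | (5 & SEQNR_MASK)
 *
 * and rmap_item->address & PAGE_MASK recovers the virtual address again.
 */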

/* The stable and unstable tree heads */
static struct rb_root one_stable_tree[1] = { RB_ROOT };
static struct rb_root one_unstable_tree[1] = { RB_ROOT };
static struct rb_root *root_stable_tree = one_stable_tree;
static struct rb_root *root_unstable_tree = one_unstable_tree;

/* Recently migrated nodes of stable tree, pending proper placement */
static LIST_HEAD(migrate_nodes);
#define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)

#define MM_SLOTS_HASH_BITS 10
static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct mm_slot ksm_mm_head = {
	.mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
};
static struct ksm_scan ksm_scan = {
	.mm_slot = &ksm_mm_head,
};

static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* The number of stable_node chains */
static unsigned long ksm_stable_node_chains;

/* The number of stable_node dups linked to the stable_node chains */
static unsigned long ksm_stable_node_dups;

/* Delay in pruning stale stable_node_dups in the stable_node_chains */
static int ksm_stable_node_chains_prune_millisecs = 2000;

/* Maximum number of page slots sharing a stable node */
static int ksm_max_page_sharing = 256;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = 100;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;

/* Checksum of an empty (zeroed) page */
static unsigned int zero_checksum __read_mostly;

/* Whether to merge empty (zeroed) pages with actual zero pages */
static bool ksm_use_zero_pages __read_mostly;

#ifdef CONFIG_NUMA
/* Zeroed when merging across nodes is not allowed */
static unsigned int ksm_merge_across_nodes = 1;
static int ksm_nr_node_ids = 1;
#else
#define ksm_merge_across_nodes	1U
#define ksm_nr_node_ids		1
#endif

#define KSM_RUN_STOP	0
#define KSM_RUN_MERGE	1
#define KSM_RUN_UNMERGE	2
#define KSM_RUN_OFFLINE	4
static unsigned long ksm_run = KSM_RUN_STOP;
static void wait_while_offlining(void);
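
/*
 * For reference (illustrative, not used by the code below): most of the
 * knobs above are exported under /sys/kernel/mm/ksm/, e.g. from a root
 * shell:
 *
 *	echo 100 > /sys/kernel/mm/ksm/pages_to_scan
 *	echo 20  > /sys/kernel/mm/ksm/sleep_millisecs
 *	echo 1   > /sys/kernel/mm/ksm/run
 */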

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);

#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)

static int __init ksm_slab_init(void)
{
	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
	if (!rmap_item_cache)
		goto out;

	stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
	if (!stable_node_cache)
		goto out_free1;

	mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
	if (!mm_slot_cache)
		goto out_free2;

	return 0;

out_free2:
	kmem_cache_destroy(stable_node_cache);
out_free1:
	kmem_cache_destroy(rmap_item_cache);
out:
	return -ENOMEM;
}

static void __init ksm_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	kmem_cache_destroy(stable_node_cache);
	kmem_cache_destroy(rmap_item_cache);
	mm_slot_cache = NULL;
}

static __always_inline bool is_stable_node_chain(struct stable_node *chain)
{
	return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
}

static __always_inline bool is_stable_node_dup(struct stable_node *dup)
{
	return dup->head == STABLE_NODE_DUP_HEAD;
}

static inline void stable_node_chain_add_dup(struct stable_node *dup,
					     struct stable_node *chain)
{
	VM_BUG_ON(is_stable_node_dup(dup));
	dup->head = STABLE_NODE_DUP_HEAD;
	VM_BUG_ON(!is_stable_node_chain(chain));
	hlist_add_head(&dup->hlist_dup, &chain->hlist);
	ksm_stable_node_dups++;
}

static inline void __stable_node_dup_del(struct stable_node *dup)
{
	VM_BUG_ON(!is_stable_node_dup(dup));
	hlist_del(&dup->hlist_dup);
	ksm_stable_node_dups--;
}
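
/*
 * Sketch (descriptive only) of the two-dimensional stable tree layout that
 * the helpers above and below maintain, once a chain has been created:
 *
 *	rb tree:      ... - [chain] - ...      (rmap_hlist_len == STABLE_NODE_CHAIN)
 *	                       |
 *	chain->hlist:  [dup0] - [dup1] - ...   (each dup: head == STABLE_NODE_DUP_HEAD)
 *	                  |
 *	dup0->hlist:   rmap_item - rmap_item - ...  (one per mapping of that KSM page)
 *
 * Each "dup" is a distinct write-protected page with identical content;
 * the chain node itself holds no page.
 */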

static inline void stable_node_dup_del(struct stable_node *dup)
{
	VM_BUG_ON(is_stable_node_chain(dup));
	if (is_stable_node_dup(dup))
		__stable_node_dup_del(dup);
	else
		rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));
#ifdef CONFIG_DEBUG_VM
	dup->head = NULL;
#endif
}

static inline struct rmap_item *alloc_rmap_item(void)
{
	struct rmap_item *rmap_item;

	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
						__GFP_NORETRY | __GFP_NOWARN);
	if (rmap_item)
		ksm_rmap_items++;
	return rmap_item;
}

static inline void free_rmap_item(struct rmap_item *rmap_item)
{
	ksm_rmap_items--;
	rmap_item->mm = NULL;	/* debug safety */
	kmem_cache_free(rmap_item_cache, rmap_item);
}

static inline struct stable_node *alloc_stable_node(void)
{
	/*
	 * The allocation can take too long with GFP_KERNEL when memory is under
	 * pressure, which may lead to hung task warnings.  Adding __GFP_HIGH
	 * grants access to memory reserves, helping to avoid this problem.
	 */
	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
}

static inline void free_stable_node(struct stable_node *stable_node)
{
	VM_BUG_ON(stable_node->rmap_hlist_len &&
		  !is_stable_node_chain(stable_node));
	kmem_cache_free(stable_node_cache, stable_node);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *slot;

	hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm)
		if (slot->mm == mm)
			return slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm);
}

/*
 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
 * page tables after it has passed through ksm_exit() - which, if necessary,
 * takes mmap_lock briefly to serialize against them.  ksm_exit() does not set
 * a special flag: they can just back out as soon as mm_users goes to zero.
 * ksm_test_exit() is used throughout to make this test for exit: in some
 * places for correctness, in some places just to avoid unnecessary work.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

/*
 * We use break_ksm to break COW on a ksm page: it's a stripped down
 *
 *	if (get_user_pages(addr, 1, FOLL_WRITE, &page, NULL) == 1)
 *		put_page(page);
 *
 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
 *
 * FAULT_FLAG/FOLL_REMOTE are because we do this outside the context
 * of the process that owns 'vma'.  We also do not want to enforce
 * protection keys here anyway.
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	vm_fault_t ret = 0;

	do {
		cond_resched();
		page = follow_page(vma, addr,
				FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
		if (IS_ERR_OR_NULL(page))
			break;
		if (PageKsm(page))
			ret = handle_mm_fault(vma, addr,
					      FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE,
					      NULL);
		else
			ret = VM_FAULT_WRITE;
		put_page(page);
	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
	/*
	 * We must loop because handle_mm_fault() may back out if there's
	 * any difficulty e.g. if pte accessed bit gets updated concurrently.
	 *
	 * VM_FAULT_WRITE is what we have been hoping for: it indicates that
	 * COW has been broken, even if the vma does not permit VM_WRITE;
	 * but note that a concurrent fault might break PageKsm for us.
	 *
	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
	 * backing file, which also invalidates anonymous pages: that's
	 * okay, that truncation will have unmapped the PageKsm for us.
	 *
	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
	 * current task has TIF_MEMDIE set, and will be OOM killed on return
	 * to user; and ksmd, having no mm, would never be chosen for that.
	 *
	 * But if the mm is in a limited mem_cgroup, then the fault may fail
	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
	 * even ksmd can fail in this way - though it's usually breaking ksm
	 * just to undo a merge it made a moment before, so unlikely to oom.
	 *
	 * That's a pity: we might therefore have more kernel pages allocated
	 * than we're counting as nodes in the stable tree; but ksm_do_scan
	 * will retry to break_cow on each pass, so should recover the page
	 * in due course.  The important thing is to not let VM_MERGEABLE
	 * be cleared while any such pages might remain in the area.
	 */
	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}

static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
		unsigned long addr)
{
	struct vm_area_struct *vma;
	if (ksm_test_exit(mm))
		return NULL;
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		return NULL;
	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		return NULL;
	return vma;
}

static void break_cow(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;

	/*
	 * It is not an accident that whenever we want to break COW
	 * to undo, we also need to drop a reference to the anon_vma.
	 */
	put_anon_vma(rmap_item->anon_vma);

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, addr);
	if (vma)
		break_ksm(vma, addr);
	mmap_read_unlock(mm);
}

static struct page *get_mergeable_page(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;
	struct page *page;

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, addr);
	if (!vma)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	if (PageAnon(page)) {
		flush_anon_page(vma, page, addr);
		flush_dcache_page(page);
	} else {
		put_page(page);
out:
		page = NULL;
	}
	mmap_read_unlock(mm);
	return page;
}

/*
 * This helper is used for getting the right index into the array of tree
 * roots.  When merge_across_nodes knob is set to 1, there are only two
 * rb-trees for stable and unstable pages from all nodes with roots in index
 * 0.  Otherwise, every node has its own stable and unstable tree.
 */
static inline int get_kpfn_nid(unsigned long kpfn)
{
	return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
}

static struct stable_node *alloc_stable_node_chain(struct stable_node *dup,
						   struct rb_root *root)
{
	struct stable_node *chain = alloc_stable_node();
	VM_BUG_ON(is_stable_node_chain(dup));
	if (likely(chain)) {
		INIT_HLIST_HEAD(&chain->hlist);
		chain->chain_prune_time = jiffies;
		chain->rmap_hlist_len = STABLE_NODE_CHAIN;
#if defined (CONFIG_DEBUG_VM) && defined(CONFIG_NUMA)
		chain->nid = NUMA_NO_NODE;	/* debug */
#endif
		ksm_stable_node_chains++;

		/*
		 * Put the stable node chain in the first dimension of
		 * the stable tree and at the same time remove the old
		 * stable node.
		 */
		rb_replace_node(&dup->node, &chain->node, root);

		/*
		 * Move the old stable node to the second dimension
		 * queued in the hlist_dup.  The invariant is that all
		 * dup stable_nodes in the chain->hlist point to pages
		 * that are write protected and have the exact same
		 * content.
		 */
		stable_node_chain_add_dup(dup, chain);
	}
	return chain;
}

static inline void free_stable_node_chain(struct stable_node *chain,
					  struct rb_root *root)
{
	rb_erase(&chain->node, root);
	free_stable_node(chain);
	ksm_stable_node_chains--;
}

static void remove_node_from_stable_tree(struct stable_node *stable_node)
{
	struct rmap_item *rmap_item;

	/* check it's not STABLE_NODE_CHAIN or negative */
	BUG_ON(stable_node->rmap_hlist_len < 0);

	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		if (rmap_item->hlist.next)
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;
		VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
		stable_node->rmap_hlist_len--;
		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;
		cond_resched();
	}

	/*
	 * We need the second aligned pointer of the migrate_nodes
	 * list_head to stay clear from the rb_parent_color union
	 * (aligned and different from any node) and also different
	 * from &migrate_nodes.  This will verify that future list.h changes
	 * don't break STABLE_NODE_DUP_HEAD.  Only recent gcc can handle it.
	 */
#if defined(GCC_VERSION) && GCC_VERSION >= 40903
	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);
#endif

	if (stable_node->head == &migrate_nodes)
		list_del(&stable_node->list);
	else
		stable_node_dup_del(stable_node);
	free_stable_node(stable_node);
}

enum get_ksm_page_flags {
	GET_KSM_PAGE_NOLOCK,
	GET_KSM_PAGE_LOCK,
	GET_KSM_PAGE_TRYLOCK
};

/*
 * get_ksm_page: checks if the page indicated by the stable node
 * is still its ksm page, despite having held no reference to it.
 * In which case we can trust the content of the page, and it
 * returns the gotten page; but if the page has now been zapped,
 * remove the stale node from the stable tree and return NULL.
 * But beware, the stable node's page might be being migrated.
 *
 * You would expect the stable_node to hold a reference to the ksm page.
 * But if it increments the page's count, swapping out has to wait for
 * ksmd to come around again before it can free the page, which may take
 * seconds or even minutes: much too unresponsive.  So instead we use a
 * "keyhole reference": access to the ksm page from the stable node peeps
 * out through its keyhole to see if that page still holds the right key,
 * pointing back to this stable node.  This relies on freeing a PageAnon
 * page to reset its page->mapping to NULL, and relies on no other use of
 * a page to put something that might look like our key in page->mapping.
 * A page that still shows our key may nonetheless be on its way to being
 * freed; that is an anomaly to bear in mind below.
 */
static struct page *get_ksm_page(struct stable_node *stable_node,
				 enum get_ksm_page_flags flags)
{
	struct page *page;
	void *expected_mapping;
	unsigned long kpfn;

	expected_mapping = (void *)((unsigned long)stable_node |
					PAGE_MAPPING_KSM);
again:
	kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
	page = pfn_to_page(kpfn);
	if (READ_ONCE(page->mapping) != expected_mapping)
		goto stale;

	/*
	 * We cannot do anything with the page while its refcount is 0.
	 * Usually 0 means free, or tail of a higher-order page: in which
	 * case this node is no longer referenced, and should be freed;
	 * however, it might mean that the page is under page_ref_freeze().
	 * The __remove_mapping() case is easy, again the node is now stale;
	 * the same applies in the reuse_ksm_page() case; but if page is
	 * swapcache in migrate_page_move_mapping(), it might still be our
	 * page, in which case it's essential to keep the node.
	 */
	while (!get_page_unless_zero(page)) {
		/*
		 * Another check for page->mapping != expected_mapping would
		 * work here too.  We have chosen the !PageSwapCache test to
		 * optimize the common case, when the page is or is about to
		 * be freed: PageSwapCache is cleared (under spin_lock_irq)
		 * in the ref_freeze section of __remove_mapping(); but Anon
		 * page->mapping is reset to NULL later, in free_pages_prepare().
		 */
		if (!PageSwapCache(page))
			goto stale;
		cpu_relax();
	}

	if (READ_ONCE(page->mapping) != expected_mapping) {
		put_page(page);
		goto stale;
	}

	if (flags == GET_KSM_PAGE_TRYLOCK) {
		if (!trylock_page(page)) {
			put_page(page);
			return ERR_PTR(-EBUSY);
		}
	} else if (flags == GET_KSM_PAGE_LOCK)
		lock_page(page);

	if (flags != GET_KSM_PAGE_NOLOCK) {
		if (READ_ONCE(page->mapping) != expected_mapping) {
			unlock_page(page);
			put_page(page);
			goto stale;
		}
	}
	return page;

stale:
	/*
	 * We come here from above when page->mapping or !PageSwapCache
	 * suggests that the node is stale; but it might be under migration.
	 * We need smp_rmb(), matching the smp_wmb() in ksm_migrate_page(),
	 * before checking whether node->kpfn has been changed.
	 */
	smp_rmb();
	if (READ_ONCE(stable_node->kpfn) != kpfn)
		goto again;
	remove_node_from_stable_tree(stable_node);
	return NULL;
}

/*
 * Removing rmap_item from stable or unstable tree.
 * This function will clean the information from the stable/unstable tree.
 */
static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
{
	if (rmap_item->address & STABLE_FLAG) {
		struct stable_node *stable_node;
		struct page *page;

		stable_node = rmap_item->head;
		page = get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
		if (!page)
			goto out;

		hlist_del(&rmap_item->hlist);
		put_page(page);

		if (!hlist_empty(&stable_node->hlist))
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;
		VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
		stable_node->rmap_hlist_len--;

		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;

	} else if (rmap_item->address & UNSTABLE_FLAG) {
		unsigned char age;
		/*
		 * Usually ksmd can and must skip the rb_erase, because
		 * root_unstable_tree was already reset to RB_ROOT.
		 * But be careful when an mm is exiting: do the rb_erase
		 * if this rmap_item was inserted by this scan, rather
		 * than left over from before.
		 */
		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
		BUG_ON(age > 1);
		if (!age)
			rb_erase(&rmap_item->node,
				 root_unstable_tree + NUMA(rmap_item->nid));
		ksm_pages_unshared--;
		rmap_item->address &= PAGE_MASK;
	}
out:
	cond_resched();		/* we're called from many long loops */
}

static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
				       struct rmap_item **rmap_list)
{
	while (*rmap_list) {
		struct rmap_item *rmap_item = *rmap_list;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}
}

/*
 * Though it's very tempting to unmerge rmap_items from the stable tree rather
 * than check every pte of a given vma, the locking doesn't quite work for
 * that - an rmap_item is assigned to the stable tree after inserting the ksm
 * page and upping mmap_lock.  Nor does it fit with the way we skip dup'ing
 * rmap_items from parent to child at fork time (so as not to waste time
 * if exit comes before the next scan reaches it).
 *
 * Similarly, although we'd like to remove rmap_items (so updating counts
 * and freeing memory) when unmerging an area, it's easier to leave that
 * to the next pass of ksmd - consider, for example, how ksmd might be
 * in cmp_and_merge_page on one of the rmap_items we would be removing.
 */
static int unmerge_ksm_pages(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
		if (ksm_test_exit(vma->vm_mm))
			break;
		if (signal_pending(current))
			err = -ERESTARTSYS;
		else
			err = break_ksm(vma, addr);
	}
	return err;
}

static inline struct stable_node *page_stable_node(struct page *page)
{
	return PageKsm(page) ? page_rmapping(page) : NULL;
}

static inline void set_page_stable_node(struct page *page,
					struct stable_node *stable_node)
{
	page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
}
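
/*
 * Aside (descriptive only): PageKsm() is recognised purely from the low
 * bits of page->mapping - PAGE_MAPPING_KSM is PAGE_MAPPING_ANON |
 * PAGE_MAPPING_MOVABLE - so the two helpers above are what make a page a
 * "ksm page"; no separate page flag is consumed for this.
 */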

#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */
static int remove_stable_node(struct stable_node *stable_node)
{
	struct page *page;
	int err;

	page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
	if (!page) {
		/*
		 * get_ksm_page did remove_node_from_stable_tree itself.
		 */
		return 0;
	}

	/*
	 * Page could still be mapped if this races with __mmput() running in
	 * between ksm_exit() and exit_mmap().  Just refuse to let
	 * merge_across_nodes/max_page_sharing be switched.
	 */
	err = -EBUSY;
	if (!page_mapped(page)) {
		/*
		 * The stable node did not yet appear stale to get_ksm_page(),
		 * since that allows for an unmapped ksm page to be recognized
		 * right up until it is freed; but the node is safe to remove.
		 * This page might be in a pagevec waiting to be freed,
		 * or it might be PageSwapCache (perhaps under writeback),
		 * or it might have been removed from swapcache a moment ago.
		 */
		set_page_stable_node(page, NULL);
		remove_node_from_stable_tree(stable_node);
		err = 0;
	}

	unlock_page(page);
	put_page(page);
	return err;
}

static int remove_stable_node_chain(struct stable_node *stable_node,
				    struct rb_root *root)
{
	struct stable_node *dup;
	struct hlist_node *hlist_safe;

	if (!is_stable_node_chain(stable_node)) {
		VM_BUG_ON(is_stable_node_dup(stable_node));
		if (remove_stable_node(stable_node))
			return true;
		else
			return false;
	}

	hlist_for_each_entry_safe(dup, hlist_safe,
				  &stable_node->hlist, hlist_dup) {
		VM_BUG_ON(!is_stable_node_dup(dup));
		if (remove_stable_node(dup))
			return true;
	}
	BUG_ON(!hlist_empty(&stable_node->hlist));
	free_stable_node_chain(stable_node, root);
	return false;
}

static int remove_all_stable_nodes(void)
{
	struct stable_node *stable_node, *next;
	int nid;
	int err = 0;

	for (nid = 0; nid < ksm_nr_node_ids; nid++) {
		while (root_stable_tree[nid].rb_node) {
			stable_node = rb_entry(root_stable_tree[nid].rb_node,
					       struct stable_node, node);
			if (remove_stable_node_chain(stable_node,
						     root_stable_tree + nid)) {
				err = -EBUSY;
				break;	/* proceed to next nid */
			}
			cond_resched();
		}
	}
	list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
		if (remove_stable_node(stable_node))
			err = -EBUSY;
		cond_resched();
	}
	return err;
}

static int unmerge_and_remove_all_rmap_items(void)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int err = 0;

	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
						struct mm_slot, mm_list);
	spin_unlock(&ksm_mmlist_lock);

	for (mm_slot = ksm_scan.mm_slot;
			mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
		mm = mm_slot->mm;
		mmap_read_lock(mm);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (ksm_test_exit(mm))
				break;
			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
				continue;
			err = unmerge_ksm_pages(vma,
						vma->vm_start, vma->vm_end);
			if (err)
				goto error;
		}

		remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
		mmap_read_unlock(mm);

		spin_lock(&ksm_mmlist_lock);
		ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
						struct mm_slot, mm_list);
		if (ksm_test_exit(mm)) {
			hash_del(&mm_slot->link);
			list_del(&mm_slot->mm_list);
			spin_unlock(&ksm_mmlist_lock);

			free_mm_slot(mm_slot);
			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
			mmdrop(mm);
		} else
			spin_unlock(&ksm_mmlist_lock);
	}

	/* Clean up stable nodes, but don't worry if some are still busy */
	remove_all_stable_nodes();
	ksm_scan.seqnr = 0;
	return 0;

error:
	mmap_read_unlock(mm);
	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = &ksm_mm_head;
	spin_unlock(&ksm_mmlist_lock);
	return err;
}
#endif /* CONFIG_SYSFS */

static u32 calc_checksum(struct page *page)
{
	u32 checksum;
	void *addr = kmap_atomic(page);
	checksum = xxhash(addr, PAGE_SIZE, 0);
	kunmap_atomic(addr);
	return checksum;
}
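
/*
 * Note (descriptive only): the xxhash checksum above is used just to judge
 * whether a page stayed unchanged between scans, by comparison against
 * rmap_item->oldchecksum - the gate for unstable tree insertion.  Hash
 * collisions are harmless here, since an actual merge is only ever decided
 * by a full byte-compare of the two pages.
 */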

static int write_protect_page(struct vm_area_struct *vma, struct page *page,
			      pte_t *orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
	};
	int swapped;
	int err = -EFAULT;
	struct mmu_notifier_range range;

	pvmw.address = page_address_in_vma(page, vma);
	if (pvmw.address == -EFAULT)
		goto out;

	BUG_ON(PageTransCompound(page));

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
				pvmw.address,
				pvmw.address + PAGE_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	if (!page_vma_mapped_walk(&pvmw))
		goto out_mn;
	if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
		goto out_unlock;

	if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
	    (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) ||
						mm_tlb_flush_pending(mm)) {
		pte_t entry;

		swapped = PageSwapCache(page);
		flush_cache_page(vma, pvmw.address, page_to_pfn(page));
		/*
		 * Ok this is tricky: when get_user_pages_fast() runs, it
		 * doesn't take any lock, therefore the check that we are
		 * going to make with the pagecount against the mapcount is
		 * racy and O_DIRECT can happen right after the check.
		 * So we clear the pte and flush the tlb before the check;
		 * this assures us that no O_DIRECT can happen after the
		 * check or in the middle of the check.
		 *
		 * No need to notify as we are downgrading page table to read
		 * only not changing it to point to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
		/*
		 * Check that no O_DIRECT or similar I/O is in progress on the
		 * page
		 */
		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
			set_pte_at(mm, pvmw.address, pvmw.pte, entry);
			goto out_unlock;
		}
		if (pte_dirty(entry))
			set_page_dirty(page);

		if (pte_protnone(entry))
			entry = pte_mkclean(pte_clear_savedwrite(entry));
		else
			entry = pte_mkclean(pte_wrprotect(entry));
		set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
	}
	*orig_pte = *pvmw.pte;
	err = 0;

out_unlock:
Shutemov page_vma_mapped_walk_done(&pvmw); 11026bdb913fSHaggai Eran out_mn: 1103ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_end(&range); 110431dbd01fSIzik Eidus out: 110531dbd01fSIzik Eidus return err; 110631dbd01fSIzik Eidus } 110731dbd01fSIzik Eidus 110831dbd01fSIzik Eidus /** 110931dbd01fSIzik Eidus * replace_page - replace page in vma by new ksm page 11108dd3557aSHugh Dickins * @vma: vma that holds the pte pointing to page 11118dd3557aSHugh Dickins * @page: the page we are replacing by kpage 11128dd3557aSHugh Dickins * @kpage: the ksm page we replace page by 111331dbd01fSIzik Eidus * @orig_pte: the original value of the pte 111431dbd01fSIzik Eidus * 111531dbd01fSIzik Eidus * Returns 0 on success, -EFAULT on failure. 111631dbd01fSIzik Eidus */ 11178dd3557aSHugh Dickins static int replace_page(struct vm_area_struct *vma, struct page *page, 11188dd3557aSHugh Dickins struct page *kpage, pte_t orig_pte) 111931dbd01fSIzik Eidus { 112031dbd01fSIzik Eidus struct mm_struct *mm = vma->vm_mm; 112131dbd01fSIzik Eidus pmd_t *pmd; 112231dbd01fSIzik Eidus pte_t *ptep; 1123e86c59b1SClaudio Imbrenda pte_t newpte; 112431dbd01fSIzik Eidus spinlock_t *ptl; 112531dbd01fSIzik Eidus unsigned long addr; 112631dbd01fSIzik Eidus int err = -EFAULT; 1127ac46d4f3SJérôme Glisse struct mmu_notifier_range range; 112831dbd01fSIzik Eidus 11298dd3557aSHugh Dickins addr = page_address_in_vma(page, vma); 113031dbd01fSIzik Eidus if (addr == -EFAULT) 113131dbd01fSIzik Eidus goto out; 113231dbd01fSIzik Eidus 11336219049aSBob Liu pmd = mm_find_pmd(mm, addr); 11346219049aSBob Liu if (!pmd) 113531dbd01fSIzik Eidus goto out; 113631dbd01fSIzik Eidus 11377269f999SJérôme Glisse mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr, 11386f4f13e8SJérôme Glisse addr + PAGE_SIZE); 1139ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 11406bdb913fSHaggai Eran 114131dbd01fSIzik Eidus ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); 114231dbd01fSIzik Eidus if (!pte_same(*ptep, orig_pte)) { 114331dbd01fSIzik Eidus pte_unmap_unlock(ptep, ptl); 11446bdb913fSHaggai Eran goto out_mn; 114531dbd01fSIzik Eidus } 114631dbd01fSIzik Eidus 1147e86c59b1SClaudio Imbrenda /* 1148e86c59b1SClaudio Imbrenda * No need to check ksm_use_zero_pages here: we can only have a 1149457aef94SEthon Paul * zero_page here if ksm_use_zero_pages was enabled already. 1150e86c59b1SClaudio Imbrenda */ 1151e86c59b1SClaudio Imbrenda if (!is_zero_pfn(page_to_pfn(kpage))) { 11528dd3557aSHugh Dickins get_page(kpage); 1153d281ee61SKirill A. Shutemov page_add_anon_rmap(kpage, vma, addr, false); 1154e86c59b1SClaudio Imbrenda newpte = mk_pte(kpage, vma->vm_page_prot); 1155e86c59b1SClaudio Imbrenda } else { 1156e86c59b1SClaudio Imbrenda newpte = pte_mkspecial(pfn_pte(page_to_pfn(kpage), 1157e86c59b1SClaudio Imbrenda vma->vm_page_prot)); 1158a38c015fSClaudio Imbrenda /* 1159a38c015fSClaudio Imbrenda * We're replacing an anonymous page with a zero page, which is 1160a38c015fSClaudio Imbrenda * not anonymous. We need to do proper accounting otherwise we 1161a38c015fSClaudio Imbrenda * will get wrong values in /proc, and a BUG message in dmesg 1162a38c015fSClaudio Imbrenda * when tearing down the mm. 
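 *
 * A rough sketch of the accounting, with illustrative values:
 *
 *	before:	MM_ANONPAGES == N	the old anon pte was counted
 *					when it was first installed
 *	after:	MM_ANONPAGES == N - 1	the zero page is mapped by a
 *					special pte and never counted
 *
 * hence the explicit dec_mm_counter() below.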
1163a38c015fSClaudio Imbrenda */ 1164a38c015fSClaudio Imbrenda dec_mm_counter(mm, MM_ANONPAGES); 1165e86c59b1SClaudio Imbrenda } 116631dbd01fSIzik Eidus 116731dbd01fSIzik Eidus flush_cache_page(vma, addr, pte_pfn(*ptep)); 11680f10851eSJérôme Glisse /* 11690f10851eSJérôme Glisse * No need to notify as we are replacing a read only page with another 11700f10851eSJérôme Glisse * read only page with the same content. 11710f10851eSJérôme Glisse * 1172ad56b738SMike Rapoport * See Documentation/vm/mmu_notifier.rst 11730f10851eSJérôme Glisse */ 11740f10851eSJérôme Glisse ptep_clear_flush(vma, addr, ptep); 1175e86c59b1SClaudio Imbrenda set_pte_at_notify(mm, addr, ptep, newpte); 117631dbd01fSIzik Eidus 1177d281ee61SKirill A. Shutemov page_remove_rmap(page, false); 1178ae52a2adSHugh Dickins if (!page_mapped(page)) 1179ae52a2adSHugh Dickins try_to_free_swap(page); 11808dd3557aSHugh Dickins put_page(page); 118131dbd01fSIzik Eidus 118231dbd01fSIzik Eidus pte_unmap_unlock(ptep, ptl); 118331dbd01fSIzik Eidus err = 0; 11846bdb913fSHaggai Eran out_mn: 1185ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_end(&range); 118631dbd01fSIzik Eidus out: 118731dbd01fSIzik Eidus return err; 118831dbd01fSIzik Eidus } 118931dbd01fSIzik Eidus 119031dbd01fSIzik Eidus /* 119131dbd01fSIzik Eidus * try_to_merge_one_page - take two pages and merge them into one 11928dd3557aSHugh Dickins * @vma: the vma that holds the pte pointing to page 11938dd3557aSHugh Dickins * @page: the PageAnon page that we want to replace with kpage 119480e14822SHugh Dickins * @kpage: the PageKsm page that we want to map instead of page, 119580e14822SHugh Dickins * or NULL the first time when we want to use page as kpage. 119631dbd01fSIzik Eidus * 119731dbd01fSIzik Eidus * This function returns 0 if the pages were merged, -EFAULT otherwise. 119831dbd01fSIzik Eidus */ 119931dbd01fSIzik Eidus static int try_to_merge_one_page(struct vm_area_struct *vma, 12008dd3557aSHugh Dickins struct page *page, struct page *kpage) 120131dbd01fSIzik Eidus { 120231dbd01fSIzik Eidus pte_t orig_pte = __pte(0); 120331dbd01fSIzik Eidus int err = -EFAULT; 120431dbd01fSIzik Eidus 1205db114b83SHugh Dickins if (page == kpage) /* ksm page forked */ 1206db114b83SHugh Dickins return 0; 1207db114b83SHugh Dickins 12088dd3557aSHugh Dickins if (!PageAnon(page)) 120931dbd01fSIzik Eidus goto out; 121031dbd01fSIzik Eidus 121131dbd01fSIzik Eidus /* 121231dbd01fSIzik Eidus * We need the page lock to read a stable PageSwapCache in 121331dbd01fSIzik Eidus * write_protect_page(). We use trylock_page() instead of 121431dbd01fSIzik Eidus * lock_page() because we don't want to wait here - we 121531dbd01fSIzik Eidus * prefer to continue scanning and merging different pages, 121631dbd01fSIzik Eidus * then come back to this page when it is unlocked. 121731dbd01fSIzik Eidus */ 12188dd3557aSHugh Dickins if (!trylock_page(page)) 121931e855eaSHugh Dickins goto out; 1220f765f540SKirill A. Shutemov 1221f765f540SKirill A. Shutemov if (PageTransCompound(page)) { 1222a7306c34SAndrea Arcangeli if (split_huge_page(page)) 1223f765f540SKirill A. Shutemov goto out_unlock; 1224f765f540SKirill A. Shutemov } 1225f765f540SKirill A. Shutemov 122631dbd01fSIzik Eidus /* 122731dbd01fSIzik Eidus * If this anonymous page is mapped only here, its pte may need 122831dbd01fSIzik Eidus * to be write-protected. If it's mapped elsewhere, all of its 122931dbd01fSIzik Eidus * ptes are necessarily already write-protected. But in either 123031dbd01fSIzik Eidus * case, we need to lock and check page_count is not raised. 
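 *
 * A sketch of the refcount check made by write_protect_page() above,
 * assuming a single mapping and no swap cache (swapped == 0):
 *
 *	page_mapcount(page) + 1 + swapped == page_count(page)
 *	         1          + 1 +    0    ==        2
 *
 * one reference per pte mapping plus the reference our caller took
 * with FOLL_GET; anything beyond that may be O_DIRECT in flight, and
 * write_protect_page() backs off by restoring the original pte.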
123131dbd01fSIzik Eidus */ 123280e14822SHugh Dickins if (write_protect_page(vma, page, &orig_pte) == 0) { 123380e14822SHugh Dickins if (!kpage) { 123480e14822SHugh Dickins /* 123580e14822SHugh Dickins * While we hold page lock, upgrade page from 123680e14822SHugh Dickins * PageAnon+anon_vma to PageKsm+NULL stable_node: 123780e14822SHugh Dickins * stable_tree_insert() will update stable_node. 123880e14822SHugh Dickins */ 123980e14822SHugh Dickins set_page_stable_node(page, NULL); 124080e14822SHugh Dickins mark_page_accessed(page); 1241337ed7ebSMinchan Kim /* 1242337ed7ebSMinchan Kim * Page reclaim just frees a clean page with no dirty 1243337ed7ebSMinchan Kim * ptes: make sure that the ksm page would be swapped. 1244337ed7ebSMinchan Kim */ 1245337ed7ebSMinchan Kim if (!PageDirty(page)) 1246337ed7ebSMinchan Kim SetPageDirty(page); 124780e14822SHugh Dickins err = 0; 124880e14822SHugh Dickins } else if (pages_identical(page, kpage)) 12498dd3557aSHugh Dickins err = replace_page(vma, page, kpage, orig_pte); 125080e14822SHugh Dickins } 125131dbd01fSIzik Eidus 125280e14822SHugh Dickins if ((vma->vm_flags & VM_LOCKED) && kpage && !err) { 125373848b46SHugh Dickins munlock_vma_page(page); 12545ad64688SHugh Dickins if (!PageMlocked(kpage)) { 12555ad64688SHugh Dickins unlock_page(page); 12565ad64688SHugh Dickins lock_page(kpage); 12575ad64688SHugh Dickins mlock_vma_page(kpage); 12585ad64688SHugh Dickins page = kpage; /* for final unlock */ 12595ad64688SHugh Dickins } 12605ad64688SHugh Dickins } 126173848b46SHugh Dickins 1262f765f540SKirill A. Shutemov out_unlock: 12638dd3557aSHugh Dickins unlock_page(page); 126431dbd01fSIzik Eidus out: 126531dbd01fSIzik Eidus return err; 126631dbd01fSIzik Eidus } 126731dbd01fSIzik Eidus 126831dbd01fSIzik Eidus /* 126981464e30SHugh Dickins * try_to_merge_with_ksm_page - like try_to_merge_two_pages, 127081464e30SHugh Dickins * but no new kernel page is allocated: kpage must already be a ksm page. 12718dd3557aSHugh Dickins * 12728dd3557aSHugh Dickins * This function returns 0 if the pages were merged, -EFAULT otherwise. 
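 *
 * Sketch of the two call patterns, cf. try_to_merge_two_pages() and
 * cmp_and_merge_page() below:
 *
 *	try_to_merge_with_ksm_page(rmap_item, page, NULL);
 *		page itself is upgraded to a ksm page
 *	try_to_merge_with_ksm_page(rmap_item, page, kpage);
 *		page is replaced by the existing ksm page kpage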
127381464e30SHugh Dickins */ 12748dd3557aSHugh Dickins static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item, 12758dd3557aSHugh Dickins struct page *page, struct page *kpage) 127681464e30SHugh Dickins { 12778dd3557aSHugh Dickins struct mm_struct *mm = rmap_item->mm; 127881464e30SHugh Dickins struct vm_area_struct *vma; 127981464e30SHugh Dickins int err = -EFAULT; 128081464e30SHugh Dickins 1281d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 128285c6e8ddSAndrea Arcangeli vma = find_mergeable_vma(mm, rmap_item->address); 128385c6e8ddSAndrea Arcangeli if (!vma) 12849ba69294SHugh Dickins goto out; 12859ba69294SHugh Dickins 12868dd3557aSHugh Dickins err = try_to_merge_one_page(vma, page, kpage); 1287db114b83SHugh Dickins if (err) 1288db114b83SHugh Dickins goto out; 1289db114b83SHugh Dickins 1290bc56620bSHugh Dickins /* Unstable nid is in union with stable anon_vma: remove first */ 1291bc56620bSHugh Dickins remove_rmap_item_from_tree(rmap_item); 1292bc56620bSHugh Dickins 1293c1e8d7c6SMichel Lespinasse /* Must get reference to anon_vma while still holding mmap_lock */ 12949e60109fSPeter Zijlstra rmap_item->anon_vma = vma->anon_vma; 12959e60109fSPeter Zijlstra get_anon_vma(vma->anon_vma); 129681464e30SHugh Dickins out: 1297d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 129881464e30SHugh Dickins return err; 129981464e30SHugh Dickins } 130081464e30SHugh Dickins 130181464e30SHugh Dickins /* 130231dbd01fSIzik Eidus * try_to_merge_two_pages - take two identical pages and prepare them 130331dbd01fSIzik Eidus * to be merged into one page. 130431dbd01fSIzik Eidus * 13058dd3557aSHugh Dickins * This function returns the kpage if we successfully merged two identical 13068dd3557aSHugh Dickins * pages into one ksm page, NULL otherwise. 130731dbd01fSIzik Eidus * 130880e14822SHugh Dickins * Note that this function upgrades page to ksm page: if one of the pages 130931dbd01fSIzik Eidus * is already a ksm page, try_to_merge_with_ksm_page should be used. 131031dbd01fSIzik Eidus */ 13118dd3557aSHugh Dickins static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item, 13128dd3557aSHugh Dickins struct page *page, 13138dd3557aSHugh Dickins struct rmap_item *tree_rmap_item, 13148dd3557aSHugh Dickins struct page *tree_page) 131531dbd01fSIzik Eidus { 131680e14822SHugh Dickins int err; 131731dbd01fSIzik Eidus 131880e14822SHugh Dickins err = try_to_merge_with_ksm_page(rmap_item, page, NULL); 131931dbd01fSIzik Eidus if (!err) { 13208dd3557aSHugh Dickins err = try_to_merge_with_ksm_page(tree_rmap_item, 132180e14822SHugh Dickins tree_page, page); 132231dbd01fSIzik Eidus /* 132381464e30SHugh Dickins * If that fails, we have a ksm page with only one pte 132481464e30SHugh Dickins * pointing to it: so break it. 132531dbd01fSIzik Eidus */ 13264035c07aSHugh Dickins if (err) 13278dd3557aSHugh Dickins break_cow(rmap_item); 132831dbd01fSIzik Eidus } 132980e14822SHugh Dickins return err ? 
NULL : page; 133031dbd01fSIzik Eidus } 133131dbd01fSIzik Eidus 13322c653d0eSAndrea Arcangeli static __always_inline 13332c653d0eSAndrea Arcangeli bool __is_page_sharing_candidate(struct stable_node *stable_node, int offset) 13342c653d0eSAndrea Arcangeli { 13352c653d0eSAndrea Arcangeli VM_BUG_ON(stable_node->rmap_hlist_len < 0); 13362c653d0eSAndrea Arcangeli /* 13372c653d0eSAndrea Arcangeli * Check that at least one mapping still exists, otherwise 13382c653d0eSAndrea Arcangeli * there's not much point in merging and sharing with this 13392c653d0eSAndrea Arcangeli * stable_node, as the underlying tree_page of the other 13402c653d0eSAndrea Arcangeli * sharer is going to be freed soon. 13412c653d0eSAndrea Arcangeli */ 13422c653d0eSAndrea Arcangeli return stable_node->rmap_hlist_len && 13432c653d0eSAndrea Arcangeli stable_node->rmap_hlist_len + offset < ksm_max_page_sharing; 13442c653d0eSAndrea Arcangeli } 13452c653d0eSAndrea Arcangeli 13462c653d0eSAndrea Arcangeli static __always_inline 13472c653d0eSAndrea Arcangeli bool is_page_sharing_candidate(struct stable_node *stable_node) 13482c653d0eSAndrea Arcangeli { 13492c653d0eSAndrea Arcangeli return __is_page_sharing_candidate(stable_node, 0); 13502c653d0eSAndrea Arcangeli } 13512c653d0eSAndrea Arcangeli 1352c01f0b54SColin Ian King static struct page *stable_node_dup(struct stable_node **_stable_node_dup, 13538dc5ffcdSAndrea Arcangeli struct stable_node **_stable_node, 13542c653d0eSAndrea Arcangeli struct rb_root *root, 13552c653d0eSAndrea Arcangeli bool prune_stale_stable_nodes) 13562c653d0eSAndrea Arcangeli { 1357b4fecc67SAndrea Arcangeli struct stable_node *dup, *found = NULL, *stable_node = *_stable_node; 13582c653d0eSAndrea Arcangeli struct hlist_node *hlist_safe; 13598dc5ffcdSAndrea Arcangeli struct page *_tree_page, *tree_page = NULL; 13602c653d0eSAndrea Arcangeli int nr = 0; 13612c653d0eSAndrea Arcangeli int found_rmap_hlist_len; 13622c653d0eSAndrea Arcangeli 13632c653d0eSAndrea Arcangeli if (!prune_stale_stable_nodes || 13642c653d0eSAndrea Arcangeli time_before(jiffies, stable_node->chain_prune_time + 13652c653d0eSAndrea Arcangeli msecs_to_jiffies( 13662c653d0eSAndrea Arcangeli ksm_stable_node_chains_prune_millisecs))) 13672c653d0eSAndrea Arcangeli prune_stale_stable_nodes = false; 13682c653d0eSAndrea Arcangeli else 13692c653d0eSAndrea Arcangeli stable_node->chain_prune_time = jiffies; 13702c653d0eSAndrea Arcangeli 13712c653d0eSAndrea Arcangeli hlist_for_each_entry_safe(dup, hlist_safe, 13722c653d0eSAndrea Arcangeli &stable_node->hlist, hlist_dup) { 13732c653d0eSAndrea Arcangeli cond_resched(); 13742c653d0eSAndrea Arcangeli /* 13752c653d0eSAndrea Arcangeli * We must walk all stable_node_dups to prune the stale 13762c653d0eSAndrea Arcangeli * stable nodes during lookup. 13772c653d0eSAndrea Arcangeli * 13782c653d0eSAndrea Arcangeli * get_ksm_page can drop the nodes from the 13792c653d0eSAndrea Arcangeli * stable_node->hlist if they point to freed pages 13802c653d0eSAndrea Arcangeli * (that's why we do a _safe walk). The "dup" 13812c653d0eSAndrea Arcangeli * stable_node parameter itself will be freed from 13822c653d0eSAndrea Arcangeli * under us if it returns NULL.
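 *
 * A minimal sketch of why the _safe variant is needed:
 *
 *	hlist_for_each_entry_safe(dup, hlist_safe, head, hlist_dup) {
 *		_tree_page = get_ksm_page(dup, ...);
 *			on a stale dup this unlinks and frees it
 *	}
 *
 * hlist_safe caches dup's successor before the loop body runs, so
 * the walk survives get_ksm_page() freeing the current "dup".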
13832c653d0eSAndrea Arcangeli */ 13842cee57d1SYang Shi _tree_page = get_ksm_page(dup, GET_KSM_PAGE_NOLOCK); 13852c653d0eSAndrea Arcangeli if (!_tree_page) 13862c653d0eSAndrea Arcangeli continue; 13872c653d0eSAndrea Arcangeli nr += 1; 13882c653d0eSAndrea Arcangeli if (is_page_sharing_candidate(dup)) { 13892c653d0eSAndrea Arcangeli if (!found || 13902c653d0eSAndrea Arcangeli dup->rmap_hlist_len > found_rmap_hlist_len) { 13912c653d0eSAndrea Arcangeli if (found) 13928dc5ffcdSAndrea Arcangeli put_page(tree_page); 13932c653d0eSAndrea Arcangeli found = dup; 13942c653d0eSAndrea Arcangeli found_rmap_hlist_len = found->rmap_hlist_len; 13958dc5ffcdSAndrea Arcangeli tree_page = _tree_page; 13962c653d0eSAndrea Arcangeli 13978dc5ffcdSAndrea Arcangeli /* skip put_page for found dup */ 13982c653d0eSAndrea Arcangeli if (!prune_stale_stable_nodes) 13992c653d0eSAndrea Arcangeli break; 14002c653d0eSAndrea Arcangeli continue; 14012c653d0eSAndrea Arcangeli } 14022c653d0eSAndrea Arcangeli } 14032c653d0eSAndrea Arcangeli put_page(_tree_page); 14042c653d0eSAndrea Arcangeli } 14052c653d0eSAndrea Arcangeli 140680b18dfaSAndrea Arcangeli if (found) { 14072c653d0eSAndrea Arcangeli /* 140880b18dfaSAndrea Arcangeli * nr is counting all dups in the chain only if 140980b18dfaSAndrea Arcangeli * prune_stale_stable_nodes is true, otherwise we may 141080b18dfaSAndrea Arcangeli * break the loop at nr == 1 even if there are 141180b18dfaSAndrea Arcangeli * multiple entries. 14122c653d0eSAndrea Arcangeli */ 141380b18dfaSAndrea Arcangeli if (prune_stale_stable_nodes && nr == 1) { 14142c653d0eSAndrea Arcangeli /* 14152c653d0eSAndrea Arcangeli * If there's not just one entry it would 14162c653d0eSAndrea Arcangeli * corrupt memory, better BUG_ON. In KSM 14172c653d0eSAndrea Arcangeli * context with no lock held it's not even 14182c653d0eSAndrea Arcangeli * fatal. 14192c653d0eSAndrea Arcangeli */ 14202c653d0eSAndrea Arcangeli BUG_ON(stable_node->hlist.first->next); 14212c653d0eSAndrea Arcangeli 14222c653d0eSAndrea Arcangeli /* 14232c653d0eSAndrea Arcangeli * There's just one entry and it is below the 14242c653d0eSAndrea Arcangeli * deduplication limit so drop the chain. 14252c653d0eSAndrea Arcangeli */ 14262c653d0eSAndrea Arcangeli rb_replace_node(&stable_node->node, &found->node, 14272c653d0eSAndrea Arcangeli root); 14282c653d0eSAndrea Arcangeli free_stable_node(stable_node); 14292c653d0eSAndrea Arcangeli ksm_stable_node_chains--; 14302c653d0eSAndrea Arcangeli ksm_stable_node_dups--; 1431b4fecc67SAndrea Arcangeli /* 14320ba1d0f7SAndrea Arcangeli * NOTE: the caller depends on the stable_node 14330ba1d0f7SAndrea Arcangeli * to be equal to stable_node_dup if the chain 14340ba1d0f7SAndrea Arcangeli * was collapsed. 1435b4fecc67SAndrea Arcangeli */ 14360ba1d0f7SAndrea Arcangeli *_stable_node = found; 14370ba1d0f7SAndrea Arcangeli /* 14380ba1d0f7SAndrea Arcangeli * Just for robustness as stable_node is 14390ba1d0f7SAndrea Arcangeli * otherwise left as a stable pointer, the 14400ba1d0f7SAndrea Arcangeli * compiler shall optimize it away at build 14410ba1d0f7SAndrea Arcangeli * time.
14420ba1d0f7SAndrea Arcangeli */ 14430ba1d0f7SAndrea Arcangeli stable_node = NULL; 144480b18dfaSAndrea Arcangeli } else if (stable_node->hlist.first != &found->hlist_dup && 144580b18dfaSAndrea Arcangeli __is_page_sharing_candidate(found, 1)) { 14462c653d0eSAndrea Arcangeli /* 144780b18dfaSAndrea Arcangeli * If the found stable_node dup can accept one 144880b18dfaSAndrea Arcangeli * more future merge (in addition to the one 144980b18dfaSAndrea Arcangeli * that is underway) and is not at the head of 145080b18dfaSAndrea Arcangeli * the chain, put it there so next search will 145180b18dfaSAndrea Arcangeli * be quicker in the !prune_stale_stable_nodes 145280b18dfaSAndrea Arcangeli * case. 145380b18dfaSAndrea Arcangeli * 145480b18dfaSAndrea Arcangeli * NOTE: it would be inaccurate to use nr > 1 145580b18dfaSAndrea Arcangeli * instead of checking the hlist.first pointer 145680b18dfaSAndrea Arcangeli * directly, because in the 145780b18dfaSAndrea Arcangeli * prune_stale_stable_nodes case "nr" isn't 145880b18dfaSAndrea Arcangeli * the position of the found dup in the chain, 145980b18dfaSAndrea Arcangeli * but the total number of dups in the chain. 14602c653d0eSAndrea Arcangeli */ 14612c653d0eSAndrea Arcangeli hlist_del(&found->hlist_dup); 14622c653d0eSAndrea Arcangeli hlist_add_head(&found->hlist_dup, 14632c653d0eSAndrea Arcangeli &stable_node->hlist); 14642c653d0eSAndrea Arcangeli } 14652c653d0eSAndrea Arcangeli } 14662c653d0eSAndrea Arcangeli 14678dc5ffcdSAndrea Arcangeli *_stable_node_dup = found; 14688dc5ffcdSAndrea Arcangeli return tree_page; 14692c653d0eSAndrea Arcangeli } 14702c653d0eSAndrea Arcangeli 14712c653d0eSAndrea Arcangeli static struct stable_node *stable_node_dup_any(struct stable_node *stable_node, 14722c653d0eSAndrea Arcangeli struct rb_root *root) 14732c653d0eSAndrea Arcangeli { 14742c653d0eSAndrea Arcangeli if (!is_stable_node_chain(stable_node)) 14752c653d0eSAndrea Arcangeli return stable_node; 14762c653d0eSAndrea Arcangeli if (hlist_empty(&stable_node->hlist)) { 14772c653d0eSAndrea Arcangeli free_stable_node_chain(stable_node, root); 14782c653d0eSAndrea Arcangeli return NULL; 14792c653d0eSAndrea Arcangeli } 14802c653d0eSAndrea Arcangeli return hlist_entry(stable_node->hlist.first, 14812c653d0eSAndrea Arcangeli typeof(*stable_node), hlist_dup); 14822c653d0eSAndrea Arcangeli } 14832c653d0eSAndrea Arcangeli 14848dc5ffcdSAndrea Arcangeli /* 14858dc5ffcdSAndrea Arcangeli * As with get_ksm_page, this function can free the *_stable_node and 14868dc5ffcdSAndrea Arcangeli * *_stable_node_dup if the returned tree_page is NULL. 14878dc5ffcdSAndrea Arcangeli * 14888dc5ffcdSAndrea Arcangeli * It can also free and overwrite *_stable_node with the found 14898dc5ffcdSAndrea Arcangeli * stable_node_dup if the chain is collapsed (in which case 14908dc5ffcdSAndrea Arcangeli * *_stable_node will be equal to *_stable_node_dup as if the chain 14918dc5ffcdSAndrea Arcangeli * never existed). It's up to the caller to verify tree_page is not 14928dc5ffcdSAndrea Arcangeli * NULL before dereferencing *_stable_node or *_stable_node_dup. 14938dc5ffcdSAndrea Arcangeli * 14948dc5ffcdSAndrea Arcangeli * *_stable_node_dup is really a second output parameter of this 14958dc5ffcdSAndrea Arcangeli * function and will be overwritten in all cases; the caller doesn't 14968dc5ffcdSAndrea Arcangeli * need to initialize it.
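 *
 * Typical caller pattern (sketch, cf. stable_tree_search() below):
 *
 *	tree_page = chain_prune(&stable_node_dup, &stable_node, root);
 *	if (!stable_node_dup)
 *		all dups were full, or the chain was empty;
 *	if (!tree_page)
 *		a stale node was freed under us: restart the walk;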
14978dc5ffcdSAndrea Arcangeli */ 14988dc5ffcdSAndrea Arcangeli static struct page *__stable_node_chain(struct stable_node **_stable_node_dup, 14998dc5ffcdSAndrea Arcangeli struct stable_node **_stable_node, 15002c653d0eSAndrea Arcangeli struct rb_root *root, 15012c653d0eSAndrea Arcangeli bool prune_stale_stable_nodes) 15022c653d0eSAndrea Arcangeli { 1503b4fecc67SAndrea Arcangeli struct stable_node *stable_node = *_stable_node; 15042c653d0eSAndrea Arcangeli if (!is_stable_node_chain(stable_node)) { 15052c653d0eSAndrea Arcangeli if (is_page_sharing_candidate(stable_node)) { 15068dc5ffcdSAndrea Arcangeli *_stable_node_dup = stable_node; 15072cee57d1SYang Shi return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK); 15082c653d0eSAndrea Arcangeli } 15098dc5ffcdSAndrea Arcangeli /* 15108dc5ffcdSAndrea Arcangeli * _stable_node_dup set to NULL means the stable_node 15118dc5ffcdSAndrea Arcangeli * reached the ksm_max_page_sharing limit. 15128dc5ffcdSAndrea Arcangeli */ 15138dc5ffcdSAndrea Arcangeli *_stable_node_dup = NULL; 15142c653d0eSAndrea Arcangeli return NULL; 15152c653d0eSAndrea Arcangeli } 15168dc5ffcdSAndrea Arcangeli return stable_node_dup(_stable_node_dup, _stable_node, root, 15172c653d0eSAndrea Arcangeli prune_stale_stable_nodes); 15182c653d0eSAndrea Arcangeli } 15192c653d0eSAndrea Arcangeli 15208dc5ffcdSAndrea Arcangeli static __always_inline struct page *chain_prune(struct stable_node **s_n_d, 15218dc5ffcdSAndrea Arcangeli struct stable_node **s_n, 15222c653d0eSAndrea Arcangeli struct rb_root *root) 15232c653d0eSAndrea Arcangeli { 15248dc5ffcdSAndrea Arcangeli return __stable_node_chain(s_n_d, s_n, root, true); 15252c653d0eSAndrea Arcangeli } 15262c653d0eSAndrea Arcangeli 15278dc5ffcdSAndrea Arcangeli static __always_inline struct page *chain(struct stable_node **s_n_d, 15288dc5ffcdSAndrea Arcangeli struct stable_node *s_n, 15292c653d0eSAndrea Arcangeli struct rb_root *root) 15302c653d0eSAndrea Arcangeli { 15318dc5ffcdSAndrea Arcangeli struct stable_node *old_stable_node = s_n; 15328dc5ffcdSAndrea Arcangeli struct page *tree_page; 15338dc5ffcdSAndrea Arcangeli 15348dc5ffcdSAndrea Arcangeli tree_page = __stable_node_chain(s_n_d, &s_n, root, false); 15358dc5ffcdSAndrea Arcangeli /* not pruning dups so s_n cannot have changed */ 15368dc5ffcdSAndrea Arcangeli VM_BUG_ON(s_n != old_stable_node); 15378dc5ffcdSAndrea Arcangeli return tree_page; 15382c653d0eSAndrea Arcangeli } 15392c653d0eSAndrea Arcangeli 154031dbd01fSIzik Eidus /* 15418dd3557aSHugh Dickins * stable_tree_search - search for page inside the stable tree 154231dbd01fSIzik Eidus * 154331dbd01fSIzik Eidus * This function checks if there is a page inside the stable tree 154431dbd01fSIzik Eidus * with identical content to the page that we are scanning right now. 154531dbd01fSIzik Eidus * 15467b6ba2c7SHugh Dickins * This function returns the ksm page of identical content if found, 154731dbd01fSIzik Eidus * NULL otherwise.
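 *
 * At its core the walk below is a plain rbtree descent ordered by
 * memcmp_pages() (sketch; the real loop also copes with stable_node
 * chains and with stale nodes):
 *
 *	while (*new) {
 *		ret = memcmp_pages(page, tree_page);
 *		if (ret < 0)
 *			new = &parent->rb_left;
 *		else if (ret > 0)
 *			new = &parent->rb_right;
 *		else
 *			return tree_page;	identical content found
 *	}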
154831dbd01fSIzik Eidus */ 154962b61f61SHugh Dickins static struct page *stable_tree_search(struct page *page) 155031dbd01fSIzik Eidus { 155190bd6fd3SPetr Holasek int nid; 1552ef53d16cSHugh Dickins struct rb_root *root; 15534146d2d6SHugh Dickins struct rb_node **new; 15544146d2d6SHugh Dickins struct rb_node *parent; 15552c653d0eSAndrea Arcangeli struct stable_node *stable_node, *stable_node_dup, *stable_node_any; 15564146d2d6SHugh Dickins struct stable_node *page_node; 155731dbd01fSIzik Eidus 15584146d2d6SHugh Dickins page_node = page_stable_node(page); 15594146d2d6SHugh Dickins if (page_node && page_node->head != &migrate_nodes) { 15604146d2d6SHugh Dickins /* ksm page forked */ 156108beca44SHugh Dickins get_page(page); 156262b61f61SHugh Dickins return page; 156308beca44SHugh Dickins } 156408beca44SHugh Dickins 156590bd6fd3SPetr Holasek nid = get_kpfn_nid(page_to_pfn(page)); 1566ef53d16cSHugh Dickins root = root_stable_tree + nid; 15674146d2d6SHugh Dickins again: 1568ef53d16cSHugh Dickins new = &root->rb_node; 15694146d2d6SHugh Dickins parent = NULL; 157090bd6fd3SPetr Holasek 15714146d2d6SHugh Dickins while (*new) { 15724035c07aSHugh Dickins struct page *tree_page; 157331dbd01fSIzik Eidus int ret; 157431dbd01fSIzik Eidus 157531dbd01fSIzik Eidus cond_resched(); 15764146d2d6SHugh Dickins stable_node = rb_entry(*new, struct stable_node, node); 15772c653d0eSAndrea Arcangeli stable_node_any = NULL; 15788dc5ffcdSAndrea Arcangeli tree_page = chain_prune(&stable_node_dup, &stable_node, root); 1579b4fecc67SAndrea Arcangeli /* 1580b4fecc67SAndrea Arcangeli * NOTE: stable_node may have been freed by 1581b4fecc67SAndrea Arcangeli * chain_prune() if the returned stable_node_dup is 1582b4fecc67SAndrea Arcangeli * not NULL. stable_node_dup may have been inserted in 1583b4fecc67SAndrea Arcangeli * the rbtree instead as a regular stable_node (in 1584b4fecc67SAndrea Arcangeli * order to collapse the stable_node chain if a single 15850ba1d0f7SAndrea Arcangeli * stable_node dup was found in it). In that case the 15860ba1d0f7SAndrea Arcangeli * stable_node is overwritten by the callee to point 15870ba1d0f7SAndrea Arcangeli * to the stable_node_dup that was collapsed in the 15880ba1d0f7SAndrea Arcangeli * stable rbtree and stable_node will be equal to 15890ba1d0f7SAndrea Arcangeli * stable_node_dup as if the chain never existed. 1590b4fecc67SAndrea Arcangeli */ 15912c653d0eSAndrea Arcangeli if (!stable_node_dup) { 15922c653d0eSAndrea Arcangeli /* 15932c653d0eSAndrea Arcangeli * Either all stable_node dups were full in 15942c653d0eSAndrea Arcangeli * this stable_node chain, or this chain was 15952c653d0eSAndrea Arcangeli * empty and should be rb_erased. 15962c653d0eSAndrea Arcangeli */ 15972c653d0eSAndrea Arcangeli stable_node_any = stable_node_dup_any(stable_node, 15982c653d0eSAndrea Arcangeli root); 15992c653d0eSAndrea Arcangeli if (!stable_node_any) { 16002c653d0eSAndrea Arcangeli /* rb_erase just run */ 16012c653d0eSAndrea Arcangeli goto again; 16022c653d0eSAndrea Arcangeli } 16032c653d0eSAndrea Arcangeli /* 16042c653d0eSAndrea Arcangeli * Take any of the stable_node dups page of 16052c653d0eSAndrea Arcangeli * this stable_node chain to let the tree walk 16062c653d0eSAndrea Arcangeli * continue. All KSM pages belonging to the 16072c653d0eSAndrea Arcangeli * stable_node dups in a stable_node chain 16082c653d0eSAndrea Arcangeli * have the same content and they're 1609457aef94SEthon Paul * write protected at all times. Any will work 16102c653d0eSAndrea Arcangeli * fine to continue the walk.
16112c653d0eSAndrea Arcangeli */ 16122cee57d1SYang Shi tree_page = get_ksm_page(stable_node_any, 16132cee57d1SYang Shi GET_KSM_PAGE_NOLOCK); 16142c653d0eSAndrea Arcangeli } 16152c653d0eSAndrea Arcangeli VM_BUG_ON(!stable_node_dup ^ !!stable_node_any); 1616f2e5ff85SAndrea Arcangeli if (!tree_page) { 1617f2e5ff85SAndrea Arcangeli /* 1618f2e5ff85SAndrea Arcangeli * If we walked over a stale stable_node, 1619f2e5ff85SAndrea Arcangeli * get_ksm_page() will call rb_erase() and it 1620f2e5ff85SAndrea Arcangeli * may rebalance the tree from under us. So 1621f2e5ff85SAndrea Arcangeli * restart the search from scratch. Returning 1622f2e5ff85SAndrea Arcangeli * NULL would be safe too, but we'd generate 1623f2e5ff85SAndrea Arcangeli * false negative insertions just because some 1624f2e5ff85SAndrea Arcangeli * stable_node was stale. 1625f2e5ff85SAndrea Arcangeli */ 1626f2e5ff85SAndrea Arcangeli goto again; 1627f2e5ff85SAndrea Arcangeli } 162831dbd01fSIzik Eidus 16294035c07aSHugh Dickins ret = memcmp_pages(page, tree_page); 1630c8d6553bSHugh Dickins put_page(tree_page); 163131dbd01fSIzik Eidus 16324146d2d6SHugh Dickins parent = *new; 1633c8d6553bSHugh Dickins if (ret < 0) 16344146d2d6SHugh Dickins new = &parent->rb_left; 1635c8d6553bSHugh Dickins else if (ret > 0) 16364146d2d6SHugh Dickins new = &parent->rb_right; 1637c8d6553bSHugh Dickins else { 16382c653d0eSAndrea Arcangeli if (page_node) { 16392c653d0eSAndrea Arcangeli VM_BUG_ON(page_node->head != &migrate_nodes); 16402c653d0eSAndrea Arcangeli /* 16412c653d0eSAndrea Arcangeli * Test if the migrated page should be merged 16422c653d0eSAndrea Arcangeli * into a stable node dup. If the mapcount is 16432c653d0eSAndrea Arcangeli * 1 we can migrate it with another KSM page 16442c653d0eSAndrea Arcangeli * without adding it to the chain. 16452c653d0eSAndrea Arcangeli */ 16462c653d0eSAndrea Arcangeli if (page_mapcount(page) > 1) 16472c653d0eSAndrea Arcangeli goto chain_append; 16482c653d0eSAndrea Arcangeli } 16492c653d0eSAndrea Arcangeli 16502c653d0eSAndrea Arcangeli if (!stable_node_dup) { 16512c653d0eSAndrea Arcangeli /* 16522c653d0eSAndrea Arcangeli * If the stable_node is a chain and 16532c653d0eSAndrea Arcangeli * we got a payload match in memcmp 16542c653d0eSAndrea Arcangeli * but we cannot merge the scanned 16552c653d0eSAndrea Arcangeli * page in any of the existing 16562c653d0eSAndrea Arcangeli * stable_node dups because they're 16572c653d0eSAndrea Arcangeli * all full, we need to wait for the 16582c653d0eSAndrea Arcangeli * scanned page to find itself a match 16592c653d0eSAndrea Arcangeli * in the unstable tree to create a 16602c653d0eSAndrea Arcangeli * brand new KSM page to add later to 16612c653d0eSAndrea Arcangeli * the dups of this stable_node. 16622c653d0eSAndrea Arcangeli */ 16632c653d0eSAndrea Arcangeli return NULL; 16642c653d0eSAndrea Arcangeli } 16652c653d0eSAndrea Arcangeli 1666c8d6553bSHugh Dickins /* 1667c8d6553bSHugh Dickins * Lock and unlock the stable_node's page (which 1668c8d6553bSHugh Dickins * might already have been migrated) so that page 1669c8d6553bSHugh Dickins * migration is sure to notice its raised count. 1670c8d6553bSHugh Dickins * It would be more elegant to return stable_node 1671c8d6553bSHugh Dickins * than kpage, but that involves more changes.
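 *
 * (Sketch of the interaction: page migration only proceeds when the
 * page's refcount matches what it expects, cf. expected_page_refs()
 * in mm/migrate.c, so the extra reference taken here makes any
 * concurrent migration of this kpage fail and retry, and the page
 * cannot be freed or replaced under the caller.)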
1672c8d6553bSHugh Dickins */ 16732cee57d1SYang Shi tree_page = get_ksm_page(stable_node_dup, 16742cee57d1SYang Shi GET_KSM_PAGE_TRYLOCK); 16752cee57d1SYang Shi 16762cee57d1SYang Shi if (PTR_ERR(tree_page) == -EBUSY) 16772cee57d1SYang Shi return ERR_PTR(-EBUSY); 16782cee57d1SYang Shi 16792c653d0eSAndrea Arcangeli if (unlikely(!tree_page)) 16802c653d0eSAndrea Arcangeli /* 16812c653d0eSAndrea Arcangeli * The tree may have been rebalanced, 16822c653d0eSAndrea Arcangeli * so re-evaluate parent and new. 16832c653d0eSAndrea Arcangeli */ 16842c653d0eSAndrea Arcangeli goto again; 1685c8d6553bSHugh Dickins unlock_page(tree_page); 16862c653d0eSAndrea Arcangeli 16872c653d0eSAndrea Arcangeli if (get_kpfn_nid(stable_node_dup->kpfn) != 16882c653d0eSAndrea Arcangeli NUMA(stable_node_dup->nid)) { 16894146d2d6SHugh Dickins put_page(tree_page); 16904146d2d6SHugh Dickins goto replace; 16914146d2d6SHugh Dickins } 169262b61f61SHugh Dickins return tree_page; 169331dbd01fSIzik Eidus } 1694c8d6553bSHugh Dickins } 169531dbd01fSIzik Eidus 16964146d2d6SHugh Dickins if (!page_node) 169731dbd01fSIzik Eidus return NULL; 16984146d2d6SHugh Dickins 16994146d2d6SHugh Dickins list_del(&page_node->list); 17004146d2d6SHugh Dickins DO_NUMA(page_node->nid = nid); 17014146d2d6SHugh Dickins rb_link_node(&page_node->node, parent, new); 1702ef53d16cSHugh Dickins rb_insert_color(&page_node->node, root); 17032c653d0eSAndrea Arcangeli out: 17042c653d0eSAndrea Arcangeli if (is_page_sharing_candidate(page_node)) { 17054146d2d6SHugh Dickins get_page(page); 17064146d2d6SHugh Dickins return page; 17072c653d0eSAndrea Arcangeli } else 17082c653d0eSAndrea Arcangeli return NULL; 17094146d2d6SHugh Dickins 17104146d2d6SHugh Dickins replace: 1711b4fecc67SAndrea Arcangeli /* 1712b4fecc67SAndrea Arcangeli * If stable_node was a chain and chain_prune collapsed it, 17130ba1d0f7SAndrea Arcangeli * stable_node has been updated to be the new regular 17140ba1d0f7SAndrea Arcangeli * stable_node. A collapse of the chain is indistinguishable 17150ba1d0f7SAndrea Arcangeli * from the case there was no chain in the stable 17160ba1d0f7SAndrea Arcangeli * rbtree. Otherwise stable_node is the chain and 17170ba1d0f7SAndrea Arcangeli * stable_node_dup is the dup to replace. 
1718b4fecc67SAndrea Arcangeli */ 17190ba1d0f7SAndrea Arcangeli if (stable_node_dup == stable_node) { 1720b4fecc67SAndrea Arcangeli VM_BUG_ON(is_stable_node_chain(stable_node_dup)); 1721b4fecc67SAndrea Arcangeli VM_BUG_ON(is_stable_node_dup(stable_node_dup)); 17222c653d0eSAndrea Arcangeli /* there is no chain */ 17234146d2d6SHugh Dickins if (page_node) { 17242c653d0eSAndrea Arcangeli VM_BUG_ON(page_node->head != &migrate_nodes); 17254146d2d6SHugh Dickins list_del(&page_node->list); 17264146d2d6SHugh Dickins DO_NUMA(page_node->nid = nid); 1727b4fecc67SAndrea Arcangeli rb_replace_node(&stable_node_dup->node, 1728b4fecc67SAndrea Arcangeli &page_node->node, 17292c653d0eSAndrea Arcangeli root); 17302c653d0eSAndrea Arcangeli if (is_page_sharing_candidate(page_node)) 17314146d2d6SHugh Dickins get_page(page); 17322c653d0eSAndrea Arcangeli else 17332c653d0eSAndrea Arcangeli page = NULL; 17344146d2d6SHugh Dickins } else { 1735b4fecc67SAndrea Arcangeli rb_erase(&stable_node_dup->node, root); 17364146d2d6SHugh Dickins page = NULL; 17374146d2d6SHugh Dickins } 17382c653d0eSAndrea Arcangeli } else { 17392c653d0eSAndrea Arcangeli VM_BUG_ON(!is_stable_node_chain(stable_node)); 17402c653d0eSAndrea Arcangeli __stable_node_dup_del(stable_node_dup); 17412c653d0eSAndrea Arcangeli if (page_node) { 17422c653d0eSAndrea Arcangeli VM_BUG_ON(page_node->head != &migrate_nodes); 17432c653d0eSAndrea Arcangeli list_del(&page_node->list); 17442c653d0eSAndrea Arcangeli DO_NUMA(page_node->nid = nid); 17452c653d0eSAndrea Arcangeli stable_node_chain_add_dup(page_node, stable_node); 17462c653d0eSAndrea Arcangeli if (is_page_sharing_candidate(page_node)) 17472c653d0eSAndrea Arcangeli get_page(page); 17482c653d0eSAndrea Arcangeli else 17492c653d0eSAndrea Arcangeli page = NULL; 17502c653d0eSAndrea Arcangeli } else { 17512c653d0eSAndrea Arcangeli page = NULL; 17522c653d0eSAndrea Arcangeli } 17532c653d0eSAndrea Arcangeli } 17542c653d0eSAndrea Arcangeli stable_node_dup->head = &migrate_nodes; 17552c653d0eSAndrea Arcangeli list_add(&stable_node_dup->list, stable_node_dup->head); 17564146d2d6SHugh Dickins return page; 17572c653d0eSAndrea Arcangeli 17582c653d0eSAndrea Arcangeli chain_append: 17592c653d0eSAndrea Arcangeli /* stable_node_dup could be null if it reached the limit */ 17602c653d0eSAndrea Arcangeli if (!stable_node_dup) 17612c653d0eSAndrea Arcangeli stable_node_dup = stable_node_any; 1762b4fecc67SAndrea Arcangeli /* 1763b4fecc67SAndrea Arcangeli * If stable_node was a chain and chain_prune collapsed it, 17640ba1d0f7SAndrea Arcangeli * stable_node has been updated to be the new regular 17650ba1d0f7SAndrea Arcangeli * stable_node. A collapse of the chain is indistinguishable 17660ba1d0f7SAndrea Arcangeli * from the case there was no chain in the stable 17670ba1d0f7SAndrea Arcangeli * rbtree. Otherwise stable_node is the chain and 17680ba1d0f7SAndrea Arcangeli * stable_node_dup is the dup to replace. 
1769b4fecc67SAndrea Arcangeli */ 17700ba1d0f7SAndrea Arcangeli if (stable_node_dup == stable_node) { 1771b4fecc67SAndrea Arcangeli VM_BUG_ON(is_stable_node_dup(stable_node_dup)); 17722c653d0eSAndrea Arcangeli /* chain is missing so create it */ 17732c653d0eSAndrea Arcangeli stable_node = alloc_stable_node_chain(stable_node_dup, 17742c653d0eSAndrea Arcangeli root); 17752c653d0eSAndrea Arcangeli if (!stable_node) 17762c653d0eSAndrea Arcangeli return NULL; 17772c653d0eSAndrea Arcangeli } 17782c653d0eSAndrea Arcangeli /* 17792c653d0eSAndrea Arcangeli * Add this stable_node dup that was 17802c653d0eSAndrea Arcangeli * migrated to the stable_node chain 17812c653d0eSAndrea Arcangeli * of the current nid for this page 17822c653d0eSAndrea Arcangeli * content. 17832c653d0eSAndrea Arcangeli */ 1784b4fecc67SAndrea Arcangeli VM_BUG_ON(!is_stable_node_dup(stable_node_dup)); 17852c653d0eSAndrea Arcangeli VM_BUG_ON(page_node->head != &migrate_nodes); 17862c653d0eSAndrea Arcangeli list_del(&page_node->list); 17872c653d0eSAndrea Arcangeli DO_NUMA(page_node->nid = nid); 17882c653d0eSAndrea Arcangeli stable_node_chain_add_dup(page_node, stable_node); 17892c653d0eSAndrea Arcangeli goto out; 179031dbd01fSIzik Eidus } 179131dbd01fSIzik Eidus 179231dbd01fSIzik Eidus /* 1793e850dcf5SHugh Dickins * stable_tree_insert - insert stable tree node pointing to new ksm page 179431dbd01fSIzik Eidus * into the stable tree. 179531dbd01fSIzik Eidus * 17967b6ba2c7SHugh Dickins * This function returns the stable tree node just allocated on success, 17977b6ba2c7SHugh Dickins * NULL otherwise. 179831dbd01fSIzik Eidus */ 17997b6ba2c7SHugh Dickins static struct stable_node *stable_tree_insert(struct page *kpage) 180031dbd01fSIzik Eidus { 180190bd6fd3SPetr Holasek int nid; 180290bd6fd3SPetr Holasek unsigned long kpfn; 1803ef53d16cSHugh Dickins struct rb_root *root; 180490bd6fd3SPetr Holasek struct rb_node **new; 1805f2e5ff85SAndrea Arcangeli struct rb_node *parent; 18062c653d0eSAndrea Arcangeli struct stable_node *stable_node, *stable_node_dup, *stable_node_any; 18072c653d0eSAndrea Arcangeli bool need_chain = false; 180831dbd01fSIzik Eidus 180990bd6fd3SPetr Holasek kpfn = page_to_pfn(kpage); 181090bd6fd3SPetr Holasek nid = get_kpfn_nid(kpfn); 1811ef53d16cSHugh Dickins root = root_stable_tree + nid; 1812f2e5ff85SAndrea Arcangeli again: 1813f2e5ff85SAndrea Arcangeli parent = NULL; 1814ef53d16cSHugh Dickins new = &root->rb_node; 181590bd6fd3SPetr Holasek 181631dbd01fSIzik Eidus while (*new) { 18174035c07aSHugh Dickins struct page *tree_page; 181831dbd01fSIzik Eidus int ret; 181931dbd01fSIzik Eidus 182031dbd01fSIzik Eidus cond_resched(); 182108beca44SHugh Dickins stable_node = rb_entry(*new, struct stable_node, node); 18222c653d0eSAndrea Arcangeli stable_node_any = NULL; 18238dc5ffcdSAndrea Arcangeli tree_page = chain(&stable_node_dup, stable_node, root); 18242c653d0eSAndrea Arcangeli if (!stable_node_dup) { 18252c653d0eSAndrea Arcangeli /* 18262c653d0eSAndrea Arcangeli * Either all stable_node dups were full in 18272c653d0eSAndrea Arcangeli * this stable_node chain, or this chain was 18282c653d0eSAndrea Arcangeli * empty and should be rb_erased. 
18292c653d0eSAndrea Arcangeli */ 18302c653d0eSAndrea Arcangeli stable_node_any = stable_node_dup_any(stable_node, 18312c653d0eSAndrea Arcangeli root); 18322c653d0eSAndrea Arcangeli if (!stable_node_any) { 18332c653d0eSAndrea Arcangeli /* rb_erase just run */ 18342c653d0eSAndrea Arcangeli goto again; 18352c653d0eSAndrea Arcangeli } 18362c653d0eSAndrea Arcangeli /* 18372c653d0eSAndrea Arcangeli * Take any of the stable_node dups page of 18382c653d0eSAndrea Arcangeli * this stable_node chain to let the tree walk 18392c653d0eSAndrea Arcangeli * continue. All KSM pages belonging to the 18402c653d0eSAndrea Arcangeli * stable_node dups in a stable_node chain 18412c653d0eSAndrea Arcangeli * have the same content and they're 1842457aef94SEthon Paul * write protected at all times. Any will work 18432c653d0eSAndrea Arcangeli * fine to continue the walk. 18442c653d0eSAndrea Arcangeli */ 18452cee57d1SYang Shi tree_page = get_ksm_page(stable_node_any, 18462cee57d1SYang Shi GET_KSM_PAGE_NOLOCK); 18472c653d0eSAndrea Arcangeli } 18482c653d0eSAndrea Arcangeli VM_BUG_ON(!stable_node_dup ^ !!stable_node_any); 1849f2e5ff85SAndrea Arcangeli if (!tree_page) { 1850f2e5ff85SAndrea Arcangeli /* 1851f2e5ff85SAndrea Arcangeli * If we walked over a stale stable_node, 1852f2e5ff85SAndrea Arcangeli * get_ksm_page() will call rb_erase() and it 1853f2e5ff85SAndrea Arcangeli * may rebalance the tree from under us. So 1854f2e5ff85SAndrea Arcangeli * restart the search from scratch. Returning 1855f2e5ff85SAndrea Arcangeli * NULL would be safe too, but we'd generate 1856f2e5ff85SAndrea Arcangeli * false negative insertions just because some 1857f2e5ff85SAndrea Arcangeli * stable_node was stale. 1858f2e5ff85SAndrea Arcangeli */ 1859f2e5ff85SAndrea Arcangeli goto again; 1860f2e5ff85SAndrea Arcangeli } 186131dbd01fSIzik Eidus 18624035c07aSHugh Dickins ret = memcmp_pages(kpage, tree_page); 18634035c07aSHugh Dickins put_page(tree_page); 186431dbd01fSIzik Eidus 186531dbd01fSIzik Eidus parent = *new; 186631dbd01fSIzik Eidus if (ret < 0) 186731dbd01fSIzik Eidus new = &parent->rb_left; 186831dbd01fSIzik Eidus else if (ret > 0) 186931dbd01fSIzik Eidus new = &parent->rb_right; 187031dbd01fSIzik Eidus else { 18712c653d0eSAndrea Arcangeli need_chain = true; 18722c653d0eSAndrea Arcangeli break; 187331dbd01fSIzik Eidus } 187431dbd01fSIzik Eidus } 187531dbd01fSIzik Eidus 18762c653d0eSAndrea Arcangeli stable_node_dup = alloc_stable_node(); 18772c653d0eSAndrea Arcangeli if (!stable_node_dup) 18787b6ba2c7SHugh Dickins return NULL; 187931dbd01fSIzik Eidus 18802c653d0eSAndrea Arcangeli INIT_HLIST_HEAD(&stable_node_dup->hlist); 18812c653d0eSAndrea Arcangeli stable_node_dup->kpfn = kpfn; 18822c653d0eSAndrea Arcangeli set_page_stable_node(kpage, stable_node_dup); 18832c653d0eSAndrea Arcangeli stable_node_dup->rmap_hlist_len = 0; 18842c653d0eSAndrea Arcangeli DO_NUMA(stable_node_dup->nid = nid); 18852c653d0eSAndrea Arcangeli if (!need_chain) { 18862c653d0eSAndrea Arcangeli rb_link_node(&stable_node_dup->node, parent, new); 18872c653d0eSAndrea Arcangeli rb_insert_color(&stable_node_dup->node, root); 18882c653d0eSAndrea Arcangeli } else { 18892c653d0eSAndrea Arcangeli if (!is_stable_node_chain(stable_node)) { 18902c653d0eSAndrea Arcangeli struct stable_node *orig = stable_node; 18912c653d0eSAndrea Arcangeli /* chain is missing so create it */ 18922c653d0eSAndrea Arcangeli stable_node = alloc_stable_node_chain(orig, root); 18932c653d0eSAndrea Arcangeli if (!stable_node) { 18942c653d0eSAndrea Arcangeli free_stable_node(stable_node_dup); 
18952c653d0eSAndrea Arcangeli return NULL; 18962c653d0eSAndrea Arcangeli } 18972c653d0eSAndrea Arcangeli } 18982c653d0eSAndrea Arcangeli stable_node_chain_add_dup(stable_node_dup, stable_node); 18992c653d0eSAndrea Arcangeli } 190008beca44SHugh Dickins 19012c653d0eSAndrea Arcangeli return stable_node_dup; 190231dbd01fSIzik Eidus } 190331dbd01fSIzik Eidus 190431dbd01fSIzik Eidus /* 19058dd3557aSHugh Dickins * unstable_tree_search_insert - search for identical page, 19068dd3557aSHugh Dickins * else insert rmap_item into the unstable tree. 190731dbd01fSIzik Eidus * 190831dbd01fSIzik Eidus * This function searches for a page in the unstable tree identical to the 190931dbd01fSIzik Eidus * page currently being scanned; and if no identical page is found in the 191031dbd01fSIzik Eidus * tree, we insert rmap_item as a new object into the unstable tree. 191131dbd01fSIzik Eidus * 191231dbd01fSIzik Eidus * This function returns a pointer to the rmap_item found to be identical 191331dbd01fSIzik Eidus * to the currently scanned page, NULL otherwise. 191431dbd01fSIzik Eidus * 191531dbd01fSIzik Eidus * This function does both searching and inserting, because they share 191631dbd01fSIzik Eidus * the same walking algorithm in an rbtree. 191731dbd01fSIzik Eidus */ 19188dd3557aSHugh Dickins static 19198dd3557aSHugh Dickins struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item, 19208dd3557aSHugh Dickins struct page *page, 19218dd3557aSHugh Dickins struct page **tree_pagep) 192231dbd01fSIzik Eidus { 192390bd6fd3SPetr Holasek struct rb_node **new; 192490bd6fd3SPetr Holasek struct rb_root *root; 192531dbd01fSIzik Eidus struct rb_node *parent = NULL; 192690bd6fd3SPetr Holasek int nid; 192790bd6fd3SPetr Holasek 192890bd6fd3SPetr Holasek nid = get_kpfn_nid(page_to_pfn(page)); 1929ef53d16cSHugh Dickins root = root_unstable_tree + nid; 193090bd6fd3SPetr Holasek new = &root->rb_node; 193131dbd01fSIzik Eidus 193231dbd01fSIzik Eidus while (*new) { 193331dbd01fSIzik Eidus struct rmap_item *tree_rmap_item; 19348dd3557aSHugh Dickins struct page *tree_page; 193531dbd01fSIzik Eidus int ret; 193631dbd01fSIzik Eidus 1937d178f27fSHugh Dickins cond_resched(); 193831dbd01fSIzik Eidus tree_rmap_item = rb_entry(*new, struct rmap_item, node); 19398dd3557aSHugh Dickins tree_page = get_mergeable_page(tree_rmap_item); 1940c8f95ed1SAndrea Arcangeli if (!tree_page) 194131dbd01fSIzik Eidus return NULL; 194231dbd01fSIzik Eidus 194331dbd01fSIzik Eidus /* 19448dd3557aSHugh Dickins * Don't substitute a ksm page for a forked page. 194531dbd01fSIzik Eidus */ 19468dd3557aSHugh Dickins if (page == tree_page) { 19478dd3557aSHugh Dickins put_page(tree_page); 194831dbd01fSIzik Eidus return NULL; 194931dbd01fSIzik Eidus } 195031dbd01fSIzik Eidus 19518dd3557aSHugh Dickins ret = memcmp_pages(page, tree_page); 195231dbd01fSIzik Eidus 195331dbd01fSIzik Eidus parent = *new; 195431dbd01fSIzik Eidus if (ret < 0) { 19558dd3557aSHugh Dickins put_page(tree_page); 195631dbd01fSIzik Eidus new = &parent->rb_left; 195731dbd01fSIzik Eidus } else if (ret > 0) { 19588dd3557aSHugh Dickins put_page(tree_page); 195931dbd01fSIzik Eidus new = &parent->rb_right; 1960b599cbdfSHugh Dickins } else if (!ksm_merge_across_nodes && 1961b599cbdfSHugh Dickins page_to_nid(tree_page) != nid) { 1962b599cbdfSHugh Dickins /* 1963b599cbdfSHugh Dickins * If tree_page has been migrated to another NUMA node, 1964b599cbdfSHugh Dickins * it will be flushed out and put in the right unstable 1965b599cbdfSHugh Dickins * tree next time: only merge with it when merge_across_nodes is set.
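 *
 * Sketch of the per-node tree selection made at the top of
 * this function:
 *
 *	nid = get_kpfn_nid(page_to_pfn(page));
 *	root = root_unstable_tree + nid;
 *
 * so with merge_across_nodes unset, a tree_page whose
 * page_to_nid() no longer matches nid was migrated after
 * insertion, and is skipped rather than merged cross-node.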
1966b599cbdfSHugh Dickins */ 1967b599cbdfSHugh Dickins put_page(tree_page); 1968b599cbdfSHugh Dickins return NULL; 196931dbd01fSIzik Eidus } else { 19708dd3557aSHugh Dickins *tree_pagep = tree_page; 197131dbd01fSIzik Eidus return tree_rmap_item; 197231dbd01fSIzik Eidus } 197331dbd01fSIzik Eidus } 197431dbd01fSIzik Eidus 19757b6ba2c7SHugh Dickins rmap_item->address |= UNSTABLE_FLAG; 197631dbd01fSIzik Eidus rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); 1977e850dcf5SHugh Dickins DO_NUMA(rmap_item->nid = nid); 197831dbd01fSIzik Eidus rb_link_node(&rmap_item->node, parent, new); 197990bd6fd3SPetr Holasek rb_insert_color(&rmap_item->node, root); 198031dbd01fSIzik Eidus 1981473b0ce4SHugh Dickins ksm_pages_unshared++; 198231dbd01fSIzik Eidus return NULL; 198331dbd01fSIzik Eidus } 198431dbd01fSIzik Eidus 198531dbd01fSIzik Eidus /* 198631dbd01fSIzik Eidus * stable_tree_append - add another rmap_item to the linked list of 198731dbd01fSIzik Eidus * rmap_items hanging off a given node of the stable tree, all sharing 198831dbd01fSIzik Eidus * the same ksm page. 198931dbd01fSIzik Eidus */ 199031dbd01fSIzik Eidus static void stable_tree_append(struct rmap_item *rmap_item, 19912c653d0eSAndrea Arcangeli struct stable_node *stable_node, 19922c653d0eSAndrea Arcangeli bool max_page_sharing_bypass) 199331dbd01fSIzik Eidus { 19942c653d0eSAndrea Arcangeli /* 19952c653d0eSAndrea Arcangeli * rmap won't find this mapping if we don't insert the 19962c653d0eSAndrea Arcangeli * rmap_item in the right stable_node 19972c653d0eSAndrea Arcangeli * duplicate. page_migration could break later if rmap breaks, 19982c653d0eSAndrea Arcangeli * so we might as well crash here. We really need to check for 19992c653d0eSAndrea Arcangeli * rmap_hlist_len == STABLE_NODE_CHAIN, but we might as well check 2000457aef94SEthon Paul * for other negative values too: an underflow detected here 20012c653d0eSAndrea Arcangeli * for the first time (and not when decreasing rmap_hlist_len) 20022c653d0eSAndrea Arcangeli * would be a sign of memory corruption in the stable_node. 20032c653d0eSAndrea Arcangeli */ 20042c653d0eSAndrea Arcangeli BUG_ON(stable_node->rmap_hlist_len < 0); 20052c653d0eSAndrea Arcangeli 20062c653d0eSAndrea Arcangeli stable_node->rmap_hlist_len++; 20072c653d0eSAndrea Arcangeli if (!max_page_sharing_bypass) 20082c653d0eSAndrea Arcangeli /* possibly non-fatal but unexpected overflow, only warn */ 20092c653d0eSAndrea Arcangeli WARN_ON_ONCE(stable_node->rmap_hlist_len > 20102c653d0eSAndrea Arcangeli ksm_max_page_sharing); 20112c653d0eSAndrea Arcangeli 20127b6ba2c7SHugh Dickins rmap_item->head = stable_node; 201331dbd01fSIzik Eidus rmap_item->address |= STABLE_FLAG; 20147b6ba2c7SHugh Dickins hlist_add_head(&rmap_item->hlist, &stable_node->hlist); 2015e178dfdeSHugh Dickins 20167b6ba2c7SHugh Dickins if (rmap_item->hlist.next) 2017e178dfdeSHugh Dickins ksm_pages_sharing++; 20187b6ba2c7SHugh Dickins else 20197b6ba2c7SHugh Dickins ksm_pages_shared++; 202031dbd01fSIzik Eidus } 202131dbd01fSIzik Eidus 202231dbd01fSIzik Eidus /* 202381464e30SHugh Dickins * cmp_and_merge_page - first see if page can be merged into the stable tree; 202481464e30SHugh Dickins * if not, compare checksum to previous and if it's the same, see if page can 202581464e30SHugh Dickins * be inserted into the unstable tree, or merged with a page already there and 202681464e30SHugh Dickins * both transferred to the stable tree. 202731dbd01fSIzik Eidus * 202831dbd01fSIzik Eidus * @page: the page that we are searching an identical page for.
202931dbd01fSIzik Eidus * @rmap_item: the reverse mapping into the virtual address of this page 203031dbd01fSIzik Eidus */ 203131dbd01fSIzik Eidus static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) 203231dbd01fSIzik Eidus { 20334b22927fSKirill Tkhai struct mm_struct *mm = rmap_item->mm; 203431dbd01fSIzik Eidus struct rmap_item *tree_rmap_item; 20358dd3557aSHugh Dickins struct page *tree_page = NULL; 20367b6ba2c7SHugh Dickins struct stable_node *stable_node; 20378dd3557aSHugh Dickins struct page *kpage; 203831dbd01fSIzik Eidus unsigned int checksum; 203931dbd01fSIzik Eidus int err; 20402c653d0eSAndrea Arcangeli bool max_page_sharing_bypass = false; 204131dbd01fSIzik Eidus 20424146d2d6SHugh Dickins stable_node = page_stable_node(page); 20434146d2d6SHugh Dickins if (stable_node) { 20444146d2d6SHugh Dickins if (stable_node->head != &migrate_nodes && 20452c653d0eSAndrea Arcangeli get_kpfn_nid(READ_ONCE(stable_node->kpfn)) != 20462c653d0eSAndrea Arcangeli NUMA(stable_node->nid)) { 20472c653d0eSAndrea Arcangeli stable_node_dup_del(stable_node); 20484146d2d6SHugh Dickins stable_node->head = &migrate_nodes; 20494146d2d6SHugh Dickins list_add(&stable_node->list, stable_node->head); 20504146d2d6SHugh Dickins } 20514146d2d6SHugh Dickins if (stable_node->head != &migrate_nodes && 20524146d2d6SHugh Dickins rmap_item->head == stable_node) 20534146d2d6SHugh Dickins return; 20542c653d0eSAndrea Arcangeli /* 20552c653d0eSAndrea Arcangeli * If it's a KSM fork, allow it to go over the sharing limit 20562c653d0eSAndrea Arcangeli * without warnings. 20572c653d0eSAndrea Arcangeli */ 20582c653d0eSAndrea Arcangeli if (!is_page_sharing_candidate(stable_node)) 20592c653d0eSAndrea Arcangeli max_page_sharing_bypass = true; 20604146d2d6SHugh Dickins } 206131dbd01fSIzik Eidus 206231dbd01fSIzik Eidus /* We first start with searching the page inside the stable tree */ 206362b61f61SHugh Dickins kpage = stable_tree_search(page); 20644146d2d6SHugh Dickins if (kpage == page && rmap_item->head == stable_node) { 20654146d2d6SHugh Dickins put_page(kpage); 20664146d2d6SHugh Dickins return; 20674146d2d6SHugh Dickins } 20684146d2d6SHugh Dickins 20694146d2d6SHugh Dickins remove_rmap_item_from_tree(rmap_item); 20704146d2d6SHugh Dickins 207162b61f61SHugh Dickins if (kpage) { 20722cee57d1SYang Shi if (PTR_ERR(kpage) == -EBUSY) 20732cee57d1SYang Shi return; 20742cee57d1SYang Shi 207508beca44SHugh Dickins err = try_to_merge_with_ksm_page(rmap_item, page, kpage); 207631dbd01fSIzik Eidus if (!err) { 207731dbd01fSIzik Eidus /* 207831dbd01fSIzik Eidus * The page was successfully merged: 207931dbd01fSIzik Eidus * add its rmap_item to the stable tree. 208031dbd01fSIzik Eidus */ 20815ad64688SHugh Dickins lock_page(kpage); 20822c653d0eSAndrea Arcangeli stable_tree_append(rmap_item, page_stable_node(kpage), 20832c653d0eSAndrea Arcangeli max_page_sharing_bypass); 20845ad64688SHugh Dickins unlock_page(kpage); 208531dbd01fSIzik Eidus } 20868dd3557aSHugh Dickins put_page(kpage); 208731dbd01fSIzik Eidus return; 208831dbd01fSIzik Eidus } 208931dbd01fSIzik Eidus 209031dbd01fSIzik Eidus /* 20914035c07aSHugh Dickins * If the hash value of the page has changed from the last time 20924035c07aSHugh Dickins * we calculated it, this page is changing frequently: therefore we 20934035c07aSHugh Dickins * don't want to insert it in the unstable tree, and we don't want 20944035c07aSHugh Dickins * to waste our time searching for something identical to it there. 
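 *
 * A sketch of the resulting heuristic, using calc_checksum() from
 * above:
 *
 *	checksum = calc_checksum(page);		xxhash of page contents
 *	if (rmap_item->oldchecksum != checksum) {
 *		rmap_item->oldchecksum = checksum;
 *		return;		volatile page: retry on a later scan
 *	}
 *
 * i.e. a page must produce the same hash on two consecutive scans
 * before KSM pays for an unstable tree search and insert.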
209531dbd01fSIzik Eidus */ 209631dbd01fSIzik Eidus checksum = calc_checksum(page); 209731dbd01fSIzik Eidus if (rmap_item->oldchecksum != checksum) { 209831dbd01fSIzik Eidus rmap_item->oldchecksum = checksum; 209931dbd01fSIzik Eidus return; 210031dbd01fSIzik Eidus } 210131dbd01fSIzik Eidus 2102e86c59b1SClaudio Imbrenda /* 2103e86c59b1SClaudio Imbrenda * Same checksum as an empty page. We attempt to merge it with the 2104e86c59b1SClaudio Imbrenda * appropriate zero page if the user enabled this via sysfs. 2105e86c59b1SClaudio Imbrenda */ 2106e86c59b1SClaudio Imbrenda if (ksm_use_zero_pages && (checksum == zero_checksum)) { 2107e86c59b1SClaudio Imbrenda struct vm_area_struct *vma; 2108e86c59b1SClaudio Imbrenda 2109d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 21104b22927fSKirill Tkhai vma = find_mergeable_vma(mm, rmap_item->address); 211156df70a6SMuchun Song if (vma) { 2112e86c59b1SClaudio Imbrenda err = try_to_merge_one_page(vma, page, 2113e86c59b1SClaudio Imbrenda ZERO_PAGE(rmap_item->address)); 211456df70a6SMuchun Song } else { 211556df70a6SMuchun Song /* 211656df70a6SMuchun Song * If the vma is out of date, we do not need to 211756df70a6SMuchun Song * continue. 211856df70a6SMuchun Song */ 211956df70a6SMuchun Song err = 0; 212056df70a6SMuchun Song } 2121d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 2122e86c59b1SClaudio Imbrenda /* 2123e86c59b1SClaudio Imbrenda * In case of failure, the page was not really empty, so we 2124e86c59b1SClaudio Imbrenda * need to continue. Otherwise we're done. 2125e86c59b1SClaudio Imbrenda */ 2126e86c59b1SClaudio Imbrenda if (!err) 2127e86c59b1SClaudio Imbrenda return; 2128e86c59b1SClaudio Imbrenda } 21298dd3557aSHugh Dickins tree_rmap_item = 21308dd3557aSHugh Dickins unstable_tree_search_insert(rmap_item, page, &tree_page); 213131dbd01fSIzik Eidus if (tree_rmap_item) { 213277da2ba0SClaudio Imbrenda bool split; 213377da2ba0SClaudio Imbrenda 21348dd3557aSHugh Dickins kpage = try_to_merge_two_pages(rmap_item, page, 21358dd3557aSHugh Dickins tree_rmap_item, tree_page); 213677da2ba0SClaudio Imbrenda /* 213777da2ba0SClaudio Imbrenda * If both pages we tried to merge belong to the same compound 213877da2ba0SClaudio Imbrenda * page, then we actually ended up increasing the reference 213977da2ba0SClaudio Imbrenda * count of the same compound page twice, and split_huge_page 214077da2ba0SClaudio Imbrenda * failed. 214177da2ba0SClaudio Imbrenda * Here we set a flag if that happened, and we use it later to 214277da2ba0SClaudio Imbrenda * try split_huge_page again. Since we call put_page right 214377da2ba0SClaudio Imbrenda * afterwards, the reference count will be correct and 214477da2ba0SClaudio Imbrenda * split_huge_page should succeed. 214577da2ba0SClaudio Imbrenda */ 214677da2ba0SClaudio Imbrenda split = PageTransCompound(page) 214777da2ba0SClaudio Imbrenda && compound_head(page) == compound_head(tree_page); 21488dd3557aSHugh Dickins put_page(tree_page); 21498dd3557aSHugh Dickins if (kpage) { 2150bc56620bSHugh Dickins /* 2151bc56620bSHugh Dickins * The pages were successfully merged: insert new 2152bc56620bSHugh Dickins * node in the stable tree and add both rmap_items. 
2153bc56620bSHugh Dickins */ 21545ad64688SHugh Dickins lock_page(kpage); 21557b6ba2c7SHugh Dickins stable_node = stable_tree_insert(kpage); 21567b6ba2c7SHugh Dickins if (stable_node) { 21572c653d0eSAndrea Arcangeli stable_tree_append(tree_rmap_item, stable_node, 21582c653d0eSAndrea Arcangeli false); 21592c653d0eSAndrea Arcangeli stable_tree_append(rmap_item, stable_node, 21602c653d0eSAndrea Arcangeli false); 21617b6ba2c7SHugh Dickins } 21625ad64688SHugh Dickins unlock_page(kpage); 21637b6ba2c7SHugh Dickins 216431dbd01fSIzik Eidus /* 216531dbd01fSIzik Eidus * If we fail to insert the page into the stable tree, 216631dbd01fSIzik Eidus * we will have 2 virtual addresses that are pointing 216731dbd01fSIzik Eidus * to a ksm page left outside the stable tree, 216831dbd01fSIzik Eidus * in which case we need to break_cow on both. 216931dbd01fSIzik Eidus */ 21707b6ba2c7SHugh Dickins if (!stable_node) { 21718dd3557aSHugh Dickins break_cow(tree_rmap_item); 21728dd3557aSHugh Dickins break_cow(rmap_item); 217331dbd01fSIzik Eidus } 217477da2ba0SClaudio Imbrenda } else if (split) { 217577da2ba0SClaudio Imbrenda /* 217677da2ba0SClaudio Imbrenda * We are here if we tried to merge two pages and 217777da2ba0SClaudio Imbrenda * failed because they both belonged to the same 217877da2ba0SClaudio Imbrenda * compound page. We will split the page now, but no 217977da2ba0SClaudio Imbrenda * merging will take place. 218077da2ba0SClaudio Imbrenda * We do not want to add the cost of a full lock; if 218177da2ba0SClaudio Imbrenda * the page is locked, it is better to skip it and 218277da2ba0SClaudio Imbrenda * perhaps try again later. 218377da2ba0SClaudio Imbrenda */ 218477da2ba0SClaudio Imbrenda if (!trylock_page(page)) 218577da2ba0SClaudio Imbrenda return; 218677da2ba0SClaudio Imbrenda split_huge_page(page); 218777da2ba0SClaudio Imbrenda unlock_page(page); 218831dbd01fSIzik Eidus } 218931dbd01fSIzik Eidus } 219031dbd01fSIzik Eidus } 219131dbd01fSIzik Eidus 219231dbd01fSIzik Eidus static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot, 21936514d511SHugh Dickins struct rmap_item **rmap_list, 219431dbd01fSIzik Eidus unsigned long addr) 219531dbd01fSIzik Eidus { 219631dbd01fSIzik Eidus struct rmap_item *rmap_item; 219731dbd01fSIzik Eidus 21986514d511SHugh Dickins while (*rmap_list) { 21996514d511SHugh Dickins rmap_item = *rmap_list; 220093d17715SHugh Dickins if ((rmap_item->address & PAGE_MASK) == addr) 220131dbd01fSIzik Eidus return rmap_item; 220231dbd01fSIzik Eidus if (rmap_item->address > addr) 220331dbd01fSIzik Eidus break; 22046514d511SHugh Dickins *rmap_list = rmap_item->rmap_list; 220531dbd01fSIzik Eidus remove_rmap_item_from_tree(rmap_item); 220631dbd01fSIzik Eidus free_rmap_item(rmap_item); 220731dbd01fSIzik Eidus } 220831dbd01fSIzik Eidus 220931dbd01fSIzik Eidus rmap_item = alloc_rmap_item(); 221031dbd01fSIzik Eidus if (rmap_item) { 221131dbd01fSIzik Eidus /* It has already been zeroed */ 221231dbd01fSIzik Eidus rmap_item->mm = mm_slot->mm; 221331dbd01fSIzik Eidus rmap_item->address = addr; 22146514d511SHugh Dickins rmap_item->rmap_list = *rmap_list; 22156514d511SHugh Dickins *rmap_list = rmap_item; 221631dbd01fSIzik Eidus } 221731dbd01fSIzik Eidus return rmap_item; 221831dbd01fSIzik Eidus } 221931dbd01fSIzik Eidus 222031dbd01fSIzik Eidus static struct rmap_item *scan_get_next_rmap_item(struct page **page) 222131dbd01fSIzik Eidus { 222231dbd01fSIzik Eidus struct mm_struct *mm; 222331dbd01fSIzik Eidus struct mm_slot *slot; 222431dbd01fSIzik Eidus struct vm_area_struct *vma; 222531dbd01fSIzik Eidus 
struct rmap_item *rmap_item; 222690bd6fd3SPetr Holasek int nid; 222731dbd01fSIzik Eidus 222831dbd01fSIzik Eidus if (list_empty(&ksm_mm_head.mm_list)) 222931dbd01fSIzik Eidus return NULL; 223031dbd01fSIzik Eidus 223131dbd01fSIzik Eidus slot = ksm_scan.mm_slot; 223231dbd01fSIzik Eidus if (slot == &ksm_mm_head) { 22332919bfd0SHugh Dickins /* 22342919bfd0SHugh Dickins * A number of pages can hang around indefinitely on per-cpu 22352919bfd0SHugh Dickins * pagevecs, raised page count preventing write_protect_page 22362919bfd0SHugh Dickins * from merging them. Though it doesn't really matter much, 22372919bfd0SHugh Dickins * it is puzzling to see some stuck in pages_volatile until 22382919bfd0SHugh Dickins * other activity jostles them out, and they also prevented 22392919bfd0SHugh Dickins * LTP's KSM test from succeeding deterministically; so drain 22402919bfd0SHugh Dickins * them here (here rather than on entry to ksm_do_scan(), 22412919bfd0SHugh Dickins * so we don't IPI too often when pages_to_scan is set low). 22422919bfd0SHugh Dickins */ 22432919bfd0SHugh Dickins lru_add_drain_all(); 22442919bfd0SHugh Dickins 22454146d2d6SHugh Dickins /* 22464146d2d6SHugh Dickins * Whereas stale stable_nodes on the stable_tree itself 22474146d2d6SHugh Dickins * get pruned in the regular course of stable_tree_search(), 22484146d2d6SHugh Dickins * those moved out to the migrate_nodes list can accumulate: 22494146d2d6SHugh Dickins * so prune them once before each full scan. 22504146d2d6SHugh Dickins */ 22514146d2d6SHugh Dickins if (!ksm_merge_across_nodes) { 225203640418SGeliang Tang struct stable_node *stable_node, *next; 22534146d2d6SHugh Dickins struct page *page; 22544146d2d6SHugh Dickins 225503640418SGeliang Tang list_for_each_entry_safe(stable_node, next, 225603640418SGeliang Tang &migrate_nodes, list) { 22572cee57d1SYang Shi page = get_ksm_page(stable_node, 22582cee57d1SYang Shi GET_KSM_PAGE_NOLOCK); 22594146d2d6SHugh Dickins if (page) 22604146d2d6SHugh Dickins put_page(page); 22614146d2d6SHugh Dickins cond_resched(); 22624146d2d6SHugh Dickins } 22634146d2d6SHugh Dickins } 22644146d2d6SHugh Dickins 2265ef53d16cSHugh Dickins for (nid = 0; nid < ksm_nr_node_ids; nid++) 226690bd6fd3SPetr Holasek root_unstable_tree[nid] = RB_ROOT; 226731dbd01fSIzik Eidus 226831dbd01fSIzik Eidus spin_lock(&ksm_mmlist_lock); 226931dbd01fSIzik Eidus slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list); 227031dbd01fSIzik Eidus ksm_scan.mm_slot = slot; 227131dbd01fSIzik Eidus spin_unlock(&ksm_mmlist_lock); 22722b472611SHugh Dickins /* 22732b472611SHugh Dickins * Although we tested list_empty() above, a racing __ksm_exit 22742b472611SHugh Dickins * of the last mm on the list may have removed it since then. 
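 * (If so, the list_entry() above wrapped around to &ksm_mm_head
 * itself, which is why the slot is re-checked just below.)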
22752b472611SHugh Dickins */ 22762b472611SHugh Dickins if (slot == &ksm_mm_head) 22772b472611SHugh Dickins return NULL; 227831dbd01fSIzik Eidus next_mm: 227931dbd01fSIzik Eidus ksm_scan.address = 0; 22806514d511SHugh Dickins ksm_scan.rmap_list = &slot->rmap_list; 228131dbd01fSIzik Eidus } 228231dbd01fSIzik Eidus 228331dbd01fSIzik Eidus mm = slot->mm; 2284d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 22859ba69294SHugh Dickins if (ksm_test_exit(mm)) 22869ba69294SHugh Dickins vma = NULL; 22879ba69294SHugh Dickins else 22889ba69294SHugh Dickins vma = find_vma(mm, ksm_scan.address); 22899ba69294SHugh Dickins 22909ba69294SHugh Dickins for (; vma; vma = vma->vm_next) { 229131dbd01fSIzik Eidus if (!(vma->vm_flags & VM_MERGEABLE)) 229231dbd01fSIzik Eidus continue; 229331dbd01fSIzik Eidus if (ksm_scan.address < vma->vm_start) 229431dbd01fSIzik Eidus ksm_scan.address = vma->vm_start; 229531dbd01fSIzik Eidus if (!vma->anon_vma) 229631dbd01fSIzik Eidus ksm_scan.address = vma->vm_end; 229731dbd01fSIzik Eidus 229831dbd01fSIzik Eidus while (ksm_scan.address < vma->vm_end) { 22999ba69294SHugh Dickins if (ksm_test_exit(mm)) 23009ba69294SHugh Dickins break; 230131dbd01fSIzik Eidus *page = follow_page(vma, ksm_scan.address, FOLL_GET); 230221ae5b01SAndrea Arcangeli if (IS_ERR_OR_NULL(*page)) { 230321ae5b01SAndrea Arcangeli ksm_scan.address += PAGE_SIZE; 230421ae5b01SAndrea Arcangeli cond_resched(); 230521ae5b01SAndrea Arcangeli continue; 230621ae5b01SAndrea Arcangeli } 2307f765f540SKirill A. Shutemov if (PageAnon(*page)) { 230831dbd01fSIzik Eidus flush_anon_page(vma, *page, ksm_scan.address); 230931dbd01fSIzik Eidus flush_dcache_page(*page); 231031dbd01fSIzik Eidus rmap_item = get_next_rmap_item(slot, 23116514d511SHugh Dickins ksm_scan.rmap_list, ksm_scan.address); 231231dbd01fSIzik Eidus if (rmap_item) { 23136514d511SHugh Dickins ksm_scan.rmap_list = 23146514d511SHugh Dickins &rmap_item->rmap_list; 231531dbd01fSIzik Eidus ksm_scan.address += PAGE_SIZE; 231631dbd01fSIzik Eidus } else 231731dbd01fSIzik Eidus put_page(*page); 2318d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 231931dbd01fSIzik Eidus return rmap_item; 232031dbd01fSIzik Eidus } 232131dbd01fSIzik Eidus put_page(*page); 232231dbd01fSIzik Eidus ksm_scan.address += PAGE_SIZE; 232331dbd01fSIzik Eidus cond_resched(); 232431dbd01fSIzik Eidus } 232531dbd01fSIzik Eidus } 232631dbd01fSIzik Eidus 23279ba69294SHugh Dickins if (ksm_test_exit(mm)) { 23289ba69294SHugh Dickins ksm_scan.address = 0; 23296514d511SHugh Dickins ksm_scan.rmap_list = &slot->rmap_list; 23309ba69294SHugh Dickins } 233131dbd01fSIzik Eidus /* 233231dbd01fSIzik Eidus * Nuke all the rmap_items that are above this current rmap: 233331dbd01fSIzik Eidus * because there were no VM_MERGEABLE vmas with such addresses. 233431dbd01fSIzik Eidus */ 23356514d511SHugh Dickins remove_trailing_rmap_items(slot, ksm_scan.rmap_list); 233631dbd01fSIzik Eidus 233731dbd01fSIzik Eidus spin_lock(&ksm_mmlist_lock); 2338cd551f97SHugh Dickins ksm_scan.mm_slot = list_entry(slot->mm_list.next, 2339cd551f97SHugh Dickins struct mm_slot, mm_list); 2340cd551f97SHugh Dickins if (ksm_scan.address == 0) { 2341cd551f97SHugh Dickins /* 2342c1e8d7c6SMichel Lespinasse * We've completed a full scan of all vmas, holding mmap_lock 2343cd551f97SHugh Dickins * throughout, and found no VM_MERGEABLE: so do the same as 2344cd551f97SHugh Dickins * __ksm_exit does to remove this mm from all our lists now. 
23459ba69294SHugh Dickins * This applies either when cleaning up after __ksm_exit 23469ba69294SHugh Dickins * (but beware: we can reach here even before __ksm_exit), 23479ba69294SHugh Dickins * or when all VM_MERGEABLE areas have been unmapped (and 2348c1e8d7c6SMichel Lespinasse * mmap_lock then protects against race with MADV_MERGEABLE). 2349cd551f97SHugh Dickins */ 23504ca3a69bSSasha Levin hash_del(&slot->link); 2351cd551f97SHugh Dickins list_del(&slot->mm_list); 23529ba69294SHugh Dickins spin_unlock(&ksm_mmlist_lock); 23539ba69294SHugh Dickins 2354cd551f97SHugh Dickins free_mm_slot(slot); 2355cd551f97SHugh Dickins clear_bit(MMF_VM_MERGEABLE, &mm->flags); 2356d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 23579ba69294SHugh Dickins mmdrop(mm); 23589ba69294SHugh Dickins } else { 2359d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 23607496fea9SZhou Chengming /* 23613e4e28c5SMichel Lespinasse * mmap_read_unlock(mm) first because after 23627496fea9SZhou Chengming * spin_unlock(&ksm_mmlist_lock) run, the "mm" may 23637496fea9SZhou Chengming * already have been freed under us by __ksm_exit() 23647496fea9SZhou Chengming * because the "mm_slot" is still hashed and 23657496fea9SZhou Chengming * ksm_scan.mm_slot doesn't point to it anymore. 23667496fea9SZhou Chengming */ 23677496fea9SZhou Chengming spin_unlock(&ksm_mmlist_lock); 23689ba69294SHugh Dickins } 236931dbd01fSIzik Eidus 237031dbd01fSIzik Eidus /* Repeat until we've completed scanning the whole list */ 2371cd551f97SHugh Dickins slot = ksm_scan.mm_slot; 237231dbd01fSIzik Eidus if (slot != &ksm_mm_head) 237331dbd01fSIzik Eidus goto next_mm; 237431dbd01fSIzik Eidus 237531dbd01fSIzik Eidus ksm_scan.seqnr++; 237631dbd01fSIzik Eidus return NULL; 237731dbd01fSIzik Eidus } 237831dbd01fSIzik Eidus 237931dbd01fSIzik Eidus /** 238031dbd01fSIzik Eidus * ksm_do_scan - the ksm scanner main worker function. 2381b7701a5fSMike Rapoport * @scan_npages: number of pages we want to scan before we return. 
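 *
 * Called from ksm_scan_thread() with ksm_thread_mutex held; the loop
 * also ends early once the mm list is exhausted or the task is asked
 * to freeze.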
238231dbd01fSIzik Eidus */ 238331dbd01fSIzik Eidus static void ksm_do_scan(unsigned int scan_npages) 238431dbd01fSIzik Eidus { 238531dbd01fSIzik Eidus struct rmap_item *rmap_item; 23863f649ab7SKees Cook struct page *page; 238731dbd01fSIzik Eidus 2388878aee7dSAndrea Arcangeli while (scan_npages-- && likely(!freezing(current))) { 238931dbd01fSIzik Eidus cond_resched(); 239031dbd01fSIzik Eidus rmap_item = scan_get_next_rmap_item(&page); 239131dbd01fSIzik Eidus if (!rmap_item) 239231dbd01fSIzik Eidus return; 239331dbd01fSIzik Eidus cmp_and_merge_page(page, rmap_item); 239431dbd01fSIzik Eidus put_page(page); 239531dbd01fSIzik Eidus } 239631dbd01fSIzik Eidus } 239731dbd01fSIzik Eidus 23986e158384SHugh Dickins static int ksmd_should_run(void) 23996e158384SHugh Dickins { 24006e158384SHugh Dickins return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list); 24016e158384SHugh Dickins } 24026e158384SHugh Dickins 240331dbd01fSIzik Eidus static int ksm_scan_thread(void *nothing) 240431dbd01fSIzik Eidus { 2405fcf9a0efSKirill Tkhai unsigned int sleep_ms; 2406fcf9a0efSKirill Tkhai 2407878aee7dSAndrea Arcangeli set_freezable(); 2408339aa624SIzik Eidus set_user_nice(current, 5); 240931dbd01fSIzik Eidus 241031dbd01fSIzik Eidus while (!kthread_should_stop()) { 241131dbd01fSIzik Eidus mutex_lock(&ksm_thread_mutex); 2412ef4d43a8SHugh Dickins wait_while_offlining(); 24136e158384SHugh Dickins if (ksmd_should_run()) 241431dbd01fSIzik Eidus ksm_do_scan(ksm_thread_pages_to_scan); 241531dbd01fSIzik Eidus mutex_unlock(&ksm_thread_mutex); 24166e158384SHugh Dickins 2417878aee7dSAndrea Arcangeli try_to_freeze(); 2418878aee7dSAndrea Arcangeli 24196e158384SHugh Dickins if (ksmd_should_run()) { 2420fcf9a0efSKirill Tkhai sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs); 2421fcf9a0efSKirill Tkhai wait_event_interruptible_timeout(ksm_iter_wait, 2422fcf9a0efSKirill Tkhai sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs), 2423fcf9a0efSKirill Tkhai msecs_to_jiffies(sleep_ms)); 242431dbd01fSIzik Eidus } else { 2425878aee7dSAndrea Arcangeli wait_event_freezable(ksm_thread_wait, 24266e158384SHugh Dickins ksmd_should_run() || kthread_should_stop()); 242731dbd01fSIzik Eidus } 242831dbd01fSIzik Eidus } 242931dbd01fSIzik Eidus return 0; 243031dbd01fSIzik Eidus } 243131dbd01fSIzik Eidus 2432f8af4da3SHugh Dickins int ksm_madvise(struct vm_area_struct *vma, unsigned long start, 2433f8af4da3SHugh Dickins unsigned long end, int advice, unsigned long *vm_flags) 2434f8af4da3SHugh Dickins { 2435f8af4da3SHugh Dickins struct mm_struct *mm = vma->vm_mm; 2436d952b791SHugh Dickins int err; 2437f8af4da3SHugh Dickins 2438f8af4da3SHugh Dickins switch (advice) { 2439f8af4da3SHugh Dickins case MADV_MERGEABLE: 2440f8af4da3SHugh Dickins /* 2441f8af4da3SHugh Dickins * Be somewhat over-protective for now! 2442f8af4da3SHugh Dickins */ 2443f8af4da3SHugh Dickins if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | 2444f8af4da3SHugh Dickins VM_PFNMAP | VM_IO | VM_DONTEXPAND | 24450661a336SKirill A. 
Shutemov VM_HUGETLB | VM_MIXEDMAP)) 2446f8af4da3SHugh Dickins return 0; /* just ignore the advice */ 2447f8af4da3SHugh Dickins 2448e1fb4a08SDave Jiang if (vma_is_dax(vma)) 2449e1fb4a08SDave Jiang return 0; 2450e1fb4a08SDave Jiang 245112564485SShawn Anastasio #ifdef VM_SAO 245212564485SShawn Anastasio if (*vm_flags & VM_SAO) 245312564485SShawn Anastasio return 0; 245412564485SShawn Anastasio #endif 245574a04967SKhalid Aziz #ifdef VM_SPARC_ADI 245674a04967SKhalid Aziz if (*vm_flags & VM_SPARC_ADI) 245774a04967SKhalid Aziz return 0; 245874a04967SKhalid Aziz #endif 2459cc2383ecSKonstantin Khlebnikov 2460d952b791SHugh Dickins if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { 2461d952b791SHugh Dickins err = __ksm_enter(mm); 2462d952b791SHugh Dickins if (err) 2463d952b791SHugh Dickins return err; 2464d952b791SHugh Dickins } 2465f8af4da3SHugh Dickins 2466f8af4da3SHugh Dickins *vm_flags |= VM_MERGEABLE; 2467f8af4da3SHugh Dickins break; 2468f8af4da3SHugh Dickins 2469f8af4da3SHugh Dickins case MADV_UNMERGEABLE: 2470f8af4da3SHugh Dickins if (!(*vm_flags & VM_MERGEABLE)) 2471f8af4da3SHugh Dickins return 0; /* just ignore the advice */ 2472f8af4da3SHugh Dickins 2473d952b791SHugh Dickins if (vma->anon_vma) { 2474d952b791SHugh Dickins err = unmerge_ksm_pages(vma, start, end); 2475d952b791SHugh Dickins if (err) 2476d952b791SHugh Dickins return err; 2477d952b791SHugh Dickins } 2478f8af4da3SHugh Dickins 2479f8af4da3SHugh Dickins *vm_flags &= ~VM_MERGEABLE; 2480f8af4da3SHugh Dickins break; 2481f8af4da3SHugh Dickins } 2482f8af4da3SHugh Dickins 2483f8af4da3SHugh Dickins return 0; 2484f8af4da3SHugh Dickins } 248533cf1707SBharata B Rao EXPORT_SYMBOL_GPL(ksm_madvise); 2486f8af4da3SHugh Dickins 2487f8af4da3SHugh Dickins int __ksm_enter(struct mm_struct *mm) 2488f8af4da3SHugh Dickins { 24896e158384SHugh Dickins struct mm_slot *mm_slot; 24906e158384SHugh Dickins int needs_wakeup; 24916e158384SHugh Dickins 24926e158384SHugh Dickins mm_slot = alloc_mm_slot(); 249331dbd01fSIzik Eidus if (!mm_slot) 249431dbd01fSIzik Eidus return -ENOMEM; 249531dbd01fSIzik Eidus 24966e158384SHugh Dickins /* Check ksm_run too? Would need tighter locking */ 24976e158384SHugh Dickins needs_wakeup = list_empty(&ksm_mm_head.mm_list); 24986e158384SHugh Dickins 249931dbd01fSIzik Eidus spin_lock(&ksm_mmlist_lock); 250031dbd01fSIzik Eidus insert_to_mm_slots_hash(mm, mm_slot); 250131dbd01fSIzik Eidus /* 2502cbf86cfeSHugh Dickins * When KSM_RUN_MERGE (or KSM_RUN_STOP), 2503cbf86cfeSHugh Dickins * insert just behind the scanning cursor, to let the area settle 250431dbd01fSIzik Eidus * down a little; when fork is followed by immediate exec, we don't 250531dbd01fSIzik Eidus * want ksmd to waste time setting up and tearing down an rmap_list. 2506cbf86cfeSHugh Dickins * 2507cbf86cfeSHugh Dickins * But when KSM_RUN_UNMERGE, it's important to insert ahead of its 2508cbf86cfeSHugh Dickins * scanning cursor, otherwise KSM pages in newly forked mms will be 2509cbf86cfeSHugh Dickins * missed: then we might as well insert at the end of the list. 
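 * (Whenever ksmd is idle between full scans, ksm_scan.mm_slot is
 * &ksm_mm_head itself, so the two insertion points coincide.)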
251031dbd01fSIzik Eidus */ 2511cbf86cfeSHugh Dickins if (ksm_run & KSM_RUN_UNMERGE) 2512cbf86cfeSHugh Dickins list_add_tail(&mm_slot->mm_list, &ksm_mm_head.mm_list); 2513cbf86cfeSHugh Dickins else 251431dbd01fSIzik Eidus list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list); 251531dbd01fSIzik Eidus spin_unlock(&ksm_mmlist_lock); 251631dbd01fSIzik Eidus 2517f8af4da3SHugh Dickins set_bit(MMF_VM_MERGEABLE, &mm->flags); 2518f1f10076SVegard Nossum mmgrab(mm); 25196e158384SHugh Dickins 25206e158384SHugh Dickins if (needs_wakeup) 25216e158384SHugh Dickins wake_up_interruptible(&ksm_thread_wait); 25226e158384SHugh Dickins 2523f8af4da3SHugh Dickins return 0; 2524f8af4da3SHugh Dickins } 2525f8af4da3SHugh Dickins 25261c2fb7a4SAndrea Arcangeli void __ksm_exit(struct mm_struct *mm) 2527f8af4da3SHugh Dickins { 2528cd551f97SHugh Dickins struct mm_slot *mm_slot; 25299ba69294SHugh Dickins int easy_to_free = 0; 2530cd551f97SHugh Dickins 253131dbd01fSIzik Eidus /* 25329ba69294SHugh Dickins * This process is exiting: if it's straightforward (as is the 25339ba69294SHugh Dickins * case when ksmd was never running), free mm_slot immediately. 25349ba69294SHugh Dickins * But if it's at the cursor or has rmap_items linked to it, use 2535c1e8d7c6SMichel Lespinasse * mmap_lock to synchronize with any break_cows before pagetables 25369ba69294SHugh Dickins * are freed, and leave the mm_slot on the list for ksmd to free. 25379ba69294SHugh Dickins * Beware: ksm may already have noticed it exiting and freed the slot. 253831dbd01fSIzik Eidus */ 25399ba69294SHugh Dickins 2540cd551f97SHugh Dickins spin_lock(&ksm_mmlist_lock); 2541cd551f97SHugh Dickins mm_slot = get_mm_slot(mm); 25429ba69294SHugh Dickins if (mm_slot && ksm_scan.mm_slot != mm_slot) { 25436514d511SHugh Dickins if (!mm_slot->rmap_list) { 25444ca3a69bSSasha Levin hash_del(&mm_slot->link); 2545cd551f97SHugh Dickins list_del(&mm_slot->mm_list); 25469ba69294SHugh Dickins easy_to_free = 1; 25479ba69294SHugh Dickins } else { 25489ba69294SHugh Dickins list_move(&mm_slot->mm_list, 25499ba69294SHugh Dickins &ksm_scan.mm_slot->mm_list); 25509ba69294SHugh Dickins } 25519ba69294SHugh Dickins } 2552cd551f97SHugh Dickins spin_unlock(&ksm_mmlist_lock); 2553cd551f97SHugh Dickins 25549ba69294SHugh Dickins if (easy_to_free) { 2555cd551f97SHugh Dickins free_mm_slot(mm_slot); 2556cd551f97SHugh Dickins clear_bit(MMF_VM_MERGEABLE, &mm->flags); 25579ba69294SHugh Dickins mmdrop(mm); 25589ba69294SHugh Dickins } else if (mm_slot) { 2559d8ed45c5SMichel Lespinasse mmap_write_lock(mm); 2560d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 25619ba69294SHugh Dickins } 2562f8af4da3SHugh Dickins } 256331dbd01fSIzik Eidus 2564cbf86cfeSHugh Dickins struct page *ksm_might_need_to_copy(struct page *page, 25655ad64688SHugh Dickins struct vm_area_struct *vma, unsigned long address) 25665ad64688SHugh Dickins { 2567cbf86cfeSHugh Dickins struct anon_vma *anon_vma = page_anon_vma(page); 25685ad64688SHugh Dickins struct page *new_page; 25695ad64688SHugh Dickins 2570cbf86cfeSHugh Dickins if (PageKsm(page)) { 2571cbf86cfeSHugh Dickins if (page_stable_node(page) && 2572cbf86cfeSHugh Dickins !(ksm_run & KSM_RUN_UNMERGE)) 2573cbf86cfeSHugh Dickins return page; /* no need to copy it */ 2574cbf86cfeSHugh Dickins } else if (!anon_vma) { 2575cbf86cfeSHugh Dickins return page; /* no need to copy it */ 2576cbf86cfeSHugh Dickins } else if (anon_vma->root == vma->anon_vma->root && 2577cbf86cfeSHugh Dickins page->index == linear_page_index(vma, address)) { 2578cbf86cfeSHugh Dickins return page; /* still no need 
to copy it */ 2579cbf86cfeSHugh Dickins } 2580cbf86cfeSHugh Dickins if (!PageUptodate(page)) 2581cbf86cfeSHugh Dickins return page; /* let do_swap_page report the error */ 2582cbf86cfeSHugh Dickins 25835ad64688SHugh Dickins new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); 258462fdb163SHugh Dickins if (new_page && mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL)) { 258562fdb163SHugh Dickins put_page(new_page); 258662fdb163SHugh Dickins new_page = NULL; 258762fdb163SHugh Dickins } 25885ad64688SHugh Dickins if (new_page) { 25895ad64688SHugh Dickins copy_user_highpage(new_page, page, address, vma); 25905ad64688SHugh Dickins 25915ad64688SHugh Dickins SetPageDirty(new_page); 25925ad64688SHugh Dickins __SetPageUptodate(new_page); 259348c935adSKirill A. Shutemov __SetPageLocked(new_page); 25945ad64688SHugh Dickins } 25955ad64688SHugh Dickins 25965ad64688SHugh Dickins return new_page; 25975ad64688SHugh Dickins } 25985ad64688SHugh Dickins 25991df631aeSMinchan Kim void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) 2600e9995ef9SHugh Dickins { 2601e9995ef9SHugh Dickins struct stable_node *stable_node; 2602e9995ef9SHugh Dickins struct rmap_item *rmap_item; 2603e9995ef9SHugh Dickins int search_new_forks = 0; 2604e9995ef9SHugh Dickins 2605309381feSSasha Levin VM_BUG_ON_PAGE(!PageKsm(page), page); 26069f32624bSJoonsoo Kim 26079f32624bSJoonsoo Kim /* 26089f32624bSJoonsoo Kim * Rely on the page lock to protect against concurrent modifications 26099f32624bSJoonsoo Kim * to that page's node of the stable tree. 26109f32624bSJoonsoo Kim */ 2611309381feSSasha Levin VM_BUG_ON_PAGE(!PageLocked(page), page); 2612e9995ef9SHugh Dickins 2613e9995ef9SHugh Dickins stable_node = page_stable_node(page); 2614e9995ef9SHugh Dickins if (!stable_node) 26151df631aeSMinchan Kim return; 2616e9995ef9SHugh Dickins again: 2617b67bfe0dSSasha Levin hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { 2618e9995ef9SHugh Dickins struct anon_vma *anon_vma = rmap_item->anon_vma; 26195beb4930SRik van Riel struct anon_vma_chain *vmac; 2620e9995ef9SHugh Dickins struct vm_area_struct *vma; 2621e9995ef9SHugh Dickins 2622ad12695fSAndrea Arcangeli cond_resched(); 2623b6b19f25SHugh Dickins anon_vma_lock_read(anon_vma); 2624bf181b9fSMichel Lespinasse anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, 2625bf181b9fSMichel Lespinasse 0, ULONG_MAX) { 26261105a2fcSJia He unsigned long addr; 26271105a2fcSJia He 2628ad12695fSAndrea Arcangeli cond_resched(); 26295beb4930SRik van Riel vma = vmac->vma; 26301105a2fcSJia He 26311105a2fcSJia He /* Ignore the stable/unstable/sqnr flags */ 2632*cd7fae26SMiaohe Lin addr = rmap_item->address & PAGE_MASK; 26331105a2fcSJia He 26341105a2fcSJia He if (addr < vma->vm_start || addr >= vma->vm_end) 2635e9995ef9SHugh Dickins continue; 2636e9995ef9SHugh Dickins /* 2637e9995ef9SHugh Dickins * Initially we examine only the vma which covers this 2638e9995ef9SHugh Dickins * rmap_item; but later, if there is still work to do, 2639e9995ef9SHugh Dickins * we examine covering vmas in other mms: in case they 2640e9995ef9SHugh Dickins * were forked from the original since ksmd passed. 
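 * (The search_new_forks flag implements this: pass 0 visits only the
 * vma belonging to rmap_item->mm; "goto again" then starts pass 1,
 * which visits the vmas of every other mm on the same anon_vma.)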
2641e9995ef9SHugh Dickins */ 2642e9995ef9SHugh Dickins if ((rmap_item->mm == vma->vm_mm) == search_new_forks) 2643e9995ef9SHugh Dickins continue; 2644e9995ef9SHugh Dickins 26450dd1c7bbSJoonsoo Kim if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 26460dd1c7bbSJoonsoo Kim continue; 26470dd1c7bbSJoonsoo Kim 26481105a2fcSJia He if (!rwc->rmap_one(page, vma, addr, rwc->arg)) { 2649b6b19f25SHugh Dickins anon_vma_unlock_read(anon_vma); 26501df631aeSMinchan Kim return; 2651e9995ef9SHugh Dickins } 26520dd1c7bbSJoonsoo Kim if (rwc->done && rwc->done(page)) { 26530dd1c7bbSJoonsoo Kim anon_vma_unlock_read(anon_vma); 26541df631aeSMinchan Kim return; 26550dd1c7bbSJoonsoo Kim } 2656e9995ef9SHugh Dickins } 2657b6b19f25SHugh Dickins anon_vma_unlock_read(anon_vma); 2658e9995ef9SHugh Dickins } 2659e9995ef9SHugh Dickins if (!search_new_forks++) 2660e9995ef9SHugh Dickins goto again; 2661e9995ef9SHugh Dickins } 2662e9995ef9SHugh Dickins 266352629506SJoonsoo Kim #ifdef CONFIG_MIGRATION 2664e9995ef9SHugh Dickins void ksm_migrate_page(struct page *newpage, struct page *oldpage) 2665e9995ef9SHugh Dickins { 2666e9995ef9SHugh Dickins struct stable_node *stable_node; 2667e9995ef9SHugh Dickins 2668309381feSSasha Levin VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 2669309381feSSasha Levin VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 2670309381feSSasha Levin VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage); 2671e9995ef9SHugh Dickins 2672e9995ef9SHugh Dickins stable_node = page_stable_node(newpage); 2673e9995ef9SHugh Dickins if (stable_node) { 2674309381feSSasha Levin VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage); 267562b61f61SHugh Dickins stable_node->kpfn = page_to_pfn(newpage); 2676c8d6553bSHugh Dickins /* 2677c8d6553bSHugh Dickins * newpage->mapping was set in advance; now we need smp_wmb() 2678c8d6553bSHugh Dickins * to make sure that the new stable_node->kpfn is visible 2679c8d6553bSHugh Dickins * to get_ksm_page() before it can see that oldpage->mapping 2680c8d6553bSHugh Dickins * has gone stale (or that PageSwapCache has been cleared). 
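 * (On the read side, get_ksm_page() re-checks page->mapping after
 * reading kpfn, and retries when the stable_node's kpfn has moved on.)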
2681c8d6553bSHugh Dickins */ 2682c8d6553bSHugh Dickins smp_wmb(); 2683c8d6553bSHugh Dickins set_page_stable_node(oldpage, NULL); 2684e9995ef9SHugh Dickins } 2685e9995ef9SHugh Dickins } 2686e9995ef9SHugh Dickins #endif /* CONFIG_MIGRATION */ 2687e9995ef9SHugh Dickins 268862b61f61SHugh Dickins #ifdef CONFIG_MEMORY_HOTREMOVE 2689ef4d43a8SHugh Dickins static void wait_while_offlining(void) 2690ef4d43a8SHugh Dickins { 2691ef4d43a8SHugh Dickins while (ksm_run & KSM_RUN_OFFLINE) { 2692ef4d43a8SHugh Dickins mutex_unlock(&ksm_thread_mutex); 2693ef4d43a8SHugh Dickins wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE), 269474316201SNeilBrown TASK_UNINTERRUPTIBLE); 2695ef4d43a8SHugh Dickins mutex_lock(&ksm_thread_mutex); 2696ef4d43a8SHugh Dickins } 2697ef4d43a8SHugh Dickins } 2698ef4d43a8SHugh Dickins 26992c653d0eSAndrea Arcangeli static bool stable_node_dup_remove_range(struct stable_node *stable_node, 27002c653d0eSAndrea Arcangeli unsigned long start_pfn, 27012c653d0eSAndrea Arcangeli unsigned long end_pfn) 27022c653d0eSAndrea Arcangeli { 27032c653d0eSAndrea Arcangeli if (stable_node->kpfn >= start_pfn && 27042c653d0eSAndrea Arcangeli stable_node->kpfn < end_pfn) { 27052c653d0eSAndrea Arcangeli /* 27062c653d0eSAndrea Arcangeli * Don't get_ksm_page, page has already gone: 27072c653d0eSAndrea Arcangeli * which is why we keep kpfn instead of page* 27082c653d0eSAndrea Arcangeli */ 27092c653d0eSAndrea Arcangeli remove_node_from_stable_tree(stable_node); 27102c653d0eSAndrea Arcangeli return true; 27112c653d0eSAndrea Arcangeli } 27122c653d0eSAndrea Arcangeli return false; 27132c653d0eSAndrea Arcangeli } 27142c653d0eSAndrea Arcangeli 27152c653d0eSAndrea Arcangeli static bool stable_node_chain_remove_range(struct stable_node *stable_node, 27162c653d0eSAndrea Arcangeli unsigned long start_pfn, 27172c653d0eSAndrea Arcangeli unsigned long end_pfn, 27182c653d0eSAndrea Arcangeli struct rb_root *root) 27192c653d0eSAndrea Arcangeli { 27202c653d0eSAndrea Arcangeli struct stable_node *dup; 27212c653d0eSAndrea Arcangeli struct hlist_node *hlist_safe; 27222c653d0eSAndrea Arcangeli 27232c653d0eSAndrea Arcangeli if (!is_stable_node_chain(stable_node)) { 27242c653d0eSAndrea Arcangeli VM_BUG_ON(is_stable_node_dup(stable_node)); 27252c653d0eSAndrea Arcangeli return stable_node_dup_remove_range(stable_node, start_pfn, 27262c653d0eSAndrea Arcangeli end_pfn); 27272c653d0eSAndrea Arcangeli } 27282c653d0eSAndrea Arcangeli 27292c653d0eSAndrea Arcangeli hlist_for_each_entry_safe(dup, hlist_safe, 27302c653d0eSAndrea Arcangeli &stable_node->hlist, hlist_dup) { 27312c653d0eSAndrea Arcangeli VM_BUG_ON(!is_stable_node_dup(dup)); 27322c653d0eSAndrea Arcangeli stable_node_dup_remove_range(dup, start_pfn, end_pfn); 27332c653d0eSAndrea Arcangeli } 27342c653d0eSAndrea Arcangeli if (hlist_empty(&stable_node->hlist)) { 27352c653d0eSAndrea Arcangeli free_stable_node_chain(stable_node, root); 27362c653d0eSAndrea Arcangeli return true; /* notify caller that tree was rebalanced */ 27372c653d0eSAndrea Arcangeli } else 27382c653d0eSAndrea Arcangeli return false; 27392c653d0eSAndrea Arcangeli } 27402c653d0eSAndrea Arcangeli 2741ee0ea59cSHugh Dickins static void ksm_check_stable_tree(unsigned long start_pfn, 274262b61f61SHugh Dickins unsigned long end_pfn) 274362b61f61SHugh Dickins { 274403640418SGeliang Tang struct stable_node *stable_node, *next; 274562b61f61SHugh Dickins struct rb_node *node; 274690bd6fd3SPetr Holasek int nid; 274762b61f61SHugh Dickins 2748ef53d16cSHugh Dickins for (nid = 0; nid < ksm_nr_node_ids; nid++) { 2749ef53d16cSHugh Dickins node 
= rb_first(root_stable_tree + nid); 2750ee0ea59cSHugh Dickins while (node) { 275162b61f61SHugh Dickins stable_node = rb_entry(node, struct stable_node, node); 27522c653d0eSAndrea Arcangeli if (stable_node_chain_remove_range(stable_node, 27532c653d0eSAndrea Arcangeli start_pfn, end_pfn, 27542c653d0eSAndrea Arcangeli root_stable_tree + 27552c653d0eSAndrea Arcangeli nid)) 2756ef53d16cSHugh Dickins node = rb_first(root_stable_tree + nid); 27572c653d0eSAndrea Arcangeli else 2758ee0ea59cSHugh Dickins node = rb_next(node); 2759ee0ea59cSHugh Dickins cond_resched(); 276062b61f61SHugh Dickins } 2761ee0ea59cSHugh Dickins } 276203640418SGeliang Tang list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) { 27634146d2d6SHugh Dickins if (stable_node->kpfn >= start_pfn && 27644146d2d6SHugh Dickins stable_node->kpfn < end_pfn) 27654146d2d6SHugh Dickins remove_node_from_stable_tree(stable_node); 27664146d2d6SHugh Dickins cond_resched(); 27674146d2d6SHugh Dickins } 276862b61f61SHugh Dickins } 276962b61f61SHugh Dickins 277062b61f61SHugh Dickins static int ksm_memory_callback(struct notifier_block *self, 277162b61f61SHugh Dickins unsigned long action, void *arg) 277262b61f61SHugh Dickins { 277362b61f61SHugh Dickins struct memory_notify *mn = arg; 277462b61f61SHugh Dickins 277562b61f61SHugh Dickins switch (action) { 277662b61f61SHugh Dickins case MEM_GOING_OFFLINE: 277762b61f61SHugh Dickins /* 2778ef4d43a8SHugh Dickins * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items() 2779ef4d43a8SHugh Dickins * and remove_all_stable_nodes() while memory is going offline: 2780ef4d43a8SHugh Dickins * it is unsafe for them to touch the stable tree at this time. 2781ef4d43a8SHugh Dickins * But unmerge_ksm_pages(), rmap lookups and other entry points 2782ef4d43a8SHugh Dickins * which do not need the ksm_thread_mutex are all safe. 278362b61f61SHugh Dickins */ 2784ef4d43a8SHugh Dickins mutex_lock(&ksm_thread_mutex); 2785ef4d43a8SHugh Dickins ksm_run |= KSM_RUN_OFFLINE; 2786ef4d43a8SHugh Dickins mutex_unlock(&ksm_thread_mutex); 278762b61f61SHugh Dickins break; 278862b61f61SHugh Dickins 278962b61f61SHugh Dickins case MEM_OFFLINE: 279062b61f61SHugh Dickins /* 279162b61f61SHugh Dickins * Most of the work is done by page migration; but there might 279262b61f61SHugh Dickins * be a few stable_nodes left over, still pointing to struct 2793ee0ea59cSHugh Dickins * pages which have been offlined: prune those from the tree, 2794ee0ea59cSHugh Dickins * otherwise get_ksm_page() might later try to access a 2795ee0ea59cSHugh Dickins * non-existent struct page. 
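 * (The stable tree is safe to prune here because KSM_RUN_OFFLINE,
 * set at MEM_GOING_OFFLINE above, has parked ksmd in
 * wait_while_offlining().)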
279662b61f61SHugh Dickins */ 2797ee0ea59cSHugh Dickins ksm_check_stable_tree(mn->start_pfn, 2798ee0ea59cSHugh Dickins mn->start_pfn + mn->nr_pages); 2799e4a9bc58SJoe Perches fallthrough; 280062b61f61SHugh Dickins case MEM_CANCEL_OFFLINE: 2801ef4d43a8SHugh Dickins mutex_lock(&ksm_thread_mutex); 2802ef4d43a8SHugh Dickins ksm_run &= ~KSM_RUN_OFFLINE; 280362b61f61SHugh Dickins mutex_unlock(&ksm_thread_mutex); 2804ef4d43a8SHugh Dickins 2805ef4d43a8SHugh Dickins smp_mb(); /* wake_up_bit advises this */ 2806ef4d43a8SHugh Dickins wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE)); 280762b61f61SHugh Dickins break; 280862b61f61SHugh Dickins } 280962b61f61SHugh Dickins return NOTIFY_OK; 281062b61f61SHugh Dickins } 2811ef4d43a8SHugh Dickins #else 2812ef4d43a8SHugh Dickins static void wait_while_offlining(void) 2813ef4d43a8SHugh Dickins { 2814ef4d43a8SHugh Dickins } 281562b61f61SHugh Dickins #endif /* CONFIG_MEMORY_HOTREMOVE */ 281662b61f61SHugh Dickins 28172ffd8679SHugh Dickins #ifdef CONFIG_SYSFS 28182ffd8679SHugh Dickins /* 28192ffd8679SHugh Dickins * This all compiles without CONFIG_SYSFS, but is a waste of space. 28202ffd8679SHugh Dickins */ 28212ffd8679SHugh Dickins 282231dbd01fSIzik Eidus #define KSM_ATTR_RO(_name) \ 282331dbd01fSIzik Eidus static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 282431dbd01fSIzik Eidus #define KSM_ATTR(_name) \ 282531dbd01fSIzik Eidus static struct kobj_attribute _name##_attr = \ 282631dbd01fSIzik Eidus __ATTR(_name, 0644, _name##_show, _name##_store) 282731dbd01fSIzik Eidus 282831dbd01fSIzik Eidus static ssize_t sleep_millisecs_show(struct kobject *kobj, 282931dbd01fSIzik Eidus struct kobj_attribute *attr, char *buf) 283031dbd01fSIzik Eidus { 2831ae7a927dSJoe Perches return sysfs_emit(buf, "%u\n", ksm_thread_sleep_millisecs); 283231dbd01fSIzik Eidus } 283331dbd01fSIzik Eidus 283431dbd01fSIzik Eidus static ssize_t sleep_millisecs_store(struct kobject *kobj, 283531dbd01fSIzik Eidus struct kobj_attribute *attr, 283631dbd01fSIzik Eidus const char *buf, size_t count) 283731dbd01fSIzik Eidus { 2838dfefd226SAlexey Dobriyan unsigned int msecs; 283931dbd01fSIzik Eidus int err; 284031dbd01fSIzik Eidus 2841dfefd226SAlexey Dobriyan err = kstrtouint(buf, 10, &msecs); 2842dfefd226SAlexey Dobriyan if (err) 284331dbd01fSIzik Eidus return -EINVAL; 284431dbd01fSIzik Eidus 284531dbd01fSIzik Eidus ksm_thread_sleep_millisecs = msecs; 2846fcf9a0efSKirill Tkhai wake_up_interruptible(&ksm_iter_wait); 284731dbd01fSIzik Eidus 284831dbd01fSIzik Eidus return count; 284931dbd01fSIzik Eidus } 285031dbd01fSIzik Eidus KSM_ATTR(sleep_millisecs); 285131dbd01fSIzik Eidus 285231dbd01fSIzik Eidus static ssize_t pages_to_scan_show(struct kobject *kobj, 285331dbd01fSIzik Eidus struct kobj_attribute *attr, char *buf) 285431dbd01fSIzik Eidus { 2855ae7a927dSJoe Perches return sysfs_emit(buf, "%u\n", ksm_thread_pages_to_scan); 285631dbd01fSIzik Eidus } 285731dbd01fSIzik Eidus 285831dbd01fSIzik Eidus static ssize_t pages_to_scan_store(struct kobject *kobj, 285931dbd01fSIzik Eidus struct kobj_attribute *attr, 286031dbd01fSIzik Eidus const char *buf, size_t count) 286131dbd01fSIzik Eidus { 2862dfefd226SAlexey Dobriyan unsigned int nr_pages; 286331dbd01fSIzik Eidus int err; 286431dbd01fSIzik Eidus 2865dfefd226SAlexey Dobriyan err = kstrtouint(buf, 10, &nr_pages); 2866dfefd226SAlexey Dobriyan if (err) 286731dbd01fSIzik Eidus return -EINVAL; 286831dbd01fSIzik Eidus 286931dbd01fSIzik Eidus ksm_thread_pages_to_scan = nr_pages; 287031dbd01fSIzik Eidus 287131dbd01fSIzik Eidus return count; 287231dbd01fSIzik Eidus 
} 287331dbd01fSIzik Eidus KSM_ATTR(pages_to_scan); 287431dbd01fSIzik Eidus 287531dbd01fSIzik Eidus static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr, 287631dbd01fSIzik Eidus char *buf) 287731dbd01fSIzik Eidus { 2878ae7a927dSJoe Perches return sysfs_emit(buf, "%lu\n", ksm_run); 287931dbd01fSIzik Eidus } 288031dbd01fSIzik Eidus 288131dbd01fSIzik Eidus static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, 288231dbd01fSIzik Eidus const char *buf, size_t count) 288331dbd01fSIzik Eidus { 2884dfefd226SAlexey Dobriyan unsigned int flags; 288531dbd01fSIzik Eidus int err; 288631dbd01fSIzik Eidus 2887dfefd226SAlexey Dobriyan err = kstrtouint(buf, 10, &flags); 2888dfefd226SAlexey Dobriyan if (err) 288931dbd01fSIzik Eidus return -EINVAL; 289031dbd01fSIzik Eidus if (flags > KSM_RUN_UNMERGE) 289131dbd01fSIzik Eidus return -EINVAL; 289231dbd01fSIzik Eidus 289331dbd01fSIzik Eidus /* 289431dbd01fSIzik Eidus * KSM_RUN_MERGE sets ksmd running, and 0 stops it running. 289531dbd01fSIzik Eidus * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items, 2896d0f209f6SHugh Dickins * breaking COW to free the pages_shared (but leaves mm_slots 2897d0f209f6SHugh Dickins * on the list for when ksmd may be set running again). 289831dbd01fSIzik Eidus */ 289931dbd01fSIzik Eidus 290031dbd01fSIzik Eidus mutex_lock(&ksm_thread_mutex); 2901ef4d43a8SHugh Dickins wait_while_offlining(); 290231dbd01fSIzik Eidus if (ksm_run != flags) { 290331dbd01fSIzik Eidus ksm_run = flags; 2904d952b791SHugh Dickins if (flags & KSM_RUN_UNMERGE) { 2905e1e12d2fSDavid Rientjes set_current_oom_origin(); 2906d952b791SHugh Dickins err = unmerge_and_remove_all_rmap_items(); 2907e1e12d2fSDavid Rientjes clear_current_oom_origin(); 2908d952b791SHugh Dickins if (err) { 2909d952b791SHugh Dickins ksm_run = KSM_RUN_STOP; 2910d952b791SHugh Dickins count = err; 2911d952b791SHugh Dickins } 2912d952b791SHugh Dickins } 291331dbd01fSIzik Eidus } 291431dbd01fSIzik Eidus mutex_unlock(&ksm_thread_mutex); 291531dbd01fSIzik Eidus 291631dbd01fSIzik Eidus if (flags & KSM_RUN_MERGE) 291731dbd01fSIzik Eidus wake_up_interruptible(&ksm_thread_wait); 291831dbd01fSIzik Eidus 291931dbd01fSIzik Eidus return count; 292031dbd01fSIzik Eidus } 292131dbd01fSIzik Eidus KSM_ATTR(run); 292231dbd01fSIzik Eidus 292390bd6fd3SPetr Holasek #ifdef CONFIG_NUMA 292490bd6fd3SPetr Holasek static ssize_t merge_across_nodes_show(struct kobject *kobj, 292590bd6fd3SPetr Holasek struct kobj_attribute *attr, char *buf) 292690bd6fd3SPetr Holasek { 2927ae7a927dSJoe Perches return sysfs_emit(buf, "%u\n", ksm_merge_across_nodes); 292890bd6fd3SPetr Holasek } 292990bd6fd3SPetr Holasek 293090bd6fd3SPetr Holasek static ssize_t merge_across_nodes_store(struct kobject *kobj, 293190bd6fd3SPetr Holasek struct kobj_attribute *attr, 293290bd6fd3SPetr Holasek const char *buf, size_t count) 293390bd6fd3SPetr Holasek { 293490bd6fd3SPetr Holasek int err; 293590bd6fd3SPetr Holasek unsigned long knob; 293690bd6fd3SPetr Holasek 293790bd6fd3SPetr Holasek err = kstrtoul(buf, 10, &knob); 293890bd6fd3SPetr Holasek if (err) 293990bd6fd3SPetr Holasek return err; 294090bd6fd3SPetr Holasek if (knob > 1) 294190bd6fd3SPetr Holasek return -EINVAL; 294290bd6fd3SPetr Holasek 294390bd6fd3SPetr Holasek mutex_lock(&ksm_thread_mutex); 2944ef4d43a8SHugh Dickins wait_while_offlining(); 294590bd6fd3SPetr Holasek if (ksm_merge_across_nodes != knob) { 2946cbf86cfeSHugh Dickins if (ksm_pages_shared || remove_all_stable_nodes()) 294790bd6fd3SPetr Holasek err = -EBUSY; 2948ef53d16cSHugh Dickins else 
if (root_stable_tree == one_stable_tree) { 2949ef53d16cSHugh Dickins struct rb_root *buf; 2950ef53d16cSHugh Dickins /* 2951ef53d16cSHugh Dickins * This is the first time that we switch away from the 2952ef53d16cSHugh Dickins * default of merging across nodes: must now allocate 2953ef53d16cSHugh Dickins * a buffer to hold as many roots as may be needed. 2954ef53d16cSHugh Dickins * Allocate stable and unstable together: 2955ef53d16cSHugh Dickins * MAXSMP NODES_SHIFT 10 will use 16kB. 2956ef53d16cSHugh Dickins */ 2957bafe1e14SJoe Perches buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf), 2958bafe1e14SJoe Perches GFP_KERNEL); 2959ef53d16cSHugh Dickins /* kcalloc() zeroes the buffer, and an all-zero rb_root is a valid empty RB_ROOT */ 2960ef53d16cSHugh Dickins if (!buf) 2961ef53d16cSHugh Dickins err = -ENOMEM; 2962ef53d16cSHugh Dickins else { 2963ef53d16cSHugh Dickins root_stable_tree = buf; 2964ef53d16cSHugh Dickins root_unstable_tree = buf + nr_node_ids; 2965ef53d16cSHugh Dickins /* Stable tree is empty but not the unstable */ 2966ef53d16cSHugh Dickins root_unstable_tree[0] = one_unstable_tree[0]; 2967ef53d16cSHugh Dickins } 2968ef53d16cSHugh Dickins } 2969ef53d16cSHugh Dickins if (!err) { 297090bd6fd3SPetr Holasek ksm_merge_across_nodes = knob; 2971ef53d16cSHugh Dickins ksm_nr_node_ids = knob ? 1 : nr_node_ids; 2972ef53d16cSHugh Dickins } 297390bd6fd3SPetr Holasek } 297490bd6fd3SPetr Holasek mutex_unlock(&ksm_thread_mutex); 297590bd6fd3SPetr Holasek 297690bd6fd3SPetr Holasek return err ? err : count; 297790bd6fd3SPetr Holasek } 297890bd6fd3SPetr Holasek KSM_ATTR(merge_across_nodes); 297990bd6fd3SPetr Holasek #endif 298090bd6fd3SPetr Holasek 2981e86c59b1SClaudio Imbrenda static ssize_t use_zero_pages_show(struct kobject *kobj, 2982e86c59b1SClaudio Imbrenda struct kobj_attribute *attr, char *buf) 2983e86c59b1SClaudio Imbrenda { 2984ae7a927dSJoe Perches return sysfs_emit(buf, "%u\n", ksm_use_zero_pages); 2985e86c59b1SClaudio Imbrenda } 2986e86c59b1SClaudio Imbrenda static ssize_t use_zero_pages_store(struct kobject *kobj, 2987e86c59b1SClaudio Imbrenda struct kobj_attribute *attr, 2988e86c59b1SClaudio Imbrenda const char *buf, size_t count) 2989e86c59b1SClaudio Imbrenda { 2990e86c59b1SClaudio Imbrenda int err; 2991e86c59b1SClaudio Imbrenda bool value; 2992e86c59b1SClaudio Imbrenda 2993e86c59b1SClaudio Imbrenda err = kstrtobool(buf, &value); 2994e86c59b1SClaudio Imbrenda if (err) 2995e86c59b1SClaudio Imbrenda return -EINVAL; 2996e86c59b1SClaudio Imbrenda 2997e86c59b1SClaudio Imbrenda ksm_use_zero_pages = value; 2998e86c59b1SClaudio Imbrenda 2999e86c59b1SClaudio Imbrenda return count; 3000e86c59b1SClaudio Imbrenda } 3001e86c59b1SClaudio Imbrenda KSM_ATTR(use_zero_pages); 3002e86c59b1SClaudio Imbrenda 30032c653d0eSAndrea Arcangeli static ssize_t max_page_sharing_show(struct kobject *kobj, 30042c653d0eSAndrea Arcangeli struct kobj_attribute *attr, char *buf) 30052c653d0eSAndrea Arcangeli { 3006ae7a927dSJoe Perches return sysfs_emit(buf, "%u\n", ksm_max_page_sharing); 30072c653d0eSAndrea Arcangeli } 30082c653d0eSAndrea Arcangeli 30092c653d0eSAndrea Arcangeli static ssize_t max_page_sharing_store(struct kobject *kobj, 30102c653d0eSAndrea Arcangeli struct kobj_attribute *attr, 30112c653d0eSAndrea Arcangeli const char *buf, size_t count) 30122c653d0eSAndrea Arcangeli { 30132c653d0eSAndrea Arcangeli int err; 30142c653d0eSAndrea Arcangeli int knob; 30152c653d0eSAndrea Arcangeli 30162c653d0eSAndrea Arcangeli err = kstrtoint(buf, 10, &knob); 30172c653d0eSAndrea Arcangeli if (err) 30182c653d0eSAndrea Arcangeli return err;
30192c653d0eSAndrea Arcangeli /* 30202c653d0eSAndrea Arcangeli * When a KSM page is created it is shared by 2 mappings. This 30212c653d0eSAndrea Arcangeli * being a signed comparison, it implicitly verifies it's not 30222c653d0eSAndrea Arcangeli * negative. 30232c653d0eSAndrea Arcangeli */ 30242c653d0eSAndrea Arcangeli if (knob < 2) 30252c653d0eSAndrea Arcangeli return -EINVAL; 30262c653d0eSAndrea Arcangeli 30272c653d0eSAndrea Arcangeli if (READ_ONCE(ksm_max_page_sharing) == knob) 30282c653d0eSAndrea Arcangeli return count; 30292c653d0eSAndrea Arcangeli 30302c653d0eSAndrea Arcangeli mutex_lock(&ksm_thread_mutex); 30312c653d0eSAndrea Arcangeli wait_while_offlining(); 30322c653d0eSAndrea Arcangeli if (ksm_max_page_sharing != knob) { 30332c653d0eSAndrea Arcangeli if (ksm_pages_shared || remove_all_stable_nodes()) 30342c653d0eSAndrea Arcangeli err = -EBUSY; 30352c653d0eSAndrea Arcangeli else 30362c653d0eSAndrea Arcangeli ksm_max_page_sharing = knob; 30372c653d0eSAndrea Arcangeli } 30382c653d0eSAndrea Arcangeli mutex_unlock(&ksm_thread_mutex); 30392c653d0eSAndrea Arcangeli 30402c653d0eSAndrea Arcangeli return err ? err : count; 30412c653d0eSAndrea Arcangeli } 30422c653d0eSAndrea Arcangeli KSM_ATTR(max_page_sharing); 30432c653d0eSAndrea Arcangeli 3044b4028260SHugh Dickins static ssize_t pages_shared_show(struct kobject *kobj, 3045b4028260SHugh Dickins struct kobj_attribute *attr, char *buf) 3046b4028260SHugh Dickins { 3047ae7a927dSJoe Perches return sysfs_emit(buf, "%lu\n", ksm_pages_shared); 3048b4028260SHugh Dickins } 3049b4028260SHugh Dickins KSM_ATTR_RO(pages_shared); 3050b4028260SHugh Dickins 3051b4028260SHugh Dickins static ssize_t pages_sharing_show(struct kobject *kobj, 3052b4028260SHugh Dickins struct kobj_attribute *attr, char *buf) 3053b4028260SHugh Dickins { 3054ae7a927dSJoe Perches return sysfs_emit(buf, "%lu\n", ksm_pages_sharing); 3055b4028260SHugh Dickins } 3056b4028260SHugh Dickins KSM_ATTR_RO(pages_sharing); 3057b4028260SHugh Dickins 3058473b0ce4SHugh Dickins static ssize_t pages_unshared_show(struct kobject *kobj, 3059473b0ce4SHugh Dickins struct kobj_attribute *attr, char *buf) 3060473b0ce4SHugh Dickins { 3061ae7a927dSJoe Perches return sysfs_emit(buf, "%lu\n", ksm_pages_unshared); 3062473b0ce4SHugh Dickins } 3063473b0ce4SHugh Dickins KSM_ATTR_RO(pages_unshared); 3064473b0ce4SHugh Dickins 3065473b0ce4SHugh Dickins static ssize_t pages_volatile_show(struct kobject *kobj, 3066473b0ce4SHugh Dickins struct kobj_attribute *attr, char *buf) 3067473b0ce4SHugh Dickins { 3068473b0ce4SHugh Dickins long ksm_pages_volatile; 3069473b0ce4SHugh Dickins 3070473b0ce4SHugh Dickins ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared 3071473b0ce4SHugh Dickins - ksm_pages_sharing - ksm_pages_unshared; 3072473b0ce4SHugh Dickins /* 3073473b0ce4SHugh Dickins * It was not worth any locking to calculate that statistic, 3074473b0ce4SHugh Dickins * but it might therefore sometimes be negative: conceal that. 
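 * (Every rmap_item is accounted somewhere: stable tree entries show
 * up in pages_shared plus pages_sharing, unstable tree entries in
 * pages_unshared, and the remainder is taken to be volatile.)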
3075473b0ce4SHugh Dickins */ 3076473b0ce4SHugh Dickins if (ksm_pages_volatile < 0) 3077473b0ce4SHugh Dickins ksm_pages_volatile = 0; 3078ae7a927dSJoe Perches return sysfs_emit(buf, "%ld\n", ksm_pages_volatile); 3079473b0ce4SHugh Dickins } 3080473b0ce4SHugh Dickins KSM_ATTR_RO(pages_volatile); 3081473b0ce4SHugh Dickins 30822c653d0eSAndrea Arcangeli static ssize_t stable_node_dups_show(struct kobject *kobj, 30832c653d0eSAndrea Arcangeli struct kobj_attribute *attr, char *buf) 30842c653d0eSAndrea Arcangeli { 3085ae7a927dSJoe Perches return sysfs_emit(buf, "%lu\n", ksm_stable_node_dups); 30862c653d0eSAndrea Arcangeli } 30872c653d0eSAndrea Arcangeli KSM_ATTR_RO(stable_node_dups); 30882c653d0eSAndrea Arcangeli 30892c653d0eSAndrea Arcangeli static ssize_t stable_node_chains_show(struct kobject *kobj, 30902c653d0eSAndrea Arcangeli struct kobj_attribute *attr, char *buf) 30912c653d0eSAndrea Arcangeli { 3092ae7a927dSJoe Perches return sysfs_emit(buf, "%lu\n", ksm_stable_node_chains); 30932c653d0eSAndrea Arcangeli } 30942c653d0eSAndrea Arcangeli KSM_ATTR_RO(stable_node_chains); 30952c653d0eSAndrea Arcangeli 30962c653d0eSAndrea Arcangeli static ssize_t 30972c653d0eSAndrea Arcangeli stable_node_chains_prune_millisecs_show(struct kobject *kobj, 30982c653d0eSAndrea Arcangeli struct kobj_attribute *attr, 30992c653d0eSAndrea Arcangeli char *buf) 31002c653d0eSAndrea Arcangeli { 3101ae7a927dSJoe Perches return sysfs_emit(buf, "%u\n", ksm_stable_node_chains_prune_millisecs); 31022c653d0eSAndrea Arcangeli } 31032c653d0eSAndrea Arcangeli 31042c653d0eSAndrea Arcangeli static ssize_t 31052c653d0eSAndrea Arcangeli stable_node_chains_prune_millisecs_store(struct kobject *kobj, 31062c653d0eSAndrea Arcangeli struct kobj_attribute *attr, 31072c653d0eSAndrea Arcangeli const char *buf, size_t count) 31082c653d0eSAndrea Arcangeli { 31092c653d0eSAndrea Arcangeli unsigned long msecs; 31102c653d0eSAndrea Arcangeli int err; 31112c653d0eSAndrea Arcangeli 31122c653d0eSAndrea Arcangeli err = kstrtoul(buf, 10, &msecs); 31132c653d0eSAndrea Arcangeli if (err || msecs > UINT_MAX) 31142c653d0eSAndrea Arcangeli return -EINVAL; 31152c653d0eSAndrea Arcangeli 31162c653d0eSAndrea Arcangeli ksm_stable_node_chains_prune_millisecs = msecs; 31172c653d0eSAndrea Arcangeli 31182c653d0eSAndrea Arcangeli return count; 31192c653d0eSAndrea Arcangeli } 31202c653d0eSAndrea Arcangeli KSM_ATTR(stable_node_chains_prune_millisecs); 31212c653d0eSAndrea Arcangeli 3122473b0ce4SHugh Dickins static ssize_t full_scans_show(struct kobject *kobj, 3123473b0ce4SHugh Dickins struct kobj_attribute *attr, char *buf) 3124473b0ce4SHugh Dickins { 3125ae7a927dSJoe Perches return sysfs_emit(buf, "%lu\n", ksm_scan.seqnr); 3126473b0ce4SHugh Dickins } 3127473b0ce4SHugh Dickins KSM_ATTR_RO(full_scans); 3128473b0ce4SHugh Dickins 312931dbd01fSIzik Eidus static struct attribute *ksm_attrs[] = { 313031dbd01fSIzik Eidus &sleep_millisecs_attr.attr, 313131dbd01fSIzik Eidus &pages_to_scan_attr.attr, 313231dbd01fSIzik Eidus &run_attr.attr, 3133b4028260SHugh Dickins &pages_shared_attr.attr, 3134b4028260SHugh Dickins &pages_sharing_attr.attr, 3135473b0ce4SHugh Dickins &pages_unshared_attr.attr, 3136473b0ce4SHugh Dickins &pages_volatile_attr.attr, 3137473b0ce4SHugh Dickins &full_scans_attr.attr, 313890bd6fd3SPetr Holasek #ifdef CONFIG_NUMA 313990bd6fd3SPetr Holasek &merge_across_nodes_attr.attr, 314090bd6fd3SPetr Holasek #endif 31412c653d0eSAndrea Arcangeli &max_page_sharing_attr.attr, 31422c653d0eSAndrea Arcangeli &stable_node_chains_attr.attr, 31432c653d0eSAndrea Arcangeli 
&stable_node_dups_attr.attr, 31442c653d0eSAndrea Arcangeli &stable_node_chains_prune_millisecs_attr.attr, 3145e86c59b1SClaudio Imbrenda &use_zero_pages_attr.attr, 314631dbd01fSIzik Eidus NULL, 314731dbd01fSIzik Eidus }; 314831dbd01fSIzik Eidus 3149f907c26aSArvind Yadav static const struct attribute_group ksm_attr_group = { 315031dbd01fSIzik Eidus .attrs = ksm_attrs, 315131dbd01fSIzik Eidus .name = "ksm", 315231dbd01fSIzik Eidus }; 31532ffd8679SHugh Dickins #endif /* CONFIG_SYSFS */ 315431dbd01fSIzik Eidus 315531dbd01fSIzik Eidus static int __init ksm_init(void) 315631dbd01fSIzik Eidus { 315731dbd01fSIzik Eidus struct task_struct *ksm_thread; 315831dbd01fSIzik Eidus int err; 315931dbd01fSIzik Eidus 3160e86c59b1SClaudio Imbrenda /* The correct value depends on page size and endianness */ 3161e86c59b1SClaudio Imbrenda zero_checksum = calc_checksum(ZERO_PAGE(0)); 3162e86c59b1SClaudio Imbrenda /* Default to false for backwards compatibility */ 3163e86c59b1SClaudio Imbrenda ksm_use_zero_pages = false; 3164e86c59b1SClaudio Imbrenda 316531dbd01fSIzik Eidus err = ksm_slab_init(); 316631dbd01fSIzik Eidus if (err) 316731dbd01fSIzik Eidus goto out; 316831dbd01fSIzik Eidus 316931dbd01fSIzik Eidus ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd"); 317031dbd01fSIzik Eidus if (IS_ERR(ksm_thread)) { 317125acde31SPaul McQuade pr_err("ksm: creating kthread failed\n"); 317231dbd01fSIzik Eidus err = PTR_ERR(ksm_thread); 3173d9f8984cSLai Jiangshan goto out_free; 317431dbd01fSIzik Eidus } 317531dbd01fSIzik Eidus 31762ffd8679SHugh Dickins #ifdef CONFIG_SYSFS 317731dbd01fSIzik Eidus err = sysfs_create_group(mm_kobj, &ksm_attr_group); 317831dbd01fSIzik Eidus if (err) { 317925acde31SPaul McQuade pr_err("ksm: register sysfs failed\n"); 31802ffd8679SHugh Dickins kthread_stop(ksm_thread); 3181d9f8984cSLai Jiangshan goto out_free; 318231dbd01fSIzik Eidus } 3183c73602adSHugh Dickins #else 3184c73602adSHugh Dickins ksm_run = KSM_RUN_MERGE; /* no way for user to start it */ 3185c73602adSHugh Dickins 31862ffd8679SHugh Dickins #endif /* CONFIG_SYSFS */ 318731dbd01fSIzik Eidus 318862b61f61SHugh Dickins #ifdef CONFIG_MEMORY_HOTREMOVE 3189ef4d43a8SHugh Dickins /* There is no significance to this priority 100 */ 319062b61f61SHugh Dickins hotplug_memory_notifier(ksm_memory_callback, 100); 319162b61f61SHugh Dickins #endif 319231dbd01fSIzik Eidus return 0; 319331dbd01fSIzik Eidus 3194d9f8984cSLai Jiangshan out_free: 319531dbd01fSIzik Eidus ksm_slab_free(); 319631dbd01fSIzik Eidus out: 319731dbd01fSIzik Eidus return err; 319831dbd01fSIzik Eidus } 3199a64fb3cdSPaul Gortmaker subsys_initcall(ksm_init); 3200
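/*
 * Usage sketch (illustrative only, not part of this file): a minimal
 * userspace program that opts an anonymous region into KSM scanning
 * via madvise(), reaching ksm_madvise() above.  All calls below are
 * standard Linux/POSIX; the region size (which assumes 4KiB pages),
 * the fill byte and the sleep are arbitrary choices for the
 * demonstration, and ksmd must be running, e.g. after writing 1 to
 * /sys/kernel/mm/ksm/run.
 *
 *	#include <string.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 256 * 4096;
 *		char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (buf == MAP_FAILED)
 *			return 1;
 *		memset(buf, 0x5a, len);
 *		if (madvise(buf, len, MADV_MERGEABLE))
 *			return 1;
 *		sleep(60);
 *		return 0;
 *	}
 *
 * memset() makes all 256 pages byte-identical, so after a few full
 * scans ksmd typically folds them into a single write-protected ksm
 * page: pages_shared rises by 1 and pages_sharing by about 255 under
 * /sys/kernel/mm/ksm/.  Writing to buf afterwards simply COWs a
 * private copy, since the merged page is mapped write-protected.
 */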