/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork()
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Authors:
 *      Izik Eidus
 *      Andrea Arcangeli
 *      Chris Wright
 *      Hugh Dickins
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>
#include <linux/hashtable.h>
#include <linux/freezer.h>
#include <linux/oom.h>
#include <linux/numa.h>

#include <asm/tlbflush.h>
#include "internal.h"

#ifdef CONFIG_NUMA
#define NUMA(x)         (x)
#define DO_NUMA(x)      do { (x); } while (0)
#else
#define NUMA(x)         (0)
#define DO_NUMA(x)      do { } while (0)
#endif

/*
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents.  Because each such page is write-protected, searching on
 * this tree is fully assured to be working (except when pages are unmapped),
 * and therefore this tree is called the stable tree.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time".  The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a red-black tree - so its balancing is based on
 *    the colors of the nodes and not on their contents, assuring that even
 *    when the tree gets "corrupted" it won't get out of balance, so scanning
 *    time remains the same (also, searching and inserting nodes in an rbtree
 *    uses the same algorithm, so we have no overhead when we flush and
 *    rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree.  (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
 *
 * If the merge_across_nodes tunable is unset, then KSM maintains multiple
 * stable trees and multiple unstable trees: one of each for each NUMA node.
 */
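
/*
 * Usage sketch (userspace side, not part of this file): a process opts a
 * region into scanning with madvise(2), and an administrator starts ksmd
 * through sysfs, as described in Documentation/vm/ksm.txt:
 *
 *      madvise(addr, length, MADV_MERGEABLE);
 *      # echo 1 > /sys/kernel/mm/ksm/run
 *
 * Everything below implements the kernel side of that contract.
 */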

/**
 * struct mm_slot - ksm information per mm that is being scanned
 * @link: link to the mm_slots hash list
 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
        struct hlist_node link;
        struct list_head mm_list;
        struct rmap_item *rmap_list;
        struct mm_struct *mm;
};

/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
        struct mm_slot *mm_slot;
        unsigned long address;
        struct rmap_item **rmap_list;
        unsigned long seqnr;
};

/**
 * struct stable_node - node of the stable rbtree
 * @node: rb node of this ksm page in the stable tree
 * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
 * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
 * @list: linked into migrate_nodes, pending placement in the proper node tree
 * @hlist: hlist head of rmap_items using this ksm page
 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
 * @chain_prune_time: time of the last full garbage collection
 * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN
 * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
 */
struct stable_node {
        union {
                struct rb_node node;    /* when node of stable tree */
                struct {                /* when listed for migration */
                        struct list_head *head;
                        struct {
                                struct hlist_node hlist_dup;
                                struct list_head list;
                        };
                };
        };
        struct hlist_head hlist;
        union {
                unsigned long kpfn;
                unsigned long chain_prune_time;
        };
        /*
         * STABLE_NODE_CHAIN can be any negative number in
         * rmap_hlist_len negative range, but better not -1 to be able
         * to reliably detect underflows.
         */
#define STABLE_NODE_CHAIN -1024
        int rmap_hlist_len;
#ifdef CONFIG_NUMA
        int nid;
#endif
};

/**
 * struct rmap_item - reverse mapping item for virtual addresses
 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
 * @nid: NUMA node id of unstable tree in which linked (may not match page)
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @node: rb node of this rmap_item in the unstable tree
 * @head: pointer to stable_node heading this list in the stable tree
 * @hlist: link into hlist of rmap_items hanging off that stable_node
 */
struct rmap_item {
        struct rmap_item *rmap_list;
        union {
                struct anon_vma *anon_vma;      /* when stable */
#ifdef CONFIG_NUMA
                int nid;                /* when node of unstable tree */
#endif
        };
        struct mm_struct *mm;
        unsigned long address;          /* + low bits used for flags below */
        unsigned int oldchecksum;       /* when unstable */
        union {
                struct rb_node node;    /* when node of unstable tree */
                struct {                /* when listed from stable tree */
                        struct stable_node *head;
                        struct hlist_node hlist;
                };
        };
};

#define SEQNR_MASK      0x0ff   /* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG   0x100   /* is a node of the unstable tree */
#define STABLE_FLAG     0x200   /* is listed from the stable tree */

/* The stable and unstable tree heads */
static struct rb_root one_stable_tree[1] = { RB_ROOT };
static struct rb_root one_unstable_tree[1] = { RB_ROOT };
static struct rb_root *root_stable_tree = one_stable_tree;
static struct rb_root *root_unstable_tree = one_unstable_tree;

/* Recently migrated nodes of stable tree, pending proper placement */
static LIST_HEAD(migrate_nodes);
#define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)

#define MM_SLOTS_HASH_BITS 10
static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct mm_slot ksm_mm_head = {
        .mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
};
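
/*
 * ksm_mm_head is a dummy anchor for the circular list of mm_slots to scan;
 * the single ksm_scan cursor below records which mm_slot, and which address
 * and rmap_item within it, ksmd should resume from after sleeping.
 */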
static struct ksm_scan ksm_scan = {
        .mm_slot = &ksm_mm_head,
};

static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* The number of stable_node chains */
static unsigned long ksm_stable_node_chains;

/* The number of stable_node dups linked to the stable_node chains */
static unsigned long ksm_stable_node_dups;

/* Delay in pruning stale stable_node_dups in the stable_node_chains */
static int ksm_stable_node_chains_prune_millisecs = 2000;

/* Maximum number of page slots sharing a stable node */
static int ksm_max_page_sharing = 256;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = 100;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;

/* Checksum of an empty (zeroed) page */
static unsigned int zero_checksum __read_mostly;

/* Whether to merge empty (zeroed) pages with actual zero pages */
static bool ksm_use_zero_pages __read_mostly;

#ifdef CONFIG_NUMA
/* Zeroed when merging across nodes is not allowed */
static unsigned int ksm_merge_across_nodes = 1;
static int ksm_nr_node_ids = 1;
#else
#define ksm_merge_across_nodes  1U
#define ksm_nr_node_ids         1
#endif

#define KSM_RUN_STOP    0
#define KSM_RUN_MERGE   1
#define KSM_RUN_UNMERGE 2
#define KSM_RUN_OFFLINE 4
static unsigned long ksm_run = KSM_RUN_STOP;
static void wait_while_offlining(void);

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);
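
/*
 * The KSM_RUN_* values mirror what userspace writes to
 * /sys/kernel/mm/ksm/run: 0 stops ksmd, 1 runs it, 2 unmerges all KSM pages
 * while stopped.  KSM_RUN_OFFLINE is internal only, used to park ksmd while
 * memory is being offlined (cf. wait_while_offlining() above).
 */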

#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
                sizeof(struct __struct), __alignof__(struct __struct),\
                (__flags), NULL)

static int __init ksm_slab_init(void)
{
        rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
        if (!rmap_item_cache)
                goto out;

        stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
        if (!stable_node_cache)
                goto out_free1;

        mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
        if (!mm_slot_cache)
                goto out_free2;

        return 0;

out_free2:
        kmem_cache_destroy(stable_node_cache);
out_free1:
        kmem_cache_destroy(rmap_item_cache);
out:
        return -ENOMEM;
}

static void __init ksm_slab_free(void)
{
        kmem_cache_destroy(mm_slot_cache);
        kmem_cache_destroy(stable_node_cache);
        kmem_cache_destroy(rmap_item_cache);
        mm_slot_cache = NULL;
}

static __always_inline bool is_stable_node_chain(struct stable_node *chain)
{
        return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
}

static __always_inline bool is_stable_node_dup(struct stable_node *dup)
{
        return dup->head == STABLE_NODE_DUP_HEAD;
}

static inline void stable_node_chain_add_dup(struct stable_node *dup,
                                             struct stable_node *chain)
{
        VM_BUG_ON(is_stable_node_dup(dup));
        dup->head = STABLE_NODE_DUP_HEAD;
        VM_BUG_ON(!is_stable_node_chain(chain));
        hlist_add_head(&dup->hlist_dup, &chain->hlist);
        ksm_stable_node_dups++;
}

static inline void __stable_node_dup_del(struct stable_node *dup)
{
        VM_BUG_ON(!is_stable_node_dup(dup));
        hlist_del(&dup->hlist_dup);
        ksm_stable_node_dups--;
}

static inline void stable_node_dup_del(struct stable_node *dup)
{
        VM_BUG_ON(is_stable_node_chain(dup));
        if (is_stable_node_dup(dup))
                __stable_node_dup_del(dup);
        else
                rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));
#ifdef CONFIG_DEBUG_VM
        dup->head = NULL;
#endif
}
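
/*
 * A sketch (not code) of the two-dimensional stable tree layout which the
 * helpers above maintain once ksm_max_page_sharing forces duplicate KSM
 * pages of the same content:
 *
 *      rb-tree node: chain    (chain->rmap_hlist_len == STABLE_NODE_CHAIN)
 *          chain->hlist: dup0 -> dup1 -> ...
 *                        (each dup->head == STABLE_NODE_DUP_HEAD)
 *
 * An unchained stable_node sits directly in the rb-tree instead.
 */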

static inline struct rmap_item *alloc_rmap_item(void)
{
        struct rmap_item *rmap_item;

        rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
                                                __GFP_NORETRY | __GFP_NOWARN);
        if (rmap_item)
                ksm_rmap_items++;
        return rmap_item;
}

static inline void free_rmap_item(struct rmap_item *rmap_item)
{
        ksm_rmap_items--;
        rmap_item->mm = NULL;   /* debug safety */
        kmem_cache_free(rmap_item_cache, rmap_item);
}

static inline struct stable_node *alloc_stable_node(void)
{
        /*
         * The allocation can take too long with GFP_KERNEL when memory is
         * under pressure, which may lead to hung task warnings.  Adding
         * __GFP_HIGH grants access to memory reserves, helping to avoid
         * this problem.
         */
        return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
}

static inline void free_stable_node(struct stable_node *stable_node)
{
        VM_BUG_ON(stable_node->rmap_hlist_len &&
                  !is_stable_node_chain(stable_node));
        kmem_cache_free(stable_node_cache, stable_node);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
        if (!mm_slot_cache)     /* initialization failed */
                return NULL;
        return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
        kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
        struct mm_slot *slot;

        hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm)
                if (slot->mm == mm)
                        return slot;

        return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
                                    struct mm_slot *mm_slot)
{
        mm_slot->mm = mm;
        hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm);
}

/*
 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
 * page tables after it has passed through ksm_exit() - which, if necessary,
 * takes mmap_sem briefly to serialize against them.  ksm_exit() does not set
 * a special flag: they can just back out as soon as mm_users goes to zero.
 * ksm_test_exit() is used throughout to make this test for exit: in some
 * places for correctness, in some places just to avoid unnecessary work.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
        return atomic_read(&mm->mm_users) == 0;
}
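
/*
 * For example, the unmerge loop in unmerge_and_remove_all_rmap_items()
 * below calls ksm_test_exit() after taking mmap_sem, and backs out if
 * exit_mmap() is already tearing this mm down; the mm_slot itself is then
 * reaped under ksm_mmlist_lock.
 */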

/*
 * We use break_ksm to break COW on a ksm page: it's a stripped down
 *
 *      if (get_user_pages(addr, 1, 1, 1, &page, NULL) == 1)
 *              put_page(page);
 *
 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
 *
 * FAULT_FLAG/FOLL_REMOTE are because we do this outside the context
 * of the process that owns 'vma'.  We also do not want to enforce
 * protection keys here anyway.
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
{
        struct page *page;
        int ret = 0;

        do {
                cond_resched();
                page = follow_page(vma, addr,
                                FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
                if (IS_ERR_OR_NULL(page))
                        break;
                if (PageKsm(page))
                        ret = handle_mm_fault(vma, addr,
                                        FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE);
                else
                        ret = VM_FAULT_WRITE;
                put_page(page);
        } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
        /*
         * We must loop because handle_mm_fault() may back out if there's
         * any difficulty e.g. if pte accessed bit gets updated concurrently.
         *
         * VM_FAULT_WRITE is what we have been hoping for: it indicates that
         * COW has been broken, even if the vma does not permit VM_WRITE;
         * but note that a concurrent fault might break PageKsm for us.
         *
         * VM_FAULT_SIGBUS could occur if we race with truncation of the
         * backing file, which also invalidates anonymous pages: that's
         * okay, that truncation will have unmapped the PageKsm for us.
         *
         * VM_FAULT_OOM: at the time of writing (late July 2009), setting
         * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
         * current task has TIF_MEMDIE set, and will be OOM killed on return
         * to user; and ksmd, having no mm, would never be chosen for that.
         *
         * But if the mm is in a limited mem_cgroup, then the fault may fail
         * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
         * even ksmd can fail in this way - though it's usually breaking ksm
         * just to undo a merge it made a moment before, so unlikely to oom.
         *
         * That's a pity: we might therefore have more kernel pages allocated
         * than we're counting as nodes in the stable tree; but ksm_do_scan
         * will retry to break_cow on each pass, so should recover the page
         * in due course.  The important thing is to not let VM_MERGEABLE
         * be cleared while any such pages might remain in the area.
         */
        return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}
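
/*
 * In short, break_ksm()'s contract with its callers is narrow: 0 means the
 * COW break succeeded or is moot (the page is gone), -ENOMEM means the
 * fault hit OOM; every other fault outcome is absorbed by the loop above.
 */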

static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
                unsigned long addr)
{
        struct vm_area_struct *vma;
        if (ksm_test_exit(mm))
                return NULL;
        vma = find_vma(mm, addr);
        if (!vma || vma->vm_start > addr)
                return NULL;
        if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
                return NULL;
        return vma;
}

static void break_cow(struct rmap_item *rmap_item)
{
        struct mm_struct *mm = rmap_item->mm;
        unsigned long addr = rmap_item->address;
        struct vm_area_struct *vma;

        /*
         * It is not an accident that whenever we want to break COW
         * to undo, we also need to drop a reference to the anon_vma.
         */
        put_anon_vma(rmap_item->anon_vma);

        down_read(&mm->mmap_sem);
        vma = find_mergeable_vma(mm, addr);
        if (vma)
                break_ksm(vma, addr);
        up_read(&mm->mmap_sem);
}

static struct page *get_mergeable_page(struct rmap_item *rmap_item)
{
        struct mm_struct *mm = rmap_item->mm;
        unsigned long addr = rmap_item->address;
        struct vm_area_struct *vma;
        struct page *page;

        down_read(&mm->mmap_sem);
        vma = find_mergeable_vma(mm, addr);
        if (!vma)
                goto out;

        page = follow_page(vma, addr, FOLL_GET);
        if (IS_ERR_OR_NULL(page))
                goto out;
        if (PageAnon(page)) {
                flush_anon_page(vma, page, addr);
                flush_dcache_page(page);
        } else {
                put_page(page);
out:
                page = NULL;
        }
        up_read(&mm->mmap_sem);
        return page;
}

/*
 * This helper is used for getting the right index into the array of tree
 * roots.  When the merge_across_nodes knob is set to 1, there are only two
 * rb-trees for stable and unstable pages from all nodes, with roots in
 * index 0.  Otherwise, every node has its own stable and unstable tree.
 */
static inline int get_kpfn_nid(unsigned long kpfn)
{
        return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
}
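
/*
 * E.g. with merge_across_nodes unset on a two-node machine, a KSM page
 * whose pfn lies on node 1 is indexed into root_stable_tree[1]; with the
 * knob set, every page lands in the single tree at index 0.
 */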

static struct stable_node *alloc_stable_node_chain(struct stable_node *dup,
                                                   struct rb_root *root)
{
        struct stable_node *chain = alloc_stable_node();
        VM_BUG_ON(is_stable_node_chain(dup));
        if (likely(chain)) {
                INIT_HLIST_HEAD(&chain->hlist);
                chain->chain_prune_time = jiffies;
                chain->rmap_hlist_len = STABLE_NODE_CHAIN;
#if defined(CONFIG_DEBUG_VM) && defined(CONFIG_NUMA)
                chain->nid = -1;        /* debug */
#endif
                ksm_stable_node_chains++;

                /*
                 * Put the stable node chain in the first dimension of
                 * the stable tree and at the same time remove the old
                 * stable node.
                 */
                rb_replace_node(&dup->node, &chain->node, root);

                /*
                 * Move the old stable node to the second dimension
                 * queued in the hlist_dup.  The invariant is that all
                 * dup stable_nodes in the chain->hlist point to pages
                 * that are wrprotected and have the exact same
                 * content.
                 */
                stable_node_chain_add_dup(dup, chain);
        }
        return chain;
}

static inline void free_stable_node_chain(struct stable_node *chain,
                                          struct rb_root *root)
{
        rb_erase(&chain->node, root);
        free_stable_node(chain);
        ksm_stable_node_chains--;
}

static void remove_node_from_stable_tree(struct stable_node *stable_node)
{
        struct rmap_item *rmap_item;

        /* check it's not STABLE_NODE_CHAIN or negative */
        BUG_ON(stable_node->rmap_hlist_len < 0);

        hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
                if (rmap_item->hlist.next)
                        ksm_pages_sharing--;
                else
                        ksm_pages_shared--;
                VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
                stable_node->rmap_hlist_len--;
                put_anon_vma(rmap_item->anon_vma);
                rmap_item->address &= PAGE_MASK;
                cond_resched();
        }

        /*
         * We need the second aligned pointer of the migrate_nodes
         * list_head to stay clear from the rb_parent_color union
         * (aligned and different than any node) and also different
         * from &migrate_nodes.  This will verify that future list.h changes
         * don't break STABLE_NODE_DUP_HEAD.
         */
#if GCC_VERSION >= 40903 /* only recent gcc can handle it */
        BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
        BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);
#endif

        if (stable_node->head == &migrate_nodes)
                list_del(&stable_node->list);
        else
                stable_node_dup_del(stable_node);
        free_stable_node(stable_node);
}

/*
 * get_ksm_page: checks if the page indicated by the stable node
 * is still its ksm page, despite having held no reference to it.
 * In which case we can trust the content of the page, and it
 * returns the gotten page; but if the page has now been zapped,
 * remove the stale node from the stable tree and return NULL.
 * But beware, the stable node's page might be being migrated.
 *
 * You would expect the stable_node to hold a reference to the ksm page.
 * But if it increments the page's count, swapping out has to wait for
 * ksmd to come around again before it can free the page, which may take
 * seconds or even minutes: much too unresponsive.  So instead we use a
 * "keyhole reference": access to the ksm page from the stable node peeps
 * out through its keyhole to see if that page still holds the right key,
 * pointing back to this stable node.  This relies on freeing a PageAnon
 * page to reset its page->mapping to NULL, and relies on no other use of
 * a page to put something that might look like our key in page->mapping.
 * A page whose refcount has already dropped to zero may still carry our
 * key while it is on its way to being freed: an anomaly to bear in mind
 * (see the get_page_unless_zero() loop below).
 */
static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
{
        struct page *page;
        void *expected_mapping;
        unsigned long kpfn;

        expected_mapping = (void *)((unsigned long)stable_node |
                                        PAGE_MAPPING_KSM);
again:
        kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
        page = pfn_to_page(kpfn);
        if (READ_ONCE(page->mapping) != expected_mapping)
                goto stale;

        /*
         * We cannot do anything with the page while its refcount is 0.
         * Usually 0 means free, or tail of a higher-order page: in which
         * case this node is no longer referenced, and should be freed;
         * however, it might mean that the page is under page_freeze_refs().
         * The __remove_mapping() case is easy, again the node is now stale;
         * but if page is swapcache in migrate_page_move_mapping(), it might
         * still be our page, in which case it's essential to keep the node.
         */
        while (!get_page_unless_zero(page)) {
                /*
                 * Another check for page->mapping != expected_mapping would
                 * work here too.  We have chosen the !PageSwapCache test to
                 * optimize the common case, when the page is or is about to
                 * be freed: PageSwapCache is cleared (under spin_lock_irq)
                 * in the freeze_refs section of __remove_mapping(); but Anon
                 * page->mapping is reset to NULL later, in free_pages_prepare().
                 */
                if (!PageSwapCache(page))
                        goto stale;
                cpu_relax();
        }

        if (READ_ONCE(page->mapping) != expected_mapping) {
                put_page(page);
                goto stale;
        }

        if (lock_it) {
                lock_page(page);
                if (READ_ONCE(page->mapping) != expected_mapping) {
                        unlock_page(page);
                        put_page(page);
                        goto stale;
                }
        }
        return page;

stale:
        /*
         * We come here from above when page->mapping or !PageSwapCache
         * suggests that the node is stale; but it might be under migration.
         * We need smp_rmb(), matching the smp_wmb() in ksm_migrate_page(),
         * before checking whether node->kpfn has been changed.
         */
        smp_rmb();
        if (READ_ONCE(stable_node->kpfn) != kpfn)
                goto again;
        remove_node_from_stable_tree(stable_node);
        return NULL;
}
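
/*
 * Callers pass lock_it == true when they go on to operate on the page and
 * so must exclude parallel changes to page->mapping meanwhile, as
 * remove_rmap_item_from_tree() and remove_stable_node() below do.
 */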

/*
 * Removing rmap_item from stable or unstable tree.
 * This function will clean the information from the stable/unstable tree.
 */
static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
{
        if (rmap_item->address & STABLE_FLAG) {
                struct stable_node *stable_node;
                struct page *page;

                stable_node = rmap_item->head;
                page = get_ksm_page(stable_node, true);
                if (!page)
                        goto out;

                hlist_del(&rmap_item->hlist);
                unlock_page(page);
                put_page(page);

                if (!hlist_empty(&stable_node->hlist))
                        ksm_pages_sharing--;
                else
                        ksm_pages_shared--;
                VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
                stable_node->rmap_hlist_len--;

                put_anon_vma(rmap_item->anon_vma);
                rmap_item->address &= PAGE_MASK;

        } else if (rmap_item->address & UNSTABLE_FLAG) {
                unsigned char age;
                /*
                 * Usually ksmd can and must skip the rb_erase, because
                 * root_unstable_tree was already reset to RB_ROOT.
                 * But be careful when an mm is exiting: do the rb_erase
                 * if this rmap_item was inserted by this scan, rather
                 * than left over from before.
                 */
                age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
                BUG_ON(age > 1);
                if (!age)
                        rb_erase(&rmap_item->node,
                                 root_unstable_tree + NUMA(rmap_item->nid));
                ksm_pages_unshared--;
                rmap_item->address &= PAGE_MASK;
        }
out:
        cond_resched();         /* we're called from many long loops */
}

static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
                                       struct rmap_item **rmap_list)
{
        while (*rmap_list) {
                struct rmap_item *rmap_item = *rmap_list;
                *rmap_list = rmap_item->rmap_list;
                remove_rmap_item_from_tree(rmap_item);
                free_rmap_item(rmap_item);
        }
}

/*
 * Though it's very tempting to unmerge rmap_items from the stable tree
 * rather than check every pte of a given vma, the locking doesn't quite
 * work for that - an rmap_item is assigned to the stable tree after
 * inserting the ksm page and upping mmap_sem.  Nor does it fit with the
 * way we skip dup'ing rmap_items from parent to child at fork time (so as
 * not to waste time if exit comes before the next scan reaches it).
 *
 * Similarly, although we'd like to remove rmap_items (so updating counts
 * and freeing memory) when unmerging an area, it's easier to leave that
 * to the next pass of ksmd - consider, for example, how ksmd might be
 * in cmp_and_merge_page on one of the rmap_items we would be removing.
 */
static int unmerge_ksm_pages(struct vm_area_struct *vma,
                             unsigned long start, unsigned long end)
{
        unsigned long addr;
        int err = 0;

        for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
                if (ksm_test_exit(vma->vm_mm))
                        break;
                if (signal_pending(current))
                        err = -ERESTARTSYS;
                else
                        err = break_ksm(vma, addr);
        }
        return err;
}

#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */
static int remove_stable_node(struct stable_node *stable_node)
{
        struct page *page;
        int err;

        page = get_ksm_page(stable_node, true);
        if (!page) {
                /*
                 * get_ksm_page did remove_node_from_stable_tree itself.
                 */
                return 0;
        }

        if (WARN_ON_ONCE(page_mapped(page))) {
                /*
                 * This should not happen: but if it does, just refuse to let
                 * merge_across_nodes be switched - there is no need to panic.
                 */
                err = -EBUSY;
        } else {
                /*
                 * The stable node did not yet appear stale to get_ksm_page(),
                 * since that allows for an unmapped ksm page to be recognized
                 * right up until it is freed; but the node is safe to remove.
                 * This page might be in a pagevec waiting to be freed,
                 * or it might be PageSwapCache (perhaps under writeback),
                 * or it might have been removed from swapcache a moment ago.
                 */
                set_page_stable_node(page, NULL);
                remove_node_from_stable_tree(stable_node);
                err = 0;
        }

        unlock_page(page);
        put_page(page);
        return err;
}

static int remove_stable_node_chain(struct stable_node *stable_node,
                                    struct rb_root *root)
{
        struct stable_node *dup;
        struct hlist_node *hlist_safe;

        if (!is_stable_node_chain(stable_node)) {
                VM_BUG_ON(is_stable_node_dup(stable_node));
                if (remove_stable_node(stable_node))
                        return true;
                else
                        return false;
        }

        hlist_for_each_entry_safe(dup, hlist_safe,
                                  &stable_node->hlist, hlist_dup) {
                VM_BUG_ON(!is_stable_node_dup(dup));
                if (remove_stable_node(dup))
                        return true;
        }
        BUG_ON(!hlist_empty(&stable_node->hlist));
        free_stable_node_chain(stable_node, root);
        return false;
}

static int remove_all_stable_nodes(void)
{
        struct stable_node *stable_node, *next;
        int nid;
        int err = 0;

        for (nid = 0; nid < ksm_nr_node_ids; nid++) {
                while (root_stable_tree[nid].rb_node) {
                        stable_node = rb_entry(root_stable_tree[nid].rb_node,
                                               struct stable_node, node);
                        if (remove_stable_node_chain(stable_node,
                                                     root_stable_tree + nid)) {
                                err = -EBUSY;
                                break;  /* proceed to next nid */
                        }
                        cond_resched();
                }
        }
        list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
                if (remove_stable_node(stable_node))
                        err = -EBUSY;
                cond_resched();
        }
        return err;
}

static int unmerge_and_remove_all_rmap_items(void)
{
        struct mm_slot *mm_slot;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        int err = 0;

        spin_lock(&ksm_mmlist_lock);
        ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
                                                struct mm_slot, mm_list);
        spin_unlock(&ksm_mmlist_lock);

        for (mm_slot = ksm_scan.mm_slot;
                        mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
                mm = mm_slot->mm;
                down_read(&mm->mmap_sem);
                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                        if (ksm_test_exit(mm))
                                break;
                        if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
                                continue;
                        err = unmerge_ksm_pages(vma,
                                                vma->vm_start, vma->vm_end);
                        if (err)
                                goto error;
                }

                remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
                up_read(&mm->mmap_sem);

                spin_lock(&ksm_mmlist_lock);
                ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
                                                struct mm_slot, mm_list);
                if (ksm_test_exit(mm)) {
                        hash_del(&mm_slot->link);
                        list_del(&mm_slot->mm_list);
                        spin_unlock(&ksm_mmlist_lock);

                        free_mm_slot(mm_slot);
                        clear_bit(MMF_VM_MERGEABLE, &mm->flags);
                        mmdrop(mm);
                } else
                        spin_unlock(&ksm_mmlist_lock);
        }

        /* Clean up stable nodes, but don't worry if some are still busy */
        remove_all_stable_nodes();
        ksm_scan.seqnr = 0;
        return 0;

error:
        up_read(&mm->mmap_sem);
        spin_lock(&ksm_mmlist_lock);
        ksm_scan.mm_slot = &ksm_mm_head;
        spin_unlock(&ksm_mmlist_lock);
        return err;
}
#endif /* CONFIG_SYSFS */

static u32 calc_checksum(struct page *page)
{
        u32 checksum;
        void *addr = kmap_atomic(page);
        checksum = jhash2(addr, PAGE_SIZE / 4, 17);
        kunmap_atomic(addr);
        return checksum;
}
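
/*
 * Note that the jhash2() checksum never decides a merge - the full
 * memcmp_pages() below does that.  The checksum is only compared against
 * rmap_item->oldchecksum, to judge whether a page stayed unchanged between
 * scans and so has earned a place in the unstable tree.
 */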

static int memcmp_pages(struct page *page1, struct page *page2)
{
        char *addr1, *addr2;
        int ret;

        addr1 = kmap_atomic(page1);
        addr2 = kmap_atomic(page2);
        ret = memcmp(addr1, addr2, PAGE_SIZE);
        kunmap_atomic(addr2);
        kunmap_atomic(addr1);
        return ret;
}

static inline int pages_identical(struct page *page1, struct page *page2)
{
        return !memcmp_pages(page1, page2);
}

static int write_protect_page(struct vm_area_struct *vma, struct page *page,
                              pte_t *orig_pte)
{
        struct mm_struct *mm = vma->vm_mm;
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
        };
        int swapped;
        int err = -EFAULT;
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */

        pvmw.address = page_address_in_vma(page, vma);
        if (pvmw.address == -EFAULT)
                goto out;

        BUG_ON(PageTransCompound(page));

        mmun_start = pvmw.address;
        mmun_end = pvmw.address + PAGE_SIZE;
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

        if (!page_vma_mapped_walk(&pvmw))
                goto out_mn;
        if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
                goto out_unlock;

        if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
            (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) ||
            mm_tlb_flush_pending(mm)) {
                pte_t entry;

                swapped = PageSwapCache(page);
                flush_cache_page(vma, pvmw.address, page_to_pfn(page));
                /*
                 * Ok this is tricky: when get_user_pages_fast() runs it
                 * doesn't take any lock, therefore the check that we are
                 * going to make with the pagecount against the mapcount is
                 * racy and O_DIRECT can happen right after the check.
                 * So we clear the pte and flush the TLB before the check;
                 * this assures us that no O_DIRECT can happen after the
                 * check or in the middle of the check.
                 *
                 * No need to notify as we are downgrading the page table to
                 * read-only, not changing it to point to a new page.
10510f10851eSJérôme Glisse * 10520f10851eSJérôme Glisse * See Documentation/vm/mmu_notifier.txt 105331dbd01fSIzik Eidus */ 10540f10851eSJérôme Glisse entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte); 105531dbd01fSIzik Eidus /* 105631dbd01fSIzik Eidus * Check that no O_DIRECT or similar I/O is in progress on the 105731dbd01fSIzik Eidus * page 105831dbd01fSIzik Eidus */ 105931e855eaSHugh Dickins if (page_mapcount(page) + 1 + swapped != page_count(page)) { 106036eaff33SKirill A. Shutemov set_pte_at(mm, pvmw.address, pvmw.pte, entry); 106131dbd01fSIzik Eidus goto out_unlock; 106231dbd01fSIzik Eidus } 10634e31635cSHugh Dickins if (pte_dirty(entry)) 10644e31635cSHugh Dickins set_page_dirty(page); 1065595cd8f2SAneesh Kumar K.V 1066595cd8f2SAneesh Kumar K.V if (pte_protnone(entry)) 1067595cd8f2SAneesh Kumar K.V entry = pte_mkclean(pte_clear_savedwrite(entry)); 1068595cd8f2SAneesh Kumar K.V else 10694e31635cSHugh Dickins entry = pte_mkclean(pte_wrprotect(entry)); 107036eaff33SKirill A. Shutemov set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry); 107131dbd01fSIzik Eidus } 107236eaff33SKirill A. Shutemov *orig_pte = *pvmw.pte; 107331dbd01fSIzik Eidus err = 0; 107431dbd01fSIzik Eidus 107531dbd01fSIzik Eidus out_unlock: 107636eaff33SKirill A. Shutemov page_vma_mapped_walk_done(&pvmw); 10776bdb913fSHaggai Eran out_mn: 10786bdb913fSHaggai Eran mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 107931dbd01fSIzik Eidus out: 108031dbd01fSIzik Eidus return err; 108131dbd01fSIzik Eidus } 108231dbd01fSIzik Eidus 108331dbd01fSIzik Eidus /** 108431dbd01fSIzik Eidus * replace_page - replace page in vma by new ksm page 10858dd3557aSHugh Dickins * @vma: vma that holds the pte pointing to page 10868dd3557aSHugh Dickins * @page: the page we are replacing by kpage 10878dd3557aSHugh Dickins * @kpage: the ksm page we replace page by 108831dbd01fSIzik Eidus * @orig_pte: the original value of the pte 108931dbd01fSIzik Eidus * 109031dbd01fSIzik Eidus * Returns 0 on success, -EFAULT on failure. 
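 *
 * A minimal usage sketch, mirroring the real caller
 * try_to_merge_one_page() below (not an additional entry point):
 *
 *	pte_t orig_pte = __pte(0);
 *
 *	if (write_protect_page(vma, page, &orig_pte) == 0 &&
 *	    pages_identical(page, kpage))
 *		err = replace_page(vma, page, kpage, orig_pte);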
109131dbd01fSIzik Eidus */ 10928dd3557aSHugh Dickins static int replace_page(struct vm_area_struct *vma, struct page *page, 10938dd3557aSHugh Dickins struct page *kpage, pte_t orig_pte) 109431dbd01fSIzik Eidus { 109531dbd01fSIzik Eidus struct mm_struct *mm = vma->vm_mm; 109631dbd01fSIzik Eidus pmd_t *pmd; 109731dbd01fSIzik Eidus pte_t *ptep; 1098e86c59b1SClaudio Imbrenda pte_t newpte; 109931dbd01fSIzik Eidus spinlock_t *ptl; 110031dbd01fSIzik Eidus unsigned long addr; 110131dbd01fSIzik Eidus int err = -EFAULT; 11026bdb913fSHaggai Eran unsigned long mmun_start; /* For mmu_notifiers */ 11036bdb913fSHaggai Eran unsigned long mmun_end; /* For mmu_notifiers */ 110431dbd01fSIzik Eidus 11058dd3557aSHugh Dickins addr = page_address_in_vma(page, vma); 110631dbd01fSIzik Eidus if (addr == -EFAULT) 110731dbd01fSIzik Eidus goto out; 110831dbd01fSIzik Eidus 11096219049aSBob Liu pmd = mm_find_pmd(mm, addr); 11106219049aSBob Liu if (!pmd) 111131dbd01fSIzik Eidus goto out; 111231dbd01fSIzik Eidus 11136bdb913fSHaggai Eran mmun_start = addr; 11146bdb913fSHaggai Eran mmun_end = addr + PAGE_SIZE; 11156bdb913fSHaggai Eran mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 11166bdb913fSHaggai Eran 111731dbd01fSIzik Eidus ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); 111831dbd01fSIzik Eidus if (!pte_same(*ptep, orig_pte)) { 111931dbd01fSIzik Eidus pte_unmap_unlock(ptep, ptl); 11206bdb913fSHaggai Eran goto out_mn; 112131dbd01fSIzik Eidus } 112231dbd01fSIzik Eidus 1123e86c59b1SClaudio Imbrenda /* 1124e86c59b1SClaudio Imbrenda * No need to check ksm_use_zero_pages here: we can only have a 1125e86c59b1SClaudio Imbrenda * zero_page here if ksm_use_zero_pages was enabled already. 1126e86c59b1SClaudio Imbrenda */ 1127e86c59b1SClaudio Imbrenda if (!is_zero_pfn(page_to_pfn(kpage))) { 11288dd3557aSHugh Dickins get_page(kpage); 1129d281ee61SKirill A. Shutemov page_add_anon_rmap(kpage, vma, addr, false); 1130e86c59b1SClaudio Imbrenda newpte = mk_pte(kpage, vma->vm_page_prot); 1131e86c59b1SClaudio Imbrenda } else { 1132e86c59b1SClaudio Imbrenda newpte = pte_mkspecial(pfn_pte(page_to_pfn(kpage), 1133e86c59b1SClaudio Imbrenda vma->vm_page_prot)); 1134*a38c015fSClaudio Imbrenda /* 1135*a38c015fSClaudio Imbrenda * We're replacing an anonymous page with a zero page, which is 1136*a38c015fSClaudio Imbrenda * not anonymous. We need to do proper accounting otherwise we 1137*a38c015fSClaudio Imbrenda * will get wrong values in /proc, and a BUG message in dmesg 1138*a38c015fSClaudio Imbrenda * when tearing down the mm. 1139*a38c015fSClaudio Imbrenda */ 1140*a38c015fSClaudio Imbrenda dec_mm_counter(mm, MM_ANONPAGES); 1141e86c59b1SClaudio Imbrenda } 114231dbd01fSIzik Eidus 114331dbd01fSIzik Eidus flush_cache_page(vma, addr, pte_pfn(*ptep)); 11440f10851eSJérôme Glisse /* 11450f10851eSJérôme Glisse * No need to notify as we are replacing a read only page with another 11460f10851eSJérôme Glisse * read only page with the same content. 11470f10851eSJérôme Glisse * 11480f10851eSJérôme Glisse * See Documentation/vm/mmu_notifier.txt 11490f10851eSJérôme Glisse */ 11500f10851eSJérôme Glisse ptep_clear_flush(vma, addr, ptep); 1151e86c59b1SClaudio Imbrenda set_pte_at_notify(mm, addr, ptep, newpte); 115231dbd01fSIzik Eidus 1153d281ee61SKirill A.
Shutemov page_remove_rmap(page, false); 1154ae52a2adSHugh Dickins if (!page_mapped(page)) 1155ae52a2adSHugh Dickins try_to_free_swap(page); 11568dd3557aSHugh Dickins put_page(page); 115731dbd01fSIzik Eidus 115831dbd01fSIzik Eidus pte_unmap_unlock(ptep, ptl); 115931dbd01fSIzik Eidus err = 0; 11606bdb913fSHaggai Eran out_mn: 11616bdb913fSHaggai Eran mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 116231dbd01fSIzik Eidus out: 116331dbd01fSIzik Eidus return err; 116431dbd01fSIzik Eidus } 116531dbd01fSIzik Eidus 116631dbd01fSIzik Eidus /* 116731dbd01fSIzik Eidus * try_to_merge_one_page - take two pages and merge them into one 11688dd3557aSHugh Dickins * @vma: the vma that holds the pte pointing to page 11698dd3557aSHugh Dickins * @page: the PageAnon page that we want to replace with kpage 117080e14822SHugh Dickins * @kpage: the PageKsm page that we want to map instead of page, 117180e14822SHugh Dickins * or NULL the first time when we want to use page as kpage. 117231dbd01fSIzik Eidus * 117331dbd01fSIzik Eidus * This function returns 0 if the pages were merged, -EFAULT otherwise. 117431dbd01fSIzik Eidus */ 117531dbd01fSIzik Eidus static int try_to_merge_one_page(struct vm_area_struct *vma, 11768dd3557aSHugh Dickins struct page *page, struct page *kpage) 117731dbd01fSIzik Eidus { 117831dbd01fSIzik Eidus pte_t orig_pte = __pte(0); 117931dbd01fSIzik Eidus int err = -EFAULT; 118031dbd01fSIzik Eidus 1181db114b83SHugh Dickins if (page == kpage) /* ksm page forked */ 1182db114b83SHugh Dickins return 0; 1183db114b83SHugh Dickins 11848dd3557aSHugh Dickins if (!PageAnon(page)) 118531dbd01fSIzik Eidus goto out; 118631dbd01fSIzik Eidus 118731dbd01fSIzik Eidus /* 118831dbd01fSIzik Eidus * We need the page lock to read a stable PageSwapCache in 118931dbd01fSIzik Eidus * write_protect_page(). We use trylock_page() instead of 119031dbd01fSIzik Eidus * lock_page() because we don't want to wait here - we 119131dbd01fSIzik Eidus * prefer to continue scanning and merging different pages, 119231dbd01fSIzik Eidus * then come back to this page when it is unlocked. 119331dbd01fSIzik Eidus */ 11948dd3557aSHugh Dickins if (!trylock_page(page)) 119531e855eaSHugh Dickins goto out; 1196f765f540SKirill A. Shutemov 1197f765f540SKirill A. Shutemov if (PageTransCompound(page)) { 1198a7306c34SAndrea Arcangeli if (split_huge_page(page)) 1199f765f540SKirill A. Shutemov goto out_unlock; 1200f765f540SKirill A. Shutemov } 1201f765f540SKirill A. Shutemov 120231dbd01fSIzik Eidus /* 120331dbd01fSIzik Eidus * If this anonymous page is mapped only here, its pte may need 120431dbd01fSIzik Eidus * to be write-protected. If it's mapped elsewhere, all of its 120531dbd01fSIzik Eidus * ptes are necessarily already write-protected. But in either 120631dbd01fSIzik Eidus * case, we need to lock and check page_count is not raised. 120731dbd01fSIzik Eidus */ 120880e14822SHugh Dickins if (write_protect_page(vma, page, &orig_pte) == 0) { 120980e14822SHugh Dickins if (!kpage) { 121080e14822SHugh Dickins /* 121180e14822SHugh Dickins * While we hold page lock, upgrade page from 121280e14822SHugh Dickins * PageAnon+anon_vma to PageKsm+NULL stable_node: 121380e14822SHugh Dickins * stable_tree_insert() will update stable_node. 
121480e14822SHugh Dickins */ 121580e14822SHugh Dickins set_page_stable_node(page, NULL); 121680e14822SHugh Dickins mark_page_accessed(page); 1217337ed7ebSMinchan Kim /* 1218337ed7ebSMinchan Kim * Page reclaim just frees a clean page with no dirty 1219337ed7ebSMinchan Kim * ptes: make sure that the ksm page would be swapped. 1220337ed7ebSMinchan Kim */ 1221337ed7ebSMinchan Kim if (!PageDirty(page)) 1222337ed7ebSMinchan Kim SetPageDirty(page); 122380e14822SHugh Dickins err = 0; 122480e14822SHugh Dickins } else if (pages_identical(page, kpage)) 12258dd3557aSHugh Dickins err = replace_page(vma, page, kpage, orig_pte); 122680e14822SHugh Dickins } 122731dbd01fSIzik Eidus 122880e14822SHugh Dickins if ((vma->vm_flags & VM_LOCKED) && kpage && !err) { 122973848b46SHugh Dickins munlock_vma_page(page); 12305ad64688SHugh Dickins if (!PageMlocked(kpage)) { 12315ad64688SHugh Dickins unlock_page(page); 12325ad64688SHugh Dickins lock_page(kpage); 12335ad64688SHugh Dickins mlock_vma_page(kpage); 12345ad64688SHugh Dickins page = kpage; /* for final unlock */ 12355ad64688SHugh Dickins } 12365ad64688SHugh Dickins } 123773848b46SHugh Dickins 1238f765f540SKirill A. Shutemov out_unlock: 12398dd3557aSHugh Dickins unlock_page(page); 124031dbd01fSIzik Eidus out: 124131dbd01fSIzik Eidus return err; 124231dbd01fSIzik Eidus } 124331dbd01fSIzik Eidus 124431dbd01fSIzik Eidus /* 124581464e30SHugh Dickins * try_to_merge_with_ksm_page - like try_to_merge_two_pages, 124681464e30SHugh Dickins * but no new kernel page is allocated: kpage must already be a ksm page. 12478dd3557aSHugh Dickins * 12488dd3557aSHugh Dickins * This function returns 0 if the pages were merged, -EFAULT otherwise. 124981464e30SHugh Dickins */ 12508dd3557aSHugh Dickins static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item, 12518dd3557aSHugh Dickins struct page *page, struct page *kpage) 125281464e30SHugh Dickins { 12538dd3557aSHugh Dickins struct mm_struct *mm = rmap_item->mm; 125481464e30SHugh Dickins struct vm_area_struct *vma; 125581464e30SHugh Dickins int err = -EFAULT; 125681464e30SHugh Dickins 12578dd3557aSHugh Dickins down_read(&mm->mmap_sem); 125885c6e8ddSAndrea Arcangeli vma = find_mergeable_vma(mm, rmap_item->address); 125985c6e8ddSAndrea Arcangeli if (!vma) 12609ba69294SHugh Dickins goto out; 12619ba69294SHugh Dickins 12628dd3557aSHugh Dickins err = try_to_merge_one_page(vma, page, kpage); 1263db114b83SHugh Dickins if (err) 1264db114b83SHugh Dickins goto out; 1265db114b83SHugh Dickins 1266bc56620bSHugh Dickins /* Unstable nid is in union with stable anon_vma: remove first */ 1267bc56620bSHugh Dickins remove_rmap_item_from_tree(rmap_item); 1268bc56620bSHugh Dickins 1269db114b83SHugh Dickins /* Must get reference to anon_vma while still holding mmap_sem */ 12709e60109fSPeter Zijlstra rmap_item->anon_vma = vma->anon_vma; 12719e60109fSPeter Zijlstra get_anon_vma(vma->anon_vma); 127281464e30SHugh Dickins out: 12738dd3557aSHugh Dickins up_read(&mm->mmap_sem); 127481464e30SHugh Dickins return err; 127581464e30SHugh Dickins } 127681464e30SHugh Dickins 127781464e30SHugh Dickins /* 127831dbd01fSIzik Eidus * try_to_merge_two_pages - take two identical pages and prepare them 127931dbd01fSIzik Eidus * to be merged into one page. 128031dbd01fSIzik Eidus * 12818dd3557aSHugh Dickins * This function returns the kpage if we successfully merged two identical 12828dd3557aSHugh Dickins * pages into one ksm page, NULL otherwise. 
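 *
 * Internally this takes two calls to try_to_merge_with_ksm_page(): the
 * first, passed a NULL kpage, upgrades page itself into the ksm page;
 * the second merges tree_page into it. If the second step fails, the
 * half-merged page is broken out again with break_cow() (see the body
 * below).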
128331dbd01fSIzik Eidus * 128480e14822SHugh Dickins * Note that this function upgrades page to ksm page: if one of the pages 128531dbd01fSIzik Eidus * is already a ksm page, try_to_merge_with_ksm_page should be used. 128631dbd01fSIzik Eidus */ 12878dd3557aSHugh Dickins static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item, 12888dd3557aSHugh Dickins struct page *page, 12898dd3557aSHugh Dickins struct rmap_item *tree_rmap_item, 12908dd3557aSHugh Dickins struct page *tree_page) 129131dbd01fSIzik Eidus { 129280e14822SHugh Dickins int err; 129331dbd01fSIzik Eidus 129480e14822SHugh Dickins err = try_to_merge_with_ksm_page(rmap_item, page, NULL); 129531dbd01fSIzik Eidus if (!err) { 12968dd3557aSHugh Dickins err = try_to_merge_with_ksm_page(tree_rmap_item, 129780e14822SHugh Dickins tree_page, page); 129831dbd01fSIzik Eidus /* 129981464e30SHugh Dickins * If that fails, we have a ksm page with only one pte 130081464e30SHugh Dickins * pointing to it: so break it. 130131dbd01fSIzik Eidus */ 13024035c07aSHugh Dickins if (err) 13038dd3557aSHugh Dickins break_cow(rmap_item); 130431dbd01fSIzik Eidus } 130580e14822SHugh Dickins return err ? NULL : page; 130631dbd01fSIzik Eidus } 130731dbd01fSIzik Eidus 13082c653d0eSAndrea Arcangeli static __always_inline 13092c653d0eSAndrea Arcangeli bool __is_page_sharing_candidate(struct stable_node *stable_node, int offset) 13102c653d0eSAndrea Arcangeli { 13112c653d0eSAndrea Arcangeli VM_BUG_ON(stable_node->rmap_hlist_len < 0); 13122c653d0eSAndrea Arcangeli /* 13132c653d0eSAndrea Arcangeli * Check that at least one mapping still exists, otherwise 13142c653d0eSAndrea Arcangeli * there's not much point in merging and sharing with this 13152c653d0eSAndrea Arcangeli * stable_node, as the underlying tree_page of the other 13162c653d0eSAndrea Arcangeli * sharer is going to be freed soon.
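 *
 * For example, with the default ksm_max_page_sharing of 256, a dup
 * whose rmap_hlist_len is 255 still qualifies at offset 0 (it can
 * take the merge under way) but not at offset 1 (it has no headroom
 * for a further merge on top of that one).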
13172c653d0eSAndrea Arcangeli */ 13182c653d0eSAndrea Arcangeli return stable_node->rmap_hlist_len && 13192c653d0eSAndrea Arcangeli stable_node->rmap_hlist_len + offset < ksm_max_page_sharing; 13202c653d0eSAndrea Arcangeli } 13212c653d0eSAndrea Arcangeli 13222c653d0eSAndrea Arcangeli static __always_inline 13232c653d0eSAndrea Arcangeli bool is_page_sharing_candidate(struct stable_node *stable_node) 13242c653d0eSAndrea Arcangeli { 13252c653d0eSAndrea Arcangeli return __is_page_sharing_candidate(stable_node, 0); 13262c653d0eSAndrea Arcangeli } 13272c653d0eSAndrea Arcangeli 1328c01f0b54SColin Ian King static struct page *stable_node_dup(struct stable_node **_stable_node_dup, 13298dc5ffcdSAndrea Arcangeli struct stable_node **_stable_node, 13302c653d0eSAndrea Arcangeli struct rb_root *root, 13312c653d0eSAndrea Arcangeli bool prune_stale_stable_nodes) 13322c653d0eSAndrea Arcangeli { 1333b4fecc67SAndrea Arcangeli struct stable_node *dup, *found = NULL, *stable_node = *_stable_node; 13342c653d0eSAndrea Arcangeli struct hlist_node *hlist_safe; 13358dc5ffcdSAndrea Arcangeli struct page *_tree_page, *tree_page = NULL; 13362c653d0eSAndrea Arcangeli int nr = 0; 13372c653d0eSAndrea Arcangeli int found_rmap_hlist_len; 13382c653d0eSAndrea Arcangeli 13392c653d0eSAndrea Arcangeli if (!prune_stale_stable_nodes || 13402c653d0eSAndrea Arcangeli time_before(jiffies, stable_node->chain_prune_time + 13412c653d0eSAndrea Arcangeli msecs_to_jiffies( 13422c653d0eSAndrea Arcangeli ksm_stable_node_chains_prune_millisecs))) 13432c653d0eSAndrea Arcangeli prune_stale_stable_nodes = false; 13442c653d0eSAndrea Arcangeli else 13452c653d0eSAndrea Arcangeli stable_node->chain_prune_time = jiffies; 13462c653d0eSAndrea Arcangeli 13472c653d0eSAndrea Arcangeli hlist_for_each_entry_safe(dup, hlist_safe, 13482c653d0eSAndrea Arcangeli &stable_node->hlist, hlist_dup) { 13492c653d0eSAndrea Arcangeli cond_resched(); 13502c653d0eSAndrea Arcangeli /* 13512c653d0eSAndrea Arcangeli * We must walk all stable_node_dup to prune the stale 13522c653d0eSAndrea Arcangeli * stable nodes during lookup. 13532c653d0eSAndrea Arcangeli * 13542c653d0eSAndrea Arcangeli * get_ksm_page can drop the nodes from the 13552c653d0eSAndrea Arcangeli * stable_node->hlist if they point to freed pages 13562c653d0eSAndrea Arcangeli * (that's why we do a _safe walk). The "dup" 13572c653d0eSAndrea Arcangeli * stable_node parameter itself will be freed from 13582c653d0eSAndrea Arcangeli * under us if it returns NULL. 
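 *
 * (hlist_for_each_entry_safe() fetches the next pointer before the
 * loop body runs, which is what makes it legal for get_ksm_page() to
 * free "dup" while the walk is in progress.)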
13592c653d0eSAndrea Arcangeli */ 13602c653d0eSAndrea Arcangeli _tree_page = get_ksm_page(dup, false); 13612c653d0eSAndrea Arcangeli if (!_tree_page) 13622c653d0eSAndrea Arcangeli continue; 13632c653d0eSAndrea Arcangeli nr += 1; 13642c653d0eSAndrea Arcangeli if (is_page_sharing_candidate(dup)) { 13652c653d0eSAndrea Arcangeli if (!found || 13662c653d0eSAndrea Arcangeli dup->rmap_hlist_len > found_rmap_hlist_len) { 13672c653d0eSAndrea Arcangeli if (found) 13688dc5ffcdSAndrea Arcangeli put_page(tree_page); 13692c653d0eSAndrea Arcangeli found = dup; 13702c653d0eSAndrea Arcangeli found_rmap_hlist_len = found->rmap_hlist_len; 13718dc5ffcdSAndrea Arcangeli tree_page = _tree_page; 13722c653d0eSAndrea Arcangeli 13738dc5ffcdSAndrea Arcangeli /* skip put_page for found dup */ 13742c653d0eSAndrea Arcangeli if (!prune_stale_stable_nodes) 13752c653d0eSAndrea Arcangeli break; 13762c653d0eSAndrea Arcangeli continue; 13772c653d0eSAndrea Arcangeli } 13782c653d0eSAndrea Arcangeli } 13792c653d0eSAndrea Arcangeli put_page(_tree_page); 13802c653d0eSAndrea Arcangeli } 13812c653d0eSAndrea Arcangeli 138280b18dfaSAndrea Arcangeli if (found) { 13832c653d0eSAndrea Arcangeli /* 138480b18dfaSAndrea Arcangeli * nr is counting all dups in the chain only if 138580b18dfaSAndrea Arcangeli * prune_stale_stable_nodes is true, otherwise we may 138680b18dfaSAndrea Arcangeli * break the loop at nr == 1 even if there are 138780b18dfaSAndrea Arcangeli * multiple entries. 13882c653d0eSAndrea Arcangeli */ 138980b18dfaSAndrea Arcangeli if (prune_stale_stable_nodes && nr == 1) { 13902c653d0eSAndrea Arcangeli /* 13912c653d0eSAndrea Arcangeli * If there's not just one entry it would 13922c653d0eSAndrea Arcangeli * corrupt memory, better BUG_ON. In KSM 13932c653d0eSAndrea Arcangeli * context with no lock held it's not even 13942c653d0eSAndrea Arcangeli * fatal. 13952c653d0eSAndrea Arcangeli */ 13962c653d0eSAndrea Arcangeli BUG_ON(stable_node->hlist.first->next); 13972c653d0eSAndrea Arcangeli 13982c653d0eSAndrea Arcangeli /* 13992c653d0eSAndrea Arcangeli * There's just one entry and it is below the 14002c653d0eSAndrea Arcangeli * deduplication limit so drop the chain. 14012c653d0eSAndrea Arcangeli */ 14022c653d0eSAndrea Arcangeli rb_replace_node(&stable_node->node, &found->node, 14032c653d0eSAndrea Arcangeli root); 14042c653d0eSAndrea Arcangeli free_stable_node(stable_node); 14052c653d0eSAndrea Arcangeli ksm_stable_node_chains--; 14062c653d0eSAndrea Arcangeli ksm_stable_node_dups--; 1407b4fecc67SAndrea Arcangeli /* 14080ba1d0f7SAndrea Arcangeli * NOTE: the caller depends on the stable_node 14090ba1d0f7SAndrea Arcangeli * to be equal to stable_node_dup if the chain 14100ba1d0f7SAndrea Arcangeli * was collapsed. 1411b4fecc67SAndrea Arcangeli */ 14120ba1d0f7SAndrea Arcangeli *_stable_node = found; 14130ba1d0f7SAndrea Arcangeli /* 14140ba1d0f7SAndrea Arcangeli * Just for robustness, as stable_node is 14150ba1d0f7SAndrea Arcangeli * otherwise left dangling after the 14160ba1d0f7SAndrea Arcangeli * free_stable_node() above; the compiler 14170ba1d0f7SAndrea Arcangeli * shall optimize the store away at build time.
14180ba1d0f7SAndrea Arcangeli */ 14190ba1d0f7SAndrea Arcangeli stable_node = NULL; 142080b18dfaSAndrea Arcangeli } else if (stable_node->hlist.first != &found->hlist_dup && 142180b18dfaSAndrea Arcangeli __is_page_sharing_candidate(found, 1)) { 14222c653d0eSAndrea Arcangeli /* 142380b18dfaSAndrea Arcangeli * If the found stable_node dup can accept one 142480b18dfaSAndrea Arcangeli * more future merge (in addition to the one 142580b18dfaSAndrea Arcangeli * that is underway) and is not at the head of 142680b18dfaSAndrea Arcangeli * the chain, put it there so next search will 142780b18dfaSAndrea Arcangeli * be quicker in the !prune_stale_stable_nodes 142880b18dfaSAndrea Arcangeli * case. 142980b18dfaSAndrea Arcangeli * 143080b18dfaSAndrea Arcangeli * NOTE: it would be inaccurate to use nr > 1 143180b18dfaSAndrea Arcangeli * instead of checking the hlist.first pointer 143280b18dfaSAndrea Arcangeli * directly, because in the 143380b18dfaSAndrea Arcangeli * prune_stale_stable_nodes case "nr" isn't 143480b18dfaSAndrea Arcangeli * the position of the found dup in the chain, 143580b18dfaSAndrea Arcangeli * but the total number of dups in the chain. 14362c653d0eSAndrea Arcangeli */ 14372c653d0eSAndrea Arcangeli hlist_del(&found->hlist_dup); 14382c653d0eSAndrea Arcangeli hlist_add_head(&found->hlist_dup, 14392c653d0eSAndrea Arcangeli &stable_node->hlist); 14402c653d0eSAndrea Arcangeli } 14412c653d0eSAndrea Arcangeli } 14422c653d0eSAndrea Arcangeli 14438dc5ffcdSAndrea Arcangeli *_stable_node_dup = found; 14448dc5ffcdSAndrea Arcangeli return tree_page; 14452c653d0eSAndrea Arcangeli } 14462c653d0eSAndrea Arcangeli 14472c653d0eSAndrea Arcangeli static struct stable_node *stable_node_dup_any(struct stable_node *stable_node, 14482c653d0eSAndrea Arcangeli struct rb_root *root) 14492c653d0eSAndrea Arcangeli { 14502c653d0eSAndrea Arcangeli if (!is_stable_node_chain(stable_node)) 14512c653d0eSAndrea Arcangeli return stable_node; 14522c653d0eSAndrea Arcangeli if (hlist_empty(&stable_node->hlist)) { 14532c653d0eSAndrea Arcangeli free_stable_node_chain(stable_node, root); 14542c653d0eSAndrea Arcangeli return NULL; 14552c653d0eSAndrea Arcangeli } 14562c653d0eSAndrea Arcangeli return hlist_entry(stable_node->hlist.first, 14572c653d0eSAndrea Arcangeli typeof(*stable_node), hlist_dup); 14582c653d0eSAndrea Arcangeli } 14592c653d0eSAndrea Arcangeli 14608dc5ffcdSAndrea Arcangeli /* 14618dc5ffcdSAndrea Arcangeli * Like for get_ksm_page, this function can free the *_stable_node and 14628dc5ffcdSAndrea Arcangeli * *_stable_node_dup if the returned tree_page is NULL. 14638dc5ffcdSAndrea Arcangeli * 14648dc5ffcdSAndrea Arcangeli * It can also free and overwrite *_stable_node with the found 14658dc5ffcdSAndrea Arcangeli * stable_node_dup if the chain is collapsed (in which case 14668dc5ffcdSAndrea Arcangeli * *_stable_node will be equal to *_stable_node_dup like if the chain 14678dc5ffcdSAndrea Arcangeli * never existed). It's up to the caller to verify tree_page is not 14688dc5ffcdSAndrea Arcangeli * NULL before dereferencing *_stable_node or *_stable_node_dup. 14698dc5ffcdSAndrea Arcangeli * 14708dc5ffcdSAndrea Arcangeli * *_stable_node_dup is really a second output parameter of this 14718dc5ffcdSAndrea Arcangeli * function and will be overwritten in all cases, the caller doesn't 14728dc5ffcdSAndrea Arcangeli * need to initialize it. 
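 *
 * The two wrappers below only fix the prune_stale_stable_nodes
 * argument: chain_prune() passes true and is used on the
 * stable_tree_search() path; chain() passes false and is used by
 * stable_tree_insert(), where nothing is pruned, so the chain cannot
 * be collapsed and s_n cannot change under the caller (as its
 * VM_BUG_ON asserts).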
14738dc5ffcdSAndrea Arcangeli */ 14748dc5ffcdSAndrea Arcangeli static struct page *__stable_node_chain(struct stable_node **_stable_node_dup, 14758dc5ffcdSAndrea Arcangeli struct stable_node **_stable_node, 14762c653d0eSAndrea Arcangeli struct rb_root *root, 14772c653d0eSAndrea Arcangeli bool prune_stale_stable_nodes) 14782c653d0eSAndrea Arcangeli { 1479b4fecc67SAndrea Arcangeli struct stable_node *stable_node = *_stable_node; 14802c653d0eSAndrea Arcangeli if (!is_stable_node_chain(stable_node)) { 14812c653d0eSAndrea Arcangeli if (is_page_sharing_candidate(stable_node)) { 14828dc5ffcdSAndrea Arcangeli *_stable_node_dup = stable_node; 14838dc5ffcdSAndrea Arcangeli return get_ksm_page(stable_node, false); 14842c653d0eSAndrea Arcangeli } 14858dc5ffcdSAndrea Arcangeli /* 14868dc5ffcdSAndrea Arcangeli * _stable_node_dup set to NULL means the stable_node 14878dc5ffcdSAndrea Arcangeli * reached the ksm_max_page_sharing limit. 14888dc5ffcdSAndrea Arcangeli */ 14898dc5ffcdSAndrea Arcangeli *_stable_node_dup = NULL; 14902c653d0eSAndrea Arcangeli return NULL; 14912c653d0eSAndrea Arcangeli } 14928dc5ffcdSAndrea Arcangeli return stable_node_dup(_stable_node_dup, _stable_node, root, 14932c653d0eSAndrea Arcangeli prune_stale_stable_nodes); 14942c653d0eSAndrea Arcangeli } 14952c653d0eSAndrea Arcangeli 14968dc5ffcdSAndrea Arcangeli static __always_inline struct page *chain_prune(struct stable_node **s_n_d, 14978dc5ffcdSAndrea Arcangeli struct stable_node **s_n, 14982c653d0eSAndrea Arcangeli struct rb_root *root) 14992c653d0eSAndrea Arcangeli { 15008dc5ffcdSAndrea Arcangeli return __stable_node_chain(s_n_d, s_n, root, true); 15012c653d0eSAndrea Arcangeli } 15022c653d0eSAndrea Arcangeli 15038dc5ffcdSAndrea Arcangeli static __always_inline struct page *chain(struct stable_node **s_n_d, 15048dc5ffcdSAndrea Arcangeli struct stable_node *s_n, 15052c653d0eSAndrea Arcangeli struct rb_root *root) 15062c653d0eSAndrea Arcangeli { 15078dc5ffcdSAndrea Arcangeli struct stable_node *old_stable_node = s_n; 15088dc5ffcdSAndrea Arcangeli struct page *tree_page; 15098dc5ffcdSAndrea Arcangeli 15108dc5ffcdSAndrea Arcangeli tree_page = __stable_node_chain(s_n_d, &s_n, root, false); 15118dc5ffcdSAndrea Arcangeli /* not pruning dups so s_n cannot have changed */ 15128dc5ffcdSAndrea Arcangeli VM_BUG_ON(s_n != old_stable_node); 15138dc5ffcdSAndrea Arcangeli return tree_page; 15142c653d0eSAndrea Arcangeli } 15152c653d0eSAndrea Arcangeli 151631dbd01fSIzik Eidus /* 15178dd3557aSHugh Dickins * stable_tree_search - search for page inside the stable tree 151831dbd01fSIzik Eidus * 151931dbd01fSIzik Eidus * This function checks if there is a page inside the stable tree 152031dbd01fSIzik Eidus * with identical content to the page that we are scanning right now. 152131dbd01fSIzik Eidus * 15227b6ba2c7SHugh Dickins * This function returns the stable tree node of identical content if found, 152331dbd01fSIzik Eidus * NULL otherwise. 
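 *
 * (More precisely, the value returned is the ksm page carrying that
 * identical content, with a reference already taken for the caller to
 * put_page(), rather than the stable_node itself.)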
152431dbd01fSIzik Eidus */ 152562b61f61SHugh Dickins static struct page *stable_tree_search(struct page *page) 152631dbd01fSIzik Eidus { 152790bd6fd3SPetr Holasek int nid; 1528ef53d16cSHugh Dickins struct rb_root *root; 15294146d2d6SHugh Dickins struct rb_node **new; 15304146d2d6SHugh Dickins struct rb_node *parent; 15312c653d0eSAndrea Arcangeli struct stable_node *stable_node, *stable_node_dup, *stable_node_any; 15324146d2d6SHugh Dickins struct stable_node *page_node; 153331dbd01fSIzik Eidus 15344146d2d6SHugh Dickins page_node = page_stable_node(page); 15354146d2d6SHugh Dickins if (page_node && page_node->head != &migrate_nodes) { 15364146d2d6SHugh Dickins /* ksm page forked */ 153708beca44SHugh Dickins get_page(page); 153862b61f61SHugh Dickins return page; 153908beca44SHugh Dickins } 154008beca44SHugh Dickins 154190bd6fd3SPetr Holasek nid = get_kpfn_nid(page_to_pfn(page)); 1542ef53d16cSHugh Dickins root = root_stable_tree + nid; 15434146d2d6SHugh Dickins again: 1544ef53d16cSHugh Dickins new = &root->rb_node; 15454146d2d6SHugh Dickins parent = NULL; 154690bd6fd3SPetr Holasek 15474146d2d6SHugh Dickins while (*new) { 15484035c07aSHugh Dickins struct page *tree_page; 154931dbd01fSIzik Eidus int ret; 155031dbd01fSIzik Eidus 155131dbd01fSIzik Eidus cond_resched(); 15524146d2d6SHugh Dickins stable_node = rb_entry(*new, struct stable_node, node); 15532c653d0eSAndrea Arcangeli stable_node_any = NULL; 15548dc5ffcdSAndrea Arcangeli tree_page = chain_prune(&stable_node_dup, &stable_node, root); 1555b4fecc67SAndrea Arcangeli /* 1556b4fecc67SAndrea Arcangeli * NOTE: stable_node may have been freed by 1557b4fecc67SAndrea Arcangeli * chain_prune() if the returned stable_node_dup is 1558b4fecc67SAndrea Arcangeli * not NULL. stable_node_dup may have been inserted in 1559b4fecc67SAndrea Arcangeli * the rbtree instead as a regular stable_node (in 1560b4fecc67SAndrea Arcangeli * order to collapse the stable_node chain if a single 15610ba1d0f7SAndrea Arcangeli * stable_node dup was found in it). In such a case the 15620ba1d0f7SAndrea Arcangeli * stable_node is overwritten by the callee to point 15630ba1d0f7SAndrea Arcangeli * to the stable_node_dup that was collapsed in the 15640ba1d0f7SAndrea Arcangeli * stable rbtree and stable_node will be equal to 15650ba1d0f7SAndrea Arcangeli * stable_node_dup like if the chain never existed. 1566b4fecc67SAndrea Arcangeli */ 15672c653d0eSAndrea Arcangeli if (!stable_node_dup) { 15682c653d0eSAndrea Arcangeli /* 15692c653d0eSAndrea Arcangeli * Either all stable_node dups were full in 15702c653d0eSAndrea Arcangeli * this stable_node chain, or this chain was 15712c653d0eSAndrea Arcangeli * empty and should be rb_erased. 15722c653d0eSAndrea Arcangeli */ 15732c653d0eSAndrea Arcangeli stable_node_any = stable_node_dup_any(stable_node, 15742c653d0eSAndrea Arcangeli root); 15752c653d0eSAndrea Arcangeli if (!stable_node_any) { 15762c653d0eSAndrea Arcangeli /* rb_erase just run */ 15772c653d0eSAndrea Arcangeli goto again; 15782c653d0eSAndrea Arcangeli } 15792c653d0eSAndrea Arcangeli /* 15802c653d0eSAndrea Arcangeli * Take any of the stable_node dups page of 15812c653d0eSAndrea Arcangeli * this stable_node chain to let the tree walk 15822c653d0eSAndrea Arcangeli * continue. All KSM pages belonging to the 15832c653d0eSAndrea Arcangeli * stable_node dups in a stable_node chain 15842c653d0eSAndrea Arcangeli * have the same content and they're 15852c653d0eSAndrea Arcangeli * wrprotected at all times. Any will work 15862c653d0eSAndrea Arcangeli * fine to continue the walk.
15872c653d0eSAndrea Arcangeli */ 15882c653d0eSAndrea Arcangeli tree_page = get_ksm_page(stable_node_any, false); 15892c653d0eSAndrea Arcangeli } 15902c653d0eSAndrea Arcangeli VM_BUG_ON(!stable_node_dup ^ !!stable_node_any); 1591f2e5ff85SAndrea Arcangeli if (!tree_page) { 1592f2e5ff85SAndrea Arcangeli /* 1593f2e5ff85SAndrea Arcangeli * If we walked over a stale stable_node, 1594f2e5ff85SAndrea Arcangeli * get_ksm_page() will call rb_erase() and it 1595f2e5ff85SAndrea Arcangeli * may rebalance the tree from under us. So 1596f2e5ff85SAndrea Arcangeli * restart the search from scratch. Returning 1597f2e5ff85SAndrea Arcangeli * NULL would be safe too, but we'd generate 1598f2e5ff85SAndrea Arcangeli * false negative insertions just because some 1599f2e5ff85SAndrea Arcangeli * stable_node was stale. 1600f2e5ff85SAndrea Arcangeli */ 1601f2e5ff85SAndrea Arcangeli goto again; 1602f2e5ff85SAndrea Arcangeli } 160331dbd01fSIzik Eidus 16044035c07aSHugh Dickins ret = memcmp_pages(page, tree_page); 1605c8d6553bSHugh Dickins put_page(tree_page); 160631dbd01fSIzik Eidus 16074146d2d6SHugh Dickins parent = *new; 1608c8d6553bSHugh Dickins if (ret < 0) 16094146d2d6SHugh Dickins new = &parent->rb_left; 1610c8d6553bSHugh Dickins else if (ret > 0) 16114146d2d6SHugh Dickins new = &parent->rb_right; 1612c8d6553bSHugh Dickins else { 16132c653d0eSAndrea Arcangeli if (page_node) { 16142c653d0eSAndrea Arcangeli VM_BUG_ON(page_node->head != &migrate_nodes); 16152c653d0eSAndrea Arcangeli /* 16162c653d0eSAndrea Arcangeli * Test if the migrated page should be merged 16172c653d0eSAndrea Arcangeli * into a stable node dup. If the mapcount is 16182c653d0eSAndrea Arcangeli * 1 we can migrate it with another KSM page 16192c653d0eSAndrea Arcangeli * without adding it to the chain. 16202c653d0eSAndrea Arcangeli */ 16212c653d0eSAndrea Arcangeli if (page_mapcount(page) > 1) 16222c653d0eSAndrea Arcangeli goto chain_append; 16232c653d0eSAndrea Arcangeli } 16242c653d0eSAndrea Arcangeli 16252c653d0eSAndrea Arcangeli if (!stable_node_dup) { 16262c653d0eSAndrea Arcangeli /* 16272c653d0eSAndrea Arcangeli * If the stable_node is a chain and 16282c653d0eSAndrea Arcangeli * we got a payload match in memcmp 16292c653d0eSAndrea Arcangeli * but we cannot merge the scanned 16302c653d0eSAndrea Arcangeli * page in any of the existing 16312c653d0eSAndrea Arcangeli * stable_node dups because they're 16322c653d0eSAndrea Arcangeli * all full, we need to wait the 16332c653d0eSAndrea Arcangeli * scanned page to find itself a match 16342c653d0eSAndrea Arcangeli * in the unstable tree to create a 16352c653d0eSAndrea Arcangeli * brand new KSM page to add later to 16362c653d0eSAndrea Arcangeli * the dups of this stable_node. 16372c653d0eSAndrea Arcangeli */ 16382c653d0eSAndrea Arcangeli return NULL; 16392c653d0eSAndrea Arcangeli } 16402c653d0eSAndrea Arcangeli 1641c8d6553bSHugh Dickins /* 1642c8d6553bSHugh Dickins * Lock and unlock the stable_node's page (which 1643c8d6553bSHugh Dickins * might already have been migrated) so that page 1644c8d6553bSHugh Dickins * migration is sure to notice its raised count. 1645c8d6553bSHugh Dickins * It would be more elegant to return stable_node 1646c8d6553bSHugh Dickins * than kpage, but that involves more changes. 
1647c8d6553bSHugh Dickins */ 16482c653d0eSAndrea Arcangeli tree_page = get_ksm_page(stable_node_dup, true); 16492c653d0eSAndrea Arcangeli if (unlikely(!tree_page)) 16502c653d0eSAndrea Arcangeli /* 16512c653d0eSAndrea Arcangeli * The tree may have been rebalanced, 16522c653d0eSAndrea Arcangeli * so re-evaluate parent and new. 16532c653d0eSAndrea Arcangeli */ 16542c653d0eSAndrea Arcangeli goto again; 1655c8d6553bSHugh Dickins unlock_page(tree_page); 16562c653d0eSAndrea Arcangeli 16572c653d0eSAndrea Arcangeli if (get_kpfn_nid(stable_node_dup->kpfn) != 16582c653d0eSAndrea Arcangeli NUMA(stable_node_dup->nid)) { 16594146d2d6SHugh Dickins put_page(tree_page); 16604146d2d6SHugh Dickins goto replace; 16614146d2d6SHugh Dickins } 166262b61f61SHugh Dickins return tree_page; 166331dbd01fSIzik Eidus } 1664c8d6553bSHugh Dickins } 166531dbd01fSIzik Eidus 16664146d2d6SHugh Dickins if (!page_node) 166731dbd01fSIzik Eidus return NULL; 16684146d2d6SHugh Dickins 16694146d2d6SHugh Dickins list_del(&page_node->list); 16704146d2d6SHugh Dickins DO_NUMA(page_node->nid = nid); 16714146d2d6SHugh Dickins rb_link_node(&page_node->node, parent, new); 1672ef53d16cSHugh Dickins rb_insert_color(&page_node->node, root); 16732c653d0eSAndrea Arcangeli out: 16742c653d0eSAndrea Arcangeli if (is_page_sharing_candidate(page_node)) { 16754146d2d6SHugh Dickins get_page(page); 16764146d2d6SHugh Dickins return page; 16772c653d0eSAndrea Arcangeli } else 16782c653d0eSAndrea Arcangeli return NULL; 16794146d2d6SHugh Dickins 16804146d2d6SHugh Dickins replace: 1681b4fecc67SAndrea Arcangeli /* 1682b4fecc67SAndrea Arcangeli * If stable_node was a chain and chain_prune collapsed it, 16830ba1d0f7SAndrea Arcangeli * stable_node has been updated to be the new regular 16840ba1d0f7SAndrea Arcangeli * stable_node. A collapse of the chain is indistinguishable 16850ba1d0f7SAndrea Arcangeli * from the case there was no chain in the stable 16860ba1d0f7SAndrea Arcangeli * rbtree. Otherwise stable_node is the chain and 16870ba1d0f7SAndrea Arcangeli * stable_node_dup is the dup to replace. 
1688b4fecc67SAndrea Arcangeli */ 16890ba1d0f7SAndrea Arcangeli if (stable_node_dup == stable_node) { 1690b4fecc67SAndrea Arcangeli VM_BUG_ON(is_stable_node_chain(stable_node_dup)); 1691b4fecc67SAndrea Arcangeli VM_BUG_ON(is_stable_node_dup(stable_node_dup)); 16922c653d0eSAndrea Arcangeli /* there is no chain */ 16934146d2d6SHugh Dickins if (page_node) { 16942c653d0eSAndrea Arcangeli VM_BUG_ON(page_node->head != &migrate_nodes); 16954146d2d6SHugh Dickins list_del(&page_node->list); 16964146d2d6SHugh Dickins DO_NUMA(page_node->nid = nid); 1697b4fecc67SAndrea Arcangeli rb_replace_node(&stable_node_dup->node, 1698b4fecc67SAndrea Arcangeli &page_node->node, 16992c653d0eSAndrea Arcangeli root); 17002c653d0eSAndrea Arcangeli if (is_page_sharing_candidate(page_node)) 17014146d2d6SHugh Dickins get_page(page); 17022c653d0eSAndrea Arcangeli else 17032c653d0eSAndrea Arcangeli page = NULL; 17044146d2d6SHugh Dickins } else { 1705b4fecc67SAndrea Arcangeli rb_erase(&stable_node_dup->node, root); 17064146d2d6SHugh Dickins page = NULL; 17074146d2d6SHugh Dickins } 17082c653d0eSAndrea Arcangeli } else { 17092c653d0eSAndrea Arcangeli VM_BUG_ON(!is_stable_node_chain(stable_node)); 17102c653d0eSAndrea Arcangeli __stable_node_dup_del(stable_node_dup); 17112c653d0eSAndrea Arcangeli if (page_node) { 17122c653d0eSAndrea Arcangeli VM_BUG_ON(page_node->head != &migrate_nodes); 17132c653d0eSAndrea Arcangeli list_del(&page_node->list); 17142c653d0eSAndrea Arcangeli DO_NUMA(page_node->nid = nid); 17152c653d0eSAndrea Arcangeli stable_node_chain_add_dup(page_node, stable_node); 17162c653d0eSAndrea Arcangeli if (is_page_sharing_candidate(page_node)) 17172c653d0eSAndrea Arcangeli get_page(page); 17182c653d0eSAndrea Arcangeli else 17192c653d0eSAndrea Arcangeli page = NULL; 17202c653d0eSAndrea Arcangeli } else { 17212c653d0eSAndrea Arcangeli page = NULL; 17222c653d0eSAndrea Arcangeli } 17232c653d0eSAndrea Arcangeli } 17242c653d0eSAndrea Arcangeli stable_node_dup->head = &migrate_nodes; 17252c653d0eSAndrea Arcangeli list_add(&stable_node_dup->list, stable_node_dup->head); 17264146d2d6SHugh Dickins return page; 17272c653d0eSAndrea Arcangeli 17282c653d0eSAndrea Arcangeli chain_append: 17292c653d0eSAndrea Arcangeli /* stable_node_dup could be null if it reached the limit */ 17302c653d0eSAndrea Arcangeli if (!stable_node_dup) 17312c653d0eSAndrea Arcangeli stable_node_dup = stable_node_any; 1732b4fecc67SAndrea Arcangeli /* 1733b4fecc67SAndrea Arcangeli * If stable_node was a chain and chain_prune collapsed it, 17340ba1d0f7SAndrea Arcangeli * stable_node has been updated to be the new regular 17350ba1d0f7SAndrea Arcangeli * stable_node. A collapse of the chain is indistinguishable 17360ba1d0f7SAndrea Arcangeli * from the case there was no chain in the stable 17370ba1d0f7SAndrea Arcangeli * rbtree. Otherwise stable_node is the chain and 17380ba1d0f7SAndrea Arcangeli * stable_node_dup is the dup to replace. 
1739b4fecc67SAndrea Arcangeli */ 17400ba1d0f7SAndrea Arcangeli if (stable_node_dup == stable_node) { 1741b4fecc67SAndrea Arcangeli VM_BUG_ON(is_stable_node_chain(stable_node_dup)); 1742b4fecc67SAndrea Arcangeli VM_BUG_ON(is_stable_node_dup(stable_node_dup)); 17432c653d0eSAndrea Arcangeli /* chain is missing so create it */ 17442c653d0eSAndrea Arcangeli stable_node = alloc_stable_node_chain(stable_node_dup, 17452c653d0eSAndrea Arcangeli root); 17462c653d0eSAndrea Arcangeli if (!stable_node) 17472c653d0eSAndrea Arcangeli return NULL; 17482c653d0eSAndrea Arcangeli } 17492c653d0eSAndrea Arcangeli /* 17502c653d0eSAndrea Arcangeli * Add this stable_node dup that was 17512c653d0eSAndrea Arcangeli * migrated to the stable_node chain 17522c653d0eSAndrea Arcangeli * of the current nid for this page 17532c653d0eSAndrea Arcangeli * content. 17542c653d0eSAndrea Arcangeli */ 1755b4fecc67SAndrea Arcangeli VM_BUG_ON(!is_stable_node_chain(stable_node)); 1756b4fecc67SAndrea Arcangeli VM_BUG_ON(!is_stable_node_dup(stable_node_dup)); 17572c653d0eSAndrea Arcangeli VM_BUG_ON(page_node->head != &migrate_nodes); 17582c653d0eSAndrea Arcangeli list_del(&page_node->list); 17592c653d0eSAndrea Arcangeli DO_NUMA(page_node->nid = nid); 17602c653d0eSAndrea Arcangeli stable_node_chain_add_dup(page_node, stable_node); 17612c653d0eSAndrea Arcangeli goto out; 176231dbd01fSIzik Eidus } 176331dbd01fSIzik Eidus 176431dbd01fSIzik Eidus /* 1765e850dcf5SHugh Dickins * stable_tree_insert - insert stable tree node pointing to new ksm page 176631dbd01fSIzik Eidus * into the stable tree. 176731dbd01fSIzik Eidus * 17687b6ba2c7SHugh Dickins * This function returns the stable tree node just allocated on success, 17697b6ba2c7SHugh Dickins * NULL otherwise. 177031dbd01fSIzik Eidus */ 17717b6ba2c7SHugh Dickins static struct stable_node *stable_tree_insert(struct page *kpage) 177231dbd01fSIzik Eidus { 177390bd6fd3SPetr Holasek int nid; 177490bd6fd3SPetr Holasek unsigned long kpfn; 1775ef53d16cSHugh Dickins struct rb_root *root; 177690bd6fd3SPetr Holasek struct rb_node **new; 1777f2e5ff85SAndrea Arcangeli struct rb_node *parent; 17782c653d0eSAndrea Arcangeli struct stable_node *stable_node, *stable_node_dup, *stable_node_any; 17792c653d0eSAndrea Arcangeli bool need_chain = false; 178031dbd01fSIzik Eidus 178190bd6fd3SPetr Holasek kpfn = page_to_pfn(kpage); 178290bd6fd3SPetr Holasek nid = get_kpfn_nid(kpfn); 1783ef53d16cSHugh Dickins root = root_stable_tree + nid; 1784f2e5ff85SAndrea Arcangeli again: 1785f2e5ff85SAndrea Arcangeli parent = NULL; 1786ef53d16cSHugh Dickins new = &root->rb_node; 178790bd6fd3SPetr Holasek 178831dbd01fSIzik Eidus while (*new) { 17894035c07aSHugh Dickins struct page *tree_page; 179031dbd01fSIzik Eidus int ret; 179131dbd01fSIzik Eidus 179231dbd01fSIzik Eidus cond_resched(); 179308beca44SHugh Dickins stable_node = rb_entry(*new, struct stable_node, node); 17942c653d0eSAndrea Arcangeli stable_node_any = NULL; 17958dc5ffcdSAndrea Arcangeli tree_page = chain(&stable_node_dup, stable_node, root); 17962c653d0eSAndrea Arcangeli if (!stable_node_dup) { 17972c653d0eSAndrea Arcangeli /* 17982c653d0eSAndrea Arcangeli * Either all stable_node dups were full in 17992c653d0eSAndrea Arcangeli * this stable_node chain, or this chain was 18002c653d0eSAndrea Arcangeli * empty and should be rb_erased. 
18012c653d0eSAndrea Arcangeli */ 18022c653d0eSAndrea Arcangeli stable_node_any = stable_node_dup_any(stable_node, 18032c653d0eSAndrea Arcangeli root); 18042c653d0eSAndrea Arcangeli if (!stable_node_any) { 18052c653d0eSAndrea Arcangeli /* rb_erase just run */ 18062c653d0eSAndrea Arcangeli goto again; 18072c653d0eSAndrea Arcangeli } 18082c653d0eSAndrea Arcangeli /* 18092c653d0eSAndrea Arcangeli * Take any of the stable_node dups page of 18102c653d0eSAndrea Arcangeli * this stable_node chain to let the tree walk 18112c653d0eSAndrea Arcangeli * continue. All KSM pages belonging to the 18122c653d0eSAndrea Arcangeli * stable_node dups in a stable_node chain 18132c653d0eSAndrea Arcangeli * have the same content and they're 18142c653d0eSAndrea Arcangeli * wrprotected at all times. Any will work 18152c653d0eSAndrea Arcangeli * fine to continue the walk. 18162c653d0eSAndrea Arcangeli */ 18172c653d0eSAndrea Arcangeli tree_page = get_ksm_page(stable_node_any, false); 18182c653d0eSAndrea Arcangeli } 18192c653d0eSAndrea Arcangeli VM_BUG_ON(!stable_node_dup ^ !!stable_node_any); 1820f2e5ff85SAndrea Arcangeli if (!tree_page) { 1821f2e5ff85SAndrea Arcangeli /* 1822f2e5ff85SAndrea Arcangeli * If we walked over a stale stable_node, 1823f2e5ff85SAndrea Arcangeli * get_ksm_page() will call rb_erase() and it 1824f2e5ff85SAndrea Arcangeli * may rebalance the tree from under us. So 1825f2e5ff85SAndrea Arcangeli * restart the search from scratch. Returning 1826f2e5ff85SAndrea Arcangeli * NULL would be safe too, but we'd generate 1827f2e5ff85SAndrea Arcangeli * false negative insertions just because some 1828f2e5ff85SAndrea Arcangeli * stable_node was stale. 1829f2e5ff85SAndrea Arcangeli */ 1830f2e5ff85SAndrea Arcangeli goto again; 1831f2e5ff85SAndrea Arcangeli } 183231dbd01fSIzik Eidus 18334035c07aSHugh Dickins ret = memcmp_pages(kpage, tree_page); 18344035c07aSHugh Dickins put_page(tree_page); 183531dbd01fSIzik Eidus 183631dbd01fSIzik Eidus parent = *new; 183731dbd01fSIzik Eidus if (ret < 0) 183831dbd01fSIzik Eidus new = &parent->rb_left; 183931dbd01fSIzik Eidus else if (ret > 0) 184031dbd01fSIzik Eidus new = &parent->rb_right; 184131dbd01fSIzik Eidus else { 18422c653d0eSAndrea Arcangeli need_chain = true; 18432c653d0eSAndrea Arcangeli break; 184431dbd01fSIzik Eidus } 184531dbd01fSIzik Eidus } 184631dbd01fSIzik Eidus 18472c653d0eSAndrea Arcangeli stable_node_dup = alloc_stable_node(); 18482c653d0eSAndrea Arcangeli if (!stable_node_dup) 18497b6ba2c7SHugh Dickins return NULL; 185031dbd01fSIzik Eidus 18512c653d0eSAndrea Arcangeli INIT_HLIST_HEAD(&stable_node_dup->hlist); 18522c653d0eSAndrea Arcangeli stable_node_dup->kpfn = kpfn; 18532c653d0eSAndrea Arcangeli set_page_stable_node(kpage, stable_node_dup); 18542c653d0eSAndrea Arcangeli stable_node_dup->rmap_hlist_len = 0; 18552c653d0eSAndrea Arcangeli DO_NUMA(stable_node_dup->nid = nid); 18562c653d0eSAndrea Arcangeli if (!need_chain) { 18572c653d0eSAndrea Arcangeli rb_link_node(&stable_node_dup->node, parent, new); 18582c653d0eSAndrea Arcangeli rb_insert_color(&stable_node_dup->node, root); 18592c653d0eSAndrea Arcangeli } else { 18602c653d0eSAndrea Arcangeli if (!is_stable_node_chain(stable_node)) { 18612c653d0eSAndrea Arcangeli struct stable_node *orig = stable_node; 18622c653d0eSAndrea Arcangeli /* chain is missing so create it */ 18632c653d0eSAndrea Arcangeli stable_node = alloc_stable_node_chain(orig, root); 18642c653d0eSAndrea Arcangeli if (!stable_node) { 18652c653d0eSAndrea Arcangeli free_stable_node(stable_node_dup); 18662c653d0eSAndrea Arcangeli return NULL; 
18672c653d0eSAndrea Arcangeli } 18682c653d0eSAndrea Arcangeli } 18692c653d0eSAndrea Arcangeli stable_node_chain_add_dup(stable_node_dup, stable_node); 18702c653d0eSAndrea Arcangeli } 187108beca44SHugh Dickins 18722c653d0eSAndrea Arcangeli return stable_node_dup; 187331dbd01fSIzik Eidus } 187431dbd01fSIzik Eidus 187531dbd01fSIzik Eidus /* 18768dd3557aSHugh Dickins * unstable_tree_search_insert - search for identical page, 18778dd3557aSHugh Dickins * else insert rmap_item into the unstable tree. 187831dbd01fSIzik Eidus * 187931dbd01fSIzik Eidus * This function searches for a page in the unstable tree identical to the 188031dbd01fSIzik Eidus * page currently being scanned; and if no identical page is found in the 188131dbd01fSIzik Eidus * tree, we insert rmap_item as a new object into the unstable tree. 188231dbd01fSIzik Eidus * 188331dbd01fSIzik Eidus * This function returns pointer to rmap_item found to be identical 188431dbd01fSIzik Eidus * to the currently scanned page, NULL otherwise. 188531dbd01fSIzik Eidus * 188631dbd01fSIzik Eidus * This function does both searching and inserting, because they share 188731dbd01fSIzik Eidus * the same walking algorithm in an rbtree. 188831dbd01fSIzik Eidus */ 18898dd3557aSHugh Dickins static 18908dd3557aSHugh Dickins struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item, 18918dd3557aSHugh Dickins struct page *page, 18928dd3557aSHugh Dickins struct page **tree_pagep) 189331dbd01fSIzik Eidus { 189490bd6fd3SPetr Holasek struct rb_node **new; 189590bd6fd3SPetr Holasek struct rb_root *root; 189631dbd01fSIzik Eidus struct rb_node *parent = NULL; 189790bd6fd3SPetr Holasek int nid; 189890bd6fd3SPetr Holasek 189990bd6fd3SPetr Holasek nid = get_kpfn_nid(page_to_pfn(page)); 1900ef53d16cSHugh Dickins root = root_unstable_tree + nid; 190190bd6fd3SPetr Holasek new = &root->rb_node; 190231dbd01fSIzik Eidus 190331dbd01fSIzik Eidus while (*new) { 190431dbd01fSIzik Eidus struct rmap_item *tree_rmap_item; 19058dd3557aSHugh Dickins struct page *tree_page; 190631dbd01fSIzik Eidus int ret; 190731dbd01fSIzik Eidus 1908d178f27fSHugh Dickins cond_resched(); 190931dbd01fSIzik Eidus tree_rmap_item = rb_entry(*new, struct rmap_item, node); 19108dd3557aSHugh Dickins tree_page = get_mergeable_page(tree_rmap_item); 1911c8f95ed1SAndrea Arcangeli if (!tree_page) 191231dbd01fSIzik Eidus return NULL; 191331dbd01fSIzik Eidus 191431dbd01fSIzik Eidus /* 19158dd3557aSHugh Dickins * Don't substitute a ksm page for a forked page. 191631dbd01fSIzik Eidus */ 19178dd3557aSHugh Dickins if (page == tree_page) { 19188dd3557aSHugh Dickins put_page(tree_page); 191931dbd01fSIzik Eidus return NULL; 192031dbd01fSIzik Eidus } 192131dbd01fSIzik Eidus 19228dd3557aSHugh Dickins ret = memcmp_pages(page, tree_page); 192331dbd01fSIzik Eidus 192431dbd01fSIzik Eidus parent = *new; 192531dbd01fSIzik Eidus if (ret < 0) { 19268dd3557aSHugh Dickins put_page(tree_page); 192731dbd01fSIzik Eidus new = &parent->rb_left; 192831dbd01fSIzik Eidus } else if (ret > 0) { 19298dd3557aSHugh Dickins put_page(tree_page); 193031dbd01fSIzik Eidus new = &parent->rb_right; 1931b599cbdfSHugh Dickins } else if (!ksm_merge_across_nodes && 1932b599cbdfSHugh Dickins page_to_nid(tree_page) != nid) { 1933b599cbdfSHugh Dickins /* 1934b599cbdfSHugh Dickins * If tree_page has been migrated to another NUMA node, 1935b599cbdfSHugh Dickins * it will be flushed out and put in the right unstable 1936b599cbdfSHugh Dickins * tree next time: only merge with it when across_nodes. 
1937b599cbdfSHugh Dickins */ 1938b599cbdfSHugh Dickins put_page(tree_page); 1939b599cbdfSHugh Dickins return NULL; 194031dbd01fSIzik Eidus } else { 19418dd3557aSHugh Dickins *tree_pagep = tree_page; 194231dbd01fSIzik Eidus return tree_rmap_item; 194331dbd01fSIzik Eidus } 194431dbd01fSIzik Eidus } 194531dbd01fSIzik Eidus 19467b6ba2c7SHugh Dickins rmap_item->address |= UNSTABLE_FLAG; 194731dbd01fSIzik Eidus rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); 1948e850dcf5SHugh Dickins DO_NUMA(rmap_item->nid = nid); 194931dbd01fSIzik Eidus rb_link_node(&rmap_item->node, parent, new); 195090bd6fd3SPetr Holasek rb_insert_color(&rmap_item->node, root); 195131dbd01fSIzik Eidus 1952473b0ce4SHugh Dickins ksm_pages_unshared++; 195331dbd01fSIzik Eidus return NULL; 195431dbd01fSIzik Eidus } 195531dbd01fSIzik Eidus 195631dbd01fSIzik Eidus /* 195731dbd01fSIzik Eidus * stable_tree_append - add another rmap_item to the linked list of 195831dbd01fSIzik Eidus * rmap_items hanging off a given node of the stable tree, all sharing 195931dbd01fSIzik Eidus * the same ksm page. 196031dbd01fSIzik Eidus */ 196131dbd01fSIzik Eidus static void stable_tree_append(struct rmap_item *rmap_item, 19622c653d0eSAndrea Arcangeli struct stable_node *stable_node, 19632c653d0eSAndrea Arcangeli bool max_page_sharing_bypass) 196431dbd01fSIzik Eidus { 19652c653d0eSAndrea Arcangeli /* 19662c653d0eSAndrea Arcangeli * rmap won't find this mapping if we don't insert the 19672c653d0eSAndrea Arcangeli * rmap_item in the right stable_node 19682c653d0eSAndrea Arcangeli * duplicate. page_migration could break later if rmap breaks, 19692c653d0eSAndrea Arcangeli * so we can as well crash here. We really need to check for 19702c653d0eSAndrea Arcangeli * rmap_hlist_len == STABLE_NODE_CHAIN, but we can as well check 19712c653d0eSAndrea Arcangeli * for other negative values: an underflow, if detected here 19722c653d0eSAndrea Arcangeli * for the first time (and not when decreasing rmap_hlist_len), 19732c653d0eSAndrea Arcangeli * would be a sign of memory corruption in the stable_node. 19742c653d0eSAndrea Arcangeli */ 19752c653d0eSAndrea Arcangeli BUG_ON(stable_node->rmap_hlist_len < 0); 19762c653d0eSAndrea Arcangeli 19772c653d0eSAndrea Arcangeli stable_node->rmap_hlist_len++; 19782c653d0eSAndrea Arcangeli if (!max_page_sharing_bypass) 19792c653d0eSAndrea Arcangeli /* possibly non-fatal but unexpected overflow, only warn */ 19802c653d0eSAndrea Arcangeli WARN_ON_ONCE(stable_node->rmap_hlist_len > 19812c653d0eSAndrea Arcangeli ksm_max_page_sharing); 19822c653d0eSAndrea Arcangeli 19837b6ba2c7SHugh Dickins rmap_item->head = stable_node; 198431dbd01fSIzik Eidus rmap_item->address |= STABLE_FLAG; 19857b6ba2c7SHugh Dickins hlist_add_head(&rmap_item->hlist, &stable_node->hlist); 1986e178dfdeSHugh Dickins 19877b6ba2c7SHugh Dickins if (rmap_item->hlist.next) 1988e178dfdeSHugh Dickins ksm_pages_sharing++; 19897b6ba2c7SHugh Dickins else 19907b6ba2c7SHugh Dickins ksm_pages_shared++; 199131dbd01fSIzik Eidus } 199231dbd01fSIzik Eidus 199331dbd01fSIzik Eidus /* 199481464e30SHugh Dickins * cmp_and_merge_page - first see if page can be merged into the stable tree; 199581464e30SHugh Dickins * if not, compare checksum to previous and if it's the same, see if page can 199681464e30SHugh Dickins * be inserted into the unstable tree, or merged with a page already there and 199781464e30SHugh Dickins * both transferred to the stable tree. 199831dbd01fSIzik Eidus * 199931dbd01fSIzik Eidus * @page: the page that we are searching an identical page for.
200031dbd01fSIzik Eidus * @rmap_item: the reverse mapping into the virtual address of this page 200131dbd01fSIzik Eidus */ 200231dbd01fSIzik Eidus static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) 200331dbd01fSIzik Eidus { 20044b22927fSKirill Tkhai struct mm_struct *mm = rmap_item->mm; 200531dbd01fSIzik Eidus struct rmap_item *tree_rmap_item; 20068dd3557aSHugh Dickins struct page *tree_page = NULL; 20077b6ba2c7SHugh Dickins struct stable_node *stable_node; 20088dd3557aSHugh Dickins struct page *kpage; 200931dbd01fSIzik Eidus unsigned int checksum; 201031dbd01fSIzik Eidus int err; 20112c653d0eSAndrea Arcangeli bool max_page_sharing_bypass = false; 201231dbd01fSIzik Eidus 20134146d2d6SHugh Dickins stable_node = page_stable_node(page); 20144146d2d6SHugh Dickins if (stable_node) { 20154146d2d6SHugh Dickins if (stable_node->head != &migrate_nodes && 20162c653d0eSAndrea Arcangeli get_kpfn_nid(READ_ONCE(stable_node->kpfn)) != 20172c653d0eSAndrea Arcangeli NUMA(stable_node->nid)) { 20182c653d0eSAndrea Arcangeli stable_node_dup_del(stable_node); 20194146d2d6SHugh Dickins stable_node->head = &migrate_nodes; 20204146d2d6SHugh Dickins list_add(&stable_node->list, stable_node->head); 20214146d2d6SHugh Dickins } 20224146d2d6SHugh Dickins if (stable_node->head != &migrate_nodes && 20234146d2d6SHugh Dickins rmap_item->head == stable_node) 20244146d2d6SHugh Dickins return; 20252c653d0eSAndrea Arcangeli /* 20262c653d0eSAndrea Arcangeli * If it's a KSM fork, allow it to go over the sharing limit 20272c653d0eSAndrea Arcangeli * without warnings. 20282c653d0eSAndrea Arcangeli */ 20292c653d0eSAndrea Arcangeli if (!is_page_sharing_candidate(stable_node)) 20302c653d0eSAndrea Arcangeli max_page_sharing_bypass = true; 20314146d2d6SHugh Dickins } 203231dbd01fSIzik Eidus 203331dbd01fSIzik Eidus /* We first start with searching the page inside the stable tree */ 203462b61f61SHugh Dickins kpage = stable_tree_search(page); 20354146d2d6SHugh Dickins if (kpage == page && rmap_item->head == stable_node) { 20364146d2d6SHugh Dickins put_page(kpage); 20374146d2d6SHugh Dickins return; 20384146d2d6SHugh Dickins } 20394146d2d6SHugh Dickins 20404146d2d6SHugh Dickins remove_rmap_item_from_tree(rmap_item); 20414146d2d6SHugh Dickins 204262b61f61SHugh Dickins if (kpage) { 204308beca44SHugh Dickins err = try_to_merge_with_ksm_page(rmap_item, page, kpage); 204431dbd01fSIzik Eidus if (!err) { 204531dbd01fSIzik Eidus /* 204631dbd01fSIzik Eidus * The page was successfully merged: 204731dbd01fSIzik Eidus * add its rmap_item to the stable tree. 204831dbd01fSIzik Eidus */ 20495ad64688SHugh Dickins lock_page(kpage); 20502c653d0eSAndrea Arcangeli stable_tree_append(rmap_item, page_stable_node(kpage), 20512c653d0eSAndrea Arcangeli max_page_sharing_bypass); 20525ad64688SHugh Dickins unlock_page(kpage); 205331dbd01fSIzik Eidus } 20548dd3557aSHugh Dickins put_page(kpage); 205531dbd01fSIzik Eidus return; 205631dbd01fSIzik Eidus } 205731dbd01fSIzik Eidus 205831dbd01fSIzik Eidus /* 20594035c07aSHugh Dickins * If the hash value of the page has changed from the last time 20604035c07aSHugh Dickins * we calculated it, this page is changing frequently: therefore we 20614035c07aSHugh Dickins * don't want to insert it in the unstable tree, and we don't want 20624035c07aSHugh Dickins * to waste our time searching for something identical to it there. 
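 *
 * In effect a page is only allowed into the unstable tree on a scan in
 * which its jhash2 checksum (see calc_checksum() above) matches the
 * value remembered from the previous scan of the same address.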
206331dbd01fSIzik Eidus */ 206431dbd01fSIzik Eidus checksum = calc_checksum(page); 206531dbd01fSIzik Eidus if (rmap_item->oldchecksum != checksum) { 206631dbd01fSIzik Eidus rmap_item->oldchecksum = checksum; 206731dbd01fSIzik Eidus return; 206831dbd01fSIzik Eidus } 206931dbd01fSIzik Eidus 2070e86c59b1SClaudio Imbrenda /* 2071e86c59b1SClaudio Imbrenda * Same checksum as an empty page. We attempt to merge it with the 2072e86c59b1SClaudio Imbrenda * appropriate zero page if the user enabled this via sysfs. 2073e86c59b1SClaudio Imbrenda */ 2074e86c59b1SClaudio Imbrenda if (ksm_use_zero_pages && (checksum == zero_checksum)) { 2075e86c59b1SClaudio Imbrenda struct vm_area_struct *vma; 2076e86c59b1SClaudio Imbrenda 20774b22927fSKirill Tkhai down_read(&mm->mmap_sem); 20784b22927fSKirill Tkhai vma = find_mergeable_vma(mm, rmap_item->address); 2079e86c59b1SClaudio Imbrenda err = try_to_merge_one_page(vma, page, 2080e86c59b1SClaudio Imbrenda ZERO_PAGE(rmap_item->address)); 20814b22927fSKirill Tkhai up_read(&mm->mmap_sem); 2082e86c59b1SClaudio Imbrenda /* 2083e86c59b1SClaudio Imbrenda * In case of failure, the page was not really empty, so we 2084e86c59b1SClaudio Imbrenda * need to continue. Otherwise we're done. 2085e86c59b1SClaudio Imbrenda */ 2086e86c59b1SClaudio Imbrenda if (!err) 2087e86c59b1SClaudio Imbrenda return; 2088e86c59b1SClaudio Imbrenda } 20898dd3557aSHugh Dickins tree_rmap_item = 20908dd3557aSHugh Dickins unstable_tree_search_insert(rmap_item, page, &tree_page); 209131dbd01fSIzik Eidus if (tree_rmap_item) { 209277da2ba0SClaudio Imbrenda bool split; 209377da2ba0SClaudio Imbrenda 20948dd3557aSHugh Dickins kpage = try_to_merge_two_pages(rmap_item, page, 20958dd3557aSHugh Dickins tree_rmap_item, tree_page); 209677da2ba0SClaudio Imbrenda /* 209777da2ba0SClaudio Imbrenda * If both pages we tried to merge belong to the same compound 209877da2ba0SClaudio Imbrenda * page, then we actually ended up increasing the reference 209977da2ba0SClaudio Imbrenda * count of the same compound page twice, and split_huge_page 210077da2ba0SClaudio Imbrenda * failed. 210177da2ba0SClaudio Imbrenda * Here we set a flag if that happened, and we use it later to 210277da2ba0SClaudio Imbrenda * try split_huge_page again. Since we call put_page right 210377da2ba0SClaudio Imbrenda * afterwards, the reference count will be correct and 210477da2ba0SClaudio Imbrenda * split_huge_page should succeed. 210577da2ba0SClaudio Imbrenda */ 210677da2ba0SClaudio Imbrenda split = PageTransCompound(page) 210777da2ba0SClaudio Imbrenda && compound_head(page) == compound_head(tree_page); 21088dd3557aSHugh Dickins put_page(tree_page); 21098dd3557aSHugh Dickins if (kpage) { 2110bc56620bSHugh Dickins /* 2111bc56620bSHugh Dickins * The pages were successfully merged: insert new 2112bc56620bSHugh Dickins * node in the stable tree and add both rmap_items. 
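 *
 * (stable_tree_insert() can still fail, e.g. if allocating the
 * stable_node runs out of memory; that case is handled further
 * below by break_cow() on both rmap_items.)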
2113bc56620bSHugh Dickins */ 21145ad64688SHugh Dickins lock_page(kpage); 21157b6ba2c7SHugh Dickins stable_node = stable_tree_insert(kpage); 21167b6ba2c7SHugh Dickins if (stable_node) { 21172c653d0eSAndrea Arcangeli stable_tree_append(tree_rmap_item, stable_node, 21182c653d0eSAndrea Arcangeli false); 21192c653d0eSAndrea Arcangeli stable_tree_append(rmap_item, stable_node, 21202c653d0eSAndrea Arcangeli false); 21217b6ba2c7SHugh Dickins } 21225ad64688SHugh Dickins unlock_page(kpage); 21237b6ba2c7SHugh Dickins 212431dbd01fSIzik Eidus /* 212531dbd01fSIzik Eidus * If we fail to insert the page into the stable tree, 212631dbd01fSIzik Eidus * we will have 2 virtual addresses that are pointing 212731dbd01fSIzik Eidus * to a ksm page left outside the stable tree, 212831dbd01fSIzik Eidus * in which case we need to break_cow on both. 212931dbd01fSIzik Eidus */ 21307b6ba2c7SHugh Dickins if (!stable_node) { 21318dd3557aSHugh Dickins break_cow(tree_rmap_item); 21328dd3557aSHugh Dickins break_cow(rmap_item); 213331dbd01fSIzik Eidus } 213477da2ba0SClaudio Imbrenda } else if (split) { 213577da2ba0SClaudio Imbrenda /* 213677da2ba0SClaudio Imbrenda * We are here if we tried to merge two pages and 213777da2ba0SClaudio Imbrenda * failed because they both belonged to the same 213877da2ba0SClaudio Imbrenda * compound page. We will split the page now, but no 213977da2ba0SClaudio Imbrenda * merging will take place. 214077da2ba0SClaudio Imbrenda * We do not want to add the cost of a full lock; if 214177da2ba0SClaudio Imbrenda * the page is locked, it is better to skip it and 214277da2ba0SClaudio Imbrenda * perhaps try again later. 214377da2ba0SClaudio Imbrenda */ 214477da2ba0SClaudio Imbrenda if (!trylock_page(page)) 214577da2ba0SClaudio Imbrenda return; 214677da2ba0SClaudio Imbrenda split_huge_page(page); 214777da2ba0SClaudio Imbrenda unlock_page(page); 214831dbd01fSIzik Eidus } 214931dbd01fSIzik Eidus } 215031dbd01fSIzik Eidus } 215131dbd01fSIzik Eidus 215231dbd01fSIzik Eidus static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot, 21536514d511SHugh Dickins struct rmap_item **rmap_list, 215431dbd01fSIzik Eidus unsigned long addr) 215531dbd01fSIzik Eidus { 215631dbd01fSIzik Eidus struct rmap_item *rmap_item; 215731dbd01fSIzik Eidus 21586514d511SHugh Dickins while (*rmap_list) { 21596514d511SHugh Dickins rmap_item = *rmap_list; 216093d17715SHugh Dickins if ((rmap_item->address & PAGE_MASK) == addr) 216131dbd01fSIzik Eidus return rmap_item; 216231dbd01fSIzik Eidus if (rmap_item->address > addr) 216331dbd01fSIzik Eidus break; 21646514d511SHugh Dickins *rmap_list = rmap_item->rmap_list; 216531dbd01fSIzik Eidus remove_rmap_item_from_tree(rmap_item); 216631dbd01fSIzik Eidus free_rmap_item(rmap_item); 216731dbd01fSIzik Eidus } 216831dbd01fSIzik Eidus 216931dbd01fSIzik Eidus rmap_item = alloc_rmap_item(); 217031dbd01fSIzik Eidus if (rmap_item) { 217131dbd01fSIzik Eidus /* It has already been zeroed */ 217231dbd01fSIzik Eidus rmap_item->mm = mm_slot->mm; 217331dbd01fSIzik Eidus rmap_item->address = addr; 21746514d511SHugh Dickins rmap_item->rmap_list = *rmap_list; 21756514d511SHugh Dickins *rmap_list = rmap_item; 217631dbd01fSIzik Eidus } 217731dbd01fSIzik Eidus return rmap_item; 217831dbd01fSIzik Eidus } 217931dbd01fSIzik Eidus 218031dbd01fSIzik Eidus static struct rmap_item *scan_get_next_rmap_item(struct page **page) 218131dbd01fSIzik Eidus { 218231dbd01fSIzik Eidus struct mm_struct *mm; 218331dbd01fSIzik Eidus struct mm_slot *slot; 218431dbd01fSIzik Eidus struct vm_area_struct *vma; 218531dbd01fSIzik Eidus 
struct rmap_item *rmap_item; 218690bd6fd3SPetr Holasek int nid; 218731dbd01fSIzik Eidus 218831dbd01fSIzik Eidus if (list_empty(&ksm_mm_head.mm_list)) 218931dbd01fSIzik Eidus return NULL; 219031dbd01fSIzik Eidus 219131dbd01fSIzik Eidus slot = ksm_scan.mm_slot; 219231dbd01fSIzik Eidus if (slot == &ksm_mm_head) { 21932919bfd0SHugh Dickins /* 21942919bfd0SHugh Dickins * A number of pages can hang around indefinitely on per-cpu 21952919bfd0SHugh Dickins * pagevecs, raised page count preventing write_protect_page 21962919bfd0SHugh Dickins * from merging them. Though it doesn't really matter much, 21972919bfd0SHugh Dickins * it is puzzling to see some stuck in pages_volatile until 21982919bfd0SHugh Dickins * other activity jostles them out, and they also prevented 21992919bfd0SHugh Dickins * LTP's KSM test from succeeding deterministically; so drain 22002919bfd0SHugh Dickins * them here (here rather than on entry to ksm_do_scan(), 22012919bfd0SHugh Dickins * so we don't IPI too often when pages_to_scan is set low). 22022919bfd0SHugh Dickins */ 22032919bfd0SHugh Dickins lru_add_drain_all(); 22042919bfd0SHugh Dickins 22054146d2d6SHugh Dickins /* 22064146d2d6SHugh Dickins * Whereas stale stable_nodes on the stable_tree itself 22074146d2d6SHugh Dickins * get pruned in the regular course of stable_tree_search(), 22084146d2d6SHugh Dickins * those moved out to the migrate_nodes list can accumulate: 22094146d2d6SHugh Dickins * so prune them once before each full scan. 22104146d2d6SHugh Dickins */ 22114146d2d6SHugh Dickins if (!ksm_merge_across_nodes) { 221203640418SGeliang Tang struct stable_node *stable_node, *next; 22134146d2d6SHugh Dickins struct page *page; 22144146d2d6SHugh Dickins 221503640418SGeliang Tang list_for_each_entry_safe(stable_node, next, 221603640418SGeliang Tang &migrate_nodes, list) { 22174146d2d6SHugh Dickins page = get_ksm_page(stable_node, false); 22184146d2d6SHugh Dickins if (page) 22194146d2d6SHugh Dickins put_page(page); 22204146d2d6SHugh Dickins cond_resched(); 22214146d2d6SHugh Dickins } 22224146d2d6SHugh Dickins } 22234146d2d6SHugh Dickins 2224ef53d16cSHugh Dickins for (nid = 0; nid < ksm_nr_node_ids; nid++) 222590bd6fd3SPetr Holasek root_unstable_tree[nid] = RB_ROOT; 222631dbd01fSIzik Eidus 222731dbd01fSIzik Eidus spin_lock(&ksm_mmlist_lock); 222831dbd01fSIzik Eidus slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list); 222931dbd01fSIzik Eidus ksm_scan.mm_slot = slot; 223031dbd01fSIzik Eidus spin_unlock(&ksm_mmlist_lock); 22312b472611SHugh Dickins /* 22322b472611SHugh Dickins * Although we tested list_empty() above, a racing __ksm_exit 22332b472611SHugh Dickins * of the last mm on the list may have removed it since then. 
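 * (Hence the re-check against &ksm_mm_head just below.)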
22342b472611SHugh Dickins */ 22352b472611SHugh Dickins if (slot == &ksm_mm_head) 22362b472611SHugh Dickins return NULL; 223731dbd01fSIzik Eidus next_mm: 223831dbd01fSIzik Eidus ksm_scan.address = 0; 22396514d511SHugh Dickins ksm_scan.rmap_list = &slot->rmap_list; 224031dbd01fSIzik Eidus } 224131dbd01fSIzik Eidus 224231dbd01fSIzik Eidus mm = slot->mm; 224331dbd01fSIzik Eidus down_read(&mm->mmap_sem); 22449ba69294SHugh Dickins if (ksm_test_exit(mm)) 22459ba69294SHugh Dickins vma = NULL; 22469ba69294SHugh Dickins else 22479ba69294SHugh Dickins vma = find_vma(mm, ksm_scan.address); 22489ba69294SHugh Dickins 22499ba69294SHugh Dickins for (; vma; vma = vma->vm_next) { 225031dbd01fSIzik Eidus if (!(vma->vm_flags & VM_MERGEABLE)) 225131dbd01fSIzik Eidus continue; 225231dbd01fSIzik Eidus if (ksm_scan.address < vma->vm_start) 225331dbd01fSIzik Eidus ksm_scan.address = vma->vm_start; 225431dbd01fSIzik Eidus if (!vma->anon_vma) 225531dbd01fSIzik Eidus ksm_scan.address = vma->vm_end; 225631dbd01fSIzik Eidus 225731dbd01fSIzik Eidus while (ksm_scan.address < vma->vm_end) { 22589ba69294SHugh Dickins if (ksm_test_exit(mm)) 22599ba69294SHugh Dickins break; 226031dbd01fSIzik Eidus *page = follow_page(vma, ksm_scan.address, FOLL_GET); 226121ae5b01SAndrea Arcangeli if (IS_ERR_OR_NULL(*page)) { 226221ae5b01SAndrea Arcangeli ksm_scan.address += PAGE_SIZE; 226321ae5b01SAndrea Arcangeli cond_resched(); 226421ae5b01SAndrea Arcangeli continue; 226521ae5b01SAndrea Arcangeli } 2266f765f540SKirill A. Shutemov if (PageAnon(*page)) { 226731dbd01fSIzik Eidus flush_anon_page(vma, *page, ksm_scan.address); 226831dbd01fSIzik Eidus flush_dcache_page(*page); 226931dbd01fSIzik Eidus rmap_item = get_next_rmap_item(slot, 22706514d511SHugh Dickins ksm_scan.rmap_list, ksm_scan.address); 227131dbd01fSIzik Eidus if (rmap_item) { 22726514d511SHugh Dickins ksm_scan.rmap_list = 22736514d511SHugh Dickins &rmap_item->rmap_list; 227431dbd01fSIzik Eidus ksm_scan.address += PAGE_SIZE; 227531dbd01fSIzik Eidus } else 227631dbd01fSIzik Eidus put_page(*page); 227731dbd01fSIzik Eidus up_read(&mm->mmap_sem); 227831dbd01fSIzik Eidus return rmap_item; 227931dbd01fSIzik Eidus } 228031dbd01fSIzik Eidus put_page(*page); 228131dbd01fSIzik Eidus ksm_scan.address += PAGE_SIZE; 228231dbd01fSIzik Eidus cond_resched(); 228331dbd01fSIzik Eidus } 228431dbd01fSIzik Eidus } 228531dbd01fSIzik Eidus 22869ba69294SHugh Dickins if (ksm_test_exit(mm)) { 22879ba69294SHugh Dickins ksm_scan.address = 0; 22886514d511SHugh Dickins ksm_scan.rmap_list = &slot->rmap_list; 22899ba69294SHugh Dickins } 229031dbd01fSIzik Eidus /* 229131dbd01fSIzik Eidus * Nuke all the rmap_items that are above this current rmap: 229231dbd01fSIzik Eidus * because there were no VM_MERGEABLE vmas with such addresses. 229331dbd01fSIzik Eidus */ 22946514d511SHugh Dickins remove_trailing_rmap_items(slot, ksm_scan.rmap_list); 229531dbd01fSIzik Eidus 229631dbd01fSIzik Eidus spin_lock(&ksm_mmlist_lock); 2297cd551f97SHugh Dickins ksm_scan.mm_slot = list_entry(slot->mm_list.next, 2298cd551f97SHugh Dickins struct mm_slot, mm_list); 2299cd551f97SHugh Dickins if (ksm_scan.address == 0) { 2300cd551f97SHugh Dickins /* 2301cd551f97SHugh Dickins * We've completed a full scan of all vmas, holding mmap_sem 2302cd551f97SHugh Dickins * throughout, and found no VM_MERGEABLE: so do the same as 2303cd551f97SHugh Dickins * __ksm_exit does to remove this mm from all our lists now. 
23049ba69294SHugh Dickins * This applies either when cleaning up after __ksm_exit 23059ba69294SHugh Dickins * (but beware: we can reach here even before __ksm_exit), 23069ba69294SHugh Dickins * or when all VM_MERGEABLE areas have been unmapped (and 23079ba69294SHugh Dickins * mmap_sem then protects against race with MADV_MERGEABLE). 2308cd551f97SHugh Dickins */ 23094ca3a69bSSasha Levin hash_del(&slot->link); 2310cd551f97SHugh Dickins list_del(&slot->mm_list); 23119ba69294SHugh Dickins spin_unlock(&ksm_mmlist_lock); 23129ba69294SHugh Dickins 2313cd551f97SHugh Dickins free_mm_slot(slot); 2314cd551f97SHugh Dickins clear_bit(MMF_VM_MERGEABLE, &mm->flags); 23159ba69294SHugh Dickins up_read(&mm->mmap_sem); 23169ba69294SHugh Dickins mmdrop(mm); 23179ba69294SHugh Dickins } else { 2318cd551f97SHugh Dickins up_read(&mm->mmap_sem); 23197496fea9SZhou Chengming /* 23207496fea9SZhou Chengming * up_read(&mm->mmap_sem) first because after 23217496fea9SZhou Chengming * spin_unlock(&ksm_mmlist_lock) run, the "mm" may 23227496fea9SZhou Chengming * already have been freed under us by __ksm_exit() 23237496fea9SZhou Chengming * because the "mm_slot" is still hashed and 23247496fea9SZhou Chengming * ksm_scan.mm_slot doesn't point to it anymore. 23257496fea9SZhou Chengming */ 23267496fea9SZhou Chengming spin_unlock(&ksm_mmlist_lock); 23279ba69294SHugh Dickins } 232831dbd01fSIzik Eidus 232931dbd01fSIzik Eidus /* Repeat until we've completed scanning the whole list */ 2330cd551f97SHugh Dickins slot = ksm_scan.mm_slot; 233131dbd01fSIzik Eidus if (slot != &ksm_mm_head) 233231dbd01fSIzik Eidus goto next_mm; 233331dbd01fSIzik Eidus 233431dbd01fSIzik Eidus ksm_scan.seqnr++; 233531dbd01fSIzik Eidus return NULL; 233631dbd01fSIzik Eidus } 233731dbd01fSIzik Eidus 233831dbd01fSIzik Eidus /** 233931dbd01fSIzik Eidus * ksm_do_scan - the ksm scanner main worker function. 2340b7701a5fSMike Rapoport * @scan_npages: number of pages we want to scan before we return. 
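 * Called with ksm_thread_mutex held: see ksm_scan_thread() below.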
234131dbd01fSIzik Eidus */ 234231dbd01fSIzik Eidus static void ksm_do_scan(unsigned int scan_npages) 234331dbd01fSIzik Eidus { 234431dbd01fSIzik Eidus struct rmap_item *rmap_item; 234522eccdd7SDan Carpenter struct page *uninitialized_var(page); 234631dbd01fSIzik Eidus 2347878aee7dSAndrea Arcangeli while (scan_npages-- && likely(!freezing(current))) { 234831dbd01fSIzik Eidus cond_resched(); 234931dbd01fSIzik Eidus rmap_item = scan_get_next_rmap_item(&page); 235031dbd01fSIzik Eidus if (!rmap_item) 235131dbd01fSIzik Eidus return; 235231dbd01fSIzik Eidus cmp_and_merge_page(page, rmap_item); 235331dbd01fSIzik Eidus put_page(page); 235431dbd01fSIzik Eidus } 235531dbd01fSIzik Eidus } 235631dbd01fSIzik Eidus 23576e158384SHugh Dickins static int ksmd_should_run(void) 23586e158384SHugh Dickins { 23596e158384SHugh Dickins return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list); 23606e158384SHugh Dickins } 23616e158384SHugh Dickins 236231dbd01fSIzik Eidus static int ksm_scan_thread(void *nothing) 236331dbd01fSIzik Eidus { 2364878aee7dSAndrea Arcangeli set_freezable(); 2365339aa624SIzik Eidus set_user_nice(current, 5); 236631dbd01fSIzik Eidus 236731dbd01fSIzik Eidus while (!kthread_should_stop()) { 236831dbd01fSIzik Eidus mutex_lock(&ksm_thread_mutex); 2369ef4d43a8SHugh Dickins wait_while_offlining(); 23706e158384SHugh Dickins if (ksmd_should_run()) 237131dbd01fSIzik Eidus ksm_do_scan(ksm_thread_pages_to_scan); 237231dbd01fSIzik Eidus mutex_unlock(&ksm_thread_mutex); 23736e158384SHugh Dickins 2374878aee7dSAndrea Arcangeli try_to_freeze(); 2375878aee7dSAndrea Arcangeli 23766e158384SHugh Dickins if (ksmd_should_run()) { 237731dbd01fSIzik Eidus schedule_timeout_interruptible( 237831dbd01fSIzik Eidus msecs_to_jiffies(ksm_thread_sleep_millisecs)); 237931dbd01fSIzik Eidus } else { 2380878aee7dSAndrea Arcangeli wait_event_freezable(ksm_thread_wait, 23816e158384SHugh Dickins ksmd_should_run() || kthread_should_stop()); 238231dbd01fSIzik Eidus } 238331dbd01fSIzik Eidus } 238431dbd01fSIzik Eidus return 0; 238531dbd01fSIzik Eidus } 238631dbd01fSIzik Eidus 2387f8af4da3SHugh Dickins int ksm_madvise(struct vm_area_struct *vma, unsigned long start, 2388f8af4da3SHugh Dickins unsigned long end, int advice, unsigned long *vm_flags) 2389f8af4da3SHugh Dickins { 2390f8af4da3SHugh Dickins struct mm_struct *mm = vma->vm_mm; 2391d952b791SHugh Dickins int err; 2392f8af4da3SHugh Dickins 2393f8af4da3SHugh Dickins switch (advice) { 2394f8af4da3SHugh Dickins case MADV_MERGEABLE: 2395f8af4da3SHugh Dickins /* 2396f8af4da3SHugh Dickins * Be somewhat over-protective for now! 2397f8af4da3SHugh Dickins */ 2398f8af4da3SHugh Dickins if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | 2399f8af4da3SHugh Dickins VM_PFNMAP | VM_IO | VM_DONTEXPAND | 24000661a336SKirill A. 
Shutemov VM_HUGETLB | VM_MIXEDMAP)) 2401f8af4da3SHugh Dickins return 0; /* just ignore the advice */ 2402f8af4da3SHugh Dickins 2403cc2383ecSKonstantin Khlebnikov #ifdef VM_SAO 2404cc2383ecSKonstantin Khlebnikov if (*vm_flags & VM_SAO) 2405cc2383ecSKonstantin Khlebnikov return 0; 2406cc2383ecSKonstantin Khlebnikov #endif 240774a04967SKhalid Aziz #ifdef VM_SPARC_ADI 240874a04967SKhalid Aziz if (*vm_flags & VM_SPARC_ADI) 240974a04967SKhalid Aziz return 0; 241074a04967SKhalid Aziz #endif 2411cc2383ecSKonstantin Khlebnikov 2412d952b791SHugh Dickins if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { 2413d952b791SHugh Dickins err = __ksm_enter(mm); 2414d952b791SHugh Dickins if (err) 2415d952b791SHugh Dickins return err; 2416d952b791SHugh Dickins } 2417f8af4da3SHugh Dickins 2418f8af4da3SHugh Dickins *vm_flags |= VM_MERGEABLE; 2419f8af4da3SHugh Dickins break; 2420f8af4da3SHugh Dickins 2421f8af4da3SHugh Dickins case MADV_UNMERGEABLE: 2422f8af4da3SHugh Dickins if (!(*vm_flags & VM_MERGEABLE)) 2423f8af4da3SHugh Dickins return 0; /* just ignore the advice */ 2424f8af4da3SHugh Dickins 2425d952b791SHugh Dickins if (vma->anon_vma) { 2426d952b791SHugh Dickins err = unmerge_ksm_pages(vma, start, end); 2427d952b791SHugh Dickins if (err) 2428d952b791SHugh Dickins return err; 2429d952b791SHugh Dickins } 2430f8af4da3SHugh Dickins 2431f8af4da3SHugh Dickins *vm_flags &= ~VM_MERGEABLE; 2432f8af4da3SHugh Dickins break; 2433f8af4da3SHugh Dickins } 2434f8af4da3SHugh Dickins 2435f8af4da3SHugh Dickins return 0; 2436f8af4da3SHugh Dickins } 2437f8af4da3SHugh Dickins 2438f8af4da3SHugh Dickins int __ksm_enter(struct mm_struct *mm) 2439f8af4da3SHugh Dickins { 24406e158384SHugh Dickins struct mm_slot *mm_slot; 24416e158384SHugh Dickins int needs_wakeup; 24426e158384SHugh Dickins 24436e158384SHugh Dickins mm_slot = alloc_mm_slot(); 244431dbd01fSIzik Eidus if (!mm_slot) 244531dbd01fSIzik Eidus return -ENOMEM; 244631dbd01fSIzik Eidus 24476e158384SHugh Dickins /* Check ksm_run too? Would need tighter locking */ 24486e158384SHugh Dickins needs_wakeup = list_empty(&ksm_mm_head.mm_list); 24496e158384SHugh Dickins 245031dbd01fSIzik Eidus spin_lock(&ksm_mmlist_lock); 245131dbd01fSIzik Eidus insert_to_mm_slots_hash(mm, mm_slot); 245231dbd01fSIzik Eidus /* 2453cbf86cfeSHugh Dickins * When KSM_RUN_MERGE (or KSM_RUN_STOP), 2454cbf86cfeSHugh Dickins * insert just behind the scanning cursor, to let the area settle 245531dbd01fSIzik Eidus * down a little; when fork is followed by immediate exec, we don't 245631dbd01fSIzik Eidus * want ksmd to waste time setting up and tearing down an rmap_list. 2457cbf86cfeSHugh Dickins * 2458cbf86cfeSHugh Dickins * But when KSM_RUN_UNMERGE, it's important to insert ahead of its 2459cbf86cfeSHugh Dickins * scanning cursor, otherwise KSM pages in newly forked mms will be 2460cbf86cfeSHugh Dickins * missed: then we might as well insert at the end of the list. 
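 * (list_add_tail() on the cursor's own entry links the new slot just
 * before the cursor in list order, i.e. already behind the scanner
 * for the current pass.)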
246131dbd01fSIzik Eidus */ 2462cbf86cfeSHugh Dickins if (ksm_run & KSM_RUN_UNMERGE) 2463cbf86cfeSHugh Dickins list_add_tail(&mm_slot->mm_list, &ksm_mm_head.mm_list); 2464cbf86cfeSHugh Dickins else 246531dbd01fSIzik Eidus list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list); 246631dbd01fSIzik Eidus spin_unlock(&ksm_mmlist_lock); 246731dbd01fSIzik Eidus 2468f8af4da3SHugh Dickins set_bit(MMF_VM_MERGEABLE, &mm->flags); 2469f1f10076SVegard Nossum mmgrab(mm); 24706e158384SHugh Dickins 24716e158384SHugh Dickins if (needs_wakeup) 24726e158384SHugh Dickins wake_up_interruptible(&ksm_thread_wait); 24736e158384SHugh Dickins 2474f8af4da3SHugh Dickins return 0; 2475f8af4da3SHugh Dickins } 2476f8af4da3SHugh Dickins 24771c2fb7a4SAndrea Arcangeli void __ksm_exit(struct mm_struct *mm) 2478f8af4da3SHugh Dickins { 2479cd551f97SHugh Dickins struct mm_slot *mm_slot; 24809ba69294SHugh Dickins int easy_to_free = 0; 2481cd551f97SHugh Dickins 248231dbd01fSIzik Eidus /* 24839ba69294SHugh Dickins * This process is exiting: if it's straightforward (as is the 24849ba69294SHugh Dickins * case when ksmd was never running), free mm_slot immediately. 24859ba69294SHugh Dickins * But if it's at the cursor or has rmap_items linked to it, use 24869ba69294SHugh Dickins * mmap_sem to synchronize with any break_cows before pagetables 24879ba69294SHugh Dickins * are freed, and leave the mm_slot on the list for ksmd to free. 24889ba69294SHugh Dickins * Beware: ksm may already have noticed it exiting and freed the slot. 248931dbd01fSIzik Eidus */ 24909ba69294SHugh Dickins 2491cd551f97SHugh Dickins spin_lock(&ksm_mmlist_lock); 2492cd551f97SHugh Dickins mm_slot = get_mm_slot(mm); 24939ba69294SHugh Dickins if (mm_slot && ksm_scan.mm_slot != mm_slot) { 24946514d511SHugh Dickins if (!mm_slot->rmap_list) { 24954ca3a69bSSasha Levin hash_del(&mm_slot->link); 2496cd551f97SHugh Dickins list_del(&mm_slot->mm_list); 24979ba69294SHugh Dickins easy_to_free = 1; 24989ba69294SHugh Dickins } else { 24999ba69294SHugh Dickins list_move(&mm_slot->mm_list, 25009ba69294SHugh Dickins &ksm_scan.mm_slot->mm_list); 25019ba69294SHugh Dickins } 25029ba69294SHugh Dickins } 2503cd551f97SHugh Dickins spin_unlock(&ksm_mmlist_lock); 2504cd551f97SHugh Dickins 25059ba69294SHugh Dickins if (easy_to_free) { 2506cd551f97SHugh Dickins free_mm_slot(mm_slot); 2507cd551f97SHugh Dickins clear_bit(MMF_VM_MERGEABLE, &mm->flags); 25089ba69294SHugh Dickins mmdrop(mm); 25099ba69294SHugh Dickins } else if (mm_slot) { 25109ba69294SHugh Dickins down_write(&mm->mmap_sem); 25119ba69294SHugh Dickins up_write(&mm->mmap_sem); 25129ba69294SHugh Dickins } 2513f8af4da3SHugh Dickins } 251431dbd01fSIzik Eidus 2515cbf86cfeSHugh Dickins struct page *ksm_might_need_to_copy(struct page *page, 25165ad64688SHugh Dickins struct vm_area_struct *vma, unsigned long address) 25175ad64688SHugh Dickins { 2518cbf86cfeSHugh Dickins struct anon_vma *anon_vma = page_anon_vma(page); 25195ad64688SHugh Dickins struct page *new_page; 25205ad64688SHugh Dickins 2521cbf86cfeSHugh Dickins if (PageKsm(page)) { 2522cbf86cfeSHugh Dickins if (page_stable_node(page) && 2523cbf86cfeSHugh Dickins !(ksm_run & KSM_RUN_UNMERGE)) 2524cbf86cfeSHugh Dickins return page; /* no need to copy it */ 2525cbf86cfeSHugh Dickins } else if (!anon_vma) { 2526cbf86cfeSHugh Dickins return page; /* no need to copy it */ 2527cbf86cfeSHugh Dickins } else if (anon_vma->root == vma->anon_vma->root && 2528cbf86cfeSHugh Dickins page->index == linear_page_index(vma, address)) { 2529cbf86cfeSHugh Dickins return page; /* still no need to copy 
it */ 2530cbf86cfeSHugh Dickins } 2531cbf86cfeSHugh Dickins if (!PageUptodate(page)) 2532cbf86cfeSHugh Dickins return page; /* let do_swap_page report the error */ 2533cbf86cfeSHugh Dickins 25345ad64688SHugh Dickins new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); 25355ad64688SHugh Dickins if (new_page) { 25365ad64688SHugh Dickins copy_user_highpage(new_page, page, address, vma); 25375ad64688SHugh Dickins 25385ad64688SHugh Dickins SetPageDirty(new_page); 25395ad64688SHugh Dickins __SetPageUptodate(new_page); 254048c935adSKirill A. Shutemov __SetPageLocked(new_page); 25415ad64688SHugh Dickins } 25425ad64688SHugh Dickins 25435ad64688SHugh Dickins return new_page; 25445ad64688SHugh Dickins } 25455ad64688SHugh Dickins 25461df631aeSMinchan Kim void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) 2547e9995ef9SHugh Dickins { 2548e9995ef9SHugh Dickins struct stable_node *stable_node; 2549e9995ef9SHugh Dickins struct rmap_item *rmap_item; 2550e9995ef9SHugh Dickins int search_new_forks = 0; 2551e9995ef9SHugh Dickins 2552309381feSSasha Levin VM_BUG_ON_PAGE(!PageKsm(page), page); 25539f32624bSJoonsoo Kim 25549f32624bSJoonsoo Kim /* 25559f32624bSJoonsoo Kim * Rely on the page lock to protect against concurrent modifications 25569f32624bSJoonsoo Kim * to that page's node of the stable tree. 25579f32624bSJoonsoo Kim */ 2558309381feSSasha Levin VM_BUG_ON_PAGE(!PageLocked(page), page); 2559e9995ef9SHugh Dickins 2560e9995ef9SHugh Dickins stable_node = page_stable_node(page); 2561e9995ef9SHugh Dickins if (!stable_node) 25621df631aeSMinchan Kim return; 2563e9995ef9SHugh Dickins again: 2564b67bfe0dSSasha Levin hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { 2565e9995ef9SHugh Dickins struct anon_vma *anon_vma = rmap_item->anon_vma; 25665beb4930SRik van Riel struct anon_vma_chain *vmac; 2567e9995ef9SHugh Dickins struct vm_area_struct *vma; 2568e9995ef9SHugh Dickins 2569ad12695fSAndrea Arcangeli cond_resched(); 2570b6b19f25SHugh Dickins anon_vma_lock_read(anon_vma); 2571bf181b9fSMichel Lespinasse anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, 2572bf181b9fSMichel Lespinasse 0, ULONG_MAX) { 2573ad12695fSAndrea Arcangeli cond_resched(); 25745beb4930SRik van Riel vma = vmac->vma; 2575e9995ef9SHugh Dickins if (rmap_item->address < vma->vm_start || 2576e9995ef9SHugh Dickins rmap_item->address >= vma->vm_end) 2577e9995ef9SHugh Dickins continue; 2578e9995ef9SHugh Dickins /* 2579e9995ef9SHugh Dickins * Initially we examine only the vma which covers this 2580e9995ef9SHugh Dickins * rmap_item; but later, if there is still work to do, 2581e9995ef9SHugh Dickins * we examine covering vmas in other mms: in case they 2582e9995ef9SHugh Dickins * were forked from the original since ksmd passed. 
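 * (search_new_forks drives the two passes: the first visits only vmas
 * whose vm_mm matches rmap_item->mm, the "again" pass visits the rest.)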
2583e9995ef9SHugh Dickins */ 2584e9995ef9SHugh Dickins if ((rmap_item->mm == vma->vm_mm) == search_new_forks) 2585e9995ef9SHugh Dickins continue; 2586e9995ef9SHugh Dickins 25870dd1c7bbSJoonsoo Kim if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 25880dd1c7bbSJoonsoo Kim continue; 25890dd1c7bbSJoonsoo Kim 2590e4b82222SMinchan Kim if (!rwc->rmap_one(page, vma, 25911df631aeSMinchan Kim rmap_item->address, rwc->arg)) { 2592b6b19f25SHugh Dickins anon_vma_unlock_read(anon_vma); 25931df631aeSMinchan Kim return; 2594e9995ef9SHugh Dickins } 25950dd1c7bbSJoonsoo Kim if (rwc->done && rwc->done(page)) { 25960dd1c7bbSJoonsoo Kim anon_vma_unlock_read(anon_vma); 25971df631aeSMinchan Kim return; 25980dd1c7bbSJoonsoo Kim } 2599e9995ef9SHugh Dickins } 2600b6b19f25SHugh Dickins anon_vma_unlock_read(anon_vma); 2601e9995ef9SHugh Dickins } 2602e9995ef9SHugh Dickins if (!search_new_forks++) 2603e9995ef9SHugh Dickins goto again; 2604e9995ef9SHugh Dickins } 2605e9995ef9SHugh Dickins 260652629506SJoonsoo Kim #ifdef CONFIG_MIGRATION 2607e9995ef9SHugh Dickins void ksm_migrate_page(struct page *newpage, struct page *oldpage) 2608e9995ef9SHugh Dickins { 2609e9995ef9SHugh Dickins struct stable_node *stable_node; 2610e9995ef9SHugh Dickins 2611309381feSSasha Levin VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 2612309381feSSasha Levin VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 2613309381feSSasha Levin VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage); 2614e9995ef9SHugh Dickins 2615e9995ef9SHugh Dickins stable_node = page_stable_node(newpage); 2616e9995ef9SHugh Dickins if (stable_node) { 2617309381feSSasha Levin VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage); 261862b61f61SHugh Dickins stable_node->kpfn = page_to_pfn(newpage); 2619c8d6553bSHugh Dickins /* 2620c8d6553bSHugh Dickins * newpage->mapping was set in advance; now we need smp_wmb() 2621c8d6553bSHugh Dickins * to make sure that the new stable_node->kpfn is visible 2622c8d6553bSHugh Dickins * to get_ksm_page() before it can see that oldpage->mapping 2623c8d6553bSHugh Dickins * has gone stale (or that PageSwapCache has been cleared). 
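 * (The read side is get_ksm_page(), which samples kpfn and then
 * re-validates page->mapping, relying on this ordering.)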
2624c8d6553bSHugh Dickins */ 2625c8d6553bSHugh Dickins smp_wmb(); 2626c8d6553bSHugh Dickins set_page_stable_node(oldpage, NULL); 2627e9995ef9SHugh Dickins } 2628e9995ef9SHugh Dickins } 2629e9995ef9SHugh Dickins #endif /* CONFIG_MIGRATION */ 2630e9995ef9SHugh Dickins 263162b61f61SHugh Dickins #ifdef CONFIG_MEMORY_HOTREMOVE 2632ef4d43a8SHugh Dickins static void wait_while_offlining(void) 2633ef4d43a8SHugh Dickins { 2634ef4d43a8SHugh Dickins while (ksm_run & KSM_RUN_OFFLINE) { 2635ef4d43a8SHugh Dickins mutex_unlock(&ksm_thread_mutex); 2636ef4d43a8SHugh Dickins wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE), 263774316201SNeilBrown TASK_UNINTERRUPTIBLE); 2638ef4d43a8SHugh Dickins mutex_lock(&ksm_thread_mutex); 2639ef4d43a8SHugh Dickins } 2640ef4d43a8SHugh Dickins } 2641ef4d43a8SHugh Dickins 26422c653d0eSAndrea Arcangeli static bool stable_node_dup_remove_range(struct stable_node *stable_node, 26432c653d0eSAndrea Arcangeli unsigned long start_pfn, 26442c653d0eSAndrea Arcangeli unsigned long end_pfn) 26452c653d0eSAndrea Arcangeli { 26462c653d0eSAndrea Arcangeli if (stable_node->kpfn >= start_pfn && 26472c653d0eSAndrea Arcangeli stable_node->kpfn < end_pfn) { 26482c653d0eSAndrea Arcangeli /* 26492c653d0eSAndrea Arcangeli * Don't get_ksm_page, page has already gone: 26502c653d0eSAndrea Arcangeli * which is why we keep kpfn instead of page* 26512c653d0eSAndrea Arcangeli */ 26522c653d0eSAndrea Arcangeli remove_node_from_stable_tree(stable_node); 26532c653d0eSAndrea Arcangeli return true; 26542c653d0eSAndrea Arcangeli } 26552c653d0eSAndrea Arcangeli return false; 26562c653d0eSAndrea Arcangeli } 26572c653d0eSAndrea Arcangeli 26582c653d0eSAndrea Arcangeli static bool stable_node_chain_remove_range(struct stable_node *stable_node, 26592c653d0eSAndrea Arcangeli unsigned long start_pfn, 26602c653d0eSAndrea Arcangeli unsigned long end_pfn, 26612c653d0eSAndrea Arcangeli struct rb_root *root) 26622c653d0eSAndrea Arcangeli { 26632c653d0eSAndrea Arcangeli struct stable_node *dup; 26642c653d0eSAndrea Arcangeli struct hlist_node *hlist_safe; 26652c653d0eSAndrea Arcangeli 26662c653d0eSAndrea Arcangeli if (!is_stable_node_chain(stable_node)) { 26672c653d0eSAndrea Arcangeli VM_BUG_ON(is_stable_node_dup(stable_node)); 26682c653d0eSAndrea Arcangeli return stable_node_dup_remove_range(stable_node, start_pfn, 26692c653d0eSAndrea Arcangeli end_pfn); 26702c653d0eSAndrea Arcangeli } 26712c653d0eSAndrea Arcangeli 26722c653d0eSAndrea Arcangeli hlist_for_each_entry_safe(dup, hlist_safe, 26732c653d0eSAndrea Arcangeli &stable_node->hlist, hlist_dup) { 26742c653d0eSAndrea Arcangeli VM_BUG_ON(!is_stable_node_dup(dup)); 26752c653d0eSAndrea Arcangeli stable_node_dup_remove_range(dup, start_pfn, end_pfn); 26762c653d0eSAndrea Arcangeli } 26772c653d0eSAndrea Arcangeli if (hlist_empty(&stable_node->hlist)) { 26782c653d0eSAndrea Arcangeli free_stable_node_chain(stable_node, root); 26792c653d0eSAndrea Arcangeli return true; /* notify caller that tree was rebalanced */ 26802c653d0eSAndrea Arcangeli } else 26812c653d0eSAndrea Arcangeli return false; 26822c653d0eSAndrea Arcangeli } 26832c653d0eSAndrea Arcangeli 2684ee0ea59cSHugh Dickins static void ksm_check_stable_tree(unsigned long start_pfn, 268562b61f61SHugh Dickins unsigned long end_pfn) 268662b61f61SHugh Dickins { 268703640418SGeliang Tang struct stable_node *stable_node, *next; 268862b61f61SHugh Dickins struct rb_node *node; 268990bd6fd3SPetr Holasek int nid; 269062b61f61SHugh Dickins 2691ef53d16cSHugh Dickins for (nid = 0; nid < ksm_nr_node_ids; nid++) { 2692ef53d16cSHugh Dickins node 
= rb_first(root_stable_tree + nid); 2693ee0ea59cSHugh Dickins while (node) { 269462b61f61SHugh Dickins stable_node = rb_entry(node, struct stable_node, node); 26952c653d0eSAndrea Arcangeli if (stable_node_chain_remove_range(stable_node, 26962c653d0eSAndrea Arcangeli start_pfn, end_pfn, 26972c653d0eSAndrea Arcangeli root_stable_tree + 26982c653d0eSAndrea Arcangeli nid)) 2699ef53d16cSHugh Dickins node = rb_first(root_stable_tree + nid); 27002c653d0eSAndrea Arcangeli else 2701ee0ea59cSHugh Dickins node = rb_next(node); 2702ee0ea59cSHugh Dickins cond_resched(); 270362b61f61SHugh Dickins } 2704ee0ea59cSHugh Dickins } 270503640418SGeliang Tang list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) { 27064146d2d6SHugh Dickins if (stable_node->kpfn >= start_pfn && 27074146d2d6SHugh Dickins stable_node->kpfn < end_pfn) 27084146d2d6SHugh Dickins remove_node_from_stable_tree(stable_node); 27094146d2d6SHugh Dickins cond_resched(); 27104146d2d6SHugh Dickins } 271162b61f61SHugh Dickins } 271262b61f61SHugh Dickins 271362b61f61SHugh Dickins static int ksm_memory_callback(struct notifier_block *self, 271462b61f61SHugh Dickins unsigned long action, void *arg) 271562b61f61SHugh Dickins { 271662b61f61SHugh Dickins struct memory_notify *mn = arg; 271762b61f61SHugh Dickins 271862b61f61SHugh Dickins switch (action) { 271962b61f61SHugh Dickins case MEM_GOING_OFFLINE: 272062b61f61SHugh Dickins /* 2721ef4d43a8SHugh Dickins * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items() 2722ef4d43a8SHugh Dickins * and remove_all_stable_nodes() while memory is going offline: 2723ef4d43a8SHugh Dickins * it is unsafe for them to touch the stable tree at this time. 2724ef4d43a8SHugh Dickins * But unmerge_ksm_pages(), rmap lookups and other entry points 2725ef4d43a8SHugh Dickins * which do not need the ksm_thread_mutex are all safe. 272662b61f61SHugh Dickins */ 2727ef4d43a8SHugh Dickins mutex_lock(&ksm_thread_mutex); 2728ef4d43a8SHugh Dickins ksm_run |= KSM_RUN_OFFLINE; 2729ef4d43a8SHugh Dickins mutex_unlock(&ksm_thread_mutex); 273062b61f61SHugh Dickins break; 273162b61f61SHugh Dickins 273262b61f61SHugh Dickins case MEM_OFFLINE: 273362b61f61SHugh Dickins /* 273462b61f61SHugh Dickins * Most of the work is done by page migration; but there might 273562b61f61SHugh Dickins * be a few stable_nodes left over, still pointing to struct 2736ee0ea59cSHugh Dickins * pages which have been offlined: prune those from the tree, 2737ee0ea59cSHugh Dickins * otherwise get_ksm_page() might later try to access a 2738ee0ea59cSHugh Dickins * non-existent struct page. 
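 * (ksm_check_stable_tree() below walks the per-NUMA-node rbtrees and
 * the migrate_nodes list, removing any node in the offlined pfn range.)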
273962b61f61SHugh Dickins */ 2740ee0ea59cSHugh Dickins ksm_check_stable_tree(mn->start_pfn, 2741ee0ea59cSHugh Dickins mn->start_pfn + mn->nr_pages); 274262b61f61SHugh Dickins /* fallthrough */ 274362b61f61SHugh Dickins 274462b61f61SHugh Dickins case MEM_CANCEL_OFFLINE: 2745ef4d43a8SHugh Dickins mutex_lock(&ksm_thread_mutex); 2746ef4d43a8SHugh Dickins ksm_run &= ~KSM_RUN_OFFLINE; 274762b61f61SHugh Dickins mutex_unlock(&ksm_thread_mutex); 2748ef4d43a8SHugh Dickins 2749ef4d43a8SHugh Dickins smp_mb(); /* wake_up_bit advises this */ 2750ef4d43a8SHugh Dickins wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE)); 275162b61f61SHugh Dickins break; 275262b61f61SHugh Dickins } 275362b61f61SHugh Dickins return NOTIFY_OK; 275462b61f61SHugh Dickins } 2755ef4d43a8SHugh Dickins #else 2756ef4d43a8SHugh Dickins static void wait_while_offlining(void) 2757ef4d43a8SHugh Dickins { 2758ef4d43a8SHugh Dickins } 275962b61f61SHugh Dickins #endif /* CONFIG_MEMORY_HOTREMOVE */ 276062b61f61SHugh Dickins 27612ffd8679SHugh Dickins #ifdef CONFIG_SYSFS 27622ffd8679SHugh Dickins /* 27632ffd8679SHugh Dickins * This all compiles without CONFIG_SYSFS, but is a waste of space. 27642ffd8679SHugh Dickins */ 27652ffd8679SHugh Dickins 276631dbd01fSIzik Eidus #define KSM_ATTR_RO(_name) \ 276731dbd01fSIzik Eidus static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 276831dbd01fSIzik Eidus #define KSM_ATTR(_name) \ 276931dbd01fSIzik Eidus static struct kobj_attribute _name##_attr = \ 277031dbd01fSIzik Eidus __ATTR(_name, 0644, _name##_show, _name##_store) 277131dbd01fSIzik Eidus 277231dbd01fSIzik Eidus static ssize_t sleep_millisecs_show(struct kobject *kobj, 277331dbd01fSIzik Eidus struct kobj_attribute *attr, char *buf) 277431dbd01fSIzik Eidus { 277531dbd01fSIzik Eidus return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs); 277631dbd01fSIzik Eidus } 277731dbd01fSIzik Eidus 277831dbd01fSIzik Eidus static ssize_t sleep_millisecs_store(struct kobject *kobj, 277931dbd01fSIzik Eidus struct kobj_attribute *attr, 278031dbd01fSIzik Eidus const char *buf, size_t count) 278131dbd01fSIzik Eidus { 278231dbd01fSIzik Eidus unsigned long msecs; 278331dbd01fSIzik Eidus int err; 278431dbd01fSIzik Eidus 27853dbb95f7SJingoo Han err = kstrtoul(buf, 10, &msecs); 278631dbd01fSIzik Eidus if (err || msecs > UINT_MAX) 278731dbd01fSIzik Eidus return -EINVAL; 278831dbd01fSIzik Eidus 278931dbd01fSIzik Eidus ksm_thread_sleep_millisecs = msecs; 279031dbd01fSIzik Eidus 279131dbd01fSIzik Eidus return count; 279231dbd01fSIzik Eidus } 279331dbd01fSIzik Eidus KSM_ATTR(sleep_millisecs); 279431dbd01fSIzik Eidus 279531dbd01fSIzik Eidus static ssize_t pages_to_scan_show(struct kobject *kobj, 279631dbd01fSIzik Eidus struct kobj_attribute *attr, char *buf) 279731dbd01fSIzik Eidus { 279831dbd01fSIzik Eidus return sprintf(buf, "%u\n", ksm_thread_pages_to_scan); 279931dbd01fSIzik Eidus } 280031dbd01fSIzik Eidus 280131dbd01fSIzik Eidus static ssize_t pages_to_scan_store(struct kobject *kobj, 280231dbd01fSIzik Eidus struct kobj_attribute *attr, 280331dbd01fSIzik Eidus const char *buf, size_t count) 280431dbd01fSIzik Eidus { 280531dbd01fSIzik Eidus int err; 280631dbd01fSIzik Eidus unsigned long nr_pages; 280731dbd01fSIzik Eidus 28083dbb95f7SJingoo Han err = kstrtoul(buf, 10, &nr_pages); 280931dbd01fSIzik Eidus if (err || nr_pages > UINT_MAX) 281031dbd01fSIzik Eidus return -EINVAL; 281131dbd01fSIzik Eidus 281231dbd01fSIzik Eidus ksm_thread_pages_to_scan = nr_pages; 281331dbd01fSIzik Eidus 281431dbd01fSIzik Eidus return count; 281531dbd01fSIzik Eidus } 281631dbd01fSIzik Eidus 
KSM_ATTR(pages_to_scan); 281731dbd01fSIzik Eidus 281831dbd01fSIzik Eidus static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr, 281931dbd01fSIzik Eidus char *buf) 282031dbd01fSIzik Eidus { 2821ef4d43a8SHugh Dickins return sprintf(buf, "%lu\n", ksm_run); 282231dbd01fSIzik Eidus } 282331dbd01fSIzik Eidus 282431dbd01fSIzik Eidus static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, 282531dbd01fSIzik Eidus const char *buf, size_t count) 282631dbd01fSIzik Eidus { 282731dbd01fSIzik Eidus int err; 282831dbd01fSIzik Eidus unsigned long flags; 282931dbd01fSIzik Eidus 28303dbb95f7SJingoo Han err = kstrtoul(buf, 10, &flags); 283131dbd01fSIzik Eidus if (err || flags > UINT_MAX) 283231dbd01fSIzik Eidus return -EINVAL; 283331dbd01fSIzik Eidus if (flags > KSM_RUN_UNMERGE) 283431dbd01fSIzik Eidus return -EINVAL; 283531dbd01fSIzik Eidus 283631dbd01fSIzik Eidus /* 283731dbd01fSIzik Eidus * KSM_RUN_MERGE sets ksmd running, and 0 stops it running. 283831dbd01fSIzik Eidus * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items, 2839d0f209f6SHugh Dickins * breaking COW to free the pages_shared (but leaves mm_slots 2840d0f209f6SHugh Dickins * on the list for when ksmd may be set running again). 284131dbd01fSIzik Eidus */ 284231dbd01fSIzik Eidus 284331dbd01fSIzik Eidus mutex_lock(&ksm_thread_mutex); 2844ef4d43a8SHugh Dickins wait_while_offlining(); 284531dbd01fSIzik Eidus if (ksm_run != flags) { 284631dbd01fSIzik Eidus ksm_run = flags; 2847d952b791SHugh Dickins if (flags & KSM_RUN_UNMERGE) { 2848e1e12d2fSDavid Rientjes set_current_oom_origin(); 2849d952b791SHugh Dickins err = unmerge_and_remove_all_rmap_items(); 2850e1e12d2fSDavid Rientjes clear_current_oom_origin(); 2851d952b791SHugh Dickins if (err) { 2852d952b791SHugh Dickins ksm_run = KSM_RUN_STOP; 2853d952b791SHugh Dickins count = err; 2854d952b791SHugh Dickins } 2855d952b791SHugh Dickins } 285631dbd01fSIzik Eidus } 285731dbd01fSIzik Eidus mutex_unlock(&ksm_thread_mutex); 285831dbd01fSIzik Eidus 285931dbd01fSIzik Eidus if (flags & KSM_RUN_MERGE) 286031dbd01fSIzik Eidus wake_up_interruptible(&ksm_thread_wait); 286131dbd01fSIzik Eidus 286231dbd01fSIzik Eidus return count; 286331dbd01fSIzik Eidus } 286431dbd01fSIzik Eidus KSM_ATTR(run); 286531dbd01fSIzik Eidus 286690bd6fd3SPetr Holasek #ifdef CONFIG_NUMA 286790bd6fd3SPetr Holasek static ssize_t merge_across_nodes_show(struct kobject *kobj, 286890bd6fd3SPetr Holasek struct kobj_attribute *attr, char *buf) 286990bd6fd3SPetr Holasek { 287090bd6fd3SPetr Holasek return sprintf(buf, "%u\n", ksm_merge_across_nodes); 287190bd6fd3SPetr Holasek } 287290bd6fd3SPetr Holasek 287390bd6fd3SPetr Holasek static ssize_t merge_across_nodes_store(struct kobject *kobj, 287490bd6fd3SPetr Holasek struct kobj_attribute *attr, 287590bd6fd3SPetr Holasek const char *buf, size_t count) 287690bd6fd3SPetr Holasek { 287790bd6fd3SPetr Holasek int err; 287890bd6fd3SPetr Holasek unsigned long knob; 287990bd6fd3SPetr Holasek 288090bd6fd3SPetr Holasek err = kstrtoul(buf, 10, &knob); 288190bd6fd3SPetr Holasek if (err) 288290bd6fd3SPetr Holasek return err; 288390bd6fd3SPetr Holasek if (knob > 1) 288490bd6fd3SPetr Holasek return -EINVAL; 288590bd6fd3SPetr Holasek 288690bd6fd3SPetr Holasek mutex_lock(&ksm_thread_mutex); 2887ef4d43a8SHugh Dickins wait_while_offlining(); 288890bd6fd3SPetr Holasek if (ksm_merge_across_nodes != knob) { 2889cbf86cfeSHugh Dickins if (ksm_pages_shared || remove_all_stable_nodes()) 289090bd6fd3SPetr Holasek err = -EBUSY; 2891ef53d16cSHugh Dickins else if (root_stable_tree == 
one_stable_tree) { 2892ef53d16cSHugh Dickins struct rb_root *buf; 2893ef53d16cSHugh Dickins /* 2894ef53d16cSHugh Dickins * This is the first time that we switch away from the 2895ef53d16cSHugh Dickins * default of merging across nodes: must now allocate 2896ef53d16cSHugh Dickins * a buffer to hold as many roots as may be needed. 2897ef53d16cSHugh Dickins * Allocate stable and unstable together: 2898ef53d16cSHugh Dickins * MAXSMP NODES_SHIFT 10 will use 16kB. 2899ef53d16cSHugh Dickins */ 2900bafe1e14SJoe Perches buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf), 2901bafe1e14SJoe Perches GFP_KERNEL); 2902ef53d16cSHugh Dickins /* Assume an empty RB_ROOT is all zero bits, as the kcalloc'ed buffer gives */ 2903ef53d16cSHugh Dickins if (!buf) 2904ef53d16cSHugh Dickins err = -ENOMEM; 2905ef53d16cSHugh Dickins else { 2906ef53d16cSHugh Dickins root_stable_tree = buf; 2907ef53d16cSHugh Dickins root_unstable_tree = buf + nr_node_ids; 2908ef53d16cSHugh Dickins /* Stable tree is empty but not the unstable */ 2909ef53d16cSHugh Dickins root_unstable_tree[0] = one_unstable_tree[0]; 2910ef53d16cSHugh Dickins } 2911ef53d16cSHugh Dickins } 2912ef53d16cSHugh Dickins if (!err) { 291390bd6fd3SPetr Holasek ksm_merge_across_nodes = knob; 2914ef53d16cSHugh Dickins ksm_nr_node_ids = knob ? 1 : nr_node_ids; 2915ef53d16cSHugh Dickins } 291690bd6fd3SPetr Holasek } 291790bd6fd3SPetr Holasek mutex_unlock(&ksm_thread_mutex); 291890bd6fd3SPetr Holasek 291990bd6fd3SPetr Holasek return err ? err : count; 292090bd6fd3SPetr Holasek } 292190bd6fd3SPetr Holasek KSM_ATTR(merge_across_nodes); 292290bd6fd3SPetr Holasek #endif 292390bd6fd3SPetr Holasek 2924e86c59b1SClaudio Imbrenda static ssize_t use_zero_pages_show(struct kobject *kobj, 2925e86c59b1SClaudio Imbrenda struct kobj_attribute *attr, char *buf) 2926e86c59b1SClaudio Imbrenda { 2927e86c59b1SClaudio Imbrenda return sprintf(buf, "%u\n", ksm_use_zero_pages); 2928e86c59b1SClaudio Imbrenda } 2929e86c59b1SClaudio Imbrenda static ssize_t use_zero_pages_store(struct kobject *kobj, 2930e86c59b1SClaudio Imbrenda struct kobj_attribute *attr, 2931e86c59b1SClaudio Imbrenda const char *buf, size_t count) 2932e86c59b1SClaudio Imbrenda { 2933e86c59b1SClaudio Imbrenda int err; 2934e86c59b1SClaudio Imbrenda bool value; 2935e86c59b1SClaudio Imbrenda 2936e86c59b1SClaudio Imbrenda err = kstrtobool(buf, &value); 2937e86c59b1SClaudio Imbrenda if (err) 2938e86c59b1SClaudio Imbrenda return -EINVAL; 2939e86c59b1SClaudio Imbrenda 2940e86c59b1SClaudio Imbrenda ksm_use_zero_pages = value; 2941e86c59b1SClaudio Imbrenda 2942e86c59b1SClaudio Imbrenda return count; 2943e86c59b1SClaudio Imbrenda } 2944e86c59b1SClaudio Imbrenda KSM_ATTR(use_zero_pages); 2945e86c59b1SClaudio Imbrenda 29462c653d0eSAndrea Arcangeli static ssize_t max_page_sharing_show(struct kobject *kobj, 29472c653d0eSAndrea Arcangeli struct kobj_attribute *attr, char *buf) 29482c653d0eSAndrea Arcangeli { 29492c653d0eSAndrea Arcangeli return sprintf(buf, "%u\n", ksm_max_page_sharing); 29502c653d0eSAndrea Arcangeli } 29512c653d0eSAndrea Arcangeli 29522c653d0eSAndrea Arcangeli static ssize_t max_page_sharing_store(struct kobject *kobj, 29532c653d0eSAndrea Arcangeli struct kobj_attribute *attr, 29542c653d0eSAndrea Arcangeli const char *buf, size_t count) 29552c653d0eSAndrea Arcangeli { 29562c653d0eSAndrea Arcangeli int err; 29572c653d0eSAndrea Arcangeli int knob; 29582c653d0eSAndrea Arcangeli 29592c653d0eSAndrea Arcangeli err = kstrtoint(buf, 10, &knob); 29602c653d0eSAndrea Arcangeli if (err) 29612c653d0eSAndrea Arcangeli return err; 29622c653d0eSAndrea Arcangeli /*
29632c653d0eSAndrea Arcangeli * When a KSM page is created it is shared by 2 mappings. This 29642c653d0eSAndrea Arcangeli * being a signed comparison, it implicitly verifies it's not 29652c653d0eSAndrea Arcangeli * negative. 29662c653d0eSAndrea Arcangeli */ 29672c653d0eSAndrea Arcangeli if (knob < 2) 29682c653d0eSAndrea Arcangeli return -EINVAL; 29692c653d0eSAndrea Arcangeli 29702c653d0eSAndrea Arcangeli if (READ_ONCE(ksm_max_page_sharing) == knob) 29712c653d0eSAndrea Arcangeli return count; 29722c653d0eSAndrea Arcangeli 29732c653d0eSAndrea Arcangeli mutex_lock(&ksm_thread_mutex); 29742c653d0eSAndrea Arcangeli wait_while_offlining(); 29752c653d0eSAndrea Arcangeli if (ksm_max_page_sharing != knob) { 29762c653d0eSAndrea Arcangeli if (ksm_pages_shared || remove_all_stable_nodes()) 29772c653d0eSAndrea Arcangeli err = -EBUSY; 29782c653d0eSAndrea Arcangeli else 29792c653d0eSAndrea Arcangeli ksm_max_page_sharing = knob; 29802c653d0eSAndrea Arcangeli } 29812c653d0eSAndrea Arcangeli mutex_unlock(&ksm_thread_mutex); 29822c653d0eSAndrea Arcangeli 29832c653d0eSAndrea Arcangeli return err ? err : count; 29842c653d0eSAndrea Arcangeli } 29852c653d0eSAndrea Arcangeli KSM_ATTR(max_page_sharing); 29862c653d0eSAndrea Arcangeli 2987b4028260SHugh Dickins static ssize_t pages_shared_show(struct kobject *kobj, 2988b4028260SHugh Dickins struct kobj_attribute *attr, char *buf) 2989b4028260SHugh Dickins { 2990b4028260SHugh Dickins return sprintf(buf, "%lu\n", ksm_pages_shared); 2991b4028260SHugh Dickins } 2992b4028260SHugh Dickins KSM_ATTR_RO(pages_shared); 2993b4028260SHugh Dickins 2994b4028260SHugh Dickins static ssize_t pages_sharing_show(struct kobject *kobj, 2995b4028260SHugh Dickins struct kobj_attribute *attr, char *buf) 2996b4028260SHugh Dickins { 2997e178dfdeSHugh Dickins return sprintf(buf, "%lu\n", ksm_pages_sharing); 2998b4028260SHugh Dickins } 2999b4028260SHugh Dickins KSM_ATTR_RO(pages_sharing); 3000b4028260SHugh Dickins 3001473b0ce4SHugh Dickins static ssize_t pages_unshared_show(struct kobject *kobj, 3002473b0ce4SHugh Dickins struct kobj_attribute *attr, char *buf) 3003473b0ce4SHugh Dickins { 3004473b0ce4SHugh Dickins return sprintf(buf, "%lu\n", ksm_pages_unshared); 3005473b0ce4SHugh Dickins } 3006473b0ce4SHugh Dickins KSM_ATTR_RO(pages_unshared); 3007473b0ce4SHugh Dickins 3008473b0ce4SHugh Dickins static ssize_t pages_volatile_show(struct kobject *kobj, 3009473b0ce4SHugh Dickins struct kobj_attribute *attr, char *buf) 3010473b0ce4SHugh Dickins { 3011473b0ce4SHugh Dickins long ksm_pages_volatile; 3012473b0ce4SHugh Dickins 3013473b0ce4SHugh Dickins ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared 3014473b0ce4SHugh Dickins - ksm_pages_sharing - ksm_pages_unshared; 3015473b0ce4SHugh Dickins /* 3016473b0ce4SHugh Dickins * It was not worth any locking to calculate that statistic, 3017473b0ce4SHugh Dickins * but it might therefore sometimes be negative: conceal that. 
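 * For example, 1000 rmap_items with 100 pages_shared, 300 pages_sharing
 * and 500 pages_unshared leaves 100 pages_volatile.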
3018473b0ce4SHugh Dickins */ 3019473b0ce4SHugh Dickins if (ksm_pages_volatile < 0) 3020473b0ce4SHugh Dickins ksm_pages_volatile = 0; 3021473b0ce4SHugh Dickins return sprintf(buf, "%ld\n", ksm_pages_volatile); 3022473b0ce4SHugh Dickins } 3023473b0ce4SHugh Dickins KSM_ATTR_RO(pages_volatile); 3024473b0ce4SHugh Dickins 30252c653d0eSAndrea Arcangeli static ssize_t stable_node_dups_show(struct kobject *kobj, 30262c653d0eSAndrea Arcangeli struct kobj_attribute *attr, char *buf) 30272c653d0eSAndrea Arcangeli { 30282c653d0eSAndrea Arcangeli return sprintf(buf, "%lu\n", ksm_stable_node_dups); 30292c653d0eSAndrea Arcangeli } 30302c653d0eSAndrea Arcangeli KSM_ATTR_RO(stable_node_dups); 30312c653d0eSAndrea Arcangeli 30322c653d0eSAndrea Arcangeli static ssize_t stable_node_chains_show(struct kobject *kobj, 30332c653d0eSAndrea Arcangeli struct kobj_attribute *attr, char *buf) 30342c653d0eSAndrea Arcangeli { 30352c653d0eSAndrea Arcangeli return sprintf(buf, "%lu\n", ksm_stable_node_chains); 30362c653d0eSAndrea Arcangeli } 30372c653d0eSAndrea Arcangeli KSM_ATTR_RO(stable_node_chains); 30382c653d0eSAndrea Arcangeli 30392c653d0eSAndrea Arcangeli static ssize_t 30402c653d0eSAndrea Arcangeli stable_node_chains_prune_millisecs_show(struct kobject *kobj, 30412c653d0eSAndrea Arcangeli struct kobj_attribute *attr, 30422c653d0eSAndrea Arcangeli char *buf) 30432c653d0eSAndrea Arcangeli { 30442c653d0eSAndrea Arcangeli return sprintf(buf, "%u\n", ksm_stable_node_chains_prune_millisecs); 30452c653d0eSAndrea Arcangeli } 30462c653d0eSAndrea Arcangeli 30472c653d0eSAndrea Arcangeli static ssize_t 30482c653d0eSAndrea Arcangeli stable_node_chains_prune_millisecs_store(struct kobject *kobj, 30492c653d0eSAndrea Arcangeli struct kobj_attribute *attr, 30502c653d0eSAndrea Arcangeli const char *buf, size_t count) 30512c653d0eSAndrea Arcangeli { 30522c653d0eSAndrea Arcangeli unsigned long msecs; 30532c653d0eSAndrea Arcangeli int err; 30542c653d0eSAndrea Arcangeli 30552c653d0eSAndrea Arcangeli err = kstrtoul(buf, 10, &msecs); 30562c653d0eSAndrea Arcangeli if (err || msecs > UINT_MAX) 30572c653d0eSAndrea Arcangeli return -EINVAL; 30582c653d0eSAndrea Arcangeli 30592c653d0eSAndrea Arcangeli ksm_stable_node_chains_prune_millisecs = msecs; 30602c653d0eSAndrea Arcangeli 30612c653d0eSAndrea Arcangeli return count; 30622c653d0eSAndrea Arcangeli } 30632c653d0eSAndrea Arcangeli KSM_ATTR(stable_node_chains_prune_millisecs); 30642c653d0eSAndrea Arcangeli 3065473b0ce4SHugh Dickins static ssize_t full_scans_show(struct kobject *kobj, 3066473b0ce4SHugh Dickins struct kobj_attribute *attr, char *buf) 3067473b0ce4SHugh Dickins { 3068473b0ce4SHugh Dickins return sprintf(buf, "%lu\n", ksm_scan.seqnr); 3069473b0ce4SHugh Dickins } 3070473b0ce4SHugh Dickins KSM_ATTR_RO(full_scans); 3071473b0ce4SHugh Dickins 307231dbd01fSIzik Eidus static struct attribute *ksm_attrs[] = { 307331dbd01fSIzik Eidus &sleep_millisecs_attr.attr, 307431dbd01fSIzik Eidus &pages_to_scan_attr.attr, 307531dbd01fSIzik Eidus &run_attr.attr, 3076b4028260SHugh Dickins &pages_shared_attr.attr, 3077b4028260SHugh Dickins &pages_sharing_attr.attr, 3078473b0ce4SHugh Dickins &pages_unshared_attr.attr, 3079473b0ce4SHugh Dickins &pages_volatile_attr.attr, 3080473b0ce4SHugh Dickins &full_scans_attr.attr, 308190bd6fd3SPetr Holasek #ifdef CONFIG_NUMA 308290bd6fd3SPetr Holasek &merge_across_nodes_attr.attr, 308390bd6fd3SPetr Holasek #endif 30842c653d0eSAndrea Arcangeli &max_page_sharing_attr.attr, 30852c653d0eSAndrea Arcangeli &stable_node_chains_attr.attr, 30862c653d0eSAndrea Arcangeli 
&stable_node_dups_attr.attr, 30872c653d0eSAndrea Arcangeli &stable_node_chains_prune_millisecs_attr.attr, 3088e86c59b1SClaudio Imbrenda &use_zero_pages_attr.attr, 308931dbd01fSIzik Eidus NULL, 309031dbd01fSIzik Eidus }; 309131dbd01fSIzik Eidus 3092f907c26aSArvind Yadav static const struct attribute_group ksm_attr_group = { 309331dbd01fSIzik Eidus .attrs = ksm_attrs, 309431dbd01fSIzik Eidus .name = "ksm", 309531dbd01fSIzik Eidus }; 30962ffd8679SHugh Dickins #endif /* CONFIG_SYSFS */ 309731dbd01fSIzik Eidus 309831dbd01fSIzik Eidus static int __init ksm_init(void) 309931dbd01fSIzik Eidus { 310031dbd01fSIzik Eidus struct task_struct *ksm_thread; 310131dbd01fSIzik Eidus int err; 310231dbd01fSIzik Eidus 3103e86c59b1SClaudio Imbrenda /* The correct value depends on page size and endianness */ 3104e86c59b1SClaudio Imbrenda zero_checksum = calc_checksum(ZERO_PAGE(0)); 3105e86c59b1SClaudio Imbrenda /* Default to false for backwards compatibility */ 3106e86c59b1SClaudio Imbrenda ksm_use_zero_pages = false; 3107e86c59b1SClaudio Imbrenda 310831dbd01fSIzik Eidus err = ksm_slab_init(); 310931dbd01fSIzik Eidus if (err) 311031dbd01fSIzik Eidus goto out; 311131dbd01fSIzik Eidus 311231dbd01fSIzik Eidus ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd"); 311331dbd01fSIzik Eidus if (IS_ERR(ksm_thread)) { 311425acde31SPaul McQuade pr_err("ksm: creating kthread failed\n"); 311531dbd01fSIzik Eidus err = PTR_ERR(ksm_thread); 3116d9f8984cSLai Jiangshan goto out_free; 311731dbd01fSIzik Eidus } 311831dbd01fSIzik Eidus 31192ffd8679SHugh Dickins #ifdef CONFIG_SYSFS 312031dbd01fSIzik Eidus err = sysfs_create_group(mm_kobj, &ksm_attr_group); 312131dbd01fSIzik Eidus if (err) { 312225acde31SPaul McQuade pr_err("ksm: register sysfs failed\n"); 31232ffd8679SHugh Dickins kthread_stop(ksm_thread); 3124d9f8984cSLai Jiangshan goto out_free; 312531dbd01fSIzik Eidus } 3126c73602adSHugh Dickins #else 3127c73602adSHugh Dickins ksm_run = KSM_RUN_MERGE; /* no way for user to start it */ 3128c73602adSHugh Dickins 31292ffd8679SHugh Dickins #endif /* CONFIG_SYSFS */ 313031dbd01fSIzik Eidus 313162b61f61SHugh Dickins #ifdef CONFIG_MEMORY_HOTREMOVE 3132ef4d43a8SHugh Dickins /* There is no significance to this priority 100 */ 313362b61f61SHugh Dickins hotplug_memory_notifier(ksm_memory_callback, 100); 313462b61f61SHugh Dickins #endif 313531dbd01fSIzik Eidus return 0; 313631dbd01fSIzik Eidus 3137d9f8984cSLai Jiangshan out_free: 313831dbd01fSIzik Eidus ksm_slab_free(); 313931dbd01fSIzik Eidus out: 314031dbd01fSIzik Eidus return err; 314131dbd01fSIzik Eidus } 3142a64fb3cdSPaul Gortmaker subsys_initcall(ksm_init); 3143
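/*
 * Illustrative usage from userspace: a sketch, not part of this file.
 * madvise(MADV_MERGEABLE) reaches ksm_madvise() above; merging then
 * happens once ksmd runs, e.g. after "echo 1 > /sys/kernel/mm/ksm/run"
 * (run_store() above).  The 64MB size and the 0x5a fill are arbitrary.
 *
 *	#include <string.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 64UL << 20;
 *		char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (buf == MAP_FAILED)
 *			return 1;
 *		memset(buf, 0x5a, len);
 *		if (madvise(buf, len, MADV_MERGEABLE))
 *			return 1;
 *		pause();
 *		return 0;
 *	}
 */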