/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork()
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Authors:
 *	Izik Eidus
 *	Andrea Arcangeli
 *	Chris Wright
 *	Hugh Dickins
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>

#include <asm/tlbflush.h>
#include "internal.h"

/*
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents.  Because each such page is write-protected, searching on
 * this tree is fully assured to be working (except when pages are unmapped),
 * and therefore this tree is called the stable tree.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time".  The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree, pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a red-black tree - so its balancing is based on
 *    the colors of the nodes and not on their contents, assuring that even
 *    when the tree gets "corrupted" it won't get out of balance, so scanning
 *    time remains the same (also, searching and inserting nodes in an rbtree
 *    uses the same algorithm, so we have no overhead when we flush and
 *    rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree.  (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
 */
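/*
 * Illustrative sketch (not part of the original source): technique 2)
 * above is implemented below in cmp_and_merge_page() by remembering a
 * jhash checksum from the previous full scan, roughly:
 *
 *	checksum = calc_checksum(page);
 *	if (rmap_item->oldchecksum != checksum) {
 *		rmap_item->oldchecksum = checksum;
 *		return;
 *	}
 *
 * so only a page whose checksum was stable across two successive full
 * scans is allowed into the unstable tree.
 */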
/**
 * struct mm_slot - ksm information per mm that is being scanned
 * @link: link to the mm_slots hash list
 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node link;
	struct list_head mm_list;
	struct rmap_item *rmap_list;
	struct mm_struct *mm;
};

/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
	struct mm_slot *mm_slot;
	unsigned long address;
	struct rmap_item **rmap_list;
	unsigned long seqnr;
};

/**
 * struct stable_node - node of the stable rbtree
 * @page: pointer to struct page of the ksm page
 * @node: rb node of this ksm page in the stable tree
 * @hlist: hlist head of rmap_items using this ksm page
 */
struct stable_node {
	struct page *page;
	struct rb_node node;
	struct hlist_head hlist;
};

/**
 * struct rmap_item - reverse mapping item for virtual addresses
 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @node: rb node of this rmap_item in the unstable tree
 * @head: pointer to stable_node heading this list in the stable tree
 * @hlist: link into hlist of rmap_items hanging off that stable_node
 */
struct rmap_item {
	struct rmap_item *rmap_list;
	struct anon_vma *anon_vma;	/* when stable */
	struct mm_struct *mm;
	unsigned long address;		/* + low bits used for flags below */
	unsigned int oldchecksum;	/* when unstable */
	union {
		struct rb_node node;	/* when node of unstable tree */
		struct {		/* when listed from stable tree */
			struct stable_node *head;
			struct hlist_node hlist;
		};
	};
};

#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
#define STABLE_FLAG	0x200	/* is listed from the stable tree */

/* The stable and unstable tree heads */
static struct rb_root root_stable_tree = RB_ROOT;
static struct rb_root root_unstable_tree = RB_ROOT;

#define MM_SLOTS_HASH_HEADS 1024
static struct hlist_head *mm_slots_hash;

static struct mm_slot ksm_mm_head = {
	.mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
};
static struct ksm_scan ksm_scan = {
	.mm_slot = &ksm_mm_head,
};

static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* Limit on the number of unswappable pages used */
static unsigned long ksm_max_kernel_pages;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = 100;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;

#define KSM_RUN_STOP	0
#define KSM_RUN_MERGE	1
#define KSM_RUN_UNMERGE	2
static unsigned int ksm_run = KSM_RUN_STOP;

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);

#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)

static int __init ksm_slab_init(void)
{
	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
	if (!rmap_item_cache)
		goto out;

	stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
	if (!stable_node_cache)
		goto out_free1;

	mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
	if (!mm_slot_cache)
		goto out_free2;

	return 0;

out_free2:
	kmem_cache_destroy(stable_node_cache);
out_free1:
	kmem_cache_destroy(rmap_item_cache);
out:
	return -ENOMEM;
}

static void __init ksm_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	kmem_cache_destroy(stable_node_cache);
	kmem_cache_destroy(rmap_item_cache);
	mm_slot_cache = NULL;
}

static inline struct rmap_item *alloc_rmap_item(void)
{
	struct rmap_item *rmap_item;

	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
	if (rmap_item)
		ksm_rmap_items++;
	return rmap_item;
}

static inline void free_rmap_item(struct rmap_item *rmap_item)
{
	ksm_rmap_items--;
	rmap_item->mm = NULL;	/* debug safety */
	kmem_cache_free(rmap_item_cache, rmap_item);
}

static inline struct stable_node *alloc_stable_node(void)
{
	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL);
}

static inline void free_stable_node(struct stable_node *stable_node)
{
	kmem_cache_free(stable_node_cache, stable_node);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static int __init mm_slots_hash_init(void)
{
	mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
				GFP_KERNEL);
	if (!mm_slots_hash)
		return -ENOMEM;
	return 0;
}

static void __init mm_slots_hash_free(void)
{
	kfree(mm_slots_hash);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	struct hlist_head *bucket;
	struct hlist_node *node;

	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
				% MM_SLOTS_HASH_HEADS];
	hlist_for_each_entry(mm_slot, node, bucket, link) {
		if (mm == mm_slot->mm)
			return mm_slot;
	}
	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	struct hlist_head *bucket;

	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
				% MM_SLOTS_HASH_HEADS];
	mm_slot->mm = mm;
	hlist_add_head(&mm_slot->link, bucket);
}
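/*
 * Illustrative note (not in the original source): both hash functions
 * above pick one of the MM_SLOTS_HASH_HEADS (1024) buckets by treating
 * the mm_struct pointer as an index into a notional array of mm_structs:
 *
 *	bucket = ((unsigned long)mm / sizeof(struct mm_struct))
 *			% MM_SLOTS_HASH_HEADS;
 *
 * Dividing by the object size strips address bits that successive
 * slab-allocated mm_structs tend to share, so nearby objects spread
 * across different buckets.
 */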
static inline int in_stable_tree(struct rmap_item *rmap_item)
{
	return rmap_item->address & STABLE_FLAG;
}

static void hold_anon_vma(struct rmap_item *rmap_item,
			  struct anon_vma *anon_vma)
{
	rmap_item->anon_vma = anon_vma;
	atomic_inc(&anon_vma->ksm_refcount);
}

static void drop_anon_vma(struct rmap_item *rmap_item)
{
	struct anon_vma *anon_vma = rmap_item->anon_vma;

	if (atomic_dec_and_lock(&anon_vma->ksm_refcount, &anon_vma->lock)) {
		int empty = list_empty(&anon_vma->head);
		spin_unlock(&anon_vma->lock);
		if (empty)
			anon_vma_free(anon_vma);
	}
}

/*
 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
 * page tables after it has passed through ksm_exit() - which, if necessary,
 * takes mmap_sem briefly to serialize against them.  ksm_exit() does not set
 * a special flag: they can just back out as soon as mm_users goes to zero.
 * ksm_test_exit() is used throughout to make this test for exit: in some
 * places for correctness, in some places just to avoid unnecessary work.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

/*
 * We use break_ksm to break COW on a ksm page: it's a stripped down
 *
 *	if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
 *		put_page(page);
 *
 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	int ret = 0;

	do {
		cond_resched();
		page = follow_page(vma, addr, FOLL_GET);
		if (!page)
			break;
		if (PageKsm(page))
			ret = handle_mm_fault(vma->vm_mm, vma, addr,
							FAULT_FLAG_WRITE);
		else
			ret = VM_FAULT_WRITE;
		put_page(page);
	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
	/*
	 * We must loop because handle_mm_fault() may back out if there's
	 * any difficulty e.g. if pte accessed bit gets updated concurrently.
	 *
	 * VM_FAULT_WRITE is what we have been hoping for: it indicates that
	 * COW has been broken, even if the vma does not permit VM_WRITE;
	 * but note that a concurrent fault might break PageKsm for us.
	 *
	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
	 * backing file, which also invalidates anonymous pages: that's
	 * okay, that truncation will have unmapped the PageKsm for us.
	 *
	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
	 * current task has TIF_MEMDIE set, and will be OOM killed on return
	 * to user; and ksmd, having no mm, would never be chosen for that.
	 *
	 * But if the mm is in a limited mem_cgroup, then the fault may fail
	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
	 * even ksmd can fail in this way - though it's usually breaking ksm
	 * just to undo a merge it made a moment before, so unlikely to oom.
	 *
	 * That's a pity: we might therefore have more kernel pages allocated
	 * than we're counting as nodes in the stable tree; but ksm_do_scan
	 * will retry to break_cow on each pass, so should recover the page
	 * in due course.  The important thing is to not let VM_MERGEABLE
	 * be cleared while any such pages might remain in the area.
	 */
	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}
static void break_cow(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))
		goto out;
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		goto out;
	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		goto out;
	break_ksm(vma, addr);
out:
	up_read(&mm->mmap_sem);
}

static struct page *get_mergeable_page(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;
	struct page *page;

	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))
		goto out;
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		goto out;
	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (!page)
		goto out;
	if (PageAnon(page)) {
		flush_anon_page(vma, page, addr);
		flush_dcache_page(page);
	} else {
		put_page(page);
out:
		page = NULL;
	}
	up_read(&mm->mmap_sem);
	return page;
}

/*
 * Removing rmap_item from stable or unstable tree.
 * This function will clean the information from the stable/unstable tree.
 */
static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
{
	if (rmap_item->address & STABLE_FLAG) {
		struct stable_node *stable_node;
		struct page *page;

		stable_node = rmap_item->head;
		page = stable_node->page;
		lock_page(page);

		hlist_del(&rmap_item->hlist);
		if (stable_node->hlist.first) {
			unlock_page(page);
			ksm_pages_sharing--;
		} else {
			set_page_stable_node(page, NULL);
			unlock_page(page);
			put_page(page);

			rb_erase(&stable_node->node, &root_stable_tree);
			free_stable_node(stable_node);
			ksm_pages_shared--;
		}

		drop_anon_vma(rmap_item);
		rmap_item->address &= PAGE_MASK;

	} else if (rmap_item->address & UNSTABLE_FLAG) {
		unsigned char age;
		/*
		 * Usually ksmd can and must skip the rb_erase, because
		 * root_unstable_tree was already reset to RB_ROOT.
		 * But be careful when an mm is exiting: do the rb_erase
		 * if this rmap_item was inserted by this scan, rather
		 * than left over from before.
		 */
		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
		BUG_ON(age > 1);
		if (!age)
			rb_erase(&rmap_item->node, &root_unstable_tree);

		ksm_pages_unshared--;
		rmap_item->address &= PAGE_MASK;
	}

	cond_resched();		/* we're called from many long loops */
}

static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
				       struct rmap_item **rmap_list)
{
	while (*rmap_list) {
		struct rmap_item *rmap_item = *rmap_list;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}
}

/*
 * Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather
 * than check every pte of a given vma, the locking doesn't quite work for
 * that - an rmap_item is assigned to the stable tree after inserting ksm
 * page and upping mmap_sem.  Nor does it fit with the way we skip dup'ing
 * rmap_items from parent to child at fork time (so as not to waste time
 * if exit comes before the next scan reaches it).
 *
 * Similarly, although we'd like to remove rmap_items (so updating counts
 * and freeing memory) when unmerging an area, it's easier to leave that
 * to the next pass of ksmd - consider, for example, how ksmd might be
 * in cmp_and_merge_page on one of the rmap_items we would be removing.
 */
static int unmerge_ksm_pages(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
		if (ksm_test_exit(vma->vm_mm))
			break;
		if (signal_pending(current))
			err = -ERESTARTSYS;
		else
			err = break_ksm(vma, addr);
	}
	return err;
}

#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */
static int unmerge_and_remove_all_rmap_items(void)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int err = 0;

	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
						struct mm_slot, mm_list);
	spin_unlock(&ksm_mmlist_lock);

	for (mm_slot = ksm_scan.mm_slot;
			mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
		mm = mm_slot->mm;
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (ksm_test_exit(mm))
				break;
			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
				continue;
			err = unmerge_ksm_pages(vma,
						vma->vm_start, vma->vm_end);
			if (err)
				goto error;
		}

		remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);

		spin_lock(&ksm_mmlist_lock);
		ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
						struct mm_slot, mm_list);
		if (ksm_test_exit(mm)) {
			hlist_del(&mm_slot->link);
			list_del(&mm_slot->mm_list);
			spin_unlock(&ksm_mmlist_lock);

			free_mm_slot(mm_slot);
			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
			up_read(&mm->mmap_sem);
			mmdrop(mm);
		} else {
			spin_unlock(&ksm_mmlist_lock);
			up_read(&mm->mmap_sem);
		}
	}

	ksm_scan.seqnr = 0;
	return 0;

error:
	up_read(&mm->mmap_sem);
	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = &ksm_mm_head;
	spin_unlock(&ksm_mmlist_lock);
	return err;
}
#endif /* CONFIG_SYSFS */

static u32 calc_checksum(struct page *page)
{
	u32 checksum;
	void *addr = kmap_atomic(page, KM_USER0);
	checksum = jhash2(addr, PAGE_SIZE / 4, 17);
	kunmap_atomic(addr, KM_USER0);
	return checksum;
}

static int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1, KM_USER0);
	addr2 = kmap_atomic(page2, KM_USER1);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2, KM_USER1);
	kunmap_atomic(addr1, KM_USER0);
	return ret;
}

static inline int pages_identical(struct page *page1, struct page *page2)
{
	return !memcmp_pages(page1, page2);
}
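/*
 * Illustrative note (not in the original source): jhash2() above hashes
 * an array of u32 words, so the page is passed as PAGE_SIZE / 4 words,
 * with 17 as an arbitrary initial seed.  The checksum is only a
 * volatility filter; equality of content is always decided by the full
 * byte comparison in memcmp_pages(), never by matching checksums.
 */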
static int write_protect_page(struct vm_area_struct *vma, struct page *page,
			      pte_t *orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	pte_t *ptep;
	spinlock_t *ptl;
	int swapped;
	int err = -EFAULT;

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (!ptep)
		goto out;

	if (pte_write(*ptep)) {
		pte_t entry;

		swapped = PageSwapCache(page);
		flush_cache_page(vma, addr, page_to_pfn(page));
		/*
		 * Ok this is tricky: when get_user_pages_fast() runs it
		 * doesn't take any lock, therefore the check that we are
		 * going to make with the pagecount against the mapcount
		 * is racy and O_DIRECT can happen right after the check.
		 * So we clear the pte and flush the tlb before the check:
		 * this assures us that no O_DIRECT can happen after the
		 * check or in the middle of the check.
		 */
		entry = ptep_clear_flush(vma, addr, ptep);
		/*
		 * Check that no O_DIRECT or similar I/O is in progress on
		 * the page
		 */
		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
			set_pte_at_notify(mm, addr, ptep, entry);
			goto out_unlock;
		}
		entry = pte_wrprotect(entry);
		set_pte_at_notify(mm, addr, ptep, entry);
	}
	*orig_pte = *ptep;
	err = 0;

out_unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return err;
}

/**
 * replace_page - replace page in vma by new ksm page
 * @vma:      vma that holds the pte pointing to page
 * @page:     the page we are replacing by kpage
 * @kpage:    the ksm page we replace page by
 * @orig_pte: the original value of the pte
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int replace_page(struct vm_area_struct *vma, struct page *page,
			struct page *kpage, pte_t orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;
	spinlock_t *ptl;
	unsigned long addr;
	int err = -EFAULT;

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		goto out;

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte_same(*ptep, orig_pte)) {
		pte_unmap_unlock(ptep, ptl);
		goto out;
	}

	get_page(kpage);
	page_add_anon_rmap(kpage, vma, addr);

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

	page_remove_rmap(page);
	put_page(page);

	pte_unmap_unlock(ptep, ptl);
	err = 0;
out:
	return err;
}
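/*
 * Illustrative sketch (not in the original source): the merge performed
 * by try_to_merge_one_page() below is, at its core, the sequence
 *
 *	if (write_protect_page(vma, page, &orig_pte) == 0 &&
 *	    pages_identical(page, kpage))
 *		err = replace_page(vma, page, kpage, orig_pte);
 *
 * write-protect first, then compare, then substitute: the pte is made
 * read-only before the byte comparison, so the page cannot be modified
 * between passing pages_identical() and being replaced by kpage.
 */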
/*
 * try_to_merge_one_page - take two pages and merge them into one
 * @vma: the vma that holds the pte pointing to page
 * @page: the PageAnon page that we want to replace with kpage
 * @kpage: the PageKsm page that we want to map instead of page
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_one_page(struct vm_area_struct *vma,
				 struct page *page, struct page *kpage)
{
	pte_t orig_pte = __pte(0);
	int err = -EFAULT;

	if (page == kpage)			/* ksm page forked */
		return 0;

	if (!(vma->vm_flags & VM_MERGEABLE))
		goto out;
	if (!PageAnon(page))
		goto out;

	/*
	 * We need the page lock to read a stable PageSwapCache in
	 * write_protect_page().  We use trylock_page() instead of
	 * lock_page() because we don't want to wait here - we
	 * prefer to continue scanning and merging different pages,
	 * then come back to this page when it is unlocked.
	 */
	if (!trylock_page(page))
		goto out;
	/*
	 * If this anonymous page is mapped only here, its pte may need
	 * to be write-protected.  If it's mapped elsewhere, all of its
	 * ptes are necessarily already write-protected.  But in either
	 * case, we need to lock and check page_count is not raised.
	 */
	if (write_protect_page(vma, page, &orig_pte) == 0 &&
	    pages_identical(page, kpage))
		err = replace_page(vma, page, kpage, orig_pte);

	if ((vma->vm_flags & VM_LOCKED) && !err) {
		munlock_vma_page(page);
		if (!PageMlocked(kpage)) {
			unlock_page(page);
			lru_add_drain();
			lock_page(kpage);
			mlock_vma_page(kpage);
			page = kpage;		/* for final unlock */
		}
	}

	unlock_page(page);
out:
	return err;
}

/*
 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
 * but no new kernel page is allocated: kpage must already be a ksm page.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
				      struct page *page, struct page *kpage)
{
	struct mm_struct *mm = rmap_item->mm;
	struct vm_area_struct *vma;
	int err = -EFAULT;

	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))
		goto out;
	vma = find_vma(mm, rmap_item->address);
	if (!vma || vma->vm_start > rmap_item->address)
		goto out;

	err = try_to_merge_one_page(vma, page, kpage);
	if (err)
		goto out;

	/* Must get reference to anon_vma while still holding mmap_sem */
	hold_anon_vma(rmap_item, vma->anon_vma);
out:
	up_read(&mm->mmap_sem);
	return err;
}

/*
 * try_to_merge_two_pages - take two identical pages and prepare them
 * to be merged into one page.
 *
 * This function returns the kpage if we successfully merged two identical
 * pages into one ksm page, NULL otherwise.
 *
 * Note that this function allocates a new kernel page: if one of the pages
 * is already a ksm page, try_to_merge_with_ksm_page should be used.
 */
static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
					   struct page *page,
					   struct rmap_item *tree_rmap_item,
					   struct page *tree_page)
{
	struct mm_struct *mm = rmap_item->mm;
	struct vm_area_struct *vma;
	struct page *kpage;
	int err = -EFAULT;

	/*
	 * The number of nodes in the stable tree
	 * is the number of kernel pages that we hold.
	 */
	if (ksm_max_kernel_pages &&
	    ksm_max_kernel_pages <= ksm_pages_shared)
		return NULL;

	kpage = alloc_page(GFP_HIGHUSER);
	if (!kpage)
		return NULL;

	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))
		goto up;
	vma = find_vma(mm, rmap_item->address);
	if (!vma || vma->vm_start > rmap_item->address)
		goto up;

	copy_user_highpage(kpage, page, rmap_item->address, vma);

	SetPageDirty(kpage);
	__SetPageUptodate(kpage);
	SetPageSwapBacked(kpage);
	set_page_stable_node(kpage, NULL);	/* mark it PageKsm */
	lru_cache_add_lru(kpage, LRU_ACTIVE_ANON);

	err = try_to_merge_one_page(vma, page, kpage);
	if (err)
		goto up;

	/* Must get reference to anon_vma while still holding mmap_sem */
	hold_anon_vma(rmap_item, vma->anon_vma);
up:
	up_read(&mm->mmap_sem);

	if (!err) {
		err = try_to_merge_with_ksm_page(tree_rmap_item,
							tree_page, kpage);
		/*
		 * If that fails, we have a ksm page with only one pte
		 * pointing to it: so break it.
		 */
		if (err) {
			drop_anon_vma(rmap_item);
			break_cow(rmap_item);
		}
	}
	if (err) {
		put_page(kpage);
		kpage = NULL;
	}
	return kpage;
}

/*
 * stable_tree_search - search for page inside the stable tree
 *
 * This function checks if there is a page inside the stable tree
 * with identical content to the page that we are scanning right now.
 *
 * This function returns the stable tree node of identical content if found,
 * NULL otherwise.
 */
static struct stable_node *stable_tree_search(struct page *page)
{
	struct rb_node *node = root_stable_tree.rb_node;
	struct stable_node *stable_node;

	stable_node = page_stable_node(page);
	if (stable_node) {			/* ksm page forked */
		get_page(page);
		return stable_node;
	}

	while (node) {
		int ret;

		cond_resched();
		stable_node = rb_entry(node, struct stable_node, node);

		ret = memcmp_pages(page, stable_node->page);

		if (ret < 0)
			node = node->rb_left;
		else if (ret > 0)
			node = node->rb_right;
		else {
			get_page(stable_node->page);
			return stable_node;
		}
	}

	return NULL;
}
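/*
 * Illustrative note (not in the original source): stable_tree_insert()
 * below follows the usual kernel rbtree idiom - walk down from the root
 * remembering the parent and the link that was taken, then splice the
 * new node in at the empty link and rebalance:
 *
 *	rb_link_node(&stable_node->node, parent, new);
 *	rb_insert_color(&stable_node->node, &root_stable_tree);
 *
 * with memcmp_pages() of the page contents serving as the comparison
 * function that orders both of KSM's trees.
 */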
/*
 * stable_tree_insert - insert rmap_item pointing to new ksm page
 * into the stable tree.
 *
 * This function returns the stable tree node just allocated on success,
 * NULL otherwise.
 */
static struct stable_node *stable_tree_insert(struct page *kpage)
{
	struct rb_node **new = &root_stable_tree.rb_node;
	struct rb_node *parent = NULL;
	struct stable_node *stable_node;

	while (*new) {
		int ret;

		cond_resched();
		stable_node = rb_entry(*new, struct stable_node, node);

		ret = memcmp_pages(kpage, stable_node->page);

		parent = *new;
		if (ret < 0)
			new = &parent->rb_left;
		else if (ret > 0)
			new = &parent->rb_right;
		else {
			/*
			 * It is not a bug that stable_tree_search() didn't
			 * find this node: because at that time our page was
			 * not yet write-protected, so may have changed since.
			 */
			return NULL;
		}
	}

	stable_node = alloc_stable_node();
	if (!stable_node)
		return NULL;

	rb_link_node(&stable_node->node, parent, new);
	rb_insert_color(&stable_node->node, &root_stable_tree);

	INIT_HLIST_HEAD(&stable_node->hlist);

	get_page(kpage);
	stable_node->page = kpage;
	set_page_stable_node(kpage, stable_node);

	return stable_node;
}

/*
 * unstable_tree_search_insert - search for identical page,
 * else insert rmap_item into the unstable tree.
 *
 * This function searches for a page in the unstable tree identical to the
 * page currently being scanned; and if no identical page is found in the
 * tree, we insert rmap_item as a new object into the unstable tree.
 *
 * This function returns pointer to rmap_item found to be identical
 * to the currently scanned page, NULL otherwise.
 *
 * This function does both searching and inserting, because they share
 * the same walking algorithm in an rbtree.
 */
static
struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
					      struct page *page,
					      struct page **tree_pagep)
{
	struct rb_node **new = &root_unstable_tree.rb_node;
	struct rb_node *parent = NULL;

	while (*new) {
		struct rmap_item *tree_rmap_item;
		struct page *tree_page;
		int ret;

		cond_resched();
		tree_rmap_item = rb_entry(*new, struct rmap_item, node);
		tree_page = get_mergeable_page(tree_rmap_item);
		if (!tree_page)
			return NULL;

		/*
		 * Don't substitute a ksm page for a forked page.
		 */
		if (page == tree_page) {
			put_page(tree_page);
			return NULL;
		}

		ret = memcmp_pages(page, tree_page);

		parent = *new;
		if (ret < 0) {
			put_page(tree_page);
			new = &parent->rb_left;
		} else if (ret > 0) {
			put_page(tree_page);
			new = &parent->rb_right;
		} else {
			*tree_pagep = tree_page;
			return tree_rmap_item;
		}
	}

	rmap_item->address |= UNSTABLE_FLAG;
	rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
	rb_link_node(&rmap_item->node, parent, new);
	rb_insert_color(&rmap_item->node, &root_unstable_tree);

	ksm_pages_unshared++;
	return NULL;
}

/*
 * stable_tree_append - add another rmap_item to the linked list of
 * rmap_items hanging off a given node of the stable tree, all sharing
 * the same ksm page.
 */
static void stable_tree_append(struct rmap_item *rmap_item,
			       struct stable_node *stable_node)
{
	rmap_item->head = stable_node;
	rmap_item->address |= STABLE_FLAG;
	hlist_add_head(&rmap_item->hlist, &stable_node->hlist);

	if (rmap_item->hlist.next)
		ksm_pages_sharing++;
	else
		ksm_pages_shared++;
}
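/*
 * Illustrative sketch (not in the original source): the decision flow
 * of cmp_and_merge_page() below, in rough pseudocode:
 *
 *	stable_node = stable_tree_search(page);
 *	if (stable_node)
 *		merge with stable_node->page, stable_tree_append();
 *	else if (checksum changed since the last scan)
 *		remember the new checksum, give up for now;
 *	else if ((tree_rmap_item = unstable_tree_search_insert(...)))
 *		merge the two unstable pages into a new kpage,
 *		stable_tree_insert() it, append both rmap_items;
 *
 * so a page earns a stable tree node only after matching another page
 * while both have survived a full scan interval unchanged.
 */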
/*
 * cmp_and_merge_page - first see if page can be merged into the stable tree;
 * if not, compare checksum to previous and if it's the same, see if page can
 * be inserted into the unstable tree, or merged with a page already there and
 * both transferred to the stable tree.
 *
 * @page: the page that we are searching identical page to.
 * @rmap_item: the reverse mapping into the virtual address of this page
 */
static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
{
	struct rmap_item *tree_rmap_item;
	struct page *tree_page = NULL;
	struct stable_node *stable_node;
	struct page *kpage;
	unsigned int checksum;
	int err;

	remove_rmap_item_from_tree(rmap_item);

	/* We first start with searching the page inside the stable tree */
	stable_node = stable_tree_search(page);
	if (stable_node) {
		kpage = stable_node->page;
		err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
		if (!err) {
			/*
			 * The page was successfully merged:
			 * add its rmap_item to the stable tree.
			 */
			lock_page(kpage);
			stable_tree_append(rmap_item, stable_node);
			unlock_page(kpage);
		}
		put_page(kpage);
		return;
	}

	/*
	 * A ksm page might have got here by fork, but its other
	 * references have already been removed from the stable tree.
	 * Or it might be left over from a break_ksm which failed
	 * when the mem_cgroup had reached its limit: try again now.
	 */
	if (PageKsm(page))
		break_cow(rmap_item);

	/*
	 * If the hash value of the page has changed since the last time
	 * we calculated it, this page is likely to be changed frequently:
	 * therefore we don't want to insert it into the unstable tree, and
	 * we don't want to waste our time searching for something identical
	 * to it there.
	 */
	checksum = calc_checksum(page);
	if (rmap_item->oldchecksum != checksum) {
		rmap_item->oldchecksum = checksum;
		return;
	}

	tree_rmap_item =
		unstable_tree_search_insert(rmap_item, page, &tree_page);
	if (tree_rmap_item) {
		kpage = try_to_merge_two_pages(rmap_item, page,
						tree_rmap_item, tree_page);
		put_page(tree_page);
		/*
		 * As soon as we merge this page, we want to remove the
		 * rmap_item of the page we have merged with from the unstable
		 * tree, and insert it instead as new node in the stable tree.
116331dbd01fSIzik Eidus */ 11648dd3557aSHugh Dickins if (kpage) { 116593d17715SHugh Dickins remove_rmap_item_from_tree(tree_rmap_item); 1166473b0ce4SHugh Dickins 11675ad64688SHugh Dickins lock_page(kpage); 11687b6ba2c7SHugh Dickins stable_node = stable_tree_insert(kpage); 11697b6ba2c7SHugh Dickins if (stable_node) { 11707b6ba2c7SHugh Dickins stable_tree_append(tree_rmap_item, stable_node); 11717b6ba2c7SHugh Dickins stable_tree_append(rmap_item, stable_node); 11727b6ba2c7SHugh Dickins } 11735ad64688SHugh Dickins unlock_page(kpage); 11747b6ba2c7SHugh Dickins put_page(kpage); 11757b6ba2c7SHugh Dickins 117631dbd01fSIzik Eidus /* 117731dbd01fSIzik Eidus * If we fail to insert the page into the stable tree, 117831dbd01fSIzik Eidus * we will have 2 virtual addresses that are pointing 117931dbd01fSIzik Eidus * to a ksm page left outside the stable tree, 118031dbd01fSIzik Eidus * in which case we need to break_cow on both. 118131dbd01fSIzik Eidus */ 11827b6ba2c7SHugh Dickins if (!stable_node) { 1183*db114b83SHugh Dickins drop_anon_vma(tree_rmap_item); 11848dd3557aSHugh Dickins break_cow(tree_rmap_item); 1185*db114b83SHugh Dickins drop_anon_vma(rmap_item); 11868dd3557aSHugh Dickins break_cow(rmap_item); 118731dbd01fSIzik Eidus } 118831dbd01fSIzik Eidus } 118931dbd01fSIzik Eidus } 119031dbd01fSIzik Eidus } 119131dbd01fSIzik Eidus 119231dbd01fSIzik Eidus static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot, 11936514d511SHugh Dickins struct rmap_item **rmap_list, 119431dbd01fSIzik Eidus unsigned long addr) 119531dbd01fSIzik Eidus { 119631dbd01fSIzik Eidus struct rmap_item *rmap_item; 119731dbd01fSIzik Eidus 11986514d511SHugh Dickins while (*rmap_list) { 11996514d511SHugh Dickins rmap_item = *rmap_list; 120093d17715SHugh Dickins if ((rmap_item->address & PAGE_MASK) == addr) 120131dbd01fSIzik Eidus return rmap_item; 120231dbd01fSIzik Eidus if (rmap_item->address > addr) 120331dbd01fSIzik Eidus break; 12046514d511SHugh Dickins *rmap_list = rmap_item->rmap_list; 120531dbd01fSIzik Eidus remove_rmap_item_from_tree(rmap_item); 120631dbd01fSIzik Eidus free_rmap_item(rmap_item); 120731dbd01fSIzik Eidus } 120831dbd01fSIzik Eidus 120931dbd01fSIzik Eidus rmap_item = alloc_rmap_item(); 121031dbd01fSIzik Eidus if (rmap_item) { 121131dbd01fSIzik Eidus /* It has already been zeroed */ 121231dbd01fSIzik Eidus rmap_item->mm = mm_slot->mm; 121331dbd01fSIzik Eidus rmap_item->address = addr; 12146514d511SHugh Dickins rmap_item->rmap_list = *rmap_list; 12156514d511SHugh Dickins *rmap_list = rmap_item; 121631dbd01fSIzik Eidus } 121731dbd01fSIzik Eidus return rmap_item; 121831dbd01fSIzik Eidus } 121931dbd01fSIzik Eidus 122031dbd01fSIzik Eidus static struct rmap_item *scan_get_next_rmap_item(struct page **page) 122131dbd01fSIzik Eidus { 122231dbd01fSIzik Eidus struct mm_struct *mm; 122331dbd01fSIzik Eidus struct mm_slot *slot; 122431dbd01fSIzik Eidus struct vm_area_struct *vma; 122531dbd01fSIzik Eidus struct rmap_item *rmap_item; 122631dbd01fSIzik Eidus 122731dbd01fSIzik Eidus if (list_empty(&ksm_mm_head.mm_list)) 122831dbd01fSIzik Eidus return NULL; 122931dbd01fSIzik Eidus 123031dbd01fSIzik Eidus slot = ksm_scan.mm_slot; 123131dbd01fSIzik Eidus if (slot == &ksm_mm_head) { 123231dbd01fSIzik Eidus root_unstable_tree = RB_ROOT; 123331dbd01fSIzik Eidus 123431dbd01fSIzik Eidus spin_lock(&ksm_mmlist_lock); 123531dbd01fSIzik Eidus slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list); 123631dbd01fSIzik Eidus ksm_scan.mm_slot = slot; 123731dbd01fSIzik Eidus spin_unlock(&ksm_mmlist_lock); 123831dbd01fSIzik 
Eidus next_mm: 123931dbd01fSIzik Eidus ksm_scan.address = 0; 12406514d511SHugh Dickins ksm_scan.rmap_list = &slot->rmap_list; 124131dbd01fSIzik Eidus } 124231dbd01fSIzik Eidus 124331dbd01fSIzik Eidus mm = slot->mm; 124431dbd01fSIzik Eidus down_read(&mm->mmap_sem); 12459ba69294SHugh Dickins if (ksm_test_exit(mm)) 12469ba69294SHugh Dickins vma = NULL; 12479ba69294SHugh Dickins else 12489ba69294SHugh Dickins vma = find_vma(mm, ksm_scan.address); 12499ba69294SHugh Dickins 12509ba69294SHugh Dickins for (; vma; vma = vma->vm_next) { 125131dbd01fSIzik Eidus if (!(vma->vm_flags & VM_MERGEABLE)) 125231dbd01fSIzik Eidus continue; 125331dbd01fSIzik Eidus if (ksm_scan.address < vma->vm_start) 125431dbd01fSIzik Eidus ksm_scan.address = vma->vm_start; 125531dbd01fSIzik Eidus if (!vma->anon_vma) 125631dbd01fSIzik Eidus ksm_scan.address = vma->vm_end; 125731dbd01fSIzik Eidus 125831dbd01fSIzik Eidus while (ksm_scan.address < vma->vm_end) { 12599ba69294SHugh Dickins if (ksm_test_exit(mm)) 12609ba69294SHugh Dickins break; 126131dbd01fSIzik Eidus *page = follow_page(vma, ksm_scan.address, FOLL_GET); 126231dbd01fSIzik Eidus if (*page && PageAnon(*page)) { 126331dbd01fSIzik Eidus flush_anon_page(vma, *page, ksm_scan.address); 126431dbd01fSIzik Eidus flush_dcache_page(*page); 126531dbd01fSIzik Eidus rmap_item = get_next_rmap_item(slot, 12666514d511SHugh Dickins ksm_scan.rmap_list, ksm_scan.address); 126731dbd01fSIzik Eidus if (rmap_item) { 12686514d511SHugh Dickins ksm_scan.rmap_list = 12696514d511SHugh Dickins &rmap_item->rmap_list; 127031dbd01fSIzik Eidus ksm_scan.address += PAGE_SIZE; 127131dbd01fSIzik Eidus } else 127231dbd01fSIzik Eidus put_page(*page); 127331dbd01fSIzik Eidus up_read(&mm->mmap_sem); 127431dbd01fSIzik Eidus return rmap_item; 127531dbd01fSIzik Eidus } 127631dbd01fSIzik Eidus if (*page) 127731dbd01fSIzik Eidus put_page(*page); 127831dbd01fSIzik Eidus ksm_scan.address += PAGE_SIZE; 127931dbd01fSIzik Eidus cond_resched(); 128031dbd01fSIzik Eidus } 128131dbd01fSIzik Eidus } 128231dbd01fSIzik Eidus 12839ba69294SHugh Dickins if (ksm_test_exit(mm)) { 12849ba69294SHugh Dickins ksm_scan.address = 0; 12856514d511SHugh Dickins ksm_scan.rmap_list = &slot->rmap_list; 12869ba69294SHugh Dickins } 128731dbd01fSIzik Eidus /* 128831dbd01fSIzik Eidus * Nuke all the rmap_items that are above this current rmap: 128931dbd01fSIzik Eidus * because there were no VM_MERGEABLE vmas with such addresses. 129031dbd01fSIzik Eidus */ 12916514d511SHugh Dickins remove_trailing_rmap_items(slot, ksm_scan.rmap_list); 129231dbd01fSIzik Eidus 129331dbd01fSIzik Eidus spin_lock(&ksm_mmlist_lock); 1294cd551f97SHugh Dickins ksm_scan.mm_slot = list_entry(slot->mm_list.next, 1295cd551f97SHugh Dickins struct mm_slot, mm_list); 1296cd551f97SHugh Dickins if (ksm_scan.address == 0) { 1297cd551f97SHugh Dickins /* 1298cd551f97SHugh Dickins * We've completed a full scan of all vmas, holding mmap_sem 1299cd551f97SHugh Dickins * throughout, and found no VM_MERGEABLE: so do the same as 1300cd551f97SHugh Dickins * __ksm_exit does to remove this mm from all our lists now. 13019ba69294SHugh Dickins * This applies either when cleaning up after __ksm_exit 13029ba69294SHugh Dickins * (but beware: we can reach here even before __ksm_exit), 13039ba69294SHugh Dickins * or when all VM_MERGEABLE areas have been unmapped (and 13049ba69294SHugh Dickins * mmap_sem then protects against race with MADV_MERGEABLE). 
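 * The mmdrop() below then undoes the mm_count reference which
 * __ksm_enter() took with atomic_inc().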
1305cd551f97SHugh Dickins */ 1306cd551f97SHugh Dickins hlist_del(&slot->link); 1307cd551f97SHugh Dickins list_del(&slot->mm_list); 13089ba69294SHugh Dickins spin_unlock(&ksm_mmlist_lock); 13099ba69294SHugh Dickins 1310cd551f97SHugh Dickins free_mm_slot(slot); 1311cd551f97SHugh Dickins clear_bit(MMF_VM_MERGEABLE, &mm->flags); 13129ba69294SHugh Dickins up_read(&mm->mmap_sem); 13139ba69294SHugh Dickins mmdrop(mm); 13149ba69294SHugh Dickins } else { 131531dbd01fSIzik Eidus spin_unlock(&ksm_mmlist_lock); 1316cd551f97SHugh Dickins up_read(&mm->mmap_sem); 13179ba69294SHugh Dickins } 131831dbd01fSIzik Eidus 131931dbd01fSIzik Eidus /* Repeat until we've completed scanning the whole list */ 1320cd551f97SHugh Dickins slot = ksm_scan.mm_slot; 132131dbd01fSIzik Eidus if (slot != &ksm_mm_head) 132231dbd01fSIzik Eidus goto next_mm; 132331dbd01fSIzik Eidus 132431dbd01fSIzik Eidus ksm_scan.seqnr++; 132531dbd01fSIzik Eidus return NULL; 132631dbd01fSIzik Eidus } 132731dbd01fSIzik Eidus 132831dbd01fSIzik Eidus /** 132931dbd01fSIzik Eidus * ksm_do_scan - the ksm scanner main worker function. 133031dbd01fSIzik Eidus * @scan_npages - number of pages we want to scan before we return. 133131dbd01fSIzik Eidus */ 133231dbd01fSIzik Eidus static void ksm_do_scan(unsigned int scan_npages) 133331dbd01fSIzik Eidus { 133431dbd01fSIzik Eidus struct rmap_item *rmap_item; 133531dbd01fSIzik Eidus struct page *page; 133631dbd01fSIzik Eidus 133731dbd01fSIzik Eidus while (scan_npages--) { 133831dbd01fSIzik Eidus cond_resched(); 133931dbd01fSIzik Eidus rmap_item = scan_get_next_rmap_item(&page); 134031dbd01fSIzik Eidus if (!rmap_item) 134131dbd01fSIzik Eidus return; 134231dbd01fSIzik Eidus if (!PageKsm(page) || !in_stable_tree(rmap_item)) 134331dbd01fSIzik Eidus cmp_and_merge_page(page, rmap_item); 134431dbd01fSIzik Eidus put_page(page); 134531dbd01fSIzik Eidus } 134631dbd01fSIzik Eidus } 134731dbd01fSIzik Eidus 13486e158384SHugh Dickins static int ksmd_should_run(void) 13496e158384SHugh Dickins { 13506e158384SHugh Dickins return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list); 13516e158384SHugh Dickins } 13526e158384SHugh Dickins 135331dbd01fSIzik Eidus static int ksm_scan_thread(void *nothing) 135431dbd01fSIzik Eidus { 1355339aa624SIzik Eidus set_user_nice(current, 5); 135631dbd01fSIzik Eidus 135731dbd01fSIzik Eidus while (!kthread_should_stop()) { 135831dbd01fSIzik Eidus mutex_lock(&ksm_thread_mutex); 13596e158384SHugh Dickins if (ksmd_should_run()) 136031dbd01fSIzik Eidus ksm_do_scan(ksm_thread_pages_to_scan); 136131dbd01fSIzik Eidus mutex_unlock(&ksm_thread_mutex); 13626e158384SHugh Dickins 13636e158384SHugh Dickins if (ksmd_should_run()) { 136431dbd01fSIzik Eidus schedule_timeout_interruptible( 136531dbd01fSIzik Eidus msecs_to_jiffies(ksm_thread_sleep_millisecs)); 136631dbd01fSIzik Eidus } else { 136731dbd01fSIzik Eidus wait_event_interruptible(ksm_thread_wait, 13686e158384SHugh Dickins ksmd_should_run() || kthread_should_stop()); 136931dbd01fSIzik Eidus } 137031dbd01fSIzik Eidus } 137131dbd01fSIzik Eidus return 0; 137231dbd01fSIzik Eidus } 137331dbd01fSIzik Eidus 1374f8af4da3SHugh Dickins int ksm_madvise(struct vm_area_struct *vma, unsigned long start, 1375f8af4da3SHugh Dickins unsigned long end, int advice, unsigned long *vm_flags) 1376f8af4da3SHugh Dickins { 1377f8af4da3SHugh Dickins struct mm_struct *mm = vma->vm_mm; 1378d952b791SHugh Dickins int err; 1379f8af4da3SHugh Dickins 1380f8af4da3SHugh Dickins switch (advice) { 1381f8af4da3SHugh Dickins case MADV_MERGEABLE: 1382f8af4da3SHugh Dickins /* 
1383f8af4da3SHugh Dickins * Be somewhat over-protective for now! 1384f8af4da3SHugh Dickins */ 1385f8af4da3SHugh Dickins if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | 1386f8af4da3SHugh Dickins VM_PFNMAP | VM_IO | VM_DONTEXPAND | 1387f8af4da3SHugh Dickins VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE | 13885ad64688SHugh Dickins VM_NONLINEAR | VM_MIXEDMAP | VM_SAO)) 1389f8af4da3SHugh Dickins return 0; /* just ignore the advice */ 1390f8af4da3SHugh Dickins 1391d952b791SHugh Dickins if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { 1392d952b791SHugh Dickins err = __ksm_enter(mm); 1393d952b791SHugh Dickins if (err) 1394d952b791SHugh Dickins return err; 1395d952b791SHugh Dickins } 1396f8af4da3SHugh Dickins 1397f8af4da3SHugh Dickins *vm_flags |= VM_MERGEABLE; 1398f8af4da3SHugh Dickins break; 1399f8af4da3SHugh Dickins 1400f8af4da3SHugh Dickins case MADV_UNMERGEABLE: 1401f8af4da3SHugh Dickins if (!(*vm_flags & VM_MERGEABLE)) 1402f8af4da3SHugh Dickins return 0; /* just ignore the advice */ 1403f8af4da3SHugh Dickins 1404d952b791SHugh Dickins if (vma->anon_vma) { 1405d952b791SHugh Dickins err = unmerge_ksm_pages(vma, start, end); 1406d952b791SHugh Dickins if (err) 1407d952b791SHugh Dickins return err; 1408d952b791SHugh Dickins } 1409f8af4da3SHugh Dickins 1410f8af4da3SHugh Dickins *vm_flags &= ~VM_MERGEABLE; 1411f8af4da3SHugh Dickins break; 1412f8af4da3SHugh Dickins } 1413f8af4da3SHugh Dickins 1414f8af4da3SHugh Dickins return 0; 1415f8af4da3SHugh Dickins } 1416f8af4da3SHugh Dickins 1417f8af4da3SHugh Dickins int __ksm_enter(struct mm_struct *mm) 1418f8af4da3SHugh Dickins { 14196e158384SHugh Dickins struct mm_slot *mm_slot; 14206e158384SHugh Dickins int needs_wakeup; 14216e158384SHugh Dickins 14226e158384SHugh Dickins mm_slot = alloc_mm_slot(); 142331dbd01fSIzik Eidus if (!mm_slot) 142431dbd01fSIzik Eidus return -ENOMEM; 142531dbd01fSIzik Eidus 14266e158384SHugh Dickins /* Check ksm_run too? Would need tighter locking */ 14276e158384SHugh Dickins needs_wakeup = list_empty(&ksm_mm_head.mm_list); 14286e158384SHugh Dickins 142931dbd01fSIzik Eidus spin_lock(&ksm_mmlist_lock); 143031dbd01fSIzik Eidus insert_to_mm_slots_hash(mm, mm_slot); 143131dbd01fSIzik Eidus /* 143231dbd01fSIzik Eidus * Insert just behind the scanning cursor, to let the area settle 143331dbd01fSIzik Eidus * down a little; when fork is followed by immediate exec, we don't 143431dbd01fSIzik Eidus * want ksmd to waste time setting up and tearing down an rmap_list. 143531dbd01fSIzik Eidus */ 143631dbd01fSIzik Eidus list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list); 143731dbd01fSIzik Eidus spin_unlock(&ksm_mmlist_lock); 143831dbd01fSIzik Eidus 1439f8af4da3SHugh Dickins set_bit(MMF_VM_MERGEABLE, &mm->flags); 14409ba69294SHugh Dickins atomic_inc(&mm->mm_count); 14416e158384SHugh Dickins 14426e158384SHugh Dickins if (needs_wakeup) 14436e158384SHugh Dickins wake_up_interruptible(&ksm_thread_wait); 14446e158384SHugh Dickins 1445f8af4da3SHugh Dickins return 0; 1446f8af4da3SHugh Dickins } 1447f8af4da3SHugh Dickins 14481c2fb7a4SAndrea Arcangeli void __ksm_exit(struct mm_struct *mm) 1449f8af4da3SHugh Dickins { 1450cd551f97SHugh Dickins struct mm_slot *mm_slot; 14519ba69294SHugh Dickins int easy_to_free = 0; 1452cd551f97SHugh Dickins 145331dbd01fSIzik Eidus /* 14549ba69294SHugh Dickins * This process is exiting: if it's straightforward (as is the 14559ba69294SHugh Dickins * case when ksmd was never running), free mm_slot immediately. 
14569ba69294SHugh Dickins * But if it's at the cursor or has rmap_items linked to it, use 14579ba69294SHugh Dickins * mmap_sem to synchronize with any break_cows before pagetables 14589ba69294SHugh Dickins * are freed, and leave the mm_slot on the list for ksmd to free. 14599ba69294SHugh Dickins * Beware: ksm may already have noticed it exiting and freed the slot. 146031dbd01fSIzik Eidus */ 14619ba69294SHugh Dickins 1462cd551f97SHugh Dickins spin_lock(&ksm_mmlist_lock); 1463cd551f97SHugh Dickins mm_slot = get_mm_slot(mm); 14649ba69294SHugh Dickins if (mm_slot && ksm_scan.mm_slot != mm_slot) { 14656514d511SHugh Dickins if (!mm_slot->rmap_list) { 1466cd551f97SHugh Dickins hlist_del(&mm_slot->link); 1467cd551f97SHugh Dickins list_del(&mm_slot->mm_list); 14689ba69294SHugh Dickins easy_to_free = 1; 14699ba69294SHugh Dickins } else { 14709ba69294SHugh Dickins list_move(&mm_slot->mm_list, 14719ba69294SHugh Dickins &ksm_scan.mm_slot->mm_list); 14729ba69294SHugh Dickins } 14739ba69294SHugh Dickins } 1474cd551f97SHugh Dickins spin_unlock(&ksm_mmlist_lock); 1475cd551f97SHugh Dickins 14769ba69294SHugh Dickins if (easy_to_free) { 1477cd551f97SHugh Dickins free_mm_slot(mm_slot); 1478cd551f97SHugh Dickins clear_bit(MMF_VM_MERGEABLE, &mm->flags); 14799ba69294SHugh Dickins mmdrop(mm); 14809ba69294SHugh Dickins } else if (mm_slot) { 14819ba69294SHugh Dickins down_write(&mm->mmap_sem); 14829ba69294SHugh Dickins up_write(&mm->mmap_sem); 14839ba69294SHugh Dickins } 1484f8af4da3SHugh Dickins } 148531dbd01fSIzik Eidus 14865ad64688SHugh Dickins struct page *ksm_does_need_to_copy(struct page *page, 14875ad64688SHugh Dickins struct vm_area_struct *vma, unsigned long address) 14885ad64688SHugh Dickins { 14895ad64688SHugh Dickins struct page *new_page; 14905ad64688SHugh Dickins 14915ad64688SHugh Dickins unlock_page(page); /* any racers will COW it, not modify it */ 14925ad64688SHugh Dickins 14935ad64688SHugh Dickins new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); 14945ad64688SHugh Dickins if (new_page) { 14955ad64688SHugh Dickins copy_user_highpage(new_page, page, address, vma); 14965ad64688SHugh Dickins 14975ad64688SHugh Dickins SetPageDirty(new_page); 14985ad64688SHugh Dickins __SetPageUptodate(new_page); 14995ad64688SHugh Dickins SetPageSwapBacked(new_page); 15005ad64688SHugh Dickins __set_page_locked(new_page); 15015ad64688SHugh Dickins 15025ad64688SHugh Dickins if (page_evictable(new_page, vma)) 15035ad64688SHugh Dickins lru_cache_add_lru(new_page, LRU_ACTIVE_ANON); 15045ad64688SHugh Dickins else 15055ad64688SHugh Dickins add_page_to_unevictable_list(new_page); 15065ad64688SHugh Dickins } 15075ad64688SHugh Dickins 15085ad64688SHugh Dickins page_cache_release(page); 15095ad64688SHugh Dickins return new_page; 15105ad64688SHugh Dickins } 15115ad64688SHugh Dickins 15125ad64688SHugh Dickins int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg, 15135ad64688SHugh Dickins unsigned long *vm_flags) 15145ad64688SHugh Dickins { 15155ad64688SHugh Dickins struct stable_node *stable_node; 15165ad64688SHugh Dickins struct rmap_item *rmap_item; 15175ad64688SHugh Dickins struct hlist_node *hlist; 15185ad64688SHugh Dickins unsigned int mapcount = page_mapcount(page); 15195ad64688SHugh Dickins int referenced = 0; 1520*db114b83SHugh Dickins int search_new_forks = 0; 15215ad64688SHugh Dickins 15225ad64688SHugh Dickins VM_BUG_ON(!PageKsm(page)); 15235ad64688SHugh Dickins VM_BUG_ON(!PageLocked(page)); 15245ad64688SHugh Dickins 15255ad64688SHugh Dickins stable_node = page_stable_node(page); 
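	/*
	 * Only a ksm page anchored in the stable tree has a stable_node,
	 * with its hlist of rmap_items for us to walk: if it has none,
	 * there are no mappings to report as referenced.
	 */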
15265ad64688SHugh Dickins if (!stable_node) 15275ad64688SHugh Dickins return 0; 1528*db114b83SHugh Dickins again: 15295ad64688SHugh Dickins hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { 1530*db114b83SHugh Dickins struct anon_vma *anon_vma = rmap_item->anon_vma; 1531*db114b83SHugh Dickins struct vm_area_struct *vma; 1532*db114b83SHugh Dickins 1533*db114b83SHugh Dickins spin_lock(&anon_vma->lock); 1534*db114b83SHugh Dickins list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { 1535*db114b83SHugh Dickins if (rmap_item->address < vma->vm_start || 1536*db114b83SHugh Dickins rmap_item->address >= vma->vm_end) 1537*db114b83SHugh Dickins continue; 1538*db114b83SHugh Dickins /* 1539*db114b83SHugh Dickins * Initially we examine only the vma which covers this 1540*db114b83SHugh Dickins * rmap_item; but later, if there is still work to do, 1541*db114b83SHugh Dickins * we examine covering vmas in other mms: in case they 1542*db114b83SHugh Dickins * were forked from the original since ksmd passed. 1543*db114b83SHugh Dickins */ 1544*db114b83SHugh Dickins if ((rmap_item->mm == vma->vm_mm) == search_new_forks) 15455ad64688SHugh Dickins continue; 15465ad64688SHugh Dickins 1547*db114b83SHugh Dickins if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) 1548*db114b83SHugh Dickins continue; 15495ad64688SHugh Dickins 15505ad64688SHugh Dickins referenced += page_referenced_one(page, vma, 15515ad64688SHugh Dickins rmap_item->address, &mapcount, vm_flags); 1552*db114b83SHugh Dickins if (!search_new_forks || !mapcount) 1553*db114b83SHugh Dickins break; 1554*db114b83SHugh Dickins } 1555*db114b83SHugh Dickins spin_unlock(&anon_vma->lock); 15565ad64688SHugh Dickins if (!mapcount) 15575ad64688SHugh Dickins goto out; 15585ad64688SHugh Dickins } 1559*db114b83SHugh Dickins if (!search_new_forks++) 1560*db114b83SHugh Dickins goto again; 15615ad64688SHugh Dickins out: 15625ad64688SHugh Dickins return referenced; 15635ad64688SHugh Dickins } 15645ad64688SHugh Dickins 15655ad64688SHugh Dickins int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) 15665ad64688SHugh Dickins { 15675ad64688SHugh Dickins struct stable_node *stable_node; 15685ad64688SHugh Dickins struct hlist_node *hlist; 15695ad64688SHugh Dickins struct rmap_item *rmap_item; 15705ad64688SHugh Dickins int ret = SWAP_AGAIN; 1571*db114b83SHugh Dickins int search_new_forks = 0; 15725ad64688SHugh Dickins 15735ad64688SHugh Dickins VM_BUG_ON(!PageKsm(page)); 15745ad64688SHugh Dickins VM_BUG_ON(!PageLocked(page)); 15755ad64688SHugh Dickins 15765ad64688SHugh Dickins stable_node = page_stable_node(page); 15775ad64688SHugh Dickins if (!stable_node) 15785ad64688SHugh Dickins return SWAP_FAIL; 1579*db114b83SHugh Dickins again: 15805ad64688SHugh Dickins hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { 1581*db114b83SHugh Dickins struct anon_vma *anon_vma = rmap_item->anon_vma; 1582*db114b83SHugh Dickins struct vm_area_struct *vma; 15835ad64688SHugh Dickins 1584*db114b83SHugh Dickins spin_lock(&anon_vma->lock); 1585*db114b83SHugh Dickins list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { 1586*db114b83SHugh Dickins if (rmap_item->address < vma->vm_start || 1587*db114b83SHugh Dickins rmap_item->address >= vma->vm_end) 1588*db114b83SHugh Dickins continue; 1589*db114b83SHugh Dickins /* 1590*db114b83SHugh Dickins * Initially we examine only the vma which covers this 1591*db114b83SHugh Dickins * rmap_item; but later, if there is still work to do, 1592*db114b83SHugh Dickins * we examine covering vmas in other mms: in case they 
1593*db114b83SHugh Dickins * were forked from the original since ksmd passed. 1594*db114b83SHugh Dickins */ 1595*db114b83SHugh Dickins if ((rmap_item->mm == vma->vm_mm) == search_new_forks) 1596*db114b83SHugh Dickins continue; 1597*db114b83SHugh Dickins 1598*db114b83SHugh Dickins ret = try_to_unmap_one(page, vma, 1599*db114b83SHugh Dickins rmap_item->address, flags); 1600*db114b83SHugh Dickins if (ret != SWAP_AGAIN || !page_mapped(page)) { 1601*db114b83SHugh Dickins spin_unlock(&anon_vma->lock); 16025ad64688SHugh Dickins goto out; 16035ad64688SHugh Dickins } 1604*db114b83SHugh Dickins } 1605*db114b83SHugh Dickins spin_unlock(&anon_vma->lock); 1606*db114b83SHugh Dickins } 1607*db114b83SHugh Dickins if (!search_new_forks++) 1608*db114b83SHugh Dickins goto again; 16095ad64688SHugh Dickins out: 16105ad64688SHugh Dickins return ret; 16115ad64688SHugh Dickins } 16125ad64688SHugh Dickins 16132ffd8679SHugh Dickins #ifdef CONFIG_SYSFS 16142ffd8679SHugh Dickins /* 16152ffd8679SHugh Dickins * This all compiles without CONFIG_SYSFS, but is a waste of space. 16162ffd8679SHugh Dickins */ 16172ffd8679SHugh Dickins 161831dbd01fSIzik Eidus #define KSM_ATTR_RO(_name) \ 161931dbd01fSIzik Eidus static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 162031dbd01fSIzik Eidus #define KSM_ATTR(_name) \ 162131dbd01fSIzik Eidus static struct kobj_attribute _name##_attr = \ 162231dbd01fSIzik Eidus __ATTR(_name, 0644, _name##_show, _name##_store) 162331dbd01fSIzik Eidus 162431dbd01fSIzik Eidus static ssize_t sleep_millisecs_show(struct kobject *kobj, 162531dbd01fSIzik Eidus struct kobj_attribute *attr, char *buf) 162631dbd01fSIzik Eidus { 162731dbd01fSIzik Eidus return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs); 162831dbd01fSIzik Eidus } 162931dbd01fSIzik Eidus 163031dbd01fSIzik Eidus static ssize_t sleep_millisecs_store(struct kobject *kobj, 163131dbd01fSIzik Eidus struct kobj_attribute *attr, 163231dbd01fSIzik Eidus const char *buf, size_t count) 163331dbd01fSIzik Eidus { 163431dbd01fSIzik Eidus unsigned long msecs; 163531dbd01fSIzik Eidus int err; 163631dbd01fSIzik Eidus 163731dbd01fSIzik Eidus err = strict_strtoul(buf, 10, &msecs); 163831dbd01fSIzik Eidus if (err || msecs > UINT_MAX) 163931dbd01fSIzik Eidus return -EINVAL; 164031dbd01fSIzik Eidus 164131dbd01fSIzik Eidus ksm_thread_sleep_millisecs = msecs; 164231dbd01fSIzik Eidus 164331dbd01fSIzik Eidus return count; 164431dbd01fSIzik Eidus } 164531dbd01fSIzik Eidus KSM_ATTR(sleep_millisecs); 164631dbd01fSIzik Eidus 164731dbd01fSIzik Eidus static ssize_t pages_to_scan_show(struct kobject *kobj, 164831dbd01fSIzik Eidus struct kobj_attribute *attr, char *buf) 164931dbd01fSIzik Eidus { 165031dbd01fSIzik Eidus return sprintf(buf, "%u\n", ksm_thread_pages_to_scan); 165131dbd01fSIzik Eidus } 165231dbd01fSIzik Eidus 165331dbd01fSIzik Eidus static ssize_t pages_to_scan_store(struct kobject *kobj, 165431dbd01fSIzik Eidus struct kobj_attribute *attr, 165531dbd01fSIzik Eidus const char *buf, size_t count) 165631dbd01fSIzik Eidus { 165731dbd01fSIzik Eidus int err; 165831dbd01fSIzik Eidus unsigned long nr_pages; 165931dbd01fSIzik Eidus 166031dbd01fSIzik Eidus err = strict_strtoul(buf, 10, &nr_pages); 166131dbd01fSIzik Eidus if (err || nr_pages > UINT_MAX) 166231dbd01fSIzik Eidus return -EINVAL; 166331dbd01fSIzik Eidus 166431dbd01fSIzik Eidus ksm_thread_pages_to_scan = nr_pages; 166531dbd01fSIzik Eidus 166631dbd01fSIzik Eidus return count; 166731dbd01fSIzik Eidus } 166831dbd01fSIzik Eidus KSM_ATTR(pages_to_scan); 166931dbd01fSIzik Eidus 167031dbd01fSIzik Eidus static 
ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr, 167131dbd01fSIzik Eidus char *buf) 167231dbd01fSIzik Eidus { 167331dbd01fSIzik Eidus return sprintf(buf, "%u\n", ksm_run); 167431dbd01fSIzik Eidus } 167531dbd01fSIzik Eidus 167631dbd01fSIzik Eidus static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, 167731dbd01fSIzik Eidus const char *buf, size_t count) 167831dbd01fSIzik Eidus { 167931dbd01fSIzik Eidus int err; 168031dbd01fSIzik Eidus unsigned long flags; 168131dbd01fSIzik Eidus 168231dbd01fSIzik Eidus err = strict_strtoul(buf, 10, &flags); 168331dbd01fSIzik Eidus if (err || flags > UINT_MAX) 168431dbd01fSIzik Eidus return -EINVAL; 168531dbd01fSIzik Eidus if (flags > KSM_RUN_UNMERGE) 168631dbd01fSIzik Eidus return -EINVAL; 168731dbd01fSIzik Eidus 168831dbd01fSIzik Eidus /* 168931dbd01fSIzik Eidus * KSM_RUN_MERGE sets ksmd running, and 0 stops it running. 169031dbd01fSIzik Eidus * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items, 1691b4028260SHugh Dickins * breaking COW to free the unswappable pages_shared (but leaves 169231dbd01fSIzik Eidus * mm_slots on the list for when ksmd may be set running again). 169331dbd01fSIzik Eidus */ 169431dbd01fSIzik Eidus 169531dbd01fSIzik Eidus mutex_lock(&ksm_thread_mutex); 169631dbd01fSIzik Eidus if (ksm_run != flags) { 169731dbd01fSIzik Eidus ksm_run = flags; 1698d952b791SHugh Dickins if (flags & KSM_RUN_UNMERGE) { 169935451beeSHugh Dickins current->flags |= PF_OOM_ORIGIN; 1700d952b791SHugh Dickins err = unmerge_and_remove_all_rmap_items(); 170135451beeSHugh Dickins current->flags &= ~PF_OOM_ORIGIN; 1702d952b791SHugh Dickins if (err) { 1703d952b791SHugh Dickins ksm_run = KSM_RUN_STOP; 1704d952b791SHugh Dickins count = err; 1705d952b791SHugh Dickins } 1706d952b791SHugh Dickins } 170731dbd01fSIzik Eidus } 170831dbd01fSIzik Eidus mutex_unlock(&ksm_thread_mutex); 170931dbd01fSIzik Eidus 171031dbd01fSIzik Eidus if (flags & KSM_RUN_MERGE) 171131dbd01fSIzik Eidus wake_up_interruptible(&ksm_thread_wait); 171231dbd01fSIzik Eidus 171331dbd01fSIzik Eidus return count; 171431dbd01fSIzik Eidus } 171531dbd01fSIzik Eidus KSM_ATTR(run); 171631dbd01fSIzik Eidus 171731dbd01fSIzik Eidus static ssize_t max_kernel_pages_store(struct kobject *kobj, 171831dbd01fSIzik Eidus struct kobj_attribute *attr, 171931dbd01fSIzik Eidus const char *buf, size_t count) 172031dbd01fSIzik Eidus { 172131dbd01fSIzik Eidus int err; 172231dbd01fSIzik Eidus unsigned long nr_pages; 172331dbd01fSIzik Eidus 172431dbd01fSIzik Eidus err = strict_strtoul(buf, 10, &nr_pages); 172531dbd01fSIzik Eidus if (err) 172631dbd01fSIzik Eidus return -EINVAL; 172731dbd01fSIzik Eidus 172831dbd01fSIzik Eidus ksm_max_kernel_pages = nr_pages; 172931dbd01fSIzik Eidus 173031dbd01fSIzik Eidus return count; 173131dbd01fSIzik Eidus } 173231dbd01fSIzik Eidus 173331dbd01fSIzik Eidus static ssize_t max_kernel_pages_show(struct kobject *kobj, 173431dbd01fSIzik Eidus struct kobj_attribute *attr, char *buf) 173531dbd01fSIzik Eidus { 173631dbd01fSIzik Eidus return sprintf(buf, "%lu\n", ksm_max_kernel_pages); 173731dbd01fSIzik Eidus } 173831dbd01fSIzik Eidus KSM_ATTR(max_kernel_pages); 173931dbd01fSIzik Eidus 1740b4028260SHugh Dickins static ssize_t pages_shared_show(struct kobject *kobj, 1741b4028260SHugh Dickins struct kobj_attribute *attr, char *buf) 1742b4028260SHugh Dickins { 1743b4028260SHugh Dickins return sprintf(buf, "%lu\n", ksm_pages_shared); 1744b4028260SHugh Dickins } 1745b4028260SHugh Dickins KSM_ATTR_RO(pages_shared); 1746b4028260SHugh Dickins 1747b4028260SHugh 
Dickins static ssize_t pages_sharing_show(struct kobject *kobj, 1748b4028260SHugh Dickins struct kobj_attribute *attr, char *buf) 1749b4028260SHugh Dickins { 1750e178dfdeSHugh Dickins return sprintf(buf, "%lu\n", ksm_pages_sharing); 1751b4028260SHugh Dickins } 1752b4028260SHugh Dickins KSM_ATTR_RO(pages_sharing); 1753b4028260SHugh Dickins 1754473b0ce4SHugh Dickins static ssize_t pages_unshared_show(struct kobject *kobj, 1755473b0ce4SHugh Dickins struct kobj_attribute *attr, char *buf) 1756473b0ce4SHugh Dickins { 1757473b0ce4SHugh Dickins return sprintf(buf, "%lu\n", ksm_pages_unshared); 1758473b0ce4SHugh Dickins } 1759473b0ce4SHugh Dickins KSM_ATTR_RO(pages_unshared); 1760473b0ce4SHugh Dickins 1761473b0ce4SHugh Dickins static ssize_t pages_volatile_show(struct kobject *kobj, 1762473b0ce4SHugh Dickins struct kobj_attribute *attr, char *buf) 1763473b0ce4SHugh Dickins { 1764473b0ce4SHugh Dickins long ksm_pages_volatile; 1765473b0ce4SHugh Dickins 1766473b0ce4SHugh Dickins ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared 1767473b0ce4SHugh Dickins - ksm_pages_sharing - ksm_pages_unshared; 1768473b0ce4SHugh Dickins /* 1769473b0ce4SHugh Dickins * It was not worth any locking to calculate that statistic, 1770473b0ce4SHugh Dickins * but it might therefore sometimes be negative: conceal that. 1771473b0ce4SHugh Dickins */ 1772473b0ce4SHugh Dickins if (ksm_pages_volatile < 0) 1773473b0ce4SHugh Dickins ksm_pages_volatile = 0; 1774473b0ce4SHugh Dickins return sprintf(buf, "%ld\n", ksm_pages_volatile); 1775473b0ce4SHugh Dickins } 1776473b0ce4SHugh Dickins KSM_ATTR_RO(pages_volatile); 1777473b0ce4SHugh Dickins 1778473b0ce4SHugh Dickins static ssize_t full_scans_show(struct kobject *kobj, 1779473b0ce4SHugh Dickins struct kobj_attribute *attr, char *buf) 1780473b0ce4SHugh Dickins { 1781473b0ce4SHugh Dickins return sprintf(buf, "%lu\n", ksm_scan.seqnr); 1782473b0ce4SHugh Dickins } 1783473b0ce4SHugh Dickins KSM_ATTR_RO(full_scans); 1784473b0ce4SHugh Dickins 178531dbd01fSIzik Eidus static struct attribute *ksm_attrs[] = { 178631dbd01fSIzik Eidus &sleep_millisecs_attr.attr, 178731dbd01fSIzik Eidus &pages_to_scan_attr.attr, 178831dbd01fSIzik Eidus &run_attr.attr, 178931dbd01fSIzik Eidus &max_kernel_pages_attr.attr, 1790b4028260SHugh Dickins &pages_shared_attr.attr, 1791b4028260SHugh Dickins &pages_sharing_attr.attr, 1792473b0ce4SHugh Dickins &pages_unshared_attr.attr, 1793473b0ce4SHugh Dickins &pages_volatile_attr.attr, 1794473b0ce4SHugh Dickins &full_scans_attr.attr, 179531dbd01fSIzik Eidus NULL, 179631dbd01fSIzik Eidus }; 179731dbd01fSIzik Eidus 179831dbd01fSIzik Eidus static struct attribute_group ksm_attr_group = { 179931dbd01fSIzik Eidus .attrs = ksm_attrs, 180031dbd01fSIzik Eidus .name = "ksm", 180131dbd01fSIzik Eidus }; 18022ffd8679SHugh Dickins #endif /* CONFIG_SYSFS */ 180331dbd01fSIzik Eidus 180431dbd01fSIzik Eidus static int __init ksm_init(void) 180531dbd01fSIzik Eidus { 180631dbd01fSIzik Eidus struct task_struct *ksm_thread; 180731dbd01fSIzik Eidus int err; 180831dbd01fSIzik Eidus 1809c73602adSHugh Dickins ksm_max_kernel_pages = totalram_pages / 4; 18102c6854fdSIzik Eidus 181131dbd01fSIzik Eidus err = ksm_slab_init(); 181231dbd01fSIzik Eidus if (err) 181331dbd01fSIzik Eidus goto out; 181431dbd01fSIzik Eidus 181531dbd01fSIzik Eidus err = mm_slots_hash_init(); 181631dbd01fSIzik Eidus if (err) 181731dbd01fSIzik Eidus goto out_free1; 181831dbd01fSIzik Eidus 181931dbd01fSIzik Eidus ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd"); 182031dbd01fSIzik Eidus if (IS_ERR(ksm_thread)) { 
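		/* kthread_run() reports failure via ERR_PTR(), never by returning NULL */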
182131dbd01fSIzik Eidus printk(KERN_ERR "ksm: creating kthread failed\n"); 182231dbd01fSIzik Eidus err = PTR_ERR(ksm_thread); 182331dbd01fSIzik Eidus goto out_free2; 182431dbd01fSIzik Eidus } 182531dbd01fSIzik Eidus 18262ffd8679SHugh Dickins #ifdef CONFIG_SYSFS 182731dbd01fSIzik Eidus err = sysfs_create_group(mm_kobj, &ksm_attr_group); 182831dbd01fSIzik Eidus if (err) { 182931dbd01fSIzik Eidus printk(KERN_ERR "ksm: register sysfs failed\n"); 18302ffd8679SHugh Dickins kthread_stop(ksm_thread); 18312ffd8679SHugh Dickins goto out_free2; 183231dbd01fSIzik Eidus } 1833c73602adSHugh Dickins #else 1834c73602adSHugh Dickins ksm_run = KSM_RUN_MERGE; /* no way for user to start it */ 1835c73602adSHugh Dickins 18362ffd8679SHugh Dickins #endif /* CONFIG_SYSFS */ 183731dbd01fSIzik Eidus 183831dbd01fSIzik Eidus return 0; 183931dbd01fSIzik Eidus 184031dbd01fSIzik Eidus out_free2: 184131dbd01fSIzik Eidus mm_slots_hash_free(); 184231dbd01fSIzik Eidus out_free1: 184331dbd01fSIzik Eidus ksm_slab_free(); 184431dbd01fSIzik Eidus out: 184531dbd01fSIzik Eidus return err; 184631dbd01fSIzik Eidus } 184731dbd01fSIzik Eidus module_init(ksm_init) 1848
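/*
 * Illustrative userspace usage (a sketch, not part of this file): an
 * application opts a mapped anonymous region into merging via madvise(2),
 * and ksmd is then started and tuned through the sysfs files registered
 * above under /sys/kernel/mm/ksm/. The addr and length values below are
 * hypothetical.
 *
 *	madvise(addr, length, MADV_MERGEABLE);
 *
 * and from a root shell, for example:
 *
 *	echo 100 > /sys/kernel/mm/ksm/pages_to_scan
 *	echo 20 > /sys/kernel/mm/ksm/sleep_millisecs
 *	echo 1 > /sys/kernel/mm/ksm/run
 */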