// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork()
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Authors:
 *	Izik Eidus
 *	Andrea Arcangeli
 *	Chris Wright
 *	Hugh Dickins
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/xxhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>
#include <linux/hashtable.h>
#include <linux/freezer.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/pagewalk.h>

#include <asm/tlbflush.h>
#include "internal.h"
#include "mm_slot.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ksm.h>

#ifdef CONFIG_NUMA
#define NUMA(x)		(x)
#define DO_NUMA(x)	do { (x); } while (0)
#else
#define NUMA(x)		(0)
#define DO_NUMA(x)	do { } while (0)
#endif

/**
 * DOC: Overview
 *
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents.  Because each such page is write-protected, searching on
 * this tree is fully assured to be working (except when pages are unmapped),
 * and therefore this tree is called the stable tree.
 *
 * The stable tree node includes information required for reverse
 * mapping from a KSM page to virtual addresses that map this page.
 *
 * In order to avoid large latencies of the rmap walks on KSM pages,
 * KSM maintains two types of nodes in the stable tree:
 *
 * * the regular nodes that keep the reverse mapping structures in a
 *   linked list
 * * the "chains" that link nodes ("dups") that represent the same
 *   write protected memory content, but each "dup" corresponds to a
 *   different KSM page copy of that content
 *
 * Internally, the regular nodes, "dups" and "chains" are represented
 * using the same struct ksm_stable_node structure.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time".  The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree, pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a red-black tree - so its balancing is based on the
 *    colors of the nodes and not on their contents, assuring that even when
 *    the tree gets "corrupted" it won't get out of balance, so scanning time
 *    remains the same (also, searching and inserting nodes in an rbtree uses
 *    the same algorithm, so we have no overhead when we flush and rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree.  (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
 *
 * If the merge_across_nodes tunable is unset, then KSM maintains multiple
 * stable trees and multiple unstable trees: one of each for each NUMA node.
 */

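/*
 * Userspace opts individual areas into KSM with, for example,
 * madvise(addr, length, MADV_MERGEABLE), or a whole process with
 * prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0); ksmd itself is started and
 * tuned through the knobs under /sys/kernel/mm/ksm/.
 */
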
/**
 * struct ksm_mm_slot - ksm information per mm that is being scanned
 * @slot: hash lookup from mm to mm_slot
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 */
struct ksm_mm_slot {
	struct mm_slot slot;
	struct ksm_rmap_item *rmap_list;
};

/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
	struct ksm_mm_slot *mm_slot;
	unsigned long address;
	struct ksm_rmap_item **rmap_list;
	unsigned long seqnr;
};

/**
 * struct ksm_stable_node - node of the stable rbtree
 * @node: rb node of this ksm page in the stable tree
 * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
 * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
 * @list: linked into migrate_nodes, pending placement in the proper node tree
 * @hlist: hlist head of rmap_items using this ksm page
 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
 * @chain_prune_time: time of the last full garbage collection
 * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN
 * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
 */
struct ksm_stable_node {
	union {
		struct rb_node node;	/* when node of stable tree */
		struct {		/* when listed for migration */
			struct list_head *head;
			struct {
				struct hlist_node hlist_dup;
				struct list_head list;
			};
		};
	};
	struct hlist_head hlist;
	union {
		unsigned long kpfn;
		unsigned long chain_prune_time;
	};
	/*
	 * STABLE_NODE_CHAIN can be any negative number in
	 * rmap_hlist_len negative range, but better not -1 to be able
	 * to reliably detect underflows.
	 */
#define STABLE_NODE_CHAIN -1024
	int rmap_hlist_len;
#ifdef CONFIG_NUMA
	int nid;
#endif
};

/**
 * struct ksm_rmap_item - reverse mapping item for virtual addresses
 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
 * @nid: NUMA node id of unstable tree in which linked (may not match page)
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @node: rb node of this rmap_item in the unstable tree
 * @head: pointer to stable_node heading this list in the stable tree
 * @hlist: link into hlist of rmap_items hanging off that stable_node
 */
struct ksm_rmap_item {
	struct ksm_rmap_item *rmap_list;
	union {
		struct anon_vma *anon_vma;	/* when stable */
#ifdef CONFIG_NUMA
		int nid;		/* when node of unstable tree */
#endif
	};
	struct mm_struct *mm;
	unsigned long address;		/* + low bits used for flags below */
	unsigned int oldchecksum;	/* when unstable */
	union {
		struct rb_node node;	/* when node of unstable tree */
		struct {		/* when listed from stable tree */
			struct ksm_stable_node *head;
			struct hlist_node hlist;
		};
	};
};

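/*
 * rmap_item->address is always page aligned, so the low bits are free to
 * carry state: UNSTABLE_FLAG/STABLE_FLAG record which tree the item is
 * linked into, and for unstable items the low byte also holds the scan
 * seqnr (SEQNR_MASK), letting remove_rmap_item_from_tree() tell whether
 * the node was inserted during the current scan or left over from before.
 */
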
#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
#define STABLE_FLAG	0x200	/* is listed from the stable tree */

/* The stable and unstable tree heads */
static struct rb_root one_stable_tree[1] = { RB_ROOT };
static struct rb_root one_unstable_tree[1] = { RB_ROOT };
static struct rb_root *root_stable_tree = one_stable_tree;
static struct rb_root *root_unstable_tree = one_unstable_tree;

/* Recently migrated nodes of stable tree, pending proper placement */
static LIST_HEAD(migrate_nodes);
#define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)

#define MM_SLOTS_HASH_BITS 10
static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct ksm_mm_slot ksm_mm_head = {
	.slot.mm_node = LIST_HEAD_INIT(ksm_mm_head.slot.mm_node),
};
static struct ksm_scan ksm_scan = {
	.mm_slot = &ksm_mm_head,
};

static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* The number of stable_node chains */
static unsigned long ksm_stable_node_chains;

/* The number of stable_node dups linked to the stable_node chains */
static unsigned long ksm_stable_node_dups;

/* Delay in pruning stale stable_node_dups in the stable_node_chains */
static unsigned int ksm_stable_node_chains_prune_millisecs = 2000;

/* Maximum number of page slots sharing a stable node */
static int ksm_max_page_sharing = 256;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = 100;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;

/* Checksum of an empty (zeroed) page */
static unsigned int zero_checksum __read_mostly;

/* Whether to merge empty (zeroed) pages with actual zero pages */
static bool ksm_use_zero_pages __read_mostly;

/* The number of zero pages which is placed by KSM */
unsigned long ksm_zero_pages;

#ifdef CONFIG_NUMA
/* Zeroed when merging across nodes is not allowed */
static unsigned int ksm_merge_across_nodes = 1;
static int ksm_nr_node_ids = 1;
#else
#define ksm_merge_across_nodes	1U
#define ksm_nr_node_ids		1
#endif

#define KSM_RUN_STOP	0
#define KSM_RUN_MERGE	1
#define KSM_RUN_UNMERGE	2
#define KSM_RUN_OFFLINE	4
static unsigned long ksm_run = KSM_RUN_STOP;
static void wait_while_offlining(void);

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);

#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)

static int __init ksm_slab_init(void)
{
	rmap_item_cache = KSM_KMEM_CACHE(ksm_rmap_item, 0);
	if (!rmap_item_cache)
		goto out;

	stable_node_cache = KSM_KMEM_CACHE(ksm_stable_node, 0);
	if (!stable_node_cache)
		goto out_free1;

	mm_slot_cache = KSM_KMEM_CACHE(ksm_mm_slot, 0);
	if (!mm_slot_cache)
		goto out_free2;

	return 0;

out_free2:
	kmem_cache_destroy(stable_node_cache);
out_free1:
	kmem_cache_destroy(rmap_item_cache);
out:
	return -ENOMEM;
}

static void __init ksm_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	kmem_cache_destroy(stable_node_cache);
	kmem_cache_destroy(rmap_item_cache);
	mm_slot_cache = NULL;
}

static __always_inline bool is_stable_node_chain(struct ksm_stable_node *chain)
{
	return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
}

static __always_inline bool is_stable_node_dup(struct ksm_stable_node *dup)
{
	return dup->head == STABLE_NODE_DUP_HEAD;
}

static inline void stable_node_chain_add_dup(struct ksm_stable_node *dup,
					     struct ksm_stable_node *chain)
{
	VM_BUG_ON(is_stable_node_dup(dup));
	dup->head = STABLE_NODE_DUP_HEAD;
	VM_BUG_ON(!is_stable_node_chain(chain));
	hlist_add_head(&dup->hlist_dup, &chain->hlist);
	ksm_stable_node_dups++;
}

static inline void __stable_node_dup_del(struct ksm_stable_node *dup)
{
	VM_BUG_ON(!is_stable_node_dup(dup));
	hlist_del(&dup->hlist_dup);
	ksm_stable_node_dups--;
}

static inline void stable_node_dup_del(struct ksm_stable_node *dup)
{
	VM_BUG_ON(is_stable_node_chain(dup));
	if (is_stable_node_dup(dup))
		__stable_node_dup_del(dup);
	else
		rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));
#ifdef CONFIG_DEBUG_VM
	dup->head = NULL;
#endif
}

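/*
 * rmap_item allocation is best effort: __GFP_NORETRY | __GFP_NOWARN keep
 * ksmd from stalling in the allocator or spamming the log under memory
 * pressure; on failure the page is simply left for a later scan pass.
 */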
static inline struct ksm_rmap_item *alloc_rmap_item(void)
{
	struct ksm_rmap_item *rmap_item;

	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
						__GFP_NORETRY | __GFP_NOWARN);
	if (rmap_item)
		ksm_rmap_items++;
	return rmap_item;
}

static inline void free_rmap_item(struct ksm_rmap_item *rmap_item)
{
	ksm_rmap_items--;
	rmap_item->mm->ksm_rmap_items--;
	rmap_item->mm = NULL;	/* debug safety */
	kmem_cache_free(rmap_item_cache, rmap_item);
}

static inline struct ksm_stable_node *alloc_stable_node(void)
{
	/*
	 * The allocation can take too long with GFP_KERNEL when memory is under
	 * pressure, which may lead to hung task warnings.  Adding __GFP_HIGH
	 * grants access to memory reserves, helping to avoid this problem.
	 */
	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
}

static inline void free_stable_node(struct ksm_stable_node *stable_node)
{
	VM_BUG_ON(stable_node->rmap_hlist_len &&
		  !is_stable_node_chain(stable_node));
	kmem_cache_free(stable_node_cache, stable_node);
}

/*
 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
 * page tables after it has passed through ksm_exit() - which, if necessary,
 * takes mmap_lock briefly to serialize against them.  ksm_exit() does not set
 * a special flag: they can just back out as soon as mm_users goes to zero.
 * ksm_test_exit() is used throughout to make this test for exit: in some
 * places for correctness, in some places just to avoid unnecessary work.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

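/*
 * Page-walk pmd_entry callback for break_ksm(): returns 1 if the pte at
 * addr currently maps a KSM page (including a KSM-placed zero page, or a
 * KSM page temporarily turned into a migration entry), 0 otherwise.
 */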
static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next,
			struct mm_walk *walk)
{
	struct page *page = NULL;
	spinlock_t *ptl;
	pte_t *pte;
	pte_t ptent;
	int ret;

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte)
		return 0;
	ptent = ptep_get(pte);
	if (pte_present(ptent)) {
		page = vm_normal_page(walk->vma, addr, ptent);
	} else if (!pte_none(ptent)) {
		swp_entry_t entry = pte_to_swp_entry(ptent);

		/*
		 * As KSM pages remain KSM pages until freed, no need to wait
		 * here for migration to end.
		 */
		if (is_migration_entry(entry))
			page = pfn_swap_entry_to_page(entry);
	}
	/* return 1 if the page is a normal ksm page or KSM-placed zero page */
	ret = (page && PageKsm(page)) || is_ksm_zero_pte(*pte);
	pte_unmap_unlock(pte, ptl);
	return ret;
}

static const struct mm_walk_ops break_ksm_ops = {
	.pmd_entry = break_ksm_pmd_entry,
};

/*
 * We use break_ksm to break COW on a ksm page by triggering unsharing,
 * such that the ksm page will get replaced by an exclusive anonymous page.
 *
 * We take great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem, where we would not want to touch it.
 *
 * FAULT_FLAG_REMOTE/FOLL_REMOTE are because we do this outside the context
 * of the process that owns 'vma'.  We also do not want to enforce
 * protection keys here anyway.
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
{
	vm_fault_t ret = 0;

	do {
		int ksm_page;

		cond_resched();
		ksm_page = walk_page_range_vma(vma, addr, addr + 1,
					       &break_ksm_ops, NULL);
		if (WARN_ON_ONCE(ksm_page < 0))
			return ksm_page;
		if (!ksm_page)
			return 0;
		ret = handle_mm_fault(vma, addr,
				      FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE,
				      NULL);
	} while (!(ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
	/*
	 * We must loop until we no longer find a KSM page because
	 * handle_mm_fault() may back out if there's any difficulty e.g. if
	 * pte accessed bit gets updated concurrently.
	 *
	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
	 * backing file, which also invalidates anonymous pages: that's
	 * okay, that truncation will have unmapped the PageKsm for us.
	 *
	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
	 * current task has TIF_MEMDIE set, and will be OOM killed on return
	 * to user; and ksmd, having no mm, would never be chosen for that.
	 *
	 * But if the mm is in a limited mem_cgroup, then the fault may fail
	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
	 * even ksmd can fail in this way - though it's usually breaking ksm
	 * just to undo a merge it made a moment before, so unlikely to oom.
	 *
	 * That's a pity: we might therefore have more kernel pages allocated
	 * than we're counting as nodes in the stable tree; but ksm_do_scan
	 * will retry to break_cow on each pass, so should recover the page
	 * in due course.  The important thing is to not let VM_MERGEABLE
	 * be cleared while any such pages might remain in the area.
	 */
	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}

static bool vma_ksm_compatible(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_SHARED  | VM_MAYSHARE   | VM_PFNMAP  |
			     VM_IO      | VM_DONTEXPAND | VM_HUGETLB |
			     VM_MIXEDMAP))
		return false;		/* just ignore the advice */

	if (vma_is_dax(vma))
		return false;

#ifdef VM_SAO
	if (vma->vm_flags & VM_SAO)
		return false;
#endif
#ifdef VM_SPARC_ADI
	if (vma->vm_flags & VM_SPARC_ADI)
		return false;
#endif

	return true;
}

static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
		unsigned long addr)
{
	struct vm_area_struct *vma;
	if (ksm_test_exit(mm))
		return NULL;
	vma = vma_lookup(mm, addr);
	if (!vma || !(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		return NULL;
	return vma;
}

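/*
 * break_cow() undoes a merge for the page mapped at this rmap_item's
 * mm/address, typically because the other half of the merge failed or the
 * item is being torn down: drop the stable anon_vma reference and break
 * COW so the process regains a private copy.
 */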
static void break_cow(struct ksm_rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;

	/*
	 * It is not an accident that whenever we want to break COW
	 * to undo, we also need to drop a reference to the anon_vma.
	 */
	put_anon_vma(rmap_item->anon_vma);

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, addr);
	if (vma)
		break_ksm(vma, addr);
	mmap_read_unlock(mm);
}

static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;
	struct page *page;

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, addr);
	if (!vma)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	if (is_zone_device_page(page))
		goto out_putpage;
	if (PageAnon(page)) {
		flush_anon_page(vma, page, addr);
		flush_dcache_page(page);
	} else {
out_putpage:
		put_page(page);
out:
		page = NULL;
	}
	mmap_read_unlock(mm);
	return page;
}

/*
 * This helper is used for getting the right index into the array of tree
 * roots.  When the merge_across_nodes knob is set to 1, there are only two
 * rb-trees for stable and unstable pages from all nodes, with roots in
 * index 0.  Otherwise, every node has its own stable and unstable tree.
 */
static inline int get_kpfn_nid(unsigned long kpfn)
{
	return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
}

static struct ksm_stable_node *alloc_stable_node_chain(struct ksm_stable_node *dup,
						   struct rb_root *root)
{
	struct ksm_stable_node *chain = alloc_stable_node();
	VM_BUG_ON(is_stable_node_chain(dup));
	if (likely(chain)) {
		INIT_HLIST_HEAD(&chain->hlist);
		chain->chain_prune_time = jiffies;
		chain->rmap_hlist_len = STABLE_NODE_CHAIN;
#if defined (CONFIG_DEBUG_VM) && defined(CONFIG_NUMA)
		chain->nid = NUMA_NO_NODE;	/* debug */
#endif
		ksm_stable_node_chains++;

		/*
		 * Put the stable node chain in the first dimension of
		 * the stable tree and at the same time remove the old
		 * stable node.
		 */
		rb_replace_node(&dup->node, &chain->node, root);

		/*
		 * Move the old stable node to the second dimension
		 * queued in the hlist_dup.  The invariant is that all
		 * dup stable_nodes in the chain->hlist point to pages
		 * that are write protected and have the exact same
		 * content.
		 */
		stable_node_chain_add_dup(dup, chain);
	}
	return chain;
}

static inline void free_stable_node_chain(struct ksm_stable_node *chain,
					  struct rb_root *root)
{
	rb_erase(&chain->node, root);
	free_stable_node(chain);
	ksm_stable_node_chains--;
}

static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node)
{
	struct ksm_rmap_item *rmap_item;

	/* check it's not STABLE_NODE_CHAIN or negative */
	BUG_ON(stable_node->rmap_hlist_len < 0);

	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		if (rmap_item->hlist.next) {
			ksm_pages_sharing--;
			trace_ksm_remove_rmap_item(stable_node->kpfn, rmap_item, rmap_item->mm);
		} else {
			ksm_pages_shared--;
		}

		rmap_item->mm->ksm_merging_pages--;

		VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
		stable_node->rmap_hlist_len--;
		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;
		cond_resched();
	}

	/*
	 * We need the second aligned pointer of the migrate_nodes
	 * list_head to stay clear from the rb_parent_color union
	 * (aligned and different than any node) and also different
	 * from &migrate_nodes.  This will verify that future list.h changes
	 * don't break STABLE_NODE_DUP_HEAD.  Only recent gcc can handle it.
	 */
	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);

	trace_ksm_remove_ksm_page(stable_node->kpfn);
	if (stable_node->head == &migrate_nodes)
		list_del(&stable_node->list);
	else
		stable_node_dup_del(stable_node);
	free_stable_node(stable_node);
}

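/*
 * How get_ksm_page() should return the page: unlocked, locked, or locked
 * only if the trylock succeeds (GET_KSM_PAGE_TRYLOCK otherwise returns
 * ERR_PTR(-EBUSY)).
 */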
enum get_ksm_page_flags {
	GET_KSM_PAGE_NOLOCK,
	GET_KSM_PAGE_LOCK,
	GET_KSM_PAGE_TRYLOCK
};

/*
 * get_ksm_page: checks if the page indicated by the stable node
 * is still its ksm page, despite having held no reference to it.
 * In which case we can trust the content of the page, and it
 * returns the gotten page; but if the page has now been zapped,
 * remove the stale node from the stable tree and return NULL.
 * But beware, the stable node's page might be being migrated.
 *
 * You would expect the stable_node to hold a reference to the ksm page.
 * But if it increments the page's count, swapping out has to wait for
 * ksmd to come around again before it can free the page, which may take
 * seconds or even minutes: much too unresponsive.  So instead we use a
 * "keyhole reference": access to the ksm page from the stable node peeps
 * out through its keyhole to see if that page still holds the right key,
 * pointing back to this stable node.  This relies on freeing a PageAnon
 * page to reset its page->mapping to NULL, and relies on no other use of
 * a page to put something that might look like our key in page->mapping.
 * The page we peep at may even be on its way to being freed; that is an
 * anomaly to bear in mind.
 */
static struct page *get_ksm_page(struct ksm_stable_node *stable_node,
				 enum get_ksm_page_flags flags)
{
	struct page *page;
	void *expected_mapping;
	unsigned long kpfn;

	expected_mapping = (void *)((unsigned long)stable_node |
					PAGE_MAPPING_KSM);
again:
	kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
	page = pfn_to_page(kpfn);
	if (READ_ONCE(page->mapping) != expected_mapping)
		goto stale;

	/*
	 * We cannot do anything with the page while its refcount is 0.
	 * Usually 0 means free, or tail of a higher-order page: in which
	 * case this node is no longer referenced, and should be freed;
	 * however, it might mean that the page is under page_ref_freeze().
	 * The __remove_mapping() case is easy, again the node is now stale;
	 * the same applies in the reuse_ksm_page() case; but if page is
	 * swapcache in folio_migrate_mapping(), it might still be our page,
	 * in which case it's essential to keep the node.
	 */
	while (!get_page_unless_zero(page)) {
		/*
		 * Another check for page->mapping != expected_mapping would
		 * work here too.  We have chosen the !PageSwapCache test to
		 * optimize the common case, when the page is or is about to
		 * be freed: PageSwapCache is cleared (under spin_lock_irq)
		 * in the ref_freeze section of __remove_mapping(); but Anon
		 * page->mapping reset to NULL later, in free_pages_prepare().
		 */
		if (!PageSwapCache(page))
			goto stale;
		cpu_relax();
	}

	if (READ_ONCE(page->mapping) != expected_mapping) {
		put_page(page);
		goto stale;
	}

	if (flags == GET_KSM_PAGE_TRYLOCK) {
		if (!trylock_page(page)) {
			put_page(page);
			return ERR_PTR(-EBUSY);
		}
	} else if (flags == GET_KSM_PAGE_LOCK)
		lock_page(page);

	if (flags != GET_KSM_PAGE_NOLOCK) {
		if (READ_ONCE(page->mapping) != expected_mapping) {
			unlock_page(page);
			put_page(page);
			goto stale;
		}
	}
	return page;

stale:
	/*
	 * We come here from above when page->mapping or !PageSwapCache
	 * suggests that the node is stale; but it might be under migration.
	 * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(),
	 * before checking whether node->kpfn has been changed.
	 */
	smp_rmb();
	if (READ_ONCE(stable_node->kpfn) != kpfn)
		goto again;
	remove_node_from_stable_tree(stable_node);
	return NULL;
}

/*
 * Removing rmap_item from stable or unstable tree.
 * This function will clean the information from the stable/unstable tree.
 */
static void remove_rmap_item_from_tree(struct ksm_rmap_item *rmap_item)
{
	if (rmap_item->address & STABLE_FLAG) {
		struct ksm_stable_node *stable_node;
		struct page *page;

		stable_node = rmap_item->head;
		page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
		if (!page)
			goto out;

		hlist_del(&rmap_item->hlist);
		unlock_page(page);
		put_page(page);

		if (!hlist_empty(&stable_node->hlist))
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;

		rmap_item->mm->ksm_merging_pages--;

		VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
		stable_node->rmap_hlist_len--;

		put_anon_vma(rmap_item->anon_vma);
		rmap_item->head = NULL;
		rmap_item->address &= PAGE_MASK;

	} else if (rmap_item->address & UNSTABLE_FLAG) {
		unsigned char age;
		/*
		 * Usually ksmd can and must skip the rb_erase, because
		 * root_unstable_tree was already reset to RB_ROOT.
		 * But be careful when an mm is exiting: do the rb_erase
		 * if this rmap_item was inserted by this scan, rather
		 * than left over from before.
		 */
		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
		BUG_ON(age > 1);
		if (!age)
			rb_erase(&rmap_item->node,
				 root_unstable_tree + NUMA(rmap_item->nid));
		ksm_pages_unshared--;
		rmap_item->address &= PAGE_MASK;
	}
out:
	cond_resched();		/* we're called from many long loops */
}

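/*
 * Unlink and free every rmap_item from *rmap_list to the end of the list,
 * removing each one from its stable or unstable tree first.
 */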
static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list)
{
	while (*rmap_list) {
		struct ksm_rmap_item *rmap_item = *rmap_list;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}
}

/*
 * Though it's very tempting to unmerge rmap_items from stable tree rather
 * than check every pte of a given vma, the locking doesn't quite work for
 * that - an rmap_item is assigned to the stable tree after inserting ksm
 * page and upping mmap_lock.  Nor does it fit with the way we skip dup'ing
 * rmap_items from parent to child at fork time (so as not to waste time
 * if exit comes before the next scan reaches it).
 *
 * Similarly, although we'd like to remove rmap_items (so updating counts
 * and freeing memory) when unmerging an area, it's easier to leave that
 * to the next pass of ksmd - consider, for example, how ksmd might be
 * in cmp_and_merge_page on one of the rmap_items we would be removing.
 */
static int unmerge_ksm_pages(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
		if (ksm_test_exit(vma->vm_mm))
			break;
		if (signal_pending(current))
			err = -ERESTARTSYS;
		else
			err = break_ksm(vma, addr);
	}
	return err;
}

static inline struct ksm_stable_node *folio_stable_node(struct folio *folio)
{
	return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL;
}

static inline struct ksm_stable_node *page_stable_node(struct page *page)
{
	return folio_stable_node(page_folio(page));
}

static inline void set_page_stable_node(struct page *page,
					struct ksm_stable_node *stable_node)
{
	VM_BUG_ON_PAGE(PageAnon(page) && PageAnonExclusive(page), page);
	page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
}

#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */
static int remove_stable_node(struct ksm_stable_node *stable_node)
{
	struct page *page;
	int err;

	page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
	if (!page) {
		/*
		 * get_ksm_page did remove_node_from_stable_tree itself.
		 */
		return 0;
	}

	/*
	 * Page could be still mapped if this races with __mmput() running in
	 * between ksm_exit() and exit_mmap().  Just refuse to let
	 * merge_across_nodes/max_page_sharing be switched.
	 */
	err = -EBUSY;
	if (!page_mapped(page)) {
		/*
		 * The stable node did not yet appear stale to get_ksm_page(),
		 * since that allows for an unmapped ksm page to be recognized
		 * right up until it is freed; but the node is safe to remove.
		 * This page might be in an LRU cache waiting to be freed,
		 * or it might be PageSwapCache (perhaps under writeback),
		 * or it might have been removed from swapcache a moment ago.
		 */
		set_page_stable_node(page, NULL);
		remove_node_from_stable_tree(stable_node);
		err = 0;
	}

	unlock_page(page);
	put_page(page);
	return err;
}

static int remove_stable_node_chain(struct ksm_stable_node *stable_node,
				    struct rb_root *root)
{
	struct ksm_stable_node *dup;
	struct hlist_node *hlist_safe;

	if (!is_stable_node_chain(stable_node)) {
		VM_BUG_ON(is_stable_node_dup(stable_node));
		if (remove_stable_node(stable_node))
			return true;
		else
			return false;
	}

	hlist_for_each_entry_safe(dup, hlist_safe,
				  &stable_node->hlist, hlist_dup) {
		VM_BUG_ON(!is_stable_node_dup(dup));
		if (remove_stable_node(dup))
			return true;
	}
	BUG_ON(!hlist_empty(&stable_node->hlist));
	free_stable_node_chain(stable_node, root);
	return false;
}

static int remove_all_stable_nodes(void)
{
	struct ksm_stable_node *stable_node, *next;
	int nid;
	int err = 0;

	for (nid = 0; nid < ksm_nr_node_ids; nid++) {
		while (root_stable_tree[nid].rb_node) {
			stable_node = rb_entry(root_stable_tree[nid].rb_node,
						struct ksm_stable_node, node);
			if (remove_stable_node_chain(stable_node,
						     root_stable_tree + nid)) {
				err = -EBUSY;
				break;	/* proceed to next nid */
			}
			cond_resched();
		}
	}
	list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
		if (remove_stable_node(stable_node))
			err = -EBUSY;
		cond_resched();
	}
	return err;
}

static int unmerge_and_remove_all_rmap_items(void)
{
	struct ksm_mm_slot *mm_slot;
	struct mm_slot *slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int err = 0;

	spin_lock(&ksm_mmlist_lock);
	slot = list_entry(ksm_mm_head.slot.mm_node.next,
			  struct mm_slot, mm_node);
	ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
	spin_unlock(&ksm_mmlist_lock);

	for (mm_slot = ksm_scan.mm_slot; mm_slot != &ksm_mm_head;
	     mm_slot = ksm_scan.mm_slot) {
		VMA_ITERATOR(vmi, mm_slot->slot.mm, 0);

		mm = mm_slot->slot.mm;
		mmap_read_lock(mm);

		/*
		 * Exit right away if mm is exiting to avoid lockdep issue in
		 * the maple tree
		 */
		if (ksm_test_exit(mm))
			goto mm_exiting;

		for_each_vma(vmi, vma) {
			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
				continue;
			err = unmerge_ksm_pages(vma,
						vma->vm_start, vma->vm_end);
			if (err)
				goto error;
		}

mm_exiting:
		remove_trailing_rmap_items(&mm_slot->rmap_list);
		mmap_read_unlock(mm);

		spin_lock(&ksm_mmlist_lock);
		slot = list_entry(mm_slot->slot.mm_node.next,
				  struct mm_slot, mm_node);
		ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
		if (ksm_test_exit(mm)) {
			hash_del(&mm_slot->slot.hash);
			list_del(&mm_slot->slot.mm_node);
			spin_unlock(&ksm_mmlist_lock);

			mm_slot_free(mm_slot_cache, mm_slot);
			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
			clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
			mmdrop(mm);
		} else
			spin_unlock(&ksm_mmlist_lock);
	}

	/* Clean up stable nodes, but don't worry if some are still busy */
	remove_all_stable_nodes();
	ksm_scan.seqnr = 0;
	return 0;

error:
	mmap_read_unlock(mm);
	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = &ksm_mm_head;
	spin_unlock(&ksm_mmlist_lock);
	return err;
}
#endif /* CONFIG_SYSFS */

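/*
 * Cheap (non-cryptographic) hash of the page contents, remembered in
 * rmap_item->oldchecksum: a page is only inserted into the unstable tree
 * once its checksum is seen to be unchanged since the previous scan.
 */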
static u32 calc_checksum(struct page *page)
{
	u32 checksum;
	void *addr = kmap_atomic(page);
	checksum = xxhash(addr, PAGE_SIZE, 0);
	kunmap_atomic(addr);
	return checksum;
}

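/*
 * Make the pte mapping this anonymous page in vma clean and write-protected
 * so that the page contents can safely be shared; fails with -EFAULT if the
 * page turns out to be pinned (e.g. by O_DIRECT) or its anon-exclusive
 * marker cannot be cleared.  On success the pte value left in place is
 * returned through *orig_pte, for replace_page() to re-check later.
 */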
static int write_protect_page(struct vm_area_struct *vma, struct page *page,
			      pte_t *orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0);
	int swapped;
	int err = -EFAULT;
	struct mmu_notifier_range range;
	bool anon_exclusive;
	pte_t entry;

	pvmw.address = page_address_in_vma(page, vma);
	if (pvmw.address == -EFAULT)
		goto out;

	BUG_ON(PageTransCompound(page));

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address,
				pvmw.address + PAGE_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	if (!page_vma_mapped_walk(&pvmw))
		goto out_mn;
	if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
		goto out_unlock;

	anon_exclusive = PageAnonExclusive(page);
	entry = ptep_get(pvmw.pte);
	if (pte_write(entry) || pte_dirty(entry) ||
	    anon_exclusive || mm_tlb_flush_pending(mm)) {
		swapped = PageSwapCache(page);
		flush_cache_page(vma, pvmw.address, page_to_pfn(page));
		/*
		 * Ok this is tricky, when get_user_pages_fast() runs it doesn't
		 * take any lock, therefore the check that we are going to make
		 * with the page count against the mapcount is racy and
		 * O_DIRECT can happen right after the check.
		 * So we clear the pte and flush the tlb before the check;
		 * this assures us that no O_DIRECT can happen after the check
		 * or in the middle of the check.
		 *
		 * No need to notify as we are downgrading page table to read
		 * only not changing it to point to a new page.
		 *
		 * See Documentation/mm/mmu_notifier.rst
		 */
		entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
		/*
		 * Check that no O_DIRECT or similar I/O is in progress on the
		 * page
		 */
		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
			set_pte_at(mm, pvmw.address, pvmw.pte, entry);
			goto out_unlock;
		}

		/* See page_try_share_anon_rmap(): clear PTE first. */
		if (anon_exclusive && page_try_share_anon_rmap(page)) {
			set_pte_at(mm, pvmw.address, pvmw.pte, entry);
			goto out_unlock;
		}

		if (pte_dirty(entry))
			set_page_dirty(page);
		entry = pte_mkclean(entry);

		if (pte_write(entry))
			entry = pte_wrprotect(entry);

		set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
	}
	*orig_pte = entry;
	err = 0;

out_unlock:
	page_vma_mapped_walk_done(&pvmw);
out_mn:
	mmu_notifier_invalidate_range_end(&range);
out:
	return err;
}

/**
 * replace_page - replace page in vma by new ksm page
 * @vma: vma that holds the pte pointing to page
 * @page: the page we are replacing by kpage
 * @kpage: the ksm page we replace page by
 * @orig_pte: the original value of the pte
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int replace_page(struct vm_area_struct *vma, struct page *page,
			struct page *kpage, pte_t orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	struct folio *folio;
	pmd_t *pmd;
	pmd_t pmde;
	pte_t *ptep;
	pte_t newpte;
	spinlock_t *ptl;
	unsigned long addr;
	int err = -EFAULT;
	struct mmu_notifier_range range;

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	pmd = mm_find_pmd(mm, addr);
	if (!pmd)
		goto out;
	/*
	 * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at()
	 * without holding anon_vma lock for write.  So when looking for a
	 * genuine pmde (in which to find pte), test present and !THP together.
	 */
	pmde = pmdp_get_lockless(pmd);
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		goto out;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
				addr + PAGE_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!ptep)
		goto out_mn;
	if (!pte_same(ptep_get(ptep), orig_pte)) {
		pte_unmap_unlock(ptep, ptl);
		goto out_mn;
	}
	VM_BUG_ON_PAGE(PageAnonExclusive(page), page);
	VM_BUG_ON_PAGE(PageAnon(kpage) && PageAnonExclusive(kpage), kpage);

	/*
	 * No need to check ksm_use_zero_pages here: we can only have a
	 * zero_page here if ksm_use_zero_pages was enabled already.
	 */
	if (!is_zero_pfn(page_to_pfn(kpage))) {
		get_page(kpage);
		page_add_anon_rmap(kpage, vma, addr, RMAP_NONE);
		newpte = mk_pte(kpage, vma->vm_page_prot);
	} else {
		/*
		 * Use pte_mkdirty to mark the zero page mapped by KSM, and then
		 * we can easily track all KSM-placed zero pages by checking if
		 * the dirty bit in zero page's PTE is set.
		 */
		newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
		ksm_zero_pages++;
		mm->ksm_zero_pages++;
		/*
		 * We're replacing an anonymous page with a zero page, which is
		 * not anonymous.  We need to do proper accounting otherwise we
		 * will get wrong values in /proc, and a BUG message in dmesg
		 * when tearing down the mm.
		 */
		dec_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(ptep_get(ptep)));
	/*
	 * No need to notify as we are replacing a read only page with another
	 * read only page with the same content.
	 *
	 * See Documentation/mm/mmu_notifier.rst
	 */
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, newpte);

	folio = page_folio(page);
	page_remove_rmap(page, vma, false);
	if (!folio_mapped(folio))
		folio_free_swap(folio);
	folio_put(folio);

	pte_unmap_unlock(ptep, ptl);
	err = 0;
out_mn:
	mmu_notifier_invalidate_range_end(&range);
out:
	return err;
}

/*
 * try_to_merge_one_page - take two pages and merge them into one
 * @vma: the vma that holds the pte pointing to page
 * @page: the PageAnon page that we want to replace with kpage
 * @kpage: the PageKsm page that we want to map instead of page,
 *         or NULL the first time when we want to use page as kpage.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_one_page(struct vm_area_struct *vma,
				 struct page *page, struct page *kpage)
{
	pte_t orig_pte = __pte(0);
	int err = -EFAULT;

	if (page == kpage)			/* ksm page forked */
		return 0;

	if (!PageAnon(page))
		goto out;

	/*
	 * We need the page lock to read a stable PageSwapCache in
	 * write_protect_page().  We use trylock_page() instead of
	 * lock_page() because we don't want to wait here - we
	 * prefer to continue scanning and merging different pages,
	 * then come back to this page when it is unlocked.
	 */
	if (!trylock_page(page))
		goto out;

	if (PageTransCompound(page)) {
		if (split_huge_page(page))
			goto out_unlock;
	}

	/*
	 * If this anonymous page is mapped only here, its pte may need
	 * to be write-protected.  If it's mapped elsewhere, all of its
	 * ptes are necessarily already write-protected.  But in either
	 * case, we need to lock and check page_count is not raised.
	 */
	if (write_protect_page(vma, page, &orig_pte) == 0) {
		if (!kpage) {
			/*
			 * While we hold page lock, upgrade page from
			 * PageAnon+anon_vma to PageKsm+NULL stable_node:
			 * stable_tree_insert() will update stable_node.
			 */
			set_page_stable_node(page, NULL);
			mark_page_accessed(page);
			/*
			 * Page reclaim just frees a clean page with no dirty
			 * ptes: make sure that the ksm page would be swapped.
			 */
			if (!PageDirty(page))
				SetPageDirty(page);
			err = 0;
		} else if (pages_identical(page, kpage))
			err = replace_page(vma, page, kpage, orig_pte);
	}

out_unlock:
	unlock_page(page);
out:
	return err;
}

/*
 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
 * but no new kernel page is allocated: kpage must already be a ksm page.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_with_ksm_page(struct ksm_rmap_item *rmap_item,
				      struct page *page, struct page *kpage)
{
	struct mm_struct *mm = rmap_item->mm;
	struct vm_area_struct *vma;
	int err = -EFAULT;

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, rmap_item->address);
	if (!vma)
		goto out;

	err = try_to_merge_one_page(vma, page, kpage);
	if (err)
		goto out;

	/* Unstable nid is in union with stable anon_vma: remove first */
	remove_rmap_item_from_tree(rmap_item);

	/* Must get reference to anon_vma while still holding mmap_lock */
	rmap_item->anon_vma = vma->anon_vma;
	get_anon_vma(vma->anon_vma);
out:
	mmap_read_unlock(mm);
	trace_ksm_merge_with_ksm_page(kpage, page_to_pfn(kpage ? kpage : page),
				rmap_item, mm, err);
	return err;
}

/*
 * try_to_merge_two_pages - take two identical pages and prepare them
 * to be merged into one page.
 *
 * This function returns the kpage if we successfully merged two identical
 * pages into one ksm page, NULL otherwise.
 *
 * Note that this function upgrades page to ksm page: if one of the pages
 * is already a ksm page, try_to_merge_with_ksm_page should be used.
 */
static struct page *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item,
					   struct page *page,
					   struct ksm_rmap_item *tree_rmap_item,
					   struct page *tree_page)
{
	int err;

	err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
	if (!err) {
		err = try_to_merge_with_ksm_page(tree_rmap_item,
							tree_page, page);
		/*
		 * If that fails, we have a ksm page with only one pte
		 * pointing to it: so break it.
		 */
		if (err)
			break_cow(rmap_item);
	}
	return err ? NULL : page;
}

static __always_inline
bool __is_page_sharing_candidate(struct ksm_stable_node *stable_node, int offset)
{
	VM_BUG_ON(stable_node->rmap_hlist_len < 0);
	/*
	 * Check that at least one mapping still exists, otherwise
	 * there's not much point in merging and sharing with this
	 * stable_node, as the underlying tree_page of the other
	 * sharer is going to be freed soon.
	 */
	return stable_node->rmap_hlist_len &&
		stable_node->rmap_hlist_len + offset < ksm_max_page_sharing;
}

static __always_inline
bool is_page_sharing_candidate(struct ksm_stable_node *stable_node)
{
	return __is_page_sharing_candidate(stable_node, 0);
}

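/*
 * Walk the dups hanging off a stable_node chain and pick one that can
 * still accept another sharer (when pruning, prefer the fullest such dup).
 * Stale dups found on the way are removed, and if pruning leaves only a
 * single dup the chain is collapsed back into a regular stable_node, with
 * *_stable_node updated to point at it.  Returns the chosen dup through
 * *_stable_node_dup and its page with a reference held, or NULL.
 */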
static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
				    struct ksm_stable_node **_stable_node,
				    struct rb_root *root,
				    bool prune_stale_stable_nodes)
{
	struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node;
	struct hlist_node *hlist_safe;
	struct page *_tree_page, *tree_page = NULL;
	int nr = 0;
	int found_rmap_hlist_len;

	if (!prune_stale_stable_nodes ||
	    time_before(jiffies, stable_node->chain_prune_time +
			msecs_to_jiffies(
				ksm_stable_node_chains_prune_millisecs)))
		prune_stale_stable_nodes = false;
	else
		stable_node->chain_prune_time = jiffies;

	hlist_for_each_entry_safe(dup, hlist_safe,
				  &stable_node->hlist, hlist_dup) {
		cond_resched();
		/*
		 * We must walk all stable_node_dup to prune the stale
		 * stable nodes during lookup.
		 *
		 * get_ksm_page can drop the nodes from the
		 * stable_node->hlist if they point to freed pages
		 * (that's why we do a _safe walk).  The "dup"
		 * stable_node parameter itself will be freed from
		 * under us if it returns NULL.
		 */
		_tree_page = get_ksm_page(dup, GET_KSM_PAGE_NOLOCK);
		if (!_tree_page)
			continue;
		nr += 1;
		if (is_page_sharing_candidate(dup)) {
			if (!found ||
			    dup->rmap_hlist_len > found_rmap_hlist_len) {
				if (found)
					put_page(tree_page);
				found = dup;
				found_rmap_hlist_len = found->rmap_hlist_len;
				tree_page = _tree_page;

				/* skip put_page for found dup */
				if (!prune_stale_stable_nodes)
					break;
				continue;
			}
		}
		put_page(_tree_page);
	}

	if (found) {
		/*
		 * nr is counting all dups in the chain only if
		 * prune_stale_stable_nodes is true, otherwise we may
		 * break the loop at nr == 1 even if there are
		 * multiple entries.
		 */
		if (prune_stale_stable_nodes && nr == 1) {
			/*
			 * If there's not just one entry it would
			 * corrupt memory, better BUG_ON.  In KSM
			 * context with no lock held it's not even
			 * fatal.
			 */
			BUG_ON(stable_node->hlist.first->next);

			/*
			 * There's just one entry and it is below the
			 * deduplication limit so drop the chain.
			 */
			rb_replace_node(&stable_node->node, &found->node,
					root);
			free_stable_node(stable_node);
			ksm_stable_node_chains--;
			ksm_stable_node_dups--;
			/*
			 * NOTE: the caller depends on the stable_node
			 * to be equal to stable_node_dup if the chain
			 * was collapsed.
			 */
			*_stable_node = found;
			/*
			 * Just for robustness, as stable_node is
			 * otherwise left as a stable pointer, the
			 * compiler shall optimize it away at build
			 * time.
			 */
			stable_node = NULL;
		} else if (stable_node->hlist.first != &found->hlist_dup &&
			   __is_page_sharing_candidate(found, 1)) {
			/*
			 * If the found stable_node dup can accept one
			 * more future merge (in addition to the one
			 * that is underway) and is not at the head of
			 * the chain, put it there so next search will
			 * be quicker in the !prune_stale_stable_nodes
			 * case.
			 *
			 * NOTE: it would be inaccurate to use nr > 1
			 * instead of checking the hlist.first pointer
			 * directly, because in the
			 * prune_stale_stable_nodes case "nr" isn't
			 * the position of the found dup in the chain,
			 * but the total number of dups in the chain.
			 */
			hlist_del(&found->hlist_dup);
			hlist_add_head(&found->hlist_dup,
				       &stable_node->hlist);
		}
	}

	*_stable_node_dup = found;
	return tree_page;
}

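/*
 * Return any dup of a stable_node chain (or the node itself if it is not a
 * chain), freeing an empty chain.  Used when every dup is full and we only
 * need some page with the right content to continue the tree walk.
 */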
static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stable_node,
					    struct rb_root *root)
{
	if (!is_stable_node_chain(stable_node))
		return stable_node;
	if (hlist_empty(&stable_node->hlist)) {
		free_stable_node_chain(stable_node, root);
		return NULL;
	}
	return hlist_entry(stable_node->hlist.first,
			   typeof(*stable_node), hlist_dup);
}

/*
 * Like for get_ksm_page, this function can free the *_stable_node and
 * *_stable_node_dup if the returned tree_page is NULL.
 *
 * It can also free and overwrite *_stable_node with the found
 * stable_node_dup if the chain is collapsed (in which case
 * *_stable_node will be equal to *_stable_node_dup like if the chain
 * never existed).  It's up to the caller to verify tree_page is not
 * NULL before dereferencing *_stable_node or *_stable_node_dup.
 *
 * *_stable_node_dup is really a second output parameter of this
 * function and will be overwritten in all cases, the caller doesn't
 * need to initialize it.
 */
static struct page *__stable_node_chain(struct ksm_stable_node **_stable_node_dup,
					struct ksm_stable_node **_stable_node,
					struct rb_root *root,
					bool prune_stale_stable_nodes)
{
	struct ksm_stable_node *stable_node = *_stable_node;
	if (!is_stable_node_chain(stable_node)) {
		if (is_page_sharing_candidate(stable_node)) {
			*_stable_node_dup = stable_node;
			return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
		}
		/*
		 * _stable_node_dup set to NULL means the stable_node
		 * reached the ksm_max_page_sharing limit.
		 */
		*_stable_node_dup = NULL;
		return NULL;
	}
	return stable_node_dup(_stable_node_dup, _stable_node, root,
			       prune_stale_stable_nodes);
}

static __always_inline struct page *chain_prune(struct ksm_stable_node **s_n_d,
						struct ksm_stable_node **s_n,
						struct rb_root *root)
{
	return __stable_node_chain(s_n_d, s_n, root, true);
}

static __always_inline struct page *chain(struct ksm_stable_node **s_n_d,
					  struct ksm_stable_node *s_n,
					  struct rb_root *root)
{
	struct ksm_stable_node *old_stable_node = s_n;
	struct page *tree_page;

	tree_page = __stable_node_chain(s_n_d, &s_n, root, false);
	/* not pruning dups so s_n cannot have changed */
	VM_BUG_ON(s_n != old_stable_node);
	return tree_page;
}

/*
 * stable_tree_search - search for page inside the stable tree
 *
 * This function checks if there is a page inside the stable tree
 * with identical content to the page that we are scanning right now.
 *
 * This function returns the stable tree node of identical content if found,
 * NULL otherwise.
 */
static struct page *stable_tree_search(struct page *page)
{
	int nid;
	struct rb_root *root;
	struct rb_node **new;
	struct rb_node *parent;
	struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any;
	struct ksm_stable_node *page_node;

	page_node = page_stable_node(page);
	if (page_node && page_node->head != &migrate_nodes) {
		/* ksm page forked */
		get_page(page);
		return page;
	}

	nid = get_kpfn_nid(page_to_pfn(page));
	root = root_stable_tree + nid;
again:
	new = &root->rb_node;
	parent = NULL;

	while (*new) {
		struct page *tree_page;
		int ret;

		cond_resched();
		stable_node = rb_entry(*new, struct ksm_stable_node, node);
		stable_node_any = NULL;
		tree_page = chain_prune(&stable_node_dup, &stable_node, root);
		/*
		 * NOTE: stable_node may have been freed by
		 * chain_prune() if the returned stable_node_dup is
		 * not NULL.  stable_node_dup may have been inserted in
		 * the rbtree instead as a regular stable_node (in
		 * order to collapse the stable_node chain if a single
		 * stable_node dup was found in it).  In such case the
		 * stable_node is overwritten by the callee to point
		 * to the stable_node_dup that was collapsed in the
		 * stable rbtree and stable_node will be equal to
		 * stable_node_dup like if the chain never existed.
		 */
		if (!stable_node_dup) {
			/*
			 * Either all stable_node dups were full in
			 * this stable_node chain, or this chain was
			 * empty and should be rb_erased.
			 */
			stable_node_any = stable_node_dup_any(stable_node,
							      root);
			if (!stable_node_any) {
				/* rb_erase just run */
				goto again;
			}
			/*
			 * Take any of the stable_node dups page of
			 * this stable_node chain to let the tree walk
			 * continue.  All KSM pages belonging to the
			 * stable_node dups in a stable_node chain
			 * have the same content and they're
			 * write protected at all times.  Any will work
			 * fine to continue the walk.
			 */
			tree_page = get_ksm_page(stable_node_any,
						 GET_KSM_PAGE_NOLOCK);
		}
		VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
		if (!tree_page) {
			/*
			 * If we walked over a stale stable_node,
			 * get_ksm_page() will call rb_erase() and it
			 * may rebalance the tree from under us.  So
			 * restart the search from scratch.  Returning
			 * NULL would be safe too, but we'd generate
			 * false negative insertions just because some
			 * stable_node was stale.
			 */
			goto again;
		}

		ret = memcmp_pages(page, tree_page);
		put_page(tree_page);

		parent = *new;
		if (ret < 0)
			new = &parent->rb_left;
		else if (ret > 0)
			new = &parent->rb_right;
		else {
			if (page_node) {
				VM_BUG_ON(page_node->head != &migrate_nodes);
				/*
				 * Test if the migrated page should be merged
				 * into a stable node dup.  If the mapcount is
				 * 1 we can migrate it with another KSM page
				 * without adding it to the chain.
				 */
				if (page_mapcount(page) > 1)
					goto chain_append;
			}

			if (!stable_node_dup) {
				/*
				 * If the stable_node is a chain and
				 * we got a payload match in memcmp
				 * but we cannot merge the scanned
				 * page in any of the existing
				 * stable_node dups because they're
				 * all full, we need to wait for the
				 * scanned page to find itself a match
				 * in the unstable tree to create a
				 * brand new KSM page to add later to
				 * the dups of this stable_node.
				 */
				return NULL;
			}

			/*
			 * Lock and unlock the stable_node's page (which
			 * might already have been migrated) so that page
			 * migration is sure to notice its raised count.
			 * It would be more elegant to return stable_node
			 * than kpage, but that involves more changes.
			 */
			tree_page = get_ksm_page(stable_node_dup,
						 GET_KSM_PAGE_TRYLOCK);

			if (PTR_ERR(tree_page) == -EBUSY)
				return ERR_PTR(-EBUSY);

			if (unlikely(!tree_page))
				/*
				 * The tree may have been rebalanced,
				 * so re-evaluate parent and new.
				 */
				goto again;
			unlock_page(tree_page);

			if (get_kpfn_nid(stable_node_dup->kpfn) !=
			    NUMA(stable_node_dup->nid)) {
				put_page(tree_page);
				goto replace;
			}
			return tree_page;
		}
	}

	if (!page_node)
		return NULL;

	list_del(&page_node->list);
	DO_NUMA(page_node->nid = nid);
	rb_link_node(&page_node->node, parent, new);
	rb_insert_color(&page_node->node, root);
out:
	if (is_page_sharing_candidate(page_node)) {
		get_page(page);
		return page;
	} else
		return NULL;

replace:
	/*
	 * If stable_node was a chain and chain_prune collapsed it,
	 * stable_node has been updated to be the new regular
	 * stable_node.  A collapse of the chain is indistinguishable
	 * from the case there was no chain in the stable
	 * rbtree.  Otherwise stable_node is the chain and
	 * stable_node_dup is the dup to replace.
1790 */ 1791 if (stable_node_dup == stable_node) { 1792 VM_BUG_ON(is_stable_node_chain(stable_node_dup)); 1793 VM_BUG_ON(is_stable_node_dup(stable_node_dup)); 1794 /* there is no chain */ 1795 if (page_node) { 1796 VM_BUG_ON(page_node->head != &migrate_nodes); 1797 list_del(&page_node->list); 1798 DO_NUMA(page_node->nid = nid); 1799 rb_replace_node(&stable_node_dup->node, 1800 &page_node->node, 1801 root); 1802 if (is_page_sharing_candidate(page_node)) 1803 get_page(page); 1804 else 1805 page = NULL; 1806 } else { 1807 rb_erase(&stable_node_dup->node, root); 1808 page = NULL; 1809 } 1810 } else { 1811 VM_BUG_ON(!is_stable_node_chain(stable_node)); 1812 __stable_node_dup_del(stable_node_dup); 1813 if (page_node) { 1814 VM_BUG_ON(page_node->head != &migrate_nodes); 1815 list_del(&page_node->list); 1816 DO_NUMA(page_node->nid = nid); 1817 stable_node_chain_add_dup(page_node, stable_node); 1818 if (is_page_sharing_candidate(page_node)) 1819 get_page(page); 1820 else 1821 page = NULL; 1822 } else { 1823 page = NULL; 1824 } 1825 } 1826 stable_node_dup->head = &migrate_nodes; 1827 list_add(&stable_node_dup->list, stable_node_dup->head); 1828 return page; 1829 1830 chain_append: 1831 /* stable_node_dup could be null if it reached the limit */ 1832 if (!stable_node_dup) 1833 stable_node_dup = stable_node_any; 1834 /* 1835 * If stable_node was a chain and chain_prune collapsed it, 1836 * stable_node has been updated to be the new regular 1837 * stable_node. A collapse of the chain is indistinguishable 1838 * from the case there was no chain in the stable 1839 * rbtree. Otherwise stable_node is the chain and 1840 * stable_node_dup is the dup to replace. 1841 */ 1842 if (stable_node_dup == stable_node) { 1843 VM_BUG_ON(is_stable_node_dup(stable_node_dup)); 1844 /* chain is missing so create it */ 1845 stable_node = alloc_stable_node_chain(stable_node_dup, 1846 root); 1847 if (!stable_node) 1848 return NULL; 1849 } 1850 /* 1851 * Add this stable_node dup that was 1852 * migrated to the stable_node chain 1853 * of the current nid for this page 1854 * content. 1855 */ 1856 VM_BUG_ON(!is_stable_node_dup(stable_node_dup)); 1857 VM_BUG_ON(page_node->head != &migrate_nodes); 1858 list_del(&page_node->list); 1859 DO_NUMA(page_node->nid = nid); 1860 stable_node_chain_add_dup(page_node, stable_node); 1861 goto out; 1862 } 1863 1864 /* 1865 * stable_tree_insert - insert stable tree node pointing to new ksm page 1866 * into the stable tree. 1867 * 1868 * This function returns the stable tree node just allocated on success, 1869 * NULL otherwise. 1870 */ 1871 static struct ksm_stable_node *stable_tree_insert(struct page *kpage) 1872 { 1873 int nid; 1874 unsigned long kpfn; 1875 struct rb_root *root; 1876 struct rb_node **new; 1877 struct rb_node *parent; 1878 struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any; 1879 bool need_chain = false; 1880 1881 kpfn = page_to_pfn(kpage); 1882 nid = get_kpfn_nid(kpfn); 1883 root = root_stable_tree + nid; 1884 again: 1885 parent = NULL; 1886 new = &root->rb_node; 1887 1888 while (*new) { 1889 struct page *tree_page; 1890 int ret; 1891 1892 cond_resched(); 1893 stable_node = rb_entry(*new, struct ksm_stable_node, node); 1894 stable_node_any = NULL; 1895 tree_page = chain(&stable_node_dup, stable_node, root); 1896 if (!stable_node_dup) { 1897 /* 1898 * Either all stable_node dups were full in 1899 * this stable_node chain, or this chain was 1900 * empty and should be rb_erased. 
1901 */ 1902 stable_node_any = stable_node_dup_any(stable_node, 1903 root); 1904 if (!stable_node_any) { 1905 /* rb_erase just run */ 1906 goto again; 1907 } 1908 /* 1909 * Take any of the stable_node dups page of 1910 * this stable_node chain to let the tree walk 1911 * continue. All KSM pages belonging to the 1912 * stable_node dups in a stable_node chain 1913 * have the same content and they're 1914 * write protected at all times. Any will work 1915 * fine to continue the walk. 1916 */ 1917 tree_page = get_ksm_page(stable_node_any, 1918 GET_KSM_PAGE_NOLOCK); 1919 } 1920 VM_BUG_ON(!stable_node_dup ^ !!stable_node_any); 1921 if (!tree_page) { 1922 /* 1923 * If we walked over a stale stable_node, 1924 * get_ksm_page() will call rb_erase() and it 1925 * may rebalance the tree from under us. So 1926 * restart the search from scratch. Returning 1927 * NULL would be safe too, but we'd generate 1928 * false negative insertions just because some 1929 * stable_node was stale. 1930 */ 1931 goto again; 1932 } 1933 1934 ret = memcmp_pages(kpage, tree_page); 1935 put_page(tree_page); 1936 1937 parent = *new; 1938 if (ret < 0) 1939 new = &parent->rb_left; 1940 else if (ret > 0) 1941 new = &parent->rb_right; 1942 else { 1943 need_chain = true; 1944 break; 1945 } 1946 } 1947 1948 stable_node_dup = alloc_stable_node(); 1949 if (!stable_node_dup) 1950 return NULL; 1951 1952 INIT_HLIST_HEAD(&stable_node_dup->hlist); 1953 stable_node_dup->kpfn = kpfn; 1954 set_page_stable_node(kpage, stable_node_dup); 1955 stable_node_dup->rmap_hlist_len = 0; 1956 DO_NUMA(stable_node_dup->nid = nid); 1957 if (!need_chain) { 1958 rb_link_node(&stable_node_dup->node, parent, new); 1959 rb_insert_color(&stable_node_dup->node, root); 1960 } else { 1961 if (!is_stable_node_chain(stable_node)) { 1962 struct ksm_stable_node *orig = stable_node; 1963 /* chain is missing so create it */ 1964 stable_node = alloc_stable_node_chain(orig, root); 1965 if (!stable_node) { 1966 free_stable_node(stable_node_dup); 1967 return NULL; 1968 } 1969 } 1970 stable_node_chain_add_dup(stable_node_dup, stable_node); 1971 } 1972 1973 return stable_node_dup; 1974 } 1975 1976 /* 1977 * unstable_tree_search_insert - search for identical page, 1978 * else insert rmap_item into the unstable tree. 1979 * 1980 * This function searches for a page in the unstable tree identical to the 1981 * page currently being scanned; and if no identical page is found in the 1982 * tree, we insert rmap_item as a new object into the unstable tree. 1983 * 1984 * This function returns pointer to rmap_item found to be identical 1985 * to the currently scanned page, NULL otherwise. 1986 * 1987 * This function does both searching and inserting, because they share 1988 * the same walking algorithm in an rbtree. 1989 */ 1990 static 1991 struct ksm_rmap_item *unstable_tree_search_insert(struct ksm_rmap_item *rmap_item, 1992 struct page *page, 1993 struct page **tree_pagep) 1994 { 1995 struct rb_node **new; 1996 struct rb_root *root; 1997 struct rb_node *parent = NULL; 1998 int nid; 1999 2000 nid = get_kpfn_nid(page_to_pfn(page)); 2001 root = root_unstable_tree + nid; 2002 new = &root->rb_node; 2003 2004 while (*new) { 2005 struct ksm_rmap_item *tree_rmap_item; 2006 struct page *tree_page; 2007 int ret; 2008 2009 cond_resched(); 2010 tree_rmap_item = rb_entry(*new, struct ksm_rmap_item, node); 2011 tree_page = get_mergeable_page(tree_rmap_item); 2012 if (!tree_page) 2013 return NULL; 2014 2015 /* 2016 * Don't substitute a ksm page for a forked page. 
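		 * (If tree_page is the very same page we are scanning, the
		 * two rmap_items already share one anonymous page, typically
		 * after fork(), so there is nothing to gain by merging.)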
2017 */ 2018 if (page == tree_page) { 2019 put_page(tree_page); 2020 return NULL; 2021 } 2022 2023 ret = memcmp_pages(page, tree_page); 2024 2025 parent = *new; 2026 if (ret < 0) { 2027 put_page(tree_page); 2028 new = &parent->rb_left; 2029 } else if (ret > 0) { 2030 put_page(tree_page); 2031 new = &parent->rb_right; 2032 } else if (!ksm_merge_across_nodes && 2033 page_to_nid(tree_page) != nid) { 2034 /* 2035 * If tree_page has been migrated to another NUMA node, 2036 * it will be flushed out and put in the right unstable 2037 * tree next time: only merge with it when across_nodes. 2038 */ 2039 put_page(tree_page); 2040 return NULL; 2041 } else { 2042 *tree_pagep = tree_page; 2043 return tree_rmap_item; 2044 } 2045 } 2046 2047 rmap_item->address |= UNSTABLE_FLAG; 2048 rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); 2049 DO_NUMA(rmap_item->nid = nid); 2050 rb_link_node(&rmap_item->node, parent, new); 2051 rb_insert_color(&rmap_item->node, root); 2052 2053 ksm_pages_unshared++; 2054 return NULL; 2055 } 2056 2057 /* 2058 * stable_tree_append - add another rmap_item to the linked list of 2059 * rmap_items hanging off a given node of the stable tree, all sharing 2060 * the same ksm page. 2061 */ 2062 static void stable_tree_append(struct ksm_rmap_item *rmap_item, 2063 struct ksm_stable_node *stable_node, 2064 bool max_page_sharing_bypass) 2065 { 2066 /* 2067 * rmap won't find this mapping if we don't insert the 2068 * rmap_item in the right stable_node 2069 * duplicate. page_migration could break later if rmap breaks, 2070 * so we can as well crash here. We really need to check for 2071 * rmap_hlist_len == STABLE_NODE_CHAIN, but we can as well check 2072 * for other negative values as an underflow if detected here 2073 * for the first time (and not when decreasing rmap_hlist_len) 2074 * would be sign of memory corruption in the stable_node. 2075 */ 2076 BUG_ON(stable_node->rmap_hlist_len < 0); 2077 2078 stable_node->rmap_hlist_len++; 2079 if (!max_page_sharing_bypass) 2080 /* possibly non fatal but unexpected overflow, only warn */ 2081 WARN_ON_ONCE(stable_node->rmap_hlist_len > 2082 ksm_max_page_sharing); 2083 2084 rmap_item->head = stable_node; 2085 rmap_item->address |= STABLE_FLAG; 2086 hlist_add_head(&rmap_item->hlist, &stable_node->hlist); 2087 2088 if (rmap_item->hlist.next) 2089 ksm_pages_sharing++; 2090 else 2091 ksm_pages_shared++; 2092 2093 rmap_item->mm->ksm_merging_pages++; 2094 } 2095 2096 /* 2097 * cmp_and_merge_page - first see if page can be merged into the stable tree; 2098 * if not, compare checksum to previous and if it's the same, see if page can 2099 * be inserted into the unstable tree, or merged with a page already there and 2100 * both transferred to the stable tree. 2101 * 2102 * @page: the page that we are searching identical page to. 
2103 * @rmap_item: the reverse mapping into the virtual address of this page 2104 */ 2105 static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_item) 2106 { 2107 struct mm_struct *mm = rmap_item->mm; 2108 struct ksm_rmap_item *tree_rmap_item; 2109 struct page *tree_page = NULL; 2110 struct ksm_stable_node *stable_node; 2111 struct page *kpage; 2112 unsigned int checksum; 2113 int err; 2114 bool max_page_sharing_bypass = false; 2115 2116 stable_node = page_stable_node(page); 2117 if (stable_node) { 2118 if (stable_node->head != &migrate_nodes && 2119 get_kpfn_nid(READ_ONCE(stable_node->kpfn)) != 2120 NUMA(stable_node->nid)) { 2121 stable_node_dup_del(stable_node); 2122 stable_node->head = &migrate_nodes; 2123 list_add(&stable_node->list, stable_node->head); 2124 } 2125 if (stable_node->head != &migrate_nodes && 2126 rmap_item->head == stable_node) 2127 return; 2128 /* 2129 * If it's a KSM fork, allow it to go over the sharing limit 2130 * without warnings. 2131 */ 2132 if (!is_page_sharing_candidate(stable_node)) 2133 max_page_sharing_bypass = true; 2134 } 2135 2136 /* We first start with searching the page inside the stable tree */ 2137 kpage = stable_tree_search(page); 2138 if (kpage == page && rmap_item->head == stable_node) { 2139 put_page(kpage); 2140 return; 2141 } 2142 2143 remove_rmap_item_from_tree(rmap_item); 2144 2145 if (kpage) { 2146 if (PTR_ERR(kpage) == -EBUSY) 2147 return; 2148 2149 err = try_to_merge_with_ksm_page(rmap_item, page, kpage); 2150 if (!err) { 2151 /* 2152 * The page was successfully merged: 2153 * add its rmap_item to the stable tree. 2154 */ 2155 lock_page(kpage); 2156 stable_tree_append(rmap_item, page_stable_node(kpage), 2157 max_page_sharing_bypass); 2158 unlock_page(kpage); 2159 } 2160 put_page(kpage); 2161 return; 2162 } 2163 2164 /* 2165 * If the hash value of the page has changed from the last time 2166 * we calculated it, this page is changing frequently: therefore we 2167 * don't want to insert it in the unstable tree, and we don't want 2168 * to waste our time searching for something identical to it there. 2169 */ 2170 checksum = calc_checksum(page); 2171 if (rmap_item->oldchecksum != checksum) { 2172 rmap_item->oldchecksum = checksum; 2173 return; 2174 } 2175 2176 /* 2177 * Same checksum as an empty page. We attempt to merge it with the 2178 * appropriate zero page if the user enabled this via sysfs. 2179 */ 2180 if (ksm_use_zero_pages && (checksum == zero_checksum)) { 2181 struct vm_area_struct *vma; 2182 2183 mmap_read_lock(mm); 2184 vma = find_mergeable_vma(mm, rmap_item->address); 2185 if (vma) { 2186 err = try_to_merge_one_page(vma, page, 2187 ZERO_PAGE(rmap_item->address)); 2188 trace_ksm_merge_one_page( 2189 page_to_pfn(ZERO_PAGE(rmap_item->address)), 2190 rmap_item, mm, err); 2191 } else { 2192 /* 2193 * If the vma is out of date, we do not need to 2194 * continue. 2195 */ 2196 err = 0; 2197 } 2198 mmap_read_unlock(mm); 2199 /* 2200 * In case of failure, the page was not really empty, so we 2201 * need to continue. Otherwise we're done. 2202 */ 2203 if (!err) 2204 return; 2205 } 2206 tree_rmap_item = 2207 unstable_tree_search_insert(rmap_item, page, &tree_page); 2208 if (tree_rmap_item) { 2209 bool split; 2210 2211 kpage = try_to_merge_two_pages(rmap_item, page, 2212 tree_rmap_item, tree_page); 2213 /* 2214 * If both pages we tried to merge belong to the same compound 2215 * page, then we actually ended up increasing the reference 2216 * count of the same compound page twice, and split_huge_page 2217 * failed. 
2218 * Here we set a flag if that happened, and we use it later to 2219 * try split_huge_page again. Since we call put_page right 2220 * afterwards, the reference count will be correct and 2221 * split_huge_page should succeed. 2222 */ 2223 split = PageTransCompound(page) 2224 && compound_head(page) == compound_head(tree_page); 2225 put_page(tree_page); 2226 if (kpage) { 2227 /* 2228 * The pages were successfully merged: insert new 2229 * node in the stable tree and add both rmap_items. 2230 */ 2231 lock_page(kpage); 2232 stable_node = stable_tree_insert(kpage); 2233 if (stable_node) { 2234 stable_tree_append(tree_rmap_item, stable_node, 2235 false); 2236 stable_tree_append(rmap_item, stable_node, 2237 false); 2238 } 2239 unlock_page(kpage); 2240 2241 /* 2242 * If we fail to insert the page into the stable tree, 2243 * we will have 2 virtual addresses that are pointing 2244 * to a ksm page left outside the stable tree, 2245 * in which case we need to break_cow on both. 2246 */ 2247 if (!stable_node) { 2248 break_cow(tree_rmap_item); 2249 break_cow(rmap_item); 2250 } 2251 } else if (split) { 2252 /* 2253 * We are here if we tried to merge two pages and 2254 * failed because they both belonged to the same 2255 * compound page. We will split the page now, but no 2256 * merging will take place. 2257 * We do not want to add the cost of a full lock; if 2258 * the page is locked, it is better to skip it and 2259 * perhaps try again later. 2260 */ 2261 if (!trylock_page(page)) 2262 return; 2263 split_huge_page(page); 2264 unlock_page(page); 2265 } 2266 } 2267 } 2268 2269 static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot, 2270 struct ksm_rmap_item **rmap_list, 2271 unsigned long addr) 2272 { 2273 struct ksm_rmap_item *rmap_item; 2274 2275 while (*rmap_list) { 2276 rmap_item = *rmap_list; 2277 if ((rmap_item->address & PAGE_MASK) == addr) 2278 return rmap_item; 2279 if (rmap_item->address > addr) 2280 break; 2281 *rmap_list = rmap_item->rmap_list; 2282 remove_rmap_item_from_tree(rmap_item); 2283 free_rmap_item(rmap_item); 2284 } 2285 2286 rmap_item = alloc_rmap_item(); 2287 if (rmap_item) { 2288 /* It has already been zeroed */ 2289 rmap_item->mm = mm_slot->slot.mm; 2290 rmap_item->mm->ksm_rmap_items++; 2291 rmap_item->address = addr; 2292 rmap_item->rmap_list = *rmap_list; 2293 *rmap_list = rmap_item; 2294 } 2295 return rmap_item; 2296 } 2297 2298 static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page) 2299 { 2300 struct mm_struct *mm; 2301 struct ksm_mm_slot *mm_slot; 2302 struct mm_slot *slot; 2303 struct vm_area_struct *vma; 2304 struct ksm_rmap_item *rmap_item; 2305 struct vma_iterator vmi; 2306 int nid; 2307 2308 if (list_empty(&ksm_mm_head.slot.mm_node)) 2309 return NULL; 2310 2311 mm_slot = ksm_scan.mm_slot; 2312 if (mm_slot == &ksm_mm_head) { 2313 trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items); 2314 2315 /* 2316 * A number of pages can hang around indefinitely in per-cpu 2317 * LRU cache, raised page count preventing write_protect_page 2318 * from merging them. Though it doesn't really matter much, 2319 * it is puzzling to see some stuck in pages_volatile until 2320 * other activity jostles them out, and they also prevented 2321 * LTP's KSM test from succeeding deterministically; so drain 2322 * them here (here rather than on entry to ksm_do_scan(), 2323 * so we don't IPI too often when pages_to_scan is set low). 
2324 */ 2325 lru_add_drain_all(); 2326 2327 /* 2328 * Whereas stale stable_nodes on the stable_tree itself 2329 * get pruned in the regular course of stable_tree_search(), 2330 * those moved out to the migrate_nodes list can accumulate: 2331 * so prune them once before each full scan. 2332 */ 2333 if (!ksm_merge_across_nodes) { 2334 struct ksm_stable_node *stable_node, *next; 2335 struct page *page; 2336 2337 list_for_each_entry_safe(stable_node, next, 2338 &migrate_nodes, list) { 2339 page = get_ksm_page(stable_node, 2340 GET_KSM_PAGE_NOLOCK); 2341 if (page) 2342 put_page(page); 2343 cond_resched(); 2344 } 2345 } 2346 2347 for (nid = 0; nid < ksm_nr_node_ids; nid++) 2348 root_unstable_tree[nid] = RB_ROOT; 2349 2350 spin_lock(&ksm_mmlist_lock); 2351 slot = list_entry(mm_slot->slot.mm_node.next, 2352 struct mm_slot, mm_node); 2353 mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); 2354 ksm_scan.mm_slot = mm_slot; 2355 spin_unlock(&ksm_mmlist_lock); 2356 /* 2357 * Although we tested list_empty() above, a racing __ksm_exit 2358 * of the last mm on the list may have removed it since then. 2359 */ 2360 if (mm_slot == &ksm_mm_head) 2361 return NULL; 2362 next_mm: 2363 ksm_scan.address = 0; 2364 ksm_scan.rmap_list = &mm_slot->rmap_list; 2365 } 2366 2367 slot = &mm_slot->slot; 2368 mm = slot->mm; 2369 vma_iter_init(&vmi, mm, ksm_scan.address); 2370 2371 mmap_read_lock(mm); 2372 if (ksm_test_exit(mm)) 2373 goto no_vmas; 2374 2375 for_each_vma(vmi, vma) { 2376 if (!(vma->vm_flags & VM_MERGEABLE)) 2377 continue; 2378 if (ksm_scan.address < vma->vm_start) 2379 ksm_scan.address = vma->vm_start; 2380 if (!vma->anon_vma) 2381 ksm_scan.address = vma->vm_end; 2382 2383 while (ksm_scan.address < vma->vm_end) { 2384 if (ksm_test_exit(mm)) 2385 break; 2386 *page = follow_page(vma, ksm_scan.address, FOLL_GET); 2387 if (IS_ERR_OR_NULL(*page)) { 2388 ksm_scan.address += PAGE_SIZE; 2389 cond_resched(); 2390 continue; 2391 } 2392 if (is_zone_device_page(*page)) 2393 goto next_page; 2394 if (PageAnon(*page)) { 2395 flush_anon_page(vma, *page, ksm_scan.address); 2396 flush_dcache_page(*page); 2397 rmap_item = get_next_rmap_item(mm_slot, 2398 ksm_scan.rmap_list, ksm_scan.address); 2399 if (rmap_item) { 2400 ksm_scan.rmap_list = 2401 &rmap_item->rmap_list; 2402 ksm_scan.address += PAGE_SIZE; 2403 } else 2404 put_page(*page); 2405 mmap_read_unlock(mm); 2406 return rmap_item; 2407 } 2408 next_page: 2409 put_page(*page); 2410 ksm_scan.address += PAGE_SIZE; 2411 cond_resched(); 2412 } 2413 } 2414 2415 if (ksm_test_exit(mm)) { 2416 no_vmas: 2417 ksm_scan.address = 0; 2418 ksm_scan.rmap_list = &mm_slot->rmap_list; 2419 } 2420 /* 2421 * Nuke all the rmap_items that are above this current rmap: 2422 * because there were no VM_MERGEABLE vmas with such addresses. 2423 */ 2424 remove_trailing_rmap_items(ksm_scan.rmap_list); 2425 2426 spin_lock(&ksm_mmlist_lock); 2427 slot = list_entry(mm_slot->slot.mm_node.next, 2428 struct mm_slot, mm_node); 2429 ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); 2430 if (ksm_scan.address == 0) { 2431 /* 2432 * We've completed a full scan of all vmas, holding mmap_lock 2433 * throughout, and found no VM_MERGEABLE: so do the same as 2434 * __ksm_exit does to remove this mm from all our lists now. 2435 * This applies either when cleaning up after __ksm_exit 2436 * (but beware: we can reach here even before __ksm_exit), 2437 * or when all VM_MERGEABLE areas have been unmapped (and 2438 * mmap_lock then protects against race with MADV_MERGEABLE). 
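		 * The mmdrop() below releases the reference that
		 * __ksm_enter() took with mmgrab().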
2439 */ 2440 hash_del(&mm_slot->slot.hash); 2441 list_del(&mm_slot->slot.mm_node); 2442 spin_unlock(&ksm_mmlist_lock); 2443 2444 mm_slot_free(mm_slot_cache, mm_slot); 2445 clear_bit(MMF_VM_MERGEABLE, &mm->flags); 2446 clear_bit(MMF_VM_MERGE_ANY, &mm->flags); 2447 mmap_read_unlock(mm); 2448 mmdrop(mm); 2449 } else { 2450 mmap_read_unlock(mm); 2451 /* 2452 * mmap_read_unlock(mm) first because after 2453 * spin_unlock(&ksm_mmlist_lock) run, the "mm" may 2454 * already have been freed under us by __ksm_exit() 2455 * because the "mm_slot" is still hashed and 2456 * ksm_scan.mm_slot doesn't point to it anymore. 2457 */ 2458 spin_unlock(&ksm_mmlist_lock); 2459 } 2460 2461 /* Repeat until we've completed scanning the whole list */ 2462 mm_slot = ksm_scan.mm_slot; 2463 if (mm_slot != &ksm_mm_head) 2464 goto next_mm; 2465 2466 trace_ksm_stop_scan(ksm_scan.seqnr, ksm_rmap_items); 2467 ksm_scan.seqnr++; 2468 return NULL; 2469 } 2470 2471 /** 2472 * ksm_do_scan - the ksm scanner main worker function. 2473 * @scan_npages: number of pages we want to scan before we return. 2474 */ 2475 static void ksm_do_scan(unsigned int scan_npages) 2476 { 2477 struct ksm_rmap_item *rmap_item; 2478 struct page *page; 2479 2480 while (scan_npages-- && likely(!freezing(current))) { 2481 cond_resched(); 2482 rmap_item = scan_get_next_rmap_item(&page); 2483 if (!rmap_item) 2484 return; 2485 cmp_and_merge_page(page, rmap_item); 2486 put_page(page); 2487 } 2488 } 2489 2490 static int ksmd_should_run(void) 2491 { 2492 return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.slot.mm_node); 2493 } 2494 2495 static int ksm_scan_thread(void *nothing) 2496 { 2497 unsigned int sleep_ms; 2498 2499 set_freezable(); 2500 set_user_nice(current, 5); 2501 2502 while (!kthread_should_stop()) { 2503 mutex_lock(&ksm_thread_mutex); 2504 wait_while_offlining(); 2505 if (ksmd_should_run()) 2506 ksm_do_scan(ksm_thread_pages_to_scan); 2507 mutex_unlock(&ksm_thread_mutex); 2508 2509 try_to_freeze(); 2510 2511 if (ksmd_should_run()) { 2512 sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs); 2513 wait_event_interruptible_timeout(ksm_iter_wait, 2514 sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs), 2515 msecs_to_jiffies(sleep_ms)); 2516 } else { 2517 wait_event_freezable(ksm_thread_wait, 2518 ksmd_should_run() || kthread_should_stop()); 2519 } 2520 } 2521 return 0; 2522 } 2523 2524 static void __ksm_add_vma(struct vm_area_struct *vma) 2525 { 2526 unsigned long vm_flags = vma->vm_flags; 2527 2528 if (vm_flags & VM_MERGEABLE) 2529 return; 2530 2531 if (vma_ksm_compatible(vma)) 2532 vm_flags_set(vma, VM_MERGEABLE); 2533 } 2534 2535 static int __ksm_del_vma(struct vm_area_struct *vma) 2536 { 2537 int err; 2538 2539 if (!(vma->vm_flags & VM_MERGEABLE)) 2540 return 0; 2541 2542 if (vma->anon_vma) { 2543 err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end); 2544 if (err) 2545 return err; 2546 } 2547 2548 vm_flags_clear(vma, VM_MERGEABLE); 2549 return 0; 2550 } 2551 /** 2552 * ksm_add_vma - Mark vma as mergeable if compatible 2553 * 2554 * @vma: Pointer to vma 2555 */ 2556 void ksm_add_vma(struct vm_area_struct *vma) 2557 { 2558 struct mm_struct *mm = vma->vm_mm; 2559 2560 if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) 2561 __ksm_add_vma(vma); 2562 } 2563 2564 static void ksm_add_vmas(struct mm_struct *mm) 2565 { 2566 struct vm_area_struct *vma; 2567 2568 VMA_ITERATOR(vmi, mm, 0); 2569 for_each_vma(vmi, vma) 2570 __ksm_add_vma(vma); 2571 } 2572 2573 static int ksm_del_vmas(struct mm_struct *mm) 2574 { 2575 struct vm_area_struct *vma; 2576 int err; 2577 
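	/* Unmerge and clear VM_MERGEABLE on every VMA; stop at the first error. */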
2578 VMA_ITERATOR(vmi, mm, 0); 2579 for_each_vma(vmi, vma) { 2580 err = __ksm_del_vma(vma); 2581 if (err) 2582 return err; 2583 } 2584 return 0; 2585 } 2586 2587 /** 2588 * ksm_enable_merge_any - Add mm to mm ksm list and enable merging on all 2589 * compatible VMA's 2590 * 2591 * @mm: Pointer to mm 2592 * 2593 * Returns 0 on success, otherwise error code 2594 */ 2595 int ksm_enable_merge_any(struct mm_struct *mm) 2596 { 2597 int err; 2598 2599 if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) 2600 return 0; 2601 2602 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { 2603 err = __ksm_enter(mm); 2604 if (err) 2605 return err; 2606 } 2607 2608 set_bit(MMF_VM_MERGE_ANY, &mm->flags); 2609 ksm_add_vmas(mm); 2610 2611 return 0; 2612 } 2613 2614 /** 2615 * ksm_disable_merge_any - Disable merging on all compatible VMA's of the mm, 2616 * previously enabled via ksm_enable_merge_any(). 2617 * 2618 * Disabling merging implies unmerging any merged pages, like setting 2619 * MADV_UNMERGEABLE would. If unmerging fails, the whole operation fails and 2620 * merging on all compatible VMA's remains enabled. 2621 * 2622 * @mm: Pointer to mm 2623 * 2624 * Returns 0 on success, otherwise error code 2625 */ 2626 int ksm_disable_merge_any(struct mm_struct *mm) 2627 { 2628 int err; 2629 2630 if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags)) 2631 return 0; 2632 2633 err = ksm_del_vmas(mm); 2634 if (err) { 2635 ksm_add_vmas(mm); 2636 return err; 2637 } 2638 2639 clear_bit(MMF_VM_MERGE_ANY, &mm->flags); 2640 return 0; 2641 } 2642 2643 int ksm_disable(struct mm_struct *mm) 2644 { 2645 mmap_assert_write_locked(mm); 2646 2647 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) 2648 return 0; 2649 if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) 2650 return ksm_disable_merge_any(mm); 2651 return ksm_del_vmas(mm); 2652 } 2653 2654 int ksm_madvise(struct vm_area_struct *vma, unsigned long start, 2655 unsigned long end, int advice, unsigned long *vm_flags) 2656 { 2657 struct mm_struct *mm = vma->vm_mm; 2658 int err; 2659 2660 switch (advice) { 2661 case MADV_MERGEABLE: 2662 if (vma->vm_flags & VM_MERGEABLE) 2663 return 0; 2664 if (!vma_ksm_compatible(vma)) 2665 return 0; 2666 2667 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { 2668 err = __ksm_enter(mm); 2669 if (err) 2670 return err; 2671 } 2672 2673 *vm_flags |= VM_MERGEABLE; 2674 break; 2675 2676 case MADV_UNMERGEABLE: 2677 if (!(*vm_flags & VM_MERGEABLE)) 2678 return 0; /* just ignore the advice */ 2679 2680 if (vma->anon_vma) { 2681 err = unmerge_ksm_pages(vma, start, end); 2682 if (err) 2683 return err; 2684 } 2685 2686 *vm_flags &= ~VM_MERGEABLE; 2687 break; 2688 } 2689 2690 return 0; 2691 } 2692 EXPORT_SYMBOL_GPL(ksm_madvise); 2693 2694 int __ksm_enter(struct mm_struct *mm) 2695 { 2696 struct ksm_mm_slot *mm_slot; 2697 struct mm_slot *slot; 2698 int needs_wakeup; 2699 2700 mm_slot = mm_slot_alloc(mm_slot_cache); 2701 if (!mm_slot) 2702 return -ENOMEM; 2703 2704 slot = &mm_slot->slot; 2705 2706 /* Check ksm_run too? Would need tighter locking */ 2707 needs_wakeup = list_empty(&ksm_mm_head.slot.mm_node); 2708 2709 spin_lock(&ksm_mmlist_lock); 2710 mm_slot_insert(mm_slots_hash, mm, slot); 2711 /* 2712 * When KSM_RUN_MERGE (or KSM_RUN_STOP), 2713 * insert just behind the scanning cursor, to let the area settle 2714 * down a little; when fork is followed by immediate exec, we don't 2715 * want ksmd to waste time setting up and tearing down an rmap_list. 
2716 * 2717 * But when KSM_RUN_UNMERGE, it's important to insert ahead of its 2718 * scanning cursor, otherwise KSM pages in newly forked mms will be 2719 * missed: then we might as well insert at the end of the list. 2720 */ 2721 if (ksm_run & KSM_RUN_UNMERGE) 2722 list_add_tail(&slot->mm_node, &ksm_mm_head.slot.mm_node); 2723 else 2724 list_add_tail(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node); 2725 spin_unlock(&ksm_mmlist_lock); 2726 2727 set_bit(MMF_VM_MERGEABLE, &mm->flags); 2728 mmgrab(mm); 2729 2730 if (needs_wakeup) 2731 wake_up_interruptible(&ksm_thread_wait); 2732 2733 trace_ksm_enter(mm); 2734 return 0; 2735 } 2736 2737 void __ksm_exit(struct mm_struct *mm) 2738 { 2739 struct ksm_mm_slot *mm_slot; 2740 struct mm_slot *slot; 2741 int easy_to_free = 0; 2742 2743 /* 2744 * This process is exiting: if it's straightforward (as is the 2745 * case when ksmd was never running), free mm_slot immediately. 2746 * But if it's at the cursor or has rmap_items linked to it, use 2747 * mmap_lock to synchronize with any break_cows before pagetables 2748 * are freed, and leave the mm_slot on the list for ksmd to free. 2749 * Beware: ksm may already have noticed it exiting and freed the slot. 2750 */ 2751 2752 spin_lock(&ksm_mmlist_lock); 2753 slot = mm_slot_lookup(mm_slots_hash, mm); 2754 mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); 2755 if (mm_slot && ksm_scan.mm_slot != mm_slot) { 2756 if (!mm_slot->rmap_list) { 2757 hash_del(&slot->hash); 2758 list_del(&slot->mm_node); 2759 easy_to_free = 1; 2760 } else { 2761 list_move(&slot->mm_node, 2762 &ksm_scan.mm_slot->slot.mm_node); 2763 } 2764 } 2765 spin_unlock(&ksm_mmlist_lock); 2766 2767 if (easy_to_free) { 2768 mm_slot_free(mm_slot_cache, mm_slot); 2769 clear_bit(MMF_VM_MERGE_ANY, &mm->flags); 2770 clear_bit(MMF_VM_MERGEABLE, &mm->flags); 2771 mmdrop(mm); 2772 } else if (mm_slot) { 2773 mmap_write_lock(mm); 2774 mmap_write_unlock(mm); 2775 } 2776 2777 trace_ksm_exit(mm); 2778 } 2779 2780 struct page *ksm_might_need_to_copy(struct page *page, 2781 struct vm_area_struct *vma, unsigned long address) 2782 { 2783 struct folio *folio = page_folio(page); 2784 struct anon_vma *anon_vma = folio_anon_vma(folio); 2785 struct page *new_page; 2786 2787 if (PageKsm(page)) { 2788 if (page_stable_node(page) && 2789 !(ksm_run & KSM_RUN_UNMERGE)) 2790 return page; /* no need to copy it */ 2791 } else if (!anon_vma) { 2792 return page; /* no need to copy it */ 2793 } else if (page->index == linear_page_index(vma, address) && 2794 anon_vma->root == vma->anon_vma->root) { 2795 return page; /* still no need to copy it */ 2796 } 2797 if (!PageUptodate(page)) 2798 return page; /* let do_swap_page report the error */ 2799 2800 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); 2801 if (new_page && 2802 mem_cgroup_charge(page_folio(new_page), vma->vm_mm, GFP_KERNEL)) { 2803 put_page(new_page); 2804 new_page = NULL; 2805 } 2806 if (new_page) { 2807 if (copy_mc_user_highpage(new_page, page, address, vma)) { 2808 put_page(new_page); 2809 memory_failure_queue(page_to_pfn(page), 0); 2810 return ERR_PTR(-EHWPOISON); 2811 } 2812 SetPageDirty(new_page); 2813 __SetPageUptodate(new_page); 2814 __SetPageLocked(new_page); 2815 #ifdef CONFIG_SWAP 2816 count_vm_event(KSM_SWPIN_COPY); 2817 #endif 2818 } 2819 2820 return new_page; 2821 } 2822 2823 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc) 2824 { 2825 struct ksm_stable_node *stable_node; 2826 struct ksm_rmap_item *rmap_item; 2827 int search_new_forks = 0; 2828 2829 
VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio); 2830 2831 /* 2832 * Rely on the page lock to protect against concurrent modifications 2833 * to that page's node of the stable tree. 2834 */ 2835 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 2836 2837 stable_node = folio_stable_node(folio); 2838 if (!stable_node) 2839 return; 2840 again: 2841 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { 2842 struct anon_vma *anon_vma = rmap_item->anon_vma; 2843 struct anon_vma_chain *vmac; 2844 struct vm_area_struct *vma; 2845 2846 cond_resched(); 2847 if (!anon_vma_trylock_read(anon_vma)) { 2848 if (rwc->try_lock) { 2849 rwc->contended = true; 2850 return; 2851 } 2852 anon_vma_lock_read(anon_vma); 2853 } 2854 anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, 2855 0, ULONG_MAX) { 2856 unsigned long addr; 2857 2858 cond_resched(); 2859 vma = vmac->vma; 2860 2861 /* Ignore the stable/unstable/sqnr flags */ 2862 addr = rmap_item->address & PAGE_MASK; 2863 2864 if (addr < vma->vm_start || addr >= vma->vm_end) 2865 continue; 2866 /* 2867 * Initially we examine only the vma which covers this 2868 * rmap_item; but later, if there is still work to do, 2869 * we examine covering vmas in other mms: in case they 2870 * were forked from the original since ksmd passed. 2871 */ 2872 if ((rmap_item->mm == vma->vm_mm) == search_new_forks) 2873 continue; 2874 2875 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 2876 continue; 2877 2878 if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) { 2879 anon_vma_unlock_read(anon_vma); 2880 return; 2881 } 2882 if (rwc->done && rwc->done(folio)) { 2883 anon_vma_unlock_read(anon_vma); 2884 return; 2885 } 2886 } 2887 anon_vma_unlock_read(anon_vma); 2888 } 2889 if (!search_new_forks++) 2890 goto again; 2891 } 2892 2893 #ifdef CONFIG_MEMORY_FAILURE 2894 /* 2895 * Collect processes when the error hit an ksm page. 
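 * For each rmap_item on the stable node, walk its anon_vma and queue the
 * eligible tasks owning those vmas, together with the mapped address,
 * on the to_kill list.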
2896 */ 2897 void collect_procs_ksm(struct page *page, struct list_head *to_kill, 2898 int force_early) 2899 { 2900 struct ksm_stable_node *stable_node; 2901 struct ksm_rmap_item *rmap_item; 2902 struct folio *folio = page_folio(page); 2903 struct vm_area_struct *vma; 2904 struct task_struct *tsk; 2905 2906 stable_node = folio_stable_node(folio); 2907 if (!stable_node) 2908 return; 2909 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { 2910 struct anon_vma *av = rmap_item->anon_vma; 2911 2912 anon_vma_lock_read(av); 2913 read_lock(&tasklist_lock); 2914 for_each_process(tsk) { 2915 struct anon_vma_chain *vmac; 2916 unsigned long addr; 2917 struct task_struct *t = 2918 task_early_kill(tsk, force_early); 2919 if (!t) 2920 continue; 2921 anon_vma_interval_tree_foreach(vmac, &av->rb_root, 0, 2922 ULONG_MAX) 2923 { 2924 vma = vmac->vma; 2925 if (vma->vm_mm == t->mm) { 2926 addr = rmap_item->address & PAGE_MASK; 2927 add_to_kill_ksm(t, page, vma, to_kill, 2928 addr); 2929 } 2930 } 2931 } 2932 read_unlock(&tasklist_lock); 2933 anon_vma_unlock_read(av); 2934 } 2935 } 2936 #endif 2937 2938 #ifdef CONFIG_MIGRATION 2939 void folio_migrate_ksm(struct folio *newfolio, struct folio *folio) 2940 { 2941 struct ksm_stable_node *stable_node; 2942 2943 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 2944 VM_BUG_ON_FOLIO(!folio_test_locked(newfolio), newfolio); 2945 VM_BUG_ON_FOLIO(newfolio->mapping != folio->mapping, newfolio); 2946 2947 stable_node = folio_stable_node(folio); 2948 if (stable_node) { 2949 VM_BUG_ON_FOLIO(stable_node->kpfn != folio_pfn(folio), folio); 2950 stable_node->kpfn = folio_pfn(newfolio); 2951 /* 2952 * newfolio->mapping was set in advance; now we need smp_wmb() 2953 * to make sure that the new stable_node->kpfn is visible 2954 * to get_ksm_page() before it can see that folio->mapping 2955 * has gone stale (or that folio_test_swapcache has been cleared). 
2956 */ 2957 smp_wmb(); 2958 set_page_stable_node(&folio->page, NULL); 2959 } 2960 } 2961 #endif /* CONFIG_MIGRATION */ 2962 2963 #ifdef CONFIG_MEMORY_HOTREMOVE 2964 static void wait_while_offlining(void) 2965 { 2966 while (ksm_run & KSM_RUN_OFFLINE) { 2967 mutex_unlock(&ksm_thread_mutex); 2968 wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE), 2969 TASK_UNINTERRUPTIBLE); 2970 mutex_lock(&ksm_thread_mutex); 2971 } 2972 } 2973 2974 static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node, 2975 unsigned long start_pfn, 2976 unsigned long end_pfn) 2977 { 2978 if (stable_node->kpfn >= start_pfn && 2979 stable_node->kpfn < end_pfn) { 2980 /* 2981 * Don't get_ksm_page, page has already gone: 2982 * which is why we keep kpfn instead of page* 2983 */ 2984 remove_node_from_stable_tree(stable_node); 2985 return true; 2986 } 2987 return false; 2988 } 2989 2990 static bool stable_node_chain_remove_range(struct ksm_stable_node *stable_node, 2991 unsigned long start_pfn, 2992 unsigned long end_pfn, 2993 struct rb_root *root) 2994 { 2995 struct ksm_stable_node *dup; 2996 struct hlist_node *hlist_safe; 2997 2998 if (!is_stable_node_chain(stable_node)) { 2999 VM_BUG_ON(is_stable_node_dup(stable_node)); 3000 return stable_node_dup_remove_range(stable_node, start_pfn, 3001 end_pfn); 3002 } 3003 3004 hlist_for_each_entry_safe(dup, hlist_safe, 3005 &stable_node->hlist, hlist_dup) { 3006 VM_BUG_ON(!is_stable_node_dup(dup)); 3007 stable_node_dup_remove_range(dup, start_pfn, end_pfn); 3008 } 3009 if (hlist_empty(&stable_node->hlist)) { 3010 free_stable_node_chain(stable_node, root); 3011 return true; /* notify caller that tree was rebalanced */ 3012 } else 3013 return false; 3014 } 3015 3016 static void ksm_check_stable_tree(unsigned long start_pfn, 3017 unsigned long end_pfn) 3018 { 3019 struct ksm_stable_node *stable_node, *next; 3020 struct rb_node *node; 3021 int nid; 3022 3023 for (nid = 0; nid < ksm_nr_node_ids; nid++) { 3024 node = rb_first(root_stable_tree + nid); 3025 while (node) { 3026 stable_node = rb_entry(node, struct ksm_stable_node, node); 3027 if (stable_node_chain_remove_range(stable_node, 3028 start_pfn, end_pfn, 3029 root_stable_tree + 3030 nid)) 3031 node = rb_first(root_stable_tree + nid); 3032 else 3033 node = rb_next(node); 3034 cond_resched(); 3035 } 3036 } 3037 list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) { 3038 if (stable_node->kpfn >= start_pfn && 3039 stable_node->kpfn < end_pfn) 3040 remove_node_from_stable_tree(stable_node); 3041 cond_resched(); 3042 } 3043 } 3044 3045 static int ksm_memory_callback(struct notifier_block *self, 3046 unsigned long action, void *arg) 3047 { 3048 struct memory_notify *mn = arg; 3049 3050 switch (action) { 3051 case MEM_GOING_OFFLINE: 3052 /* 3053 * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items() 3054 * and remove_all_stable_nodes() while memory is going offline: 3055 * it is unsafe for them to touch the stable tree at this time. 3056 * But unmerge_ksm_pages(), rmap lookups and other entry points 3057 * which do not need the ksm_thread_mutex are all safe. 3058 */ 3059 mutex_lock(&ksm_thread_mutex); 3060 ksm_run |= KSM_RUN_OFFLINE; 3061 mutex_unlock(&ksm_thread_mutex); 3062 break; 3063 3064 case MEM_OFFLINE: 3065 /* 3066 * Most of the work is done by page migration; but there might 3067 * be a few stable_nodes left over, still pointing to struct 3068 * pages which have been offlined: prune those from the tree, 3069 * otherwise get_ksm_page() might later try to access a 3070 * non-existent struct page. 
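		 * ksmd cannot race with this pruning: KSM_RUN_OFFLINE was
		 * set at MEM_GOING_OFFLINE, and wait_while_offlining()
		 * keeps it out until the flag is cleared again below.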
3071 */ 3072 ksm_check_stable_tree(mn->start_pfn, 3073 mn->start_pfn + mn->nr_pages); 3074 fallthrough; 3075 case MEM_CANCEL_OFFLINE: 3076 mutex_lock(&ksm_thread_mutex); 3077 ksm_run &= ~KSM_RUN_OFFLINE; 3078 mutex_unlock(&ksm_thread_mutex); 3079 3080 smp_mb(); /* wake_up_bit advises this */ 3081 wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE)); 3082 break; 3083 } 3084 return NOTIFY_OK; 3085 } 3086 #else 3087 static void wait_while_offlining(void) 3088 { 3089 } 3090 #endif /* CONFIG_MEMORY_HOTREMOVE */ 3091 3092 #ifdef CONFIG_PROC_FS 3093 long ksm_process_profit(struct mm_struct *mm) 3094 { 3095 return (long)(mm->ksm_merging_pages + mm->ksm_zero_pages) * PAGE_SIZE - 3096 mm->ksm_rmap_items * sizeof(struct ksm_rmap_item); 3097 } 3098 #endif /* CONFIG_PROC_FS */ 3099 3100 #ifdef CONFIG_SYSFS 3101 /* 3102 * This all compiles without CONFIG_SYSFS, but is a waste of space. 3103 */ 3104 3105 #define KSM_ATTR_RO(_name) \ 3106 static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 3107 #define KSM_ATTR(_name) \ 3108 static struct kobj_attribute _name##_attr = __ATTR_RW(_name) 3109 3110 static ssize_t sleep_millisecs_show(struct kobject *kobj, 3111 struct kobj_attribute *attr, char *buf) 3112 { 3113 return sysfs_emit(buf, "%u\n", ksm_thread_sleep_millisecs); 3114 } 3115 3116 static ssize_t sleep_millisecs_store(struct kobject *kobj, 3117 struct kobj_attribute *attr, 3118 const char *buf, size_t count) 3119 { 3120 unsigned int msecs; 3121 int err; 3122 3123 err = kstrtouint(buf, 10, &msecs); 3124 if (err) 3125 return -EINVAL; 3126 3127 ksm_thread_sleep_millisecs = msecs; 3128 wake_up_interruptible(&ksm_iter_wait); 3129 3130 return count; 3131 } 3132 KSM_ATTR(sleep_millisecs); 3133 3134 static ssize_t pages_to_scan_show(struct kobject *kobj, 3135 struct kobj_attribute *attr, char *buf) 3136 { 3137 return sysfs_emit(buf, "%u\n", ksm_thread_pages_to_scan); 3138 } 3139 3140 static ssize_t pages_to_scan_store(struct kobject *kobj, 3141 struct kobj_attribute *attr, 3142 const char *buf, size_t count) 3143 { 3144 unsigned int nr_pages; 3145 int err; 3146 3147 err = kstrtouint(buf, 10, &nr_pages); 3148 if (err) 3149 return -EINVAL; 3150 3151 ksm_thread_pages_to_scan = nr_pages; 3152 3153 return count; 3154 } 3155 KSM_ATTR(pages_to_scan); 3156 3157 static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr, 3158 char *buf) 3159 { 3160 return sysfs_emit(buf, "%lu\n", ksm_run); 3161 } 3162 3163 static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, 3164 const char *buf, size_t count) 3165 { 3166 unsigned int flags; 3167 int err; 3168 3169 err = kstrtouint(buf, 10, &flags); 3170 if (err) 3171 return -EINVAL; 3172 if (flags > KSM_RUN_UNMERGE) 3173 return -EINVAL; 3174 3175 /* 3176 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running. 3177 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items, 3178 * breaking COW to free the pages_shared (but leaves mm_slots 3179 * on the list for when ksmd may be set running again). 
3180 */ 3181 3182 mutex_lock(&ksm_thread_mutex); 3183 wait_while_offlining(); 3184 if (ksm_run != flags) { 3185 ksm_run = flags; 3186 if (flags & KSM_RUN_UNMERGE) { 3187 set_current_oom_origin(); 3188 err = unmerge_and_remove_all_rmap_items(); 3189 clear_current_oom_origin(); 3190 if (err) { 3191 ksm_run = KSM_RUN_STOP; 3192 count = err; 3193 } 3194 } 3195 } 3196 mutex_unlock(&ksm_thread_mutex); 3197 3198 if (flags & KSM_RUN_MERGE) 3199 wake_up_interruptible(&ksm_thread_wait); 3200 3201 return count; 3202 } 3203 KSM_ATTR(run); 3204 3205 #ifdef CONFIG_NUMA 3206 static ssize_t merge_across_nodes_show(struct kobject *kobj, 3207 struct kobj_attribute *attr, char *buf) 3208 { 3209 return sysfs_emit(buf, "%u\n", ksm_merge_across_nodes); 3210 } 3211 3212 static ssize_t merge_across_nodes_store(struct kobject *kobj, 3213 struct kobj_attribute *attr, 3214 const char *buf, size_t count) 3215 { 3216 int err; 3217 unsigned long knob; 3218 3219 err = kstrtoul(buf, 10, &knob); 3220 if (err) 3221 return err; 3222 if (knob > 1) 3223 return -EINVAL; 3224 3225 mutex_lock(&ksm_thread_mutex); 3226 wait_while_offlining(); 3227 if (ksm_merge_across_nodes != knob) { 3228 if (ksm_pages_shared || remove_all_stable_nodes()) 3229 err = -EBUSY; 3230 else if (root_stable_tree == one_stable_tree) { 3231 struct rb_root *buf; 3232 /* 3233 * This is the first time that we switch away from the 3234 * default of merging across nodes: must now allocate 3235 * a buffer to hold as many roots as may be needed. 3236 * Allocate stable and unstable together: 3237 * MAXSMP NODES_SHIFT 10 will use 16kB. 3238 */ 3239 buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf), 3240 GFP_KERNEL); 3241 /* Let us assume that RB_ROOT is NULL is zero */ 3242 if (!buf) 3243 err = -ENOMEM; 3244 else { 3245 root_stable_tree = buf; 3246 root_unstable_tree = buf + nr_node_ids; 3247 /* Stable tree is empty but not the unstable */ 3248 root_unstable_tree[0] = one_unstable_tree[0]; 3249 } 3250 } 3251 if (!err) { 3252 ksm_merge_across_nodes = knob; 3253 ksm_nr_node_ids = knob ? 1 : nr_node_ids; 3254 } 3255 } 3256 mutex_unlock(&ksm_thread_mutex); 3257 3258 return err ? err : count; 3259 } 3260 KSM_ATTR(merge_across_nodes); 3261 #endif 3262 3263 static ssize_t use_zero_pages_show(struct kobject *kobj, 3264 struct kobj_attribute *attr, char *buf) 3265 { 3266 return sysfs_emit(buf, "%u\n", ksm_use_zero_pages); 3267 } 3268 static ssize_t use_zero_pages_store(struct kobject *kobj, 3269 struct kobj_attribute *attr, 3270 const char *buf, size_t count) 3271 { 3272 int err; 3273 bool value; 3274 3275 err = kstrtobool(buf, &value); 3276 if (err) 3277 return -EINVAL; 3278 3279 ksm_use_zero_pages = value; 3280 3281 return count; 3282 } 3283 KSM_ATTR(use_zero_pages); 3284 3285 static ssize_t max_page_sharing_show(struct kobject *kobj, 3286 struct kobj_attribute *attr, char *buf) 3287 { 3288 return sysfs_emit(buf, "%u\n", ksm_max_page_sharing); 3289 } 3290 3291 static ssize_t max_page_sharing_store(struct kobject *kobj, 3292 struct kobj_attribute *attr, 3293 const char *buf, size_t count) 3294 { 3295 int err; 3296 int knob; 3297 3298 err = kstrtoint(buf, 10, &knob); 3299 if (err) 3300 return err; 3301 /* 3302 * When a KSM page is created it is shared by 2 mappings. This 3303 * being a signed comparison, it implicitly verifies it's not 3304 * negative. 
3305 */ 3306 if (knob < 2) 3307 return -EINVAL; 3308 3309 if (READ_ONCE(ksm_max_page_sharing) == knob) 3310 return count; 3311 3312 mutex_lock(&ksm_thread_mutex); 3313 wait_while_offlining(); 3314 if (ksm_max_page_sharing != knob) { 3315 if (ksm_pages_shared || remove_all_stable_nodes()) 3316 err = -EBUSY; 3317 else 3318 ksm_max_page_sharing = knob; 3319 } 3320 mutex_unlock(&ksm_thread_mutex); 3321 3322 return err ? err : count; 3323 } 3324 KSM_ATTR(max_page_sharing); 3325 3326 static ssize_t pages_shared_show(struct kobject *kobj, 3327 struct kobj_attribute *attr, char *buf) 3328 { 3329 return sysfs_emit(buf, "%lu\n", ksm_pages_shared); 3330 } 3331 KSM_ATTR_RO(pages_shared); 3332 3333 static ssize_t pages_sharing_show(struct kobject *kobj, 3334 struct kobj_attribute *attr, char *buf) 3335 { 3336 return sysfs_emit(buf, "%lu\n", ksm_pages_sharing); 3337 } 3338 KSM_ATTR_RO(pages_sharing); 3339 3340 static ssize_t pages_unshared_show(struct kobject *kobj, 3341 struct kobj_attribute *attr, char *buf) 3342 { 3343 return sysfs_emit(buf, "%lu\n", ksm_pages_unshared); 3344 } 3345 KSM_ATTR_RO(pages_unshared); 3346 3347 static ssize_t pages_volatile_show(struct kobject *kobj, 3348 struct kobj_attribute *attr, char *buf) 3349 { 3350 long ksm_pages_volatile; 3351 3352 ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared 3353 - ksm_pages_sharing - ksm_pages_unshared; 3354 /* 3355 * It was not worth any locking to calculate that statistic, 3356 * but it might therefore sometimes be negative: conceal that. 3357 */ 3358 if (ksm_pages_volatile < 0) 3359 ksm_pages_volatile = 0; 3360 return sysfs_emit(buf, "%ld\n", ksm_pages_volatile); 3361 } 3362 KSM_ATTR_RO(pages_volatile); 3363 3364 static ssize_t ksm_zero_pages_show(struct kobject *kobj, 3365 struct kobj_attribute *attr, char *buf) 3366 { 3367 return sysfs_emit(buf, "%ld\n", ksm_zero_pages); 3368 } 3369 KSM_ATTR_RO(ksm_zero_pages); 3370 3371 static ssize_t general_profit_show(struct kobject *kobj, 3372 struct kobj_attribute *attr, char *buf) 3373 { 3374 long general_profit; 3375 3376 general_profit = (ksm_pages_sharing + ksm_zero_pages) * PAGE_SIZE - 3377 ksm_rmap_items * sizeof(struct ksm_rmap_item); 3378 3379 return sysfs_emit(buf, "%ld\n", general_profit); 3380 } 3381 KSM_ATTR_RO(general_profit); 3382 3383 static ssize_t stable_node_dups_show(struct kobject *kobj, 3384 struct kobj_attribute *attr, char *buf) 3385 { 3386 return sysfs_emit(buf, "%lu\n", ksm_stable_node_dups); 3387 } 3388 KSM_ATTR_RO(stable_node_dups); 3389 3390 static ssize_t stable_node_chains_show(struct kobject *kobj, 3391 struct kobj_attribute *attr, char *buf) 3392 { 3393 return sysfs_emit(buf, "%lu\n", ksm_stable_node_chains); 3394 } 3395 KSM_ATTR_RO(stable_node_chains); 3396 3397 static ssize_t 3398 stable_node_chains_prune_millisecs_show(struct kobject *kobj, 3399 struct kobj_attribute *attr, 3400 char *buf) 3401 { 3402 return sysfs_emit(buf, "%u\n", ksm_stable_node_chains_prune_millisecs); 3403 } 3404 3405 static ssize_t 3406 stable_node_chains_prune_millisecs_store(struct kobject *kobj, 3407 struct kobj_attribute *attr, 3408 const char *buf, size_t count) 3409 { 3410 unsigned int msecs; 3411 int err; 3412 3413 err = kstrtouint(buf, 10, &msecs); 3414 if (err) 3415 return -EINVAL; 3416 3417 ksm_stable_node_chains_prune_millisecs = msecs; 3418 3419 return count; 3420 } 3421 KSM_ATTR(stable_node_chains_prune_millisecs); 3422 3423 static ssize_t full_scans_show(struct kobject *kobj, 3424 struct kobj_attribute *attr, char *buf) 3425 { 3426 return sysfs_emit(buf, "%lu\n", 
ksm_scan.seqnr); 3427 } 3428 KSM_ATTR_RO(full_scans); 3429 3430 static struct attribute *ksm_attrs[] = { 3431 &sleep_millisecs_attr.attr, 3432 &pages_to_scan_attr.attr, 3433 &run_attr.attr, 3434 &pages_shared_attr.attr, 3435 &pages_sharing_attr.attr, 3436 &pages_unshared_attr.attr, 3437 &pages_volatile_attr.attr, 3438 &ksm_zero_pages_attr.attr, 3439 &full_scans_attr.attr, 3440 #ifdef CONFIG_NUMA 3441 &merge_across_nodes_attr.attr, 3442 #endif 3443 &max_page_sharing_attr.attr, 3444 &stable_node_chains_attr.attr, 3445 &stable_node_dups_attr.attr, 3446 &stable_node_chains_prune_millisecs_attr.attr, 3447 &use_zero_pages_attr.attr, 3448 &general_profit_attr.attr, 3449 NULL, 3450 }; 3451 3452 static const struct attribute_group ksm_attr_group = { 3453 .attrs = ksm_attrs, 3454 .name = "ksm", 3455 }; 3456 #endif /* CONFIG_SYSFS */ 3457 3458 static int __init ksm_init(void) 3459 { 3460 struct task_struct *ksm_thread; 3461 int err; 3462 3463 /* The correct value depends on page size and endianness */ 3464 zero_checksum = calc_checksum(ZERO_PAGE(0)); 3465 /* Default to false for backwards compatibility */ 3466 ksm_use_zero_pages = false; 3467 3468 err = ksm_slab_init(); 3469 if (err) 3470 goto out; 3471 3472 ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd"); 3473 if (IS_ERR(ksm_thread)) { 3474 pr_err("ksm: creating kthread failed\n"); 3475 err = PTR_ERR(ksm_thread); 3476 goto out_free; 3477 } 3478 3479 #ifdef CONFIG_SYSFS 3480 err = sysfs_create_group(mm_kobj, &ksm_attr_group); 3481 if (err) { 3482 pr_err("ksm: register sysfs failed\n"); 3483 kthread_stop(ksm_thread); 3484 goto out_free; 3485 } 3486 #else 3487 ksm_run = KSM_RUN_MERGE; /* no way for user to start it */ 3488 3489 #endif /* CONFIG_SYSFS */ 3490 3491 #ifdef CONFIG_MEMORY_HOTREMOVE 3492 /* There is no significance to this priority 100 */ 3493 hotplug_memory_notifier(ksm_memory_callback, KSM_CALLBACK_PRI); 3494 #endif 3495 return 0; 3496 3497 out_free: 3498 ksm_slab_free(); 3499 out: 3500 return err; 3501 } 3502 subsys_initcall(ksm_init); 3503
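
/*
 * Usage sketch (illustrative only, not compiled as part of this file):
 * a process opts anonymous ranges into KSM with madvise(MADV_MERGEABLE),
 * or opts in its whole mm with prctl(PR_SET_MEMORY_MERGE, 1), which ends
 * up in ksm_enable_merge_any() above and needs CAP_SYS_RESOURCE plus
 * 6.4+ headers; ksmd itself is started by writing 1 to
 * /sys/kernel/mm/ksm/run. The helper name below is arbitrary.
 *
 *	#include <sys/mman.h>
 *	#include <sys/prctl.h>
 *
 *	int opt_into_ksm(void *addr, size_t len)
 *	{
 *		if (madvise(addr, len, MADV_MERGEABLE))
 *			return -1;
 *		return prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0);
 *	}
 */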