1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Memory merging support. 4 * 5 * This code enables dynamic sharing of identical pages found in different 6 * memory areas, even if they are not shared by fork() 7 * 8 * Copyright (C) 2008-2009 Red Hat, Inc. 9 * Authors: 10 * Izik Eidus 11 * Andrea Arcangeli 12 * Chris Wright 13 * Hugh Dickins 14 */ 15 16 #include <linux/errno.h> 17 #include <linux/mm.h> 18 #include <linux/mm_inline.h> 19 #include <linux/fs.h> 20 #include <linux/mman.h> 21 #include <linux/sched.h> 22 #include <linux/sched/mm.h> 23 #include <linux/sched/cputime.h> 24 #include <linux/rwsem.h> 25 #include <linux/pagemap.h> 26 #include <linux/rmap.h> 27 #include <linux/spinlock.h> 28 #include <linux/xxhash.h> 29 #include <linux/delay.h> 30 #include <linux/kthread.h> 31 #include <linux/wait.h> 32 #include <linux/slab.h> 33 #include <linux/rbtree.h> 34 #include <linux/memory.h> 35 #include <linux/mmu_notifier.h> 36 #include <linux/swap.h> 37 #include <linux/ksm.h> 38 #include <linux/hashtable.h> 39 #include <linux/freezer.h> 40 #include <linux/oom.h> 41 #include <linux/numa.h> 42 #include <linux/pagewalk.h> 43 44 #include <asm/tlbflush.h> 45 #include "internal.h" 46 #include "mm_slot.h" 47 48 #define CREATE_TRACE_POINTS 49 #include <trace/events/ksm.h> 50 51 #ifdef CONFIG_NUMA 52 #define NUMA(x) (x) 53 #define DO_NUMA(x) do { (x); } while (0) 54 #else 55 #define NUMA(x) (0) 56 #define DO_NUMA(x) do { } while (0) 57 #endif 58 59 typedef u8 rmap_age_t; 60 61 /** 62 * DOC: Overview 63 * 64 * A few notes about the KSM scanning process, 65 * to make it easier to understand the data structures below: 66 * 67 * In order to reduce excessive scanning, KSM sorts the memory pages by their 68 * contents into a data structure that holds pointers to the pages' locations. 69 * 70 * Since the contents of the pages may change at any moment, KSM cannot just 71 * insert the pages into a normal sorted tree and expect it to find anything. 72 * Therefore KSM uses two data structures - the stable and the unstable tree. 73 * 74 * The stable tree holds pointers to all the merged pages (ksm pages), sorted 75 * by their contents. Because each such page is write-protected, searching on 76 * this tree is fully assured to be working (except when pages are unmapped), 77 * and therefore this tree is called the stable tree. 78 * 79 * The stable tree node includes information required for reverse 80 * mapping from a KSM page to virtual addresses that map this page. 81 * 82 * In order to avoid large latencies of the rmap walks on KSM pages, 83 * KSM maintains two types of nodes in the stable tree: 84 * 85 * * the regular nodes that keep the reverse mapping structures in a 86 * linked list 87 * * the "chains" that link nodes ("dups") that represent the same 88 * write protected memory content, but each "dup" corresponds to a 89 * different KSM page copy of that content 90 * 91 * Internally, the regular nodes, "dups" and "chains" are represented 92 * using the same struct ksm_stable_node structure. 93 * 94 * In addition to the stable tree, KSM uses a second data structure called the 95 * unstable tree: this tree holds pointers to pages which have been found to 96 * be "unchanged for a period of time". The unstable tree sorts these pages 97 * by their contents, but since they are not write-protected, KSM cannot rely 98 * upon the unstable tree to work correctly - the unstable tree is liable to 99 * be corrupted as its contents are modified, and so it is called unstable. 
100 * 101 * KSM solves this problem by several techniques: 102 * 103 * 1) The unstable tree is flushed every time KSM completes scanning all 104 * memory areas, and then the tree is rebuilt again from the beginning. 105 * 2) KSM will only insert into the unstable tree, pages whose hash value 106 * has not changed since the previous scan of all memory areas. 107 * 3) The unstable tree is a RedBlack Tree - so its balancing is based on the 108 * colors of the nodes and not on their contents, assuring that even when 109 * the tree gets "corrupted" it won't get out of balance, so scanning time 110 * remains the same (also, searching and inserting nodes in an rbtree uses 111 * the same algorithm, so we have no overhead when we flush and rebuild). 112 * 4) KSM never flushes the stable tree, which means that even if it were to 113 * take 10 attempts to find a page in the unstable tree, once it is found, 114 * it is secured in the stable tree. (When we scan a new page, we first 115 * compare it against the stable tree, and then against the unstable tree.) 116 * 117 * If the merge_across_nodes tunable is unset, then KSM maintains multiple 118 * stable trees and multiple unstable trees: one of each for each NUMA node. 119 */ 120 121 /** 122 * struct ksm_mm_slot - ksm information per mm that is being scanned 123 * @slot: hash lookup from mm to mm_slot 124 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items 125 */ 126 struct ksm_mm_slot { 127 struct mm_slot slot; 128 struct ksm_rmap_item *rmap_list; 129 }; 130 131 /** 132 * struct ksm_scan - cursor for scanning 133 * @mm_slot: the current mm_slot we are scanning 134 * @address: the next address inside that to be scanned 135 * @rmap_list: link to the next rmap to be scanned in the rmap_list 136 * @seqnr: count of completed full scans (needed when removing unstable node) 137 * 138 * There is only the one ksm_scan instance of this cursor structure. 139 */ 140 struct ksm_scan { 141 struct ksm_mm_slot *mm_slot; 142 unsigned long address; 143 struct ksm_rmap_item **rmap_list; 144 unsigned long seqnr; 145 }; 146 147 /** 148 * struct ksm_stable_node - node of the stable rbtree 149 * @node: rb node of this ksm page in the stable tree 150 * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list 151 * @hlist_dup: linked into the stable_node->hlist with a stable_node chain 152 * @list: linked into migrate_nodes, pending placement in the proper node tree 153 * @hlist: hlist head of rmap_items using this ksm page 154 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid) 155 * @chain_prune_time: time of the last full garbage collection 156 * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN 157 * @nid: NUMA node id of stable tree in which linked (may not match kpfn) 158 */ 159 struct ksm_stable_node { 160 union { 161 struct rb_node node; /* when node of stable tree */ 162 struct { /* when listed for migration */ 163 struct list_head *head; 164 struct { 165 struct hlist_node hlist_dup; 166 struct list_head list; 167 }; 168 }; 169 }; 170 struct hlist_head hlist; 171 union { 172 unsigned long kpfn; 173 unsigned long chain_prune_time; 174 }; 175 /* 176 * STABLE_NODE_CHAIN can be any negative number in 177 * rmap_hlist_len negative range, but better not -1 to be able 178 * to reliably detect underflows. 
179 */ 180 #define STABLE_NODE_CHAIN -1024 181 int rmap_hlist_len; 182 #ifdef CONFIG_NUMA 183 int nid; 184 #endif 185 }; 186 187 /** 188 * struct ksm_rmap_item - reverse mapping item for virtual addresses 189 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list 190 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree 191 * @nid: NUMA node id of unstable tree in which linked (may not match page) 192 * @mm: the memory structure this rmap_item is pointing into 193 * @address: the virtual address this rmap_item tracks (+ flags in low bits) 194 * @oldchecksum: previous checksum of the page at that virtual address 195 * @node: rb node of this rmap_item in the unstable tree 196 * @head: pointer to stable_node heading this list in the stable tree 197 * @hlist: link into hlist of rmap_items hanging off that stable_node 198 * @age: number of scan iterations since creation 199 * @remaining_skips: how many scans to skip 200 */ 201 struct ksm_rmap_item { 202 struct ksm_rmap_item *rmap_list; 203 union { 204 struct anon_vma *anon_vma; /* when stable */ 205 #ifdef CONFIG_NUMA 206 int nid; /* when node of unstable tree */ 207 #endif 208 }; 209 struct mm_struct *mm; 210 unsigned long address; /* + low bits used for flags below */ 211 unsigned int oldchecksum; /* when unstable */ 212 rmap_age_t age; 213 rmap_age_t remaining_skips; 214 union { 215 struct rb_node node; /* when node of unstable tree */ 216 struct { /* when listed from stable tree */ 217 struct ksm_stable_node *head; 218 struct hlist_node hlist; 219 }; 220 }; 221 }; 222 223 #define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */ 224 #define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */ 225 #define STABLE_FLAG 0x200 /* is listed from the stable tree */ 226 227 /* The stable and unstable tree heads */ 228 static struct rb_root one_stable_tree[1] = { RB_ROOT }; 229 static struct rb_root one_unstable_tree[1] = { RB_ROOT }; 230 static struct rb_root *root_stable_tree = one_stable_tree; 231 static struct rb_root *root_unstable_tree = one_unstable_tree; 232 233 /* Recently migrated nodes of stable tree, pending proper placement */ 234 static LIST_HEAD(migrate_nodes); 235 #define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev) 236 237 #define MM_SLOTS_HASH_BITS 10 238 static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS); 239 240 static struct ksm_mm_slot ksm_mm_head = { 241 .slot.mm_node = LIST_HEAD_INIT(ksm_mm_head.slot.mm_node), 242 }; 243 static struct ksm_scan ksm_scan = { 244 .mm_slot = &ksm_mm_head, 245 }; 246 247 static struct kmem_cache *rmap_item_cache; 248 static struct kmem_cache *stable_node_cache; 249 static struct kmem_cache *mm_slot_cache; 250 251 /* Default number of pages to scan per batch */ 252 #define DEFAULT_PAGES_TO_SCAN 100 253 254 /* The number of pages scanned */ 255 static unsigned long ksm_pages_scanned; 256 257 /* The number of nodes in the stable tree */ 258 static unsigned long ksm_pages_shared; 259 260 /* The number of page slots additionally sharing those nodes */ 261 static unsigned long ksm_pages_sharing; 262 263 /* The number of nodes in the unstable tree */ 264 static unsigned long ksm_pages_unshared; 265 266 /* The number of rmap_items in use: to calculate pages_volatile */ 267 static unsigned long ksm_rmap_items; 268 269 /* The number of stable_node chains */ 270 static unsigned long ksm_stable_node_chains; 271 272 /* The number of stable_node dups linked to the stable_node chains */ 273 static unsigned long ksm_stable_node_dups; 274 275 /* 
Delay in pruning stale stable_node_dups in the stable_node_chains */ 276 static unsigned int ksm_stable_node_chains_prune_millisecs = 2000; 277 278 /* Maximum number of page slots sharing a stable node */ 279 static int ksm_max_page_sharing = 256; 280 281 /* Number of pages ksmd should scan in one batch */ 282 static unsigned int ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN; 283 284 /* Milliseconds ksmd should sleep between batches */ 285 static unsigned int ksm_thread_sleep_millisecs = 20; 286 287 /* Checksum of an empty (zeroed) page */ 288 static unsigned int zero_checksum __read_mostly; 289 290 /* Whether to merge empty (zeroed) pages with actual zero pages */ 291 static bool ksm_use_zero_pages __read_mostly; 292 293 /* Skip pages that couldn't be de-duplicated previously */ 294 /* Default to true at least temporarily, for testing */ 295 static bool ksm_smart_scan = true; 296 297 /* The number of zero pages which is placed by KSM */ 298 atomic_long_t ksm_zero_pages = ATOMIC_LONG_INIT(0); 299 300 /* The number of pages that have been skipped due to "smart scanning" */ 301 static unsigned long ksm_pages_skipped; 302 303 /* Don't scan more than max pages per batch. */ 304 static unsigned long ksm_advisor_max_pages_to_scan = 30000; 305 306 /* Min CPU for scanning pages per scan */ 307 #define KSM_ADVISOR_MIN_CPU 10 308 309 /* Max CPU for scanning pages per scan */ 310 static unsigned int ksm_advisor_max_cpu = 70; 311 312 /* Target scan time in seconds to analyze all KSM candidate pages. */ 313 static unsigned long ksm_advisor_target_scan_time = 200; 314 315 /* Exponentially weighted moving average. */ 316 #define EWMA_WEIGHT 30 317 318 /** 319 * struct advisor_ctx - metadata for KSM advisor 320 * @start_scan: start time of the current scan 321 * @scan_time: scan time of previous scan 322 * @change: change in percent to pages_to_scan parameter 323 * @cpu_time: cpu time consumed by the ksmd thread in the previous scan 324 */ 325 struct advisor_ctx { 326 ktime_t start_scan; 327 unsigned long scan_time; 328 unsigned long change; 329 unsigned long long cpu_time; 330 }; 331 static struct advisor_ctx advisor_ctx; 332 333 /* Define different advisor's */ 334 enum ksm_advisor_type { 335 KSM_ADVISOR_NONE, 336 KSM_ADVISOR_SCAN_TIME, 337 }; 338 static enum ksm_advisor_type ksm_advisor; 339 340 #ifdef CONFIG_SYSFS 341 /* 342 * Only called through the sysfs control interface: 343 */ 344 345 /* At least scan this many pages per batch. */ 346 static unsigned long ksm_advisor_min_pages_to_scan = 500; 347 348 static void set_advisor_defaults(void) 349 { 350 if (ksm_advisor == KSM_ADVISOR_NONE) { 351 ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN; 352 } else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) { 353 advisor_ctx = (const struct advisor_ctx){ 0 }; 354 ksm_thread_pages_to_scan = ksm_advisor_min_pages_to_scan; 355 } 356 } 357 #endif /* CONFIG_SYSFS */ 358 359 static inline void advisor_start_scan(void) 360 { 361 if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) 362 advisor_ctx.start_scan = ktime_get(); 363 } 364 365 /* 366 * Use previous scan time if available, otherwise use current scan time as an 367 * approximation for the previous scan time. 368 */ 369 static inline unsigned long prev_scan_time(struct advisor_ctx *ctx, 370 unsigned long scan_time) 371 { 372 return ctx->scan_time ? 
ctx->scan_time : scan_time; 373 } 374 375 /* Calculate exponential weighted moving average */ 376 static unsigned long ewma(unsigned long prev, unsigned long curr) 377 { 378 return ((100 - EWMA_WEIGHT) * prev + EWMA_WEIGHT * curr) / 100; 379 } 380 381 /* 382 * The scan time advisor is based on the current scan rate and the target 383 * scan rate. 384 * 385 * new_pages_to_scan = pages_to_scan * (scan_time / target_scan_time) 386 * 387 * To avoid perturbations it calculates a change factor of previous changes. 388 * A new change factor is calculated for each iteration and it uses an 389 * exponentially weighted moving average. The new pages_to_scan value is 390 * multiplied with that change factor: 391 * 392 * new_pages_to_scan *= change facor 393 * 394 * The new_pages_to_scan value is limited by the cpu min and max values. It 395 * calculates the cpu percent for the last scan and calculates the new 396 * estimated cpu percent cost for the next scan. That value is capped by the 397 * cpu min and max setting. 398 * 399 * In addition the new pages_to_scan value is capped by the max and min 400 * limits. 401 */ 402 static void scan_time_advisor(void) 403 { 404 unsigned int cpu_percent; 405 unsigned long cpu_time; 406 unsigned long cpu_time_diff; 407 unsigned long cpu_time_diff_ms; 408 unsigned long pages; 409 unsigned long per_page_cost; 410 unsigned long factor; 411 unsigned long change; 412 unsigned long last_scan_time; 413 unsigned long scan_time; 414 415 /* Convert scan time to seconds */ 416 scan_time = div_s64(ktime_ms_delta(ktime_get(), advisor_ctx.start_scan), 417 MSEC_PER_SEC); 418 scan_time = scan_time ? scan_time : 1; 419 420 /* Calculate CPU consumption of ksmd background thread */ 421 cpu_time = task_sched_runtime(current); 422 cpu_time_diff = cpu_time - advisor_ctx.cpu_time; 423 cpu_time_diff_ms = cpu_time_diff / 1000 / 1000; 424 425 cpu_percent = (cpu_time_diff_ms * 100) / (scan_time * 1000); 426 cpu_percent = cpu_percent ? cpu_percent : 1; 427 last_scan_time = prev_scan_time(&advisor_ctx, scan_time); 428 429 /* Calculate scan time as percentage of target scan time */ 430 factor = ksm_advisor_target_scan_time * 100 / scan_time; 431 factor = factor ? factor : 1; 432 433 /* 434 * Calculate scan time as percentage of last scan time and use 435 * exponentially weighted average to smooth it 436 */ 437 change = scan_time * 100 / last_scan_time; 438 change = change ? change : 1; 439 change = ewma(advisor_ctx.change, change); 440 441 /* Calculate new scan rate based on target scan rate. */ 442 pages = ksm_thread_pages_to_scan * 100 / factor; 443 /* Update pages_to_scan by weighted change percentage. */ 444 pages = pages * change / 100; 445 446 /* Cap new pages_to_scan value */ 447 per_page_cost = ksm_thread_pages_to_scan / cpu_percent; 448 per_page_cost = per_page_cost ? 
per_page_cost : 1; 449 450 pages = min(pages, per_page_cost * ksm_advisor_max_cpu); 451 pages = max(pages, per_page_cost * KSM_ADVISOR_MIN_CPU); 452 pages = min(pages, ksm_advisor_max_pages_to_scan); 453 454 /* Update advisor context */ 455 advisor_ctx.change = change; 456 advisor_ctx.scan_time = scan_time; 457 advisor_ctx.cpu_time = cpu_time; 458 459 ksm_thread_pages_to_scan = pages; 460 trace_ksm_advisor(scan_time, pages, cpu_percent); 461 } 462 463 static void advisor_stop_scan(void) 464 { 465 if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) 466 scan_time_advisor(); 467 } 468 469 #ifdef CONFIG_NUMA 470 /* Zeroed when merging across nodes is not allowed */ 471 static unsigned int ksm_merge_across_nodes = 1; 472 static int ksm_nr_node_ids = 1; 473 #else 474 #define ksm_merge_across_nodes 1U 475 #define ksm_nr_node_ids 1 476 #endif 477 478 #define KSM_RUN_STOP 0 479 #define KSM_RUN_MERGE 1 480 #define KSM_RUN_UNMERGE 2 481 #define KSM_RUN_OFFLINE 4 482 static unsigned long ksm_run = KSM_RUN_STOP; 483 static void wait_while_offlining(void); 484 485 static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait); 486 static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait); 487 static DEFINE_MUTEX(ksm_thread_mutex); 488 static DEFINE_SPINLOCK(ksm_mmlist_lock); 489 490 static int __init ksm_slab_init(void) 491 { 492 rmap_item_cache = KMEM_CACHE(ksm_rmap_item, 0); 493 if (!rmap_item_cache) 494 goto out; 495 496 stable_node_cache = KMEM_CACHE(ksm_stable_node, 0); 497 if (!stable_node_cache) 498 goto out_free1; 499 500 mm_slot_cache = KMEM_CACHE(ksm_mm_slot, 0); 501 if (!mm_slot_cache) 502 goto out_free2; 503 504 return 0; 505 506 out_free2: 507 kmem_cache_destroy(stable_node_cache); 508 out_free1: 509 kmem_cache_destroy(rmap_item_cache); 510 out: 511 return -ENOMEM; 512 } 513 514 static void __init ksm_slab_free(void) 515 { 516 kmem_cache_destroy(mm_slot_cache); 517 kmem_cache_destroy(stable_node_cache); 518 kmem_cache_destroy(rmap_item_cache); 519 mm_slot_cache = NULL; 520 } 521 522 static __always_inline bool is_stable_node_chain(struct ksm_stable_node *chain) 523 { 524 return chain->rmap_hlist_len == STABLE_NODE_CHAIN; 525 } 526 527 static __always_inline bool is_stable_node_dup(struct ksm_stable_node *dup) 528 { 529 return dup->head == STABLE_NODE_DUP_HEAD; 530 } 531 532 static inline void stable_node_chain_add_dup(struct ksm_stable_node *dup, 533 struct ksm_stable_node *chain) 534 { 535 VM_BUG_ON(is_stable_node_dup(dup)); 536 dup->head = STABLE_NODE_DUP_HEAD; 537 VM_BUG_ON(!is_stable_node_chain(chain)); 538 hlist_add_head(&dup->hlist_dup, &chain->hlist); 539 ksm_stable_node_dups++; 540 } 541 542 static inline void __stable_node_dup_del(struct ksm_stable_node *dup) 543 { 544 VM_BUG_ON(!is_stable_node_dup(dup)); 545 hlist_del(&dup->hlist_dup); 546 ksm_stable_node_dups--; 547 } 548 549 static inline void stable_node_dup_del(struct ksm_stable_node *dup) 550 { 551 VM_BUG_ON(is_stable_node_chain(dup)); 552 if (is_stable_node_dup(dup)) 553 __stable_node_dup_del(dup); 554 else 555 rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid)); 556 #ifdef CONFIG_DEBUG_VM 557 dup->head = NULL; 558 #endif 559 } 560 561 static inline struct ksm_rmap_item *alloc_rmap_item(void) 562 { 563 struct ksm_rmap_item *rmap_item; 564 565 rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL | 566 __GFP_NORETRY | __GFP_NOWARN); 567 if (rmap_item) 568 ksm_rmap_items++; 569 return rmap_item; 570 } 571 572 static inline void free_rmap_item(struct ksm_rmap_item *rmap_item) 573 { 574 ksm_rmap_items--; 575 rmap_item->mm->ksm_rmap_items--; 576 
rmap_item->mm = NULL; /* debug safety */ 577 kmem_cache_free(rmap_item_cache, rmap_item); 578 } 579 580 static inline struct ksm_stable_node *alloc_stable_node(void) 581 { 582 /* 583 * The allocation can take too long with GFP_KERNEL when memory is under 584 * pressure, which may lead to hung task warnings. Adding __GFP_HIGH 585 * grants access to memory reserves, helping to avoid this problem. 586 */ 587 return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH); 588 } 589 590 static inline void free_stable_node(struct ksm_stable_node *stable_node) 591 { 592 VM_BUG_ON(stable_node->rmap_hlist_len && 593 !is_stable_node_chain(stable_node)); 594 kmem_cache_free(stable_node_cache, stable_node); 595 } 596 597 /* 598 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's 599 * page tables after it has passed through ksm_exit() - which, if necessary, 600 * takes mmap_lock briefly to serialize against them. ksm_exit() does not set 601 * a special flag: they can just back out as soon as mm_users goes to zero. 602 * ksm_test_exit() is used throughout to make this test for exit: in some 603 * places for correctness, in some places just to avoid unnecessary work. 604 */ 605 static inline bool ksm_test_exit(struct mm_struct *mm) 606 { 607 return atomic_read(&mm->mm_users) == 0; 608 } 609 610 /* 611 * We use break_ksm to break COW on a ksm page by triggering unsharing, 612 * such that the ksm page will get replaced by an exclusive anonymous page. 613 * 614 * We take great care only to touch a ksm page, in a VM_MERGEABLE vma, 615 * in case the application has unmapped and remapped mm,addr meanwhile. 616 * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP 617 * mmap of /dev/mem, where we would not want to touch it. 618 * 619 * FAULT_FLAG_REMOTE/FOLL_REMOTE are because we do this outside the context 620 * of the process that owns 'vma'. We also do not want to enforce 621 * protection keys here anyway. 622 */ 623 static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma) 624 { 625 vm_fault_t ret = 0; 626 627 if (lock_vma) 628 vma_start_write(vma); 629 630 do { 631 bool ksm_page = false; 632 struct folio_walk fw; 633 struct folio *folio; 634 635 cond_resched(); 636 folio = folio_walk_start(&fw, vma, addr, 637 FW_MIGRATION | FW_ZEROPAGE); 638 if (folio) { 639 /* Small folio implies FW_LEVEL_PTE. */ 640 if (!folio_test_large(folio) && 641 (folio_test_ksm(folio) || is_ksm_zero_pte(fw.pte))) 642 ksm_page = true; 643 folio_walk_end(&fw, vma); 644 } 645 646 if (!ksm_page) 647 return 0; 648 ret = handle_mm_fault(vma, addr, 649 FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE, 650 NULL); 651 } while (!(ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM))); 652 /* 653 * We must loop until we no longer find a KSM page because 654 * handle_mm_fault() may back out if there's any difficulty e.g. if 655 * pte accessed bit gets updated concurrently. 656 * 657 * VM_FAULT_SIGBUS could occur if we race with truncation of the 658 * backing file, which also invalidates anonymous pages: that's 659 * okay, that truncation will have unmapped the KSM page for us. 660 * 661 * VM_FAULT_OOM: at the time of writing (late July 2009), setting 662 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the 663 * current task has TIF_MEMDIE set, and will be OOM killed on return 664 * to user; and ksmd, having no mm, would never be chosen for that. 
665 * 666 * But if the mm is in a limited mem_cgroup, then the fault may fail 667 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and 668 * even ksmd can fail in this way - though it's usually breaking ksm 669 * just to undo a merge it made a moment before, so unlikely to oom. 670 * 671 * That's a pity: we might therefore have more kernel pages allocated 672 * than we're counting as nodes in the stable tree; but ksm_do_scan 673 * will retry to break_cow on each pass, so should recover the page 674 * in due course. The important thing is to not let VM_MERGEABLE 675 * be cleared while any such pages might remain in the area. 676 */ 677 return (ret & VM_FAULT_OOM) ? -ENOMEM : 0; 678 } 679 680 static bool ksm_compatible(const struct file *file, vm_flags_t vm_flags) 681 { 682 if (vm_flags & (VM_SHARED | VM_MAYSHARE | VM_SPECIAL | 683 VM_HUGETLB | VM_DROPPABLE)) 684 return false; /* just ignore the advice */ 685 686 if (file_is_dax(file)) 687 return false; 688 689 #ifdef VM_SAO 690 if (vm_flags & VM_SAO) 691 return false; 692 #endif 693 #ifdef VM_SPARC_ADI 694 if (vm_flags & VM_SPARC_ADI) 695 return false; 696 #endif 697 698 return true; 699 } 700 701 static bool vma_ksm_compatible(struct vm_area_struct *vma) 702 { 703 return ksm_compatible(vma->vm_file, vma->vm_flags); 704 } 705 706 static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm, 707 unsigned long addr) 708 { 709 struct vm_area_struct *vma; 710 if (ksm_test_exit(mm)) 711 return NULL; 712 vma = vma_lookup(mm, addr); 713 if (!vma || !(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) 714 return NULL; 715 return vma; 716 } 717 718 static void break_cow(struct ksm_rmap_item *rmap_item) 719 { 720 struct mm_struct *mm = rmap_item->mm; 721 unsigned long addr = rmap_item->address; 722 struct vm_area_struct *vma; 723 724 /* 725 * It is not an accident that whenever we want to break COW 726 * to undo, we also need to drop a reference to the anon_vma. 727 */ 728 put_anon_vma(rmap_item->anon_vma); 729 730 mmap_read_lock(mm); 731 vma = find_mergeable_vma(mm, addr); 732 if (vma) 733 break_ksm(vma, addr, false); 734 mmap_read_unlock(mm); 735 } 736 737 static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item) 738 { 739 struct mm_struct *mm = rmap_item->mm; 740 unsigned long addr = rmap_item->address; 741 struct vm_area_struct *vma; 742 struct page *page = NULL; 743 struct folio_walk fw; 744 struct folio *folio; 745 746 mmap_read_lock(mm); 747 vma = find_mergeable_vma(mm, addr); 748 if (!vma) 749 goto out; 750 751 folio = folio_walk_start(&fw, vma, addr, 0); 752 if (folio) { 753 if (!folio_is_zone_device(folio) && 754 folio_test_anon(folio)) { 755 folio_get(folio); 756 page = fw.page; 757 } 758 folio_walk_end(&fw, vma); 759 } 760 out: 761 if (page) { 762 flush_anon_page(vma, page, addr); 763 flush_dcache_page(page); 764 } 765 mmap_read_unlock(mm); 766 return page; 767 } 768 769 /* 770 * This helper is used for getting right index into array of tree roots. 771 * When merge_across_nodes knob is set to 1, there are only two rb-trees for 772 * stable and unstable pages from all nodes with roots in index 0. Otherwise, 773 * every node has its own stable and unstable tree. 774 */ 775 static inline int get_kpfn_nid(unsigned long kpfn) 776 { 777 return ksm_merge_across_nodes ? 
0 : NUMA(pfn_to_nid(kpfn)); 778 } 779 780 static struct ksm_stable_node *alloc_stable_node_chain(struct ksm_stable_node *dup, 781 struct rb_root *root) 782 { 783 struct ksm_stable_node *chain = alloc_stable_node(); 784 VM_BUG_ON(is_stable_node_chain(dup)); 785 if (likely(chain)) { 786 INIT_HLIST_HEAD(&chain->hlist); 787 chain->chain_prune_time = jiffies; 788 chain->rmap_hlist_len = STABLE_NODE_CHAIN; 789 #if defined (CONFIG_DEBUG_VM) && defined(CONFIG_NUMA) 790 chain->nid = NUMA_NO_NODE; /* debug */ 791 #endif 792 ksm_stable_node_chains++; 793 794 /* 795 * Put the stable node chain in the first dimension of 796 * the stable tree and at the same time remove the old 797 * stable node. 798 */ 799 rb_replace_node(&dup->node, &chain->node, root); 800 801 /* 802 * Move the old stable node to the second dimension 803 * queued in the hlist_dup. The invariant is that all 804 * dup stable_nodes in the chain->hlist point to pages 805 * that are write protected and have the exact same 806 * content. 807 */ 808 stable_node_chain_add_dup(dup, chain); 809 } 810 return chain; 811 } 812 813 static inline void free_stable_node_chain(struct ksm_stable_node *chain, 814 struct rb_root *root) 815 { 816 rb_erase(&chain->node, root); 817 free_stable_node(chain); 818 ksm_stable_node_chains--; 819 } 820 821 static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node) 822 { 823 struct ksm_rmap_item *rmap_item; 824 825 /* check it's not STABLE_NODE_CHAIN or negative */ 826 BUG_ON(stable_node->rmap_hlist_len < 0); 827 828 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { 829 if (rmap_item->hlist.next) { 830 ksm_pages_sharing--; 831 trace_ksm_remove_rmap_item(stable_node->kpfn, rmap_item, rmap_item->mm); 832 } else { 833 ksm_pages_shared--; 834 } 835 836 rmap_item->mm->ksm_merging_pages--; 837 838 VM_BUG_ON(stable_node->rmap_hlist_len <= 0); 839 stable_node->rmap_hlist_len--; 840 put_anon_vma(rmap_item->anon_vma); 841 rmap_item->address &= PAGE_MASK; 842 cond_resched(); 843 } 844 845 /* 846 * We need the second aligned pointer of the migrate_nodes 847 * list_head to stay clear from the rb_parent_color union 848 * (aligned and different than any node) and also different 849 * from &migrate_nodes. This will verify that future list.h changes 850 * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it. 851 */ 852 BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes); 853 BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1); 854 855 trace_ksm_remove_ksm_page(stable_node->kpfn); 856 if (stable_node->head == &migrate_nodes) 857 list_del(&stable_node->list); 858 else 859 stable_node_dup_del(stable_node); 860 free_stable_node(stable_node); 861 } 862 863 enum ksm_get_folio_flags { 864 KSM_GET_FOLIO_NOLOCK, 865 KSM_GET_FOLIO_LOCK, 866 KSM_GET_FOLIO_TRYLOCK 867 }; 868 869 /* 870 * ksm_get_folio: checks if the page indicated by the stable node 871 * is still its ksm page, despite having held no reference to it. 872 * In which case we can trust the content of the page, and it 873 * returns the gotten page; but if the page has now been zapped, 874 * remove the stale node from the stable tree and return NULL. 875 * But beware, the stable node's page might be being migrated. 876 * 877 * You would expect the stable_node to hold a reference to the ksm page. 878 * But if it increments the page's count, swapping out has to wait for 879 * ksmd to come around again before it can free the page, which may take 880 * seconds or even minutes: much too unresponsive. 
So instead we use a 881 * "keyhole reference": access to the ksm page from the stable node peeps 882 * out through its keyhole to see if that page still holds the right key, 883 * pointing back to this stable node. This relies on freeing a PageAnon 884 * page to reset its page->mapping to NULL, and relies on no other use of 885 * a page to put something that might look like our key in page->mapping. 886 * is on its way to being freed; but it is an anomaly to bear in mind. 887 */ 888 static struct folio *ksm_get_folio(struct ksm_stable_node *stable_node, 889 enum ksm_get_folio_flags flags) 890 { 891 struct folio *folio; 892 void *expected_mapping; 893 unsigned long kpfn; 894 895 expected_mapping = (void *)((unsigned long)stable_node | 896 FOLIO_MAPPING_KSM); 897 again: 898 kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */ 899 folio = pfn_folio(kpfn); 900 if (READ_ONCE(folio->mapping) != expected_mapping) 901 goto stale; 902 903 /* 904 * We cannot do anything with the page while its refcount is 0. 905 * Usually 0 means free, or tail of a higher-order page: in which 906 * case this node is no longer referenced, and should be freed; 907 * however, it might mean that the page is under page_ref_freeze(). 908 * The __remove_mapping() case is easy, again the node is now stale; 909 * the same is in reuse_ksm_page() case; but if page is swapcache 910 * in folio_migrate_mapping(), it might still be our page, 911 * in which case it's essential to keep the node. 912 */ 913 while (!folio_try_get(folio)) { 914 /* 915 * Another check for folio->mapping != expected_mapping 916 * would work here too. We have chosen to test the 917 * swapcache flag to optimize the common case, when the 918 * folio is or is about to be freed: the swapcache flag 919 * is cleared (under spin_lock_irq) in the ref_freeze 920 * section of __remove_mapping(); but anon folio->mapping 921 * is reset to NULL later, in free_pages_prepare(). 922 */ 923 if (!folio_test_swapcache(folio)) 924 goto stale; 925 cpu_relax(); 926 } 927 928 if (READ_ONCE(folio->mapping) != expected_mapping) { 929 folio_put(folio); 930 goto stale; 931 } 932 933 if (flags == KSM_GET_FOLIO_TRYLOCK) { 934 if (!folio_trylock(folio)) { 935 folio_put(folio); 936 return ERR_PTR(-EBUSY); 937 } 938 } else if (flags == KSM_GET_FOLIO_LOCK) 939 folio_lock(folio); 940 941 if (flags != KSM_GET_FOLIO_NOLOCK) { 942 if (READ_ONCE(folio->mapping) != expected_mapping) { 943 folio_unlock(folio); 944 folio_put(folio); 945 goto stale; 946 } 947 } 948 return folio; 949 950 stale: 951 /* 952 * We come here from above when folio->mapping or the swapcache flag 953 * suggests that the node is stale; but it might be under migration. 954 * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(), 955 * before checking whether node->kpfn has been changed. 956 */ 957 smp_rmb(); 958 if (READ_ONCE(stable_node->kpfn) != kpfn) 959 goto again; 960 remove_node_from_stable_tree(stable_node); 961 return NULL; 962 } 963 964 /* 965 * Removing rmap_item from stable or unstable tree. 966 * This function will clean the information from the stable/unstable tree. 
967 */ 968 static void remove_rmap_item_from_tree(struct ksm_rmap_item *rmap_item) 969 { 970 if (rmap_item->address & STABLE_FLAG) { 971 struct ksm_stable_node *stable_node; 972 struct folio *folio; 973 974 stable_node = rmap_item->head; 975 folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK); 976 if (!folio) 977 goto out; 978 979 hlist_del(&rmap_item->hlist); 980 folio_unlock(folio); 981 folio_put(folio); 982 983 if (!hlist_empty(&stable_node->hlist)) 984 ksm_pages_sharing--; 985 else 986 ksm_pages_shared--; 987 988 rmap_item->mm->ksm_merging_pages--; 989 990 VM_BUG_ON(stable_node->rmap_hlist_len <= 0); 991 stable_node->rmap_hlist_len--; 992 993 put_anon_vma(rmap_item->anon_vma); 994 rmap_item->head = NULL; 995 rmap_item->address &= PAGE_MASK; 996 997 } else if (rmap_item->address & UNSTABLE_FLAG) { 998 unsigned char age; 999 /* 1000 * Usually ksmd can and must skip the rb_erase, because 1001 * root_unstable_tree was already reset to RB_ROOT. 1002 * But be careful when an mm is exiting: do the rb_erase 1003 * if this rmap_item was inserted by this scan, rather 1004 * than left over from before. 1005 */ 1006 age = (unsigned char)(ksm_scan.seqnr - rmap_item->address); 1007 BUG_ON(age > 1); 1008 if (!age) 1009 rb_erase(&rmap_item->node, 1010 root_unstable_tree + NUMA(rmap_item->nid)); 1011 ksm_pages_unshared--; 1012 rmap_item->address &= PAGE_MASK; 1013 } 1014 out: 1015 cond_resched(); /* we're called from many long loops */ 1016 } 1017 1018 static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list) 1019 { 1020 while (*rmap_list) { 1021 struct ksm_rmap_item *rmap_item = *rmap_list; 1022 *rmap_list = rmap_item->rmap_list; 1023 remove_rmap_item_from_tree(rmap_item); 1024 free_rmap_item(rmap_item); 1025 } 1026 } 1027 1028 /* 1029 * Though it's very tempting to unmerge rmap_items from stable tree rather 1030 * than check every pte of a given vma, the locking doesn't quite work for 1031 * that - an rmap_item is assigned to the stable tree after inserting ksm 1032 * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing 1033 * rmap_items from parent to child at fork time (so as not to waste time 1034 * if exit comes before the next scan reaches it). 1035 * 1036 * Similarly, although we'd like to remove rmap_items (so updating counts 1037 * and freeing memory) when unmerging an area, it's easier to leave that 1038 * to the next pass of ksmd - consider, for example, how ksmd might be 1039 * in cmp_and_merge_page on one of the rmap_items we would be removing. 1040 */ 1041 static int unmerge_ksm_pages(struct vm_area_struct *vma, 1042 unsigned long start, unsigned long end, bool lock_vma) 1043 { 1044 unsigned long addr; 1045 int err = 0; 1046 1047 for (addr = start; addr < end && !err; addr += PAGE_SIZE) { 1048 if (ksm_test_exit(vma->vm_mm)) 1049 break; 1050 if (signal_pending(current)) 1051 err = -ERESTARTSYS; 1052 else 1053 err = break_ksm(vma, addr, lock_vma); 1054 } 1055 return err; 1056 } 1057 1058 static inline 1059 struct ksm_stable_node *folio_stable_node(const struct folio *folio) 1060 { 1061 return folio_test_ksm(folio) ? 
folio_raw_mapping(folio) : NULL; 1062 } 1063 1064 static inline void folio_set_stable_node(struct folio *folio, 1065 struct ksm_stable_node *stable_node) 1066 { 1067 VM_WARN_ON_FOLIO(folio_test_anon(folio) && PageAnonExclusive(&folio->page), folio); 1068 folio->mapping = (void *)((unsigned long)stable_node | FOLIO_MAPPING_KSM); 1069 } 1070 1071 #ifdef CONFIG_SYSFS 1072 /* 1073 * Only called through the sysfs control interface: 1074 */ 1075 static int remove_stable_node(struct ksm_stable_node *stable_node) 1076 { 1077 struct folio *folio; 1078 int err; 1079 1080 folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK); 1081 if (!folio) { 1082 /* 1083 * ksm_get_folio did remove_node_from_stable_tree itself. 1084 */ 1085 return 0; 1086 } 1087 1088 /* 1089 * Page could be still mapped if this races with __mmput() running in 1090 * between ksm_exit() and exit_mmap(). Just refuse to let 1091 * merge_across_nodes/max_page_sharing be switched. 1092 */ 1093 err = -EBUSY; 1094 if (!folio_mapped(folio)) { 1095 /* 1096 * The stable node did not yet appear stale to ksm_get_folio(), 1097 * since that allows for an unmapped ksm folio to be recognized 1098 * right up until it is freed; but the node is safe to remove. 1099 * This folio might be in an LRU cache waiting to be freed, 1100 * or it might be in the swapcache (perhaps under writeback), 1101 * or it might have been removed from swapcache a moment ago. 1102 */ 1103 folio_set_stable_node(folio, NULL); 1104 remove_node_from_stable_tree(stable_node); 1105 err = 0; 1106 } 1107 1108 folio_unlock(folio); 1109 folio_put(folio); 1110 return err; 1111 } 1112 1113 static int remove_stable_node_chain(struct ksm_stable_node *stable_node, 1114 struct rb_root *root) 1115 { 1116 struct ksm_stable_node *dup; 1117 struct hlist_node *hlist_safe; 1118 1119 if (!is_stable_node_chain(stable_node)) { 1120 VM_BUG_ON(is_stable_node_dup(stable_node)); 1121 if (remove_stable_node(stable_node)) 1122 return true; 1123 else 1124 return false; 1125 } 1126 1127 hlist_for_each_entry_safe(dup, hlist_safe, 1128 &stable_node->hlist, hlist_dup) { 1129 VM_BUG_ON(!is_stable_node_dup(dup)); 1130 if (remove_stable_node(dup)) 1131 return true; 1132 } 1133 BUG_ON(!hlist_empty(&stable_node->hlist)); 1134 free_stable_node_chain(stable_node, root); 1135 return false; 1136 } 1137 1138 static int remove_all_stable_nodes(void) 1139 { 1140 struct ksm_stable_node *stable_node, *next; 1141 int nid; 1142 int err = 0; 1143 1144 for (nid = 0; nid < ksm_nr_node_ids; nid++) { 1145 while (root_stable_tree[nid].rb_node) { 1146 stable_node = rb_entry(root_stable_tree[nid].rb_node, 1147 struct ksm_stable_node, node); 1148 if (remove_stable_node_chain(stable_node, 1149 root_stable_tree + nid)) { 1150 err = -EBUSY; 1151 break; /* proceed to next nid */ 1152 } 1153 cond_resched(); 1154 } 1155 } 1156 list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) { 1157 if (remove_stable_node(stable_node)) 1158 err = -EBUSY; 1159 cond_resched(); 1160 } 1161 return err; 1162 } 1163 1164 static int unmerge_and_remove_all_rmap_items(void) 1165 { 1166 struct ksm_mm_slot *mm_slot; 1167 struct mm_slot *slot; 1168 struct mm_struct *mm; 1169 struct vm_area_struct *vma; 1170 int err = 0; 1171 1172 spin_lock(&ksm_mmlist_lock); 1173 slot = list_entry(ksm_mm_head.slot.mm_node.next, 1174 struct mm_slot, mm_node); 1175 ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); 1176 spin_unlock(&ksm_mmlist_lock); 1177 1178 for (mm_slot = ksm_scan.mm_slot; mm_slot != &ksm_mm_head; 1179 mm_slot = ksm_scan.mm_slot) { 1180 
VMA_ITERATOR(vmi, mm_slot->slot.mm, 0); 1181 1182 mm = mm_slot->slot.mm; 1183 mmap_read_lock(mm); 1184 1185 /* 1186 * Exit right away if mm is exiting to avoid lockdep issue in 1187 * the maple tree 1188 */ 1189 if (ksm_test_exit(mm)) 1190 goto mm_exiting; 1191 1192 for_each_vma(vmi, vma) { 1193 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) 1194 continue; 1195 err = unmerge_ksm_pages(vma, 1196 vma->vm_start, vma->vm_end, false); 1197 if (err) 1198 goto error; 1199 } 1200 1201 mm_exiting: 1202 remove_trailing_rmap_items(&mm_slot->rmap_list); 1203 mmap_read_unlock(mm); 1204 1205 spin_lock(&ksm_mmlist_lock); 1206 slot = list_entry(mm_slot->slot.mm_node.next, 1207 struct mm_slot, mm_node); 1208 ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); 1209 if (ksm_test_exit(mm)) { 1210 hash_del(&mm_slot->slot.hash); 1211 list_del(&mm_slot->slot.mm_node); 1212 spin_unlock(&ksm_mmlist_lock); 1213 1214 mm_slot_free(mm_slot_cache, mm_slot); 1215 mm_flags_clear(MMF_VM_MERGEABLE, mm); 1216 mm_flags_clear(MMF_VM_MERGE_ANY, mm); 1217 mmdrop(mm); 1218 } else 1219 spin_unlock(&ksm_mmlist_lock); 1220 } 1221 1222 /* Clean up stable nodes, but don't worry if some are still busy */ 1223 remove_all_stable_nodes(); 1224 ksm_scan.seqnr = 0; 1225 return 0; 1226 1227 error: 1228 mmap_read_unlock(mm); 1229 spin_lock(&ksm_mmlist_lock); 1230 ksm_scan.mm_slot = &ksm_mm_head; 1231 spin_unlock(&ksm_mmlist_lock); 1232 return err; 1233 } 1234 #endif /* CONFIG_SYSFS */ 1235 1236 static u32 calc_checksum(struct page *page) 1237 { 1238 u32 checksum; 1239 void *addr = kmap_local_page(page); 1240 checksum = xxhash(addr, PAGE_SIZE, 0); 1241 kunmap_local(addr); 1242 return checksum; 1243 } 1244 1245 static int write_protect_page(struct vm_area_struct *vma, struct folio *folio, 1246 pte_t *orig_pte) 1247 { 1248 struct mm_struct *mm = vma->vm_mm; 1249 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, 0, 0); 1250 int swapped; 1251 int err = -EFAULT; 1252 struct mmu_notifier_range range; 1253 bool anon_exclusive; 1254 pte_t entry; 1255 1256 if (WARN_ON_ONCE(folio_test_large(folio))) 1257 return err; 1258 1259 pvmw.address = page_address_in_vma(folio, folio_page(folio, 0), vma); 1260 if (pvmw.address == -EFAULT) 1261 goto out; 1262 1263 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address, 1264 pvmw.address + PAGE_SIZE); 1265 mmu_notifier_invalidate_range_start(&range); 1266 1267 if (!page_vma_mapped_walk(&pvmw)) 1268 goto out_mn; 1269 if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?")) 1270 goto out_unlock; 1271 1272 entry = ptep_get(pvmw.pte); 1273 /* 1274 * Handle PFN swap PTEs, such as device-exclusive ones, that actually 1275 * map pages: give up just like the next folio_walk would. 1276 */ 1277 if (unlikely(!pte_present(entry))) 1278 goto out_unlock; 1279 1280 anon_exclusive = PageAnonExclusive(&folio->page); 1281 if (pte_write(entry) || pte_dirty(entry) || 1282 anon_exclusive || mm_tlb_flush_pending(mm)) { 1283 swapped = folio_test_swapcache(folio); 1284 flush_cache_page(vma, pvmw.address, folio_pfn(folio)); 1285 /* 1286 * Ok this is tricky, when get_user_pages_fast() run it doesn't 1287 * take any lock, therefore the check that we are going to make 1288 * with the pagecount against the mapcount is racy and 1289 * O_DIRECT can happen right after the check. 1290 * So we clear the pte and flush the tlb before the check 1291 * this assure us that no O_DIRECT can happen after the check 1292 * or in the middle of the check. 
1293 * 1294 * No need to notify as we are downgrading page table to read 1295 * only not changing it to point to a new page. 1296 * 1297 * See Documentation/mm/mmu_notifier.rst 1298 */ 1299 entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte); 1300 /* 1301 * Check that no O_DIRECT or similar I/O is in progress on the 1302 * page 1303 */ 1304 if (folio_mapcount(folio) + 1 + swapped != folio_ref_count(folio)) { 1305 set_pte_at(mm, pvmw.address, pvmw.pte, entry); 1306 goto out_unlock; 1307 } 1308 1309 /* See folio_try_share_anon_rmap_pte(): clear PTE first. */ 1310 if (anon_exclusive && 1311 folio_try_share_anon_rmap_pte(folio, &folio->page)) { 1312 set_pte_at(mm, pvmw.address, pvmw.pte, entry); 1313 goto out_unlock; 1314 } 1315 1316 if (pte_dirty(entry)) 1317 folio_mark_dirty(folio); 1318 entry = pte_mkclean(entry); 1319 1320 if (pte_write(entry)) 1321 entry = pte_wrprotect(entry); 1322 1323 set_pte_at(mm, pvmw.address, pvmw.pte, entry); 1324 } 1325 *orig_pte = entry; 1326 err = 0; 1327 1328 out_unlock: 1329 page_vma_mapped_walk_done(&pvmw); 1330 out_mn: 1331 mmu_notifier_invalidate_range_end(&range); 1332 out: 1333 return err; 1334 } 1335 1336 /** 1337 * replace_page - replace page in vma by new ksm page 1338 * @vma: vma that holds the pte pointing to page 1339 * @page: the page we are replacing by kpage 1340 * @kpage: the ksm page we replace page by 1341 * @orig_pte: the original value of the pte 1342 * 1343 * Returns 0 on success, -EFAULT on failure. 1344 */ 1345 static int replace_page(struct vm_area_struct *vma, struct page *page, 1346 struct page *kpage, pte_t orig_pte) 1347 { 1348 struct folio *kfolio = page_folio(kpage); 1349 struct mm_struct *mm = vma->vm_mm; 1350 struct folio *folio = page_folio(page); 1351 pmd_t *pmd; 1352 pmd_t pmde; 1353 pte_t *ptep; 1354 pte_t newpte; 1355 spinlock_t *ptl; 1356 unsigned long addr; 1357 int err = -EFAULT; 1358 struct mmu_notifier_range range; 1359 1360 addr = page_address_in_vma(folio, page, vma); 1361 if (addr == -EFAULT) 1362 goto out; 1363 1364 pmd = mm_find_pmd(mm, addr); 1365 if (!pmd) 1366 goto out; 1367 /* 1368 * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at() 1369 * without holding anon_vma lock for write. So when looking for a 1370 * genuine pmde (in which to find pte), test present and !THP together. 1371 */ 1372 pmde = pmdp_get_lockless(pmd); 1373 if (!pmd_present(pmde) || pmd_trans_huge(pmde)) 1374 goto out; 1375 1376 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr, 1377 addr + PAGE_SIZE); 1378 mmu_notifier_invalidate_range_start(&range); 1379 1380 ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); 1381 if (!ptep) 1382 goto out_mn; 1383 if (!pte_same(ptep_get(ptep), orig_pte)) { 1384 pte_unmap_unlock(ptep, ptl); 1385 goto out_mn; 1386 } 1387 VM_BUG_ON_PAGE(PageAnonExclusive(page), page); 1388 VM_BUG_ON_FOLIO(folio_test_anon(kfolio) && PageAnonExclusive(kpage), 1389 kfolio); 1390 1391 /* 1392 * No need to check ksm_use_zero_pages here: we can only have a 1393 * zero_page here if ksm_use_zero_pages was enabled already. 1394 */ 1395 if (!is_zero_pfn(page_to_pfn(kpage))) { 1396 folio_get(kfolio); 1397 folio_add_anon_rmap_pte(kfolio, kpage, vma, addr, RMAP_NONE); 1398 newpte = mk_pte(kpage, vma->vm_page_prot); 1399 } else { 1400 /* 1401 * Use pte_mkdirty to mark the zero page mapped by KSM, and then 1402 * we can easily track all KSM-placed zero pages by checking if 1403 * the dirty bit in zero page's PTE is set. 
1404 */ 1405 newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot))); 1406 ksm_map_zero_page(mm); 1407 /* 1408 * We're replacing an anonymous page with a zero page, which is 1409 * not anonymous. We need to do proper accounting otherwise we 1410 * will get wrong values in /proc, and a BUG message in dmesg 1411 * when tearing down the mm. 1412 */ 1413 dec_mm_counter(mm, MM_ANONPAGES); 1414 } 1415 1416 flush_cache_page(vma, addr, pte_pfn(ptep_get(ptep))); 1417 /* 1418 * No need to notify as we are replacing a read only page with another 1419 * read only page with the same content. 1420 * 1421 * See Documentation/mm/mmu_notifier.rst 1422 */ 1423 ptep_clear_flush(vma, addr, ptep); 1424 set_pte_at(mm, addr, ptep, newpte); 1425 1426 folio_remove_rmap_pte(folio, page, vma); 1427 if (!folio_mapped(folio)) 1428 folio_free_swap(folio); 1429 folio_put(folio); 1430 1431 pte_unmap_unlock(ptep, ptl); 1432 err = 0; 1433 out_mn: 1434 mmu_notifier_invalidate_range_end(&range); 1435 out: 1436 return err; 1437 } 1438 1439 /* 1440 * try_to_merge_one_page - take two pages and merge them into one 1441 * @vma: the vma that holds the pte pointing to page 1442 * @page: the PageAnon page that we want to replace with kpage 1443 * @kpage: the KSM page that we want to map instead of page, 1444 * or NULL the first time when we want to use page as kpage. 1445 * 1446 * This function returns 0 if the pages were merged, -EFAULT otherwise. 1447 */ 1448 static int try_to_merge_one_page(struct vm_area_struct *vma, 1449 struct page *page, struct page *kpage) 1450 { 1451 struct folio *folio = page_folio(page); 1452 pte_t orig_pte = __pte(0); 1453 int err = -EFAULT; 1454 1455 if (page == kpage) /* ksm page forked */ 1456 return 0; 1457 1458 if (!folio_test_anon(folio)) 1459 goto out; 1460 1461 /* 1462 * We need the folio lock to read a stable swapcache flag in 1463 * write_protect_page(). We trylock because we don't want to wait 1464 * here - we prefer to continue scanning and merging different 1465 * pages, then come back to this page when it is unlocked. 1466 */ 1467 if (!folio_trylock(folio)) 1468 goto out; 1469 1470 if (folio_test_large(folio)) { 1471 if (split_huge_page(page)) 1472 goto out_unlock; 1473 folio = page_folio(page); 1474 } 1475 1476 /* 1477 * If this anonymous page is mapped only here, its pte may need 1478 * to be write-protected. If it's mapped elsewhere, all of its 1479 * ptes are necessarily already write-protected. But in either 1480 * case, we need to lock and check page_count is not raised. 1481 */ 1482 if (write_protect_page(vma, folio, &orig_pte) == 0) { 1483 if (!kpage) { 1484 /* 1485 * While we hold folio lock, upgrade folio from 1486 * anon to a NULL stable_node with the KSM flag set: 1487 * stable_tree_insert() will update stable_node. 1488 */ 1489 folio_set_stable_node(folio, NULL); 1490 folio_mark_accessed(folio); 1491 /* 1492 * Page reclaim just frees a clean folio with no dirty 1493 * ptes: make sure that the ksm page would be swapped. 1494 */ 1495 if (!folio_test_dirty(folio)) 1496 folio_mark_dirty(folio); 1497 err = 0; 1498 } else if (pages_identical(page, kpage)) 1499 err = replace_page(vma, page, kpage, orig_pte); 1500 } 1501 1502 out_unlock: 1503 folio_unlock(folio); 1504 out: 1505 return err; 1506 } 1507 1508 /* 1509 * This function returns 0 if the pages were merged or if they are 1510 * no longer merging candidates (e.g., VMA stale), -EFAULT otherwise. 
1511 */ 1512 static int try_to_merge_with_zero_page(struct ksm_rmap_item *rmap_item, 1513 struct page *page) 1514 { 1515 struct mm_struct *mm = rmap_item->mm; 1516 int err = -EFAULT; 1517 1518 /* 1519 * Same checksum as an empty page. We attempt to merge it with the 1520 * appropriate zero page if the user enabled this via sysfs. 1521 */ 1522 if (ksm_use_zero_pages && (rmap_item->oldchecksum == zero_checksum)) { 1523 struct vm_area_struct *vma; 1524 1525 mmap_read_lock(mm); 1526 vma = find_mergeable_vma(mm, rmap_item->address); 1527 if (vma) { 1528 err = try_to_merge_one_page(vma, page, 1529 ZERO_PAGE(rmap_item->address)); 1530 trace_ksm_merge_one_page( 1531 page_to_pfn(ZERO_PAGE(rmap_item->address)), 1532 rmap_item, mm, err); 1533 } else { 1534 /* 1535 * If the vma is out of date, we do not need to 1536 * continue. 1537 */ 1538 err = 0; 1539 } 1540 mmap_read_unlock(mm); 1541 } 1542 1543 return err; 1544 } 1545 1546 /* 1547 * try_to_merge_with_ksm_page - like try_to_merge_two_pages, 1548 * but no new kernel page is allocated: kpage must already be a ksm page. 1549 * 1550 * This function returns 0 if the pages were merged, -EFAULT otherwise. 1551 */ 1552 static int try_to_merge_with_ksm_page(struct ksm_rmap_item *rmap_item, 1553 struct page *page, struct page *kpage) 1554 { 1555 struct mm_struct *mm = rmap_item->mm; 1556 struct vm_area_struct *vma; 1557 int err = -EFAULT; 1558 1559 mmap_read_lock(mm); 1560 vma = find_mergeable_vma(mm, rmap_item->address); 1561 if (!vma) 1562 goto out; 1563 1564 err = try_to_merge_one_page(vma, page, kpage); 1565 if (err) 1566 goto out; 1567 1568 /* Unstable nid is in union with stable anon_vma: remove first */ 1569 remove_rmap_item_from_tree(rmap_item); 1570 1571 /* Must get reference to anon_vma while still holding mmap_lock */ 1572 rmap_item->anon_vma = vma->anon_vma; 1573 get_anon_vma(vma->anon_vma); 1574 out: 1575 mmap_read_unlock(mm); 1576 trace_ksm_merge_with_ksm_page(kpage, page_to_pfn(kpage ? kpage : page), 1577 rmap_item, mm, err); 1578 return err; 1579 } 1580 1581 /* 1582 * try_to_merge_two_pages - take two identical pages and prepare them 1583 * to be merged into one page. 1584 * 1585 * This function returns the kpage if we successfully merged two identical 1586 * pages into one ksm page, NULL otherwise. 1587 * 1588 * Note that this function upgrades page to ksm page: if one of the pages 1589 * is already a ksm page, try_to_merge_with_ksm_page should be used. 1590 */ 1591 static struct folio *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item, 1592 struct page *page, 1593 struct ksm_rmap_item *tree_rmap_item, 1594 struct page *tree_page) 1595 { 1596 int err; 1597 1598 err = try_to_merge_with_ksm_page(rmap_item, page, NULL); 1599 if (!err) { 1600 err = try_to_merge_with_ksm_page(tree_rmap_item, 1601 tree_page, page); 1602 /* 1603 * If that fails, we have a ksm page with only one pte 1604 * pointing to it: so break it. 1605 */ 1606 if (err) 1607 break_cow(rmap_item); 1608 } 1609 return err ? NULL : page_folio(page); 1610 } 1611 1612 static __always_inline 1613 bool __is_page_sharing_candidate(struct ksm_stable_node *stable_node, int offset) 1614 { 1615 VM_BUG_ON(stable_node->rmap_hlist_len < 0); 1616 /* 1617 * Check that at least one mapping still exists, otherwise 1618 * there's no much point to merge and share with this 1619 * stable_node, as the underlying tree_page of the other 1620 * sharer is going to be freed soon. 
1621 */ 1622 return stable_node->rmap_hlist_len && 1623 stable_node->rmap_hlist_len + offset < ksm_max_page_sharing; 1624 } 1625 1626 static __always_inline 1627 bool is_page_sharing_candidate(struct ksm_stable_node *stable_node) 1628 { 1629 return __is_page_sharing_candidate(stable_node, 0); 1630 } 1631 1632 static struct folio *stable_node_dup(struct ksm_stable_node **_stable_node_dup, 1633 struct ksm_stable_node **_stable_node, 1634 struct rb_root *root, 1635 bool prune_stale_stable_nodes) 1636 { 1637 struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node; 1638 struct hlist_node *hlist_safe; 1639 struct folio *folio, *tree_folio = NULL; 1640 int found_rmap_hlist_len; 1641 1642 if (!prune_stale_stable_nodes || 1643 time_before(jiffies, stable_node->chain_prune_time + 1644 msecs_to_jiffies( 1645 ksm_stable_node_chains_prune_millisecs))) 1646 prune_stale_stable_nodes = false; 1647 else 1648 stable_node->chain_prune_time = jiffies; 1649 1650 hlist_for_each_entry_safe(dup, hlist_safe, 1651 &stable_node->hlist, hlist_dup) { 1652 cond_resched(); 1653 /* 1654 * We must walk all stable_node_dup to prune the stale 1655 * stable nodes during lookup. 1656 * 1657 * ksm_get_folio can drop the nodes from the 1658 * stable_node->hlist if they point to freed pages 1659 * (that's why we do a _safe walk). The "dup" 1660 * stable_node parameter itself will be freed from 1661 * under us if it returns NULL. 1662 */ 1663 folio = ksm_get_folio(dup, KSM_GET_FOLIO_NOLOCK); 1664 if (!folio) 1665 continue; 1666 /* Pick the best candidate if possible. */ 1667 if (!found || (is_page_sharing_candidate(dup) && 1668 (!is_page_sharing_candidate(found) || 1669 dup->rmap_hlist_len > found_rmap_hlist_len))) { 1670 if (found) 1671 folio_put(tree_folio); 1672 found = dup; 1673 found_rmap_hlist_len = found->rmap_hlist_len; 1674 tree_folio = folio; 1675 /* skip put_page for found candidate */ 1676 if (!prune_stale_stable_nodes && 1677 is_page_sharing_candidate(found)) 1678 break; 1679 continue; 1680 } 1681 folio_put(folio); 1682 } 1683 1684 if (found) { 1685 if (hlist_is_singular_node(&found->hlist_dup, &stable_node->hlist)) { 1686 /* 1687 * If there's not just one entry it would 1688 * corrupt memory, better BUG_ON. In KSM 1689 * context with no lock held it's not even 1690 * fatal. 1691 */ 1692 BUG_ON(stable_node->hlist.first->next); 1693 1694 /* 1695 * There's just one entry and it is below the 1696 * deduplication limit so drop the chain. 1697 */ 1698 rb_replace_node(&stable_node->node, &found->node, 1699 root); 1700 free_stable_node(stable_node); 1701 ksm_stable_node_chains--; 1702 ksm_stable_node_dups--; 1703 /* 1704 * NOTE: the caller depends on the stable_node 1705 * to be equal to stable_node_dup if the chain 1706 * was collapsed. 1707 */ 1708 *_stable_node = found; 1709 /* 1710 * Just for robustness, as stable_node is 1711 * otherwise left as a stable pointer, the 1712 * compiler shall optimize it away at build 1713 * time. 1714 */ 1715 stable_node = NULL; 1716 } else if (stable_node->hlist.first != &found->hlist_dup && 1717 __is_page_sharing_candidate(found, 1)) { 1718 /* 1719 * If the found stable_node dup can accept one 1720 * more future merge (in addition to the one 1721 * that is underway) and is not at the head of 1722 * the chain, put it there so next search will 1723 * be quicker in the !prune_stale_stable_nodes 1724 * case. 
1725 * 1726 * NOTE: it would be inaccurate to use nr > 1 1727 * instead of checking the hlist.first pointer 1728 * directly, because in the 1729 * prune_stale_stable_nodes case "nr" isn't 1730 * the position of the found dup in the chain, 1731 * but the total number of dups in the chain. 1732 */ 1733 hlist_del(&found->hlist_dup); 1734 hlist_add_head(&found->hlist_dup, 1735 &stable_node->hlist); 1736 } 1737 } else { 1738 /* Its hlist must be empty if no one found. */ 1739 free_stable_node_chain(stable_node, root); 1740 } 1741 1742 *_stable_node_dup = found; 1743 return tree_folio; 1744 } 1745 1746 /* 1747 * Like for ksm_get_folio, this function can free the *_stable_node and 1748 * *_stable_node_dup if the returned tree_page is NULL. 1749 * 1750 * It can also free and overwrite *_stable_node with the found 1751 * stable_node_dup if the chain is collapsed (in which case 1752 * *_stable_node will be equal to *_stable_node_dup like if the chain 1753 * never existed). It's up to the caller to verify tree_page is not 1754 * NULL before dereferencing *_stable_node or *_stable_node_dup. 1755 * 1756 * *_stable_node_dup is really a second output parameter of this 1757 * function and will be overwritten in all cases, the caller doesn't 1758 * need to initialize it. 1759 */ 1760 static struct folio *__stable_node_chain(struct ksm_stable_node **_stable_node_dup, 1761 struct ksm_stable_node **_stable_node, 1762 struct rb_root *root, 1763 bool prune_stale_stable_nodes) 1764 { 1765 struct ksm_stable_node *stable_node = *_stable_node; 1766 1767 if (!is_stable_node_chain(stable_node)) { 1768 *_stable_node_dup = stable_node; 1769 return ksm_get_folio(stable_node, KSM_GET_FOLIO_NOLOCK); 1770 } 1771 return stable_node_dup(_stable_node_dup, _stable_node, root, 1772 prune_stale_stable_nodes); 1773 } 1774 1775 static __always_inline struct folio *chain_prune(struct ksm_stable_node **s_n_d, 1776 struct ksm_stable_node **s_n, 1777 struct rb_root *root) 1778 { 1779 return __stable_node_chain(s_n_d, s_n, root, true); 1780 } 1781 1782 static __always_inline struct folio *chain(struct ksm_stable_node **s_n_d, 1783 struct ksm_stable_node **s_n, 1784 struct rb_root *root) 1785 { 1786 return __stable_node_chain(s_n_d, s_n, root, false); 1787 } 1788 1789 /* 1790 * stable_tree_search - search for page inside the stable tree 1791 * 1792 * This function checks if there is a page inside the stable tree 1793 * with identical content to the page that we are scanning right now. 1794 * 1795 * This function returns the stable tree node of identical content if found, 1796 * -EBUSY if the stable node's page is being migrated, NULL otherwise. 
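* On success the returned folio comes back with a reference held (taken via folio_get() or ksm_get_folio() below); the caller is expected to drop it with folio_put() once it is done with the folio.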
1797 */ 1798 static struct folio *stable_tree_search(struct page *page) 1799 { 1800 int nid; 1801 struct rb_root *root; 1802 struct rb_node **new; 1803 struct rb_node *parent; 1804 struct ksm_stable_node *stable_node, *stable_node_dup; 1805 struct ksm_stable_node *page_node; 1806 struct folio *folio; 1807 1808 folio = page_folio(page); 1809 page_node = folio_stable_node(folio); 1810 if (page_node && page_node->head != &migrate_nodes) { 1811 /* ksm page forked */ 1812 folio_get(folio); 1813 return folio; 1814 } 1815 1816 nid = get_kpfn_nid(folio_pfn(folio)); 1817 root = root_stable_tree + nid; 1818 again: 1819 new = &root->rb_node; 1820 parent = NULL; 1821 1822 while (*new) { 1823 struct folio *tree_folio; 1824 int ret; 1825 1826 cond_resched(); 1827 stable_node = rb_entry(*new, struct ksm_stable_node, node); 1828 tree_folio = chain_prune(&stable_node_dup, &stable_node, root); 1829 if (!tree_folio) { 1830 /* 1831 * If we walked over a stale stable_node, 1832 * ksm_get_folio() will call rb_erase() and it 1833 * may rebalance the tree from under us. So 1834 * restart the search from scratch. Returning 1835 * NULL would be safe too, but we'd generate 1836 * false negative insertions just because some 1837 * stable_node was stale. 1838 */ 1839 goto again; 1840 } 1841 1842 ret = memcmp_pages(page, &tree_folio->page); 1843 folio_put(tree_folio); 1844 1845 parent = *new; 1846 if (ret < 0) 1847 new = &parent->rb_left; 1848 else if (ret > 0) 1849 new = &parent->rb_right; 1850 else { 1851 if (page_node) { 1852 VM_BUG_ON(page_node->head != &migrate_nodes); 1853 /* 1854 * If the mapcount of our migrated KSM folio is 1855 * at most 1, we can merge it with another 1856 * KSM folio where we know that we have space 1857 * for one more mapping without exceeding the 1858 * ksm_max_page_sharing limit: see 1859 * chain_prune(). This way, we can avoid adding 1860 * this stable node to the chain. 1861 */ 1862 if (folio_mapcount(folio) > 1) 1863 goto chain_append; 1864 } 1865 1866 if (!is_page_sharing_candidate(stable_node_dup)) { 1867 /* 1868 * If the stable_node is a chain and 1869 * we got a payload match in memcmp 1870 * but we cannot merge the scanned 1871 * page in any of the existing 1872 * stable_node dups because they're 1873 * all full, we need to wait the 1874 * scanned page to find itself a match 1875 * in the unstable tree to create a 1876 * brand new KSM page to add later to 1877 * the dups of this stable_node. 1878 */ 1879 return NULL; 1880 } 1881 1882 /* 1883 * Lock and unlock the stable_node's page (which 1884 * might already have been migrated) so that page 1885 * migration is sure to notice its raised count. 1886 * It would be more elegant to return stable_node 1887 * than kpage, but that involves more changes. 1888 */ 1889 tree_folio = ksm_get_folio(stable_node_dup, 1890 KSM_GET_FOLIO_TRYLOCK); 1891 1892 if (PTR_ERR(tree_folio) == -EBUSY) 1893 return ERR_PTR(-EBUSY); 1894 1895 if (unlikely(!tree_folio)) 1896 /* 1897 * The tree may have been rebalanced, 1898 * so re-evaluate parent and new. 
1899 */ 1900 goto again; 1901 folio_unlock(tree_folio); 1902 1903 if (get_kpfn_nid(stable_node_dup->kpfn) != 1904 NUMA(stable_node_dup->nid)) { 1905 folio_put(tree_folio); 1906 goto replace; 1907 } 1908 return tree_folio; 1909 } 1910 } 1911 1912 if (!page_node) 1913 return NULL; 1914 1915 list_del(&page_node->list); 1916 DO_NUMA(page_node->nid = nid); 1917 rb_link_node(&page_node->node, parent, new); 1918 rb_insert_color(&page_node->node, root); 1919 out: 1920 if (is_page_sharing_candidate(page_node)) { 1921 folio_get(folio); 1922 return folio; 1923 } else 1924 return NULL; 1925 1926 replace: 1927 /* 1928 * If stable_node was a chain and chain_prune collapsed it, 1929 * stable_node has been updated to be the new regular 1930 * stable_node. A collapse of the chain is indistinguishable 1931 * from the case there was no chain in the stable 1932 * rbtree. Otherwise stable_node is the chain and 1933 * stable_node_dup is the dup to replace. 1934 */ 1935 if (stable_node_dup == stable_node) { 1936 VM_BUG_ON(is_stable_node_chain(stable_node_dup)); 1937 VM_BUG_ON(is_stable_node_dup(stable_node_dup)); 1938 /* there is no chain */ 1939 if (page_node) { 1940 VM_BUG_ON(page_node->head != &migrate_nodes); 1941 list_del(&page_node->list); 1942 DO_NUMA(page_node->nid = nid); 1943 rb_replace_node(&stable_node_dup->node, 1944 &page_node->node, 1945 root); 1946 if (is_page_sharing_candidate(page_node)) 1947 folio_get(folio); 1948 else 1949 folio = NULL; 1950 } else { 1951 rb_erase(&stable_node_dup->node, root); 1952 folio = NULL; 1953 } 1954 } else { 1955 VM_BUG_ON(!is_stable_node_chain(stable_node)); 1956 __stable_node_dup_del(stable_node_dup); 1957 if (page_node) { 1958 VM_BUG_ON(page_node->head != &migrate_nodes); 1959 list_del(&page_node->list); 1960 DO_NUMA(page_node->nid = nid); 1961 stable_node_chain_add_dup(page_node, stable_node); 1962 if (is_page_sharing_candidate(page_node)) 1963 folio_get(folio); 1964 else 1965 folio = NULL; 1966 } else { 1967 folio = NULL; 1968 } 1969 } 1970 stable_node_dup->head = &migrate_nodes; 1971 list_add(&stable_node_dup->list, stable_node_dup->head); 1972 return folio; 1973 1974 chain_append: 1975 /* 1976 * If stable_node was a chain and chain_prune collapsed it, 1977 * stable_node has been updated to be the new regular 1978 * stable_node. A collapse of the chain is indistinguishable 1979 * from the case there was no chain in the stable 1980 * rbtree. Otherwise stable_node is the chain and 1981 * stable_node_dup is the dup to replace. 1982 */ 1983 if (stable_node_dup == stable_node) { 1984 VM_BUG_ON(is_stable_node_dup(stable_node_dup)); 1985 /* chain is missing so create it */ 1986 stable_node = alloc_stable_node_chain(stable_node_dup, 1987 root); 1988 if (!stable_node) 1989 return NULL; 1990 } 1991 /* 1992 * Add this stable_node dup that was 1993 * migrated to the stable_node chain 1994 * of the current nid for this page 1995 * content. 1996 */ 1997 VM_BUG_ON(!is_stable_node_dup(stable_node_dup)); 1998 VM_BUG_ON(page_node->head != &migrate_nodes); 1999 list_del(&page_node->list); 2000 DO_NUMA(page_node->nid = nid); 2001 stable_node_chain_add_dup(page_node, stable_node); 2002 goto out; 2003 } 2004 2005 /* 2006 * stable_tree_insert - insert stable tree node pointing to new ksm page 2007 * into the stable tree. 2008 * 2009 * This function returns the stable tree node just allocated on success, 2010 * NULL otherwise. 
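* The caller holds the folio lock on @kfolio across this call (see cmp_and_merge_page()); on success the folio is marked as a KSM folio via folio_set_stable_node().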
2011 */ 2012 static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio) 2013 { 2014 int nid; 2015 unsigned long kpfn; 2016 struct rb_root *root; 2017 struct rb_node **new; 2018 struct rb_node *parent; 2019 struct ksm_stable_node *stable_node, *stable_node_dup; 2020 bool need_chain = false; 2021 2022 kpfn = folio_pfn(kfolio); 2023 nid = get_kpfn_nid(kpfn); 2024 root = root_stable_tree + nid; 2025 again: 2026 parent = NULL; 2027 new = &root->rb_node; 2028 2029 while (*new) { 2030 struct folio *tree_folio; 2031 int ret; 2032 2033 cond_resched(); 2034 stable_node = rb_entry(*new, struct ksm_stable_node, node); 2035 tree_folio = chain(&stable_node_dup, &stable_node, root); 2036 if (!tree_folio) { 2037 /* 2038 * If we walked over a stale stable_node, 2039 * ksm_get_folio() will call rb_erase() and it 2040 * may rebalance the tree from under us. So 2041 * restart the search from scratch. Returning 2042 * NULL would be safe too, but we'd generate 2043 * false negative insertions just because some 2044 * stable_node was stale. 2045 */ 2046 goto again; 2047 } 2048 2049 ret = memcmp_pages(&kfolio->page, &tree_folio->page); 2050 folio_put(tree_folio); 2051 2052 parent = *new; 2053 if (ret < 0) 2054 new = &parent->rb_left; 2055 else if (ret > 0) 2056 new = &parent->rb_right; 2057 else { 2058 need_chain = true; 2059 break; 2060 } 2061 } 2062 2063 stable_node_dup = alloc_stable_node(); 2064 if (!stable_node_dup) 2065 return NULL; 2066 2067 INIT_HLIST_HEAD(&stable_node_dup->hlist); 2068 stable_node_dup->kpfn = kpfn; 2069 stable_node_dup->rmap_hlist_len = 0; 2070 DO_NUMA(stable_node_dup->nid = nid); 2071 if (!need_chain) { 2072 rb_link_node(&stable_node_dup->node, parent, new); 2073 rb_insert_color(&stable_node_dup->node, root); 2074 } else { 2075 if (!is_stable_node_chain(stable_node)) { 2076 struct ksm_stable_node *orig = stable_node; 2077 /* chain is missing so create it */ 2078 stable_node = alloc_stable_node_chain(orig, root); 2079 if (!stable_node) { 2080 free_stable_node(stable_node_dup); 2081 return NULL; 2082 } 2083 } 2084 stable_node_chain_add_dup(stable_node_dup, stable_node); 2085 } 2086 2087 folio_set_stable_node(kfolio, stable_node_dup); 2088 2089 return stable_node_dup; 2090 } 2091 2092 /* 2093 * unstable_tree_search_insert - search for identical page, 2094 * else insert rmap_item into the unstable tree. 2095 * 2096 * This function searches for a page in the unstable tree identical to the 2097 * page currently being scanned; and if no identical page is found in the 2098 * tree, we insert rmap_item as a new object into the unstable tree. 2099 * 2100 * This function returns pointer to rmap_item found to be identical 2101 * to the currently scanned page, NULL otherwise. 2102 * 2103 * This function does both searching and inserting, because they share 2104 * the same walking algorithm in an rbtree. 
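* When a match is found, *tree_pagep is set to the matching page, which still carries the reference taken by get_mergeable_page(); the caller is responsible for the corresponding put_page().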
2105 */ 2106 static 2107 struct ksm_rmap_item *unstable_tree_search_insert(struct ksm_rmap_item *rmap_item, 2108 struct page *page, 2109 struct page **tree_pagep) 2110 { 2111 struct rb_node **new; 2112 struct rb_root *root; 2113 struct rb_node *parent = NULL; 2114 int nid; 2115 2116 nid = get_kpfn_nid(page_to_pfn(page)); 2117 root = root_unstable_tree + nid; 2118 new = &root->rb_node; 2119 2120 while (*new) { 2121 struct ksm_rmap_item *tree_rmap_item; 2122 struct page *tree_page; 2123 int ret; 2124 2125 cond_resched(); 2126 tree_rmap_item = rb_entry(*new, struct ksm_rmap_item, node); 2127 tree_page = get_mergeable_page(tree_rmap_item); 2128 if (!tree_page) 2129 return NULL; 2130 2131 /* 2132 * Don't substitute a ksm page for a forked page. 2133 */ 2134 if (page == tree_page) { 2135 put_page(tree_page); 2136 return NULL; 2137 } 2138 2139 ret = memcmp_pages(page, tree_page); 2140 2141 parent = *new; 2142 if (ret < 0) { 2143 put_page(tree_page); 2144 new = &parent->rb_left; 2145 } else if (ret > 0) { 2146 put_page(tree_page); 2147 new = &parent->rb_right; 2148 } else if (!ksm_merge_across_nodes && 2149 page_to_nid(tree_page) != nid) { 2150 /* 2151 * If tree_page has been migrated to another NUMA node, 2152 * it will be flushed out and put in the right unstable 2153 * tree next time: only merge with it when across_nodes. 2154 */ 2155 put_page(tree_page); 2156 return NULL; 2157 } else { 2158 *tree_pagep = tree_page; 2159 return tree_rmap_item; 2160 } 2161 } 2162 2163 rmap_item->address |= UNSTABLE_FLAG; 2164 rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); 2165 DO_NUMA(rmap_item->nid = nid); 2166 rb_link_node(&rmap_item->node, parent, new); 2167 rb_insert_color(&rmap_item->node, root); 2168 2169 ksm_pages_unshared++; 2170 return NULL; 2171 } 2172 2173 /* 2174 * stable_tree_append - add another rmap_item to the linked list of 2175 * rmap_items hanging off a given node of the stable tree, all sharing 2176 * the same ksm page. 2177 */ 2178 static void stable_tree_append(struct ksm_rmap_item *rmap_item, 2179 struct ksm_stable_node *stable_node, 2180 bool max_page_sharing_bypass) 2181 { 2182 /* 2183 * rmap won't find this mapping if we don't insert the 2184 * rmap_item in the right stable_node 2185 * duplicate. page_migration could break later if rmap breaks, 2186 * so we can as well crash here. We really need to check for 2187 * rmap_hlist_len == STABLE_NODE_CHAIN, but we can as well check 2188 * for other negative values as an underflow if detected here 2189 * for the first time (and not when decreasing rmap_hlist_len) 2190 * would be sign of memory corruption in the stable_node. 2191 */ 2192 BUG_ON(stable_node->rmap_hlist_len < 0); 2193 2194 stable_node->rmap_hlist_len++; 2195 if (!max_page_sharing_bypass) 2196 /* possibly non fatal but unexpected overflow, only warn */ 2197 WARN_ON_ONCE(stable_node->rmap_hlist_len > 2198 ksm_max_page_sharing); 2199 2200 rmap_item->head = stable_node; 2201 rmap_item->address |= STABLE_FLAG; 2202 hlist_add_head(&rmap_item->hlist, &stable_node->hlist); 2203 2204 if (rmap_item->hlist.next) 2205 ksm_pages_sharing++; 2206 else 2207 ksm_pages_shared++; 2208 2209 rmap_item->mm->ksm_merging_pages++; 2210 } 2211 2212 /* 2213 * cmp_and_merge_page - first see if page can be merged into the stable tree; 2214 * if not, compare checksum to previous and if it's the same, see if page can 2215 * be inserted into the unstable tree, or merged with a page already there and 2216 * both transferred to the stable tree. 
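* A page that is not yet a KSM page and whose checksum equals the empty-page checksum is first offered to try_to_merge_with_zero_page() (when use_zero_pages is enabled) before the tree searches below.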
2217 * 2218 * @page: the page that we are searching identical page to. 2219 * @rmap_item: the reverse mapping into the virtual address of this page 2220 */ 2221 static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_item) 2222 { 2223 struct folio *folio = page_folio(page); 2224 struct ksm_rmap_item *tree_rmap_item; 2225 struct page *tree_page = NULL; 2226 struct ksm_stable_node *stable_node; 2227 struct folio *kfolio; 2228 unsigned int checksum; 2229 int err; 2230 bool max_page_sharing_bypass = false; 2231 2232 stable_node = folio_stable_node(folio); 2233 if (stable_node) { 2234 if (stable_node->head != &migrate_nodes && 2235 get_kpfn_nid(READ_ONCE(stable_node->kpfn)) != 2236 NUMA(stable_node->nid)) { 2237 stable_node_dup_del(stable_node); 2238 stable_node->head = &migrate_nodes; 2239 list_add(&stable_node->list, stable_node->head); 2240 } 2241 if (stable_node->head != &migrate_nodes && 2242 rmap_item->head == stable_node) 2243 return; 2244 /* 2245 * If it's a KSM fork, allow it to go over the sharing limit 2246 * without warnings. 2247 */ 2248 if (!is_page_sharing_candidate(stable_node)) 2249 max_page_sharing_bypass = true; 2250 } else { 2251 remove_rmap_item_from_tree(rmap_item); 2252 2253 /* 2254 * If the hash value of the page has changed from the last time 2255 * we calculated it, this page is changing frequently: therefore we 2256 * don't want to insert it in the unstable tree, and we don't want 2257 * to waste our time searching for something identical to it there. 2258 */ 2259 checksum = calc_checksum(page); 2260 if (rmap_item->oldchecksum != checksum) { 2261 rmap_item->oldchecksum = checksum; 2262 return; 2263 } 2264 2265 if (!try_to_merge_with_zero_page(rmap_item, page)) 2266 return; 2267 } 2268 2269 /* Start by searching for the folio in the stable tree */ 2270 kfolio = stable_tree_search(page); 2271 if (kfolio == folio && rmap_item->head == stable_node) { 2272 folio_put(kfolio); 2273 return; 2274 } 2275 2276 remove_rmap_item_from_tree(rmap_item); 2277 2278 if (kfolio) { 2279 if (kfolio == ERR_PTR(-EBUSY)) 2280 return; 2281 2282 err = try_to_merge_with_ksm_page(rmap_item, page, &kfolio->page); 2283 if (!err) { 2284 /* 2285 * The page was successfully merged: 2286 * add its rmap_item to the stable tree. 2287 */ 2288 folio_lock(kfolio); 2289 stable_tree_append(rmap_item, folio_stable_node(kfolio), 2290 max_page_sharing_bypass); 2291 folio_unlock(kfolio); 2292 } 2293 folio_put(kfolio); 2294 return; 2295 } 2296 2297 tree_rmap_item = 2298 unstable_tree_search_insert(rmap_item, page, &tree_page); 2299 if (tree_rmap_item) { 2300 bool split; 2301 2302 kfolio = try_to_merge_two_pages(rmap_item, page, 2303 tree_rmap_item, tree_page); 2304 /* 2305 * If both pages we tried to merge belong to the same compound 2306 * page, then we actually ended up increasing the reference 2307 * count of the same compound page twice, and split_huge_page 2308 * failed. 2309 * Here we set a flag if that happened, and we use it later to 2310 * try split_huge_page again. Since we call put_page right 2311 * afterwards, the reference count will be correct and 2312 * split_huge_page should succeed. 2313 */ 2314 split = PageTransCompound(page) 2315 && compound_head(page) == compound_head(tree_page); 2316 put_page(tree_page); 2317 if (kfolio) { 2318 /* 2319 * The pages were successfully merged: insert new 2320 * node in the stable tree and add both rmap_items. 
2321 */ 2322 folio_lock(kfolio); 2323 stable_node = stable_tree_insert(kfolio); 2324 if (stable_node) { 2325 stable_tree_append(tree_rmap_item, stable_node, 2326 false); 2327 stable_tree_append(rmap_item, stable_node, 2328 false); 2329 } 2330 folio_unlock(kfolio); 2331 2332 /* 2333 * If we fail to insert the page into the stable tree, 2334 * we will have 2 virtual addresses that are pointing 2335 * to a ksm page left outside the stable tree, 2336 * in which case we need to break_cow on both. 2337 */ 2338 if (!stable_node) { 2339 break_cow(tree_rmap_item); 2340 break_cow(rmap_item); 2341 } 2342 } else if (split) { 2343 /* 2344 * We are here if we tried to merge two pages and 2345 * failed because they both belonged to the same 2346 * compound page. We will split the page now, but no 2347 * merging will take place. 2348 * We do not want to add the cost of a full lock; if 2349 * the page is locked, it is better to skip it and 2350 * perhaps try again later. 2351 */ 2352 if (!folio_trylock(folio)) 2353 return; 2354 split_huge_page(page); 2355 folio = page_folio(page); 2356 folio_unlock(folio); 2357 } 2358 } 2359 } 2360 2361 static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot, 2362 struct ksm_rmap_item **rmap_list, 2363 unsigned long addr) 2364 { 2365 struct ksm_rmap_item *rmap_item; 2366 2367 while (*rmap_list) { 2368 rmap_item = *rmap_list; 2369 if ((rmap_item->address & PAGE_MASK) == addr) 2370 return rmap_item; 2371 if (rmap_item->address > addr) 2372 break; 2373 *rmap_list = rmap_item->rmap_list; 2374 remove_rmap_item_from_tree(rmap_item); 2375 free_rmap_item(rmap_item); 2376 } 2377 2378 rmap_item = alloc_rmap_item(); 2379 if (rmap_item) { 2380 /* It has already been zeroed */ 2381 rmap_item->mm = mm_slot->slot.mm; 2382 rmap_item->mm->ksm_rmap_items++; 2383 rmap_item->address = addr; 2384 rmap_item->rmap_list = *rmap_list; 2385 *rmap_list = rmap_item; 2386 } 2387 return rmap_item; 2388 } 2389 2390 /* 2391 * Calculate skip age for the ksm page age. The age determines how often 2392 * de-duplicating has already been tried unsuccessfully. If the age is 2393 * smaller, the scanning of this page is skipped for less scans. 2394 * 2395 * @age: rmap_item age of page 2396 */ 2397 static unsigned int skip_age(rmap_age_t age) 2398 { 2399 if (age <= 3) 2400 return 1; 2401 if (age <= 5) 2402 return 2; 2403 if (age <= 8) 2404 return 4; 2405 2406 return 8; 2407 } 2408 2409 /* 2410 * Determines if a page should be skipped for the current scan. 2411 * 2412 * @folio: folio containing the page to check 2413 * @rmap_item: associated rmap_item of page 2414 */ 2415 static bool should_skip_rmap_item(struct folio *folio, 2416 struct ksm_rmap_item *rmap_item) 2417 { 2418 rmap_age_t age; 2419 2420 if (!ksm_smart_scan) 2421 return false; 2422 2423 /* 2424 * Never skip pages that are already KSM; pages cmp_and_merge_page() 2425 * will essentially ignore them, but we still have to process them 2426 * properly. 2427 */ 2428 if (folio_test_ksm(folio)) 2429 return false; 2430 2431 age = rmap_item->age; 2432 if (age != U8_MAX) 2433 rmap_item->age++; 2434 2435 /* 2436 * Smaller ages are not skipped, they need to get a chance to go 2437 * through the different phases of the KSM merging. 2438 */ 2439 if (age < 3) 2440 return false; 2441 2442 /* 2443 * Are we still allowed to skip? If not, then don't skip it 2444 * and determine how much more often we are allowed to skip next. 
2445 */ 2446 if (!rmap_item->remaining_skips) { 2447 rmap_item->remaining_skips = skip_age(age); 2448 return false; 2449 } 2450 2451 /* Skip this page */ 2452 ksm_pages_skipped++; 2453 rmap_item->remaining_skips--; 2454 remove_rmap_item_from_tree(rmap_item); 2455 return true; 2456 } 2457 2458 struct ksm_next_page_arg { 2459 struct folio *folio; 2460 struct page *page; 2461 unsigned long addr; 2462 }; 2463 2464 static int ksm_next_page_pmd_entry(pmd_t *pmdp, unsigned long addr, unsigned long end, 2465 struct mm_walk *walk) 2466 { 2467 struct ksm_next_page_arg *private = walk->private; 2468 struct vm_area_struct *vma = walk->vma; 2469 pte_t *start_ptep = NULL, *ptep, pte; 2470 struct mm_struct *mm = walk->mm; 2471 struct folio *folio; 2472 struct page *page; 2473 spinlock_t *ptl; 2474 pmd_t pmd; 2475 2476 if (ksm_test_exit(mm)) 2477 return 0; 2478 2479 cond_resched(); 2480 2481 pmd = pmdp_get_lockless(pmdp); 2482 if (!pmd_present(pmd)) 2483 return 0; 2484 2485 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && pmd_leaf(pmd)) { 2486 ptl = pmd_lock(mm, pmdp); 2487 pmd = pmdp_get(pmdp); 2488 2489 if (!pmd_present(pmd)) { 2490 goto not_found_unlock; 2491 } else if (pmd_leaf(pmd)) { 2492 page = vm_normal_page_pmd(vma, addr, pmd); 2493 if (!page) 2494 goto not_found_unlock; 2495 folio = page_folio(page); 2496 2497 if (folio_is_zone_device(folio) || !folio_test_anon(folio)) 2498 goto not_found_unlock; 2499 2500 page += ((addr & (PMD_SIZE - 1)) >> PAGE_SHIFT); 2501 goto found_unlock; 2502 } 2503 spin_unlock(ptl); 2504 } 2505 2506 start_ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); 2507 if (!start_ptep) 2508 return 0; 2509 2510 for (ptep = start_ptep; addr < end; ptep++, addr += PAGE_SIZE) { 2511 pte = ptep_get(ptep); 2512 2513 if (!pte_present(pte)) 2514 continue; 2515 2516 page = vm_normal_page(vma, addr, pte); 2517 if (!page) 2518 continue; 2519 folio = page_folio(page); 2520 2521 if (folio_is_zone_device(folio) || !folio_test_anon(folio)) 2522 continue; 2523 goto found_unlock; 2524 } 2525 2526 not_found_unlock: 2527 spin_unlock(ptl); 2528 if (start_ptep) 2529 pte_unmap(start_ptep); 2530 return 0; 2531 found_unlock: 2532 folio_get(folio); 2533 spin_unlock(ptl); 2534 if (start_ptep) 2535 pte_unmap(start_ptep); 2536 private->page = page; 2537 private->folio = folio; 2538 private->addr = addr; 2539 return 1; 2540 } 2541 2542 static struct mm_walk_ops ksm_next_page_ops = { 2543 .pmd_entry = ksm_next_page_pmd_entry, 2544 .walk_lock = PGWALK_RDLOCK, 2545 }; 2546 2547 static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page) 2548 { 2549 struct mm_struct *mm; 2550 struct ksm_mm_slot *mm_slot; 2551 struct mm_slot *slot; 2552 struct vm_area_struct *vma; 2553 struct ksm_rmap_item *rmap_item; 2554 struct vma_iterator vmi; 2555 int nid; 2556 2557 if (list_empty(&ksm_mm_head.slot.mm_node)) 2558 return NULL; 2559 2560 mm_slot = ksm_scan.mm_slot; 2561 if (mm_slot == &ksm_mm_head) { 2562 advisor_start_scan(); 2563 trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items); 2564 2565 /* 2566 * A number of pages can hang around indefinitely in per-cpu 2567 * LRU cache, raised page count preventing write_protect_page 2568 * from merging them. Though it doesn't really matter much, 2569 * it is puzzling to see some stuck in pages_volatile until 2570 * other activity jostles them out, and they also prevented 2571 * LTP's KSM test from succeeding deterministically; so drain 2572 * them here (here rather than on entry to ksm_do_scan(), 2573 * so we don't IPI too often when pages_to_scan is set low). 
2574 */ 2575 lru_add_drain_all(); 2576 2577 /* 2578 * Whereas stale stable_nodes on the stable_tree itself 2579 * get pruned in the regular course of stable_tree_search(), 2580 * those moved out to the migrate_nodes list can accumulate: 2581 * so prune them once before each full scan. 2582 */ 2583 if (!ksm_merge_across_nodes) { 2584 struct ksm_stable_node *stable_node, *next; 2585 struct folio *folio; 2586 2587 list_for_each_entry_safe(stable_node, next, 2588 &migrate_nodes, list) { 2589 folio = ksm_get_folio(stable_node, 2590 KSM_GET_FOLIO_NOLOCK); 2591 if (folio) 2592 folio_put(folio); 2593 cond_resched(); 2594 } 2595 } 2596 2597 for (nid = 0; nid < ksm_nr_node_ids; nid++) 2598 root_unstable_tree[nid] = RB_ROOT; 2599 2600 spin_lock(&ksm_mmlist_lock); 2601 slot = list_entry(mm_slot->slot.mm_node.next, 2602 struct mm_slot, mm_node); 2603 mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); 2604 ksm_scan.mm_slot = mm_slot; 2605 spin_unlock(&ksm_mmlist_lock); 2606 /* 2607 * Although we tested list_empty() above, a racing __ksm_exit 2608 * of the last mm on the list may have removed it since then. 2609 */ 2610 if (mm_slot == &ksm_mm_head) 2611 return NULL; 2612 next_mm: 2613 ksm_scan.address = 0; 2614 ksm_scan.rmap_list = &mm_slot->rmap_list; 2615 } 2616 2617 slot = &mm_slot->slot; 2618 mm = slot->mm; 2619 vma_iter_init(&vmi, mm, ksm_scan.address); 2620 2621 mmap_read_lock(mm); 2622 if (ksm_test_exit(mm)) 2623 goto no_vmas; 2624 2625 for_each_vma(vmi, vma) { 2626 if (!(vma->vm_flags & VM_MERGEABLE)) 2627 continue; 2628 if (ksm_scan.address < vma->vm_start) 2629 ksm_scan.address = vma->vm_start; 2630 if (!vma->anon_vma) 2631 ksm_scan.address = vma->vm_end; 2632 2633 while (ksm_scan.address < vma->vm_end) { 2634 struct ksm_next_page_arg ksm_next_page_arg; 2635 struct page *tmp_page = NULL; 2636 struct folio *folio; 2637 2638 if (ksm_test_exit(mm)) 2639 break; 2640 2641 int found; 2642 2643 found = walk_page_range_vma(vma, ksm_scan.address, 2644 vma->vm_end, 2645 &ksm_next_page_ops, 2646 &ksm_next_page_arg); 2647 2648 if (found > 0) { 2649 folio = ksm_next_page_arg.folio; 2650 tmp_page = ksm_next_page_arg.page; 2651 ksm_scan.address = ksm_next_page_arg.addr; 2652 } else { 2653 VM_WARN_ON_ONCE(found < 0); 2654 ksm_scan.address = vma->vm_end - PAGE_SIZE; 2655 } 2656 2657 if (tmp_page) { 2658 flush_anon_page(vma, tmp_page, ksm_scan.address); 2659 flush_dcache_page(tmp_page); 2660 rmap_item = get_next_rmap_item(mm_slot, 2661 ksm_scan.rmap_list, ksm_scan.address); 2662 if (rmap_item) { 2663 ksm_scan.rmap_list = 2664 &rmap_item->rmap_list; 2665 2666 if (should_skip_rmap_item(folio, rmap_item)) { 2667 folio_put(folio); 2668 goto next_page; 2669 } 2670 2671 ksm_scan.address += PAGE_SIZE; 2672 *page = tmp_page; 2673 } else { 2674 folio_put(folio); 2675 } 2676 mmap_read_unlock(mm); 2677 return rmap_item; 2678 } 2679 next_page: 2680 ksm_scan.address += PAGE_SIZE; 2681 cond_resched(); 2682 } 2683 } 2684 2685 if (ksm_test_exit(mm)) { 2686 no_vmas: 2687 ksm_scan.address = 0; 2688 ksm_scan.rmap_list = &mm_slot->rmap_list; 2689 } 2690 /* 2691 * Nuke all the rmap_items that are above this current rmap: 2692 * because there were no VM_MERGEABLE vmas with such addresses. 
2693 */ 2694 remove_trailing_rmap_items(ksm_scan.rmap_list); 2695 2696 spin_lock(&ksm_mmlist_lock); 2697 slot = list_entry(mm_slot->slot.mm_node.next, 2698 struct mm_slot, mm_node); 2699 ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); 2700 if (ksm_scan.address == 0) { 2701 /* 2702 * We've completed a full scan of all vmas, holding mmap_lock 2703 * throughout, and found no VM_MERGEABLE: so do the same as 2704 * __ksm_exit does to remove this mm from all our lists now. 2705 * This applies either when cleaning up after __ksm_exit 2706 * (but beware: we can reach here even before __ksm_exit), 2707 * or when all VM_MERGEABLE areas have been unmapped (and 2708 * mmap_lock then protects against race with MADV_MERGEABLE). 2709 */ 2710 hash_del(&mm_slot->slot.hash); 2711 list_del(&mm_slot->slot.mm_node); 2712 spin_unlock(&ksm_mmlist_lock); 2713 2714 mm_slot_free(mm_slot_cache, mm_slot); 2715 mm_flags_clear(MMF_VM_MERGEABLE, mm); 2716 mm_flags_clear(MMF_VM_MERGE_ANY, mm); 2717 mmap_read_unlock(mm); 2718 mmdrop(mm); 2719 } else { 2720 mmap_read_unlock(mm); 2721 /* 2722 * mmap_read_unlock(mm) first because after 2723 * spin_unlock(&ksm_mmlist_lock) run, the "mm" may 2724 * already have been freed under us by __ksm_exit() 2725 * because the "mm_slot" is still hashed and 2726 * ksm_scan.mm_slot doesn't point to it anymore. 2727 */ 2728 spin_unlock(&ksm_mmlist_lock); 2729 } 2730 2731 /* Repeat until we've completed scanning the whole list */ 2732 mm_slot = ksm_scan.mm_slot; 2733 if (mm_slot != &ksm_mm_head) 2734 goto next_mm; 2735 2736 advisor_stop_scan(); 2737 2738 trace_ksm_stop_scan(ksm_scan.seqnr, ksm_rmap_items); 2739 ksm_scan.seqnr++; 2740 return NULL; 2741 } 2742 2743 /** 2744 * ksm_do_scan - the ksm scanner main worker function. 2745 * @scan_npages: number of pages we want to scan before we return. 
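* Stops early if the task is freezing or once scan_get_next_rmap_item() finds no more pages; each page handed back is passed to cmp_and_merge_page() and then released with put_page().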
2746 */ 2747 static void ksm_do_scan(unsigned int scan_npages) 2748 { 2749 struct ksm_rmap_item *rmap_item; 2750 struct page *page; 2751 2752 while (scan_npages-- && likely(!freezing(current))) { 2753 cond_resched(); 2754 rmap_item = scan_get_next_rmap_item(&page); 2755 if (!rmap_item) 2756 return; 2757 cmp_and_merge_page(page, rmap_item); 2758 put_page(page); 2759 ksm_pages_scanned++; 2760 } 2761 } 2762 2763 static int ksmd_should_run(void) 2764 { 2765 return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.slot.mm_node); 2766 } 2767 2768 static int ksm_scan_thread(void *nothing) 2769 { 2770 unsigned int sleep_ms; 2771 2772 set_freezable(); 2773 set_user_nice(current, 5); 2774 2775 while (!kthread_should_stop()) { 2776 mutex_lock(&ksm_thread_mutex); 2777 wait_while_offlining(); 2778 if (ksmd_should_run()) 2779 ksm_do_scan(ksm_thread_pages_to_scan); 2780 mutex_unlock(&ksm_thread_mutex); 2781 2782 if (ksmd_should_run()) { 2783 sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs); 2784 wait_event_freezable_timeout(ksm_iter_wait, 2785 sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs), 2786 msecs_to_jiffies(sleep_ms)); 2787 } else { 2788 wait_event_freezable(ksm_thread_wait, 2789 ksmd_should_run() || kthread_should_stop()); 2790 } 2791 } 2792 return 0; 2793 } 2794 2795 static bool __ksm_should_add_vma(const struct file *file, vm_flags_t vm_flags) 2796 { 2797 if (vm_flags & VM_MERGEABLE) 2798 return false; 2799 2800 return ksm_compatible(file, vm_flags); 2801 } 2802 2803 static void __ksm_add_vma(struct vm_area_struct *vma) 2804 { 2805 if (__ksm_should_add_vma(vma->vm_file, vma->vm_flags)) 2806 vm_flags_set(vma, VM_MERGEABLE); 2807 } 2808 2809 static int __ksm_del_vma(struct vm_area_struct *vma) 2810 { 2811 int err; 2812 2813 if (!(vma->vm_flags & VM_MERGEABLE)) 2814 return 0; 2815 2816 if (vma->anon_vma) { 2817 err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end, true); 2818 if (err) 2819 return err; 2820 } 2821 2822 vm_flags_clear(vma, VM_MERGEABLE); 2823 return 0; 2824 } 2825 /** 2826 * ksm_vma_flags - Update VMA flags to mark as mergeable if compatible 2827 * 2828 * @mm: Proposed VMA's mm_struct 2829 * @file: Proposed VMA's file-backed mapping, if any. 2830 * @vm_flags: Proposed VMA's flags. 2831 * 2832 * Returns: @vm_flags possibly updated to mark mergeable.
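* VM_MERGEABLE is only added when the mm has MMF_VM_MERGE_ANY set and __ksm_should_add_vma() reports the proposed mapping as KSM-compatible.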
2833 */ 2834 vm_flags_t ksm_vma_flags(const struct mm_struct *mm, const struct file *file, 2835 vm_flags_t vm_flags) 2836 { 2837 if (mm_flags_test(MMF_VM_MERGE_ANY, mm) && 2838 __ksm_should_add_vma(file, vm_flags)) 2839 vm_flags |= VM_MERGEABLE; 2840 2841 return vm_flags; 2842 } 2843 2844 static void ksm_add_vmas(struct mm_struct *mm) 2845 { 2846 struct vm_area_struct *vma; 2847 2848 VMA_ITERATOR(vmi, mm, 0); 2849 for_each_vma(vmi, vma) 2850 __ksm_add_vma(vma); 2851 } 2852 2853 static int ksm_del_vmas(struct mm_struct *mm) 2854 { 2855 struct vm_area_struct *vma; 2856 int err; 2857 2858 VMA_ITERATOR(vmi, mm, 0); 2859 for_each_vma(vmi, vma) { 2860 err = __ksm_del_vma(vma); 2861 if (err) 2862 return err; 2863 } 2864 return 0; 2865 } 2866 2867 /** 2868 * ksm_enable_merge_any - Add mm to mm ksm list and enable merging on all 2869 * compatible VMA's 2870 * 2871 * @mm: Pointer to mm 2872 * 2873 * Returns 0 on success, otherwise error code 2874 */ 2875 int ksm_enable_merge_any(struct mm_struct *mm) 2876 { 2877 int err; 2878 2879 if (mm_flags_test(MMF_VM_MERGE_ANY, mm)) 2880 return 0; 2881 2882 if (!mm_flags_test(MMF_VM_MERGEABLE, mm)) { 2883 err = __ksm_enter(mm); 2884 if (err) 2885 return err; 2886 } 2887 2888 mm_flags_set(MMF_VM_MERGE_ANY, mm); 2889 ksm_add_vmas(mm); 2890 2891 return 0; 2892 } 2893 2894 /** 2895 * ksm_disable_merge_any - Disable merging on all compatible VMA's of the mm, 2896 * previously enabled via ksm_enable_merge_any(). 2897 * 2898 * Disabling merging implies unmerging any merged pages, like setting 2899 * MADV_UNMERGEABLE would. If unmerging fails, the whole operation fails and 2900 * merging on all compatible VMA's remains enabled. 2901 * 2902 * @mm: Pointer to mm 2903 * 2904 * Returns 0 on success, otherwise error code 2905 */ 2906 int ksm_disable_merge_any(struct mm_struct *mm) 2907 { 2908 int err; 2909 2910 if (!mm_flags_test(MMF_VM_MERGE_ANY, mm)) 2911 return 0; 2912 2913 err = ksm_del_vmas(mm); 2914 if (err) { 2915 ksm_add_vmas(mm); 2916 return err; 2917 } 2918 2919 mm_flags_clear(MMF_VM_MERGE_ANY, mm); 2920 return 0; 2921 } 2922 2923 int ksm_disable(struct mm_struct *mm) 2924 { 2925 mmap_assert_write_locked(mm); 2926 2927 if (!mm_flags_test(MMF_VM_MERGEABLE, mm)) 2928 return 0; 2929 if (mm_flags_test(MMF_VM_MERGE_ANY, mm)) 2930 return ksm_disable_merge_any(mm); 2931 return ksm_del_vmas(mm); 2932 } 2933 2934 int ksm_madvise(struct vm_area_struct *vma, unsigned long start, 2935 unsigned long end, int advice, vm_flags_t *vm_flags) 2936 { 2937 struct mm_struct *mm = vma->vm_mm; 2938 int err; 2939 2940 switch (advice) { 2941 case MADV_MERGEABLE: 2942 if (vma->vm_flags & VM_MERGEABLE) 2943 return 0; 2944 if (!vma_ksm_compatible(vma)) 2945 return 0; 2946 2947 if (!mm_flags_test(MMF_VM_MERGEABLE, mm)) { 2948 err = __ksm_enter(mm); 2949 if (err) 2950 return err; 2951 } 2952 2953 *vm_flags |= VM_MERGEABLE; 2954 break; 2955 2956 case MADV_UNMERGEABLE: 2957 if (!(*vm_flags & VM_MERGEABLE)) 2958 return 0; /* just ignore the advice */ 2959 2960 if (vma->anon_vma) { 2961 err = unmerge_ksm_pages(vma, start, end, true); 2962 if (err) 2963 return err; 2964 } 2965 2966 *vm_flags &= ~VM_MERGEABLE; 2967 break; 2968 } 2969 2970 return 0; 2971 } 2972 EXPORT_SYMBOL_GPL(ksm_madvise); 2973 2974 int __ksm_enter(struct mm_struct *mm) 2975 { 2976 struct ksm_mm_slot *mm_slot; 2977 struct mm_slot *slot; 2978 int needs_wakeup; 2979 2980 mm_slot = mm_slot_alloc(mm_slot_cache); 2981 if (!mm_slot) 2982 return -ENOMEM; 2983 2984 slot = &mm_slot->slot; 2985 2986 /* Check ksm_run too? 
Would need tighter locking */ 2987 needs_wakeup = list_empty(&ksm_mm_head.slot.mm_node); 2988 2989 spin_lock(&ksm_mmlist_lock); 2990 mm_slot_insert(mm_slots_hash, mm, slot); 2991 /* 2992 * When KSM_RUN_MERGE (or KSM_RUN_STOP), 2993 * insert just behind the scanning cursor, to let the area settle 2994 * down a little; when fork is followed by immediate exec, we don't 2995 * want ksmd to waste time setting up and tearing down an rmap_list. 2996 * 2997 * But when KSM_RUN_UNMERGE, it's important to insert ahead of its 2998 * scanning cursor, otherwise KSM pages in newly forked mms will be 2999 * missed: then we might as well insert at the end of the list. 3000 */ 3001 if (ksm_run & KSM_RUN_UNMERGE) 3002 list_add_tail(&slot->mm_node, &ksm_mm_head.slot.mm_node); 3003 else 3004 list_add_tail(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node); 3005 spin_unlock(&ksm_mmlist_lock); 3006 3007 mm_flags_set(MMF_VM_MERGEABLE, mm); 3008 mmgrab(mm); 3009 3010 if (needs_wakeup) 3011 wake_up_interruptible(&ksm_thread_wait); 3012 3013 trace_ksm_enter(mm); 3014 return 0; 3015 } 3016 3017 void __ksm_exit(struct mm_struct *mm) 3018 { 3019 struct ksm_mm_slot *mm_slot = NULL; 3020 struct mm_slot *slot; 3021 int easy_to_free = 0; 3022 3023 /* 3024 * This process is exiting: if it's straightforward (as is the 3025 * case when ksmd was never running), free mm_slot immediately. 3026 * But if it's at the cursor or has rmap_items linked to it, use 3027 * mmap_lock to synchronize with any break_cows before pagetables 3028 * are freed, and leave the mm_slot on the list for ksmd to free. 3029 * Beware: ksm may already have noticed it exiting and freed the slot. 3030 */ 3031 3032 spin_lock(&ksm_mmlist_lock); 3033 slot = mm_slot_lookup(mm_slots_hash, mm); 3034 if (!slot) 3035 goto unlock; 3036 mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); 3037 if (ksm_scan.mm_slot == mm_slot) 3038 goto unlock; 3039 if (!mm_slot->rmap_list) { 3040 hash_del(&slot->hash); 3041 list_del(&slot->mm_node); 3042 easy_to_free = 1; 3043 } else { 3044 list_move(&slot->mm_node, 3045 &ksm_scan.mm_slot->slot.mm_node); 3046 } 3047 unlock: 3048 spin_unlock(&ksm_mmlist_lock); 3049 3050 if (easy_to_free) { 3051 mm_slot_free(mm_slot_cache, mm_slot); 3052 mm_flags_clear(MMF_VM_MERGE_ANY, mm); 3053 mm_flags_clear(MMF_VM_MERGEABLE, mm); 3054 mmdrop(mm); 3055 } else if (mm_slot) { 3056 mmap_write_lock(mm); 3057 mmap_write_unlock(mm); 3058 } 3059 3060 trace_ksm_exit(mm); 3061 } 3062 3063 struct folio *ksm_might_need_to_copy(struct folio *folio, 3064 struct vm_area_struct *vma, unsigned long addr) 3065 { 3066 struct page *page = folio_page(folio, 0); 3067 struct anon_vma *anon_vma = folio_anon_vma(folio); 3068 struct folio *new_folio; 3069 3070 if (folio_test_large(folio)) 3071 return folio; 3072 3073 if (folio_test_ksm(folio)) { 3074 if (folio_stable_node(folio) && 3075 !(ksm_run & KSM_RUN_UNMERGE)) 3076 return folio; /* no need to copy it */ 3077 } else if (!anon_vma) { 3078 return folio; /* no need to copy it */ 3079 } else if (folio->index == linear_page_index(vma, addr) && 3080 anon_vma->root == vma->anon_vma->root) { 3081 return folio; /* still no need to copy it */ 3082 } 3083 if (PageHWPoison(page)) 3084 return ERR_PTR(-EHWPOISON); 3085 if (!folio_test_uptodate(folio)) 3086 return folio; /* let do_swap_page report the error */ 3087 3088 new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr); 3089 if (new_folio && 3090 mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) { 3091 folio_put(new_folio); 3092 new_folio = NULL; 3093 } 3094 if 
(new_folio) { 3095 if (copy_mc_user_highpage(folio_page(new_folio, 0), page, 3096 addr, vma)) { 3097 folio_put(new_folio); 3098 return ERR_PTR(-EHWPOISON); 3099 } 3100 folio_set_dirty(new_folio); 3101 __folio_mark_uptodate(new_folio); 3102 __folio_set_locked(new_folio); 3103 #ifdef CONFIG_SWAP 3104 count_vm_event(KSM_SWPIN_COPY); 3105 #endif 3106 } 3107 3108 return new_folio; 3109 } 3110 3111 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc) 3112 { 3113 struct ksm_stable_node *stable_node; 3114 struct ksm_rmap_item *rmap_item; 3115 int search_new_forks = 0; 3116 3117 VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio); 3118 3119 /* 3120 * Rely on the page lock to protect against concurrent modifications 3121 * to that page's node of the stable tree. 3122 */ 3123 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 3124 3125 stable_node = folio_stable_node(folio); 3126 if (!stable_node) 3127 return; 3128 again: 3129 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { 3130 struct anon_vma *anon_vma = rmap_item->anon_vma; 3131 struct anon_vma_chain *vmac; 3132 struct vm_area_struct *vma; 3133 3134 cond_resched(); 3135 if (!anon_vma_trylock_read(anon_vma)) { 3136 if (rwc->try_lock) { 3137 rwc->contended = true; 3138 return; 3139 } 3140 anon_vma_lock_read(anon_vma); 3141 } 3142 anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, 3143 0, ULONG_MAX) { 3144 unsigned long addr; 3145 3146 cond_resched(); 3147 vma = vmac->vma; 3148 3149 /* Ignore the stable/unstable/sqnr flags */ 3150 addr = rmap_item->address & PAGE_MASK; 3151 3152 if (addr < vma->vm_start || addr >= vma->vm_end) 3153 continue; 3154 /* 3155 * Initially we examine only the vma which covers this 3156 * rmap_item; but later, if there is still work to do, 3157 * we examine covering vmas in other mms: in case they 3158 * were forked from the original since ksmd passed. 3159 */ 3160 if ((rmap_item->mm == vma->vm_mm) == search_new_forks) 3161 continue; 3162 3163 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 3164 continue; 3165 3166 if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) { 3167 anon_vma_unlock_read(anon_vma); 3168 return; 3169 } 3170 if (rwc->done && rwc->done(folio)) { 3171 anon_vma_unlock_read(anon_vma); 3172 return; 3173 } 3174 } 3175 anon_vma_unlock_read(anon_vma); 3176 } 3177 if (!search_new_forks++) 3178 goto again; 3179 } 3180 3181 #ifdef CONFIG_MEMORY_FAILURE 3182 /* 3183 * Collect processes when the error hit an ksm page. 
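* For each rmap_item of the folio's stable node, walk the anon_vma interval tree and add every task whose mm maps the page (as filtered by task_early_kill()) to the to_kill list via add_to_kill_ksm().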
3184 */ 3185 void collect_procs_ksm(const struct folio *folio, const struct page *page, 3186 struct list_head *to_kill, int force_early) 3187 { 3188 struct ksm_stable_node *stable_node; 3189 struct ksm_rmap_item *rmap_item; 3190 struct vm_area_struct *vma; 3191 struct task_struct *tsk; 3192 3193 stable_node = folio_stable_node(folio); 3194 if (!stable_node) 3195 return; 3196 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { 3197 struct anon_vma *av = rmap_item->anon_vma; 3198 3199 anon_vma_lock_read(av); 3200 rcu_read_lock(); 3201 for_each_process(tsk) { 3202 struct anon_vma_chain *vmac; 3203 unsigned long addr; 3204 struct task_struct *t = 3205 task_early_kill(tsk, force_early); 3206 if (!t) 3207 continue; 3208 anon_vma_interval_tree_foreach(vmac, &av->rb_root, 0, 3209 ULONG_MAX) 3210 { 3211 vma = vmac->vma; 3212 if (vma->vm_mm == t->mm) { 3213 addr = rmap_item->address & PAGE_MASK; 3214 add_to_kill_ksm(t, page, vma, to_kill, 3215 addr); 3216 } 3217 } 3218 } 3219 rcu_read_unlock(); 3220 anon_vma_unlock_read(av); 3221 } 3222 } 3223 #endif 3224 3225 #ifdef CONFIG_MIGRATION 3226 void folio_migrate_ksm(struct folio *newfolio, struct folio *folio) 3227 { 3228 struct ksm_stable_node *stable_node; 3229 3230 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 3231 VM_BUG_ON_FOLIO(!folio_test_locked(newfolio), newfolio); 3232 VM_BUG_ON_FOLIO(newfolio->mapping != folio->mapping, newfolio); 3233 3234 stable_node = folio_stable_node(folio); 3235 if (stable_node) { 3236 VM_BUG_ON_FOLIO(stable_node->kpfn != folio_pfn(folio), folio); 3237 stable_node->kpfn = folio_pfn(newfolio); 3238 /* 3239 * newfolio->mapping was set in advance; now we need smp_wmb() 3240 * to make sure that the new stable_node->kpfn is visible 3241 * to ksm_get_folio() before it can see that folio->mapping 3242 * has gone stale (or that the swapcache flag has been cleared). 
3243 */ 3244 smp_wmb(); 3245 folio_set_stable_node(folio, NULL); 3246 } 3247 } 3248 #endif /* CONFIG_MIGRATION */ 3249 3250 #ifdef CONFIG_MEMORY_HOTREMOVE 3251 static void wait_while_offlining(void) 3252 { 3253 while (ksm_run & KSM_RUN_OFFLINE) { 3254 mutex_unlock(&ksm_thread_mutex); 3255 wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE), 3256 TASK_UNINTERRUPTIBLE); 3257 mutex_lock(&ksm_thread_mutex); 3258 } 3259 } 3260 3261 static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node, 3262 unsigned long start_pfn, 3263 unsigned long end_pfn) 3264 { 3265 if (stable_node->kpfn >= start_pfn && 3266 stable_node->kpfn < end_pfn) { 3267 /* 3268 * Don't ksm_get_folio, page has already gone: 3269 * which is why we keep kpfn instead of page* 3270 */ 3271 remove_node_from_stable_tree(stable_node); 3272 return true; 3273 } 3274 return false; 3275 } 3276 3277 static bool stable_node_chain_remove_range(struct ksm_stable_node *stable_node, 3278 unsigned long start_pfn, 3279 unsigned long end_pfn, 3280 struct rb_root *root) 3281 { 3282 struct ksm_stable_node *dup; 3283 struct hlist_node *hlist_safe; 3284 3285 if (!is_stable_node_chain(stable_node)) { 3286 VM_BUG_ON(is_stable_node_dup(stable_node)); 3287 return stable_node_dup_remove_range(stable_node, start_pfn, 3288 end_pfn); 3289 } 3290 3291 hlist_for_each_entry_safe(dup, hlist_safe, 3292 &stable_node->hlist, hlist_dup) { 3293 VM_BUG_ON(!is_stable_node_dup(dup)); 3294 stable_node_dup_remove_range(dup, start_pfn, end_pfn); 3295 } 3296 if (hlist_empty(&stable_node->hlist)) { 3297 free_stable_node_chain(stable_node, root); 3298 return true; /* notify caller that tree was rebalanced */ 3299 } else 3300 return false; 3301 } 3302 3303 static void ksm_check_stable_tree(unsigned long start_pfn, 3304 unsigned long end_pfn) 3305 { 3306 struct ksm_stable_node *stable_node, *next; 3307 struct rb_node *node; 3308 int nid; 3309 3310 for (nid = 0; nid < ksm_nr_node_ids; nid++) { 3311 node = rb_first(root_stable_tree + nid); 3312 while (node) { 3313 stable_node = rb_entry(node, struct ksm_stable_node, node); 3314 if (stable_node_chain_remove_range(stable_node, 3315 start_pfn, end_pfn, 3316 root_stable_tree + 3317 nid)) 3318 node = rb_first(root_stable_tree + nid); 3319 else 3320 node = rb_next(node); 3321 cond_resched(); 3322 } 3323 } 3324 list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) { 3325 if (stable_node->kpfn >= start_pfn && 3326 stable_node->kpfn < end_pfn) 3327 remove_node_from_stable_tree(stable_node); 3328 cond_resched(); 3329 } 3330 } 3331 3332 static int ksm_memory_callback(struct notifier_block *self, 3333 unsigned long action, void *arg) 3334 { 3335 struct memory_notify *mn = arg; 3336 3337 switch (action) { 3338 case MEM_GOING_OFFLINE: 3339 /* 3340 * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items() 3341 * and remove_all_stable_nodes() while memory is going offline: 3342 * it is unsafe for them to touch the stable tree at this time. 3343 * But unmerge_ksm_pages(), rmap lookups and other entry points 3344 * which do not need the ksm_thread_mutex are all safe. 3345 */ 3346 mutex_lock(&ksm_thread_mutex); 3347 ksm_run |= KSM_RUN_OFFLINE; 3348 mutex_unlock(&ksm_thread_mutex); 3349 break; 3350 3351 case MEM_OFFLINE: 3352 /* 3353 * Most of the work is done by page migration; but there might 3354 * be a few stable_nodes left over, still pointing to struct 3355 * pages which have been offlined: prune those from the tree, 3356 * otherwise ksm_get_folio() might later try to access a 3357 * non-existent struct page. 
3358 */ 3359 ksm_check_stable_tree(mn->start_pfn, 3360 mn->start_pfn + mn->nr_pages); 3361 fallthrough; 3362 case MEM_CANCEL_OFFLINE: 3363 mutex_lock(&ksm_thread_mutex); 3364 ksm_run &= ~KSM_RUN_OFFLINE; 3365 mutex_unlock(&ksm_thread_mutex); 3366 3367 smp_mb(); /* wake_up_bit advises this */ 3368 wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE)); 3369 break; 3370 } 3371 return NOTIFY_OK; 3372 } 3373 #else 3374 static void wait_while_offlining(void) 3375 { 3376 } 3377 #endif /* CONFIG_MEMORY_HOTREMOVE */ 3378 3379 #ifdef CONFIG_PROC_FS 3380 /* 3381 * The process is mergeable only if any VMA is currently 3382 * applicable to KSM. 3383 * 3384 * The mmap lock must be held in read mode. 3385 */ 3386 bool ksm_process_mergeable(struct mm_struct *mm) 3387 { 3388 struct vm_area_struct *vma; 3389 3390 mmap_assert_locked(mm); 3391 VMA_ITERATOR(vmi, mm, 0); 3392 for_each_vma(vmi, vma) 3393 if (vma->vm_flags & VM_MERGEABLE) 3394 return true; 3395 3396 return false; 3397 } 3398 3399 long ksm_process_profit(struct mm_struct *mm) 3400 { 3401 return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE - 3402 mm->ksm_rmap_items * sizeof(struct ksm_rmap_item); 3403 } 3404 #endif /* CONFIG_PROC_FS */ 3405 3406 #ifdef CONFIG_SYSFS 3407 /* 3408 * This all compiles without CONFIG_SYSFS, but is a waste of space. 3409 */ 3410 3411 #define KSM_ATTR_RO(_name) \ 3412 static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 3413 #define KSM_ATTR(_name) \ 3414 static struct kobj_attribute _name##_attr = __ATTR_RW(_name) 3415 3416 static ssize_t sleep_millisecs_show(struct kobject *kobj, 3417 struct kobj_attribute *attr, char *buf) 3418 { 3419 return sysfs_emit(buf, "%u\n", ksm_thread_sleep_millisecs); 3420 } 3421 3422 static ssize_t sleep_millisecs_store(struct kobject *kobj, 3423 struct kobj_attribute *attr, 3424 const char *buf, size_t count) 3425 { 3426 unsigned int msecs; 3427 int err; 3428 3429 err = kstrtouint(buf, 10, &msecs); 3430 if (err) 3431 return -EINVAL; 3432 3433 ksm_thread_sleep_millisecs = msecs; 3434 wake_up_interruptible(&ksm_iter_wait); 3435 3436 return count; 3437 } 3438 KSM_ATTR(sleep_millisecs); 3439 3440 static ssize_t pages_to_scan_show(struct kobject *kobj, 3441 struct kobj_attribute *attr, char *buf) 3442 { 3443 return sysfs_emit(buf, "%u\n", ksm_thread_pages_to_scan); 3444 } 3445 3446 static ssize_t pages_to_scan_store(struct kobject *kobj, 3447 struct kobj_attribute *attr, 3448 const char *buf, size_t count) 3449 { 3450 unsigned int nr_pages; 3451 int err; 3452 3453 if (ksm_advisor != KSM_ADVISOR_NONE) 3454 return -EINVAL; 3455 3456 err = kstrtouint(buf, 10, &nr_pages); 3457 if (err) 3458 return -EINVAL; 3459 3460 ksm_thread_pages_to_scan = nr_pages; 3461 3462 return count; 3463 } 3464 KSM_ATTR(pages_to_scan); 3465 3466 static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr, 3467 char *buf) 3468 { 3469 return sysfs_emit(buf, "%lu\n", ksm_run); 3470 } 3471 3472 static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, 3473 const char *buf, size_t count) 3474 { 3475 unsigned int flags; 3476 int err; 3477 3478 err = kstrtouint(buf, 10, &flags); 3479 if (err) 3480 return -EINVAL; 3481 if (flags > KSM_RUN_UNMERGE) 3482 return -EINVAL; 3483 3484 /* 3485 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running. 3486 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items, 3487 * breaking COW to free the pages_shared (but leaves mm_slots 3488 * on the list for when ksmd may be set running again). 
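* (KSM_RUN_OFFLINE is managed internally by the memory hotremove notifier, see ksm_memory_callback(); values above KSM_RUN_UNMERGE are rejected by this store.)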
3489 */ 3490 3491 mutex_lock(&ksm_thread_mutex); 3492 wait_while_offlining(); 3493 if (ksm_run != flags) { 3494 ksm_run = flags; 3495 if (flags & KSM_RUN_UNMERGE) { 3496 set_current_oom_origin(); 3497 err = unmerge_and_remove_all_rmap_items(); 3498 clear_current_oom_origin(); 3499 if (err) { 3500 ksm_run = KSM_RUN_STOP; 3501 count = err; 3502 } 3503 } 3504 } 3505 mutex_unlock(&ksm_thread_mutex); 3506 3507 if (flags & KSM_RUN_MERGE) 3508 wake_up_interruptible(&ksm_thread_wait); 3509 3510 return count; 3511 } 3512 KSM_ATTR(run); 3513 3514 #ifdef CONFIG_NUMA 3515 static ssize_t merge_across_nodes_show(struct kobject *kobj, 3516 struct kobj_attribute *attr, char *buf) 3517 { 3518 return sysfs_emit(buf, "%u\n", ksm_merge_across_nodes); 3519 } 3520 3521 static ssize_t merge_across_nodes_store(struct kobject *kobj, 3522 struct kobj_attribute *attr, 3523 const char *buf, size_t count) 3524 { 3525 int err; 3526 unsigned long knob; 3527 3528 err = kstrtoul(buf, 10, &knob); 3529 if (err) 3530 return err; 3531 if (knob > 1) 3532 return -EINVAL; 3533 3534 mutex_lock(&ksm_thread_mutex); 3535 wait_while_offlining(); 3536 if (ksm_merge_across_nodes != knob) { 3537 if (ksm_pages_shared || remove_all_stable_nodes()) 3538 err = -EBUSY; 3539 else if (root_stable_tree == one_stable_tree) { 3540 struct rb_root *buf; 3541 /* 3542 * This is the first time that we switch away from the 3543 * default of merging across nodes: must now allocate 3544 * a buffer to hold as many roots as may be needed. 3545 * Allocate stable and unstable together: 3546 * MAXSMP NODES_SHIFT 10 will use 16kB. 3547 */ 3548 buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf), 3549 GFP_KERNEL); 3550 /* Let us assume that RB_ROOT is NULL is zero */ 3551 if (!buf) 3552 err = -ENOMEM; 3553 else { 3554 root_stable_tree = buf; 3555 root_unstable_tree = buf + nr_node_ids; 3556 /* Stable tree is empty but not the unstable */ 3557 root_unstable_tree[0] = one_unstable_tree[0]; 3558 } 3559 } 3560 if (!err) { 3561 ksm_merge_across_nodes = knob; 3562 ksm_nr_node_ids = knob ? 1 : nr_node_ids; 3563 } 3564 } 3565 mutex_unlock(&ksm_thread_mutex); 3566 3567 return err ? err : count; 3568 } 3569 KSM_ATTR(merge_across_nodes); 3570 #endif 3571 3572 static ssize_t use_zero_pages_show(struct kobject *kobj, 3573 struct kobj_attribute *attr, char *buf) 3574 { 3575 return sysfs_emit(buf, "%u\n", ksm_use_zero_pages); 3576 } 3577 static ssize_t use_zero_pages_store(struct kobject *kobj, 3578 struct kobj_attribute *attr, 3579 const char *buf, size_t count) 3580 { 3581 int err; 3582 bool value; 3583 3584 err = kstrtobool(buf, &value); 3585 if (err) 3586 return -EINVAL; 3587 3588 ksm_use_zero_pages = value; 3589 3590 return count; 3591 } 3592 KSM_ATTR(use_zero_pages); 3593 3594 static ssize_t max_page_sharing_show(struct kobject *kobj, 3595 struct kobj_attribute *attr, char *buf) 3596 { 3597 return sysfs_emit(buf, "%u\n", ksm_max_page_sharing); 3598 } 3599 3600 static ssize_t max_page_sharing_store(struct kobject *kobj, 3601 struct kobj_attribute *attr, 3602 const char *buf, size_t count) 3603 { 3604 int err; 3605 int knob; 3606 3607 err = kstrtoint(buf, 10, &knob); 3608 if (err) 3609 return err; 3610 /* 3611 * When a KSM page is created it is shared by 2 mappings. This 3612 * being a signed comparison, it implicitly verifies it's not 3613 * negative. 
3614 */ 3615 if (knob < 2) 3616 return -EINVAL; 3617 3618 if (READ_ONCE(ksm_max_page_sharing) == knob) 3619 return count; 3620 3621 mutex_lock(&ksm_thread_mutex); 3622 wait_while_offlining(); 3623 if (ksm_max_page_sharing != knob) { 3624 if (ksm_pages_shared || remove_all_stable_nodes()) 3625 err = -EBUSY; 3626 else 3627 ksm_max_page_sharing = knob; 3628 } 3629 mutex_unlock(&ksm_thread_mutex); 3630 3631 return err ? err : count; 3632 } 3633 KSM_ATTR(max_page_sharing); 3634 3635 static ssize_t pages_scanned_show(struct kobject *kobj, 3636 struct kobj_attribute *attr, char *buf) 3637 { 3638 return sysfs_emit(buf, "%lu\n", ksm_pages_scanned); 3639 } 3640 KSM_ATTR_RO(pages_scanned); 3641 3642 static ssize_t pages_shared_show(struct kobject *kobj, 3643 struct kobj_attribute *attr, char *buf) 3644 { 3645 return sysfs_emit(buf, "%lu\n", ksm_pages_shared); 3646 } 3647 KSM_ATTR_RO(pages_shared); 3648 3649 static ssize_t pages_sharing_show(struct kobject *kobj, 3650 struct kobj_attribute *attr, char *buf) 3651 { 3652 return sysfs_emit(buf, "%lu\n", ksm_pages_sharing); 3653 } 3654 KSM_ATTR_RO(pages_sharing); 3655 3656 static ssize_t pages_unshared_show(struct kobject *kobj, 3657 struct kobj_attribute *attr, char *buf) 3658 { 3659 return sysfs_emit(buf, "%lu\n", ksm_pages_unshared); 3660 } 3661 KSM_ATTR_RO(pages_unshared); 3662 3663 static ssize_t pages_volatile_show(struct kobject *kobj, 3664 struct kobj_attribute *attr, char *buf) 3665 { 3666 long ksm_pages_volatile; 3667 3668 ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared 3669 - ksm_pages_sharing - ksm_pages_unshared; 3670 /* 3671 * It was not worth any locking to calculate that statistic, 3672 * but it might therefore sometimes be negative: conceal that. 3673 */ 3674 if (ksm_pages_volatile < 0) 3675 ksm_pages_volatile = 0; 3676 return sysfs_emit(buf, "%ld\n", ksm_pages_volatile); 3677 } 3678 KSM_ATTR_RO(pages_volatile); 3679 3680 static ssize_t pages_skipped_show(struct kobject *kobj, 3681 struct kobj_attribute *attr, char *buf) 3682 { 3683 return sysfs_emit(buf, "%lu\n", ksm_pages_skipped); 3684 } 3685 KSM_ATTR_RO(pages_skipped); 3686 3687 static ssize_t ksm_zero_pages_show(struct kobject *kobj, 3688 struct kobj_attribute *attr, char *buf) 3689 { 3690 return sysfs_emit(buf, "%ld\n", atomic_long_read(&ksm_zero_pages)); 3691 } 3692 KSM_ATTR_RO(ksm_zero_pages); 3693 3694 static ssize_t general_profit_show(struct kobject *kobj, 3695 struct kobj_attribute *attr, char *buf) 3696 { 3697 long general_profit; 3698 3699 general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE - 3700 ksm_rmap_items * sizeof(struct ksm_rmap_item); 3701 3702 return sysfs_emit(buf, "%ld\n", general_profit); 3703 } 3704 KSM_ATTR_RO(general_profit); 3705 3706 static ssize_t stable_node_dups_show(struct kobject *kobj, 3707 struct kobj_attribute *attr, char *buf) 3708 { 3709 return sysfs_emit(buf, "%lu\n", ksm_stable_node_dups); 3710 } 3711 KSM_ATTR_RO(stable_node_dups); 3712 3713 static ssize_t stable_node_chains_show(struct kobject *kobj, 3714 struct kobj_attribute *attr, char *buf) 3715 { 3716 return sysfs_emit(buf, "%lu\n", ksm_stable_node_chains); 3717 } 3718 KSM_ATTR_RO(stable_node_chains); 3719 3720 static ssize_t 3721 stable_node_chains_prune_millisecs_show(struct kobject *kobj, 3722 struct kobj_attribute *attr, 3723 char *buf) 3724 { 3725 return sysfs_emit(buf, "%u\n", ksm_stable_node_chains_prune_millisecs); 3726 } 3727 3728 static ssize_t 3729 stable_node_chains_prune_millisecs_store(struct kobject *kobj, 3730 struct kobj_attribute 

static ssize_t general_profit_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	long general_profit;

	general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE -
				ksm_rmap_items * sizeof(struct ksm_rmap_item);

	return sysfs_emit(buf, "%ld\n", general_profit);
}
KSM_ATTR_RO(general_profit);

static ssize_t stable_node_dups_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_stable_node_dups);
}
KSM_ATTR_RO(stable_node_dups);

static ssize_t stable_node_chains_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_stable_node_chains);
}
KSM_ATTR_RO(stable_node_chains);

static ssize_t
stable_node_chains_prune_millisecs_show(struct kobject *kobj,
					struct kobj_attribute *attr,
					char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_stable_node_chains_prune_millisecs);
}

static ssize_t
stable_node_chains_prune_millisecs_store(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	ksm_stable_node_chains_prune_millisecs = msecs;

	return count;
}
KSM_ATTR(stable_node_chains_prune_millisecs);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_scan.seqnr);
}
KSM_ATTR_RO(full_scans);

static ssize_t smart_scan_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_smart_scan);
}

static ssize_t smart_scan_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t count)
{
	int err;
	bool value;

	err = kstrtobool(buf, &value);
	if (err)
		return -EINVAL;

	ksm_smart_scan = value;
	return count;
}
KSM_ATTR(smart_scan);
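
/*
 * advisor_mode follows the usual sysfs convention of bracketing the active
 * choice: "[none] scan-time" means no advisor is in use, "none [scan-time]"
 * means the scan-time advisor is adjusting pages_to_scan automatically.
 */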

static ssize_t advisor_mode_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
		output = "none [scan-time]";
	else
		output = "[none] scan-time";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t advisor_mode_store(struct kobject *kobj,
				  struct kobj_attribute *attr, const char *buf,
				  size_t count)
{
	enum ksm_advisor_type curr_advisor = ksm_advisor;

	if (sysfs_streq("scan-time", buf))
		ksm_advisor = KSM_ADVISOR_SCAN_TIME;
	else if (sysfs_streq("none", buf))
		ksm_advisor = KSM_ADVISOR_NONE;
	else
		return -EINVAL;

	/* Set advisor default values */
	if (curr_advisor != ksm_advisor)
		set_advisor_defaults();

	return count;
}
KSM_ATTR(advisor_mode);

static ssize_t advisor_max_cpu_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_advisor_max_cpu);
}

static ssize_t advisor_max_cpu_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int err;
	unsigned long value;

	err = kstrtoul(buf, 10, &value);
	if (err)
		return -EINVAL;

	ksm_advisor_max_cpu = value;
	return count;
}
KSM_ATTR(advisor_max_cpu);

static ssize_t advisor_min_pages_to_scan_show(struct kobject *kobj,
					      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_advisor_min_pages_to_scan);
}

static ssize_t advisor_min_pages_to_scan_store(struct kobject *kobj,
					       struct kobj_attribute *attr,
					       const char *buf, size_t count)
{
	int err;
	unsigned long value;

	err = kstrtoul(buf, 10, &value);
	if (err)
		return -EINVAL;

	ksm_advisor_min_pages_to_scan = value;
	return count;
}
KSM_ATTR(advisor_min_pages_to_scan);

static ssize_t advisor_max_pages_to_scan_show(struct kobject *kobj,
					      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_advisor_max_pages_to_scan);
}

static ssize_t advisor_max_pages_to_scan_store(struct kobject *kobj,
					       struct kobj_attribute *attr,
					       const char *buf, size_t count)
{
	int err;
	unsigned long value;

	err = kstrtoul(buf, 10, &value);
	if (err)
		return -EINVAL;

	ksm_advisor_max_pages_to_scan = value;
	return count;
}
KSM_ATTR(advisor_max_pages_to_scan);

static ssize_t advisor_target_scan_time_show(struct kobject *kobj,
					     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_advisor_target_scan_time);
}

static ssize_t advisor_target_scan_time_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long value;

	err = kstrtoul(buf, 10, &value);
	if (err)
		return -EINVAL;
	if (value < 1)
		return -EINVAL;

	ksm_advisor_target_scan_time = value;
	return count;
}
KSM_ATTR(advisor_target_scan_time);

static struct attribute *ksm_attrs[] = {
	&sleep_millisecs_attr.attr,
	&pages_to_scan_attr.attr,
	&run_attr.attr,
	&pages_scanned_attr.attr,
	&pages_shared_attr.attr,
	&pages_sharing_attr.attr,
	&pages_unshared_attr.attr,
	&pages_volatile_attr.attr,
	&pages_skipped_attr.attr,
	&ksm_zero_pages_attr.attr,
	&full_scans_attr.attr,
#ifdef CONFIG_NUMA
	&merge_across_nodes_attr.attr,
#endif
	&max_page_sharing_attr.attr,
	&stable_node_chains_attr.attr,
	&stable_node_dups_attr.attr,
	&stable_node_chains_prune_millisecs_attr.attr,
	&use_zero_pages_attr.attr,
	&general_profit_attr.attr,
	&smart_scan_attr.attr,
	&advisor_mode_attr.attr,
	&advisor_max_cpu_attr.attr,
	&advisor_min_pages_to_scan_attr.attr,
	&advisor_max_pages_to_scan_attr.attr,
	&advisor_target_scan_time_attr.attr,
	NULL,
};

static const struct attribute_group ksm_attr_group = {
	.attrs = ksm_attrs,
	.name = "ksm",
};
#endif /* CONFIG_SYSFS */

static int __init ksm_init(void)
{
	struct task_struct *ksm_thread;
	int err;

	/* The correct value depends on page size and endianness */
	zero_checksum = calc_checksum(ZERO_PAGE(0));
	/* Default to false for backwards compatibility */
	ksm_use_zero_pages = false;

	err = ksm_slab_init();
	if (err)
		goto out;

	ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
	if (IS_ERR(ksm_thread)) {
		pr_err("ksm: creating kthread failed\n");
		err = PTR_ERR(ksm_thread);
		goto out_free;
	}

#ifdef CONFIG_SYSFS
	err = sysfs_create_group(mm_kobj, &ksm_attr_group);
	if (err) {
		pr_err("ksm: register sysfs failed\n");
		kthread_stop(ksm_thread);
		goto out_free;
	}
#else
	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */

#endif /* CONFIG_SYSFS */

#ifdef CONFIG_MEMORY_HOTREMOVE
	/* The exact notifier priority (KSM_CALLBACK_PRI) is not significant */
	hotplug_memory_notifier(ksm_memory_callback, KSM_CALLBACK_PRI);
#endif
	return 0;

out_free:
	ksm_slab_free();
out:
	return err;
}
subsys_initcall(ksm_init);
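
/*
 * Note: with CONFIG_SYSFS the knobs above are exposed under
 * /sys/kernel/mm/ksm/ and merging stays off until "run" is set; without
 * sysfs there is no way for userspace to start ksmd, so ksm_init() enables
 * merging unconditionally.
 */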