1 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 2 3 #include <linux/mm.h> 4 #include <linux/sched.h> 5 #include <linux/mmu_notifier.h> 6 #include <linux/rmap.h> 7 #include <linux/swap.h> 8 #include <linux/mm_inline.h> 9 #include <linux/kthread.h> 10 #include <linux/khugepaged.h> 11 #include <linux/freezer.h> 12 #include <linux/mman.h> 13 #include <linux/hashtable.h> 14 #include <linux/userfaultfd_k.h> 15 #include <linux/page_idle.h> 16 #include <linux/swapops.h> 17 #include <linux/shmem_fs.h> 18 19 #include <asm/tlb.h> 20 #include <asm/pgalloc.h> 21 #include "internal.h" 22 23 enum scan_result { 24 SCAN_FAIL, 25 SCAN_SUCCEED, 26 SCAN_PMD_NULL, 27 SCAN_EXCEED_NONE_PTE, 28 SCAN_PTE_NON_PRESENT, 29 SCAN_PAGE_RO, 30 SCAN_LACK_REFERENCED_PAGE, 31 SCAN_PAGE_NULL, 32 SCAN_SCAN_ABORT, 33 SCAN_PAGE_COUNT, 34 SCAN_PAGE_LRU, 35 SCAN_PAGE_LOCK, 36 SCAN_PAGE_ANON, 37 SCAN_PAGE_COMPOUND, 38 SCAN_ANY_PROCESS, 39 SCAN_VMA_NULL, 40 SCAN_VMA_CHECK, 41 SCAN_ADDRESS_RANGE, 42 SCAN_SWAP_CACHE_PAGE, 43 SCAN_DEL_PAGE_LRU, 44 SCAN_ALLOC_HUGE_PAGE_FAIL, 45 SCAN_CGROUP_CHARGE_FAIL, 46 SCAN_EXCEED_SWAP_PTE, 47 SCAN_TRUNCATED, 48 }; 49 50 #define CREATE_TRACE_POINTS 51 #include <trace/events/huge_memory.h> 52 53 /* default scan 8*512 pte (or vmas) every 30 second */ 54 static unsigned int khugepaged_pages_to_scan __read_mostly; 55 static unsigned int khugepaged_pages_collapsed; 56 static unsigned int khugepaged_full_scans; 57 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000; 58 /* during fragmentation poll the hugepage allocator once every minute */ 59 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000; 60 static unsigned long khugepaged_sleep_expire; 61 static DEFINE_SPINLOCK(khugepaged_mm_lock); 62 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait); 63 /* 64 * default collapse hugepages if there is at least one pte mapped like 65 * it would have happened if the vma was large enough during page 66 * fault. 67 */ 68 static unsigned int khugepaged_max_ptes_none __read_mostly; 69 static unsigned int khugepaged_max_ptes_swap __read_mostly; 70 71 #define MM_SLOTS_HASH_BITS 10 72 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS); 73 74 static struct kmem_cache *mm_slot_cache __read_mostly; 75 76 /** 77 * struct mm_slot - hash lookup from mm to mm_slot 78 * @hash: hash collision list 79 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head 80 * @mm: the mm that this information is valid for 81 */ 82 struct mm_slot { 83 struct hlist_node hash; 84 struct list_head mm_node; 85 struct mm_struct *mm; 86 }; 87 88 /** 89 * struct khugepaged_scan - cursor for scanning 90 * @mm_head: the head of the mm list to scan 91 * @mm_slot: the current mm_slot we are scanning 92 * @address: the next address inside that to be scanned 93 * 94 * There is only the one khugepaged_scan instance of this cursor structure. 95 */ 96 struct khugepaged_scan { 97 struct list_head mm_head; 98 struct mm_slot *mm_slot; 99 unsigned long address; 100 }; 101 102 static struct khugepaged_scan khugepaged_scan = { 103 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head), 104 }; 105 106 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj, 107 struct kobj_attribute *attr, 108 char *buf) 109 { 110 return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs); 111 } 112 113 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj, 114 struct kobj_attribute *attr, 115 const char *buf, size_t count) 116 { 117 unsigned long msecs; 118 int err; 119 120 err = kstrtoul(buf, 10, &msecs); 121 if (err || msecs > UINT_MAX) 122 return -EINVAL; 123 124 khugepaged_scan_sleep_millisecs = msecs; 125 khugepaged_sleep_expire = 0; 126 wake_up_interruptible(&khugepaged_wait); 127 128 return count; 129 } 130 static struct kobj_attribute scan_sleep_millisecs_attr = 131 __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show, 132 scan_sleep_millisecs_store); 133 134 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj, 135 struct kobj_attribute *attr, 136 char *buf) 137 { 138 return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs); 139 } 140 141 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj, 142 struct kobj_attribute *attr, 143 const char *buf, size_t count) 144 { 145 unsigned long msecs; 146 int err; 147 148 err = kstrtoul(buf, 10, &msecs); 149 if (err || msecs > UINT_MAX) 150 return -EINVAL; 151 152 khugepaged_alloc_sleep_millisecs = msecs; 153 khugepaged_sleep_expire = 0; 154 wake_up_interruptible(&khugepaged_wait); 155 156 return count; 157 } 158 static struct kobj_attribute alloc_sleep_millisecs_attr = 159 __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show, 160 alloc_sleep_millisecs_store); 161 162 static ssize_t pages_to_scan_show(struct kobject *kobj, 163 struct kobj_attribute *attr, 164 char *buf) 165 { 166 return sprintf(buf, "%u\n", khugepaged_pages_to_scan); 167 } 168 static ssize_t pages_to_scan_store(struct kobject *kobj, 169 struct kobj_attribute *attr, 170 const char *buf, size_t count) 171 { 172 int err; 173 unsigned long pages; 174 175 err = kstrtoul(buf, 10, &pages); 176 if (err || !pages || pages > UINT_MAX) 177 return -EINVAL; 178 179 khugepaged_pages_to_scan = pages; 180 181 return count; 182 } 183 static struct kobj_attribute pages_to_scan_attr = 184 __ATTR(pages_to_scan, 0644, pages_to_scan_show, 185 pages_to_scan_store); 186 187 static ssize_t pages_collapsed_show(struct kobject *kobj, 188 struct kobj_attribute *attr, 189 char *buf) 190 { 191 return sprintf(buf, "%u\n", khugepaged_pages_collapsed); 192 } 193 static struct kobj_attribute pages_collapsed_attr = 194 __ATTR_RO(pages_collapsed); 195 196 static ssize_t full_scans_show(struct kobject *kobj, 197 struct kobj_attribute *attr, 198 char *buf) 199 { 200 return sprintf(buf, "%u\n", khugepaged_full_scans); 201 } 202 static struct kobj_attribute full_scans_attr = 203 __ATTR_RO(full_scans); 204 205 static ssize_t khugepaged_defrag_show(struct kobject *kobj, 206 struct kobj_attribute *attr, char *buf) 207 { 208 return single_hugepage_flag_show(kobj, attr, buf, 209 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); 210 } 211 static ssize_t khugepaged_defrag_store(struct kobject *kobj, 212 struct kobj_attribute *attr, 213 const char *buf, size_t count) 214 { 215 return single_hugepage_flag_store(kobj, attr, buf, count, 216 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); 217 } 218 static struct kobj_attribute khugepaged_defrag_attr = 219 __ATTR(defrag, 0644, khugepaged_defrag_show, 220 khugepaged_defrag_store); 221 222 /* 223 * max_ptes_none controls if khugepaged should collapse hugepages over 224 * any unmapped ptes in turn potentially increasing the memory 225 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not 226 * reduce the available free memory in the system as it 227 * runs. Increasing max_ptes_none will instead potentially reduce the 228 * free memory in the system during the khugepaged scan. 229 */ 230 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj, 231 struct kobj_attribute *attr, 232 char *buf) 233 { 234 return sprintf(buf, "%u\n", khugepaged_max_ptes_none); 235 } 236 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj, 237 struct kobj_attribute *attr, 238 const char *buf, size_t count) 239 { 240 int err; 241 unsigned long max_ptes_none; 242 243 err = kstrtoul(buf, 10, &max_ptes_none); 244 if (err || max_ptes_none > HPAGE_PMD_NR-1) 245 return -EINVAL; 246 247 khugepaged_max_ptes_none = max_ptes_none; 248 249 return count; 250 } 251 static struct kobj_attribute khugepaged_max_ptes_none_attr = 252 __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show, 253 khugepaged_max_ptes_none_store); 254 255 static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj, 256 struct kobj_attribute *attr, 257 char *buf) 258 { 259 return sprintf(buf, "%u\n", khugepaged_max_ptes_swap); 260 } 261 262 static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj, 263 struct kobj_attribute *attr, 264 const char *buf, size_t count) 265 { 266 int err; 267 unsigned long max_ptes_swap; 268 269 err = kstrtoul(buf, 10, &max_ptes_swap); 270 if (err || max_ptes_swap > HPAGE_PMD_NR-1) 271 return -EINVAL; 272 273 khugepaged_max_ptes_swap = max_ptes_swap; 274 275 return count; 276 } 277 278 static struct kobj_attribute khugepaged_max_ptes_swap_attr = 279 __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show, 280 khugepaged_max_ptes_swap_store); 281 282 static struct attribute *khugepaged_attr[] = { 283 &khugepaged_defrag_attr.attr, 284 &khugepaged_max_ptes_none_attr.attr, 285 &pages_to_scan_attr.attr, 286 &pages_collapsed_attr.attr, 287 &full_scans_attr.attr, 288 &scan_sleep_millisecs_attr.attr, 289 &alloc_sleep_millisecs_attr.attr, 290 &khugepaged_max_ptes_swap_attr.attr, 291 NULL, 292 }; 293 294 struct attribute_group khugepaged_attr_group = { 295 .attrs = khugepaged_attr, 296 .name = "khugepaged", 297 }; 298 299 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB) 300 301 int hugepage_madvise(struct vm_area_struct *vma, 302 unsigned long *vm_flags, int advice) 303 { 304 switch (advice) { 305 case MADV_HUGEPAGE: 306 #ifdef CONFIG_S390 307 /* 308 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390 309 * can't handle this properly after s390_enable_sie, so we simply 310 * ignore the madvise to prevent qemu from causing a SIGSEGV. 311 */ 312 if (mm_has_pgste(vma->vm_mm)) 313 return 0; 314 #endif 315 *vm_flags &= ~VM_NOHUGEPAGE; 316 *vm_flags |= VM_HUGEPAGE; 317 /* 318 * If the vma become good for khugepaged to scan, 319 * register it here without waiting a page fault that 320 * may not happen any time soon. 321 */ 322 if (!(*vm_flags & VM_NO_KHUGEPAGED) && 323 khugepaged_enter_vma_merge(vma, *vm_flags)) 324 return -ENOMEM; 325 break; 326 case MADV_NOHUGEPAGE: 327 *vm_flags &= ~VM_HUGEPAGE; 328 *vm_flags |= VM_NOHUGEPAGE; 329 /* 330 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning 331 * this vma even if we leave the mm registered in khugepaged if 332 * it got registered before VM_NOHUGEPAGE was set. 333 */ 334 break; 335 } 336 337 return 0; 338 } 339 340 int __init khugepaged_init(void) 341 { 342 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot", 343 sizeof(struct mm_slot), 344 __alignof__(struct mm_slot), 0, NULL); 345 if (!mm_slot_cache) 346 return -ENOMEM; 347 348 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8; 349 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1; 350 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8; 351 352 return 0; 353 } 354 355 void __init khugepaged_destroy(void) 356 { 357 kmem_cache_destroy(mm_slot_cache); 358 } 359 360 static inline struct mm_slot *alloc_mm_slot(void) 361 { 362 if (!mm_slot_cache) /* initialization failed */ 363 return NULL; 364 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL); 365 } 366 367 static inline void free_mm_slot(struct mm_slot *mm_slot) 368 { 369 kmem_cache_free(mm_slot_cache, mm_slot); 370 } 371 372 static struct mm_slot *get_mm_slot(struct mm_struct *mm) 373 { 374 struct mm_slot *mm_slot; 375 376 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm) 377 if (mm == mm_slot->mm) 378 return mm_slot; 379 380 return NULL; 381 } 382 383 static void insert_to_mm_slots_hash(struct mm_struct *mm, 384 struct mm_slot *mm_slot) 385 { 386 mm_slot->mm = mm; 387 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm); 388 } 389 390 static inline int khugepaged_test_exit(struct mm_struct *mm) 391 { 392 return atomic_read(&mm->mm_users) == 0; 393 } 394 395 int __khugepaged_enter(struct mm_struct *mm) 396 { 397 struct mm_slot *mm_slot; 398 int wakeup; 399 400 mm_slot = alloc_mm_slot(); 401 if (!mm_slot) 402 return -ENOMEM; 403 404 /* __khugepaged_exit() must not run from under us */ 405 VM_BUG_ON_MM(khugepaged_test_exit(mm), mm); 406 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { 407 free_mm_slot(mm_slot); 408 return 0; 409 } 410 411 spin_lock(&khugepaged_mm_lock); 412 insert_to_mm_slots_hash(mm, mm_slot); 413 /* 414 * Insert just behind the scanning cursor, to let the area settle 415 * down a little. 416 */ 417 wakeup = list_empty(&khugepaged_scan.mm_head); 418 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head); 419 spin_unlock(&khugepaged_mm_lock); 420 421 atomic_inc(&mm->mm_count); 422 if (wakeup) 423 wake_up_interruptible(&khugepaged_wait); 424 425 return 0; 426 } 427 428 int khugepaged_enter_vma_merge(struct vm_area_struct *vma, 429 unsigned long vm_flags) 430 { 431 unsigned long hstart, hend; 432 if (!vma->anon_vma) 433 /* 434 * Not yet faulted in so we will register later in the 435 * page fault if needed. 436 */ 437 return 0; 438 if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED)) 439 /* khugepaged not yet working on file or special mappings */ 440 return 0; 441 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 442 hend = vma->vm_end & HPAGE_PMD_MASK; 443 if (hstart < hend) 444 return khugepaged_enter(vma, vm_flags); 445 return 0; 446 } 447 448 void __khugepaged_exit(struct mm_struct *mm) 449 { 450 struct mm_slot *mm_slot; 451 int free = 0; 452 453 spin_lock(&khugepaged_mm_lock); 454 mm_slot = get_mm_slot(mm); 455 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) { 456 hash_del(&mm_slot->hash); 457 list_del(&mm_slot->mm_node); 458 free = 1; 459 } 460 spin_unlock(&khugepaged_mm_lock); 461 462 if (free) { 463 clear_bit(MMF_VM_HUGEPAGE, &mm->flags); 464 free_mm_slot(mm_slot); 465 mmdrop(mm); 466 } else if (mm_slot) { 467 /* 468 * This is required to serialize against 469 * khugepaged_test_exit() (which is guaranteed to run 470 * under mmap sem read mode). Stop here (after we 471 * return all pagetables will be destroyed) until 472 * khugepaged has finished working on the pagetables 473 * under the mmap_sem. 474 */ 475 down_write(&mm->mmap_sem); 476 up_write(&mm->mmap_sem); 477 } 478 } 479 480 static void release_pte_page(struct page *page) 481 { 482 /* 0 stands for page_is_file_cache(page) == false */ 483 dec_node_page_state(page, NR_ISOLATED_ANON + 0); 484 unlock_page(page); 485 putback_lru_page(page); 486 } 487 488 static void release_pte_pages(pte_t *pte, pte_t *_pte) 489 { 490 while (--_pte >= pte) { 491 pte_t pteval = *_pte; 492 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval))) 493 release_pte_page(pte_page(pteval)); 494 } 495 } 496 497 static int __collapse_huge_page_isolate(struct vm_area_struct *vma, 498 unsigned long address, 499 pte_t *pte) 500 { 501 struct page *page = NULL; 502 pte_t *_pte; 503 int none_or_zero = 0, result = 0, referenced = 0; 504 bool writable = false; 505 506 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; 507 _pte++, address += PAGE_SIZE) { 508 pte_t pteval = *_pte; 509 if (pte_none(pteval) || (pte_present(pteval) && 510 is_zero_pfn(pte_pfn(pteval)))) { 511 if (!userfaultfd_armed(vma) && 512 ++none_or_zero <= khugepaged_max_ptes_none) { 513 continue; 514 } else { 515 result = SCAN_EXCEED_NONE_PTE; 516 goto out; 517 } 518 } 519 if (!pte_present(pteval)) { 520 result = SCAN_PTE_NON_PRESENT; 521 goto out; 522 } 523 page = vm_normal_page(vma, address, pteval); 524 if (unlikely(!page)) { 525 result = SCAN_PAGE_NULL; 526 goto out; 527 } 528 529 VM_BUG_ON_PAGE(PageCompound(page), page); 530 VM_BUG_ON_PAGE(!PageAnon(page), page); 531 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); 532 533 /* 534 * We can do it before isolate_lru_page because the 535 * page can't be freed from under us. NOTE: PG_lock 536 * is needed to serialize against split_huge_page 537 * when invoked from the VM. 538 */ 539 if (!trylock_page(page)) { 540 result = SCAN_PAGE_LOCK; 541 goto out; 542 } 543 544 /* 545 * cannot use mapcount: can't collapse if there's a gup pin. 546 * The page must only be referenced by the scanned process 547 * and page swap cache. 548 */ 549 if (page_count(page) != 1 + !!PageSwapCache(page)) { 550 unlock_page(page); 551 result = SCAN_PAGE_COUNT; 552 goto out; 553 } 554 if (pte_write(pteval)) { 555 writable = true; 556 } else { 557 if (PageSwapCache(page) && 558 !reuse_swap_page(page, NULL)) { 559 unlock_page(page); 560 result = SCAN_SWAP_CACHE_PAGE; 561 goto out; 562 } 563 /* 564 * Page is not in the swap cache. It can be collapsed 565 * into a THP. 566 */ 567 } 568 569 /* 570 * Isolate the page to avoid collapsing an hugepage 571 * currently in use by the VM. 572 */ 573 if (isolate_lru_page(page)) { 574 unlock_page(page); 575 result = SCAN_DEL_PAGE_LRU; 576 goto out; 577 } 578 /* 0 stands for page_is_file_cache(page) == false */ 579 inc_node_page_state(page, NR_ISOLATED_ANON + 0); 580 VM_BUG_ON_PAGE(!PageLocked(page), page); 581 VM_BUG_ON_PAGE(PageLRU(page), page); 582 583 /* There should be enough young pte to collapse the page */ 584 if (pte_young(pteval) || 585 page_is_young(page) || PageReferenced(page) || 586 mmu_notifier_test_young(vma->vm_mm, address)) 587 referenced++; 588 } 589 if (likely(writable)) { 590 if (likely(referenced)) { 591 result = SCAN_SUCCEED; 592 trace_mm_collapse_huge_page_isolate(page, none_or_zero, 593 referenced, writable, result); 594 return 1; 595 } 596 } else { 597 result = SCAN_PAGE_RO; 598 } 599 600 out: 601 release_pte_pages(pte, _pte); 602 trace_mm_collapse_huge_page_isolate(page, none_or_zero, 603 referenced, writable, result); 604 return 0; 605 } 606 607 static void __collapse_huge_page_copy(pte_t *pte, struct page *page, 608 struct vm_area_struct *vma, 609 unsigned long address, 610 spinlock_t *ptl) 611 { 612 pte_t *_pte; 613 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) { 614 pte_t pteval = *_pte; 615 struct page *src_page; 616 617 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { 618 clear_user_highpage(page, address); 619 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); 620 if (is_zero_pfn(pte_pfn(pteval))) { 621 /* 622 * ptl mostly unnecessary. 623 */ 624 spin_lock(ptl); 625 /* 626 * paravirt calls inside pte_clear here are 627 * superfluous. 628 */ 629 pte_clear(vma->vm_mm, address, _pte); 630 spin_unlock(ptl); 631 } 632 } else { 633 src_page = pte_page(pteval); 634 copy_user_highpage(page, src_page, address, vma); 635 VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page); 636 release_pte_page(src_page); 637 /* 638 * ptl mostly unnecessary, but preempt has to 639 * be disabled to update the per-cpu stats 640 * inside page_remove_rmap(). 641 */ 642 spin_lock(ptl); 643 /* 644 * paravirt calls inside pte_clear here are 645 * superfluous. 646 */ 647 pte_clear(vma->vm_mm, address, _pte); 648 page_remove_rmap(src_page, false); 649 spin_unlock(ptl); 650 free_page_and_swap_cache(src_page); 651 } 652 653 address += PAGE_SIZE; 654 page++; 655 } 656 } 657 658 static void khugepaged_alloc_sleep(void) 659 { 660 DEFINE_WAIT(wait); 661 662 add_wait_queue(&khugepaged_wait, &wait); 663 freezable_schedule_timeout_interruptible( 664 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); 665 remove_wait_queue(&khugepaged_wait, &wait); 666 } 667 668 static int khugepaged_node_load[MAX_NUMNODES]; 669 670 static bool khugepaged_scan_abort(int nid) 671 { 672 int i; 673 674 /* 675 * If node_reclaim_mode is disabled, then no extra effort is made to 676 * allocate memory locally. 677 */ 678 if (!node_reclaim_mode) 679 return false; 680 681 /* If there is a count for this node already, it must be acceptable */ 682 if (khugepaged_node_load[nid]) 683 return false; 684 685 for (i = 0; i < MAX_NUMNODES; i++) { 686 if (!khugepaged_node_load[i]) 687 continue; 688 if (node_distance(nid, i) > RECLAIM_DISTANCE) 689 return true; 690 } 691 return false; 692 } 693 694 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */ 695 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void) 696 { 697 return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT; 698 } 699 700 #ifdef CONFIG_NUMA 701 static int khugepaged_find_target_node(void) 702 { 703 static int last_khugepaged_target_node = NUMA_NO_NODE; 704 int nid, target_node = 0, max_value = 0; 705 706 /* find first node with max normal pages hit */ 707 for (nid = 0; nid < MAX_NUMNODES; nid++) 708 if (khugepaged_node_load[nid] > max_value) { 709 max_value = khugepaged_node_load[nid]; 710 target_node = nid; 711 } 712 713 /* do some balance if several nodes have the same hit record */ 714 if (target_node <= last_khugepaged_target_node) 715 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES; 716 nid++) 717 if (max_value == khugepaged_node_load[nid]) { 718 target_node = nid; 719 break; 720 } 721 722 last_khugepaged_target_node = target_node; 723 return target_node; 724 } 725 726 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) 727 { 728 if (IS_ERR(*hpage)) { 729 if (!*wait) 730 return false; 731 732 *wait = false; 733 *hpage = NULL; 734 khugepaged_alloc_sleep(); 735 } else if (*hpage) { 736 put_page(*hpage); 737 *hpage = NULL; 738 } 739 740 return true; 741 } 742 743 static struct page * 744 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) 745 { 746 VM_BUG_ON_PAGE(*hpage, *hpage); 747 748 *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER); 749 if (unlikely(!*hpage)) { 750 count_vm_event(THP_COLLAPSE_ALLOC_FAILED); 751 *hpage = ERR_PTR(-ENOMEM); 752 return NULL; 753 } 754 755 prep_transhuge_page(*hpage); 756 count_vm_event(THP_COLLAPSE_ALLOC); 757 return *hpage; 758 } 759 #else 760 static int khugepaged_find_target_node(void) 761 { 762 return 0; 763 } 764 765 static inline struct page *alloc_khugepaged_hugepage(void) 766 { 767 struct page *page; 768 769 page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(), 770 HPAGE_PMD_ORDER); 771 if (page) 772 prep_transhuge_page(page); 773 return page; 774 } 775 776 static struct page *khugepaged_alloc_hugepage(bool *wait) 777 { 778 struct page *hpage; 779 780 do { 781 hpage = alloc_khugepaged_hugepage(); 782 if (!hpage) { 783 count_vm_event(THP_COLLAPSE_ALLOC_FAILED); 784 if (!*wait) 785 return NULL; 786 787 *wait = false; 788 khugepaged_alloc_sleep(); 789 } else 790 count_vm_event(THP_COLLAPSE_ALLOC); 791 } while (unlikely(!hpage) && likely(khugepaged_enabled())); 792 793 return hpage; 794 } 795 796 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) 797 { 798 if (!*hpage) 799 *hpage = khugepaged_alloc_hugepage(wait); 800 801 if (unlikely(!*hpage)) 802 return false; 803 804 return true; 805 } 806 807 static struct page * 808 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) 809 { 810 VM_BUG_ON(!*hpage); 811 812 return *hpage; 813 } 814 #endif 815 816 static bool hugepage_vma_check(struct vm_area_struct *vma) 817 { 818 if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || 819 (vma->vm_flags & VM_NOHUGEPAGE)) 820 return false; 821 if (shmem_file(vma->vm_file)) { 822 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 823 return false; 824 return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, 825 HPAGE_PMD_NR); 826 } 827 if (!vma->anon_vma || vma->vm_ops) 828 return false; 829 if (is_vma_temporary_stack(vma)) 830 return false; 831 return !(vma->vm_flags & VM_NO_KHUGEPAGED); 832 } 833 834 /* 835 * If mmap_sem temporarily dropped, revalidate vma 836 * before taking mmap_sem. 837 * Return 0 if succeeds, otherwise return none-zero 838 * value (scan code). 839 */ 840 841 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, 842 struct vm_area_struct **vmap) 843 { 844 struct vm_area_struct *vma; 845 unsigned long hstart, hend; 846 847 if (unlikely(khugepaged_test_exit(mm))) 848 return SCAN_ANY_PROCESS; 849 850 *vmap = vma = find_vma(mm, address); 851 if (!vma) 852 return SCAN_VMA_NULL; 853 854 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 855 hend = vma->vm_end & HPAGE_PMD_MASK; 856 if (address < hstart || address + HPAGE_PMD_SIZE > hend) 857 return SCAN_ADDRESS_RANGE; 858 if (!hugepage_vma_check(vma)) 859 return SCAN_VMA_CHECK; 860 return 0; 861 } 862 863 /* 864 * Bring missing pages in from swap, to complete THP collapse. 865 * Only done if khugepaged_scan_pmd believes it is worthwhile. 866 * 867 * Called and returns without pte mapped or spinlocks held, 868 * but with mmap_sem held to protect against vma changes. 869 */ 870 871 static bool __collapse_huge_page_swapin(struct mm_struct *mm, 872 struct vm_area_struct *vma, 873 unsigned long address, pmd_t *pmd, 874 int referenced) 875 { 876 pte_t pteval; 877 int swapped_in = 0, ret = 0; 878 struct fault_env fe = { 879 .vma = vma, 880 .address = address, 881 .flags = FAULT_FLAG_ALLOW_RETRY, 882 .pmd = pmd, 883 }; 884 885 /* we only decide to swapin, if there is enough young ptes */ 886 if (referenced < HPAGE_PMD_NR/2) { 887 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); 888 return false; 889 } 890 fe.pte = pte_offset_map(pmd, address); 891 for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE; 892 fe.pte++, fe.address += PAGE_SIZE) { 893 pteval = *fe.pte; 894 if (!is_swap_pte(pteval)) 895 continue; 896 swapped_in++; 897 ret = do_swap_page(&fe, pteval); 898 899 /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */ 900 if (ret & VM_FAULT_RETRY) { 901 down_read(&mm->mmap_sem); 902 if (hugepage_vma_revalidate(mm, address, &fe.vma)) { 903 /* vma is no longer available, don't continue to swapin */ 904 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); 905 return false; 906 } 907 /* check if the pmd is still valid */ 908 if (mm_find_pmd(mm, address) != pmd) 909 return false; 910 } 911 if (ret & VM_FAULT_ERROR) { 912 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); 913 return false; 914 } 915 /* pte is unmapped now, we need to map it */ 916 fe.pte = pte_offset_map(pmd, fe.address); 917 } 918 fe.pte--; 919 pte_unmap(fe.pte); 920 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1); 921 return true; 922 } 923 924 static void collapse_huge_page(struct mm_struct *mm, 925 unsigned long address, 926 struct page **hpage, 927 int node, int referenced) 928 { 929 pmd_t *pmd, _pmd; 930 pte_t *pte; 931 pgtable_t pgtable; 932 struct page *new_page; 933 spinlock_t *pmd_ptl, *pte_ptl; 934 int isolated = 0, result = 0; 935 struct mem_cgroup *memcg; 936 struct vm_area_struct *vma; 937 unsigned long mmun_start; /* For mmu_notifiers */ 938 unsigned long mmun_end; /* For mmu_notifiers */ 939 gfp_t gfp; 940 941 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 942 943 /* Only allocate from the target node */ 944 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_OTHER_NODE | __GFP_THISNODE; 945 946 /* 947 * Before allocating the hugepage, release the mmap_sem read lock. 948 * The allocation can take potentially a long time if it involves 949 * sync compaction, and we do not need to hold the mmap_sem during 950 * that. We will recheck the vma after taking it again in write mode. 951 */ 952 up_read(&mm->mmap_sem); 953 new_page = khugepaged_alloc_page(hpage, gfp, node); 954 if (!new_page) { 955 result = SCAN_ALLOC_HUGE_PAGE_FAIL; 956 goto out_nolock; 957 } 958 959 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) { 960 result = SCAN_CGROUP_CHARGE_FAIL; 961 goto out_nolock; 962 } 963 964 down_read(&mm->mmap_sem); 965 result = hugepage_vma_revalidate(mm, address, &vma); 966 if (result) { 967 mem_cgroup_cancel_charge(new_page, memcg, true); 968 up_read(&mm->mmap_sem); 969 goto out_nolock; 970 } 971 972 pmd = mm_find_pmd(mm, address); 973 if (!pmd) { 974 result = SCAN_PMD_NULL; 975 mem_cgroup_cancel_charge(new_page, memcg, true); 976 up_read(&mm->mmap_sem); 977 goto out_nolock; 978 } 979 980 /* 981 * __collapse_huge_page_swapin always returns with mmap_sem locked. 982 * If it fails, we release mmap_sem and jump out_nolock. 983 * Continuing to collapse causes inconsistency. 984 */ 985 if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) { 986 mem_cgroup_cancel_charge(new_page, memcg, true); 987 up_read(&mm->mmap_sem); 988 goto out_nolock; 989 } 990 991 up_read(&mm->mmap_sem); 992 /* 993 * Prevent all access to pagetables with the exception of 994 * gup_fast later handled by the ptep_clear_flush and the VM 995 * handled by the anon_vma lock + PG_lock. 996 */ 997 down_write(&mm->mmap_sem); 998 result = hugepage_vma_revalidate(mm, address, &vma); 999 if (result) 1000 goto out; 1001 /* check if the pmd is still valid */ 1002 if (mm_find_pmd(mm, address) != pmd) 1003 goto out; 1004 1005 anon_vma_lock_write(vma->anon_vma); 1006 1007 pte = pte_offset_map(pmd, address); 1008 pte_ptl = pte_lockptr(mm, pmd); 1009 1010 mmun_start = address; 1011 mmun_end = address + HPAGE_PMD_SIZE; 1012 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 1013 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ 1014 /* 1015 * After this gup_fast can't run anymore. This also removes 1016 * any huge TLB entry from the CPU so we won't allow 1017 * huge and small TLB entries for the same virtual address 1018 * to avoid the risk of CPU bugs in that area. 1019 */ 1020 _pmd = pmdp_collapse_flush(vma, address, pmd); 1021 spin_unlock(pmd_ptl); 1022 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1023 1024 spin_lock(pte_ptl); 1025 isolated = __collapse_huge_page_isolate(vma, address, pte); 1026 spin_unlock(pte_ptl); 1027 1028 if (unlikely(!isolated)) { 1029 pte_unmap(pte); 1030 spin_lock(pmd_ptl); 1031 BUG_ON(!pmd_none(*pmd)); 1032 /* 1033 * We can only use set_pmd_at when establishing 1034 * hugepmds and never for establishing regular pmds that 1035 * points to regular pagetables. Use pmd_populate for that 1036 */ 1037 pmd_populate(mm, pmd, pmd_pgtable(_pmd)); 1038 spin_unlock(pmd_ptl); 1039 anon_vma_unlock_write(vma->anon_vma); 1040 result = SCAN_FAIL; 1041 goto out; 1042 } 1043 1044 /* 1045 * All pages are isolated and locked so anon_vma rmap 1046 * can't run anymore. 1047 */ 1048 anon_vma_unlock_write(vma->anon_vma); 1049 1050 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl); 1051 pte_unmap(pte); 1052 __SetPageUptodate(new_page); 1053 pgtable = pmd_pgtable(_pmd); 1054 1055 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); 1056 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); 1057 1058 /* 1059 * spin_lock() below is not the equivalent of smp_wmb(), so 1060 * this is needed to avoid the copy_huge_page writes to become 1061 * visible after the set_pmd_at() write. 1062 */ 1063 smp_wmb(); 1064 1065 spin_lock(pmd_ptl); 1066 BUG_ON(!pmd_none(*pmd)); 1067 page_add_new_anon_rmap(new_page, vma, address, true); 1068 mem_cgroup_commit_charge(new_page, memcg, false, true); 1069 lru_cache_add_active_or_unevictable(new_page, vma); 1070 pgtable_trans_huge_deposit(mm, pmd, pgtable); 1071 set_pmd_at(mm, address, pmd, _pmd); 1072 update_mmu_cache_pmd(vma, address, pmd); 1073 spin_unlock(pmd_ptl); 1074 1075 *hpage = NULL; 1076 1077 khugepaged_pages_collapsed++; 1078 result = SCAN_SUCCEED; 1079 out_up_write: 1080 up_write(&mm->mmap_sem); 1081 out_nolock: 1082 trace_mm_collapse_huge_page(mm, isolated, result); 1083 return; 1084 out: 1085 mem_cgroup_cancel_charge(new_page, memcg, true); 1086 goto out_up_write; 1087 } 1088 1089 static int khugepaged_scan_pmd(struct mm_struct *mm, 1090 struct vm_area_struct *vma, 1091 unsigned long address, 1092 struct page **hpage) 1093 { 1094 pmd_t *pmd; 1095 pte_t *pte, *_pte; 1096 int ret = 0, none_or_zero = 0, result = 0, referenced = 0; 1097 struct page *page = NULL; 1098 unsigned long _address; 1099 spinlock_t *ptl; 1100 int node = NUMA_NO_NODE, unmapped = 0; 1101 bool writable = false; 1102 1103 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 1104 1105 pmd = mm_find_pmd(mm, address); 1106 if (!pmd) { 1107 result = SCAN_PMD_NULL; 1108 goto out; 1109 } 1110 1111 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load)); 1112 pte = pte_offset_map_lock(mm, pmd, address, &ptl); 1113 for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR; 1114 _pte++, _address += PAGE_SIZE) { 1115 pte_t pteval = *_pte; 1116 if (is_swap_pte(pteval)) { 1117 if (++unmapped <= khugepaged_max_ptes_swap) { 1118 continue; 1119 } else { 1120 result = SCAN_EXCEED_SWAP_PTE; 1121 goto out_unmap; 1122 } 1123 } 1124 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { 1125 if (!userfaultfd_armed(vma) && 1126 ++none_or_zero <= khugepaged_max_ptes_none) { 1127 continue; 1128 } else { 1129 result = SCAN_EXCEED_NONE_PTE; 1130 goto out_unmap; 1131 } 1132 } 1133 if (!pte_present(pteval)) { 1134 result = SCAN_PTE_NON_PRESENT; 1135 goto out_unmap; 1136 } 1137 if (pte_write(pteval)) 1138 writable = true; 1139 1140 page = vm_normal_page(vma, _address, pteval); 1141 if (unlikely(!page)) { 1142 result = SCAN_PAGE_NULL; 1143 goto out_unmap; 1144 } 1145 1146 /* TODO: teach khugepaged to collapse THP mapped with pte */ 1147 if (PageCompound(page)) { 1148 result = SCAN_PAGE_COMPOUND; 1149 goto out_unmap; 1150 } 1151 1152 /* 1153 * Record which node the original page is from and save this 1154 * information to khugepaged_node_load[]. 1155 * Khupaged will allocate hugepage from the node has the max 1156 * hit record. 1157 */ 1158 node = page_to_nid(page); 1159 if (khugepaged_scan_abort(node)) { 1160 result = SCAN_SCAN_ABORT; 1161 goto out_unmap; 1162 } 1163 khugepaged_node_load[node]++; 1164 if (!PageLRU(page)) { 1165 result = SCAN_PAGE_LRU; 1166 goto out_unmap; 1167 } 1168 if (PageLocked(page)) { 1169 result = SCAN_PAGE_LOCK; 1170 goto out_unmap; 1171 } 1172 if (!PageAnon(page)) { 1173 result = SCAN_PAGE_ANON; 1174 goto out_unmap; 1175 } 1176 1177 /* 1178 * cannot use mapcount: can't collapse if there's a gup pin. 1179 * The page must only be referenced by the scanned process 1180 * and page swap cache. 1181 */ 1182 if (page_count(page) != 1 + !!PageSwapCache(page)) { 1183 result = SCAN_PAGE_COUNT; 1184 goto out_unmap; 1185 } 1186 if (pte_young(pteval) || 1187 page_is_young(page) || PageReferenced(page) || 1188 mmu_notifier_test_young(vma->vm_mm, address)) 1189 referenced++; 1190 } 1191 if (writable) { 1192 if (referenced) { 1193 result = SCAN_SUCCEED; 1194 ret = 1; 1195 } else { 1196 result = SCAN_LACK_REFERENCED_PAGE; 1197 } 1198 } else { 1199 result = SCAN_PAGE_RO; 1200 } 1201 out_unmap: 1202 pte_unmap_unlock(pte, ptl); 1203 if (ret) { 1204 node = khugepaged_find_target_node(); 1205 /* collapse_huge_page will return with the mmap_sem released */ 1206 collapse_huge_page(mm, address, hpage, node, referenced); 1207 } 1208 out: 1209 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced, 1210 none_or_zero, result, unmapped); 1211 return ret; 1212 } 1213 1214 static void collect_mm_slot(struct mm_slot *mm_slot) 1215 { 1216 struct mm_struct *mm = mm_slot->mm; 1217 1218 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); 1219 1220 if (khugepaged_test_exit(mm)) { 1221 /* free mm_slot */ 1222 hash_del(&mm_slot->hash); 1223 list_del(&mm_slot->mm_node); 1224 1225 /* 1226 * Not strictly needed because the mm exited already. 1227 * 1228 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); 1229 */ 1230 1231 /* khugepaged_mm_lock actually not necessary for the below */ 1232 free_mm_slot(mm_slot); 1233 mmdrop(mm); 1234 } 1235 } 1236 1237 #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) 1238 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) 1239 { 1240 struct vm_area_struct *vma; 1241 unsigned long addr; 1242 pmd_t *pmd, _pmd; 1243 1244 i_mmap_lock_write(mapping); 1245 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { 1246 /* probably overkill */ 1247 if (vma->anon_vma) 1248 continue; 1249 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); 1250 if (addr & ~HPAGE_PMD_MASK) 1251 continue; 1252 if (vma->vm_end < addr + HPAGE_PMD_SIZE) 1253 continue; 1254 pmd = mm_find_pmd(vma->vm_mm, addr); 1255 if (!pmd) 1256 continue; 1257 /* 1258 * We need exclusive mmap_sem to retract page table. 1259 * If trylock fails we would end up with pte-mapped THP after 1260 * re-fault. Not ideal, but it's more important to not disturb 1261 * the system too much. 1262 */ 1263 if (down_write_trylock(&vma->vm_mm->mmap_sem)) { 1264 spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd); 1265 /* assume page table is clear */ 1266 _pmd = pmdp_collapse_flush(vma, addr, pmd); 1267 spin_unlock(ptl); 1268 up_write(&vma->vm_mm->mmap_sem); 1269 atomic_long_dec(&vma->vm_mm->nr_ptes); 1270 pte_free(vma->vm_mm, pmd_pgtable(_pmd)); 1271 } 1272 } 1273 i_mmap_unlock_write(mapping); 1274 } 1275 1276 /** 1277 * collapse_shmem - collapse small tmpfs/shmem pages into huge one. 1278 * 1279 * Basic scheme is simple, details are more complex: 1280 * - allocate and freeze a new huge page; 1281 * - scan over radix tree replacing old pages the new one 1282 * + swap in pages if necessary; 1283 * + fill in gaps; 1284 * + keep old pages around in case if rollback is required; 1285 * - if replacing succeed: 1286 * + copy data over; 1287 * + free old pages; 1288 * + unfreeze huge page; 1289 * - if replacing failed; 1290 * + put all pages back and unfreeze them; 1291 * + restore gaps in the radix-tree; 1292 * + free huge page; 1293 */ 1294 static void collapse_shmem(struct mm_struct *mm, 1295 struct address_space *mapping, pgoff_t start, 1296 struct page **hpage, int node) 1297 { 1298 gfp_t gfp; 1299 struct page *page, *new_page, *tmp; 1300 struct mem_cgroup *memcg; 1301 pgoff_t index, end = start + HPAGE_PMD_NR; 1302 LIST_HEAD(pagelist); 1303 struct radix_tree_iter iter; 1304 void **slot; 1305 int nr_none = 0, result = SCAN_SUCCEED; 1306 1307 VM_BUG_ON(start & (HPAGE_PMD_NR - 1)); 1308 1309 /* Only allocate from the target node */ 1310 gfp = alloc_hugepage_khugepaged_gfpmask() | 1311 __GFP_OTHER_NODE | __GFP_THISNODE; 1312 1313 new_page = khugepaged_alloc_page(hpage, gfp, node); 1314 if (!new_page) { 1315 result = SCAN_ALLOC_HUGE_PAGE_FAIL; 1316 goto out; 1317 } 1318 1319 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) { 1320 result = SCAN_CGROUP_CHARGE_FAIL; 1321 goto out; 1322 } 1323 1324 new_page->index = start; 1325 new_page->mapping = mapping; 1326 __SetPageSwapBacked(new_page); 1327 __SetPageLocked(new_page); 1328 BUG_ON(!page_ref_freeze(new_page, 1)); 1329 1330 1331 /* 1332 * At this point the new_page is 'frozen' (page_count() is zero), locked 1333 * and not up-to-date. It's safe to insert it into radix tree, because 1334 * nobody would be able to map it or use it in other way until we 1335 * unfreeze it. 1336 */ 1337 1338 index = start; 1339 spin_lock_irq(&mapping->tree_lock); 1340 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { 1341 int n = min(iter.index, end) - index; 1342 1343 /* 1344 * Handle holes in the radix tree: charge it from shmem and 1345 * insert relevant subpage of new_page into the radix-tree. 1346 */ 1347 if (n && !shmem_charge(mapping->host, n)) { 1348 result = SCAN_FAIL; 1349 break; 1350 } 1351 nr_none += n; 1352 for (; index < min(iter.index, end); index++) { 1353 radix_tree_insert(&mapping->page_tree, index, 1354 new_page + (index % HPAGE_PMD_NR)); 1355 } 1356 1357 /* We are done. */ 1358 if (index >= end) 1359 break; 1360 1361 page = radix_tree_deref_slot_protected(slot, 1362 &mapping->tree_lock); 1363 if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) { 1364 spin_unlock_irq(&mapping->tree_lock); 1365 /* swap in or instantiate fallocated page */ 1366 if (shmem_getpage(mapping->host, index, &page, 1367 SGP_NOHUGE)) { 1368 result = SCAN_FAIL; 1369 goto tree_unlocked; 1370 } 1371 spin_lock_irq(&mapping->tree_lock); 1372 } else if (trylock_page(page)) { 1373 get_page(page); 1374 } else { 1375 result = SCAN_PAGE_LOCK; 1376 break; 1377 } 1378 1379 /* 1380 * The page must be locked, so we can drop the tree_lock 1381 * without racing with truncate. 1382 */ 1383 VM_BUG_ON_PAGE(!PageLocked(page), page); 1384 VM_BUG_ON_PAGE(!PageUptodate(page), page); 1385 VM_BUG_ON_PAGE(PageTransCompound(page), page); 1386 1387 if (page_mapping(page) != mapping) { 1388 result = SCAN_TRUNCATED; 1389 goto out_unlock; 1390 } 1391 spin_unlock_irq(&mapping->tree_lock); 1392 1393 if (isolate_lru_page(page)) { 1394 result = SCAN_DEL_PAGE_LRU; 1395 goto out_isolate_failed; 1396 } 1397 1398 if (page_mapped(page)) 1399 unmap_mapping_range(mapping, index << PAGE_SHIFT, 1400 PAGE_SIZE, 0); 1401 1402 spin_lock_irq(&mapping->tree_lock); 1403 1404 VM_BUG_ON_PAGE(page_mapped(page), page); 1405 1406 /* 1407 * The page is expected to have page_count() == 3: 1408 * - we hold a pin on it; 1409 * - one reference from radix tree; 1410 * - one from isolate_lru_page; 1411 */ 1412 if (!page_ref_freeze(page, 3)) { 1413 result = SCAN_PAGE_COUNT; 1414 goto out_lru; 1415 } 1416 1417 /* 1418 * Add the page to the list to be able to undo the collapse if 1419 * something go wrong. 1420 */ 1421 list_add_tail(&page->lru, &pagelist); 1422 1423 /* Finally, replace with the new page. */ 1424 radix_tree_replace_slot(slot, 1425 new_page + (index % HPAGE_PMD_NR)); 1426 1427 index++; 1428 continue; 1429 out_lru: 1430 spin_unlock_irq(&mapping->tree_lock); 1431 putback_lru_page(page); 1432 out_isolate_failed: 1433 unlock_page(page); 1434 put_page(page); 1435 goto tree_unlocked; 1436 out_unlock: 1437 unlock_page(page); 1438 put_page(page); 1439 break; 1440 } 1441 1442 /* 1443 * Handle hole in radix tree at the end of the range. 1444 * This code only triggers if there's nothing in radix tree 1445 * beyond 'end'. 1446 */ 1447 if (result == SCAN_SUCCEED && index < end) { 1448 int n = end - index; 1449 1450 if (!shmem_charge(mapping->host, n)) { 1451 result = SCAN_FAIL; 1452 goto tree_locked; 1453 } 1454 1455 for (; index < end; index++) { 1456 radix_tree_insert(&mapping->page_tree, index, 1457 new_page + (index % HPAGE_PMD_NR)); 1458 } 1459 nr_none += n; 1460 } 1461 1462 tree_locked: 1463 spin_unlock_irq(&mapping->tree_lock); 1464 tree_unlocked: 1465 1466 if (result == SCAN_SUCCEED) { 1467 unsigned long flags; 1468 struct zone *zone = page_zone(new_page); 1469 1470 /* 1471 * Replacing old pages with new one has succeed, now we need to 1472 * copy the content and free old pages. 1473 */ 1474 list_for_each_entry_safe(page, tmp, &pagelist, lru) { 1475 copy_highpage(new_page + (page->index % HPAGE_PMD_NR), 1476 page); 1477 list_del(&page->lru); 1478 unlock_page(page); 1479 page_ref_unfreeze(page, 1); 1480 page->mapping = NULL; 1481 ClearPageActive(page); 1482 ClearPageUnevictable(page); 1483 put_page(page); 1484 } 1485 1486 local_irq_save(flags); 1487 __inc_node_page_state(new_page, NR_SHMEM_THPS); 1488 if (nr_none) { 1489 __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none); 1490 __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none); 1491 } 1492 local_irq_restore(flags); 1493 1494 /* 1495 * Remove pte page tables, so we can re-faulti 1496 * the page as huge. 1497 */ 1498 retract_page_tables(mapping, start); 1499 1500 /* Everything is ready, let's unfreeze the new_page */ 1501 set_page_dirty(new_page); 1502 SetPageUptodate(new_page); 1503 page_ref_unfreeze(new_page, HPAGE_PMD_NR); 1504 mem_cgroup_commit_charge(new_page, memcg, false, true); 1505 lru_cache_add_anon(new_page); 1506 unlock_page(new_page); 1507 1508 *hpage = NULL; 1509 } else { 1510 /* Something went wrong: rollback changes to the radix-tree */ 1511 shmem_uncharge(mapping->host, nr_none); 1512 spin_lock_irq(&mapping->tree_lock); 1513 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, 1514 start) { 1515 if (iter.index >= end) 1516 break; 1517 page = list_first_entry_or_null(&pagelist, 1518 struct page, lru); 1519 if (!page || iter.index < page->index) { 1520 if (!nr_none) 1521 break; 1522 /* Put holes back where they were */ 1523 radix_tree_replace_slot(slot, NULL); 1524 nr_none--; 1525 continue; 1526 } 1527 1528 VM_BUG_ON_PAGE(page->index != iter.index, page); 1529 1530 /* Unfreeze the page. */ 1531 list_del(&page->lru); 1532 page_ref_unfreeze(page, 2); 1533 radix_tree_replace_slot(slot, page); 1534 spin_unlock_irq(&mapping->tree_lock); 1535 putback_lru_page(page); 1536 unlock_page(page); 1537 spin_lock_irq(&mapping->tree_lock); 1538 } 1539 VM_BUG_ON(nr_none); 1540 spin_unlock_irq(&mapping->tree_lock); 1541 1542 /* Unfreeze new_page, caller would take care about freeing it */ 1543 page_ref_unfreeze(new_page, 1); 1544 mem_cgroup_cancel_charge(new_page, memcg, true); 1545 unlock_page(new_page); 1546 new_page->mapping = NULL; 1547 } 1548 out: 1549 VM_BUG_ON(!list_empty(&pagelist)); 1550 /* TODO: tracepoints */ 1551 } 1552 1553 static void khugepaged_scan_shmem(struct mm_struct *mm, 1554 struct address_space *mapping, 1555 pgoff_t start, struct page **hpage) 1556 { 1557 struct page *page = NULL; 1558 struct radix_tree_iter iter; 1559 void **slot; 1560 int present, swap; 1561 int node = NUMA_NO_NODE; 1562 int result = SCAN_SUCCEED; 1563 1564 present = 0; 1565 swap = 0; 1566 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load)); 1567 rcu_read_lock(); 1568 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { 1569 if (iter.index >= start + HPAGE_PMD_NR) 1570 break; 1571 1572 page = radix_tree_deref_slot(slot); 1573 if (radix_tree_deref_retry(page)) { 1574 slot = radix_tree_iter_retry(&iter); 1575 continue; 1576 } 1577 1578 if (radix_tree_exception(page)) { 1579 if (++swap > khugepaged_max_ptes_swap) { 1580 result = SCAN_EXCEED_SWAP_PTE; 1581 break; 1582 } 1583 continue; 1584 } 1585 1586 if (PageTransCompound(page)) { 1587 result = SCAN_PAGE_COMPOUND; 1588 break; 1589 } 1590 1591 node = page_to_nid(page); 1592 if (khugepaged_scan_abort(node)) { 1593 result = SCAN_SCAN_ABORT; 1594 break; 1595 } 1596 khugepaged_node_load[node]++; 1597 1598 if (!PageLRU(page)) { 1599 result = SCAN_PAGE_LRU; 1600 break; 1601 } 1602 1603 if (page_count(page) != 1 + page_mapcount(page)) { 1604 result = SCAN_PAGE_COUNT; 1605 break; 1606 } 1607 1608 /* 1609 * We probably should check if the page is referenced here, but 1610 * nobody would transfer pte_young() to PageReferenced() for us. 1611 * And rmap walk here is just too costly... 1612 */ 1613 1614 present++; 1615 1616 if (need_resched()) { 1617 cond_resched_rcu(); 1618 slot = radix_tree_iter_next(&iter); 1619 } 1620 } 1621 rcu_read_unlock(); 1622 1623 if (result == SCAN_SUCCEED) { 1624 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) { 1625 result = SCAN_EXCEED_NONE_PTE; 1626 } else { 1627 node = khugepaged_find_target_node(); 1628 collapse_shmem(mm, mapping, start, hpage, node); 1629 } 1630 } 1631 1632 /* TODO: tracepoints */ 1633 } 1634 #else 1635 static void khugepaged_scan_shmem(struct mm_struct *mm, 1636 struct address_space *mapping, 1637 pgoff_t start, struct page **hpage) 1638 { 1639 BUILD_BUG(); 1640 } 1641 #endif 1642 1643 static unsigned int khugepaged_scan_mm_slot(unsigned int pages, 1644 struct page **hpage) 1645 __releases(&khugepaged_mm_lock) 1646 __acquires(&khugepaged_mm_lock) 1647 { 1648 struct mm_slot *mm_slot; 1649 struct mm_struct *mm; 1650 struct vm_area_struct *vma; 1651 int progress = 0; 1652 1653 VM_BUG_ON(!pages); 1654 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); 1655 1656 if (khugepaged_scan.mm_slot) 1657 mm_slot = khugepaged_scan.mm_slot; 1658 else { 1659 mm_slot = list_entry(khugepaged_scan.mm_head.next, 1660 struct mm_slot, mm_node); 1661 khugepaged_scan.address = 0; 1662 khugepaged_scan.mm_slot = mm_slot; 1663 } 1664 spin_unlock(&khugepaged_mm_lock); 1665 1666 mm = mm_slot->mm; 1667 down_read(&mm->mmap_sem); 1668 if (unlikely(khugepaged_test_exit(mm))) 1669 vma = NULL; 1670 else 1671 vma = find_vma(mm, khugepaged_scan.address); 1672 1673 progress++; 1674 for (; vma; vma = vma->vm_next) { 1675 unsigned long hstart, hend; 1676 1677 cond_resched(); 1678 if (unlikely(khugepaged_test_exit(mm))) { 1679 progress++; 1680 break; 1681 } 1682 if (!hugepage_vma_check(vma)) { 1683 skip: 1684 progress++; 1685 continue; 1686 } 1687 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 1688 hend = vma->vm_end & HPAGE_PMD_MASK; 1689 if (hstart >= hend) 1690 goto skip; 1691 if (khugepaged_scan.address > hend) 1692 goto skip; 1693 if (khugepaged_scan.address < hstart) 1694 khugepaged_scan.address = hstart; 1695 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); 1696 1697 while (khugepaged_scan.address < hend) { 1698 int ret; 1699 cond_resched(); 1700 if (unlikely(khugepaged_test_exit(mm))) 1701 goto breakouterloop; 1702 1703 VM_BUG_ON(khugepaged_scan.address < hstart || 1704 khugepaged_scan.address + HPAGE_PMD_SIZE > 1705 hend); 1706 if (shmem_file(vma->vm_file)) { 1707 struct file *file; 1708 pgoff_t pgoff = linear_page_index(vma, 1709 khugepaged_scan.address); 1710 if (!shmem_huge_enabled(vma)) 1711 goto skip; 1712 file = get_file(vma->vm_file); 1713 up_read(&mm->mmap_sem); 1714 ret = 1; 1715 khugepaged_scan_shmem(mm, file->f_mapping, 1716 pgoff, hpage); 1717 fput(file); 1718 } else { 1719 ret = khugepaged_scan_pmd(mm, vma, 1720 khugepaged_scan.address, 1721 hpage); 1722 } 1723 /* move to next address */ 1724 khugepaged_scan.address += HPAGE_PMD_SIZE; 1725 progress += HPAGE_PMD_NR; 1726 if (ret) 1727 /* we released mmap_sem so break loop */ 1728 goto breakouterloop_mmap_sem; 1729 if (progress >= pages) 1730 goto breakouterloop; 1731 } 1732 } 1733 breakouterloop: 1734 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */ 1735 breakouterloop_mmap_sem: 1736 1737 spin_lock(&khugepaged_mm_lock); 1738 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); 1739 /* 1740 * Release the current mm_slot if this mm is about to die, or 1741 * if we scanned all vmas of this mm. 1742 */ 1743 if (khugepaged_test_exit(mm) || !vma) { 1744 /* 1745 * Make sure that if mm_users is reaching zero while 1746 * khugepaged runs here, khugepaged_exit will find 1747 * mm_slot not pointing to the exiting mm. 1748 */ 1749 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) { 1750 khugepaged_scan.mm_slot = list_entry( 1751 mm_slot->mm_node.next, 1752 struct mm_slot, mm_node); 1753 khugepaged_scan.address = 0; 1754 } else { 1755 khugepaged_scan.mm_slot = NULL; 1756 khugepaged_full_scans++; 1757 } 1758 1759 collect_mm_slot(mm_slot); 1760 } 1761 1762 return progress; 1763 } 1764 1765 static int khugepaged_has_work(void) 1766 { 1767 return !list_empty(&khugepaged_scan.mm_head) && 1768 khugepaged_enabled(); 1769 } 1770 1771 static int khugepaged_wait_event(void) 1772 { 1773 return !list_empty(&khugepaged_scan.mm_head) || 1774 kthread_should_stop(); 1775 } 1776 1777 static void khugepaged_do_scan(void) 1778 { 1779 struct page *hpage = NULL; 1780 unsigned int progress = 0, pass_through_head = 0; 1781 unsigned int pages = khugepaged_pages_to_scan; 1782 bool wait = true; 1783 1784 barrier(); /* write khugepaged_pages_to_scan to local stack */ 1785 1786 while (progress < pages) { 1787 if (!khugepaged_prealloc_page(&hpage, &wait)) 1788 break; 1789 1790 cond_resched(); 1791 1792 if (unlikely(kthread_should_stop() || try_to_freeze())) 1793 break; 1794 1795 spin_lock(&khugepaged_mm_lock); 1796 if (!khugepaged_scan.mm_slot) 1797 pass_through_head++; 1798 if (khugepaged_has_work() && 1799 pass_through_head < 2) 1800 progress += khugepaged_scan_mm_slot(pages - progress, 1801 &hpage); 1802 else 1803 progress = pages; 1804 spin_unlock(&khugepaged_mm_lock); 1805 } 1806 1807 if (!IS_ERR_OR_NULL(hpage)) 1808 put_page(hpage); 1809 } 1810 1811 static bool khugepaged_should_wakeup(void) 1812 { 1813 return kthread_should_stop() || 1814 time_after_eq(jiffies, khugepaged_sleep_expire); 1815 } 1816 1817 static void khugepaged_wait_work(void) 1818 { 1819 if (khugepaged_has_work()) { 1820 const unsigned long scan_sleep_jiffies = 1821 msecs_to_jiffies(khugepaged_scan_sleep_millisecs); 1822 1823 if (!scan_sleep_jiffies) 1824 return; 1825 1826 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies; 1827 wait_event_freezable_timeout(khugepaged_wait, 1828 khugepaged_should_wakeup(), 1829 scan_sleep_jiffies); 1830 return; 1831 } 1832 1833 if (khugepaged_enabled()) 1834 wait_event_freezable(khugepaged_wait, khugepaged_wait_event()); 1835 } 1836 1837 static int khugepaged(void *none) 1838 { 1839 struct mm_slot *mm_slot; 1840 1841 set_freezable(); 1842 set_user_nice(current, MAX_NICE); 1843 1844 while (!kthread_should_stop()) { 1845 khugepaged_do_scan(); 1846 khugepaged_wait_work(); 1847 } 1848 1849 spin_lock(&khugepaged_mm_lock); 1850 mm_slot = khugepaged_scan.mm_slot; 1851 khugepaged_scan.mm_slot = NULL; 1852 if (mm_slot) 1853 collect_mm_slot(mm_slot); 1854 spin_unlock(&khugepaged_mm_lock); 1855 return 0; 1856 } 1857 1858 static void set_recommended_min_free_kbytes(void) 1859 { 1860 struct zone *zone; 1861 int nr_zones = 0; 1862 unsigned long recommended_min; 1863 1864 for_each_populated_zone(zone) 1865 nr_zones++; 1866 1867 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */ 1868 recommended_min = pageblock_nr_pages * nr_zones * 2; 1869 1870 /* 1871 * Make sure that on average at least two pageblocks are almost free 1872 * of another type, one for a migratetype to fall back to and a 1873 * second to avoid subsequent fallbacks of other types There are 3 1874 * MIGRATE_TYPES we care about. 1875 */ 1876 recommended_min += pageblock_nr_pages * nr_zones * 1877 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES; 1878 1879 /* don't ever allow to reserve more than 5% of the lowmem */ 1880 recommended_min = min(recommended_min, 1881 (unsigned long) nr_free_buffer_pages() / 20); 1882 recommended_min <<= (PAGE_SHIFT-10); 1883 1884 if (recommended_min > min_free_kbytes) { 1885 if (user_min_free_kbytes >= 0) 1886 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n", 1887 min_free_kbytes, recommended_min); 1888 1889 min_free_kbytes = recommended_min; 1890 } 1891 setup_per_zone_wmarks(); 1892 } 1893 1894 int start_stop_khugepaged(void) 1895 { 1896 static struct task_struct *khugepaged_thread __read_mostly; 1897 static DEFINE_MUTEX(khugepaged_mutex); 1898 int err = 0; 1899 1900 mutex_lock(&khugepaged_mutex); 1901 if (khugepaged_enabled()) { 1902 if (!khugepaged_thread) 1903 khugepaged_thread = kthread_run(khugepaged, NULL, 1904 "khugepaged"); 1905 if (IS_ERR(khugepaged_thread)) { 1906 pr_err("khugepaged: kthread_run(khugepaged) failed\n"); 1907 err = PTR_ERR(khugepaged_thread); 1908 khugepaged_thread = NULL; 1909 goto fail; 1910 } 1911 1912 if (!list_empty(&khugepaged_scan.mm_head)) 1913 wake_up_interruptible(&khugepaged_wait); 1914 1915 set_recommended_min_free_kbytes(); 1916 } else if (khugepaged_thread) { 1917 kthread_stop(khugepaged_thread); 1918 khugepaged_thread = NULL; 1919 } 1920 fail: 1921 mutex_unlock(&khugepaged_mutex); 1922 return err; 1923 } 1924