1 /* 2 * Copyright (C) 2009 Red Hat, Inc. 3 * 4 * This work is licensed under the terms of the GNU GPL, version 2. See 5 * the COPYING file in the top-level directory. 6 */ 7 8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 9 10 #include <linux/mm.h> 11 #include <linux/sched.h> 12 #include <linux/highmem.h> 13 #include <linux/hugetlb.h> 14 #include <linux/mmu_notifier.h> 15 #include <linux/rmap.h> 16 #include <linux/swap.h> 17 #include <linux/shrinker.h> 18 #include <linux/mm_inline.h> 19 #include <linux/kthread.h> 20 #include <linux/khugepaged.h> 21 #include <linux/freezer.h> 22 #include <linux/mman.h> 23 #include <linux/pagemap.h> 24 #include <linux/migrate.h> 25 #include <linux/hashtable.h> 26 27 #include <asm/tlb.h> 28 #include <asm/pgalloc.h> 29 #include "internal.h" 30 31 /* 32 * By default transparent hugepage support is disabled in order that avoid 33 * to risk increase the memory footprint of applications without a guaranteed 34 * benefit. When transparent hugepage support is enabled, is for all mappings, 35 * and khugepaged scans all mappings. 36 * Defrag is invoked by khugepaged hugepage allocations and by page faults 37 * for all hugepage allocations. 38 */ 39 unsigned long transparent_hugepage_flags __read_mostly = 40 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS 41 (1<<TRANSPARENT_HUGEPAGE_FLAG)| 42 #endif 43 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE 44 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)| 45 #endif 46 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)| 47 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)| 48 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 49 50 /* default scan 8*512 pte (or vmas) every 30 second */ 51 static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8; 52 static unsigned int khugepaged_pages_collapsed; 53 static unsigned int khugepaged_full_scans; 54 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000; 55 /* during fragmentation poll the hugepage allocator once every minute */ 56 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000; 57 static struct task_struct *khugepaged_thread __read_mostly; 58 static DEFINE_MUTEX(khugepaged_mutex); 59 static DEFINE_SPINLOCK(khugepaged_mm_lock); 60 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait); 61 /* 62 * default collapse hugepages if there is at least one pte mapped like 63 * it would have happened if the vma was large enough during page 64 * fault. 65 */ 66 static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1; 67 68 static int khugepaged(void *none); 69 static int khugepaged_slab_init(void); 70 71 #define MM_SLOTS_HASH_BITS 10 72 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS); 73 74 static struct kmem_cache *mm_slot_cache __read_mostly; 75 76 /** 77 * struct mm_slot - hash lookup from mm to mm_slot 78 * @hash: hash collision list 79 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head 80 * @mm: the mm that this information is valid for 81 */ 82 struct mm_slot { 83 struct hlist_node hash; 84 struct list_head mm_node; 85 struct mm_struct *mm; 86 }; 87 88 /** 89 * struct khugepaged_scan - cursor for scanning 90 * @mm_head: the head of the mm list to scan 91 * @mm_slot: the current mm_slot we are scanning 92 * @address: the next address inside that to be scanned 93 * 94 * There is only the one khugepaged_scan instance of this cursor structure. 95 */ 96 struct khugepaged_scan { 97 struct list_head mm_head; 98 struct mm_slot *mm_slot; 99 unsigned long address; 100 }; 101 static struct khugepaged_scan khugepaged_scan = { 102 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head), 103 }; 104 105 106 static int set_recommended_min_free_kbytes(void) 107 { 108 struct zone *zone; 109 int nr_zones = 0; 110 unsigned long recommended_min; 111 112 if (!khugepaged_enabled()) 113 return 0; 114 115 for_each_populated_zone(zone) 116 nr_zones++; 117 118 /* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */ 119 recommended_min = pageblock_nr_pages * nr_zones * 2; 120 121 /* 122 * Make sure that on average at least two pageblocks are almost free 123 * of another type, one for a migratetype to fall back to and a 124 * second to avoid subsequent fallbacks of other types There are 3 125 * MIGRATE_TYPES we care about. 126 */ 127 recommended_min += pageblock_nr_pages * nr_zones * 128 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES; 129 130 /* don't ever allow to reserve more than 5% of the lowmem */ 131 recommended_min = min(recommended_min, 132 (unsigned long) nr_free_buffer_pages() / 20); 133 recommended_min <<= (PAGE_SHIFT-10); 134 135 if (recommended_min > min_free_kbytes) { 136 if (user_min_free_kbytes >= 0) 137 pr_info("raising min_free_kbytes from %d to %lu " 138 "to help transparent hugepage allocations\n", 139 min_free_kbytes, recommended_min); 140 141 min_free_kbytes = recommended_min; 142 } 143 setup_per_zone_wmarks(); 144 return 0; 145 } 146 late_initcall(set_recommended_min_free_kbytes); 147 148 static int start_khugepaged(void) 149 { 150 int err = 0; 151 if (khugepaged_enabled()) { 152 if (!khugepaged_thread) 153 khugepaged_thread = kthread_run(khugepaged, NULL, 154 "khugepaged"); 155 if (unlikely(IS_ERR(khugepaged_thread))) { 156 pr_err("khugepaged: kthread_run(khugepaged) failed\n"); 157 err = PTR_ERR(khugepaged_thread); 158 khugepaged_thread = NULL; 159 } 160 161 if (!list_empty(&khugepaged_scan.mm_head)) 162 wake_up_interruptible(&khugepaged_wait); 163 164 set_recommended_min_free_kbytes(); 165 } else if (khugepaged_thread) { 166 kthread_stop(khugepaged_thread); 167 khugepaged_thread = NULL; 168 } 169 170 return err; 171 } 172 173 static atomic_t huge_zero_refcount; 174 static struct page *huge_zero_page __read_mostly; 175 176 static inline bool is_huge_zero_page(struct page *page) 177 { 178 return ACCESS_ONCE(huge_zero_page) == page; 179 } 180 181 static inline bool is_huge_zero_pmd(pmd_t pmd) 182 { 183 return is_huge_zero_page(pmd_page(pmd)); 184 } 185 186 static struct page *get_huge_zero_page(void) 187 { 188 struct page *zero_page; 189 retry: 190 if (likely(atomic_inc_not_zero(&huge_zero_refcount))) 191 return ACCESS_ONCE(huge_zero_page); 192 193 zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE, 194 HPAGE_PMD_ORDER); 195 if (!zero_page) { 196 count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED); 197 return NULL; 198 } 199 count_vm_event(THP_ZERO_PAGE_ALLOC); 200 preempt_disable(); 201 if (cmpxchg(&huge_zero_page, NULL, zero_page)) { 202 preempt_enable(); 203 __free_page(zero_page); 204 goto retry; 205 } 206 207 /* We take additional reference here. It will be put back by shrinker */ 208 atomic_set(&huge_zero_refcount, 2); 209 preempt_enable(); 210 return ACCESS_ONCE(huge_zero_page); 211 } 212 213 static void put_huge_zero_page(void) 214 { 215 /* 216 * Counter should never go to zero here. Only shrinker can put 217 * last reference. 218 */ 219 BUG_ON(atomic_dec_and_test(&huge_zero_refcount)); 220 } 221 222 static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink, 223 struct shrink_control *sc) 224 { 225 /* we can free zero page only if last reference remains */ 226 return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0; 227 } 228 229 static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink, 230 struct shrink_control *sc) 231 { 232 if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) { 233 struct page *zero_page = xchg(&huge_zero_page, NULL); 234 BUG_ON(zero_page == NULL); 235 __free_page(zero_page); 236 return HPAGE_PMD_NR; 237 } 238 239 return 0; 240 } 241 242 static struct shrinker huge_zero_page_shrinker = { 243 .count_objects = shrink_huge_zero_page_count, 244 .scan_objects = shrink_huge_zero_page_scan, 245 .seeks = DEFAULT_SEEKS, 246 }; 247 248 #ifdef CONFIG_SYSFS 249 250 static ssize_t double_flag_show(struct kobject *kobj, 251 struct kobj_attribute *attr, char *buf, 252 enum transparent_hugepage_flag enabled, 253 enum transparent_hugepage_flag req_madv) 254 { 255 if (test_bit(enabled, &transparent_hugepage_flags)) { 256 VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags)); 257 return sprintf(buf, "[always] madvise never\n"); 258 } else if (test_bit(req_madv, &transparent_hugepage_flags)) 259 return sprintf(buf, "always [madvise] never\n"); 260 else 261 return sprintf(buf, "always madvise [never]\n"); 262 } 263 static ssize_t double_flag_store(struct kobject *kobj, 264 struct kobj_attribute *attr, 265 const char *buf, size_t count, 266 enum transparent_hugepage_flag enabled, 267 enum transparent_hugepage_flag req_madv) 268 { 269 if (!memcmp("always", buf, 270 min(sizeof("always")-1, count))) { 271 set_bit(enabled, &transparent_hugepage_flags); 272 clear_bit(req_madv, &transparent_hugepage_flags); 273 } else if (!memcmp("madvise", buf, 274 min(sizeof("madvise")-1, count))) { 275 clear_bit(enabled, &transparent_hugepage_flags); 276 set_bit(req_madv, &transparent_hugepage_flags); 277 } else if (!memcmp("never", buf, 278 min(sizeof("never")-1, count))) { 279 clear_bit(enabled, &transparent_hugepage_flags); 280 clear_bit(req_madv, &transparent_hugepage_flags); 281 } else 282 return -EINVAL; 283 284 return count; 285 } 286 287 static ssize_t enabled_show(struct kobject *kobj, 288 struct kobj_attribute *attr, char *buf) 289 { 290 return double_flag_show(kobj, attr, buf, 291 TRANSPARENT_HUGEPAGE_FLAG, 292 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG); 293 } 294 static ssize_t enabled_store(struct kobject *kobj, 295 struct kobj_attribute *attr, 296 const char *buf, size_t count) 297 { 298 ssize_t ret; 299 300 ret = double_flag_store(kobj, attr, buf, count, 301 TRANSPARENT_HUGEPAGE_FLAG, 302 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG); 303 304 if (ret > 0) { 305 int err; 306 307 mutex_lock(&khugepaged_mutex); 308 err = start_khugepaged(); 309 mutex_unlock(&khugepaged_mutex); 310 311 if (err) 312 ret = err; 313 } 314 315 return ret; 316 } 317 static struct kobj_attribute enabled_attr = 318 __ATTR(enabled, 0644, enabled_show, enabled_store); 319 320 static ssize_t single_flag_show(struct kobject *kobj, 321 struct kobj_attribute *attr, char *buf, 322 enum transparent_hugepage_flag flag) 323 { 324 return sprintf(buf, "%d\n", 325 !!test_bit(flag, &transparent_hugepage_flags)); 326 } 327 328 static ssize_t single_flag_store(struct kobject *kobj, 329 struct kobj_attribute *attr, 330 const char *buf, size_t count, 331 enum transparent_hugepage_flag flag) 332 { 333 unsigned long value; 334 int ret; 335 336 ret = kstrtoul(buf, 10, &value); 337 if (ret < 0) 338 return ret; 339 if (value > 1) 340 return -EINVAL; 341 342 if (value) 343 set_bit(flag, &transparent_hugepage_flags); 344 else 345 clear_bit(flag, &transparent_hugepage_flags); 346 347 return count; 348 } 349 350 /* 351 * Currently defrag only disables __GFP_NOWAIT for allocation. A blind 352 * __GFP_REPEAT is too aggressive, it's never worth swapping tons of 353 * memory just to allocate one more hugepage. 354 */ 355 static ssize_t defrag_show(struct kobject *kobj, 356 struct kobj_attribute *attr, char *buf) 357 { 358 return double_flag_show(kobj, attr, buf, 359 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG, 360 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG); 361 } 362 static ssize_t defrag_store(struct kobject *kobj, 363 struct kobj_attribute *attr, 364 const char *buf, size_t count) 365 { 366 return double_flag_store(kobj, attr, buf, count, 367 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG, 368 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG); 369 } 370 static struct kobj_attribute defrag_attr = 371 __ATTR(defrag, 0644, defrag_show, defrag_store); 372 373 static ssize_t use_zero_page_show(struct kobject *kobj, 374 struct kobj_attribute *attr, char *buf) 375 { 376 return single_flag_show(kobj, attr, buf, 377 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 378 } 379 static ssize_t use_zero_page_store(struct kobject *kobj, 380 struct kobj_attribute *attr, const char *buf, size_t count) 381 { 382 return single_flag_store(kobj, attr, buf, count, 383 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 384 } 385 static struct kobj_attribute use_zero_page_attr = 386 __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store); 387 #ifdef CONFIG_DEBUG_VM 388 static ssize_t debug_cow_show(struct kobject *kobj, 389 struct kobj_attribute *attr, char *buf) 390 { 391 return single_flag_show(kobj, attr, buf, 392 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); 393 } 394 static ssize_t debug_cow_store(struct kobject *kobj, 395 struct kobj_attribute *attr, 396 const char *buf, size_t count) 397 { 398 return single_flag_store(kobj, attr, buf, count, 399 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); 400 } 401 static struct kobj_attribute debug_cow_attr = 402 __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store); 403 #endif /* CONFIG_DEBUG_VM */ 404 405 static struct attribute *hugepage_attr[] = { 406 &enabled_attr.attr, 407 &defrag_attr.attr, 408 &use_zero_page_attr.attr, 409 #ifdef CONFIG_DEBUG_VM 410 &debug_cow_attr.attr, 411 #endif 412 NULL, 413 }; 414 415 static struct attribute_group hugepage_attr_group = { 416 .attrs = hugepage_attr, 417 }; 418 419 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj, 420 struct kobj_attribute *attr, 421 char *buf) 422 { 423 return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs); 424 } 425 426 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj, 427 struct kobj_attribute *attr, 428 const char *buf, size_t count) 429 { 430 unsigned long msecs; 431 int err; 432 433 err = kstrtoul(buf, 10, &msecs); 434 if (err || msecs > UINT_MAX) 435 return -EINVAL; 436 437 khugepaged_scan_sleep_millisecs = msecs; 438 wake_up_interruptible(&khugepaged_wait); 439 440 return count; 441 } 442 static struct kobj_attribute scan_sleep_millisecs_attr = 443 __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show, 444 scan_sleep_millisecs_store); 445 446 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj, 447 struct kobj_attribute *attr, 448 char *buf) 449 { 450 return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs); 451 } 452 453 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj, 454 struct kobj_attribute *attr, 455 const char *buf, size_t count) 456 { 457 unsigned long msecs; 458 int err; 459 460 err = kstrtoul(buf, 10, &msecs); 461 if (err || msecs > UINT_MAX) 462 return -EINVAL; 463 464 khugepaged_alloc_sleep_millisecs = msecs; 465 wake_up_interruptible(&khugepaged_wait); 466 467 return count; 468 } 469 static struct kobj_attribute alloc_sleep_millisecs_attr = 470 __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show, 471 alloc_sleep_millisecs_store); 472 473 static ssize_t pages_to_scan_show(struct kobject *kobj, 474 struct kobj_attribute *attr, 475 char *buf) 476 { 477 return sprintf(buf, "%u\n", khugepaged_pages_to_scan); 478 } 479 static ssize_t pages_to_scan_store(struct kobject *kobj, 480 struct kobj_attribute *attr, 481 const char *buf, size_t count) 482 { 483 int err; 484 unsigned long pages; 485 486 err = kstrtoul(buf, 10, &pages); 487 if (err || !pages || pages > UINT_MAX) 488 return -EINVAL; 489 490 khugepaged_pages_to_scan = pages; 491 492 return count; 493 } 494 static struct kobj_attribute pages_to_scan_attr = 495 __ATTR(pages_to_scan, 0644, pages_to_scan_show, 496 pages_to_scan_store); 497 498 static ssize_t pages_collapsed_show(struct kobject *kobj, 499 struct kobj_attribute *attr, 500 char *buf) 501 { 502 return sprintf(buf, "%u\n", khugepaged_pages_collapsed); 503 } 504 static struct kobj_attribute pages_collapsed_attr = 505 __ATTR_RO(pages_collapsed); 506 507 static ssize_t full_scans_show(struct kobject *kobj, 508 struct kobj_attribute *attr, 509 char *buf) 510 { 511 return sprintf(buf, "%u\n", khugepaged_full_scans); 512 } 513 static struct kobj_attribute full_scans_attr = 514 __ATTR_RO(full_scans); 515 516 static ssize_t khugepaged_defrag_show(struct kobject *kobj, 517 struct kobj_attribute *attr, char *buf) 518 { 519 return single_flag_show(kobj, attr, buf, 520 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); 521 } 522 static ssize_t khugepaged_defrag_store(struct kobject *kobj, 523 struct kobj_attribute *attr, 524 const char *buf, size_t count) 525 { 526 return single_flag_store(kobj, attr, buf, count, 527 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); 528 } 529 static struct kobj_attribute khugepaged_defrag_attr = 530 __ATTR(defrag, 0644, khugepaged_defrag_show, 531 khugepaged_defrag_store); 532 533 /* 534 * max_ptes_none controls if khugepaged should collapse hugepages over 535 * any unmapped ptes in turn potentially increasing the memory 536 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not 537 * reduce the available free memory in the system as it 538 * runs. Increasing max_ptes_none will instead potentially reduce the 539 * free memory in the system during the khugepaged scan. 540 */ 541 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj, 542 struct kobj_attribute *attr, 543 char *buf) 544 { 545 return sprintf(buf, "%u\n", khugepaged_max_ptes_none); 546 } 547 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj, 548 struct kobj_attribute *attr, 549 const char *buf, size_t count) 550 { 551 int err; 552 unsigned long max_ptes_none; 553 554 err = kstrtoul(buf, 10, &max_ptes_none); 555 if (err || max_ptes_none > HPAGE_PMD_NR-1) 556 return -EINVAL; 557 558 khugepaged_max_ptes_none = max_ptes_none; 559 560 return count; 561 } 562 static struct kobj_attribute khugepaged_max_ptes_none_attr = 563 __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show, 564 khugepaged_max_ptes_none_store); 565 566 static struct attribute *khugepaged_attr[] = { 567 &khugepaged_defrag_attr.attr, 568 &khugepaged_max_ptes_none_attr.attr, 569 &pages_to_scan_attr.attr, 570 &pages_collapsed_attr.attr, 571 &full_scans_attr.attr, 572 &scan_sleep_millisecs_attr.attr, 573 &alloc_sleep_millisecs_attr.attr, 574 NULL, 575 }; 576 577 static struct attribute_group khugepaged_attr_group = { 578 .attrs = khugepaged_attr, 579 .name = "khugepaged", 580 }; 581 582 static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) 583 { 584 int err; 585 586 *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj); 587 if (unlikely(!*hugepage_kobj)) { 588 pr_err("failed to create transparent hugepage kobject\n"); 589 return -ENOMEM; 590 } 591 592 err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group); 593 if (err) { 594 pr_err("failed to register transparent hugepage group\n"); 595 goto delete_obj; 596 } 597 598 err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group); 599 if (err) { 600 pr_err("failed to register transparent hugepage group\n"); 601 goto remove_hp_group; 602 } 603 604 return 0; 605 606 remove_hp_group: 607 sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group); 608 delete_obj: 609 kobject_put(*hugepage_kobj); 610 return err; 611 } 612 613 static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj) 614 { 615 sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group); 616 sysfs_remove_group(hugepage_kobj, &hugepage_attr_group); 617 kobject_put(hugepage_kobj); 618 } 619 #else 620 static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj) 621 { 622 return 0; 623 } 624 625 static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj) 626 { 627 } 628 #endif /* CONFIG_SYSFS */ 629 630 static int __init hugepage_init(void) 631 { 632 int err; 633 struct kobject *hugepage_kobj; 634 635 if (!has_transparent_hugepage()) { 636 transparent_hugepage_flags = 0; 637 return -EINVAL; 638 } 639 640 err = hugepage_init_sysfs(&hugepage_kobj); 641 if (err) 642 return err; 643 644 err = khugepaged_slab_init(); 645 if (err) 646 goto out; 647 648 register_shrinker(&huge_zero_page_shrinker); 649 650 /* 651 * By default disable transparent hugepages on smaller systems, 652 * where the extra memory used could hurt more than TLB overhead 653 * is likely to save. The admin can still enable it through /sys. 654 */ 655 if (totalram_pages < (512 << (20 - PAGE_SHIFT))) 656 transparent_hugepage_flags = 0; 657 658 start_khugepaged(); 659 660 return 0; 661 out: 662 hugepage_exit_sysfs(hugepage_kobj); 663 return err; 664 } 665 subsys_initcall(hugepage_init); 666 667 static int __init setup_transparent_hugepage(char *str) 668 { 669 int ret = 0; 670 if (!str) 671 goto out; 672 if (!strcmp(str, "always")) { 673 set_bit(TRANSPARENT_HUGEPAGE_FLAG, 674 &transparent_hugepage_flags); 675 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 676 &transparent_hugepage_flags); 677 ret = 1; 678 } else if (!strcmp(str, "madvise")) { 679 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, 680 &transparent_hugepage_flags); 681 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 682 &transparent_hugepage_flags); 683 ret = 1; 684 } else if (!strcmp(str, "never")) { 685 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, 686 &transparent_hugepage_flags); 687 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 688 &transparent_hugepage_flags); 689 ret = 1; 690 } 691 out: 692 if (!ret) 693 pr_warn("transparent_hugepage= cannot parse, ignored\n"); 694 return ret; 695 } 696 __setup("transparent_hugepage=", setup_transparent_hugepage); 697 698 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) 699 { 700 if (likely(vma->vm_flags & VM_WRITE)) 701 pmd = pmd_mkwrite(pmd); 702 return pmd; 703 } 704 705 static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot) 706 { 707 pmd_t entry; 708 entry = mk_pmd(page, prot); 709 entry = pmd_mkhuge(entry); 710 return entry; 711 } 712 713 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm, 714 struct vm_area_struct *vma, 715 unsigned long haddr, pmd_t *pmd, 716 struct page *page) 717 { 718 pgtable_t pgtable; 719 spinlock_t *ptl; 720 721 VM_BUG_ON_PAGE(!PageCompound(page), page); 722 pgtable = pte_alloc_one(mm, haddr); 723 if (unlikely(!pgtable)) 724 return VM_FAULT_OOM; 725 726 clear_huge_page(page, haddr, HPAGE_PMD_NR); 727 /* 728 * The memory barrier inside __SetPageUptodate makes sure that 729 * clear_huge_page writes become visible before the set_pmd_at() 730 * write. 731 */ 732 __SetPageUptodate(page); 733 734 ptl = pmd_lock(mm, pmd); 735 if (unlikely(!pmd_none(*pmd))) { 736 spin_unlock(ptl); 737 mem_cgroup_uncharge_page(page); 738 put_page(page); 739 pte_free(mm, pgtable); 740 } else { 741 pmd_t entry; 742 entry = mk_huge_pmd(page, vma->vm_page_prot); 743 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 744 page_add_new_anon_rmap(page, vma, haddr); 745 pgtable_trans_huge_deposit(mm, pmd, pgtable); 746 set_pmd_at(mm, haddr, pmd, entry); 747 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); 748 atomic_long_inc(&mm->nr_ptes); 749 spin_unlock(ptl); 750 } 751 752 return 0; 753 } 754 755 static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp) 756 { 757 return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp; 758 } 759 760 static inline struct page *alloc_hugepage_vma(int defrag, 761 struct vm_area_struct *vma, 762 unsigned long haddr, int nd, 763 gfp_t extra_gfp) 764 { 765 return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp), 766 HPAGE_PMD_ORDER, vma, haddr, nd); 767 } 768 769 /* Caller must hold page table lock. */ 770 static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, 771 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, 772 struct page *zero_page) 773 { 774 pmd_t entry; 775 if (!pmd_none(*pmd)) 776 return false; 777 entry = mk_pmd(zero_page, vma->vm_page_prot); 778 entry = pmd_wrprotect(entry); 779 entry = pmd_mkhuge(entry); 780 pgtable_trans_huge_deposit(mm, pmd, pgtable); 781 set_pmd_at(mm, haddr, pmd, entry); 782 atomic_long_inc(&mm->nr_ptes); 783 return true; 784 } 785 786 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, 787 unsigned long address, pmd_t *pmd, 788 unsigned int flags) 789 { 790 struct page *page; 791 unsigned long haddr = address & HPAGE_PMD_MASK; 792 793 if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) 794 return VM_FAULT_FALLBACK; 795 if (unlikely(anon_vma_prepare(vma))) 796 return VM_FAULT_OOM; 797 if (unlikely(khugepaged_enter(vma))) 798 return VM_FAULT_OOM; 799 if (!(flags & FAULT_FLAG_WRITE) && 800 transparent_hugepage_use_zero_page()) { 801 spinlock_t *ptl; 802 pgtable_t pgtable; 803 struct page *zero_page; 804 bool set; 805 pgtable = pte_alloc_one(mm, haddr); 806 if (unlikely(!pgtable)) 807 return VM_FAULT_OOM; 808 zero_page = get_huge_zero_page(); 809 if (unlikely(!zero_page)) { 810 pte_free(mm, pgtable); 811 count_vm_event(THP_FAULT_FALLBACK); 812 return VM_FAULT_FALLBACK; 813 } 814 ptl = pmd_lock(mm, pmd); 815 set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd, 816 zero_page); 817 spin_unlock(ptl); 818 if (!set) { 819 pte_free(mm, pgtable); 820 put_huge_zero_page(); 821 } 822 return 0; 823 } 824 page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), 825 vma, haddr, numa_node_id(), 0); 826 if (unlikely(!page)) { 827 count_vm_event(THP_FAULT_FALLBACK); 828 return VM_FAULT_FALLBACK; 829 } 830 if (unlikely(mem_cgroup_charge_anon(page, mm, GFP_KERNEL))) { 831 put_page(page); 832 count_vm_event(THP_FAULT_FALLBACK); 833 return VM_FAULT_FALLBACK; 834 } 835 if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) { 836 mem_cgroup_uncharge_page(page); 837 put_page(page); 838 count_vm_event(THP_FAULT_FALLBACK); 839 return VM_FAULT_FALLBACK; 840 } 841 842 count_vm_event(THP_FAULT_ALLOC); 843 return 0; 844 } 845 846 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, 847 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, 848 struct vm_area_struct *vma) 849 { 850 spinlock_t *dst_ptl, *src_ptl; 851 struct page *src_page; 852 pmd_t pmd; 853 pgtable_t pgtable; 854 int ret; 855 856 ret = -ENOMEM; 857 pgtable = pte_alloc_one(dst_mm, addr); 858 if (unlikely(!pgtable)) 859 goto out; 860 861 dst_ptl = pmd_lock(dst_mm, dst_pmd); 862 src_ptl = pmd_lockptr(src_mm, src_pmd); 863 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 864 865 ret = -EAGAIN; 866 pmd = *src_pmd; 867 if (unlikely(!pmd_trans_huge(pmd))) { 868 pte_free(dst_mm, pgtable); 869 goto out_unlock; 870 } 871 /* 872 * When page table lock is held, the huge zero pmd should not be 873 * under splitting since we don't split the page itself, only pmd to 874 * a page table. 875 */ 876 if (is_huge_zero_pmd(pmd)) { 877 struct page *zero_page; 878 bool set; 879 /* 880 * get_huge_zero_page() will never allocate a new page here, 881 * since we already have a zero page to copy. It just takes a 882 * reference. 883 */ 884 zero_page = get_huge_zero_page(); 885 set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd, 886 zero_page); 887 BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */ 888 ret = 0; 889 goto out_unlock; 890 } 891 892 if (unlikely(pmd_trans_splitting(pmd))) { 893 /* split huge page running from under us */ 894 spin_unlock(src_ptl); 895 spin_unlock(dst_ptl); 896 pte_free(dst_mm, pgtable); 897 898 wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */ 899 goto out; 900 } 901 src_page = pmd_page(pmd); 902 VM_BUG_ON_PAGE(!PageHead(src_page), src_page); 903 get_page(src_page); 904 page_dup_rmap(src_page); 905 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); 906 907 pmdp_set_wrprotect(src_mm, addr, src_pmd); 908 pmd = pmd_mkold(pmd_wrprotect(pmd)); 909 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); 910 set_pmd_at(dst_mm, addr, dst_pmd, pmd); 911 atomic_long_inc(&dst_mm->nr_ptes); 912 913 ret = 0; 914 out_unlock: 915 spin_unlock(src_ptl); 916 spin_unlock(dst_ptl); 917 out: 918 return ret; 919 } 920 921 void huge_pmd_set_accessed(struct mm_struct *mm, 922 struct vm_area_struct *vma, 923 unsigned long address, 924 pmd_t *pmd, pmd_t orig_pmd, 925 int dirty) 926 { 927 spinlock_t *ptl; 928 pmd_t entry; 929 unsigned long haddr; 930 931 ptl = pmd_lock(mm, pmd); 932 if (unlikely(!pmd_same(*pmd, orig_pmd))) 933 goto unlock; 934 935 entry = pmd_mkyoung(orig_pmd); 936 haddr = address & HPAGE_PMD_MASK; 937 if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty)) 938 update_mmu_cache_pmd(vma, address, pmd); 939 940 unlock: 941 spin_unlock(ptl); 942 } 943 944 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm, 945 struct vm_area_struct *vma, 946 unsigned long address, 947 pmd_t *pmd, pmd_t orig_pmd, 948 struct page *page, 949 unsigned long haddr) 950 { 951 spinlock_t *ptl; 952 pgtable_t pgtable; 953 pmd_t _pmd; 954 int ret = 0, i; 955 struct page **pages; 956 unsigned long mmun_start; /* For mmu_notifiers */ 957 unsigned long mmun_end; /* For mmu_notifiers */ 958 959 pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR, 960 GFP_KERNEL); 961 if (unlikely(!pages)) { 962 ret |= VM_FAULT_OOM; 963 goto out; 964 } 965 966 for (i = 0; i < HPAGE_PMD_NR; i++) { 967 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE | 968 __GFP_OTHER_NODE, 969 vma, address, page_to_nid(page)); 970 if (unlikely(!pages[i] || 971 mem_cgroup_charge_anon(pages[i], mm, 972 GFP_KERNEL))) { 973 if (pages[i]) 974 put_page(pages[i]); 975 mem_cgroup_uncharge_start(); 976 while (--i >= 0) { 977 mem_cgroup_uncharge_page(pages[i]); 978 put_page(pages[i]); 979 } 980 mem_cgroup_uncharge_end(); 981 kfree(pages); 982 ret |= VM_FAULT_OOM; 983 goto out; 984 } 985 } 986 987 for (i = 0; i < HPAGE_PMD_NR; i++) { 988 copy_user_highpage(pages[i], page + i, 989 haddr + PAGE_SIZE * i, vma); 990 __SetPageUptodate(pages[i]); 991 cond_resched(); 992 } 993 994 mmun_start = haddr; 995 mmun_end = haddr + HPAGE_PMD_SIZE; 996 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 997 998 ptl = pmd_lock(mm, pmd); 999 if (unlikely(!pmd_same(*pmd, orig_pmd))) 1000 goto out_free_pages; 1001 VM_BUG_ON_PAGE(!PageHead(page), page); 1002 1003 pmdp_clear_flush(vma, haddr, pmd); 1004 /* leave pmd empty until pte is filled */ 1005 1006 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1007 pmd_populate(mm, &_pmd, pgtable); 1008 1009 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 1010 pte_t *pte, entry; 1011 entry = mk_pte(pages[i], vma->vm_page_prot); 1012 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 1013 page_add_new_anon_rmap(pages[i], vma, haddr); 1014 pte = pte_offset_map(&_pmd, haddr); 1015 VM_BUG_ON(!pte_none(*pte)); 1016 set_pte_at(mm, haddr, pte, entry); 1017 pte_unmap(pte); 1018 } 1019 kfree(pages); 1020 1021 smp_wmb(); /* make pte visible before pmd */ 1022 pmd_populate(mm, pmd, pgtable); 1023 page_remove_rmap(page); 1024 spin_unlock(ptl); 1025 1026 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1027 1028 ret |= VM_FAULT_WRITE; 1029 put_page(page); 1030 1031 out: 1032 return ret; 1033 1034 out_free_pages: 1035 spin_unlock(ptl); 1036 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1037 mem_cgroup_uncharge_start(); 1038 for (i = 0; i < HPAGE_PMD_NR; i++) { 1039 mem_cgroup_uncharge_page(pages[i]); 1040 put_page(pages[i]); 1041 } 1042 mem_cgroup_uncharge_end(); 1043 kfree(pages); 1044 goto out; 1045 } 1046 1047 int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, 1048 unsigned long address, pmd_t *pmd, pmd_t orig_pmd) 1049 { 1050 spinlock_t *ptl; 1051 int ret = 0; 1052 struct page *page = NULL, *new_page; 1053 unsigned long haddr; 1054 unsigned long mmun_start; /* For mmu_notifiers */ 1055 unsigned long mmun_end; /* For mmu_notifiers */ 1056 1057 ptl = pmd_lockptr(mm, pmd); 1058 VM_BUG_ON(!vma->anon_vma); 1059 haddr = address & HPAGE_PMD_MASK; 1060 if (is_huge_zero_pmd(orig_pmd)) 1061 goto alloc; 1062 spin_lock(ptl); 1063 if (unlikely(!pmd_same(*pmd, orig_pmd))) 1064 goto out_unlock; 1065 1066 page = pmd_page(orig_pmd); 1067 VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page); 1068 if (page_mapcount(page) == 1) { 1069 pmd_t entry; 1070 entry = pmd_mkyoung(orig_pmd); 1071 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 1072 if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1)) 1073 update_mmu_cache_pmd(vma, address, pmd); 1074 ret |= VM_FAULT_WRITE; 1075 goto out_unlock; 1076 } 1077 get_page(page); 1078 spin_unlock(ptl); 1079 alloc: 1080 if (transparent_hugepage_enabled(vma) && 1081 !transparent_hugepage_debug_cow()) 1082 new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), 1083 vma, haddr, numa_node_id(), 0); 1084 else 1085 new_page = NULL; 1086 1087 if (unlikely(!new_page)) { 1088 if (!page) { 1089 split_huge_page_pmd(vma, address, pmd); 1090 ret |= VM_FAULT_FALLBACK; 1091 } else { 1092 ret = do_huge_pmd_wp_page_fallback(mm, vma, address, 1093 pmd, orig_pmd, page, haddr); 1094 if (ret & VM_FAULT_OOM) { 1095 split_huge_page(page); 1096 ret |= VM_FAULT_FALLBACK; 1097 } 1098 put_page(page); 1099 } 1100 count_vm_event(THP_FAULT_FALLBACK); 1101 goto out; 1102 } 1103 1104 if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL))) { 1105 put_page(new_page); 1106 if (page) { 1107 split_huge_page(page); 1108 put_page(page); 1109 } else 1110 split_huge_page_pmd(vma, address, pmd); 1111 ret |= VM_FAULT_FALLBACK; 1112 count_vm_event(THP_FAULT_FALLBACK); 1113 goto out; 1114 } 1115 1116 count_vm_event(THP_FAULT_ALLOC); 1117 1118 if (!page) 1119 clear_huge_page(new_page, haddr, HPAGE_PMD_NR); 1120 else 1121 copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); 1122 __SetPageUptodate(new_page); 1123 1124 mmun_start = haddr; 1125 mmun_end = haddr + HPAGE_PMD_SIZE; 1126 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 1127 1128 spin_lock(ptl); 1129 if (page) 1130 put_page(page); 1131 if (unlikely(!pmd_same(*pmd, orig_pmd))) { 1132 spin_unlock(ptl); 1133 mem_cgroup_uncharge_page(new_page); 1134 put_page(new_page); 1135 goto out_mn; 1136 } else { 1137 pmd_t entry; 1138 entry = mk_huge_pmd(new_page, vma->vm_page_prot); 1139 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 1140 pmdp_clear_flush(vma, haddr, pmd); 1141 page_add_new_anon_rmap(new_page, vma, haddr); 1142 set_pmd_at(mm, haddr, pmd, entry); 1143 update_mmu_cache_pmd(vma, address, pmd); 1144 if (!page) { 1145 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); 1146 put_huge_zero_page(); 1147 } else { 1148 VM_BUG_ON_PAGE(!PageHead(page), page); 1149 page_remove_rmap(page); 1150 put_page(page); 1151 } 1152 ret |= VM_FAULT_WRITE; 1153 } 1154 spin_unlock(ptl); 1155 out_mn: 1156 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1157 out: 1158 return ret; 1159 out_unlock: 1160 spin_unlock(ptl); 1161 return ret; 1162 } 1163 1164 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, 1165 unsigned long addr, 1166 pmd_t *pmd, 1167 unsigned int flags) 1168 { 1169 struct mm_struct *mm = vma->vm_mm; 1170 struct page *page = NULL; 1171 1172 assert_spin_locked(pmd_lockptr(mm, pmd)); 1173 1174 if (flags & FOLL_WRITE && !pmd_write(*pmd)) 1175 goto out; 1176 1177 /* Avoid dumping huge zero page */ 1178 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) 1179 return ERR_PTR(-EFAULT); 1180 1181 /* Full NUMA hinting faults to serialise migration in fault paths */ 1182 if ((flags & FOLL_NUMA) && pmd_numa(*pmd)) 1183 goto out; 1184 1185 page = pmd_page(*pmd); 1186 VM_BUG_ON_PAGE(!PageHead(page), page); 1187 if (flags & FOLL_TOUCH) { 1188 pmd_t _pmd; 1189 /* 1190 * We should set the dirty bit only for FOLL_WRITE but 1191 * for now the dirty bit in the pmd is meaningless. 1192 * And if the dirty bit will become meaningful and 1193 * we'll only set it with FOLL_WRITE, an atomic 1194 * set_bit will be required on the pmd to set the 1195 * young bit, instead of the current set_pmd_at. 1196 */ 1197 _pmd = pmd_mkyoung(pmd_mkdirty(*pmd)); 1198 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, 1199 pmd, _pmd, 1)) 1200 update_mmu_cache_pmd(vma, addr, pmd); 1201 } 1202 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { 1203 if (page->mapping && trylock_page(page)) { 1204 lru_add_drain(); 1205 if (page->mapping) 1206 mlock_vma_page(page); 1207 unlock_page(page); 1208 } 1209 } 1210 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; 1211 VM_BUG_ON_PAGE(!PageCompound(page), page); 1212 if (flags & FOLL_GET) 1213 get_page_foll(page); 1214 1215 out: 1216 return page; 1217 } 1218 1219 /* NUMA hinting page fault entry point for trans huge pmds */ 1220 int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, 1221 unsigned long addr, pmd_t pmd, pmd_t *pmdp) 1222 { 1223 spinlock_t *ptl; 1224 struct anon_vma *anon_vma = NULL; 1225 struct page *page; 1226 unsigned long haddr = addr & HPAGE_PMD_MASK; 1227 int page_nid = -1, this_nid = numa_node_id(); 1228 int target_nid, last_cpupid = -1; 1229 bool page_locked; 1230 bool migrated = false; 1231 int flags = 0; 1232 1233 ptl = pmd_lock(mm, pmdp); 1234 if (unlikely(!pmd_same(pmd, *pmdp))) 1235 goto out_unlock; 1236 1237 /* 1238 * If there are potential migrations, wait for completion and retry 1239 * without disrupting NUMA hinting information. Do not relock and 1240 * check_same as the page may no longer be mapped. 1241 */ 1242 if (unlikely(pmd_trans_migrating(*pmdp))) { 1243 spin_unlock(ptl); 1244 wait_migrate_huge_page(vma->anon_vma, pmdp); 1245 goto out; 1246 } 1247 1248 page = pmd_page(pmd); 1249 BUG_ON(is_huge_zero_page(page)); 1250 page_nid = page_to_nid(page); 1251 last_cpupid = page_cpupid_last(page); 1252 count_vm_numa_event(NUMA_HINT_FAULTS); 1253 if (page_nid == this_nid) { 1254 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); 1255 flags |= TNF_FAULT_LOCAL; 1256 } 1257 1258 /* 1259 * Avoid grouping on DSO/COW pages in specific and RO pages 1260 * in general, RO pages shouldn't hurt as much anyway since 1261 * they can be in shared cache state. 1262 */ 1263 if (!pmd_write(pmd)) 1264 flags |= TNF_NO_GROUP; 1265 1266 /* 1267 * Acquire the page lock to serialise THP migrations but avoid dropping 1268 * page_table_lock if at all possible 1269 */ 1270 page_locked = trylock_page(page); 1271 target_nid = mpol_misplaced(page, vma, haddr); 1272 if (target_nid == -1) { 1273 /* If the page was locked, there are no parallel migrations */ 1274 if (page_locked) 1275 goto clear_pmdnuma; 1276 } 1277 1278 /* Migration could have started since the pmd_trans_migrating check */ 1279 if (!page_locked) { 1280 spin_unlock(ptl); 1281 wait_on_page_locked(page); 1282 page_nid = -1; 1283 goto out; 1284 } 1285 1286 /* 1287 * Page is misplaced. Page lock serialises migrations. Acquire anon_vma 1288 * to serialises splits 1289 */ 1290 get_page(page); 1291 spin_unlock(ptl); 1292 anon_vma = page_lock_anon_vma_read(page); 1293 1294 /* Confirm the PMD did not change while page_table_lock was released */ 1295 spin_lock(ptl); 1296 if (unlikely(!pmd_same(pmd, *pmdp))) { 1297 unlock_page(page); 1298 put_page(page); 1299 page_nid = -1; 1300 goto out_unlock; 1301 } 1302 1303 /* Bail if we fail to protect against THP splits for any reason */ 1304 if (unlikely(!anon_vma)) { 1305 put_page(page); 1306 page_nid = -1; 1307 goto clear_pmdnuma; 1308 } 1309 1310 /* 1311 * Migrate the THP to the requested node, returns with page unlocked 1312 * and pmd_numa cleared. 1313 */ 1314 spin_unlock(ptl); 1315 migrated = migrate_misplaced_transhuge_page(mm, vma, 1316 pmdp, pmd, addr, page, target_nid); 1317 if (migrated) { 1318 flags |= TNF_MIGRATED; 1319 page_nid = target_nid; 1320 } 1321 1322 goto out; 1323 clear_pmdnuma: 1324 BUG_ON(!PageLocked(page)); 1325 pmd = pmd_mknonnuma(pmd); 1326 set_pmd_at(mm, haddr, pmdp, pmd); 1327 VM_BUG_ON(pmd_numa(*pmdp)); 1328 update_mmu_cache_pmd(vma, addr, pmdp); 1329 unlock_page(page); 1330 out_unlock: 1331 spin_unlock(ptl); 1332 1333 out: 1334 if (anon_vma) 1335 page_unlock_anon_vma_read(anon_vma); 1336 1337 if (page_nid != -1) 1338 task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags); 1339 1340 return 0; 1341 } 1342 1343 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1344 pmd_t *pmd, unsigned long addr) 1345 { 1346 spinlock_t *ptl; 1347 int ret = 0; 1348 1349 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { 1350 struct page *page; 1351 pgtable_t pgtable; 1352 pmd_t orig_pmd; 1353 /* 1354 * For architectures like ppc64 we look at deposited pgtable 1355 * when calling pmdp_get_and_clear. So do the 1356 * pgtable_trans_huge_withdraw after finishing pmdp related 1357 * operations. 1358 */ 1359 orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd); 1360 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 1361 pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd); 1362 if (is_huge_zero_pmd(orig_pmd)) { 1363 atomic_long_dec(&tlb->mm->nr_ptes); 1364 spin_unlock(ptl); 1365 put_huge_zero_page(); 1366 } else { 1367 page = pmd_page(orig_pmd); 1368 page_remove_rmap(page); 1369 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); 1370 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); 1371 VM_BUG_ON_PAGE(!PageHead(page), page); 1372 atomic_long_dec(&tlb->mm->nr_ptes); 1373 spin_unlock(ptl); 1374 tlb_remove_page(tlb, page); 1375 } 1376 pte_free(tlb->mm, pgtable); 1377 ret = 1; 1378 } 1379 return ret; 1380 } 1381 1382 int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 1383 unsigned long addr, unsigned long end, 1384 unsigned char *vec) 1385 { 1386 spinlock_t *ptl; 1387 int ret = 0; 1388 1389 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { 1390 /* 1391 * All logical pages in the range are present 1392 * if backed by a huge page. 1393 */ 1394 spin_unlock(ptl); 1395 memset(vec, 1, (end - addr) >> PAGE_SHIFT); 1396 ret = 1; 1397 } 1398 1399 return ret; 1400 } 1401 1402 int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma, 1403 unsigned long old_addr, 1404 unsigned long new_addr, unsigned long old_end, 1405 pmd_t *old_pmd, pmd_t *new_pmd) 1406 { 1407 spinlock_t *old_ptl, *new_ptl; 1408 int ret = 0; 1409 pmd_t pmd; 1410 1411 struct mm_struct *mm = vma->vm_mm; 1412 1413 if ((old_addr & ~HPAGE_PMD_MASK) || 1414 (new_addr & ~HPAGE_PMD_MASK) || 1415 old_end - old_addr < HPAGE_PMD_SIZE || 1416 (new_vma->vm_flags & VM_NOHUGEPAGE)) 1417 goto out; 1418 1419 /* 1420 * The destination pmd shouldn't be established, free_pgtables() 1421 * should have release it. 1422 */ 1423 if (WARN_ON(!pmd_none(*new_pmd))) { 1424 VM_BUG_ON(pmd_trans_huge(*new_pmd)); 1425 goto out; 1426 } 1427 1428 /* 1429 * We don't have to worry about the ordering of src and dst 1430 * ptlocks because exclusive mmap_sem prevents deadlock. 1431 */ 1432 ret = __pmd_trans_huge_lock(old_pmd, vma, &old_ptl); 1433 if (ret == 1) { 1434 new_ptl = pmd_lockptr(mm, new_pmd); 1435 if (new_ptl != old_ptl) 1436 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); 1437 pmd = pmdp_get_and_clear(mm, old_addr, old_pmd); 1438 VM_BUG_ON(!pmd_none(*new_pmd)); 1439 1440 if (pmd_move_must_withdraw(new_ptl, old_ptl)) { 1441 pgtable_t pgtable; 1442 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); 1443 pgtable_trans_huge_deposit(mm, new_pmd, pgtable); 1444 } 1445 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); 1446 if (new_ptl != old_ptl) 1447 spin_unlock(new_ptl); 1448 spin_unlock(old_ptl); 1449 } 1450 out: 1451 return ret; 1452 } 1453 1454 /* 1455 * Returns 1456 * - 0 if PMD could not be locked 1457 * - 1 if PMD was locked but protections unchange and TLB flush unnecessary 1458 * - HPAGE_PMD_NR is protections changed and TLB flush necessary 1459 */ 1460 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 1461 unsigned long addr, pgprot_t newprot, int prot_numa) 1462 { 1463 struct mm_struct *mm = vma->vm_mm; 1464 spinlock_t *ptl; 1465 int ret = 0; 1466 1467 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { 1468 pmd_t entry; 1469 ret = 1; 1470 if (!prot_numa) { 1471 entry = pmdp_get_and_clear(mm, addr, pmd); 1472 if (pmd_numa(entry)) 1473 entry = pmd_mknonnuma(entry); 1474 entry = pmd_modify(entry, newprot); 1475 ret = HPAGE_PMD_NR; 1476 set_pmd_at(mm, addr, pmd, entry); 1477 BUG_ON(pmd_write(entry)); 1478 } else { 1479 struct page *page = pmd_page(*pmd); 1480 1481 /* 1482 * Do not trap faults against the zero page. The 1483 * read-only data is likely to be read-cached on the 1484 * local CPU cache and it is less useful to know about 1485 * local vs remote hits on the zero page. 1486 */ 1487 if (!is_huge_zero_page(page) && 1488 !pmd_numa(*pmd)) { 1489 pmdp_set_numa(mm, addr, pmd); 1490 ret = HPAGE_PMD_NR; 1491 } 1492 } 1493 spin_unlock(ptl); 1494 } 1495 1496 return ret; 1497 } 1498 1499 /* 1500 * Returns 1 if a given pmd maps a stable (not under splitting) thp. 1501 * Returns -1 if it maps a thp under splitting. Returns 0 otherwise. 1502 * 1503 * Note that if it returns 1, this routine returns without unlocking page 1504 * table locks. So callers must unlock them. 1505 */ 1506 int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, 1507 spinlock_t **ptl) 1508 { 1509 *ptl = pmd_lock(vma->vm_mm, pmd); 1510 if (likely(pmd_trans_huge(*pmd))) { 1511 if (unlikely(pmd_trans_splitting(*pmd))) { 1512 spin_unlock(*ptl); 1513 wait_split_huge_page(vma->anon_vma, pmd); 1514 return -1; 1515 } else { 1516 /* Thp mapped by 'pmd' is stable, so we can 1517 * handle it as it is. */ 1518 return 1; 1519 } 1520 } 1521 spin_unlock(*ptl); 1522 return 0; 1523 } 1524 1525 /* 1526 * This function returns whether a given @page is mapped onto the @address 1527 * in the virtual space of @mm. 1528 * 1529 * When it's true, this function returns *pmd with holding the page table lock 1530 * and passing it back to the caller via @ptl. 1531 * If it's false, returns NULL without holding the page table lock. 1532 */ 1533 pmd_t *page_check_address_pmd(struct page *page, 1534 struct mm_struct *mm, 1535 unsigned long address, 1536 enum page_check_address_pmd_flag flag, 1537 spinlock_t **ptl) 1538 { 1539 pgd_t *pgd; 1540 pud_t *pud; 1541 pmd_t *pmd; 1542 1543 if (address & ~HPAGE_PMD_MASK) 1544 return NULL; 1545 1546 pgd = pgd_offset(mm, address); 1547 if (!pgd_present(*pgd)) 1548 return NULL; 1549 pud = pud_offset(pgd, address); 1550 if (!pud_present(*pud)) 1551 return NULL; 1552 pmd = pmd_offset(pud, address); 1553 1554 *ptl = pmd_lock(mm, pmd); 1555 if (!pmd_present(*pmd)) 1556 goto unlock; 1557 if (pmd_page(*pmd) != page) 1558 goto unlock; 1559 /* 1560 * split_vma() may create temporary aliased mappings. There is 1561 * no risk as long as all huge pmd are found and have their 1562 * splitting bit set before __split_huge_page_refcount 1563 * runs. Finding the same huge pmd more than once during the 1564 * same rmap walk is not a problem. 1565 */ 1566 if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG && 1567 pmd_trans_splitting(*pmd)) 1568 goto unlock; 1569 if (pmd_trans_huge(*pmd)) { 1570 VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG && 1571 !pmd_trans_splitting(*pmd)); 1572 return pmd; 1573 } 1574 unlock: 1575 spin_unlock(*ptl); 1576 return NULL; 1577 } 1578 1579 static int __split_huge_page_splitting(struct page *page, 1580 struct vm_area_struct *vma, 1581 unsigned long address) 1582 { 1583 struct mm_struct *mm = vma->vm_mm; 1584 spinlock_t *ptl; 1585 pmd_t *pmd; 1586 int ret = 0; 1587 /* For mmu_notifiers */ 1588 const unsigned long mmun_start = address; 1589 const unsigned long mmun_end = address + HPAGE_PMD_SIZE; 1590 1591 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 1592 pmd = page_check_address_pmd(page, mm, address, 1593 PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, &ptl); 1594 if (pmd) { 1595 /* 1596 * We can't temporarily set the pmd to null in order 1597 * to split it, the pmd must remain marked huge at all 1598 * times or the VM won't take the pmd_trans_huge paths 1599 * and it won't wait on the anon_vma->root->rwsem to 1600 * serialize against split_huge_page*. 1601 */ 1602 pmdp_splitting_flush(vma, address, pmd); 1603 ret = 1; 1604 spin_unlock(ptl); 1605 } 1606 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1607 1608 return ret; 1609 } 1610 1611 static void __split_huge_page_refcount(struct page *page, 1612 struct list_head *list) 1613 { 1614 int i; 1615 struct zone *zone = page_zone(page); 1616 struct lruvec *lruvec; 1617 int tail_count = 0; 1618 1619 /* prevent PageLRU to go away from under us, and freeze lru stats */ 1620 spin_lock_irq(&zone->lru_lock); 1621 lruvec = mem_cgroup_page_lruvec(page, zone); 1622 1623 compound_lock(page); 1624 /* complete memcg works before add pages to LRU */ 1625 mem_cgroup_split_huge_fixup(page); 1626 1627 for (i = HPAGE_PMD_NR - 1; i >= 1; i--) { 1628 struct page *page_tail = page + i; 1629 1630 /* tail_page->_mapcount cannot change */ 1631 BUG_ON(page_mapcount(page_tail) < 0); 1632 tail_count += page_mapcount(page_tail); 1633 /* check for overflow */ 1634 BUG_ON(tail_count < 0); 1635 BUG_ON(atomic_read(&page_tail->_count) != 0); 1636 /* 1637 * tail_page->_count is zero and not changing from 1638 * under us. But get_page_unless_zero() may be running 1639 * from under us on the tail_page. If we used 1640 * atomic_set() below instead of atomic_add(), we 1641 * would then run atomic_set() concurrently with 1642 * get_page_unless_zero(), and atomic_set() is 1643 * implemented in C not using locked ops. spin_unlock 1644 * on x86 sometime uses locked ops because of PPro 1645 * errata 66, 92, so unless somebody can guarantee 1646 * atomic_set() here would be safe on all archs (and 1647 * not only on x86), it's safer to use atomic_add(). 1648 */ 1649 atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1, 1650 &page_tail->_count); 1651 1652 /* after clearing PageTail the gup refcount can be released */ 1653 smp_mb(); 1654 1655 /* 1656 * retain hwpoison flag of the poisoned tail page: 1657 * fix for the unsuitable process killed on Guest Machine(KVM) 1658 * by the memory-failure. 1659 */ 1660 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON; 1661 page_tail->flags |= (page->flags & 1662 ((1L << PG_referenced) | 1663 (1L << PG_swapbacked) | 1664 (1L << PG_mlocked) | 1665 (1L << PG_uptodate) | 1666 (1L << PG_active) | 1667 (1L << PG_unevictable))); 1668 page_tail->flags |= (1L << PG_dirty); 1669 1670 /* clear PageTail before overwriting first_page */ 1671 smp_wmb(); 1672 1673 /* 1674 * __split_huge_page_splitting() already set the 1675 * splitting bit in all pmd that could map this 1676 * hugepage, that will ensure no CPU can alter the 1677 * mapcount on the head page. The mapcount is only 1678 * accounted in the head page and it has to be 1679 * transferred to all tail pages in the below code. So 1680 * for this code to be safe, the split the mapcount 1681 * can't change. But that doesn't mean userland can't 1682 * keep changing and reading the page contents while 1683 * we transfer the mapcount, so the pmd splitting 1684 * status is achieved setting a reserved bit in the 1685 * pmd, not by clearing the present bit. 1686 */ 1687 page_tail->_mapcount = page->_mapcount; 1688 1689 BUG_ON(page_tail->mapping); 1690 page_tail->mapping = page->mapping; 1691 1692 page_tail->index = page->index + i; 1693 page_cpupid_xchg_last(page_tail, page_cpupid_last(page)); 1694 1695 BUG_ON(!PageAnon(page_tail)); 1696 BUG_ON(!PageUptodate(page_tail)); 1697 BUG_ON(!PageDirty(page_tail)); 1698 BUG_ON(!PageSwapBacked(page_tail)); 1699 1700 lru_add_page_tail(page, page_tail, lruvec, list); 1701 } 1702 atomic_sub(tail_count, &page->_count); 1703 BUG_ON(atomic_read(&page->_count) <= 0); 1704 1705 __mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1); 1706 1707 ClearPageCompound(page); 1708 compound_unlock(page); 1709 spin_unlock_irq(&zone->lru_lock); 1710 1711 for (i = 1; i < HPAGE_PMD_NR; i++) { 1712 struct page *page_tail = page + i; 1713 BUG_ON(page_count(page_tail) <= 0); 1714 /* 1715 * Tail pages may be freed if there wasn't any mapping 1716 * like if add_to_swap() is running on a lru page that 1717 * had its mapping zapped. And freeing these pages 1718 * requires taking the lru_lock so we do the put_page 1719 * of the tail pages after the split is complete. 1720 */ 1721 put_page(page_tail); 1722 } 1723 1724 /* 1725 * Only the head page (now become a regular page) is required 1726 * to be pinned by the caller. 1727 */ 1728 BUG_ON(page_count(page) <= 0); 1729 } 1730 1731 static int __split_huge_page_map(struct page *page, 1732 struct vm_area_struct *vma, 1733 unsigned long address) 1734 { 1735 struct mm_struct *mm = vma->vm_mm; 1736 spinlock_t *ptl; 1737 pmd_t *pmd, _pmd; 1738 int ret = 0, i; 1739 pgtable_t pgtable; 1740 unsigned long haddr; 1741 1742 pmd = page_check_address_pmd(page, mm, address, 1743 PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, &ptl); 1744 if (pmd) { 1745 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1746 pmd_populate(mm, &_pmd, pgtable); 1747 1748 haddr = address; 1749 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 1750 pte_t *pte, entry; 1751 BUG_ON(PageCompound(page+i)); 1752 entry = mk_pte(page + i, vma->vm_page_prot); 1753 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 1754 if (!pmd_write(*pmd)) 1755 entry = pte_wrprotect(entry); 1756 else 1757 BUG_ON(page_mapcount(page) != 1); 1758 if (!pmd_young(*pmd)) 1759 entry = pte_mkold(entry); 1760 if (pmd_numa(*pmd)) 1761 entry = pte_mknuma(entry); 1762 pte = pte_offset_map(&_pmd, haddr); 1763 BUG_ON(!pte_none(*pte)); 1764 set_pte_at(mm, haddr, pte, entry); 1765 pte_unmap(pte); 1766 } 1767 1768 smp_wmb(); /* make pte visible before pmd */ 1769 /* 1770 * Up to this point the pmd is present and huge and 1771 * userland has the whole access to the hugepage 1772 * during the split (which happens in place). If we 1773 * overwrite the pmd with the not-huge version 1774 * pointing to the pte here (which of course we could 1775 * if all CPUs were bug free), userland could trigger 1776 * a small page size TLB miss on the small sized TLB 1777 * while the hugepage TLB entry is still established 1778 * in the huge TLB. Some CPU doesn't like that. See 1779 * http://support.amd.com/us/Processor_TechDocs/41322.pdf, 1780 * Erratum 383 on page 93. Intel should be safe but is 1781 * also warns that it's only safe if the permission 1782 * and cache attributes of the two entries loaded in 1783 * the two TLB is identical (which should be the case 1784 * here). But it is generally safer to never allow 1785 * small and huge TLB entries for the same virtual 1786 * address to be loaded simultaneously. So instead of 1787 * doing "pmd_populate(); flush_tlb_range();" we first 1788 * mark the current pmd notpresent (atomically because 1789 * here the pmd_trans_huge and pmd_trans_splitting 1790 * must remain set at all times on the pmd until the 1791 * split is complete for this pmd), then we flush the 1792 * SMP TLB and finally we write the non-huge version 1793 * of the pmd entry with pmd_populate. 1794 */ 1795 pmdp_invalidate(vma, address, pmd); 1796 pmd_populate(mm, pmd, pgtable); 1797 ret = 1; 1798 spin_unlock(ptl); 1799 } 1800 1801 return ret; 1802 } 1803 1804 /* must be called with anon_vma->root->rwsem held */ 1805 static void __split_huge_page(struct page *page, 1806 struct anon_vma *anon_vma, 1807 struct list_head *list) 1808 { 1809 int mapcount, mapcount2; 1810 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 1811 struct anon_vma_chain *avc; 1812 1813 BUG_ON(!PageHead(page)); 1814 BUG_ON(PageTail(page)); 1815 1816 mapcount = 0; 1817 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { 1818 struct vm_area_struct *vma = avc->vma; 1819 unsigned long addr = vma_address(page, vma); 1820 BUG_ON(is_vma_temporary_stack(vma)); 1821 mapcount += __split_huge_page_splitting(page, vma, addr); 1822 } 1823 /* 1824 * It is critical that new vmas are added to the tail of the 1825 * anon_vma list. This guarantes that if copy_huge_pmd() runs 1826 * and establishes a child pmd before 1827 * __split_huge_page_splitting() freezes the parent pmd (so if 1828 * we fail to prevent copy_huge_pmd() from running until the 1829 * whole __split_huge_page() is complete), we will still see 1830 * the newly established pmd of the child later during the 1831 * walk, to be able to set it as pmd_trans_splitting too. 1832 */ 1833 if (mapcount != page_mapcount(page)) { 1834 pr_err("mapcount %d page_mapcount %d\n", 1835 mapcount, page_mapcount(page)); 1836 BUG(); 1837 } 1838 1839 __split_huge_page_refcount(page, list); 1840 1841 mapcount2 = 0; 1842 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { 1843 struct vm_area_struct *vma = avc->vma; 1844 unsigned long addr = vma_address(page, vma); 1845 BUG_ON(is_vma_temporary_stack(vma)); 1846 mapcount2 += __split_huge_page_map(page, vma, addr); 1847 } 1848 if (mapcount != mapcount2) { 1849 pr_err("mapcount %d mapcount2 %d page_mapcount %d\n", 1850 mapcount, mapcount2, page_mapcount(page)); 1851 BUG(); 1852 } 1853 } 1854 1855 /* 1856 * Split a hugepage into normal pages. This doesn't change the position of head 1857 * page. If @list is null, tail pages will be added to LRU list, otherwise, to 1858 * @list. Both head page and tail pages will inherit mapping, flags, and so on 1859 * from the hugepage. 1860 * Return 0 if the hugepage is split successfully otherwise return 1. 1861 */ 1862 int split_huge_page_to_list(struct page *page, struct list_head *list) 1863 { 1864 struct anon_vma *anon_vma; 1865 int ret = 1; 1866 1867 BUG_ON(is_huge_zero_page(page)); 1868 BUG_ON(!PageAnon(page)); 1869 1870 /* 1871 * The caller does not necessarily hold an mmap_sem that would prevent 1872 * the anon_vma disappearing so we first we take a reference to it 1873 * and then lock the anon_vma for write. This is similar to 1874 * page_lock_anon_vma_read except the write lock is taken to serialise 1875 * against parallel split or collapse operations. 1876 */ 1877 anon_vma = page_get_anon_vma(page); 1878 if (!anon_vma) 1879 goto out; 1880 anon_vma_lock_write(anon_vma); 1881 1882 ret = 0; 1883 if (!PageCompound(page)) 1884 goto out_unlock; 1885 1886 BUG_ON(!PageSwapBacked(page)); 1887 __split_huge_page(page, anon_vma, list); 1888 count_vm_event(THP_SPLIT); 1889 1890 BUG_ON(PageCompound(page)); 1891 out_unlock: 1892 anon_vma_unlock_write(anon_vma); 1893 put_anon_vma(anon_vma); 1894 out: 1895 return ret; 1896 } 1897 1898 #define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE) 1899 1900 int hugepage_madvise(struct vm_area_struct *vma, 1901 unsigned long *vm_flags, int advice) 1902 { 1903 switch (advice) { 1904 case MADV_HUGEPAGE: 1905 #ifdef CONFIG_S390 1906 /* 1907 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390 1908 * can't handle this properly after s390_enable_sie, so we simply 1909 * ignore the madvise to prevent qemu from causing a SIGSEGV. 1910 */ 1911 if (mm_has_pgste(vma->vm_mm)) 1912 return 0; 1913 #endif 1914 /* 1915 * Be somewhat over-protective like KSM for now! 1916 */ 1917 if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP)) 1918 return -EINVAL; 1919 *vm_flags &= ~VM_NOHUGEPAGE; 1920 *vm_flags |= VM_HUGEPAGE; 1921 /* 1922 * If the vma become good for khugepaged to scan, 1923 * register it here without waiting a page fault that 1924 * may not happen any time soon. 1925 */ 1926 if (unlikely(khugepaged_enter_vma_merge(vma))) 1927 return -ENOMEM; 1928 break; 1929 case MADV_NOHUGEPAGE: 1930 /* 1931 * Be somewhat over-protective like KSM for now! 1932 */ 1933 if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP)) 1934 return -EINVAL; 1935 *vm_flags &= ~VM_HUGEPAGE; 1936 *vm_flags |= VM_NOHUGEPAGE; 1937 /* 1938 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning 1939 * this vma even if we leave the mm registered in khugepaged if 1940 * it got registered before VM_NOHUGEPAGE was set. 1941 */ 1942 break; 1943 } 1944 1945 return 0; 1946 } 1947 1948 static int __init khugepaged_slab_init(void) 1949 { 1950 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot", 1951 sizeof(struct mm_slot), 1952 __alignof__(struct mm_slot), 0, NULL); 1953 if (!mm_slot_cache) 1954 return -ENOMEM; 1955 1956 return 0; 1957 } 1958 1959 static inline struct mm_slot *alloc_mm_slot(void) 1960 { 1961 if (!mm_slot_cache) /* initialization failed */ 1962 return NULL; 1963 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL); 1964 } 1965 1966 static inline void free_mm_slot(struct mm_slot *mm_slot) 1967 { 1968 kmem_cache_free(mm_slot_cache, mm_slot); 1969 } 1970 1971 static struct mm_slot *get_mm_slot(struct mm_struct *mm) 1972 { 1973 struct mm_slot *mm_slot; 1974 1975 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm) 1976 if (mm == mm_slot->mm) 1977 return mm_slot; 1978 1979 return NULL; 1980 } 1981 1982 static void insert_to_mm_slots_hash(struct mm_struct *mm, 1983 struct mm_slot *mm_slot) 1984 { 1985 mm_slot->mm = mm; 1986 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm); 1987 } 1988 1989 static inline int khugepaged_test_exit(struct mm_struct *mm) 1990 { 1991 return atomic_read(&mm->mm_users) == 0; 1992 } 1993 1994 int __khugepaged_enter(struct mm_struct *mm) 1995 { 1996 struct mm_slot *mm_slot; 1997 int wakeup; 1998 1999 mm_slot = alloc_mm_slot(); 2000 if (!mm_slot) 2001 return -ENOMEM; 2002 2003 /* __khugepaged_exit() must not run from under us */ 2004 VM_BUG_ON(khugepaged_test_exit(mm)); 2005 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { 2006 free_mm_slot(mm_slot); 2007 return 0; 2008 } 2009 2010 spin_lock(&khugepaged_mm_lock); 2011 insert_to_mm_slots_hash(mm, mm_slot); 2012 /* 2013 * Insert just behind the scanning cursor, to let the area settle 2014 * down a little. 2015 */ 2016 wakeup = list_empty(&khugepaged_scan.mm_head); 2017 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head); 2018 spin_unlock(&khugepaged_mm_lock); 2019 2020 atomic_inc(&mm->mm_count); 2021 if (wakeup) 2022 wake_up_interruptible(&khugepaged_wait); 2023 2024 return 0; 2025 } 2026 2027 int khugepaged_enter_vma_merge(struct vm_area_struct *vma) 2028 { 2029 unsigned long hstart, hend; 2030 if (!vma->anon_vma) 2031 /* 2032 * Not yet faulted in so we will register later in the 2033 * page fault if needed. 2034 */ 2035 return 0; 2036 if (vma->vm_ops) 2037 /* khugepaged not yet working on file or special mappings */ 2038 return 0; 2039 VM_BUG_ON(vma->vm_flags & VM_NO_THP); 2040 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2041 hend = vma->vm_end & HPAGE_PMD_MASK; 2042 if (hstart < hend) 2043 return khugepaged_enter(vma); 2044 return 0; 2045 } 2046 2047 void __khugepaged_exit(struct mm_struct *mm) 2048 { 2049 struct mm_slot *mm_slot; 2050 int free = 0; 2051 2052 spin_lock(&khugepaged_mm_lock); 2053 mm_slot = get_mm_slot(mm); 2054 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) { 2055 hash_del(&mm_slot->hash); 2056 list_del(&mm_slot->mm_node); 2057 free = 1; 2058 } 2059 spin_unlock(&khugepaged_mm_lock); 2060 2061 if (free) { 2062 clear_bit(MMF_VM_HUGEPAGE, &mm->flags); 2063 free_mm_slot(mm_slot); 2064 mmdrop(mm); 2065 } else if (mm_slot) { 2066 /* 2067 * This is required to serialize against 2068 * khugepaged_test_exit() (which is guaranteed to run 2069 * under mmap sem read mode). Stop here (after we 2070 * return all pagetables will be destroyed) until 2071 * khugepaged has finished working on the pagetables 2072 * under the mmap_sem. 2073 */ 2074 down_write(&mm->mmap_sem); 2075 up_write(&mm->mmap_sem); 2076 } 2077 } 2078 2079 static void release_pte_page(struct page *page) 2080 { 2081 /* 0 stands for page_is_file_cache(page) == false */ 2082 dec_zone_page_state(page, NR_ISOLATED_ANON + 0); 2083 unlock_page(page); 2084 putback_lru_page(page); 2085 } 2086 2087 static void release_pte_pages(pte_t *pte, pte_t *_pte) 2088 { 2089 while (--_pte >= pte) { 2090 pte_t pteval = *_pte; 2091 if (!pte_none(pteval)) 2092 release_pte_page(pte_page(pteval)); 2093 } 2094 } 2095 2096 static int __collapse_huge_page_isolate(struct vm_area_struct *vma, 2097 unsigned long address, 2098 pte_t *pte) 2099 { 2100 struct page *page; 2101 pte_t *_pte; 2102 int referenced = 0, none = 0; 2103 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; 2104 _pte++, address += PAGE_SIZE) { 2105 pte_t pteval = *_pte; 2106 if (pte_none(pteval)) { 2107 if (++none <= khugepaged_max_ptes_none) 2108 continue; 2109 else 2110 goto out; 2111 } 2112 if (!pte_present(pteval) || !pte_write(pteval)) 2113 goto out; 2114 page = vm_normal_page(vma, address, pteval); 2115 if (unlikely(!page)) 2116 goto out; 2117 2118 VM_BUG_ON_PAGE(PageCompound(page), page); 2119 VM_BUG_ON_PAGE(!PageAnon(page), page); 2120 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); 2121 2122 /* cannot use mapcount: can't collapse if there's a gup pin */ 2123 if (page_count(page) != 1) 2124 goto out; 2125 /* 2126 * We can do it before isolate_lru_page because the 2127 * page can't be freed from under us. NOTE: PG_lock 2128 * is needed to serialize against split_huge_page 2129 * when invoked from the VM. 2130 */ 2131 if (!trylock_page(page)) 2132 goto out; 2133 /* 2134 * Isolate the page to avoid collapsing an hugepage 2135 * currently in use by the VM. 2136 */ 2137 if (isolate_lru_page(page)) { 2138 unlock_page(page); 2139 goto out; 2140 } 2141 /* 0 stands for page_is_file_cache(page) == false */ 2142 inc_zone_page_state(page, NR_ISOLATED_ANON + 0); 2143 VM_BUG_ON_PAGE(!PageLocked(page), page); 2144 VM_BUG_ON_PAGE(PageLRU(page), page); 2145 2146 /* If there is no mapped pte young don't collapse the page */ 2147 if (pte_young(pteval) || PageReferenced(page) || 2148 mmu_notifier_test_young(vma->vm_mm, address)) 2149 referenced = 1; 2150 } 2151 if (likely(referenced)) 2152 return 1; 2153 out: 2154 release_pte_pages(pte, _pte); 2155 return 0; 2156 } 2157 2158 static void __collapse_huge_page_copy(pte_t *pte, struct page *page, 2159 struct vm_area_struct *vma, 2160 unsigned long address, 2161 spinlock_t *ptl) 2162 { 2163 pte_t *_pte; 2164 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) { 2165 pte_t pteval = *_pte; 2166 struct page *src_page; 2167 2168 if (pte_none(pteval)) { 2169 clear_user_highpage(page, address); 2170 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); 2171 } else { 2172 src_page = pte_page(pteval); 2173 copy_user_highpage(page, src_page, address, vma); 2174 VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page); 2175 release_pte_page(src_page); 2176 /* 2177 * ptl mostly unnecessary, but preempt has to 2178 * be disabled to update the per-cpu stats 2179 * inside page_remove_rmap(). 2180 */ 2181 spin_lock(ptl); 2182 /* 2183 * paravirt calls inside pte_clear here are 2184 * superfluous. 2185 */ 2186 pte_clear(vma->vm_mm, address, _pte); 2187 page_remove_rmap(src_page); 2188 spin_unlock(ptl); 2189 free_page_and_swap_cache(src_page); 2190 } 2191 2192 address += PAGE_SIZE; 2193 page++; 2194 } 2195 } 2196 2197 static void khugepaged_alloc_sleep(void) 2198 { 2199 wait_event_freezable_timeout(khugepaged_wait, false, 2200 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); 2201 } 2202 2203 static int khugepaged_node_load[MAX_NUMNODES]; 2204 2205 #ifdef CONFIG_NUMA 2206 static int khugepaged_find_target_node(void) 2207 { 2208 static int last_khugepaged_target_node = NUMA_NO_NODE; 2209 int nid, target_node = 0, max_value = 0; 2210 2211 /* find first node with max normal pages hit */ 2212 for (nid = 0; nid < MAX_NUMNODES; nid++) 2213 if (khugepaged_node_load[nid] > max_value) { 2214 max_value = khugepaged_node_load[nid]; 2215 target_node = nid; 2216 } 2217 2218 /* do some balance if several nodes have the same hit record */ 2219 if (target_node <= last_khugepaged_target_node) 2220 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES; 2221 nid++) 2222 if (max_value == khugepaged_node_load[nid]) { 2223 target_node = nid; 2224 break; 2225 } 2226 2227 last_khugepaged_target_node = target_node; 2228 return target_node; 2229 } 2230 2231 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) 2232 { 2233 if (IS_ERR(*hpage)) { 2234 if (!*wait) 2235 return false; 2236 2237 *wait = false; 2238 *hpage = NULL; 2239 khugepaged_alloc_sleep(); 2240 } else if (*hpage) { 2241 put_page(*hpage); 2242 *hpage = NULL; 2243 } 2244 2245 return true; 2246 } 2247 2248 static struct page 2249 *khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm, 2250 struct vm_area_struct *vma, unsigned long address, 2251 int node) 2252 { 2253 VM_BUG_ON_PAGE(*hpage, *hpage); 2254 /* 2255 * Allocate the page while the vma is still valid and under 2256 * the mmap_sem read mode so there is no memory allocation 2257 * later when we take the mmap_sem in write mode. This is more 2258 * friendly behavior (OTOH it may actually hide bugs) to 2259 * filesystems in userland with daemons allocating memory in 2260 * the userland I/O paths. Allocating memory with the 2261 * mmap_sem in read mode is good idea also to allow greater 2262 * scalability. 2263 */ 2264 *hpage = alloc_pages_exact_node(node, alloc_hugepage_gfpmask( 2265 khugepaged_defrag(), __GFP_OTHER_NODE), HPAGE_PMD_ORDER); 2266 /* 2267 * After allocating the hugepage, release the mmap_sem read lock in 2268 * preparation for taking it in write mode. 2269 */ 2270 up_read(&mm->mmap_sem); 2271 if (unlikely(!*hpage)) { 2272 count_vm_event(THP_COLLAPSE_ALLOC_FAILED); 2273 *hpage = ERR_PTR(-ENOMEM); 2274 return NULL; 2275 } 2276 2277 count_vm_event(THP_COLLAPSE_ALLOC); 2278 return *hpage; 2279 } 2280 #else 2281 static int khugepaged_find_target_node(void) 2282 { 2283 return 0; 2284 } 2285 2286 static inline struct page *alloc_hugepage(int defrag) 2287 { 2288 return alloc_pages(alloc_hugepage_gfpmask(defrag, 0), 2289 HPAGE_PMD_ORDER); 2290 } 2291 2292 static struct page *khugepaged_alloc_hugepage(bool *wait) 2293 { 2294 struct page *hpage; 2295 2296 do { 2297 hpage = alloc_hugepage(khugepaged_defrag()); 2298 if (!hpage) { 2299 count_vm_event(THP_COLLAPSE_ALLOC_FAILED); 2300 if (!*wait) 2301 return NULL; 2302 2303 *wait = false; 2304 khugepaged_alloc_sleep(); 2305 } else 2306 count_vm_event(THP_COLLAPSE_ALLOC); 2307 } while (unlikely(!hpage) && likely(khugepaged_enabled())); 2308 2309 return hpage; 2310 } 2311 2312 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) 2313 { 2314 if (!*hpage) 2315 *hpage = khugepaged_alloc_hugepage(wait); 2316 2317 if (unlikely(!*hpage)) 2318 return false; 2319 2320 return true; 2321 } 2322 2323 static struct page 2324 *khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm, 2325 struct vm_area_struct *vma, unsigned long address, 2326 int node) 2327 { 2328 up_read(&mm->mmap_sem); 2329 VM_BUG_ON(!*hpage); 2330 return *hpage; 2331 } 2332 #endif 2333 2334 static bool hugepage_vma_check(struct vm_area_struct *vma) 2335 { 2336 if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || 2337 (vma->vm_flags & VM_NOHUGEPAGE)) 2338 return false; 2339 2340 if (!vma->anon_vma || vma->vm_ops) 2341 return false; 2342 if (is_vma_temporary_stack(vma)) 2343 return false; 2344 VM_BUG_ON(vma->vm_flags & VM_NO_THP); 2345 return true; 2346 } 2347 2348 static void collapse_huge_page(struct mm_struct *mm, 2349 unsigned long address, 2350 struct page **hpage, 2351 struct vm_area_struct *vma, 2352 int node) 2353 { 2354 pmd_t *pmd, _pmd; 2355 pte_t *pte; 2356 pgtable_t pgtable; 2357 struct page *new_page; 2358 spinlock_t *pmd_ptl, *pte_ptl; 2359 int isolated; 2360 unsigned long hstart, hend; 2361 unsigned long mmun_start; /* For mmu_notifiers */ 2362 unsigned long mmun_end; /* For mmu_notifiers */ 2363 2364 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 2365 2366 /* release the mmap_sem read lock. */ 2367 new_page = khugepaged_alloc_page(hpage, mm, vma, address, node); 2368 if (!new_page) 2369 return; 2370 2371 if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL))) 2372 return; 2373 2374 /* 2375 * Prevent all access to pagetables with the exception of 2376 * gup_fast later hanlded by the ptep_clear_flush and the VM 2377 * handled by the anon_vma lock + PG_lock. 2378 */ 2379 down_write(&mm->mmap_sem); 2380 if (unlikely(khugepaged_test_exit(mm))) 2381 goto out; 2382 2383 vma = find_vma(mm, address); 2384 if (!vma) 2385 goto out; 2386 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2387 hend = vma->vm_end & HPAGE_PMD_MASK; 2388 if (address < hstart || address + HPAGE_PMD_SIZE > hend) 2389 goto out; 2390 if (!hugepage_vma_check(vma)) 2391 goto out; 2392 pmd = mm_find_pmd(mm, address); 2393 if (!pmd) 2394 goto out; 2395 if (pmd_trans_huge(*pmd)) 2396 goto out; 2397 2398 anon_vma_lock_write(vma->anon_vma); 2399 2400 pte = pte_offset_map(pmd, address); 2401 pte_ptl = pte_lockptr(mm, pmd); 2402 2403 mmun_start = address; 2404 mmun_end = address + HPAGE_PMD_SIZE; 2405 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 2406 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ 2407 /* 2408 * After this gup_fast can't run anymore. This also removes 2409 * any huge TLB entry from the CPU so we won't allow 2410 * huge and small TLB entries for the same virtual address 2411 * to avoid the risk of CPU bugs in that area. 2412 */ 2413 _pmd = pmdp_clear_flush(vma, address, pmd); 2414 spin_unlock(pmd_ptl); 2415 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2416 2417 spin_lock(pte_ptl); 2418 isolated = __collapse_huge_page_isolate(vma, address, pte); 2419 spin_unlock(pte_ptl); 2420 2421 if (unlikely(!isolated)) { 2422 pte_unmap(pte); 2423 spin_lock(pmd_ptl); 2424 BUG_ON(!pmd_none(*pmd)); 2425 /* 2426 * We can only use set_pmd_at when establishing 2427 * hugepmds and never for establishing regular pmds that 2428 * points to regular pagetables. Use pmd_populate for that 2429 */ 2430 pmd_populate(mm, pmd, pmd_pgtable(_pmd)); 2431 spin_unlock(pmd_ptl); 2432 anon_vma_unlock_write(vma->anon_vma); 2433 goto out; 2434 } 2435 2436 /* 2437 * All pages are isolated and locked so anon_vma rmap 2438 * can't run anymore. 2439 */ 2440 anon_vma_unlock_write(vma->anon_vma); 2441 2442 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl); 2443 pte_unmap(pte); 2444 __SetPageUptodate(new_page); 2445 pgtable = pmd_pgtable(_pmd); 2446 2447 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); 2448 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); 2449 2450 /* 2451 * spin_lock() below is not the equivalent of smp_wmb(), so 2452 * this is needed to avoid the copy_huge_page writes to become 2453 * visible after the set_pmd_at() write. 2454 */ 2455 smp_wmb(); 2456 2457 spin_lock(pmd_ptl); 2458 BUG_ON(!pmd_none(*pmd)); 2459 page_add_new_anon_rmap(new_page, vma, address); 2460 pgtable_trans_huge_deposit(mm, pmd, pgtable); 2461 set_pmd_at(mm, address, pmd, _pmd); 2462 update_mmu_cache_pmd(vma, address, pmd); 2463 spin_unlock(pmd_ptl); 2464 2465 *hpage = NULL; 2466 2467 khugepaged_pages_collapsed++; 2468 out_up_write: 2469 up_write(&mm->mmap_sem); 2470 return; 2471 2472 out: 2473 mem_cgroup_uncharge_page(new_page); 2474 goto out_up_write; 2475 } 2476 2477 static int khugepaged_scan_pmd(struct mm_struct *mm, 2478 struct vm_area_struct *vma, 2479 unsigned long address, 2480 struct page **hpage) 2481 { 2482 pmd_t *pmd; 2483 pte_t *pte, *_pte; 2484 int ret = 0, referenced = 0, none = 0; 2485 struct page *page; 2486 unsigned long _address; 2487 spinlock_t *ptl; 2488 int node = NUMA_NO_NODE; 2489 2490 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 2491 2492 pmd = mm_find_pmd(mm, address); 2493 if (!pmd) 2494 goto out; 2495 if (pmd_trans_huge(*pmd)) 2496 goto out; 2497 2498 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load)); 2499 pte = pte_offset_map_lock(mm, pmd, address, &ptl); 2500 for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR; 2501 _pte++, _address += PAGE_SIZE) { 2502 pte_t pteval = *_pte; 2503 if (pte_none(pteval)) { 2504 if (++none <= khugepaged_max_ptes_none) 2505 continue; 2506 else 2507 goto out_unmap; 2508 } 2509 if (!pte_present(pteval) || !pte_write(pteval)) 2510 goto out_unmap; 2511 page = vm_normal_page(vma, _address, pteval); 2512 if (unlikely(!page)) 2513 goto out_unmap; 2514 /* 2515 * Record which node the original page is from and save this 2516 * information to khugepaged_node_load[]. 2517 * Khupaged will allocate hugepage from the node has the max 2518 * hit record. 2519 */ 2520 node = page_to_nid(page); 2521 khugepaged_node_load[node]++; 2522 VM_BUG_ON_PAGE(PageCompound(page), page); 2523 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) 2524 goto out_unmap; 2525 /* cannot use mapcount: can't collapse if there's a gup pin */ 2526 if (page_count(page) != 1) 2527 goto out_unmap; 2528 if (pte_young(pteval) || PageReferenced(page) || 2529 mmu_notifier_test_young(vma->vm_mm, address)) 2530 referenced = 1; 2531 } 2532 if (referenced) 2533 ret = 1; 2534 out_unmap: 2535 pte_unmap_unlock(pte, ptl); 2536 if (ret) { 2537 node = khugepaged_find_target_node(); 2538 /* collapse_huge_page will return with the mmap_sem released */ 2539 collapse_huge_page(mm, address, hpage, vma, node); 2540 } 2541 out: 2542 return ret; 2543 } 2544 2545 static void collect_mm_slot(struct mm_slot *mm_slot) 2546 { 2547 struct mm_struct *mm = mm_slot->mm; 2548 2549 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); 2550 2551 if (khugepaged_test_exit(mm)) { 2552 /* free mm_slot */ 2553 hash_del(&mm_slot->hash); 2554 list_del(&mm_slot->mm_node); 2555 2556 /* 2557 * Not strictly needed because the mm exited already. 2558 * 2559 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); 2560 */ 2561 2562 /* khugepaged_mm_lock actually not necessary for the below */ 2563 free_mm_slot(mm_slot); 2564 mmdrop(mm); 2565 } 2566 } 2567 2568 static unsigned int khugepaged_scan_mm_slot(unsigned int pages, 2569 struct page **hpage) 2570 __releases(&khugepaged_mm_lock) 2571 __acquires(&khugepaged_mm_lock) 2572 { 2573 struct mm_slot *mm_slot; 2574 struct mm_struct *mm; 2575 struct vm_area_struct *vma; 2576 int progress = 0; 2577 2578 VM_BUG_ON(!pages); 2579 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); 2580 2581 if (khugepaged_scan.mm_slot) 2582 mm_slot = khugepaged_scan.mm_slot; 2583 else { 2584 mm_slot = list_entry(khugepaged_scan.mm_head.next, 2585 struct mm_slot, mm_node); 2586 khugepaged_scan.address = 0; 2587 khugepaged_scan.mm_slot = mm_slot; 2588 } 2589 spin_unlock(&khugepaged_mm_lock); 2590 2591 mm = mm_slot->mm; 2592 down_read(&mm->mmap_sem); 2593 if (unlikely(khugepaged_test_exit(mm))) 2594 vma = NULL; 2595 else 2596 vma = find_vma(mm, khugepaged_scan.address); 2597 2598 progress++; 2599 for (; vma; vma = vma->vm_next) { 2600 unsigned long hstart, hend; 2601 2602 cond_resched(); 2603 if (unlikely(khugepaged_test_exit(mm))) { 2604 progress++; 2605 break; 2606 } 2607 if (!hugepage_vma_check(vma)) { 2608 skip: 2609 progress++; 2610 continue; 2611 } 2612 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2613 hend = vma->vm_end & HPAGE_PMD_MASK; 2614 if (hstart >= hend) 2615 goto skip; 2616 if (khugepaged_scan.address > hend) 2617 goto skip; 2618 if (khugepaged_scan.address < hstart) 2619 khugepaged_scan.address = hstart; 2620 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); 2621 2622 while (khugepaged_scan.address < hend) { 2623 int ret; 2624 cond_resched(); 2625 if (unlikely(khugepaged_test_exit(mm))) 2626 goto breakouterloop; 2627 2628 VM_BUG_ON(khugepaged_scan.address < hstart || 2629 khugepaged_scan.address + HPAGE_PMD_SIZE > 2630 hend); 2631 ret = khugepaged_scan_pmd(mm, vma, 2632 khugepaged_scan.address, 2633 hpage); 2634 /* move to next address */ 2635 khugepaged_scan.address += HPAGE_PMD_SIZE; 2636 progress += HPAGE_PMD_NR; 2637 if (ret) 2638 /* we released mmap_sem so break loop */ 2639 goto breakouterloop_mmap_sem; 2640 if (progress >= pages) 2641 goto breakouterloop; 2642 } 2643 } 2644 breakouterloop: 2645 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */ 2646 breakouterloop_mmap_sem: 2647 2648 spin_lock(&khugepaged_mm_lock); 2649 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); 2650 /* 2651 * Release the current mm_slot if this mm is about to die, or 2652 * if we scanned all vmas of this mm. 2653 */ 2654 if (khugepaged_test_exit(mm) || !vma) { 2655 /* 2656 * Make sure that if mm_users is reaching zero while 2657 * khugepaged runs here, khugepaged_exit will find 2658 * mm_slot not pointing to the exiting mm. 2659 */ 2660 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) { 2661 khugepaged_scan.mm_slot = list_entry( 2662 mm_slot->mm_node.next, 2663 struct mm_slot, mm_node); 2664 khugepaged_scan.address = 0; 2665 } else { 2666 khugepaged_scan.mm_slot = NULL; 2667 khugepaged_full_scans++; 2668 } 2669 2670 collect_mm_slot(mm_slot); 2671 } 2672 2673 return progress; 2674 } 2675 2676 static int khugepaged_has_work(void) 2677 { 2678 return !list_empty(&khugepaged_scan.mm_head) && 2679 khugepaged_enabled(); 2680 } 2681 2682 static int khugepaged_wait_event(void) 2683 { 2684 return !list_empty(&khugepaged_scan.mm_head) || 2685 kthread_should_stop(); 2686 } 2687 2688 static void khugepaged_do_scan(void) 2689 { 2690 struct page *hpage = NULL; 2691 unsigned int progress = 0, pass_through_head = 0; 2692 unsigned int pages = khugepaged_pages_to_scan; 2693 bool wait = true; 2694 2695 barrier(); /* write khugepaged_pages_to_scan to local stack */ 2696 2697 while (progress < pages) { 2698 if (!khugepaged_prealloc_page(&hpage, &wait)) 2699 break; 2700 2701 cond_resched(); 2702 2703 if (unlikely(kthread_should_stop() || freezing(current))) 2704 break; 2705 2706 spin_lock(&khugepaged_mm_lock); 2707 if (!khugepaged_scan.mm_slot) 2708 pass_through_head++; 2709 if (khugepaged_has_work() && 2710 pass_through_head < 2) 2711 progress += khugepaged_scan_mm_slot(pages - progress, 2712 &hpage); 2713 else 2714 progress = pages; 2715 spin_unlock(&khugepaged_mm_lock); 2716 } 2717 2718 if (!IS_ERR_OR_NULL(hpage)) 2719 put_page(hpage); 2720 } 2721 2722 static void khugepaged_wait_work(void) 2723 { 2724 try_to_freeze(); 2725 2726 if (khugepaged_has_work()) { 2727 if (!khugepaged_scan_sleep_millisecs) 2728 return; 2729 2730 wait_event_freezable_timeout(khugepaged_wait, 2731 kthread_should_stop(), 2732 msecs_to_jiffies(khugepaged_scan_sleep_millisecs)); 2733 return; 2734 } 2735 2736 if (khugepaged_enabled()) 2737 wait_event_freezable(khugepaged_wait, khugepaged_wait_event()); 2738 } 2739 2740 static int khugepaged(void *none) 2741 { 2742 struct mm_slot *mm_slot; 2743 2744 set_freezable(); 2745 set_user_nice(current, MAX_NICE); 2746 2747 while (!kthread_should_stop()) { 2748 khugepaged_do_scan(); 2749 khugepaged_wait_work(); 2750 } 2751 2752 spin_lock(&khugepaged_mm_lock); 2753 mm_slot = khugepaged_scan.mm_slot; 2754 khugepaged_scan.mm_slot = NULL; 2755 if (mm_slot) 2756 collect_mm_slot(mm_slot); 2757 spin_unlock(&khugepaged_mm_lock); 2758 return 0; 2759 } 2760 2761 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, 2762 unsigned long haddr, pmd_t *pmd) 2763 { 2764 struct mm_struct *mm = vma->vm_mm; 2765 pgtable_t pgtable; 2766 pmd_t _pmd; 2767 int i; 2768 2769 pmdp_clear_flush(vma, haddr, pmd); 2770 /* leave pmd empty until pte is filled */ 2771 2772 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2773 pmd_populate(mm, &_pmd, pgtable); 2774 2775 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 2776 pte_t *pte, entry; 2777 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); 2778 entry = pte_mkspecial(entry); 2779 pte = pte_offset_map(&_pmd, haddr); 2780 VM_BUG_ON(!pte_none(*pte)); 2781 set_pte_at(mm, haddr, pte, entry); 2782 pte_unmap(pte); 2783 } 2784 smp_wmb(); /* make pte visible before pmd */ 2785 pmd_populate(mm, pmd, pgtable); 2786 put_huge_zero_page(); 2787 } 2788 2789 void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address, 2790 pmd_t *pmd) 2791 { 2792 spinlock_t *ptl; 2793 struct page *page; 2794 struct mm_struct *mm = vma->vm_mm; 2795 unsigned long haddr = address & HPAGE_PMD_MASK; 2796 unsigned long mmun_start; /* For mmu_notifiers */ 2797 unsigned long mmun_end; /* For mmu_notifiers */ 2798 2799 BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE); 2800 2801 mmun_start = haddr; 2802 mmun_end = haddr + HPAGE_PMD_SIZE; 2803 again: 2804 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 2805 ptl = pmd_lock(mm, pmd); 2806 if (unlikely(!pmd_trans_huge(*pmd))) { 2807 spin_unlock(ptl); 2808 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2809 return; 2810 } 2811 if (is_huge_zero_pmd(*pmd)) { 2812 __split_huge_zero_page_pmd(vma, haddr, pmd); 2813 spin_unlock(ptl); 2814 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2815 return; 2816 } 2817 page = pmd_page(*pmd); 2818 VM_BUG_ON_PAGE(!page_count(page), page); 2819 get_page(page); 2820 spin_unlock(ptl); 2821 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2822 2823 split_huge_page(page); 2824 2825 put_page(page); 2826 2827 /* 2828 * We don't always have down_write of mmap_sem here: a racing 2829 * do_huge_pmd_wp_page() might have copied-on-write to another 2830 * huge page before our split_huge_page() got the anon_vma lock. 2831 */ 2832 if (unlikely(pmd_trans_huge(*pmd))) 2833 goto again; 2834 } 2835 2836 void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address, 2837 pmd_t *pmd) 2838 { 2839 struct vm_area_struct *vma; 2840 2841 vma = find_vma(mm, address); 2842 BUG_ON(vma == NULL); 2843 split_huge_page_pmd(vma, address, pmd); 2844 } 2845 2846 static void split_huge_page_address(struct mm_struct *mm, 2847 unsigned long address) 2848 { 2849 pmd_t *pmd; 2850 2851 VM_BUG_ON(!(address & ~HPAGE_PMD_MASK)); 2852 2853 pmd = mm_find_pmd(mm, address); 2854 if (!pmd) 2855 return; 2856 /* 2857 * Caller holds the mmap_sem write mode, so a huge pmd cannot 2858 * materialize from under us. 2859 */ 2860 split_huge_page_pmd_mm(mm, address, pmd); 2861 } 2862 2863 void __vma_adjust_trans_huge(struct vm_area_struct *vma, 2864 unsigned long start, 2865 unsigned long end, 2866 long adjust_next) 2867 { 2868 /* 2869 * If the new start address isn't hpage aligned and it could 2870 * previously contain an hugepage: check if we need to split 2871 * an huge pmd. 2872 */ 2873 if (start & ~HPAGE_PMD_MASK && 2874 (start & HPAGE_PMD_MASK) >= vma->vm_start && 2875 (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) 2876 split_huge_page_address(vma->vm_mm, start); 2877 2878 /* 2879 * If the new end address isn't hpage aligned and it could 2880 * previously contain an hugepage: check if we need to split 2881 * an huge pmd. 2882 */ 2883 if (end & ~HPAGE_PMD_MASK && 2884 (end & HPAGE_PMD_MASK) >= vma->vm_start && 2885 (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) 2886 split_huge_page_address(vma->vm_mm, end); 2887 2888 /* 2889 * If we're also updating the vma->vm_next->vm_start, if the new 2890 * vm_next->vm_start isn't page aligned and it could previously 2891 * contain an hugepage: check if we need to split an huge pmd. 2892 */ 2893 if (adjust_next > 0) { 2894 struct vm_area_struct *next = vma->vm_next; 2895 unsigned long nstart = next->vm_start; 2896 nstart += adjust_next << PAGE_SHIFT; 2897 if (nstart & ~HPAGE_PMD_MASK && 2898 (nstart & HPAGE_PMD_MASK) >= next->vm_start && 2899 (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end) 2900 split_huge_page_address(next->vm_mm, nstart); 2901 } 2902 } 2903