/*
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications without a guaranteed
 * benefit. When transparent hugepage support is enabled, it is used for all
 * mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

static struct shrinker deferred_split_shrinker;

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;

static struct page *get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return READ_ONCE(huge_zero_page);

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return NULL;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}

	/* We take an additional reference here. It will be put back by the shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return READ_ONCE(huge_zero_page);
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
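	 *
	 * Informal sketch of the refcounting scheme: on first use,
	 * get_huge_zero_page() sets the counter to 2: one reference for the
	 * caller and one "extra" reference that only the shrinker
	 * (shrink_huge_zero_page_scan()) may drop. Callers that pair
	 * get_huge_zero_page()/put_huge_zero_page() therefore never drop the
	 * last reference themselves, which is what the BUG_ON below asserts.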
97 */ 98 BUG_ON(atomic_dec_and_test(&huge_zero_refcount)); 99 } 100 101 struct page *mm_get_huge_zero_page(struct mm_struct *mm) 102 { 103 if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) 104 return READ_ONCE(huge_zero_page); 105 106 if (!get_huge_zero_page()) 107 return NULL; 108 109 if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) 110 put_huge_zero_page(); 111 112 return READ_ONCE(huge_zero_page); 113 } 114 115 void mm_put_huge_zero_page(struct mm_struct *mm) 116 { 117 if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) 118 put_huge_zero_page(); 119 } 120 121 static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink, 122 struct shrink_control *sc) 123 { 124 /* we can free zero page only if last reference remains */ 125 return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0; 126 } 127 128 static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink, 129 struct shrink_control *sc) 130 { 131 if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) { 132 struct page *zero_page = xchg(&huge_zero_page, NULL); 133 BUG_ON(zero_page == NULL); 134 __free_pages(zero_page, compound_order(zero_page)); 135 return HPAGE_PMD_NR; 136 } 137 138 return 0; 139 } 140 141 static struct shrinker huge_zero_page_shrinker = { 142 .count_objects = shrink_huge_zero_page_count, 143 .scan_objects = shrink_huge_zero_page_scan, 144 .seeks = DEFAULT_SEEKS, 145 }; 146 147 #ifdef CONFIG_SYSFS 148 static ssize_t enabled_show(struct kobject *kobj, 149 struct kobj_attribute *attr, char *buf) 150 { 151 if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags)) 152 return sprintf(buf, "[always] madvise never\n"); 153 else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags)) 154 return sprintf(buf, "always [madvise] never\n"); 155 else 156 return sprintf(buf, "always madvise [never]\n"); 157 } 158 159 static ssize_t enabled_store(struct kobject *kobj, 160 struct kobj_attribute *attr, 161 const char *buf, size_t count) 162 { 163 ssize_t ret = count; 164 165 if (!memcmp("always", buf, 166 min(sizeof("always")-1, count))) { 167 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); 168 set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); 169 } else if (!memcmp("madvise", buf, 170 min(sizeof("madvise")-1, count))) { 171 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); 172 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); 173 } else if (!memcmp("never", buf, 174 min(sizeof("never")-1, count))) { 175 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); 176 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); 177 } else 178 ret = -EINVAL; 179 180 if (ret > 0) { 181 int err = start_stop_khugepaged(); 182 if (err) 183 ret = err; 184 } 185 return ret; 186 } 187 static struct kobj_attribute enabled_attr = 188 __ATTR(enabled, 0644, enabled_show, enabled_store); 189 190 ssize_t single_hugepage_flag_show(struct kobject *kobj, 191 struct kobj_attribute *attr, char *buf, 192 enum transparent_hugepage_flag flag) 193 { 194 return sprintf(buf, "%d\n", 195 !!test_bit(flag, &transparent_hugepage_flags)); 196 } 197 198 ssize_t single_hugepage_flag_store(struct kobject *kobj, 199 struct kobj_attribute *attr, 200 const char *buf, size_t count, 201 enum transparent_hugepage_flag flag) 202 { 203 unsigned long value; 204 int ret; 205 206 ret = kstrtoul(buf, 10, &value); 207 if (ret < 0) 208 return ret; 209 if (value > 1) 210 return -EINVAL; 211 212 if (value) 213 set_bit(flag, 
&transparent_hugepage_flags); 214 else 215 clear_bit(flag, &transparent_hugepage_flags); 216 217 return count; 218 } 219 220 static ssize_t defrag_show(struct kobject *kobj, 221 struct kobj_attribute *attr, char *buf) 222 { 223 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) 224 return sprintf(buf, "[always] defer defer+madvise madvise never\n"); 225 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) 226 return sprintf(buf, "always [defer] defer+madvise madvise never\n"); 227 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags)) 228 return sprintf(buf, "always defer [defer+madvise] madvise never\n"); 229 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) 230 return sprintf(buf, "always defer defer+madvise [madvise] never\n"); 231 return sprintf(buf, "always defer defer+madvise madvise [never]\n"); 232 } 233 234 static ssize_t defrag_store(struct kobject *kobj, 235 struct kobj_attribute *attr, 236 const char *buf, size_t count) 237 { 238 if (!memcmp("always", buf, 239 min(sizeof("always")-1, count))) { 240 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 241 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 242 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 243 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 244 } else if (!memcmp("defer+madvise", buf, 245 min(sizeof("defer+madvise")-1, count))) { 246 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 247 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 248 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 249 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 250 } else if (!memcmp("defer", buf, 251 min(sizeof("defer")-1, count))) { 252 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 253 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 254 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 255 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 256 } else if (!memcmp("madvise", buf, 257 min(sizeof("madvise")-1, count))) { 258 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 259 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 260 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 261 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 262 } else if (!memcmp("never", buf, 263 min(sizeof("never")-1, count))) { 264 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 265 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 266 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 267 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 268 } else 269 return -EINVAL; 270 271 return count; 272 } 273 static struct kobj_attribute defrag_attr = 274 __ATTR(defrag, 0644, defrag_show, defrag_store); 275 276 static ssize_t use_zero_page_show(struct kobject *kobj, 277 struct kobj_attribute *attr, char *buf) 278 { 279 return single_hugepage_flag_show(kobj, attr, buf, 280 
TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 281 } 282 static ssize_t use_zero_page_store(struct kobject *kobj, 283 struct kobj_attribute *attr, const char *buf, size_t count) 284 { 285 return single_hugepage_flag_store(kobj, attr, buf, count, 286 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 287 } 288 static struct kobj_attribute use_zero_page_attr = 289 __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store); 290 291 static ssize_t hpage_pmd_size_show(struct kobject *kobj, 292 struct kobj_attribute *attr, char *buf) 293 { 294 return sprintf(buf, "%lu\n", HPAGE_PMD_SIZE); 295 } 296 static struct kobj_attribute hpage_pmd_size_attr = 297 __ATTR_RO(hpage_pmd_size); 298 299 #ifdef CONFIG_DEBUG_VM 300 static ssize_t debug_cow_show(struct kobject *kobj, 301 struct kobj_attribute *attr, char *buf) 302 { 303 return single_hugepage_flag_show(kobj, attr, buf, 304 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); 305 } 306 static ssize_t debug_cow_store(struct kobject *kobj, 307 struct kobj_attribute *attr, 308 const char *buf, size_t count) 309 { 310 return single_hugepage_flag_store(kobj, attr, buf, count, 311 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); 312 } 313 static struct kobj_attribute debug_cow_attr = 314 __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store); 315 #endif /* CONFIG_DEBUG_VM */ 316 317 static struct attribute *hugepage_attr[] = { 318 &enabled_attr.attr, 319 &defrag_attr.attr, 320 &use_zero_page_attr.attr, 321 &hpage_pmd_size_attr.attr, 322 #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) 323 &shmem_enabled_attr.attr, 324 #endif 325 #ifdef CONFIG_DEBUG_VM 326 &debug_cow_attr.attr, 327 #endif 328 NULL, 329 }; 330 331 static struct attribute_group hugepage_attr_group = { 332 .attrs = hugepage_attr, 333 }; 334 335 static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) 336 { 337 int err; 338 339 *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj); 340 if (unlikely(!*hugepage_kobj)) { 341 pr_err("failed to create transparent hugepage kobject\n"); 342 return -ENOMEM; 343 } 344 345 err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group); 346 if (err) { 347 pr_err("failed to register transparent hugepage group\n"); 348 goto delete_obj; 349 } 350 351 err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group); 352 if (err) { 353 pr_err("failed to register transparent hugepage group\n"); 354 goto remove_hp_group; 355 } 356 357 return 0; 358 359 remove_hp_group: 360 sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group); 361 delete_obj: 362 kobject_put(*hugepage_kobj); 363 return err; 364 } 365 366 static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj) 367 { 368 sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group); 369 sysfs_remove_group(hugepage_kobj, &hugepage_attr_group); 370 kobject_put(hugepage_kobj); 371 } 372 #else 373 static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj) 374 { 375 return 0; 376 } 377 378 static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj) 379 { 380 } 381 #endif /* CONFIG_SYSFS */ 382 383 static int __init hugepage_init(void) 384 { 385 int err; 386 struct kobject *hugepage_kobj; 387 388 if (!has_transparent_hugepage()) { 389 transparent_hugepage_flags = 0; 390 return -EINVAL; 391 } 392 393 /* 394 * hugepages can't be allocated by the buddy allocator 395 */ 396 MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER); 397 /* 398 * we use page->mapping and page->index in second tail page 399 * as list_head: assuming THP order >= 2 400 */ 401 
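	/*
	 * For illustration: with order >= 2 a THP spans at least four struct
	 * pages (one head plus three tails), so page[2] exists and its
	 * ->mapping/->index words can hold the deferred-split list_head, see
	 * page_deferred_list() below.
	 */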
MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2); 402 403 err = hugepage_init_sysfs(&hugepage_kobj); 404 if (err) 405 goto err_sysfs; 406 407 err = khugepaged_init(); 408 if (err) 409 goto err_slab; 410 411 err = register_shrinker(&huge_zero_page_shrinker); 412 if (err) 413 goto err_hzp_shrinker; 414 err = register_shrinker(&deferred_split_shrinker); 415 if (err) 416 goto err_split_shrinker; 417 418 /* 419 * By default disable transparent hugepages on smaller systems, 420 * where the extra memory used could hurt more than TLB overhead 421 * is likely to save. The admin can still enable it through /sys. 422 */ 423 if (totalram_pages < (512 << (20 - PAGE_SHIFT))) { 424 transparent_hugepage_flags = 0; 425 return 0; 426 } 427 428 err = start_stop_khugepaged(); 429 if (err) 430 goto err_khugepaged; 431 432 return 0; 433 err_khugepaged: 434 unregister_shrinker(&deferred_split_shrinker); 435 err_split_shrinker: 436 unregister_shrinker(&huge_zero_page_shrinker); 437 err_hzp_shrinker: 438 khugepaged_destroy(); 439 err_slab: 440 hugepage_exit_sysfs(hugepage_kobj); 441 err_sysfs: 442 return err; 443 } 444 subsys_initcall(hugepage_init); 445 446 static int __init setup_transparent_hugepage(char *str) 447 { 448 int ret = 0; 449 if (!str) 450 goto out; 451 if (!strcmp(str, "always")) { 452 set_bit(TRANSPARENT_HUGEPAGE_FLAG, 453 &transparent_hugepage_flags); 454 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 455 &transparent_hugepage_flags); 456 ret = 1; 457 } else if (!strcmp(str, "madvise")) { 458 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, 459 &transparent_hugepage_flags); 460 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 461 &transparent_hugepage_flags); 462 ret = 1; 463 } else if (!strcmp(str, "never")) { 464 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, 465 &transparent_hugepage_flags); 466 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 467 &transparent_hugepage_flags); 468 ret = 1; 469 } 470 out: 471 if (!ret) 472 pr_warn("transparent_hugepage= cannot parse, ignored\n"); 473 return ret; 474 } 475 __setup("transparent_hugepage=", setup_transparent_hugepage); 476 477 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) 478 { 479 if (likely(vma->vm_flags & VM_WRITE)) 480 pmd = pmd_mkwrite(pmd); 481 return pmd; 482 } 483 484 static inline struct list_head *page_deferred_list(struct page *page) 485 { 486 /* 487 * ->lru in the tail pages is occupied by compound_head. 488 * Let's use ->mapping + ->index in the second tail page as list_head. 
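	 *
	 * (Informal note: this works because ->mapping and ->index are
	 * adjacent, word-sized fields in struct page, so together they
	 * provide exactly the two words needed for a struct list_head.)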
489 */ 490 return (struct list_head *)&page[2].mapping; 491 } 492 493 void prep_transhuge_page(struct page *page) 494 { 495 /* 496 * we use page->mapping and page->indexlru in second tail page 497 * as list_head: assuming THP order >= 2 498 */ 499 500 INIT_LIST_HEAD(page_deferred_list(page)); 501 set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR); 502 } 503 504 unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len, 505 loff_t off, unsigned long flags, unsigned long size) 506 { 507 unsigned long addr; 508 loff_t off_end = off + len; 509 loff_t off_align = round_up(off, size); 510 unsigned long len_pad; 511 512 if (off_end <= off_align || (off_end - off_align) < size) 513 return 0; 514 515 len_pad = len + size; 516 if (len_pad < len || (off + len_pad) < off) 517 return 0; 518 519 addr = current->mm->get_unmapped_area(filp, 0, len_pad, 520 off >> PAGE_SHIFT, flags); 521 if (IS_ERR_VALUE(addr)) 522 return 0; 523 524 addr += (off - addr) & (size - 1); 525 return addr; 526 } 527 528 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, 529 unsigned long len, unsigned long pgoff, unsigned long flags) 530 { 531 loff_t off = (loff_t)pgoff << PAGE_SHIFT; 532 533 if (addr) 534 goto out; 535 if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD)) 536 goto out; 537 538 addr = __thp_get_unmapped_area(filp, len, off, flags, PMD_SIZE); 539 if (addr) 540 return addr; 541 542 out: 543 return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); 544 } 545 EXPORT_SYMBOL_GPL(thp_get_unmapped_area); 546 547 static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page, 548 gfp_t gfp) 549 { 550 struct vm_area_struct *vma = vmf->vma; 551 struct mem_cgroup *memcg; 552 pgtable_t pgtable; 553 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 554 int ret = 0; 555 556 VM_BUG_ON_PAGE(!PageCompound(page), page); 557 558 if (mem_cgroup_try_charge(page, vma->vm_mm, gfp, &memcg, true)) { 559 put_page(page); 560 count_vm_event(THP_FAULT_FALLBACK); 561 return VM_FAULT_FALLBACK; 562 } 563 564 pgtable = pte_alloc_one(vma->vm_mm, haddr); 565 if (unlikely(!pgtable)) { 566 ret = VM_FAULT_OOM; 567 goto release; 568 } 569 570 clear_huge_page(page, haddr, HPAGE_PMD_NR); 571 /* 572 * The memory barrier inside __SetPageUptodate makes sure that 573 * clear_huge_page writes become visible before the set_pmd_at() 574 * write. 
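	 *
	 * Rough sketch of what could go wrong without that ordering: another
	 * thread that observes the new pmd could read through it and see
	 * stale, not-yet-zeroed data before the clear_huge_page() stores
	 * become visible.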
575 */ 576 __SetPageUptodate(page); 577 578 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 579 if (unlikely(!pmd_none(*vmf->pmd))) { 580 goto unlock_release; 581 } else { 582 pmd_t entry; 583 584 ret = check_stable_address_space(vma->vm_mm); 585 if (ret) 586 goto unlock_release; 587 588 /* Deliver the page fault to userland */ 589 if (userfaultfd_missing(vma)) { 590 int ret; 591 592 spin_unlock(vmf->ptl); 593 mem_cgroup_cancel_charge(page, memcg, true); 594 put_page(page); 595 pte_free(vma->vm_mm, pgtable); 596 ret = handle_userfault(vmf, VM_UFFD_MISSING); 597 VM_BUG_ON(ret & VM_FAULT_FALLBACK); 598 return ret; 599 } 600 601 entry = mk_huge_pmd(page, vma->vm_page_prot); 602 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 603 page_add_new_anon_rmap(page, vma, haddr, true); 604 mem_cgroup_commit_charge(page, memcg, false, true); 605 lru_cache_add_active_or_unevictable(page, vma); 606 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); 607 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); 608 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); 609 atomic_long_inc(&vma->vm_mm->nr_ptes); 610 spin_unlock(vmf->ptl); 611 count_vm_event(THP_FAULT_ALLOC); 612 } 613 614 return 0; 615 unlock_release: 616 spin_unlock(vmf->ptl); 617 release: 618 if (pgtable) 619 pte_free(vma->vm_mm, pgtable); 620 mem_cgroup_cancel_charge(page, memcg, true); 621 put_page(page); 622 return ret; 623 624 } 625 626 /* 627 * always: directly stall for all thp allocations 628 * defer: wake kswapd and fail if not immediately available 629 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise 630 * fail if not immediately available 631 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately 632 * available 633 * never: never stall for any thp allocation 634 */ 635 static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma) 636 { 637 const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE); 638 639 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) 640 return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY); 641 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) 642 return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM; 643 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags)) 644 return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM : 645 __GFP_KSWAPD_RECLAIM); 646 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) 647 return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM : 648 0); 649 return GFP_TRANSHUGE_LIGHT; 650 } 651 652 /* Caller must hold page table lock. 
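 *
 * A typical call sequence, as a sketch mirroring do_huge_pmd_anonymous_page():
 *
 *	ptl = pmd_lock(mm, pmd);
 *	if (pmd_none(*pmd))
 *		set_huge_zero_page(pgtable, mm, vma, haddr, pmd, zero_page);
 *	spin_unlock(ptl);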
*/ 653 static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, 654 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, 655 struct page *zero_page) 656 { 657 pmd_t entry; 658 if (!pmd_none(*pmd)) 659 return false; 660 entry = mk_pmd(zero_page, vma->vm_page_prot); 661 entry = pmd_mkhuge(entry); 662 if (pgtable) 663 pgtable_trans_huge_deposit(mm, pmd, pgtable); 664 set_pmd_at(mm, haddr, pmd, entry); 665 atomic_long_inc(&mm->nr_ptes); 666 return true; 667 } 668 669 int do_huge_pmd_anonymous_page(struct vm_fault *vmf) 670 { 671 struct vm_area_struct *vma = vmf->vma; 672 gfp_t gfp; 673 struct page *page; 674 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 675 676 if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) 677 return VM_FAULT_FALLBACK; 678 if (unlikely(anon_vma_prepare(vma))) 679 return VM_FAULT_OOM; 680 if (unlikely(khugepaged_enter(vma, vma->vm_flags))) 681 return VM_FAULT_OOM; 682 if (!(vmf->flags & FAULT_FLAG_WRITE) && 683 !mm_forbids_zeropage(vma->vm_mm) && 684 transparent_hugepage_use_zero_page()) { 685 pgtable_t pgtable; 686 struct page *zero_page; 687 bool set; 688 int ret; 689 pgtable = pte_alloc_one(vma->vm_mm, haddr); 690 if (unlikely(!pgtable)) 691 return VM_FAULT_OOM; 692 zero_page = mm_get_huge_zero_page(vma->vm_mm); 693 if (unlikely(!zero_page)) { 694 pte_free(vma->vm_mm, pgtable); 695 count_vm_event(THP_FAULT_FALLBACK); 696 return VM_FAULT_FALLBACK; 697 } 698 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 699 ret = 0; 700 set = false; 701 if (pmd_none(*vmf->pmd)) { 702 ret = check_stable_address_space(vma->vm_mm); 703 if (ret) { 704 spin_unlock(vmf->ptl); 705 } else if (userfaultfd_missing(vma)) { 706 spin_unlock(vmf->ptl); 707 ret = handle_userfault(vmf, VM_UFFD_MISSING); 708 VM_BUG_ON(ret & VM_FAULT_FALLBACK); 709 } else { 710 set_huge_zero_page(pgtable, vma->vm_mm, vma, 711 haddr, vmf->pmd, zero_page); 712 spin_unlock(vmf->ptl); 713 set = true; 714 } 715 } else 716 spin_unlock(vmf->ptl); 717 if (!set) 718 pte_free(vma->vm_mm, pgtable); 719 return ret; 720 } 721 gfp = alloc_hugepage_direct_gfpmask(vma); 722 page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); 723 if (unlikely(!page)) { 724 count_vm_event(THP_FAULT_FALLBACK); 725 return VM_FAULT_FALLBACK; 726 } 727 prep_transhuge_page(page); 728 return __do_huge_pmd_anonymous_page(vmf, page, gfp); 729 } 730 731 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, 732 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write, 733 pgtable_t pgtable) 734 { 735 struct mm_struct *mm = vma->vm_mm; 736 pmd_t entry; 737 spinlock_t *ptl; 738 739 ptl = pmd_lock(mm, pmd); 740 entry = pmd_mkhuge(pfn_t_pmd(pfn, prot)); 741 if (pfn_t_devmap(pfn)) 742 entry = pmd_mkdevmap(entry); 743 if (write) { 744 entry = pmd_mkyoung(pmd_mkdirty(entry)); 745 entry = maybe_pmd_mkwrite(entry, vma); 746 } 747 748 if (pgtable) { 749 pgtable_trans_huge_deposit(mm, pmd, pgtable); 750 atomic_long_inc(&mm->nr_ptes); 751 } 752 753 set_pmd_at(mm, addr, pmd, entry); 754 update_mmu_cache_pmd(vma, addr, pmd); 755 spin_unlock(ptl); 756 } 757 758 int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, 759 pmd_t *pmd, pfn_t pfn, bool write) 760 { 761 pgprot_t pgprot = vma->vm_page_prot; 762 pgtable_t pgtable = NULL; 763 /* 764 * If we had pmd_special, we could avoid all these restrictions, 765 * but we need to be consistent with PTEs and architectures that 766 * can't support a 'special' bit. 
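	 *
	 * Concretely, the checks below require: exactly one of VM_PFNMAP or
	 * VM_MIXEDMAP set on the vma, no COW VM_PFNMAP mapping, and a devmap
	 * pfn.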
767 */ 768 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); 769 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == 770 (VM_PFNMAP|VM_MIXEDMAP)); 771 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); 772 BUG_ON(!pfn_t_devmap(pfn)); 773 774 if (addr < vma->vm_start || addr >= vma->vm_end) 775 return VM_FAULT_SIGBUS; 776 777 if (arch_needs_pgtable_deposit()) { 778 pgtable = pte_alloc_one(vma->vm_mm, addr); 779 if (!pgtable) 780 return VM_FAULT_OOM; 781 } 782 783 track_pfn_insert(vma, &pgprot, pfn); 784 785 insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write, pgtable); 786 return VM_FAULT_NOPAGE; 787 } 788 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd); 789 790 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 791 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma) 792 { 793 if (likely(vma->vm_flags & VM_WRITE)) 794 pud = pud_mkwrite(pud); 795 return pud; 796 } 797 798 static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, 799 pud_t *pud, pfn_t pfn, pgprot_t prot, bool write) 800 { 801 struct mm_struct *mm = vma->vm_mm; 802 pud_t entry; 803 spinlock_t *ptl; 804 805 ptl = pud_lock(mm, pud); 806 entry = pud_mkhuge(pfn_t_pud(pfn, prot)); 807 if (pfn_t_devmap(pfn)) 808 entry = pud_mkdevmap(entry); 809 if (write) { 810 entry = pud_mkyoung(pud_mkdirty(entry)); 811 entry = maybe_pud_mkwrite(entry, vma); 812 } 813 set_pud_at(mm, addr, pud, entry); 814 update_mmu_cache_pud(vma, addr, pud); 815 spin_unlock(ptl); 816 } 817 818 int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, 819 pud_t *pud, pfn_t pfn, bool write) 820 { 821 pgprot_t pgprot = vma->vm_page_prot; 822 /* 823 * If we had pud_special, we could avoid all these restrictions, 824 * but we need to be consistent with PTEs and architectures that 825 * can't support a 'special' bit. 826 */ 827 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); 828 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == 829 (VM_PFNMAP|VM_MIXEDMAP)); 830 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); 831 BUG_ON(!pfn_t_devmap(pfn)); 832 833 if (addr < vma->vm_start || addr >= vma->vm_end) 834 return VM_FAULT_SIGBUS; 835 836 track_pfn_insert(vma, &pgprot, pfn); 837 838 insert_pfn_pud(vma, addr, pud, pfn, pgprot, write); 839 return VM_FAULT_NOPAGE; 840 } 841 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud); 842 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 843 844 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr, 845 pmd_t *pmd) 846 { 847 pmd_t _pmd; 848 849 /* 850 * We should set the dirty bit only for FOLL_WRITE but for now 851 * the dirty bit in the pmd is meaningless. And if the dirty 852 * bit will become meaningful and we'll only set it with 853 * FOLL_WRITE, an atomic set_bit will be required on the pmd to 854 * set the young bit, instead of the current set_pmd_at. 855 */ 856 _pmd = pmd_mkyoung(pmd_mkdirty(*pmd)); 857 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, 858 pmd, _pmd, 1)) 859 update_mmu_cache_pmd(vma, addr, pmd); 860 } 861 862 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, 863 pmd_t *pmd, int flags) 864 { 865 unsigned long pfn = pmd_pfn(*pmd); 866 struct mm_struct *mm = vma->vm_mm; 867 struct dev_pagemap *pgmap; 868 struct page *page; 869 870 assert_spin_locked(pmd_lockptr(mm, pmd)); 871 872 /* 873 * When we COW a devmap PMD entry, we split it into PTEs, so we should 874 * not be in this function with `flags & FOLL_COW` set. 
875 */ 876 WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set"); 877 878 if (flags & FOLL_WRITE && !pmd_write(*pmd)) 879 return NULL; 880 881 if (pmd_present(*pmd) && pmd_devmap(*pmd)) 882 /* pass */; 883 else 884 return NULL; 885 886 if (flags & FOLL_TOUCH) 887 touch_pmd(vma, addr, pmd); 888 889 /* 890 * device mapped pages can only be returned if the 891 * caller will manage the page reference count. 892 */ 893 if (!(flags & FOLL_GET)) 894 return ERR_PTR(-EEXIST); 895 896 pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT; 897 pgmap = get_dev_pagemap(pfn, NULL); 898 if (!pgmap) 899 return ERR_PTR(-EFAULT); 900 page = pfn_to_page(pfn); 901 get_page(page); 902 put_dev_pagemap(pgmap); 903 904 return page; 905 } 906 907 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, 908 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, 909 struct vm_area_struct *vma) 910 { 911 spinlock_t *dst_ptl, *src_ptl; 912 struct page *src_page; 913 pmd_t pmd; 914 pgtable_t pgtable = NULL; 915 int ret = -ENOMEM; 916 917 /* Skip if can be re-fill on fault */ 918 if (!vma_is_anonymous(vma)) 919 return 0; 920 921 pgtable = pte_alloc_one(dst_mm, addr); 922 if (unlikely(!pgtable)) 923 goto out; 924 925 dst_ptl = pmd_lock(dst_mm, dst_pmd); 926 src_ptl = pmd_lockptr(src_mm, src_pmd); 927 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 928 929 ret = -EAGAIN; 930 pmd = *src_pmd; 931 if (unlikely(!pmd_trans_huge(pmd))) { 932 pte_free(dst_mm, pgtable); 933 goto out_unlock; 934 } 935 /* 936 * When page table lock is held, the huge zero pmd should not be 937 * under splitting since we don't split the page itself, only pmd to 938 * a page table. 939 */ 940 if (is_huge_zero_pmd(pmd)) { 941 struct page *zero_page; 942 /* 943 * get_huge_zero_page() will never allocate a new page here, 944 * since we already have a zero page to copy. It just takes a 945 * reference. 946 */ 947 zero_page = mm_get_huge_zero_page(dst_mm); 948 set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd, 949 zero_page); 950 ret = 0; 951 goto out_unlock; 952 } 953 954 src_page = pmd_page(pmd); 955 VM_BUG_ON_PAGE(!PageHead(src_page), src_page); 956 get_page(src_page); 957 page_dup_rmap(src_page, true); 958 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); 959 atomic_long_inc(&dst_mm->nr_ptes); 960 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); 961 962 pmdp_set_wrprotect(src_mm, addr, src_pmd); 963 pmd = pmd_mkold(pmd_wrprotect(pmd)); 964 set_pmd_at(dst_mm, addr, dst_pmd, pmd); 965 966 ret = 0; 967 out_unlock: 968 spin_unlock(src_ptl); 969 spin_unlock(dst_ptl); 970 out: 971 return ret; 972 } 973 974 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 975 static void touch_pud(struct vm_area_struct *vma, unsigned long addr, 976 pud_t *pud) 977 { 978 pud_t _pud; 979 980 /* 981 * We should set the dirty bit only for FOLL_WRITE but for now 982 * the dirty bit in the pud is meaningless. And if the dirty 983 * bit will become meaningful and we'll only set it with 984 * FOLL_WRITE, an atomic set_bit will be required on the pud to 985 * set the young bit, instead of the current set_pud_at. 
986 */ 987 _pud = pud_mkyoung(pud_mkdirty(*pud)); 988 if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK, 989 pud, _pud, 1)) 990 update_mmu_cache_pud(vma, addr, pud); 991 } 992 993 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, 994 pud_t *pud, int flags) 995 { 996 unsigned long pfn = pud_pfn(*pud); 997 struct mm_struct *mm = vma->vm_mm; 998 struct dev_pagemap *pgmap; 999 struct page *page; 1000 1001 assert_spin_locked(pud_lockptr(mm, pud)); 1002 1003 if (flags & FOLL_WRITE && !pud_write(*pud)) 1004 return NULL; 1005 1006 if (pud_present(*pud) && pud_devmap(*pud)) 1007 /* pass */; 1008 else 1009 return NULL; 1010 1011 if (flags & FOLL_TOUCH) 1012 touch_pud(vma, addr, pud); 1013 1014 /* 1015 * device mapped pages can only be returned if the 1016 * caller will manage the page reference count. 1017 */ 1018 if (!(flags & FOLL_GET)) 1019 return ERR_PTR(-EEXIST); 1020 1021 pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT; 1022 pgmap = get_dev_pagemap(pfn, NULL); 1023 if (!pgmap) 1024 return ERR_PTR(-EFAULT); 1025 page = pfn_to_page(pfn); 1026 get_page(page); 1027 put_dev_pagemap(pgmap); 1028 1029 return page; 1030 } 1031 1032 int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, 1033 pud_t *dst_pud, pud_t *src_pud, unsigned long addr, 1034 struct vm_area_struct *vma) 1035 { 1036 spinlock_t *dst_ptl, *src_ptl; 1037 pud_t pud; 1038 int ret; 1039 1040 dst_ptl = pud_lock(dst_mm, dst_pud); 1041 src_ptl = pud_lockptr(src_mm, src_pud); 1042 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 1043 1044 ret = -EAGAIN; 1045 pud = *src_pud; 1046 if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud))) 1047 goto out_unlock; 1048 1049 /* 1050 * When page table lock is held, the huge zero pud should not be 1051 * under splitting since we don't split the page itself, only pud to 1052 * a page table. 
1053 */ 1054 if (is_huge_zero_pud(pud)) { 1055 /* No huge zero pud yet */ 1056 } 1057 1058 pudp_set_wrprotect(src_mm, addr, src_pud); 1059 pud = pud_mkold(pud_wrprotect(pud)); 1060 set_pud_at(dst_mm, addr, dst_pud, pud); 1061 1062 ret = 0; 1063 out_unlock: 1064 spin_unlock(src_ptl); 1065 spin_unlock(dst_ptl); 1066 return ret; 1067 } 1068 1069 void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) 1070 { 1071 pud_t entry; 1072 unsigned long haddr; 1073 bool write = vmf->flags & FAULT_FLAG_WRITE; 1074 1075 vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud); 1076 if (unlikely(!pud_same(*vmf->pud, orig_pud))) 1077 goto unlock; 1078 1079 entry = pud_mkyoung(orig_pud); 1080 if (write) 1081 entry = pud_mkdirty(entry); 1082 haddr = vmf->address & HPAGE_PUD_MASK; 1083 if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write)) 1084 update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud); 1085 1086 unlock: 1087 spin_unlock(vmf->ptl); 1088 } 1089 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 1090 1091 void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd) 1092 { 1093 pmd_t entry; 1094 unsigned long haddr; 1095 bool write = vmf->flags & FAULT_FLAG_WRITE; 1096 1097 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); 1098 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) 1099 goto unlock; 1100 1101 entry = pmd_mkyoung(orig_pmd); 1102 if (write) 1103 entry = pmd_mkdirty(entry); 1104 haddr = vmf->address & HPAGE_PMD_MASK; 1105 if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write)) 1106 update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd); 1107 1108 unlock: 1109 spin_unlock(vmf->ptl); 1110 } 1111 1112 static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd, 1113 struct page *page) 1114 { 1115 struct vm_area_struct *vma = vmf->vma; 1116 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 1117 struct mem_cgroup *memcg; 1118 pgtable_t pgtable; 1119 pmd_t _pmd; 1120 int ret = 0, i; 1121 struct page **pages; 1122 unsigned long mmun_start; /* For mmu_notifiers */ 1123 unsigned long mmun_end; /* For mmu_notifiers */ 1124 1125 pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR, 1126 GFP_KERNEL); 1127 if (unlikely(!pages)) { 1128 ret |= VM_FAULT_OOM; 1129 goto out; 1130 } 1131 1132 for (i = 0; i < HPAGE_PMD_NR; i++) { 1133 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma, 1134 vmf->address, page_to_nid(page)); 1135 if (unlikely(!pages[i] || 1136 mem_cgroup_try_charge(pages[i], vma->vm_mm, 1137 GFP_KERNEL, &memcg, false))) { 1138 if (pages[i]) 1139 put_page(pages[i]); 1140 while (--i >= 0) { 1141 memcg = (void *)page_private(pages[i]); 1142 set_page_private(pages[i], 0); 1143 mem_cgroup_cancel_charge(pages[i], memcg, 1144 false); 1145 put_page(pages[i]); 1146 } 1147 kfree(pages); 1148 ret |= VM_FAULT_OOM; 1149 goto out; 1150 } 1151 set_page_private(pages[i], (unsigned long)memcg); 1152 } 1153 1154 for (i = 0; i < HPAGE_PMD_NR; i++) { 1155 copy_user_highpage(pages[i], page + i, 1156 haddr + PAGE_SIZE * i, vma); 1157 __SetPageUptodate(pages[i]); 1158 cond_resched(); 1159 } 1160 1161 mmun_start = haddr; 1162 mmun_end = haddr + HPAGE_PMD_SIZE; 1163 mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); 1164 1165 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 1166 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) 1167 goto out_free_pages; 1168 VM_BUG_ON_PAGE(!PageHead(page), page); 1169 1170 pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd); 1171 /* leave pmd empty until pte is filled */ 1172 1173 pgtable = 
pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd); 1174 pmd_populate(vma->vm_mm, &_pmd, pgtable); 1175 1176 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 1177 pte_t entry; 1178 entry = mk_pte(pages[i], vma->vm_page_prot); 1179 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 1180 memcg = (void *)page_private(pages[i]); 1181 set_page_private(pages[i], 0); 1182 page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false); 1183 mem_cgroup_commit_charge(pages[i], memcg, false, false); 1184 lru_cache_add_active_or_unevictable(pages[i], vma); 1185 vmf->pte = pte_offset_map(&_pmd, haddr); 1186 VM_BUG_ON(!pte_none(*vmf->pte)); 1187 set_pte_at(vma->vm_mm, haddr, vmf->pte, entry); 1188 pte_unmap(vmf->pte); 1189 } 1190 kfree(pages); 1191 1192 smp_wmb(); /* make pte visible before pmd */ 1193 pmd_populate(vma->vm_mm, vmf->pmd, pgtable); 1194 page_remove_rmap(page, true); 1195 spin_unlock(vmf->ptl); 1196 1197 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); 1198 1199 ret |= VM_FAULT_WRITE; 1200 put_page(page); 1201 1202 out: 1203 return ret; 1204 1205 out_free_pages: 1206 spin_unlock(vmf->ptl); 1207 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); 1208 for (i = 0; i < HPAGE_PMD_NR; i++) { 1209 memcg = (void *)page_private(pages[i]); 1210 set_page_private(pages[i], 0); 1211 mem_cgroup_cancel_charge(pages[i], memcg, false); 1212 put_page(pages[i]); 1213 } 1214 kfree(pages); 1215 goto out; 1216 } 1217 1218 int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd) 1219 { 1220 struct vm_area_struct *vma = vmf->vma; 1221 struct page *page = NULL, *new_page; 1222 struct mem_cgroup *memcg; 1223 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 1224 unsigned long mmun_start; /* For mmu_notifiers */ 1225 unsigned long mmun_end; /* For mmu_notifiers */ 1226 gfp_t huge_gfp; /* for allocation and charge */ 1227 int ret = 0; 1228 1229 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); 1230 VM_BUG_ON_VMA(!vma->anon_vma, vma); 1231 if (is_huge_zero_pmd(orig_pmd)) 1232 goto alloc; 1233 spin_lock(vmf->ptl); 1234 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) 1235 goto out_unlock; 1236 1237 page = pmd_page(orig_pmd); 1238 VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page); 1239 /* 1240 * We can only reuse the page if nobody else maps the huge page or it's 1241 * part. 
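	 *
	 * (Informal note: page_trans_huge_mapcount() == 1 below means neither
	 * the compound page nor any of its subpages is mapped anywhere else,
	 * so the fault can be resolved by making the existing pmd writable
	 * instead of copying.)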
1242 */ 1243 if (page_trans_huge_mapcount(page, NULL) == 1) { 1244 pmd_t entry; 1245 entry = pmd_mkyoung(orig_pmd); 1246 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 1247 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) 1248 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 1249 ret |= VM_FAULT_WRITE; 1250 goto out_unlock; 1251 } 1252 get_page(page); 1253 spin_unlock(vmf->ptl); 1254 alloc: 1255 if (transparent_hugepage_enabled(vma) && 1256 !transparent_hugepage_debug_cow()) { 1257 huge_gfp = alloc_hugepage_direct_gfpmask(vma); 1258 new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); 1259 } else 1260 new_page = NULL; 1261 1262 if (likely(new_page)) { 1263 prep_transhuge_page(new_page); 1264 } else { 1265 if (!page) { 1266 split_huge_pmd(vma, vmf->pmd, vmf->address); 1267 ret |= VM_FAULT_FALLBACK; 1268 } else { 1269 ret = do_huge_pmd_wp_page_fallback(vmf, orig_pmd, page); 1270 if (ret & VM_FAULT_OOM) { 1271 split_huge_pmd(vma, vmf->pmd, vmf->address); 1272 ret |= VM_FAULT_FALLBACK; 1273 } 1274 put_page(page); 1275 } 1276 count_vm_event(THP_FAULT_FALLBACK); 1277 goto out; 1278 } 1279 1280 if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm, 1281 huge_gfp, &memcg, true))) { 1282 put_page(new_page); 1283 split_huge_pmd(vma, vmf->pmd, vmf->address); 1284 if (page) 1285 put_page(page); 1286 ret |= VM_FAULT_FALLBACK; 1287 count_vm_event(THP_FAULT_FALLBACK); 1288 goto out; 1289 } 1290 1291 count_vm_event(THP_FAULT_ALLOC); 1292 1293 if (!page) 1294 clear_huge_page(new_page, haddr, HPAGE_PMD_NR); 1295 else 1296 copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); 1297 __SetPageUptodate(new_page); 1298 1299 mmun_start = haddr; 1300 mmun_end = haddr + HPAGE_PMD_SIZE; 1301 mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); 1302 1303 spin_lock(vmf->ptl); 1304 if (page) 1305 put_page(page); 1306 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { 1307 spin_unlock(vmf->ptl); 1308 mem_cgroup_cancel_charge(new_page, memcg, true); 1309 put_page(new_page); 1310 goto out_mn; 1311 } else { 1312 pmd_t entry; 1313 entry = mk_huge_pmd(new_page, vma->vm_page_prot); 1314 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 1315 pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd); 1316 page_add_new_anon_rmap(new_page, vma, haddr, true); 1317 mem_cgroup_commit_charge(new_page, memcg, false, true); 1318 lru_cache_add_active_or_unevictable(new_page, vma); 1319 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); 1320 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 1321 if (!page) { 1322 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); 1323 } else { 1324 VM_BUG_ON_PAGE(!PageHead(page), page); 1325 page_remove_rmap(page, true); 1326 put_page(page); 1327 } 1328 ret |= VM_FAULT_WRITE; 1329 } 1330 spin_unlock(vmf->ptl); 1331 out_mn: 1332 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); 1333 out: 1334 return ret; 1335 out_unlock: 1336 spin_unlock(vmf->ptl); 1337 return ret; 1338 } 1339 1340 /* 1341 * FOLL_FORCE can write to even unwritable pmd's, but only 1342 * after we've gone through a COW cycle and they are dirty. 
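 *
 * (This is intended as the pmd-level counterpart of can_follow_write_pte()
 * in mm/gup.c.)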
1343 */ 1344 static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) 1345 { 1346 return pmd_write(pmd) || 1347 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd)); 1348 } 1349 1350 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, 1351 unsigned long addr, 1352 pmd_t *pmd, 1353 unsigned int flags) 1354 { 1355 struct mm_struct *mm = vma->vm_mm; 1356 struct page *page = NULL; 1357 1358 assert_spin_locked(pmd_lockptr(mm, pmd)); 1359 1360 if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags)) 1361 goto out; 1362 1363 /* Avoid dumping huge zero page */ 1364 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) 1365 return ERR_PTR(-EFAULT); 1366 1367 /* Full NUMA hinting faults to serialise migration in fault paths */ 1368 if ((flags & FOLL_NUMA) && pmd_protnone(*pmd)) 1369 goto out; 1370 1371 page = pmd_page(*pmd); 1372 VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page); 1373 if (flags & FOLL_TOUCH) 1374 touch_pmd(vma, addr, pmd); 1375 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { 1376 /* 1377 * We don't mlock() pte-mapped THPs. This way we can avoid 1378 * leaking mlocked pages into non-VM_LOCKED VMAs. 1379 * 1380 * For anon THP: 1381 * 1382 * In most cases the pmd is the only mapping of the page as we 1383 * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for 1384 * writable private mappings in populate_vma_page_range(). 1385 * 1386 * The only scenario when we have the page shared here is if we 1387 * mlocking read-only mapping shared over fork(). We skip 1388 * mlocking such pages. 1389 * 1390 * For file THP: 1391 * 1392 * We can expect PageDoubleMap() to be stable under page lock: 1393 * for file pages we set it in page_add_file_rmap(), which 1394 * requires page to be locked. 1395 */ 1396 1397 if (PageAnon(page) && compound_mapcount(page) != 1) 1398 goto skip_mlock; 1399 if (PageDoubleMap(page) || !page->mapping) 1400 goto skip_mlock; 1401 if (!trylock_page(page)) 1402 goto skip_mlock; 1403 lru_add_drain(); 1404 if (page->mapping && !PageDoubleMap(page)) 1405 mlock_vma_page(page); 1406 unlock_page(page); 1407 } 1408 skip_mlock: 1409 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; 1410 VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page); 1411 if (flags & FOLL_GET) 1412 get_page(page); 1413 1414 out: 1415 return page; 1416 } 1417 1418 /* NUMA hinting page fault entry point for trans huge pmds */ 1419 int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd) 1420 { 1421 struct vm_area_struct *vma = vmf->vma; 1422 struct anon_vma *anon_vma = NULL; 1423 struct page *page; 1424 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 1425 int page_nid = -1, this_nid = numa_node_id(); 1426 int target_nid, last_cpupid = -1; 1427 bool page_locked; 1428 bool migrated = false; 1429 bool was_writable; 1430 int flags = 0; 1431 1432 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 1433 if (unlikely(!pmd_same(pmd, *vmf->pmd))) 1434 goto out_unlock; 1435 1436 /* 1437 * If there are potential migrations, wait for completion and retry 1438 * without disrupting NUMA hinting information. Do not relock and 1439 * check_same as the page may no longer be mapped. 
1440 */ 1441 if (unlikely(pmd_trans_migrating(*vmf->pmd))) { 1442 page = pmd_page(*vmf->pmd); 1443 if (!get_page_unless_zero(page)) 1444 goto out_unlock; 1445 spin_unlock(vmf->ptl); 1446 wait_on_page_locked(page); 1447 put_page(page); 1448 goto out; 1449 } 1450 1451 page = pmd_page(pmd); 1452 BUG_ON(is_huge_zero_page(page)); 1453 page_nid = page_to_nid(page); 1454 last_cpupid = page_cpupid_last(page); 1455 count_vm_numa_event(NUMA_HINT_FAULTS); 1456 if (page_nid == this_nid) { 1457 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); 1458 flags |= TNF_FAULT_LOCAL; 1459 } 1460 1461 /* See similar comment in do_numa_page for explanation */ 1462 if (!pmd_savedwrite(pmd)) 1463 flags |= TNF_NO_GROUP; 1464 1465 /* 1466 * Acquire the page lock to serialise THP migrations but avoid dropping 1467 * page_table_lock if at all possible 1468 */ 1469 page_locked = trylock_page(page); 1470 target_nid = mpol_misplaced(page, vma, haddr); 1471 if (target_nid == -1) { 1472 /* If the page was locked, there are no parallel migrations */ 1473 if (page_locked) 1474 goto clear_pmdnuma; 1475 } 1476 1477 /* Migration could have started since the pmd_trans_migrating check */ 1478 if (!page_locked) { 1479 page_nid = -1; 1480 if (!get_page_unless_zero(page)) 1481 goto out_unlock; 1482 spin_unlock(vmf->ptl); 1483 wait_on_page_locked(page); 1484 put_page(page); 1485 goto out; 1486 } 1487 1488 /* 1489 * Page is misplaced. Page lock serialises migrations. Acquire anon_vma 1490 * to serialises splits 1491 */ 1492 get_page(page); 1493 spin_unlock(vmf->ptl); 1494 anon_vma = page_lock_anon_vma_read(page); 1495 1496 /* Confirm the PMD did not change while page_table_lock was released */ 1497 spin_lock(vmf->ptl); 1498 if (unlikely(!pmd_same(pmd, *vmf->pmd))) { 1499 unlock_page(page); 1500 put_page(page); 1501 page_nid = -1; 1502 goto out_unlock; 1503 } 1504 1505 /* Bail if we fail to protect against THP splits for any reason */ 1506 if (unlikely(!anon_vma)) { 1507 put_page(page); 1508 page_nid = -1; 1509 goto clear_pmdnuma; 1510 } 1511 1512 /* 1513 * Since we took the NUMA fault, we must have observed the !accessible 1514 * bit. Make sure all other CPUs agree with that, to avoid them 1515 * modifying the page we're about to migrate. 1516 * 1517 * Must be done under PTL such that we'll observe the relevant 1518 * inc_tlb_flush_pending(). 1519 * 1520 * We are not sure a pending tlb flush here is for a huge page 1521 * mapping or not. Hence use the tlb range variant 1522 */ 1523 if (mm_tlb_flush_pending(vma->vm_mm)) 1524 flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE); 1525 1526 /* 1527 * Migrate the THP to the requested node, returns with page unlocked 1528 * and access rights restored. 
1529 */ 1530 spin_unlock(vmf->ptl); 1531 1532 migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma, 1533 vmf->pmd, pmd, vmf->address, page, target_nid); 1534 if (migrated) { 1535 flags |= TNF_MIGRATED; 1536 page_nid = target_nid; 1537 } else 1538 flags |= TNF_MIGRATE_FAIL; 1539 1540 goto out; 1541 clear_pmdnuma: 1542 BUG_ON(!PageLocked(page)); 1543 was_writable = pmd_savedwrite(pmd); 1544 pmd = pmd_modify(pmd, vma->vm_page_prot); 1545 pmd = pmd_mkyoung(pmd); 1546 if (was_writable) 1547 pmd = pmd_mkwrite(pmd); 1548 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); 1549 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 1550 unlock_page(page); 1551 out_unlock: 1552 spin_unlock(vmf->ptl); 1553 1554 out: 1555 if (anon_vma) 1556 page_unlock_anon_vma_read(anon_vma); 1557 1558 if (page_nid != -1) 1559 task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, 1560 flags); 1561 1562 return 0; 1563 } 1564 1565 /* 1566 * Return true if we do MADV_FREE successfully on entire pmd page. 1567 * Otherwise, return false. 1568 */ 1569 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1570 pmd_t *pmd, unsigned long addr, unsigned long next) 1571 { 1572 spinlock_t *ptl; 1573 pmd_t orig_pmd; 1574 struct page *page; 1575 struct mm_struct *mm = tlb->mm; 1576 bool ret = false; 1577 1578 tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE); 1579 1580 ptl = pmd_trans_huge_lock(pmd, vma); 1581 if (!ptl) 1582 goto out_unlocked; 1583 1584 orig_pmd = *pmd; 1585 if (is_huge_zero_pmd(orig_pmd)) 1586 goto out; 1587 1588 page = pmd_page(orig_pmd); 1589 /* 1590 * If other processes are mapping this page, we couldn't discard 1591 * the page unless they all do MADV_FREE so let's skip the page. 1592 */ 1593 if (page_mapcount(page) != 1) 1594 goto out; 1595 1596 if (!trylock_page(page)) 1597 goto out; 1598 1599 /* 1600 * If user want to discard part-pages of THP, split it so MADV_FREE 1601 * will deactivate only them. 1602 */ 1603 if (next - addr != HPAGE_PMD_SIZE) { 1604 get_page(page); 1605 spin_unlock(ptl); 1606 split_huge_page(page); 1607 unlock_page(page); 1608 put_page(page); 1609 goto out_unlocked; 1610 } 1611 1612 if (PageDirty(page)) 1613 ClearPageDirty(page); 1614 unlock_page(page); 1615 1616 if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) { 1617 pmdp_invalidate(vma, addr, pmd); 1618 orig_pmd = pmd_mkold(orig_pmd); 1619 orig_pmd = pmd_mkclean(orig_pmd); 1620 1621 set_pmd_at(mm, addr, pmd, orig_pmd); 1622 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 1623 } 1624 1625 mark_page_lazyfree(page); 1626 ret = true; 1627 out: 1628 spin_unlock(ptl); 1629 out_unlocked: 1630 return ret; 1631 } 1632 1633 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) 1634 { 1635 pgtable_t pgtable; 1636 1637 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1638 pte_free(mm, pgtable); 1639 atomic_long_dec(&mm->nr_ptes); 1640 } 1641 1642 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1643 pmd_t *pmd, unsigned long addr) 1644 { 1645 pmd_t orig_pmd; 1646 spinlock_t *ptl; 1647 1648 tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE); 1649 1650 ptl = __pmd_trans_huge_lock(pmd, vma); 1651 if (!ptl) 1652 return 0; 1653 /* 1654 * For architectures like ppc64 we look at deposited pgtable 1655 * when calling pmdp_huge_get_and_clear. So do the 1656 * pgtable_trans_huge_withdraw after finishing pmdp related 1657 * operations. 
1658 */ 1659 orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, 1660 tlb->fullmm); 1661 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 1662 if (vma_is_dax(vma)) { 1663 if (arch_needs_pgtable_deposit()) 1664 zap_deposited_table(tlb->mm, pmd); 1665 spin_unlock(ptl); 1666 if (is_huge_zero_pmd(orig_pmd)) 1667 tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE); 1668 } else if (is_huge_zero_pmd(orig_pmd)) { 1669 zap_deposited_table(tlb->mm, pmd); 1670 spin_unlock(ptl); 1671 tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE); 1672 } else { 1673 struct page *page = pmd_page(orig_pmd); 1674 page_remove_rmap(page, true); 1675 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); 1676 VM_BUG_ON_PAGE(!PageHead(page), page); 1677 if (PageAnon(page)) { 1678 zap_deposited_table(tlb->mm, pmd); 1679 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); 1680 } else { 1681 if (arch_needs_pgtable_deposit()) 1682 zap_deposited_table(tlb->mm, pmd); 1683 add_mm_counter(tlb->mm, MM_FILEPAGES, -HPAGE_PMD_NR); 1684 } 1685 spin_unlock(ptl); 1686 tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE); 1687 } 1688 return 1; 1689 } 1690 1691 #ifndef pmd_move_must_withdraw 1692 static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl, 1693 spinlock_t *old_pmd_ptl, 1694 struct vm_area_struct *vma) 1695 { 1696 /* 1697 * With split pmd lock we also need to move preallocated 1698 * PTE page table if new_pmd is on different PMD page table. 1699 * 1700 * We also don't deposit and withdraw tables for file pages. 1701 */ 1702 return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); 1703 } 1704 #endif 1705 1706 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, 1707 unsigned long new_addr, unsigned long old_end, 1708 pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush) 1709 { 1710 spinlock_t *old_ptl, *new_ptl; 1711 pmd_t pmd; 1712 struct mm_struct *mm = vma->vm_mm; 1713 bool force_flush = false; 1714 1715 if ((old_addr & ~HPAGE_PMD_MASK) || 1716 (new_addr & ~HPAGE_PMD_MASK) || 1717 old_end - old_addr < HPAGE_PMD_SIZE) 1718 return false; 1719 1720 /* 1721 * The destination pmd shouldn't be established, free_pgtables() 1722 * should have release it. 1723 */ 1724 if (WARN_ON(!pmd_none(*new_pmd))) { 1725 VM_BUG_ON(pmd_trans_huge(*new_pmd)); 1726 return false; 1727 } 1728 1729 /* 1730 * We don't have to worry about the ordering of src and dst 1731 * ptlocks because exclusive mmap_sem prevents deadlock. 
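	 *
	 * (Informal expansion: this path is only reached from mremap(), which
	 * holds mmap_sem for write, so no other task can take these two
	 * ptlocks in the opposite order while we hold them.)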
1732 */ 1733 old_ptl = __pmd_trans_huge_lock(old_pmd, vma); 1734 if (old_ptl) { 1735 new_ptl = pmd_lockptr(mm, new_pmd); 1736 if (new_ptl != old_ptl) 1737 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); 1738 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); 1739 if (pmd_present(pmd) && pmd_dirty(pmd)) 1740 force_flush = true; 1741 VM_BUG_ON(!pmd_none(*new_pmd)); 1742 1743 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) { 1744 pgtable_t pgtable; 1745 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); 1746 pgtable_trans_huge_deposit(mm, new_pmd, pgtable); 1747 } 1748 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); 1749 if (new_ptl != old_ptl) 1750 spin_unlock(new_ptl); 1751 if (force_flush) 1752 flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); 1753 else 1754 *need_flush = true; 1755 spin_unlock(old_ptl); 1756 return true; 1757 } 1758 return false; 1759 } 1760 1761 /* 1762 * Returns 1763 * - 0 if PMD could not be locked 1764 * - 1 if PMD was locked but protections unchange and TLB flush unnecessary 1765 * - HPAGE_PMD_NR is protections changed and TLB flush necessary 1766 */ 1767 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 1768 unsigned long addr, pgprot_t newprot, int prot_numa) 1769 { 1770 struct mm_struct *mm = vma->vm_mm; 1771 spinlock_t *ptl; 1772 pmd_t entry; 1773 bool preserve_write; 1774 int ret; 1775 1776 ptl = __pmd_trans_huge_lock(pmd, vma); 1777 if (!ptl) 1778 return 0; 1779 1780 preserve_write = prot_numa && pmd_write(*pmd); 1781 ret = 1; 1782 1783 /* 1784 * Avoid trapping faults against the zero page. The read-only 1785 * data is likely to be read-cached on the local CPU and 1786 * local/remote hits to the zero page are not interesting. 1787 */ 1788 if (prot_numa && is_huge_zero_pmd(*pmd)) 1789 goto unlock; 1790 1791 if (prot_numa && pmd_protnone(*pmd)) 1792 goto unlock; 1793 1794 /* 1795 * In case prot_numa, we are under down_read(mmap_sem). It's critical 1796 * to not clear pmd intermittently to avoid race with MADV_DONTNEED 1797 * which is also under down_read(mmap_sem): 1798 * 1799 * CPU0: CPU1: 1800 * change_huge_pmd(prot_numa=1) 1801 * pmdp_huge_get_and_clear_notify() 1802 * madvise_dontneed() 1803 * zap_pmd_range() 1804 * pmd_trans_huge(*pmd) == 0 (without ptl) 1805 * // skip the pmd 1806 * set_pmd_at(); 1807 * // pmd is re-established 1808 * 1809 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it 1810 * which may break userspace. 1811 * 1812 * pmdp_invalidate() is required to make sure we don't miss 1813 * dirty/young flags set by hardware. 1814 */ 1815 entry = *pmd; 1816 pmdp_invalidate(vma, addr, pmd); 1817 1818 /* 1819 * Recover dirty/young flags. It relies on pmdp_invalidate to not 1820 * corrupt them. 1821 */ 1822 if (pmd_dirty(*pmd)) 1823 entry = pmd_mkdirty(entry); 1824 if (pmd_young(*pmd)) 1825 entry = pmd_mkyoung(entry); 1826 1827 entry = pmd_modify(entry, newprot); 1828 if (preserve_write) 1829 entry = pmd_mk_savedwrite(entry); 1830 ret = HPAGE_PMD_NR; 1831 set_pmd_at(mm, addr, pmd, entry); 1832 BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry)); 1833 unlock: 1834 spin_unlock(ptl); 1835 return ret; 1836 } 1837 1838 /* 1839 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise. 1840 * 1841 * Note that if it returns page table lock pointer, this routine returns without 1842 * unlocking page table lock. So callers must unlock it. 
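 *
 * Typical caller pattern, as a sketch of the usage seen in zap_huge_pmd()
 * above:
 *
 *	ptl = __pmd_trans_huge_lock(pmd, vma);
 *	if (!ptl)
 *		return 0;
 *	... operate on the huge pmd ...
 *	spin_unlock(ptl);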
1843 */ 1844 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) 1845 { 1846 spinlock_t *ptl; 1847 ptl = pmd_lock(vma->vm_mm, pmd); 1848 if (likely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd))) 1849 return ptl; 1850 spin_unlock(ptl); 1851 return NULL; 1852 } 1853 1854 /* 1855 * Returns true if a given pud maps a thp, false otherwise. 1856 * 1857 * Note that if it returns true, this routine returns without unlocking page 1858 * table lock. So callers must unlock it. 1859 */ 1860 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) 1861 { 1862 spinlock_t *ptl; 1863 1864 ptl = pud_lock(vma->vm_mm, pud); 1865 if (likely(pud_trans_huge(*pud) || pud_devmap(*pud))) 1866 return ptl; 1867 spin_unlock(ptl); 1868 return NULL; 1869 } 1870 1871 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 1872 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, 1873 pud_t *pud, unsigned long addr) 1874 { 1875 pud_t orig_pud; 1876 spinlock_t *ptl; 1877 1878 ptl = __pud_trans_huge_lock(pud, vma); 1879 if (!ptl) 1880 return 0; 1881 /* 1882 * For architectures like ppc64 we look at deposited pgtable 1883 * when calling pudp_huge_get_and_clear. So do the 1884 * pgtable_trans_huge_withdraw after finishing pudp related 1885 * operations. 1886 */ 1887 orig_pud = pudp_huge_get_and_clear_full(tlb->mm, addr, pud, 1888 tlb->fullmm); 1889 tlb_remove_pud_tlb_entry(tlb, pud, addr); 1890 if (vma_is_dax(vma)) { 1891 spin_unlock(ptl); 1892 /* No zero page support yet */ 1893 } else { 1894 /* No support for anonymous PUD pages yet */ 1895 BUG(); 1896 } 1897 return 1; 1898 } 1899 1900 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, 1901 unsigned long haddr) 1902 { 1903 VM_BUG_ON(haddr & ~HPAGE_PUD_MASK); 1904 VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 1905 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); 1906 VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud)); 1907 1908 count_vm_event(THP_SPLIT_PUD); 1909 1910 pudp_huge_clear_flush_notify(vma, haddr, pud); 1911 } 1912 1913 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, 1914 unsigned long address) 1915 { 1916 spinlock_t *ptl; 1917 struct mm_struct *mm = vma->vm_mm; 1918 unsigned long haddr = address & HPAGE_PUD_MASK; 1919 1920 mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PUD_SIZE); 1921 ptl = pud_lock(mm, pud); 1922 if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud))) 1923 goto out; 1924 __split_huge_pud_locked(vma, pud, haddr); 1925 1926 out: 1927 spin_unlock(ptl); 1928 mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PUD_SIZE); 1929 } 1930 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 1931 1932 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, 1933 unsigned long haddr, pmd_t *pmd) 1934 { 1935 struct mm_struct *mm = vma->vm_mm; 1936 pgtable_t pgtable; 1937 pmd_t _pmd; 1938 int i; 1939 1940 /* leave pmd empty until pte is filled */ 1941 pmdp_huge_clear_flush_notify(vma, haddr, pmd); 1942 1943 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1944 pmd_populate(mm, &_pmd, pgtable); 1945 1946 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 1947 pte_t *pte, entry; 1948 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); 1949 entry = pte_mkspecial(entry); 1950 pte = pte_offset_map(&_pmd, haddr); 1951 VM_BUG_ON(!pte_none(*pte)); 1952 set_pte_at(mm, haddr, pte, entry); 1953 pte_unmap(pte); 1954 } 1955 smp_wmb(); /* make pte visible before pmd */ 1956 pmd_populate(mm, pmd, pgtable); 1957 } 1958 1959 static void 
__split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, 1960 unsigned long haddr, bool freeze) 1961 { 1962 struct mm_struct *mm = vma->vm_mm; 1963 struct page *page; 1964 pgtable_t pgtable; 1965 pmd_t _pmd; 1966 bool young, write, dirty, soft_dirty; 1967 unsigned long addr; 1968 int i; 1969 1970 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); 1971 VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 1972 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); 1973 VM_BUG_ON(!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)); 1974 1975 count_vm_event(THP_SPLIT_PMD); 1976 1977 if (!vma_is_anonymous(vma)) { 1978 _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); 1979 /* 1980 * We are going to unmap this huge page. So 1981 * just go ahead and zap it 1982 */ 1983 if (arch_needs_pgtable_deposit()) 1984 zap_deposited_table(mm, pmd); 1985 if (vma_is_dax(vma)) 1986 return; 1987 page = pmd_page(_pmd); 1988 if (!PageReferenced(page) && pmd_young(_pmd)) 1989 SetPageReferenced(page); 1990 page_remove_rmap(page, true); 1991 put_page(page); 1992 add_mm_counter(mm, MM_FILEPAGES, -HPAGE_PMD_NR); 1993 return; 1994 } else if (is_huge_zero_pmd(*pmd)) { 1995 return __split_huge_zero_page_pmd(vma, haddr, pmd); 1996 } 1997 1998 page = pmd_page(*pmd); 1999 VM_BUG_ON_PAGE(!page_count(page), page); 2000 page_ref_add(page, HPAGE_PMD_NR - 1); 2001 write = pmd_write(*pmd); 2002 young = pmd_young(*pmd); 2003 dirty = pmd_dirty(*pmd); 2004 soft_dirty = pmd_soft_dirty(*pmd); 2005 2006 pmdp_huge_split_prepare(vma, haddr, pmd); 2007 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2008 pmd_populate(mm, &_pmd, pgtable); 2009 2010 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { 2011 pte_t entry, *pte; 2012 /* 2013 * Note that NUMA hinting access restrictions are not 2014 * transferred to avoid any possibility of altering 2015 * permissions across VMAs. 2016 */ 2017 if (freeze) { 2018 swp_entry_t swp_entry; 2019 swp_entry = make_migration_entry(page + i, write); 2020 entry = swp_entry_to_pte(swp_entry); 2021 if (soft_dirty) 2022 entry = pte_swp_mksoft_dirty(entry); 2023 } else { 2024 entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); 2025 entry = maybe_mkwrite(entry, vma); 2026 if (!write) 2027 entry = pte_wrprotect(entry); 2028 if (!young) 2029 entry = pte_mkold(entry); 2030 if (soft_dirty) 2031 entry = pte_mksoft_dirty(entry); 2032 } 2033 if (dirty) 2034 SetPageDirty(page + i); 2035 pte = pte_offset_map(&_pmd, addr); 2036 BUG_ON(!pte_none(*pte)); 2037 set_pte_at(mm, addr, pte, entry); 2038 atomic_inc(&page[i]._mapcount); 2039 pte_unmap(pte); 2040 } 2041 2042 /* 2043 * Set PG_double_map before dropping compound_mapcount to avoid 2044 * false-negative page_mapped(). 2045 */ 2046 if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) { 2047 for (i = 0; i < HPAGE_PMD_NR; i++) 2048 atomic_inc(&page[i]._mapcount); 2049 } 2050 2051 if (atomic_add_negative(-1, compound_mapcount_ptr(page))) { 2052 /* Last compound_mapcount is gone. */ 2053 __dec_node_page_state(page, NR_ANON_THPS); 2054 if (TestClearPageDoubleMap(page)) { 2055 /* No need in mapcount reference anymore */ 2056 for (i = 0; i < HPAGE_PMD_NR; i++) 2057 atomic_dec(&page[i]._mapcount); 2058 } 2059 } 2060 2061 smp_wmb(); /* make pte visible before pmd */ 2062 /* 2063 * Up to this point the pmd is present and huge and userland has the 2064 * whole access to the hugepage during the split (which happens in 2065 * place). 
If we overwrite the pmd with the not-huge version pointing
2066 * to the pte here (which of course we could if all CPUs were bug
2067 * free), userland could trigger a small page size TLB miss on the
2068 * small sized TLB while the hugepage TLB entry is still established in
2069 * the huge TLB. Some CPUs don't like that.
2070 * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
2071 * 383 on page 93. Intel should be safe but it also warns that it's
2072 * only safe if the permission and cache attributes of the two entries
2073 * loaded in the two TLBs are identical (which should be the case here).
2074 * But it is generally safer to never allow small and huge TLB entries
2075 * for the same virtual address to be loaded simultaneously. So instead
2076 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
2077 * current pmd notpresent (atomically because here the pmd_trans_huge
2078 * and pmd_trans_splitting must remain set at all times on the pmd
2079 * until the split is complete for this pmd), then we flush the SMP TLB
2080 * and finally we write the non-huge version of the pmd entry with
2081 * pmd_populate.
2082 */
2083 pmdp_invalidate(vma, haddr, pmd);
2084 pmd_populate(mm, pmd, pgtable);
2085
2086 if (freeze) {
2087 for (i = 0; i < HPAGE_PMD_NR; i++) {
2088 page_remove_rmap(page + i, false);
2089 put_page(page + i);
2090 }
2091 }
2092 }
2093
2094 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2095 unsigned long address, bool freeze, struct page *page)
2096 {
2097 spinlock_t *ptl;
2098 struct mm_struct *mm = vma->vm_mm;
2099 unsigned long haddr = address & HPAGE_PMD_MASK;
2100
2101 mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
2102 ptl = pmd_lock(mm, pmd);
2103
2104 /*
2105 * If the caller asks to set up migration entries, we need a page to check
2106 * the pmd against. Otherwise we can end up replacing the wrong page.
2107 */
2108 VM_BUG_ON(freeze && !page);
2109 if (page && page != pmd_page(*pmd))
2110 goto out;
2111
2112 if (pmd_trans_huge(*pmd)) {
2113 page = pmd_page(*pmd);
2114 if (PageMlocked(page))
2115 clear_page_mlock(page);
2116 } else if (!pmd_devmap(*pmd))
2117 goto out;
2118 __split_huge_pmd_locked(vma, pmd, haddr, freeze);
2119 out:
2120 spin_unlock(ptl);
2121 mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
2122 }
2123
2124 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
2125 bool freeze, struct page *page)
2126 {
2127 pgd_t *pgd;
2128 p4d_t *p4d;
2129 pud_t *pud;
2130 pmd_t *pmd;
2131
2132 pgd = pgd_offset(vma->vm_mm, address);
2133 if (!pgd_present(*pgd))
2134 return;
2135
2136 p4d = p4d_offset(pgd, address);
2137 if (!p4d_present(*p4d))
2138 return;
2139
2140 pud = pud_offset(p4d, address);
2141 if (!pud_present(*pud))
2142 return;
2143
2144 pmd = pmd_offset(pud, address);
2145
2146 __split_huge_pmd(vma, pmd, address, freeze, page);
2147 }
2148
2149 void vma_adjust_trans_huge(struct vm_area_struct *vma,
2150 unsigned long start,
2151 unsigned long end,
2152 long adjust_next)
2153 {
2154 /*
2155 * If the new start address isn't hpage aligned and it could
2156 * previously contain a hugepage: check if we need to split
2157 * a huge pmd.
2158 */ 2159 if (start & ~HPAGE_PMD_MASK && 2160 (start & HPAGE_PMD_MASK) >= vma->vm_start && 2161 (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) 2162 split_huge_pmd_address(vma, start, false, NULL); 2163 2164 /* 2165 * If the new end address isn't hpage aligned and it could 2166 * previously contain an hugepage: check if we need to split 2167 * an huge pmd. 2168 */ 2169 if (end & ~HPAGE_PMD_MASK && 2170 (end & HPAGE_PMD_MASK) >= vma->vm_start && 2171 (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) 2172 split_huge_pmd_address(vma, end, false, NULL); 2173 2174 /* 2175 * If we're also updating the vma->vm_next->vm_start, if the new 2176 * vm_next->vm_start isn't page aligned and it could previously 2177 * contain an hugepage: check if we need to split an huge pmd. 2178 */ 2179 if (adjust_next > 0) { 2180 struct vm_area_struct *next = vma->vm_next; 2181 unsigned long nstart = next->vm_start; 2182 nstart += adjust_next << PAGE_SHIFT; 2183 if (nstart & ~HPAGE_PMD_MASK && 2184 (nstart & HPAGE_PMD_MASK) >= next->vm_start && 2185 (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end) 2186 split_huge_pmd_address(next, nstart, false, NULL); 2187 } 2188 } 2189 2190 static void freeze_page(struct page *page) 2191 { 2192 enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS | 2193 TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD; 2194 bool unmap_success; 2195 2196 VM_BUG_ON_PAGE(!PageHead(page), page); 2197 2198 if (PageAnon(page)) 2199 ttu_flags |= TTU_MIGRATION; 2200 2201 unmap_success = try_to_unmap(page, ttu_flags); 2202 VM_BUG_ON_PAGE(!unmap_success, page); 2203 } 2204 2205 static void unfreeze_page(struct page *page) 2206 { 2207 int i; 2208 if (PageTransHuge(page)) { 2209 remove_migration_ptes(page, page, true); 2210 } else { 2211 for (i = 0; i < HPAGE_PMD_NR; i++) 2212 remove_migration_ptes(page + i, page + i, true); 2213 } 2214 } 2215 2216 static void __split_huge_page_tail(struct page *head, int tail, 2217 struct lruvec *lruvec, struct list_head *list) 2218 { 2219 struct page *page_tail = head + tail; 2220 2221 VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail); 2222 VM_BUG_ON_PAGE(page_ref_count(page_tail) != 0, page_tail); 2223 2224 /* 2225 * tail_page->_refcount is zero and not changing from under us. But 2226 * get_page_unless_zero() may be running from under us on the 2227 * tail_page. If we used atomic_set() below instead of atomic_inc() or 2228 * atomic_add(), we would then run atomic_set() concurrently with 2229 * get_page_unless_zero(), and atomic_set() is implemented in C not 2230 * using locked ops. spin_unlock on x86 sometime uses locked ops 2231 * because of PPro errata 66, 92, so unless somebody can guarantee 2232 * atomic_set() here would be safe on all archs (and not only on x86), 2233 * it's safer to use atomic_inc()/atomic_add(). 2234 */ 2235 if (PageAnon(head) && !PageSwapCache(head)) { 2236 page_ref_inc(page_tail); 2237 } else { 2238 /* Additional pin to radix tree */ 2239 page_ref_add(page_tail, 2); 2240 } 2241 2242 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 2243 page_tail->flags |= (head->flags & 2244 ((1L << PG_referenced) | 2245 (1L << PG_swapbacked) | 2246 (1L << PG_swapcache) | 2247 (1L << PG_mlocked) | 2248 (1L << PG_uptodate) | 2249 (1L << PG_active) | 2250 (1L << PG_locked) | 2251 (1L << PG_unevictable) | 2252 (1L << PG_dirty))); 2253 2254 /* 2255 * After clearing PageTail the gup refcount can be released. 2256 * Page flags also must be visible before we make the page non-compound. 
2257 */ 2258 smp_wmb(); 2259 2260 clear_compound_head(page_tail); 2261 2262 if (page_is_young(head)) 2263 set_page_young(page_tail); 2264 if (page_is_idle(head)) 2265 set_page_idle(page_tail); 2266 2267 /* ->mapping in first tail page is compound_mapcount */ 2268 VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING, 2269 page_tail); 2270 page_tail->mapping = head->mapping; 2271 2272 page_tail->index = head->index + tail; 2273 page_cpupid_xchg_last(page_tail, page_cpupid_last(head)); 2274 lru_add_page_tail(head, page_tail, lruvec, list); 2275 } 2276 2277 static void __split_huge_page(struct page *page, struct list_head *list, 2278 unsigned long flags) 2279 { 2280 struct page *head = compound_head(page); 2281 struct zone *zone = page_zone(head); 2282 struct lruvec *lruvec; 2283 pgoff_t end = -1; 2284 int i; 2285 2286 lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat); 2287 2288 /* complete memcg works before add pages to LRU */ 2289 mem_cgroup_split_huge_fixup(head); 2290 2291 if (!PageAnon(page)) 2292 end = DIV_ROUND_UP(i_size_read(head->mapping->host), PAGE_SIZE); 2293 2294 for (i = HPAGE_PMD_NR - 1; i >= 1; i--) { 2295 __split_huge_page_tail(head, i, lruvec, list); 2296 /* Some pages can be beyond i_size: drop them from page cache */ 2297 if (head[i].index >= end) { 2298 __ClearPageDirty(head + i); 2299 __delete_from_page_cache(head + i, NULL); 2300 if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head)) 2301 shmem_uncharge(head->mapping->host, 1); 2302 put_page(head + i); 2303 } 2304 } 2305 2306 ClearPageCompound(head); 2307 /* See comment in __split_huge_page_tail() */ 2308 if (PageAnon(head)) { 2309 /* Additional pin to radix tree of swap cache */ 2310 if (PageSwapCache(head)) 2311 page_ref_add(head, 2); 2312 else 2313 page_ref_inc(head); 2314 } else { 2315 /* Additional pin to radix tree */ 2316 page_ref_add(head, 2); 2317 spin_unlock(&head->mapping->tree_lock); 2318 } 2319 2320 spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags); 2321 2322 unfreeze_page(head); 2323 2324 for (i = 0; i < HPAGE_PMD_NR; i++) { 2325 struct page *subpage = head + i; 2326 if (subpage == page) 2327 continue; 2328 unlock_page(subpage); 2329 2330 /* 2331 * Subpages may be freed if there wasn't any mapping 2332 * like if add_to_swap() is running on a lru page that 2333 * had its mapping zapped. And freeing these pages 2334 * requires taking the lru_lock so we do the put_page 2335 * of the tail pages after the split is complete. 2336 */ 2337 put_page(subpage); 2338 } 2339 } 2340 2341 int total_mapcount(struct page *page) 2342 { 2343 int i, compound, ret; 2344 2345 VM_BUG_ON_PAGE(PageTail(page), page); 2346 2347 if (likely(!PageCompound(page))) 2348 return atomic_read(&page->_mapcount) + 1; 2349 2350 compound = compound_mapcount(page); 2351 if (PageHuge(page)) 2352 return compound; 2353 ret = compound; 2354 for (i = 0; i < HPAGE_PMD_NR; i++) 2355 ret += atomic_read(&page[i]._mapcount) + 1; 2356 /* File pages has compound_mapcount included in _mapcount */ 2357 if (!PageAnon(page)) 2358 return ret - compound * HPAGE_PMD_NR; 2359 if (PageDoubleMap(page)) 2360 ret -= HPAGE_PMD_NR; 2361 return ret; 2362 } 2363 2364 /* 2365 * This calculates accurately how many mappings a transparent hugepage 2366 * has (unlike page_mapcount() which isn't fully accurate). This full 2367 * accuracy is primarily needed to know if copy-on-write faults can 2368 * reuse the page and change the mapping to read-write instead of 2369 * copying them. At the same time this returns the total_mapcount too. 
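 *
 * (Concrete illustration, assuming the common PTE-mapped case described
 * below: if every subpage of a THP is mapped exactly once, even by many
 * different processes, the value returned is 1 and each mapping may
 * reuse its own subpage on a write fault, while the reported
 * total_mapcount comes back as HPAGE_PMD_NR.)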
2370 *
2371 * The function returns the highest mapcount any one of the subpages
2372 * has. If the return value is one, even if different processes are
2373 * mapping different subpages of the transparent hugepage, they can
2374 * all reuse it, because each process is reusing a different subpage.
2375 *
2376 * The total_mapcount is instead counting all virtual mappings of the
2377 * subpages. If the total_mapcount is equal to "one", it tells the
2378 * caller all mappings belong to the same "mm" and in turn the
2379 * anon_vma of the transparent hugepage can become the vma->anon_vma
2380 * local one as no other process may be mapping any of the subpages.
2381 *
2382 * It would be more accurate to replace page_mapcount() with
2383 * page_trans_huge_mapcount(); however, we only use
2384 * page_trans_huge_mapcount() in copy-on-write faults, where we
2385 * need full accuracy to avoid breaking page pinning, because
2386 * page_trans_huge_mapcount() is slower than page_mapcount().
2387 */
2388 int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
2389 {
2390 int i, ret, _total_mapcount, mapcount;
2391
2392 /* hugetlbfs shouldn't call it */
2393 VM_BUG_ON_PAGE(PageHuge(page), page);
2394
2395 if (likely(!PageTransCompound(page))) {
2396 mapcount = atomic_read(&page->_mapcount) + 1;
2397 if (total_mapcount)
2398 *total_mapcount = mapcount;
2399 return mapcount;
2400 }
2401
2402 page = compound_head(page);
2403
2404 _total_mapcount = ret = 0;
2405 for (i = 0; i < HPAGE_PMD_NR; i++) {
2406 mapcount = atomic_read(&page[i]._mapcount) + 1;
2407 ret = max(ret, mapcount);
2408 _total_mapcount += mapcount;
2409 }
2410 if (PageDoubleMap(page)) {
2411 ret -= 1;
2412 _total_mapcount -= HPAGE_PMD_NR;
2413 }
2414 mapcount = compound_mapcount(page);
2415 ret += mapcount;
2416 _total_mapcount += mapcount;
2417 if (total_mapcount)
2418 *total_mapcount = _total_mapcount;
2419 return ret;
2420 }
2421
2422 /* Racy check whether the huge page can be split */
2423 bool can_split_huge_page(struct page *page, int *pextra_pins)
2424 {
2425 int extra_pins;
2426
2427 /* Additional pins from radix tree */
2428 if (PageAnon(page))
2429 extra_pins = PageSwapCache(page) ? HPAGE_PMD_NR : 0;
2430 else
2431 extra_pins = HPAGE_PMD_NR;
2432 if (pextra_pins)
2433 *pextra_pins = extra_pins;
2434 return total_mapcount(page) == page_count(page) - extra_pins - 1;
2435 }
2436
2437 /*
2438 * This function splits a huge page into normal pages. @page can point to any
2439 * subpage of the huge page to split. Splitting doesn't change the position of @page.
2440 *
2441 * Only the caller may hold a pin on the @page, otherwise the split fails with -EBUSY.
2442 * The huge page must be locked.
2443 *
2444 * If @list is null, tail pages will be added to the LRU list, otherwise to @list.
2445 *
2446 * Both the head page and tail pages will inherit mapping, flags, and so on from
2447 * the hugepage.
2448 *
2449 * The GUP pin and PG_locked are transferred to @page. The remaining subpages can
2450 * be freed if they are not mapped.
2451 *
2452 * Returns 0 if the hugepage is split successfully.
2453 * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
2454 * us.
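 *
 * A typical caller pattern is the one deferred_split_scan() and the
 * debugfs helper below follow: pin the page, lock it, and call
 * split_huge_page(), the @list == NULL wrapper around this function:
 *
 *	if (!get_page_unless_zero(page))
 *		continue;
 *	lock_page(page);
 *	if (!split_huge_page(page))
 *		split++;
 *	unlock_page(page);
 *	put_page(page);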
2455 */ 2456 int split_huge_page_to_list(struct page *page, struct list_head *list) 2457 { 2458 struct page *head = compound_head(page); 2459 struct pglist_data *pgdata = NODE_DATA(page_to_nid(head)); 2460 struct anon_vma *anon_vma = NULL; 2461 struct address_space *mapping = NULL; 2462 int count, mapcount, extra_pins, ret; 2463 bool mlocked; 2464 unsigned long flags; 2465 2466 VM_BUG_ON_PAGE(is_huge_zero_page(page), page); 2467 VM_BUG_ON_PAGE(!PageLocked(page), page); 2468 VM_BUG_ON_PAGE(!PageCompound(page), page); 2469 2470 if (PageAnon(head)) { 2471 /* 2472 * The caller does not necessarily hold an mmap_sem that would 2473 * prevent the anon_vma disappearing so we first we take a 2474 * reference to it and then lock the anon_vma for write. This 2475 * is similar to page_lock_anon_vma_read except the write lock 2476 * is taken to serialise against parallel split or collapse 2477 * operations. 2478 */ 2479 anon_vma = page_get_anon_vma(head); 2480 if (!anon_vma) { 2481 ret = -EBUSY; 2482 goto out; 2483 } 2484 mapping = NULL; 2485 anon_vma_lock_write(anon_vma); 2486 } else { 2487 mapping = head->mapping; 2488 2489 /* Truncated ? */ 2490 if (!mapping) { 2491 ret = -EBUSY; 2492 goto out; 2493 } 2494 2495 anon_vma = NULL; 2496 i_mmap_lock_read(mapping); 2497 } 2498 2499 /* 2500 * Racy check if we can split the page, before freeze_page() will 2501 * split PMDs 2502 */ 2503 if (!can_split_huge_page(head, &extra_pins)) { 2504 ret = -EBUSY; 2505 goto out_unlock; 2506 } 2507 2508 mlocked = PageMlocked(page); 2509 freeze_page(head); 2510 VM_BUG_ON_PAGE(compound_mapcount(head), head); 2511 2512 /* Make sure the page is not on per-CPU pagevec as it takes pin */ 2513 if (mlocked) 2514 lru_add_drain(); 2515 2516 /* prevent PageLRU to go away from under us, and freeze lru stats */ 2517 spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags); 2518 2519 if (mapping) { 2520 void **pslot; 2521 2522 spin_lock(&mapping->tree_lock); 2523 pslot = radix_tree_lookup_slot(&mapping->page_tree, 2524 page_index(head)); 2525 /* 2526 * Check if the head page is present in radix tree. 2527 * We assume all tail are present too, if head is there. 2528 */ 2529 if (radix_tree_deref_slot_protected(pslot, 2530 &mapping->tree_lock) != head) 2531 goto fail; 2532 } 2533 2534 /* Prevent deferred_split_scan() touching ->_refcount */ 2535 spin_lock(&pgdata->split_queue_lock); 2536 count = page_count(head); 2537 mapcount = total_mapcount(head); 2538 if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) { 2539 if (!list_empty(page_deferred_list(head))) { 2540 pgdata->split_queue_len--; 2541 list_del(page_deferred_list(head)); 2542 } 2543 if (mapping) 2544 __dec_node_page_state(page, NR_SHMEM_THPS); 2545 spin_unlock(&pgdata->split_queue_lock); 2546 __split_huge_page(page, list, flags); 2547 ret = 0; 2548 } else { 2549 if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) { 2550 pr_alert("total_mapcount: %u, page_count(): %u\n", 2551 mapcount, count); 2552 if (PageTail(page)) 2553 dump_page(head, NULL); 2554 dump_page(page, "total_mapcount(head) > 0"); 2555 BUG(); 2556 } 2557 spin_unlock(&pgdata->split_queue_lock); 2558 fail: if (mapping) 2559 spin_unlock(&mapping->tree_lock); 2560 spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags); 2561 unfreeze_page(head); 2562 ret = -EBUSY; 2563 } 2564 2565 out_unlock: 2566 if (anon_vma) { 2567 anon_vma_unlock_write(anon_vma); 2568 put_anon_vma(anon_vma); 2569 } 2570 if (mapping) 2571 i_mmap_unlock_read(mapping); 2572 out: 2573 count_vm_event(!ret ? 
THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); 2574 return ret; 2575 } 2576 2577 void free_transhuge_page(struct page *page) 2578 { 2579 struct pglist_data *pgdata = NODE_DATA(page_to_nid(page)); 2580 unsigned long flags; 2581 2582 spin_lock_irqsave(&pgdata->split_queue_lock, flags); 2583 if (!list_empty(page_deferred_list(page))) { 2584 pgdata->split_queue_len--; 2585 list_del(page_deferred_list(page)); 2586 } 2587 spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); 2588 free_compound_page(page); 2589 } 2590 2591 void deferred_split_huge_page(struct page *page) 2592 { 2593 struct pglist_data *pgdata = NODE_DATA(page_to_nid(page)); 2594 unsigned long flags; 2595 2596 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 2597 2598 spin_lock_irqsave(&pgdata->split_queue_lock, flags); 2599 if (list_empty(page_deferred_list(page))) { 2600 count_vm_event(THP_DEFERRED_SPLIT_PAGE); 2601 list_add_tail(page_deferred_list(page), &pgdata->split_queue); 2602 pgdata->split_queue_len++; 2603 } 2604 spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); 2605 } 2606 2607 static unsigned long deferred_split_count(struct shrinker *shrink, 2608 struct shrink_control *sc) 2609 { 2610 struct pglist_data *pgdata = NODE_DATA(sc->nid); 2611 return ACCESS_ONCE(pgdata->split_queue_len); 2612 } 2613 2614 static unsigned long deferred_split_scan(struct shrinker *shrink, 2615 struct shrink_control *sc) 2616 { 2617 struct pglist_data *pgdata = NODE_DATA(sc->nid); 2618 unsigned long flags; 2619 LIST_HEAD(list), *pos, *next; 2620 struct page *page; 2621 int split = 0; 2622 2623 spin_lock_irqsave(&pgdata->split_queue_lock, flags); 2624 /* Take pin on all head pages to avoid freeing them under us */ 2625 list_for_each_safe(pos, next, &pgdata->split_queue) { 2626 page = list_entry((void *)pos, struct page, mapping); 2627 page = compound_head(page); 2628 if (get_page_unless_zero(page)) { 2629 list_move(page_deferred_list(page), &list); 2630 } else { 2631 /* We lost race with put_compound_page() */ 2632 list_del_init(page_deferred_list(page)); 2633 pgdata->split_queue_len--; 2634 } 2635 if (!--sc->nr_to_scan) 2636 break; 2637 } 2638 spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); 2639 2640 list_for_each_safe(pos, next, &list) { 2641 page = list_entry((void *)pos, struct page, mapping); 2642 lock_page(page); 2643 /* split_huge_page() removes page from list on success */ 2644 if (!split_huge_page(page)) 2645 split++; 2646 unlock_page(page); 2647 put_page(page); 2648 } 2649 2650 spin_lock_irqsave(&pgdata->split_queue_lock, flags); 2651 list_splice_tail(&list, &pgdata->split_queue); 2652 spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); 2653 2654 /* 2655 * Stop shrinker if we didn't split any page, but the queue is empty. 2656 * This can happen if pages were freed under us. 
2657 */ 2658 if (!split && list_empty(&pgdata->split_queue)) 2659 return SHRINK_STOP; 2660 return split; 2661 } 2662 2663 static struct shrinker deferred_split_shrinker = { 2664 .count_objects = deferred_split_count, 2665 .scan_objects = deferred_split_scan, 2666 .seeks = DEFAULT_SEEKS, 2667 .flags = SHRINKER_NUMA_AWARE, 2668 }; 2669 2670 #ifdef CONFIG_DEBUG_FS 2671 static int split_huge_pages_set(void *data, u64 val) 2672 { 2673 struct zone *zone; 2674 struct page *page; 2675 unsigned long pfn, max_zone_pfn; 2676 unsigned long total = 0, split = 0; 2677 2678 if (val != 1) 2679 return -EINVAL; 2680 2681 for_each_populated_zone(zone) { 2682 max_zone_pfn = zone_end_pfn(zone); 2683 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { 2684 if (!pfn_valid(pfn)) 2685 continue; 2686 2687 page = pfn_to_page(pfn); 2688 if (!get_page_unless_zero(page)) 2689 continue; 2690 2691 if (zone != page_zone(page)) 2692 goto next; 2693 2694 if (!PageHead(page) || PageHuge(page) || !PageLRU(page)) 2695 goto next; 2696 2697 total++; 2698 lock_page(page); 2699 if (!split_huge_page(page)) 2700 split++; 2701 unlock_page(page); 2702 next: 2703 put_page(page); 2704 } 2705 } 2706 2707 pr_info("%lu of %lu THP split\n", split, total); 2708 2709 return 0; 2710 } 2711 DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set, 2712 "%llu\n"); 2713 2714 static int __init split_huge_pages_debugfs(void) 2715 { 2716 void *ret; 2717 2718 ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL, 2719 &split_huge_pages_fops); 2720 if (!ret) 2721 pr_warn("Failed to create split_huge_pages in debugfs"); 2722 return 0; 2723 } 2724 late_initcall(split_huge_pages_debugfs); 2725 #endif 2726
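
/*
 * Usage sketch for the debugfs knob created above (a sketch only,
 * assuming debugfs is mounted at /sys/kernel/debug and the caller is
 * root; the file is write-only and the only accepted value is "1",
 * anything else gets -EINVAL):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/sys/kernel/debug/split_huge_pages", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "1", 1);	// walk every populated zone and try to
 *					// split each THP found on the LRU
 *		close(fd);
 *	}
 *
 * The result is reported via pr_info() as "<split> of <total> THP split".
 */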