1 /* 2 * Copyright (C) 2009 Red Hat, Inc. 3 * 4 * This work is licensed under the terms of the GNU GPL, version 2. See 5 * the COPYING file in the top-level directory. 6 */ 7 8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 9 10 #include <linux/mm.h> 11 #include <linux/sched.h> 12 #include <linux/sched/coredump.h> 13 #include <linux/sched/numa_balancing.h> 14 #include <linux/highmem.h> 15 #include <linux/hugetlb.h> 16 #include <linux/mmu_notifier.h> 17 #include <linux/rmap.h> 18 #include <linux/swap.h> 19 #include <linux/shrinker.h> 20 #include <linux/mm_inline.h> 21 #include <linux/swapops.h> 22 #include <linux/dax.h> 23 #include <linux/khugepaged.h> 24 #include <linux/freezer.h> 25 #include <linux/pfn_t.h> 26 #include <linux/mman.h> 27 #include <linux/memremap.h> 28 #include <linux/pagemap.h> 29 #include <linux/debugfs.h> 30 #include <linux/migrate.h> 31 #include <linux/hashtable.h> 32 #include <linux/userfaultfd_k.h> 33 #include <linux/page_idle.h> 34 #include <linux/shmem_fs.h> 35 #include <linux/oom.h> 36 37 #include <asm/tlb.h> 38 #include <asm/pgalloc.h> 39 #include "internal.h" 40 41 /* 42 * By default, transparent hugepage support is disabled in order to avoid 43 * risking an increased memory footprint for applications that are not 44 * guaranteed to benefit from it. When transparent hugepage support is 45 * enabled, it is for all mappings, and khugepaged scans all mappings. 46 * Defrag is invoked by khugepaged hugepage allocations and by page faults 47 * for all hugepage allocations. 48 */ 49 unsigned long transparent_hugepage_flags __read_mostly = 50 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS 51 (1<<TRANSPARENT_HUGEPAGE_FLAG)| 52 #endif 53 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE 54 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)| 55 #endif 56 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)| 57 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)| 58 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 59 60 static struct shrinker deferred_split_shrinker; 61 62 static atomic_t huge_zero_refcount; 63 struct page *huge_zero_page __read_mostly; 64 65 static struct page *get_huge_zero_page(void) 66 { 67 struct page *zero_page; 68 retry: 69 if (likely(atomic_inc_not_zero(&huge_zero_refcount))) 70 return READ_ONCE(huge_zero_page); 71 72 zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE, 73 HPAGE_PMD_ORDER); 74 if (!zero_page) { 75 count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED); 76 return NULL; 77 } 78 count_vm_event(THP_ZERO_PAGE_ALLOC); 79 preempt_disable(); 80 if (cmpxchg(&huge_zero_page, NULL, zero_page)) { 81 preempt_enable(); 82 __free_pages(zero_page, compound_order(zero_page)); 83 goto retry; 84 } 85 86 /* We take additional reference here. It will be put back by shrinker */ 87 atomic_set(&huge_zero_refcount, 2); 88 preempt_enable(); 89 return READ_ONCE(huge_zero_page); 90 } 91 92 static void put_huge_zero_page(void) 93 { 94 /* 95 * Counter should never go to zero here. Only shrinker can put 96 * last reference. 
97 */ 98 BUG_ON(atomic_dec_and_test(&huge_zero_refcount)); 99 } 100 101 struct page *mm_get_huge_zero_page(struct mm_struct *mm) 102 { 103 if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) 104 return READ_ONCE(huge_zero_page); 105 106 if (!get_huge_zero_page()) 107 return NULL; 108 109 if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) 110 put_huge_zero_page(); 111 112 return READ_ONCE(huge_zero_page); 113 } 114 115 void mm_put_huge_zero_page(struct mm_struct *mm) 116 { 117 if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) 118 put_huge_zero_page(); 119 } 120 121 static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink, 122 struct shrink_control *sc) 123 { 124 /* we can free zero page only if last reference remains */ 125 return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0; 126 } 127 128 static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink, 129 struct shrink_control *sc) 130 { 131 if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) { 132 struct page *zero_page = xchg(&huge_zero_page, NULL); 133 BUG_ON(zero_page == NULL); 134 __free_pages(zero_page, compound_order(zero_page)); 135 return HPAGE_PMD_NR; 136 } 137 138 return 0; 139 } 140 141 static struct shrinker huge_zero_page_shrinker = { 142 .count_objects = shrink_huge_zero_page_count, 143 .scan_objects = shrink_huge_zero_page_scan, 144 .seeks = DEFAULT_SEEKS, 145 }; 146 147 #ifdef CONFIG_SYSFS 148 static ssize_t enabled_show(struct kobject *kobj, 149 struct kobj_attribute *attr, char *buf) 150 { 151 if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags)) 152 return sprintf(buf, "[always] madvise never\n"); 153 else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags)) 154 return sprintf(buf, "always [madvise] never\n"); 155 else 156 return sprintf(buf, "always madvise [never]\n"); 157 } 158 159 static ssize_t enabled_store(struct kobject *kobj, 160 struct kobj_attribute *attr, 161 const char *buf, size_t count) 162 { 163 ssize_t ret = count; 164 165 if (!memcmp("always", buf, 166 min(sizeof("always")-1, count))) { 167 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); 168 set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); 169 } else if (!memcmp("madvise", buf, 170 min(sizeof("madvise")-1, count))) { 171 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); 172 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); 173 } else if (!memcmp("never", buf, 174 min(sizeof("never")-1, count))) { 175 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); 176 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); 177 } else 178 ret = -EINVAL; 179 180 if (ret > 0) { 181 int err = start_stop_khugepaged(); 182 if (err) 183 ret = err; 184 } 185 return ret; 186 } 187 static struct kobj_attribute enabled_attr = 188 __ATTR(enabled, 0644, enabled_show, enabled_store); 189 190 ssize_t single_hugepage_flag_show(struct kobject *kobj, 191 struct kobj_attribute *attr, char *buf, 192 enum transparent_hugepage_flag flag) 193 { 194 return sprintf(buf, "%d\n", 195 !!test_bit(flag, &transparent_hugepage_flags)); 196 } 197 198 ssize_t single_hugepage_flag_store(struct kobject *kobj, 199 struct kobj_attribute *attr, 200 const char *buf, size_t count, 201 enum transparent_hugepage_flag flag) 202 { 203 unsigned long value; 204 int ret; 205 206 ret = kstrtoul(buf, 10, &value); 207 if (ret < 0) 208 return ret; 209 if (value > 1) 210 return -EINVAL; 211 212 if (value) 213 set_bit(flag, 
&transparent_hugepage_flags); 214 else 215 clear_bit(flag, &transparent_hugepage_flags); 216 217 return count; 218 } 219 220 static ssize_t defrag_show(struct kobject *kobj, 221 struct kobj_attribute *attr, char *buf) 222 { 223 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) 224 return sprintf(buf, "[always] defer defer+madvise madvise never\n"); 225 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) 226 return sprintf(buf, "always [defer] defer+madvise madvise never\n"); 227 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags)) 228 return sprintf(buf, "always defer [defer+madvise] madvise never\n"); 229 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) 230 return sprintf(buf, "always defer defer+madvise [madvise] never\n"); 231 return sprintf(buf, "always defer defer+madvise madvise [never]\n"); 232 } 233 234 static ssize_t defrag_store(struct kobject *kobj, 235 struct kobj_attribute *attr, 236 const char *buf, size_t count) 237 { 238 if (!memcmp("always", buf, 239 min(sizeof("always")-1, count))) { 240 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 241 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 242 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 243 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 244 } else if (!memcmp("defer+madvise", buf, 245 min(sizeof("defer+madvise")-1, count))) { 246 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 247 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 248 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 249 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 250 } else if (!memcmp("defer", buf, 251 min(sizeof("defer")-1, count))) { 252 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 253 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 254 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 255 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 256 } else if (!memcmp("madvise", buf, 257 min(sizeof("madvise")-1, count))) { 258 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 259 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 260 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 261 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 262 } else if (!memcmp("never", buf, 263 min(sizeof("never")-1, count))) { 264 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 265 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 266 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 267 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 268 } else 269 return -EINVAL; 270 271 return count; 272 } 273 static struct kobj_attribute defrag_attr = 274 __ATTR(defrag, 0644, defrag_show, defrag_store); 275 276 static ssize_t use_zero_page_show(struct kobject *kobj, 277 struct kobj_attribute *attr, char *buf) 278 { 279 return single_hugepage_flag_show(kobj, attr, buf, 280 
TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 281 } 282 static ssize_t use_zero_page_store(struct kobject *kobj, 283 struct kobj_attribute *attr, const char *buf, size_t count) 284 { 285 return single_hugepage_flag_store(kobj, attr, buf, count, 286 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 287 } 288 static struct kobj_attribute use_zero_page_attr = 289 __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store); 290 291 static ssize_t hpage_pmd_size_show(struct kobject *kobj, 292 struct kobj_attribute *attr, char *buf) 293 { 294 return sprintf(buf, "%lu\n", HPAGE_PMD_SIZE); 295 } 296 static struct kobj_attribute hpage_pmd_size_attr = 297 __ATTR_RO(hpage_pmd_size); 298 299 #ifdef CONFIG_DEBUG_VM 300 static ssize_t debug_cow_show(struct kobject *kobj, 301 struct kobj_attribute *attr, char *buf) 302 { 303 return single_hugepage_flag_show(kobj, attr, buf, 304 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); 305 } 306 static ssize_t debug_cow_store(struct kobject *kobj, 307 struct kobj_attribute *attr, 308 const char *buf, size_t count) 309 { 310 return single_hugepage_flag_store(kobj, attr, buf, count, 311 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); 312 } 313 static struct kobj_attribute debug_cow_attr = 314 __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store); 315 #endif /* CONFIG_DEBUG_VM */ 316 317 static struct attribute *hugepage_attr[] = { 318 &enabled_attr.attr, 319 &defrag_attr.attr, 320 &use_zero_page_attr.attr, 321 &hpage_pmd_size_attr.attr, 322 #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) 323 &shmem_enabled_attr.attr, 324 #endif 325 #ifdef CONFIG_DEBUG_VM 326 &debug_cow_attr.attr, 327 #endif 328 NULL, 329 }; 330 331 static const struct attribute_group hugepage_attr_group = { 332 .attrs = hugepage_attr, 333 }; 334 335 static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) 336 { 337 int err; 338 339 *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj); 340 if (unlikely(!*hugepage_kobj)) { 341 pr_err("failed to create transparent hugepage kobject\n"); 342 return -ENOMEM; 343 } 344 345 err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group); 346 if (err) { 347 pr_err("failed to register transparent hugepage group\n"); 348 goto delete_obj; 349 } 350 351 err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group); 352 if (err) { 353 pr_err("failed to register transparent hugepage group\n"); 354 goto remove_hp_group; 355 } 356 357 return 0; 358 359 remove_hp_group: 360 sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group); 361 delete_obj: 362 kobject_put(*hugepage_kobj); 363 return err; 364 } 365 366 static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj) 367 { 368 sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group); 369 sysfs_remove_group(hugepage_kobj, &hugepage_attr_group); 370 kobject_put(hugepage_kobj); 371 } 372 #else 373 static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj) 374 { 375 return 0; 376 } 377 378 static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj) 379 { 380 } 381 #endif /* CONFIG_SYSFS */ 382 383 static int __init hugepage_init(void) 384 { 385 int err; 386 struct kobject *hugepage_kobj; 387 388 if (!has_transparent_hugepage()) { 389 transparent_hugepage_flags = 0; 390 return -EINVAL; 391 } 392 393 /* 394 * hugepages can't be allocated by the buddy allocator 395 */ 396 MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER); 397 /* 398 * we use page->mapping and page->index in second tail page 399 * as list_head: assuming THP order >= 2 400 */ 401 
MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2); 402 403 err = hugepage_init_sysfs(&hugepage_kobj); 404 if (err) 405 goto err_sysfs; 406 407 err = khugepaged_init(); 408 if (err) 409 goto err_slab; 410 411 err = register_shrinker(&huge_zero_page_shrinker); 412 if (err) 413 goto err_hzp_shrinker; 414 err = register_shrinker(&deferred_split_shrinker); 415 if (err) 416 goto err_split_shrinker; 417 418 /* 419 * By default disable transparent hugepages on smaller systems, 420 * where the extra memory used could hurt more than TLB overhead 421 * is likely to save. The admin can still enable it through /sys. 422 */ 423 if (totalram_pages < (512 << (20 - PAGE_SHIFT))) { 424 transparent_hugepage_flags = 0; 425 return 0; 426 } 427 428 err = start_stop_khugepaged(); 429 if (err) 430 goto err_khugepaged; 431 432 return 0; 433 err_khugepaged: 434 unregister_shrinker(&deferred_split_shrinker); 435 err_split_shrinker: 436 unregister_shrinker(&huge_zero_page_shrinker); 437 err_hzp_shrinker: 438 khugepaged_destroy(); 439 err_slab: 440 hugepage_exit_sysfs(hugepage_kobj); 441 err_sysfs: 442 return err; 443 } 444 subsys_initcall(hugepage_init); 445 446 static int __init setup_transparent_hugepage(char *str) 447 { 448 int ret = 0; 449 if (!str) 450 goto out; 451 if (!strcmp(str, "always")) { 452 set_bit(TRANSPARENT_HUGEPAGE_FLAG, 453 &transparent_hugepage_flags); 454 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 455 &transparent_hugepage_flags); 456 ret = 1; 457 } else if (!strcmp(str, "madvise")) { 458 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, 459 &transparent_hugepage_flags); 460 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 461 &transparent_hugepage_flags); 462 ret = 1; 463 } else if (!strcmp(str, "never")) { 464 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, 465 &transparent_hugepage_flags); 466 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 467 &transparent_hugepage_flags); 468 ret = 1; 469 } 470 out: 471 if (!ret) 472 pr_warn("transparent_hugepage= cannot parse, ignored\n"); 473 return ret; 474 } 475 __setup("transparent_hugepage=", setup_transparent_hugepage); 476 477 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) 478 { 479 if (likely(vma->vm_flags & VM_WRITE)) 480 pmd = pmd_mkwrite(pmd); 481 return pmd; 482 } 483 484 static inline struct list_head *page_deferred_list(struct page *page) 485 { 486 /* 487 * ->lru in the tail pages is occupied by compound_head. 488 * Let's use ->mapping + ->index in the second tail page as list_head. 
489 */ 490 return (struct list_head *)&page[2].mapping; 491 } 492 493 void prep_transhuge_page(struct page *page) 494 { 495 /* 496 * we use page->mapping and page->indexlru in second tail page 497 * as list_head: assuming THP order >= 2 498 */ 499 500 INIT_LIST_HEAD(page_deferred_list(page)); 501 set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR); 502 } 503 504 unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len, 505 loff_t off, unsigned long flags, unsigned long size) 506 { 507 unsigned long addr; 508 loff_t off_end = off + len; 509 loff_t off_align = round_up(off, size); 510 unsigned long len_pad; 511 512 if (off_end <= off_align || (off_end - off_align) < size) 513 return 0; 514 515 len_pad = len + size; 516 if (len_pad < len || (off + len_pad) < off) 517 return 0; 518 519 addr = current->mm->get_unmapped_area(filp, 0, len_pad, 520 off >> PAGE_SHIFT, flags); 521 if (IS_ERR_VALUE(addr)) 522 return 0; 523 524 addr += (off - addr) & (size - 1); 525 return addr; 526 } 527 528 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, 529 unsigned long len, unsigned long pgoff, unsigned long flags) 530 { 531 loff_t off = (loff_t)pgoff << PAGE_SHIFT; 532 533 if (addr) 534 goto out; 535 if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD)) 536 goto out; 537 538 addr = __thp_get_unmapped_area(filp, len, off, flags, PMD_SIZE); 539 if (addr) 540 return addr; 541 542 out: 543 return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); 544 } 545 EXPORT_SYMBOL_GPL(thp_get_unmapped_area); 546 547 static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page, 548 gfp_t gfp) 549 { 550 struct vm_area_struct *vma = vmf->vma; 551 struct mem_cgroup *memcg; 552 pgtable_t pgtable; 553 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 554 int ret = 0; 555 556 VM_BUG_ON_PAGE(!PageCompound(page), page); 557 558 if (mem_cgroup_try_charge(page, vma->vm_mm, gfp, &memcg, true)) { 559 put_page(page); 560 count_vm_event(THP_FAULT_FALLBACK); 561 return VM_FAULT_FALLBACK; 562 } 563 564 pgtable = pte_alloc_one(vma->vm_mm, haddr); 565 if (unlikely(!pgtable)) { 566 ret = VM_FAULT_OOM; 567 goto release; 568 } 569 570 clear_huge_page(page, vmf->address, HPAGE_PMD_NR); 571 /* 572 * The memory barrier inside __SetPageUptodate makes sure that 573 * clear_huge_page writes become visible before the set_pmd_at() 574 * write. 
575 */ 576 __SetPageUptodate(page); 577 578 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 579 if (unlikely(!pmd_none(*vmf->pmd))) { 580 goto unlock_release; 581 } else { 582 pmd_t entry; 583 584 ret = check_stable_address_space(vma->vm_mm); 585 if (ret) 586 goto unlock_release; 587 588 /* Deliver the page fault to userland */ 589 if (userfaultfd_missing(vma)) { 590 int ret; 591 592 spin_unlock(vmf->ptl); 593 mem_cgroup_cancel_charge(page, memcg, true); 594 put_page(page); 595 pte_free(vma->vm_mm, pgtable); 596 ret = handle_userfault(vmf, VM_UFFD_MISSING); 597 VM_BUG_ON(ret & VM_FAULT_FALLBACK); 598 return ret; 599 } 600 601 entry = mk_huge_pmd(page, vma->vm_page_prot); 602 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 603 page_add_new_anon_rmap(page, vma, haddr, true); 604 mem_cgroup_commit_charge(page, memcg, false, true); 605 lru_cache_add_active_or_unevictable(page, vma); 606 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); 607 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); 608 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); 609 mm_inc_nr_ptes(vma->vm_mm); 610 spin_unlock(vmf->ptl); 611 count_vm_event(THP_FAULT_ALLOC); 612 } 613 614 return 0; 615 unlock_release: 616 spin_unlock(vmf->ptl); 617 release: 618 if (pgtable) 619 pte_free(vma->vm_mm, pgtable); 620 mem_cgroup_cancel_charge(page, memcg, true); 621 put_page(page); 622 return ret; 623 624 } 625 626 /* 627 * always: directly stall for all thp allocations 628 * defer: wake kswapd and fail if not immediately available 629 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise 630 * fail if not immediately available 631 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately 632 * available 633 * never: never stall for any thp allocation 634 */ 635 static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma) 636 { 637 const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE); 638 639 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) 640 return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY); 641 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) 642 return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM; 643 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags)) 644 return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM : 645 __GFP_KSWAPD_RECLAIM); 646 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) 647 return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM : 648 0); 649 return GFP_TRANSHUGE_LIGHT; 650 } 651 652 /* Caller must hold page table lock. 
*/ 653 static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, 654 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, 655 struct page *zero_page) 656 { 657 pmd_t entry; 658 if (!pmd_none(*pmd)) 659 return false; 660 entry = mk_pmd(zero_page, vma->vm_page_prot); 661 entry = pmd_mkhuge(entry); 662 if (pgtable) 663 pgtable_trans_huge_deposit(mm, pmd, pgtable); 664 set_pmd_at(mm, haddr, pmd, entry); 665 mm_inc_nr_ptes(mm); 666 return true; 667 } 668 669 int do_huge_pmd_anonymous_page(struct vm_fault *vmf) 670 { 671 struct vm_area_struct *vma = vmf->vma; 672 gfp_t gfp; 673 struct page *page; 674 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 675 676 if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) 677 return VM_FAULT_FALLBACK; 678 if (unlikely(anon_vma_prepare(vma))) 679 return VM_FAULT_OOM; 680 if (unlikely(khugepaged_enter(vma, vma->vm_flags))) 681 return VM_FAULT_OOM; 682 if (!(vmf->flags & FAULT_FLAG_WRITE) && 683 !mm_forbids_zeropage(vma->vm_mm) && 684 transparent_hugepage_use_zero_page()) { 685 pgtable_t pgtable; 686 struct page *zero_page; 687 bool set; 688 int ret; 689 pgtable = pte_alloc_one(vma->vm_mm, haddr); 690 if (unlikely(!pgtable)) 691 return VM_FAULT_OOM; 692 zero_page = mm_get_huge_zero_page(vma->vm_mm); 693 if (unlikely(!zero_page)) { 694 pte_free(vma->vm_mm, pgtable); 695 count_vm_event(THP_FAULT_FALLBACK); 696 return VM_FAULT_FALLBACK; 697 } 698 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 699 ret = 0; 700 set = false; 701 if (pmd_none(*vmf->pmd)) { 702 ret = check_stable_address_space(vma->vm_mm); 703 if (ret) { 704 spin_unlock(vmf->ptl); 705 } else if (userfaultfd_missing(vma)) { 706 spin_unlock(vmf->ptl); 707 ret = handle_userfault(vmf, VM_UFFD_MISSING); 708 VM_BUG_ON(ret & VM_FAULT_FALLBACK); 709 } else { 710 set_huge_zero_page(pgtable, vma->vm_mm, vma, 711 haddr, vmf->pmd, zero_page); 712 spin_unlock(vmf->ptl); 713 set = true; 714 } 715 } else 716 spin_unlock(vmf->ptl); 717 if (!set) 718 pte_free(vma->vm_mm, pgtable); 719 return ret; 720 } 721 gfp = alloc_hugepage_direct_gfpmask(vma); 722 page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); 723 if (unlikely(!page)) { 724 count_vm_event(THP_FAULT_FALLBACK); 725 return VM_FAULT_FALLBACK; 726 } 727 prep_transhuge_page(page); 728 return __do_huge_pmd_anonymous_page(vmf, page, gfp); 729 } 730 731 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, 732 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write, 733 pgtable_t pgtable) 734 { 735 struct mm_struct *mm = vma->vm_mm; 736 pmd_t entry; 737 spinlock_t *ptl; 738 739 ptl = pmd_lock(mm, pmd); 740 entry = pmd_mkhuge(pfn_t_pmd(pfn, prot)); 741 if (pfn_t_devmap(pfn)) 742 entry = pmd_mkdevmap(entry); 743 if (write) { 744 entry = pmd_mkyoung(pmd_mkdirty(entry)); 745 entry = maybe_pmd_mkwrite(entry, vma); 746 } 747 748 if (pgtable) { 749 pgtable_trans_huge_deposit(mm, pmd, pgtable); 750 mm_inc_nr_ptes(mm); 751 } 752 753 set_pmd_at(mm, addr, pmd, entry); 754 update_mmu_cache_pmd(vma, addr, pmd); 755 spin_unlock(ptl); 756 } 757 758 int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, 759 pmd_t *pmd, pfn_t pfn, bool write) 760 { 761 pgprot_t pgprot = vma->vm_page_prot; 762 pgtable_t pgtable = NULL; 763 /* 764 * If we had pmd_special, we could avoid all these restrictions, 765 * but we need to be consistent with PTEs and architectures that 766 * can't support a 'special' bit. 
767 */ 768 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); 769 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == 770 (VM_PFNMAP|VM_MIXEDMAP)); 771 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); 772 BUG_ON(!pfn_t_devmap(pfn)); 773 774 if (addr < vma->vm_start || addr >= vma->vm_end) 775 return VM_FAULT_SIGBUS; 776 777 if (arch_needs_pgtable_deposit()) { 778 pgtable = pte_alloc_one(vma->vm_mm, addr); 779 if (!pgtable) 780 return VM_FAULT_OOM; 781 } 782 783 track_pfn_insert(vma, &pgprot, pfn); 784 785 insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write, pgtable); 786 return VM_FAULT_NOPAGE; 787 } 788 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd); 789 790 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 791 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma) 792 { 793 if (likely(vma->vm_flags & VM_WRITE)) 794 pud = pud_mkwrite(pud); 795 return pud; 796 } 797 798 static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, 799 pud_t *pud, pfn_t pfn, pgprot_t prot, bool write) 800 { 801 struct mm_struct *mm = vma->vm_mm; 802 pud_t entry; 803 spinlock_t *ptl; 804 805 ptl = pud_lock(mm, pud); 806 entry = pud_mkhuge(pfn_t_pud(pfn, prot)); 807 if (pfn_t_devmap(pfn)) 808 entry = pud_mkdevmap(entry); 809 if (write) { 810 entry = pud_mkyoung(pud_mkdirty(entry)); 811 entry = maybe_pud_mkwrite(entry, vma); 812 } 813 set_pud_at(mm, addr, pud, entry); 814 update_mmu_cache_pud(vma, addr, pud); 815 spin_unlock(ptl); 816 } 817 818 int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, 819 pud_t *pud, pfn_t pfn, bool write) 820 { 821 pgprot_t pgprot = vma->vm_page_prot; 822 /* 823 * If we had pud_special, we could avoid all these restrictions, 824 * but we need to be consistent with PTEs and architectures that 825 * can't support a 'special' bit. 826 */ 827 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); 828 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == 829 (VM_PFNMAP|VM_MIXEDMAP)); 830 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); 831 BUG_ON(!pfn_t_devmap(pfn)); 832 833 if (addr < vma->vm_start || addr >= vma->vm_end) 834 return VM_FAULT_SIGBUS; 835 836 track_pfn_insert(vma, &pgprot, pfn); 837 838 insert_pfn_pud(vma, addr, pud, pfn, pgprot, write); 839 return VM_FAULT_NOPAGE; 840 } 841 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud); 842 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 843 844 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr, 845 pmd_t *pmd) 846 { 847 pmd_t _pmd; 848 849 /* 850 * We should set the dirty bit only for FOLL_WRITE but for now 851 * the dirty bit in the pmd is meaningless. And if the dirty 852 * bit will become meaningful and we'll only set it with 853 * FOLL_WRITE, an atomic set_bit will be required on the pmd to 854 * set the young bit, instead of the current set_pmd_at. 855 */ 856 _pmd = pmd_mkyoung(pmd_mkdirty(*pmd)); 857 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, 858 pmd, _pmd, 1)) 859 update_mmu_cache_pmd(vma, addr, pmd); 860 } 861 862 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, 863 pmd_t *pmd, int flags) 864 { 865 unsigned long pfn = pmd_pfn(*pmd); 866 struct mm_struct *mm = vma->vm_mm; 867 struct dev_pagemap *pgmap; 868 struct page *page; 869 870 assert_spin_locked(pmd_lockptr(mm, pmd)); 871 872 /* 873 * When we COW a devmap PMD entry, we split it into PTEs, so we should 874 * not be in this function with `flags & FOLL_COW` set. 
875 */ 876 WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set"); 877 878 if (flags & FOLL_WRITE && !pmd_write(*pmd)) 879 return NULL; 880 881 if (pmd_present(*pmd) && pmd_devmap(*pmd)) 882 /* pass */; 883 else 884 return NULL; 885 886 if (flags & FOLL_TOUCH) 887 touch_pmd(vma, addr, pmd); 888 889 /* 890 * device mapped pages can only be returned if the 891 * caller will manage the page reference count. 892 */ 893 if (!(flags & FOLL_GET)) 894 return ERR_PTR(-EEXIST); 895 896 pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT; 897 pgmap = get_dev_pagemap(pfn, NULL); 898 if (!pgmap) 899 return ERR_PTR(-EFAULT); 900 page = pfn_to_page(pfn); 901 get_page(page); 902 put_dev_pagemap(pgmap); 903 904 return page; 905 } 906 907 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, 908 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, 909 struct vm_area_struct *vma) 910 { 911 spinlock_t *dst_ptl, *src_ptl; 912 struct page *src_page; 913 pmd_t pmd; 914 pgtable_t pgtable = NULL; 915 int ret = -ENOMEM; 916 917 /* Skip if can be re-fill on fault */ 918 if (!vma_is_anonymous(vma)) 919 return 0; 920 921 pgtable = pte_alloc_one(dst_mm, addr); 922 if (unlikely(!pgtable)) 923 goto out; 924 925 dst_ptl = pmd_lock(dst_mm, dst_pmd); 926 src_ptl = pmd_lockptr(src_mm, src_pmd); 927 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 928 929 ret = -EAGAIN; 930 pmd = *src_pmd; 931 932 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 933 if (unlikely(is_swap_pmd(pmd))) { 934 swp_entry_t entry = pmd_to_swp_entry(pmd); 935 936 VM_BUG_ON(!is_pmd_migration_entry(pmd)); 937 if (is_write_migration_entry(entry)) { 938 make_migration_entry_read(&entry); 939 pmd = swp_entry_to_pmd(entry); 940 if (pmd_swp_soft_dirty(*src_pmd)) 941 pmd = pmd_swp_mksoft_dirty(pmd); 942 set_pmd_at(src_mm, addr, src_pmd, pmd); 943 } 944 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); 945 mm_inc_nr_ptes(dst_mm); 946 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); 947 set_pmd_at(dst_mm, addr, dst_pmd, pmd); 948 ret = 0; 949 goto out_unlock; 950 } 951 #endif 952 953 if (unlikely(!pmd_trans_huge(pmd))) { 954 pte_free(dst_mm, pgtable); 955 goto out_unlock; 956 } 957 /* 958 * When page table lock is held, the huge zero pmd should not be 959 * under splitting since we don't split the page itself, only pmd to 960 * a page table. 961 */ 962 if (is_huge_zero_pmd(pmd)) { 963 struct page *zero_page; 964 /* 965 * get_huge_zero_page() will never allocate a new page here, 966 * since we already have a zero page to copy. It just takes a 967 * reference. 
968 */ 969 zero_page = mm_get_huge_zero_page(dst_mm); 970 set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd, 971 zero_page); 972 ret = 0; 973 goto out_unlock; 974 } 975 976 src_page = pmd_page(pmd); 977 VM_BUG_ON_PAGE(!PageHead(src_page), src_page); 978 get_page(src_page); 979 page_dup_rmap(src_page, true); 980 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); 981 mm_inc_nr_ptes(dst_mm); 982 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); 983 984 pmdp_set_wrprotect(src_mm, addr, src_pmd); 985 pmd = pmd_mkold(pmd_wrprotect(pmd)); 986 set_pmd_at(dst_mm, addr, dst_pmd, pmd); 987 988 ret = 0; 989 out_unlock: 990 spin_unlock(src_ptl); 991 spin_unlock(dst_ptl); 992 out: 993 return ret; 994 } 995 996 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 997 static void touch_pud(struct vm_area_struct *vma, unsigned long addr, 998 pud_t *pud) 999 { 1000 pud_t _pud; 1001 1002 /* 1003 * We should set the dirty bit only for FOLL_WRITE but for now 1004 * the dirty bit in the pud is meaningless. And if the dirty 1005 * bit will become meaningful and we'll only set it with 1006 * FOLL_WRITE, an atomic set_bit will be required on the pud to 1007 * set the young bit, instead of the current set_pud_at. 1008 */ 1009 _pud = pud_mkyoung(pud_mkdirty(*pud)); 1010 if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK, 1011 pud, _pud, 1)) 1012 update_mmu_cache_pud(vma, addr, pud); 1013 } 1014 1015 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, 1016 pud_t *pud, int flags) 1017 { 1018 unsigned long pfn = pud_pfn(*pud); 1019 struct mm_struct *mm = vma->vm_mm; 1020 struct dev_pagemap *pgmap; 1021 struct page *page; 1022 1023 assert_spin_locked(pud_lockptr(mm, pud)); 1024 1025 if (flags & FOLL_WRITE && !pud_write(*pud)) 1026 return NULL; 1027 1028 if (pud_present(*pud) && pud_devmap(*pud)) 1029 /* pass */; 1030 else 1031 return NULL; 1032 1033 if (flags & FOLL_TOUCH) 1034 touch_pud(vma, addr, pud); 1035 1036 /* 1037 * device mapped pages can only be returned if the 1038 * caller will manage the page reference count. 1039 */ 1040 if (!(flags & FOLL_GET)) 1041 return ERR_PTR(-EEXIST); 1042 1043 pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT; 1044 pgmap = get_dev_pagemap(pfn, NULL); 1045 if (!pgmap) 1046 return ERR_PTR(-EFAULT); 1047 page = pfn_to_page(pfn); 1048 get_page(page); 1049 put_dev_pagemap(pgmap); 1050 1051 return page; 1052 } 1053 1054 int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, 1055 pud_t *dst_pud, pud_t *src_pud, unsigned long addr, 1056 struct vm_area_struct *vma) 1057 { 1058 spinlock_t *dst_ptl, *src_ptl; 1059 pud_t pud; 1060 int ret; 1061 1062 dst_ptl = pud_lock(dst_mm, dst_pud); 1063 src_ptl = pud_lockptr(src_mm, src_pud); 1064 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 1065 1066 ret = -EAGAIN; 1067 pud = *src_pud; 1068 if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud))) 1069 goto out_unlock; 1070 1071 /* 1072 * When page table lock is held, the huge zero pud should not be 1073 * under splitting since we don't split the page itself, only pud to 1074 * a page table. 
1075 */ 1076 if (is_huge_zero_pud(pud)) { 1077 /* No huge zero pud yet */ 1078 } 1079 1080 pudp_set_wrprotect(src_mm, addr, src_pud); 1081 pud = pud_mkold(pud_wrprotect(pud)); 1082 set_pud_at(dst_mm, addr, dst_pud, pud); 1083 1084 ret = 0; 1085 out_unlock: 1086 spin_unlock(src_ptl); 1087 spin_unlock(dst_ptl); 1088 return ret; 1089 } 1090 1091 void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) 1092 { 1093 pud_t entry; 1094 unsigned long haddr; 1095 bool write = vmf->flags & FAULT_FLAG_WRITE; 1096 1097 vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud); 1098 if (unlikely(!pud_same(*vmf->pud, orig_pud))) 1099 goto unlock; 1100 1101 entry = pud_mkyoung(orig_pud); 1102 if (write) 1103 entry = pud_mkdirty(entry); 1104 haddr = vmf->address & HPAGE_PUD_MASK; 1105 if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write)) 1106 update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud); 1107 1108 unlock: 1109 spin_unlock(vmf->ptl); 1110 } 1111 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 1112 1113 void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd) 1114 { 1115 pmd_t entry; 1116 unsigned long haddr; 1117 bool write = vmf->flags & FAULT_FLAG_WRITE; 1118 1119 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); 1120 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) 1121 goto unlock; 1122 1123 entry = pmd_mkyoung(orig_pmd); 1124 if (write) 1125 entry = pmd_mkdirty(entry); 1126 haddr = vmf->address & HPAGE_PMD_MASK; 1127 if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write)) 1128 update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd); 1129 1130 unlock: 1131 spin_unlock(vmf->ptl); 1132 } 1133 1134 static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd, 1135 struct page *page) 1136 { 1137 struct vm_area_struct *vma = vmf->vma; 1138 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 1139 struct mem_cgroup *memcg; 1140 pgtable_t pgtable; 1141 pmd_t _pmd; 1142 int ret = 0, i; 1143 struct page **pages; 1144 unsigned long mmun_start; /* For mmu_notifiers */ 1145 unsigned long mmun_end; /* For mmu_notifiers */ 1146 1147 pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR, 1148 GFP_KERNEL); 1149 if (unlikely(!pages)) { 1150 ret |= VM_FAULT_OOM; 1151 goto out; 1152 } 1153 1154 for (i = 0; i < HPAGE_PMD_NR; i++) { 1155 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma, 1156 vmf->address, page_to_nid(page)); 1157 if (unlikely(!pages[i] || 1158 mem_cgroup_try_charge(pages[i], vma->vm_mm, 1159 GFP_KERNEL, &memcg, false))) { 1160 if (pages[i]) 1161 put_page(pages[i]); 1162 while (--i >= 0) { 1163 memcg = (void *)page_private(pages[i]); 1164 set_page_private(pages[i], 0); 1165 mem_cgroup_cancel_charge(pages[i], memcg, 1166 false); 1167 put_page(pages[i]); 1168 } 1169 kfree(pages); 1170 ret |= VM_FAULT_OOM; 1171 goto out; 1172 } 1173 set_page_private(pages[i], (unsigned long)memcg); 1174 } 1175 1176 for (i = 0; i < HPAGE_PMD_NR; i++) { 1177 copy_user_highpage(pages[i], page + i, 1178 haddr + PAGE_SIZE * i, vma); 1179 __SetPageUptodate(pages[i]); 1180 cond_resched(); 1181 } 1182 1183 mmun_start = haddr; 1184 mmun_end = haddr + HPAGE_PMD_SIZE; 1185 mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); 1186 1187 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 1188 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) 1189 goto out_free_pages; 1190 VM_BUG_ON_PAGE(!PageHead(page), page); 1191 1192 /* 1193 * Leave pmd empty until pte is filled note we must notify here as 1194 * concurrent CPU thread might write to new page before the call to 1195 * 
mmu_notifier_invalidate_range_end() happens which can lead to a 1196 * device seeing memory write in different order than CPU. 1197 * 1198 * See Documentation/vm/mmu_notifier.txt 1199 */ 1200 pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd); 1201 1202 pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd); 1203 pmd_populate(vma->vm_mm, &_pmd, pgtable); 1204 1205 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 1206 pte_t entry; 1207 entry = mk_pte(pages[i], vma->vm_page_prot); 1208 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 1209 memcg = (void *)page_private(pages[i]); 1210 set_page_private(pages[i], 0); 1211 page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false); 1212 mem_cgroup_commit_charge(pages[i], memcg, false, false); 1213 lru_cache_add_active_or_unevictable(pages[i], vma); 1214 vmf->pte = pte_offset_map(&_pmd, haddr); 1215 VM_BUG_ON(!pte_none(*vmf->pte)); 1216 set_pte_at(vma->vm_mm, haddr, vmf->pte, entry); 1217 pte_unmap(vmf->pte); 1218 } 1219 kfree(pages); 1220 1221 smp_wmb(); /* make pte visible before pmd */ 1222 pmd_populate(vma->vm_mm, vmf->pmd, pgtable); 1223 page_remove_rmap(page, true); 1224 spin_unlock(vmf->ptl); 1225 1226 /* 1227 * No need to double call mmu_notifier->invalidate_range() callback as 1228 * the above pmdp_huge_clear_flush_notify() did already call it. 1229 */ 1230 mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start, 1231 mmun_end); 1232 1233 ret |= VM_FAULT_WRITE; 1234 put_page(page); 1235 1236 out: 1237 return ret; 1238 1239 out_free_pages: 1240 spin_unlock(vmf->ptl); 1241 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); 1242 for (i = 0; i < HPAGE_PMD_NR; i++) { 1243 memcg = (void *)page_private(pages[i]); 1244 set_page_private(pages[i], 0); 1245 mem_cgroup_cancel_charge(pages[i], memcg, false); 1246 put_page(pages[i]); 1247 } 1248 kfree(pages); 1249 goto out; 1250 } 1251 1252 int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd) 1253 { 1254 struct vm_area_struct *vma = vmf->vma; 1255 struct page *page = NULL, *new_page; 1256 struct mem_cgroup *memcg; 1257 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 1258 unsigned long mmun_start; /* For mmu_notifiers */ 1259 unsigned long mmun_end; /* For mmu_notifiers */ 1260 gfp_t huge_gfp; /* for allocation and charge */ 1261 int ret = 0; 1262 1263 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); 1264 VM_BUG_ON_VMA(!vma->anon_vma, vma); 1265 if (is_huge_zero_pmd(orig_pmd)) 1266 goto alloc; 1267 spin_lock(vmf->ptl); 1268 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) 1269 goto out_unlock; 1270 1271 page = pmd_page(orig_pmd); 1272 VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page); 1273 /* 1274 * We can only reuse the page if nobody else maps the huge page or it's 1275 * part. 
1276 */ 1277 if (!trylock_page(page)) { 1278 get_page(page); 1279 spin_unlock(vmf->ptl); 1280 lock_page(page); 1281 spin_lock(vmf->ptl); 1282 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { 1283 unlock_page(page); 1284 put_page(page); 1285 goto out_unlock; 1286 } 1287 put_page(page); 1288 } 1289 if (reuse_swap_page(page, NULL)) { 1290 pmd_t entry; 1291 entry = pmd_mkyoung(orig_pmd); 1292 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 1293 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) 1294 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 1295 ret |= VM_FAULT_WRITE; 1296 unlock_page(page); 1297 goto out_unlock; 1298 } 1299 unlock_page(page); 1300 get_page(page); 1301 spin_unlock(vmf->ptl); 1302 alloc: 1303 if (transparent_hugepage_enabled(vma) && 1304 !transparent_hugepage_debug_cow()) { 1305 huge_gfp = alloc_hugepage_direct_gfpmask(vma); 1306 new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); 1307 } else 1308 new_page = NULL; 1309 1310 if (likely(new_page)) { 1311 prep_transhuge_page(new_page); 1312 } else { 1313 if (!page) { 1314 split_huge_pmd(vma, vmf->pmd, vmf->address); 1315 ret |= VM_FAULT_FALLBACK; 1316 } else { 1317 ret = do_huge_pmd_wp_page_fallback(vmf, orig_pmd, page); 1318 if (ret & VM_FAULT_OOM) { 1319 split_huge_pmd(vma, vmf->pmd, vmf->address); 1320 ret |= VM_FAULT_FALLBACK; 1321 } 1322 put_page(page); 1323 } 1324 count_vm_event(THP_FAULT_FALLBACK); 1325 goto out; 1326 } 1327 1328 if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm, 1329 huge_gfp, &memcg, true))) { 1330 put_page(new_page); 1331 split_huge_pmd(vma, vmf->pmd, vmf->address); 1332 if (page) 1333 put_page(page); 1334 ret |= VM_FAULT_FALLBACK; 1335 count_vm_event(THP_FAULT_FALLBACK); 1336 goto out; 1337 } 1338 1339 count_vm_event(THP_FAULT_ALLOC); 1340 1341 if (!page) 1342 clear_huge_page(new_page, vmf->address, HPAGE_PMD_NR); 1343 else 1344 copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); 1345 __SetPageUptodate(new_page); 1346 1347 mmun_start = haddr; 1348 mmun_end = haddr + HPAGE_PMD_SIZE; 1349 mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); 1350 1351 spin_lock(vmf->ptl); 1352 if (page) 1353 put_page(page); 1354 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { 1355 spin_unlock(vmf->ptl); 1356 mem_cgroup_cancel_charge(new_page, memcg, true); 1357 put_page(new_page); 1358 goto out_mn; 1359 } else { 1360 pmd_t entry; 1361 entry = mk_huge_pmd(new_page, vma->vm_page_prot); 1362 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 1363 pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd); 1364 page_add_new_anon_rmap(new_page, vma, haddr, true); 1365 mem_cgroup_commit_charge(new_page, memcg, false, true); 1366 lru_cache_add_active_or_unevictable(new_page, vma); 1367 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); 1368 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 1369 if (!page) { 1370 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); 1371 } else { 1372 VM_BUG_ON_PAGE(!PageHead(page), page); 1373 page_remove_rmap(page, true); 1374 put_page(page); 1375 } 1376 ret |= VM_FAULT_WRITE; 1377 } 1378 spin_unlock(vmf->ptl); 1379 out_mn: 1380 /* 1381 * No need to double call mmu_notifier->invalidate_range() callback as 1382 * the above pmdp_huge_clear_flush_notify() did already call it. 
1383 */ 1384 mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start, 1385 mmun_end); 1386 out: 1387 return ret; 1388 out_unlock: 1389 spin_unlock(vmf->ptl); 1390 return ret; 1391 } 1392 1393 /* 1394 * FOLL_FORCE can write to even unwritable pmd's, but only 1395 * after we've gone through a COW cycle and they are dirty. 1396 */ 1397 static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) 1398 { 1399 return pmd_write(pmd) || 1400 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd)); 1401 } 1402 1403 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, 1404 unsigned long addr, 1405 pmd_t *pmd, 1406 unsigned int flags) 1407 { 1408 struct mm_struct *mm = vma->vm_mm; 1409 struct page *page = NULL; 1410 1411 assert_spin_locked(pmd_lockptr(mm, pmd)); 1412 1413 if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags)) 1414 goto out; 1415 1416 /* Avoid dumping huge zero page */ 1417 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) 1418 return ERR_PTR(-EFAULT); 1419 1420 /* Full NUMA hinting faults to serialise migration in fault paths */ 1421 if ((flags & FOLL_NUMA) && pmd_protnone(*pmd)) 1422 goto out; 1423 1424 page = pmd_page(*pmd); 1425 VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page); 1426 if (flags & FOLL_TOUCH) 1427 touch_pmd(vma, addr, pmd); 1428 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { 1429 /* 1430 * We don't mlock() pte-mapped THPs. This way we can avoid 1431 * leaking mlocked pages into non-VM_LOCKED VMAs. 1432 * 1433 * For anon THP: 1434 * 1435 * In most cases the pmd is the only mapping of the page as we 1436 * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for 1437 * writable private mappings in populate_vma_page_range(). 1438 * 1439 * The only scenario when we have the page shared here is if we 1440 * mlocking read-only mapping shared over fork(). We skip 1441 * mlocking such pages. 1442 * 1443 * For file THP: 1444 * 1445 * We can expect PageDoubleMap() to be stable under page lock: 1446 * for file pages we set it in page_add_file_rmap(), which 1447 * requires page to be locked. 1448 */ 1449 1450 if (PageAnon(page) && compound_mapcount(page) != 1) 1451 goto skip_mlock; 1452 if (PageDoubleMap(page) || !page->mapping) 1453 goto skip_mlock; 1454 if (!trylock_page(page)) 1455 goto skip_mlock; 1456 lru_add_drain(); 1457 if (page->mapping && !PageDoubleMap(page)) 1458 mlock_vma_page(page); 1459 unlock_page(page); 1460 } 1461 skip_mlock: 1462 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; 1463 VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page); 1464 if (flags & FOLL_GET) 1465 get_page(page); 1466 1467 out: 1468 return page; 1469 } 1470 1471 /* NUMA hinting page fault entry point for trans huge pmds */ 1472 int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd) 1473 { 1474 struct vm_area_struct *vma = vmf->vma; 1475 struct anon_vma *anon_vma = NULL; 1476 struct page *page; 1477 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 1478 int page_nid = -1, this_nid = numa_node_id(); 1479 int target_nid, last_cpupid = -1; 1480 bool page_locked; 1481 bool migrated = false; 1482 bool was_writable; 1483 int flags = 0; 1484 1485 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 1486 if (unlikely(!pmd_same(pmd, *vmf->pmd))) 1487 goto out_unlock; 1488 1489 /* 1490 * If there are potential migrations, wait for completion and retry 1491 * without disrupting NUMA hinting information. Do not relock and 1492 * check_same as the page may no longer be mapped. 
1493 */ 1494 if (unlikely(pmd_trans_migrating(*vmf->pmd))) { 1495 page = pmd_page(*vmf->pmd); 1496 if (!get_page_unless_zero(page)) 1497 goto out_unlock; 1498 spin_unlock(vmf->ptl); 1499 wait_on_page_locked(page); 1500 put_page(page); 1501 goto out; 1502 } 1503 1504 page = pmd_page(pmd); 1505 BUG_ON(is_huge_zero_page(page)); 1506 page_nid = page_to_nid(page); 1507 last_cpupid = page_cpupid_last(page); 1508 count_vm_numa_event(NUMA_HINT_FAULTS); 1509 if (page_nid == this_nid) { 1510 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); 1511 flags |= TNF_FAULT_LOCAL; 1512 } 1513 1514 /* See similar comment in do_numa_page for explanation */ 1515 if (!pmd_savedwrite(pmd)) 1516 flags |= TNF_NO_GROUP; 1517 1518 /* 1519 * Acquire the page lock to serialise THP migrations but avoid dropping 1520 * page_table_lock if at all possible 1521 */ 1522 page_locked = trylock_page(page); 1523 target_nid = mpol_misplaced(page, vma, haddr); 1524 if (target_nid == -1) { 1525 /* If the page was locked, there are no parallel migrations */ 1526 if (page_locked) 1527 goto clear_pmdnuma; 1528 } 1529 1530 /* Migration could have started since the pmd_trans_migrating check */ 1531 if (!page_locked) { 1532 page_nid = -1; 1533 if (!get_page_unless_zero(page)) 1534 goto out_unlock; 1535 spin_unlock(vmf->ptl); 1536 wait_on_page_locked(page); 1537 put_page(page); 1538 goto out; 1539 } 1540 1541 /* 1542 * Page is misplaced. Page lock serialises migrations. Acquire anon_vma 1543 * to serialises splits 1544 */ 1545 get_page(page); 1546 spin_unlock(vmf->ptl); 1547 anon_vma = page_lock_anon_vma_read(page); 1548 1549 /* Confirm the PMD did not change while page_table_lock was released */ 1550 spin_lock(vmf->ptl); 1551 if (unlikely(!pmd_same(pmd, *vmf->pmd))) { 1552 unlock_page(page); 1553 put_page(page); 1554 page_nid = -1; 1555 goto out_unlock; 1556 } 1557 1558 /* Bail if we fail to protect against THP splits for any reason */ 1559 if (unlikely(!anon_vma)) { 1560 put_page(page); 1561 page_nid = -1; 1562 goto clear_pmdnuma; 1563 } 1564 1565 /* 1566 * Since we took the NUMA fault, we must have observed the !accessible 1567 * bit. Make sure all other CPUs agree with that, to avoid them 1568 * modifying the page we're about to migrate. 1569 * 1570 * Must be done under PTL such that we'll observe the relevant 1571 * inc_tlb_flush_pending(). 1572 * 1573 * We are not sure a pending tlb flush here is for a huge page 1574 * mapping or not. Hence use the tlb range variant 1575 */ 1576 if (mm_tlb_flush_pending(vma->vm_mm)) 1577 flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE); 1578 1579 /* 1580 * Migrate the THP to the requested node, returns with page unlocked 1581 * and access rights restored. 
1582 */ 1583 spin_unlock(vmf->ptl); 1584 1585 migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma, 1586 vmf->pmd, pmd, vmf->address, page, target_nid); 1587 if (migrated) { 1588 flags |= TNF_MIGRATED; 1589 page_nid = target_nid; 1590 } else 1591 flags |= TNF_MIGRATE_FAIL; 1592 1593 goto out; 1594 clear_pmdnuma: 1595 BUG_ON(!PageLocked(page)); 1596 was_writable = pmd_savedwrite(pmd); 1597 pmd = pmd_modify(pmd, vma->vm_page_prot); 1598 pmd = pmd_mkyoung(pmd); 1599 if (was_writable) 1600 pmd = pmd_mkwrite(pmd); 1601 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); 1602 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 1603 unlock_page(page); 1604 out_unlock: 1605 spin_unlock(vmf->ptl); 1606 1607 out: 1608 if (anon_vma) 1609 page_unlock_anon_vma_read(anon_vma); 1610 1611 if (page_nid != -1) 1612 task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, 1613 flags); 1614 1615 return 0; 1616 } 1617 1618 /* 1619 * Return true if we do MADV_FREE successfully on entire pmd page. 1620 * Otherwise, return false. 1621 */ 1622 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1623 pmd_t *pmd, unsigned long addr, unsigned long next) 1624 { 1625 spinlock_t *ptl; 1626 pmd_t orig_pmd; 1627 struct page *page; 1628 struct mm_struct *mm = tlb->mm; 1629 bool ret = false; 1630 1631 tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE); 1632 1633 ptl = pmd_trans_huge_lock(pmd, vma); 1634 if (!ptl) 1635 goto out_unlocked; 1636 1637 orig_pmd = *pmd; 1638 if (is_huge_zero_pmd(orig_pmd)) 1639 goto out; 1640 1641 if (unlikely(!pmd_present(orig_pmd))) { 1642 VM_BUG_ON(thp_migration_supported() && 1643 !is_pmd_migration_entry(orig_pmd)); 1644 goto out; 1645 } 1646 1647 page = pmd_page(orig_pmd); 1648 /* 1649 * If other processes are mapping this page, we couldn't discard 1650 * the page unless they all do MADV_FREE so let's skip the page. 1651 */ 1652 if (page_mapcount(page) != 1) 1653 goto out; 1654 1655 if (!trylock_page(page)) 1656 goto out; 1657 1658 /* 1659 * If user want to discard part-pages of THP, split it so MADV_FREE 1660 * will deactivate only them. 1661 */ 1662 if (next - addr != HPAGE_PMD_SIZE) { 1663 get_page(page); 1664 spin_unlock(ptl); 1665 split_huge_page(page); 1666 unlock_page(page); 1667 put_page(page); 1668 goto out_unlocked; 1669 } 1670 1671 if (PageDirty(page)) 1672 ClearPageDirty(page); 1673 unlock_page(page); 1674 1675 if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) { 1676 pmdp_invalidate(vma, addr, pmd); 1677 orig_pmd = pmd_mkold(orig_pmd); 1678 orig_pmd = pmd_mkclean(orig_pmd); 1679 1680 set_pmd_at(mm, addr, pmd, orig_pmd); 1681 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 1682 } 1683 1684 mark_page_lazyfree(page); 1685 ret = true; 1686 out: 1687 spin_unlock(ptl); 1688 out_unlocked: 1689 return ret; 1690 } 1691 1692 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) 1693 { 1694 pgtable_t pgtable; 1695 1696 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1697 pte_free(mm, pgtable); 1698 mm_dec_nr_ptes(mm); 1699 } 1700 1701 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1702 pmd_t *pmd, unsigned long addr) 1703 { 1704 pmd_t orig_pmd; 1705 spinlock_t *ptl; 1706 1707 tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE); 1708 1709 ptl = __pmd_trans_huge_lock(pmd, vma); 1710 if (!ptl) 1711 return 0; 1712 /* 1713 * For architectures like ppc64 we look at deposited pgtable 1714 * when calling pmdp_huge_get_and_clear. So do the 1715 * pgtable_trans_huge_withdraw after finishing pmdp related 1716 * operations. 
1717 */ 1718 orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, 1719 tlb->fullmm); 1720 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 1721 if (vma_is_dax(vma)) { 1722 if (arch_needs_pgtable_deposit()) 1723 zap_deposited_table(tlb->mm, pmd); 1724 spin_unlock(ptl); 1725 if (is_huge_zero_pmd(orig_pmd)) 1726 tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE); 1727 } else if (is_huge_zero_pmd(orig_pmd)) { 1728 zap_deposited_table(tlb->mm, pmd); 1729 spin_unlock(ptl); 1730 tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE); 1731 } else { 1732 struct page *page = NULL; 1733 int flush_needed = 1; 1734 1735 if (pmd_present(orig_pmd)) { 1736 page = pmd_page(orig_pmd); 1737 page_remove_rmap(page, true); 1738 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); 1739 VM_BUG_ON_PAGE(!PageHead(page), page); 1740 } else if (thp_migration_supported()) { 1741 swp_entry_t entry; 1742 1743 VM_BUG_ON(!is_pmd_migration_entry(orig_pmd)); 1744 entry = pmd_to_swp_entry(orig_pmd); 1745 page = pfn_to_page(swp_offset(entry)); 1746 flush_needed = 0; 1747 } else 1748 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); 1749 1750 if (PageAnon(page)) { 1751 zap_deposited_table(tlb->mm, pmd); 1752 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); 1753 } else { 1754 if (arch_needs_pgtable_deposit()) 1755 zap_deposited_table(tlb->mm, pmd); 1756 add_mm_counter(tlb->mm, MM_FILEPAGES, -HPAGE_PMD_NR); 1757 } 1758 1759 spin_unlock(ptl); 1760 if (flush_needed) 1761 tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE); 1762 } 1763 return 1; 1764 } 1765 1766 #ifndef pmd_move_must_withdraw 1767 static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl, 1768 spinlock_t *old_pmd_ptl, 1769 struct vm_area_struct *vma) 1770 { 1771 /* 1772 * With split pmd lock we also need to move preallocated 1773 * PTE page table if new_pmd is on different PMD page table. 1774 * 1775 * We also don't deposit and withdraw tables for file pages. 1776 */ 1777 return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); 1778 } 1779 #endif 1780 1781 static pmd_t move_soft_dirty_pmd(pmd_t pmd) 1782 { 1783 #ifdef CONFIG_MEM_SOFT_DIRTY 1784 if (unlikely(is_pmd_migration_entry(pmd))) 1785 pmd = pmd_swp_mksoft_dirty(pmd); 1786 else if (pmd_present(pmd)) 1787 pmd = pmd_mksoft_dirty(pmd); 1788 #endif 1789 return pmd; 1790 } 1791 1792 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, 1793 unsigned long new_addr, unsigned long old_end, 1794 pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush) 1795 { 1796 spinlock_t *old_ptl, *new_ptl; 1797 pmd_t pmd; 1798 struct mm_struct *mm = vma->vm_mm; 1799 bool force_flush = false; 1800 1801 if ((old_addr & ~HPAGE_PMD_MASK) || 1802 (new_addr & ~HPAGE_PMD_MASK) || 1803 old_end - old_addr < HPAGE_PMD_SIZE) 1804 return false; 1805 1806 /* 1807 * The destination pmd shouldn't be established, free_pgtables() 1808 * should have release it. 1809 */ 1810 if (WARN_ON(!pmd_none(*new_pmd))) { 1811 VM_BUG_ON(pmd_trans_huge(*new_pmd)); 1812 return false; 1813 } 1814 1815 /* 1816 * We don't have to worry about the ordering of src and dst 1817 * ptlocks because exclusive mmap_sem prevents deadlock. 
1818 */ 1819 old_ptl = __pmd_trans_huge_lock(old_pmd, vma); 1820 if (old_ptl) { 1821 new_ptl = pmd_lockptr(mm, new_pmd); 1822 if (new_ptl != old_ptl) 1823 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); 1824 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); 1825 if (pmd_present(pmd) && pmd_dirty(pmd)) 1826 force_flush = true; 1827 VM_BUG_ON(!pmd_none(*new_pmd)); 1828 1829 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) { 1830 pgtable_t pgtable; 1831 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); 1832 pgtable_trans_huge_deposit(mm, new_pmd, pgtable); 1833 } 1834 pmd = move_soft_dirty_pmd(pmd); 1835 set_pmd_at(mm, new_addr, new_pmd, pmd); 1836 if (new_ptl != old_ptl) 1837 spin_unlock(new_ptl); 1838 if (force_flush) 1839 flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); 1840 else 1841 *need_flush = true; 1842 spin_unlock(old_ptl); 1843 return true; 1844 } 1845 return false; 1846 } 1847 1848 /* 1849 * Returns 1850 * - 0 if PMD could not be locked 1851 * - 1 if PMD was locked but protections unchange and TLB flush unnecessary 1852 * - HPAGE_PMD_NR is protections changed and TLB flush necessary 1853 */ 1854 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 1855 unsigned long addr, pgprot_t newprot, int prot_numa) 1856 { 1857 struct mm_struct *mm = vma->vm_mm; 1858 spinlock_t *ptl; 1859 pmd_t entry; 1860 bool preserve_write; 1861 int ret; 1862 1863 ptl = __pmd_trans_huge_lock(pmd, vma); 1864 if (!ptl) 1865 return 0; 1866 1867 preserve_write = prot_numa && pmd_write(*pmd); 1868 ret = 1; 1869 1870 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 1871 if (is_swap_pmd(*pmd)) { 1872 swp_entry_t entry = pmd_to_swp_entry(*pmd); 1873 1874 VM_BUG_ON(!is_pmd_migration_entry(*pmd)); 1875 if (is_write_migration_entry(entry)) { 1876 pmd_t newpmd; 1877 /* 1878 * A protection check is difficult so 1879 * just be safe and disable write 1880 */ 1881 make_migration_entry_read(&entry); 1882 newpmd = swp_entry_to_pmd(entry); 1883 if (pmd_swp_soft_dirty(*pmd)) 1884 newpmd = pmd_swp_mksoft_dirty(newpmd); 1885 set_pmd_at(mm, addr, pmd, newpmd); 1886 } 1887 goto unlock; 1888 } 1889 #endif 1890 1891 /* 1892 * Avoid trapping faults against the zero page. The read-only 1893 * data is likely to be read-cached on the local CPU and 1894 * local/remote hits to the zero page are not interesting. 1895 */ 1896 if (prot_numa && is_huge_zero_pmd(*pmd)) 1897 goto unlock; 1898 1899 if (prot_numa && pmd_protnone(*pmd)) 1900 goto unlock; 1901 1902 /* 1903 * In case prot_numa, we are under down_read(mmap_sem). It's critical 1904 * to not clear pmd intermittently to avoid race with MADV_DONTNEED 1905 * which is also under down_read(mmap_sem): 1906 * 1907 * CPU0: CPU1: 1908 * change_huge_pmd(prot_numa=1) 1909 * pmdp_huge_get_and_clear_notify() 1910 * madvise_dontneed() 1911 * zap_pmd_range() 1912 * pmd_trans_huge(*pmd) == 0 (without ptl) 1913 * // skip the pmd 1914 * set_pmd_at(); 1915 * // pmd is re-established 1916 * 1917 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it 1918 * which may break userspace. 1919 * 1920 * pmdp_invalidate() is required to make sure we don't miss 1921 * dirty/young flags set by hardware. 1922 */ 1923 entry = *pmd; 1924 pmdp_invalidate(vma, addr, pmd); 1925 1926 /* 1927 * Recover dirty/young flags. It relies on pmdp_invalidate to not 1928 * corrupt them. 
	 */
	if (pmd_dirty(*pmd))
		entry = pmd_mkdirty(entry);
	if (pmd_young(*pmd))
		entry = pmd_mkyoung(entry);

	entry = pmd_modify(entry, newprot);
	if (preserve_write)
		entry = pmd_mk_savedwrite(entry);
	ret = HPAGE_PMD_NR;
	set_pmd_at(mm, addr, pmd, entry);
	BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
unlock:
	spin_unlock(ptl);
	return ret;
}

/*
 * Returns the page table lock pointer if a given pmd maps a thp, NULL
 * otherwise.
 *
 * Note that if it returns the page table lock pointer, this routine returns
 * without unlocking the page table lock. So callers must unlock it.
 */
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
{
	spinlock_t *ptl;
	ptl = pmd_lock(vma->vm_mm, pmd);
	if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
			pmd_devmap(*pmd)))
		return ptl;
	spin_unlock(ptl);
	return NULL;
}

/*
 * Returns the page table lock pointer if a given pud maps a thp, NULL
 * otherwise.
 *
 * Note that if it returns the page table lock pointer, this routine returns
 * without unlocking the page table lock. So callers must unlock it.
 */
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
{
	spinlock_t *ptl;

	ptl = pud_lock(vma->vm_mm, pud);
	if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
		return ptl;
	spin_unlock(ptl);
	return NULL;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pud_t *pud, unsigned long addr)
{
	pud_t orig_pud;
	spinlock_t *ptl;

	ptl = __pud_trans_huge_lock(pud, vma);
	if (!ptl)
		return 0;
	/*
	 * For architectures like ppc64 we look at the deposited pgtable
	 * when calling pudp_huge_get_and_clear. So do the
	 * pgtable_trans_huge_withdraw after finishing pudp related
	 * operations.
	 */
	orig_pud = pudp_huge_get_and_clear_full(tlb->mm, addr, pud,
			tlb->fullmm);
	tlb_remove_pud_tlb_entry(tlb, pud, addr);
	if (vma_is_dax(vma)) {
		spin_unlock(ptl);
		/* No zero page support yet */
	} else {
		/* No support for anonymous PUD pages yet */
		BUG();
	}
	return 1;
}

static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
		unsigned long haddr)
{
	VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
	VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));

	count_vm_event(THP_SPLIT_PUD);

	pudp_huge_clear_flush_notify(vma, haddr, pud);
}

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address)
{
	spinlock_t *ptl;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long haddr = address & HPAGE_PUD_MASK;

	mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PUD_SIZE);
	ptl = pud_lock(mm, pud);
	if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
		goto out;
	__split_huge_pud_locked(vma, pud, haddr);

out:
	spin_unlock(ptl);
	/*
	 * No need to double call the mmu_notifier->invalidate_range() callback
	 * as the pudp_huge_clear_flush_notify() above already called it.
	 */
	mmu_notifier_invalidate_range_only_end(mm, haddr, haddr +
					       HPAGE_PUD_SIZE);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
		unsigned long haddr, pmd_t *pmd)
{
	struct mm_struct *mm = vma->vm_mm;
	pgtable_t pgtable;
	pmd_t _pmd;
	int i;

	/*
	 * Leave the pmd empty until the ptes are filled. Note that it is fine
	 * to delay notification until mmu_notifier_invalidate_range_end() as
	 * we are replacing a zero pmd write protected page with a zero pte
	 * write protected page.
	 *
	 * See Documentation/vm/mmu_notifier.txt
	 */
	pmdp_huge_clear_flush(vma, haddr, pmd);

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
		entry = pte_mkspecial(entry);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
}

static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long haddr, bool freeze)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	pgtable_t pgtable;
	pmd_t _pmd;
	bool young, write, dirty, soft_dirty, pmd_migration = false;
	unsigned long addr;
	int i;

	VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
	VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
				&& !pmd_devmap(*pmd));

	count_vm_event(THP_SPLIT_PMD);

	if (!vma_is_anonymous(vma)) {
		_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
		/*
		 * We are going to unmap this huge page. So
		 * just go ahead and zap it
		 */
		if (arch_needs_pgtable_deposit())
			zap_deposited_table(mm, pmd);
		if (vma_is_dax(vma))
			return;
		page = pmd_page(_pmd);
		if (!PageReferenced(page) && pmd_young(_pmd))
			SetPageReferenced(page);
		page_remove_rmap(page, true);
		put_page(page);
		add_mm_counter(mm, MM_FILEPAGES, -HPAGE_PMD_NR);
		return;
	} else if (is_huge_zero_pmd(*pmd)) {
		/*
		 * FIXME: Do we want to invalidate the secondary mmu by calling
		 * mmu_notifier_invalidate_range()? See the comments below
		 * inside __split_huge_pmd().
		 *
		 * We are going from a zero huge page write protected to zero
		 * small pages also write protected so it does not seem useful
		 * to invalidate the secondary mmu at this time.
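		 * (__split_huge_zero_page_pmd() below just replaces the huge
		 *  zero-page pmd with a page table full of pte-level zero-page
		 *  mappings, all still write protected.)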
		 */
		return __split_huge_zero_page_pmd(vma, haddr, pmd);
	}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	pmd_migration = is_pmd_migration_entry(*pmd);
	if (pmd_migration) {
		swp_entry_t entry;

		entry = pmd_to_swp_entry(*pmd);
		page = pfn_to_page(swp_offset(entry));
	} else
#endif
		page = pmd_page(*pmd);
	VM_BUG_ON_PAGE(!page_count(page), page);
	page_ref_add(page, HPAGE_PMD_NR - 1);
	write = pmd_write(*pmd);
	young = pmd_young(*pmd);
	dirty = pmd_dirty(*pmd);
	soft_dirty = pmd_soft_dirty(*pmd);

	pmdp_huge_split_prepare(vma, haddr, pmd);
	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
		pte_t entry, *pte;
		/*
		 * Note that NUMA hinting access restrictions are not
		 * transferred to avoid any possibility of altering
		 * permissions across VMAs.
		 */
		if (freeze || pmd_migration) {
			swp_entry_t swp_entry;
			swp_entry = make_migration_entry(page + i, write);
			entry = swp_entry_to_pte(swp_entry);
			if (soft_dirty)
				entry = pte_swp_mksoft_dirty(entry);
		} else {
			entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
			entry = maybe_mkwrite(entry, vma);
			if (!write)
				entry = pte_wrprotect(entry);
			if (!young)
				entry = pte_mkold(entry);
			if (soft_dirty)
				entry = pte_mksoft_dirty(entry);
		}
		if (dirty)
			SetPageDirty(page + i);
		pte = pte_offset_map(&_pmd, addr);
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, entry);
		atomic_inc(&page[i]._mapcount);
		pte_unmap(pte);
	}

	/*
	 * Set PG_double_map before dropping compound_mapcount to avoid
	 * false-negative page_mapped().
	 */
	if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
		for (i = 0; i < HPAGE_PMD_NR; i++)
			atomic_inc(&page[i]._mapcount);
	}

	if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
		/* Last compound_mapcount is gone. */
		__dec_node_page_state(page, NR_ANON_THPS);
		if (TestClearPageDoubleMap(page)) {
			/* No need for the mapcount reference anymore */
			for (i = 0; i < HPAGE_PMD_NR; i++)
				atomic_dec(&page[i]._mapcount);
		}
	}

	smp_wmb(); /* make pte visible before pmd */
	/*
	 * Up to this point the pmd is present and huge and userland has full
	 * access to the hugepage during the split (which happens in place).
	 * If we overwrite the pmd with the not-huge version pointing to the
	 * pte here (which of course we could if all CPUs were bug free),
	 * userland could trigger a small page size TLB miss on the small
	 * sized TLB while the hugepage TLB entry is still established in the
	 * huge TLB. Some CPUs don't like that.
	 * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
	 * 383 on page 93. Intel should be safe but it also warns that it's
	 * only safe if the permission and cache attributes of the two entries
	 * loaded in the two TLBs are identical (which should be the case
	 * here). But it is generally safer to never allow small and huge TLB
	 * entries for the same virtual address to be loaded simultaneously.
	 * So instead of doing "pmd_populate(); flush_pmd_tlb_range();" we
	 * first mark the current pmd notpresent (atomically because here the
	 * pmd_trans_huge and pmd_trans_splitting must remain set at all times
	 * on the pmd until the split is complete for this pmd), then we flush
	 * the SMP TLB and finally we write the non-huge version of the pmd
	 * entry with pmd_populate.
	 */
	pmdp_invalidate(vma, haddr, pmd);
	pmd_populate(mm, pmd, pgtable);

	if (freeze) {
		for (i = 0; i < HPAGE_PMD_NR; i++) {
			page_remove_rmap(page + i, false);
			put_page(page + i);
		}
	}
}

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page)
{
	spinlock_t *ptl;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long haddr = address & HPAGE_PMD_MASK;

	mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
	ptl = pmd_lock(mm, pmd);

	/*
	 * If the caller asks to set up migration entries, we need a page to
	 * check the pmd against. Otherwise we can end up replacing the wrong
	 * page.
	 */
	VM_BUG_ON(freeze && !page);
	if (page && page != pmd_page(*pmd))
		goto out;

	if (pmd_trans_huge(*pmd)) {
		page = pmd_page(*pmd);
		if (PageMlocked(page))
			clear_page_mlock(page);
	} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
		goto out;
	__split_huge_pmd_locked(vma, pmd, haddr, freeze);
out:
	spin_unlock(ptl);
	/*
	 * No need to double call the mmu_notifier->invalidate_range()
	 * callback. There are 3 cases to consider inside
	 * __split_huge_pmd_locked():
	 *  1) pmdp_huge_clear_flush_notify() calls invalidate_range(), which
	 *     is the obvious case
	 *  2) __split_huge_zero_page_pmd() splits a read-only zero page; any
	 *     write fault will trigger a flush_notify before pointing to a
	 *     new page (it is fine if the secondary mmu keeps pointing to the
	 *     old zero page in the meantime)
	 *  3) a huge pmd is split into ptes pointing to the same page. No
	 *     need to invalidate the secondary tlb entries; they are all
	 *     still valid. Any further changes to individual ptes will
	 *     notify, so there is no need to call
	 *     mmu_notifier->invalidate_range()
	 */
	mmu_notifier_invalidate_range_only_end(mm, haddr, haddr +
					       HPAGE_PMD_SIZE);
}

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(vma->vm_mm, address);
	if (!pgd_present(*pgd))
		return;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		return;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);

	__split_huge_pmd(vma, pmd, address, freeze, page);
}

void vma_adjust_trans_huge(struct vm_area_struct *vma,
			     unsigned long start,
			     unsigned long end,
			     long adjust_next)
{
	/*
	 * If the new start address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
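	 * (split_huge_pmd_address() leaves the page tables untouched when the
	 *  address is not actually backed by a huge pmd, so calling it
	 *  unconditionally here is safe.)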
	 */
	if (start & ~HPAGE_PMD_MASK &&
	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_pmd_address(vma, start, false, NULL);

	/*
	 * If the new end address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (end & ~HPAGE_PMD_MASK &&
	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_pmd_address(vma, end, false, NULL);

	/*
	 * If we're also updating the vma->vm_next->vm_start, if the new
	 * vm_next->vm_start isn't page aligned and it could previously
	 * contain a hugepage: check if we need to split a huge pmd.
	 */
	if (adjust_next > 0) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long nstart = next->vm_start;
		nstart += adjust_next << PAGE_SHIFT;
		if (nstart & ~HPAGE_PMD_MASK &&
		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
			split_huge_pmd_address(next, nstart, false, NULL);
	}
}

static void freeze_page(struct page *page)
{
	enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
		TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
	bool unmap_success;

	VM_BUG_ON_PAGE(!PageHead(page), page);

	if (PageAnon(page))
		ttu_flags |= TTU_SPLIT_FREEZE;

	unmap_success = try_to_unmap(page, ttu_flags);
	VM_BUG_ON_PAGE(!unmap_success, page);
}

static void unfreeze_page(struct page *page)
{
	int i;
	if (PageTransHuge(page)) {
		remove_migration_ptes(page, page, true);
	} else {
		for (i = 0; i < HPAGE_PMD_NR; i++)
			remove_migration_ptes(page + i, page + i, true);
	}
}

static void __split_huge_page_tail(struct page *head, int tail,
		struct lruvec *lruvec, struct list_head *list)
{
	struct page *page_tail = head + tail;

	VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
	VM_BUG_ON_PAGE(page_ref_count(page_tail) != 0, page_tail);

	/*
	 * tail_page->_refcount is zero and not changing from under us. But
	 * get_page_unless_zero() may be running from under us on the
	 * tail_page. If we used atomic_set() below instead of atomic_inc() or
	 * atomic_add(), we would then run atomic_set() concurrently with
	 * get_page_unless_zero(), and atomic_set() is implemented in C not
	 * using locked ops. spin_unlock on x86 sometimes uses locked ops
	 * because of PPro errata 66, 92, so unless somebody can guarantee
	 * atomic_set() here would be safe on all archs (and not only on x86),
	 * it's safer to use atomic_inc()/atomic_add().
	 */
	if (PageAnon(head) && !PageSwapCache(head)) {
		page_ref_inc(page_tail);
	} else {
		/* Additional pin to radix tree */
		page_ref_add(page_tail, 2);
	}

	page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	page_tail->flags |= (head->flags &
			((1L << PG_referenced) |
			 (1L << PG_swapbacked) |
			 (1L << PG_swapcache) |
			 (1L << PG_mlocked) |
			 (1L << PG_uptodate) |
			 (1L << PG_active) |
			 (1L << PG_locked) |
			 (1L << PG_unevictable) |
			 (1L << PG_dirty)));

	/*
	 * After clearing PageTail the gup refcount can be released.
	 * Page flags also must be visible before we make the page non-compound.
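	 * (The smp_wmb() below publishes the refcount and flag updates before
	 *  clear_compound_head() turns the tail into an independently
	 *  refcounted page.)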
	 */
	smp_wmb();

	clear_compound_head(page_tail);

	if (page_is_young(head))
		set_page_young(page_tail);
	if (page_is_idle(head))
		set_page_idle(page_tail);

	/* ->mapping in first tail page is compound_mapcount */
	VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
			page_tail);
	page_tail->mapping = head->mapping;

	page_tail->index = head->index + tail;
	page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
	lru_add_page_tail(head, page_tail, lruvec, list);
}

static void __split_huge_page(struct page *page, struct list_head *list,
		unsigned long flags)
{
	struct page *head = compound_head(page);
	struct zone *zone = page_zone(head);
	struct lruvec *lruvec;
	pgoff_t end = -1;
	int i;

	lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);

	/* complete memcg work before adding pages to the LRU */
	mem_cgroup_split_huge_fixup(head);

	if (!PageAnon(page))
		end = DIV_ROUND_UP(i_size_read(head->mapping->host), PAGE_SIZE);

	for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
		__split_huge_page_tail(head, i, lruvec, list);
		/* Some pages can be beyond i_size: drop them from page cache */
		if (head[i].index >= end) {
			__ClearPageDirty(head + i);
			__delete_from_page_cache(head + i, NULL);
			if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
				shmem_uncharge(head->mapping->host, 1);
			put_page(head + i);
		}
	}

	ClearPageCompound(head);
	/* See comment in __split_huge_page_tail() */
	if (PageAnon(head)) {
		/* Additional pin to radix tree of swap cache */
		if (PageSwapCache(head))
			page_ref_add(head, 2);
		else
			page_ref_inc(head);
	} else {
		/* Additional pin to radix tree */
		page_ref_add(head, 2);
		spin_unlock(&head->mapping->tree_lock);
	}

	spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);

	unfreeze_page(head);

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		struct page *subpage = head + i;
		if (subpage == page)
			continue;
		unlock_page(subpage);

		/*
		 * Subpages may be freed if there wasn't any mapping, e.g. if
		 * add_to_swap() is running on an lru page that had its
		 * mapping zapped. Freeing these pages requires taking the
		 * lru_lock, so we do the put_page of the tail pages after the
		 * split is complete.
		 */
		put_page(subpage);
	}
}

int total_mapcount(struct page *page)
{
	int i, compound, ret;

	VM_BUG_ON_PAGE(PageTail(page), page);

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) + 1;

	compound = compound_mapcount(page);
	if (PageHuge(page))
		return compound;
	ret = compound;
	for (i = 0; i < HPAGE_PMD_NR; i++)
		ret += atomic_read(&page[i]._mapcount) + 1;
	/* File pages have compound_mapcount included in _mapcount */
	if (!PageAnon(page))
		return ret - compound * HPAGE_PMD_NR;
	if (PageDoubleMap(page))
		ret -= HPAGE_PMD_NR;
	return ret;
}

/*
 * This calculates accurately how many mappings a transparent hugepage
 * has (unlike page_mapcount() which isn't fully accurate). This full
 * accuracy is primarily needed to know if copy-on-write faults can
 * reuse the page and change the mapping to read-write instead of
 * copying them. At the same time this returns the total_mapcount too.
 *
 * The function returns the highest mapcount any one of the subpages
 * has. If the return value is one, even if different processes are
 * mapping different subpages of the transparent hugepage, they can
 * all reuse it, because each process is reusing a different subpage.
 *
 * The total_mapcount is instead counting all virtual mappings of the
 * subpages. If the total_mapcount is equal to "one", it tells the
 * caller all mappings belong to the same "mm" and in turn the
 * anon_vma of the transparent hugepage can become the vma->anon_vma
 * local one as no other process may be mapping any of the subpages.
 *
 * It would be more accurate to replace page_mapcount() with
 * page_trans_huge_mapcount(), however we only use
 * page_trans_huge_mapcount() in the copy-on-write faults where we
 * need full accuracy to avoid breaking page pinning, because
 * page_trans_huge_mapcount() is slower than page_mapcount().
 */
int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
{
	int i, ret, _total_mapcount, mapcount;

	/* hugetlbfs shouldn't call it */
	VM_BUG_ON_PAGE(PageHuge(page), page);

	if (likely(!PageTransCompound(page))) {
		mapcount = atomic_read(&page->_mapcount) + 1;
		if (total_mapcount)
			*total_mapcount = mapcount;
		return mapcount;
	}

	page = compound_head(page);

	_total_mapcount = ret = 0;
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		mapcount = atomic_read(&page[i]._mapcount) + 1;
		ret = max(ret, mapcount);
		_total_mapcount += mapcount;
	}
	if (PageDoubleMap(page)) {
		ret -= 1;
		_total_mapcount -= HPAGE_PMD_NR;
	}
	mapcount = compound_mapcount(page);
	ret += mapcount;
	_total_mapcount += mapcount;
	if (total_mapcount)
		*total_mapcount = _total_mapcount;
	return ret;
}

/* Racy check whether the huge page can be split */
bool can_split_huge_page(struct page *page, int *pextra_pins)
{
	int extra_pins;

	/* Additional pins from radix tree */
	if (PageAnon(page))
		extra_pins = PageSwapCache(page) ? HPAGE_PMD_NR : 0;
	else
		extra_pins = HPAGE_PMD_NR;
	if (pextra_pins)
		*pextra_pins = extra_pins;
	return total_mapcount(page) == page_count(page) - extra_pins - 1;
}

/*
 * This function splits a huge page into normal pages. @page can point to any
 * subpage of the huge page to split. The split doesn't change the position
 * of @page.
 *
 * Only the caller may hold a pin on @page; any additional pin makes the split
 * fail with -EBUSY.
 * The huge page must be locked.
 *
 * If @list is null, tail pages will be added to the LRU list, otherwise to
 * @list.
 *
 * Both the head page and the tail pages will inherit mapping, flags, and so
 * on from the hugepage.
 *
 * The GUP pin and PG_locked are transferred to @page. The rest of the
 * subpages can be freed if they are not mapped.
 *
 * Returns 0 if the hugepage is split successfully.
 * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
 * us.
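 *
 * (split_huge_page() is simply this function called with a NULL @list, so
 *  that the tail pages go back to the LRU.)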
 */
int split_huge_page_to_list(struct page *page, struct list_head *list)
{
	struct page *head = compound_head(page);
	struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;
	int count, mapcount, extra_pins, ret;
	bool mlocked;
	unsigned long flags;

	VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (PageWriteback(page))
		return -EBUSY;

	if (PageAnon(head)) {
		/*
		 * The caller does not necessarily hold an mmap_sem that would
		 * prevent the anon_vma disappearing, so we first take a
		 * reference to it and then lock the anon_vma for write. This
		 * is similar to page_lock_anon_vma_read except the write lock
		 * is taken to serialise against parallel split or collapse
		 * operations.
		 */
		anon_vma = page_get_anon_vma(head);
		if (!anon_vma) {
			ret = -EBUSY;
			goto out;
		}
		mapping = NULL;
		anon_vma_lock_write(anon_vma);
	} else {
		mapping = head->mapping;

		/* Truncated ? */
		if (!mapping) {
			ret = -EBUSY;
			goto out;
		}

		anon_vma = NULL;
		i_mmap_lock_read(mapping);
	}

	/*
	 * Racy check whether we can split the page, before freeze_page()
	 * splits the PMDs.
	 */
	if (!can_split_huge_page(head, &extra_pins)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	mlocked = PageMlocked(page);
	freeze_page(head);
	VM_BUG_ON_PAGE(compound_mapcount(head), head);

	/* Make sure the page is not on a per-CPU pagevec as it takes a pin */
	if (mlocked)
		lru_add_drain();

	/* prevent PageLRU from going away from under us, and freeze lru stats */
	spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags);

	if (mapping) {
		void **pslot;

		spin_lock(&mapping->tree_lock);
		pslot = radix_tree_lookup_slot(&mapping->page_tree,
				page_index(head));
		/*
		 * Check if the head page is present in the radix tree.
		 * We assume all tail pages are present too, if the head is
		 * there.
		 */
		if (radix_tree_deref_slot_protected(pslot,
				&mapping->tree_lock) != head)
			goto fail;
	}

	/* Prevent deferred_split_scan() touching ->_refcount */
	spin_lock(&pgdata->split_queue_lock);
	count = page_count(head);
	mapcount = total_mapcount(head);
	if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
		if (!list_empty(page_deferred_list(head))) {
			pgdata->split_queue_len--;
			list_del(page_deferred_list(head));
		}
		if (mapping)
			__dec_node_page_state(page, NR_SHMEM_THPS);
		spin_unlock(&pgdata->split_queue_lock);
		__split_huge_page(page, list, flags);
		if (PageSwapCache(head)) {
			swp_entry_t entry = { .val = page_private(head) };

			ret = split_swap_cluster(entry);
		} else
			ret = 0;
	} else {
		if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
			pr_alert("total_mapcount: %u, page_count(): %u\n",
					mapcount, count);
			if (PageTail(page))
				dump_page(head, NULL);
			dump_page(page, "total_mapcount(head) > 0");
			BUG();
		}
		spin_unlock(&pgdata->split_queue_lock);
fail:		if (mapping)
			spin_unlock(&mapping->tree_lock);
		spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
		unfreeze_page(head);
		ret = -EBUSY;
	}

out_unlock:
	if (anon_vma) {
		anon_vma_unlock_write(anon_vma);
		put_anon_vma(anon_vma);
	}
	if (mapping)
		i_mmap_unlock_read(mapping);
out:
	count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
	return ret;
}

void free_transhuge_page(struct page *page)
{
	struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
	unsigned long flags;

	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
	if (!list_empty(page_deferred_list(page))) {
		pgdata->split_queue_len--;
		list_del(page_deferred_list(page));
	}
	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
	free_compound_page(page);
}

void deferred_split_huge_page(struct page *page)
{
	struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
	unsigned long flags;

	VM_BUG_ON_PAGE(!PageTransHuge(page), page);

	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
	if (list_empty(page_deferred_list(page))) {
		count_vm_event(THP_DEFERRED_SPLIT_PAGE);
		list_add_tail(page_deferred_list(page), &pgdata->split_queue);
		pgdata->split_queue_len++;
	}
	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
}

static unsigned long deferred_split_count(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	return READ_ONCE(pgdata->split_queue_len);
}

static unsigned long deferred_split_scan(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	unsigned long flags;
	LIST_HEAD(list), *pos, *next;
	struct page *page;
	int split = 0;

	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
	/* Take pin on all head pages to avoid freeing them under us */
	list_for_each_safe(pos, next, &pgdata->split_queue) {
		page = list_entry((void *)pos, struct page, mapping);
		page = compound_head(page);
		if (get_page_unless_zero(page)) {
			list_move(page_deferred_list(page), &list);
		} else {
			/* We lost race with put_compound_page() */
			list_del_init(page_deferred_list(page));
			pgdata->split_queue_len--;
		}
		if (!--sc->nr_to_scan)
			break;
	}
	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);

	list_for_each_safe(pos, next, &list) {
		page = list_entry((void *)pos, struct page, mapping);
		lock_page(page);
		/* split_huge_page() removes page from list on success */
		if (!split_huge_page(page))
			split++;
		unlock_page(page);
		put_page(page);
	}

	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
	list_splice_tail(&list, &pgdata->split_queue);
	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);

	/*
	 * Stop the shrinker if we didn't split any page and the queue is
	 * empty. This can happen if pages were freed under us.
	 */
	if (!split && list_empty(&pgdata->split_queue))
		return SHRINK_STOP;
	return split;
}

static struct shrinker deferred_split_shrinker = {
	.count_objects = deferred_split_count,
	.scan_objects = deferred_split_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE,
};

#ifdef CONFIG_DEBUG_FS
static int split_huge_pages_set(void *data, u64 val)
{
	struct zone *zone;
	struct page *page;
	unsigned long pfn, max_zone_pfn;
	unsigned long total = 0, split = 0;

	if (val != 1)
		return -EINVAL;

	for_each_populated_zone(zone) {
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			if (!pfn_valid(pfn))
				continue;

			page = pfn_to_page(pfn);
			if (!get_page_unless_zero(page))
				continue;

			if (zone != page_zone(page))
				goto next;

			if (!PageHead(page) || PageHuge(page) || !PageLRU(page))
				goto next;

			total++;
			lock_page(page);
			if (!split_huge_page(page))
				split++;
			unlock_page(page);
next:
			put_page(page);
		}
	}

	pr_info("%lu of %lu THP split\n", split, total);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set,
		"%llu\n");

static int __init split_huge_pages_debugfs(void)
{
	void *ret;

	ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
			&split_huge_pages_fops);
	if (!ret)
		pr_warn("Failed to create split_huge_pages in debugfs");
	return 0;
}
late_initcall(split_huge_pages_debugfs);
#endif

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	pmd_t pmdval;
	swp_entry_t entry;
	pmd_t pmdswp;

	if (!(pvmw->pmd && !pvmw->pte))
		return;

	mmu_notifier_invalidate_range_start(mm, address,
			address + HPAGE_PMD_SIZE);

	flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
	pmdval = *pvmw->pmd;
	pmdp_invalidate(vma, address, pvmw->pmd);
	if (pmd_dirty(pmdval))
		set_page_dirty(page);
	entry = make_migration_entry(page, pmd_write(pmdval));
	pmdswp = swp_entry_to_pmd(entry);
	if (pmd_soft_dirty(pmdval))
		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
	page_remove_rmap(page, true);
	put_page(page);

	mmu_notifier_invalidate_range_end(mm, address,
			address + HPAGE_PMD_SIZE);
}
void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	unsigned long mmun_start = address & HPAGE_PMD_MASK;
	pmd_t pmde;
	swp_entry_t entry;

	if (!(pvmw->pmd && !pvmw->pte))
		return;

	entry = pmd_to_swp_entry(*pvmw->pmd);
	get_page(new);
	pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
	if (pmd_swp_soft_dirty(*pvmw->pmd))
		pmde = pmd_mksoft_dirty(pmde);
	if (is_write_migration_entry(entry))
		pmde = maybe_pmd_mkwrite(pmde, vma);

	flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
	page_add_anon_rmap(new, vma, mmun_start, true);
	set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
	if (vma->vm_flags & VM_LOCKED)
		mlock_vma_page(new);
	update_mmu_cache_pmd(vma, address, pvmw->pmd);
}
#endif