// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/mm_types.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>
#include <linux/compat.h>
#include <linux/pgalloc_tag.h>
#include <linux/pagewalk.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "swap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

static struct shrinker *deferred_split_shrinker;
static unsigned long deferred_split_count(struct shrinker *shrink,
					  struct shrink_control *sc);
static unsigned long deferred_split_scan(struct shrinker *shrink,
					 struct shrink_control *sc);
static bool split_underused_thp = true;

static atomic_t huge_zero_refcount;
struct folio *huge_zero_folio __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;
unsigned long huge_anon_orders_always __read_mostly;
unsigned long huge_anon_orders_madvise __read_mostly;
unsigned long huge_anon_orders_inherit __read_mostly;
static bool anon_orders_configured __initdata;

static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
	struct inode *inode;

	if (!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS))
		return false;

	if (!vma->vm_file)
		return false;

	inode = file_inode(vma->vm_file);

	return !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}

unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
					 unsigned long vm_flags,
					 unsigned long tva_flags,
					 unsigned long orders)
{
	bool smaps = tva_flags & TVA_SMAPS;
	bool in_pf = tva_flags & TVA_IN_PF;
	bool enforce_sysfs = tva_flags & TVA_ENFORCE_SYSFS;
	unsigned long supported_orders;

	/* Check the intersection of requested and supported orders. */
	if (vma_is_anonymous(vma))
		supported_orders = THP_ORDERS_ALL_ANON;
	else if (vma_is_special_huge(vma))
		supported_orders = THP_ORDERS_ALL_SPECIAL;
	else
		supported_orders = THP_ORDERS_ALL_FILE_DEFAULT;

	orders &= supported_orders;
	if (!orders)
		return 0;

	if (!vma->vm_mm)		/* vdso */
		return 0;

	if (thp_disabled_by_hw() || vma_thp_disabled(vma, vm_flags))
		return 0;

	/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
	if (vma_is_dax(vma))
		return in_pf ? orders : 0;

	/*
	 * khugepaged special VMA and hugetlb VMA.
	 * Must be checked after dax since some dax mappings may have
	 * VM_MIXEDMAP set.
	 */
	if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED))
		return 0;

	/*
	 * Check alignment for file vma and size for both file and anon vma by
	 * filtering out the unsuitable orders.
	 *
	 * Skip the check for page fault. Huge fault does the check in fault
	 * handlers.
	 */
	if (!in_pf) {
		int order = highest_order(orders);
		unsigned long addr;

		while (orders) {
			addr = vma->vm_end - (PAGE_SIZE << order);
			if (thp_vma_suitable_order(vma, addr, order))
				break;
			order = next_order(&orders, order);
		}

		if (!orders)
			return 0;
	}

	/*
	 * Enabled via shmem mount options or sysfs settings.
	 * Must be done before hugepage flags check since shmem has its
	 * own flags.
	 */
	if (!in_pf && shmem_file(vma->vm_file))
		return shmem_allowable_huge_orders(file_inode(vma->vm_file),
						   vma, vma->vm_pgoff, 0,
						   !enforce_sysfs);

	if (!vma_is_anonymous(vma)) {
		/*
		 * Enforce sysfs THP requirements as necessary. Anonymous vmas
		 * were already handled in thp_vma_allowable_orders().
		 */
		if (enforce_sysfs &&
		    (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
						    !hugepage_global_always())))
			return 0;

		/*
		 * Trust that ->huge_fault() handlers know what they are doing
		 * in fault path.
		 */
		if (((in_pf || smaps)) && vma->vm_ops->huge_fault)
			return orders;
		/* Only regular file is valid in collapse path */
		if (((!in_pf || smaps)) && file_thp_enabled(vma))
			return orders;
		return 0;
	}

	if (vma_is_temporary_stack(vma))
		return 0;

	/*
	 * THPeligible bit of smaps should show 1 for proper VMAs even
	 * though anon_vma is not initialized yet.
	 *
	 * Allow page fault since anon_vma may be not initialized until
	 * the first page fault.
	 */
	if (!vma->anon_vma)
		return (smaps || in_pf) ? orders : 0;

	return orders;
}

static bool get_huge_zero_page(void)
{
	struct folio *zero_folio;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return true;

	zero_folio = folio_alloc((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_folio) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return false;
	}
	/* Ensure zero folio won't have large_rmappable flag set. */
	folio_clear_large_rmappable(zero_folio);
	preempt_disable();
	if (cmpxchg(&huge_zero_folio, NULL, zero_folio)) {
		preempt_enable();
		folio_put(zero_folio);
		goto retry;
	}
	WRITE_ONCE(huge_zero_pfn, folio_pfn(zero_folio));

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	return true;
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

struct folio *mm_get_huge_zero_folio(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_folio);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_folio);
}

void mm_put_huge_zero_folio(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct folio *zero_folio = xchg(&huge_zero_folio, NULL);
		BUG_ON(zero_folio == NULL);
		WRITE_ONCE(huge_zero_pfn, ~0UL);
		folio_put(zero_folio);
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker *huge_zero_page_shrinker;

#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		output = "[always] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always [madvise] never";
	else
		output = "always madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}

static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);

ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag)
{
	return sysfs_emit(buf, "%d\n",
			  !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
		     &transparent_hugepage_flags))
		output = "[always] defer defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
			  &transparent_hugepage_flags))
		output = "always [defer] defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer [defer+madvise] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer defer+madvise [madvise] never";
	else
		output = "always defer defer+madvise madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer+madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);

static ssize_t use_zero_page_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);

static ssize_t split_underused_thp_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", split_underused_thp);
}

static ssize_t split_underused_thp_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	int err = kstrtobool(buf, &split_underused_thp);

	if (err < 0)
		return err;

	return count;
}

static struct kobj_attribute split_underused_thp_attr = __ATTR(
	shrink_underused, 0644, split_underused_thp_show, split_underused_thp_store);

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#ifdef CONFIG_SHMEM
	&shmem_enabled_attr.attr,
#endif
	&split_underused_thp_attr.attr,
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static void hugepage_exit_sysfs(struct kobject *hugepage_kobj);
static void thpsize_release(struct kobject *kobj);
static DEFINE_SPINLOCK(huge_anon_orders_lock);
static LIST_HEAD(thpsize_list);

static ssize_t anon_enabled_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	int order = to_thpsize(kobj)->order;
	const char *output;

	if (test_bit(order, &huge_anon_orders_always))
		output = "[always] inherit madvise never";
	else if (test_bit(order, &huge_anon_orders_inherit))
		output = "always [inherit] madvise never";
	else if (test_bit(order, &huge_anon_orders_madvise))
		output = "always inherit [madvise] never";
	else
		output = "always inherit madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t anon_enabled_store(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  const char *buf, size_t count)
{
	int order = to_thpsize(kobj)->order;
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_inherit);
		clear_bit(order, &huge_anon_orders_madvise);
		set_bit(order, &huge_anon_orders_always);
		spin_unlock(&huge_anon_orders_lock);
	} else if (sysfs_streq(buf, "inherit")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_always);
		clear_bit(order, &huge_anon_orders_madvise);
		set_bit(order, &huge_anon_orders_inherit);
		spin_unlock(&huge_anon_orders_lock);
	} else if (sysfs_streq(buf, "madvise")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_always);
		clear_bit(order, &huge_anon_orders_inherit);
		set_bit(order, &huge_anon_orders_madvise);
		spin_unlock(&huge_anon_orders_lock);
	} else if (sysfs_streq(buf, "never")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_always);
		clear_bit(order, &huge_anon_orders_inherit);
		clear_bit(order, &huge_anon_orders_madvise);
		spin_unlock(&huge_anon_orders_lock);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err;

		err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}

static struct kobj_attribute anon_enabled_attr =
	__ATTR(enabled, 0644, anon_enabled_show, anon_enabled_store);

static struct attribute *anon_ctrl_attrs[] = {
	&anon_enabled_attr.attr,
	NULL,
};

static const struct attribute_group anon_ctrl_attr_grp = {
	.attrs = anon_ctrl_attrs,
};

static struct attribute *file_ctrl_attrs[] = {
#ifdef CONFIG_SHMEM
	&thpsize_shmem_enabled_attr.attr,
#endif
	NULL,
};

static const struct attribute_group file_ctrl_attr_grp = {
	.attrs = file_ctrl_attrs,
};

static struct attribute *any_ctrl_attrs[] = {
	NULL,
};

static const struct attribute_group any_ctrl_attr_grp = {
	.attrs = any_ctrl_attrs,
};

static const struct kobj_type thpsize_ktype = {
	.release = &thpsize_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

DEFINE_PER_CPU(struct mthp_stat, mthp_stats) = {{{0}}};

static unsigned long sum_mthp_stat(int order, enum mthp_stat_item item)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mthp_stat *this = &per_cpu(mthp_stats, cpu);

		sum += this->stats[order][item];
	}

	return sum;
}

#define DEFINE_MTHP_STAT_ATTR(_name, _index)				\
static ssize_t _name##_show(struct kobject *kobj,			\
			struct kobj_attribute *attr, char *buf)		\
{									\
	int order = to_thpsize(kobj)->order;				\
									\
	return sysfs_emit(buf, "%lu\n", sum_mthp_stat(order, _index));	\
}									\
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC);
DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
DEFINE_MTHP_STAT_ATTR(zswpout, MTHP_STAT_ZSWPOUT);
DEFINE_MTHP_STAT_ATTR(swpin, MTHP_STAT_SWPIN);
DEFINE_MTHP_STAT_ATTR(swpin_fallback, MTHP_STAT_SWPIN_FALLBACK);
DEFINE_MTHP_STAT_ATTR(swpin_fallback_charge, MTHP_STAT_SWPIN_FALLBACK_CHARGE);
DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT);
DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
#ifdef CONFIG_SHMEM
DEFINE_MTHP_STAT_ATTR(shmem_alloc, MTHP_STAT_SHMEM_ALLOC);
DEFINE_MTHP_STAT_ATTR(shmem_fallback, MTHP_STAT_SHMEM_FALLBACK);
DEFINE_MTHP_STAT_ATTR(shmem_fallback_charge, MTHP_STAT_SHMEM_FALLBACK_CHARGE);
#endif
DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT);
DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED);
DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED);
DEFINE_MTHP_STAT_ATTR(nr_anon, MTHP_STAT_NR_ANON);
DEFINE_MTHP_STAT_ATTR(nr_anon_partially_mapped, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED);

static struct attribute *anon_stats_attrs[] = {
	&anon_fault_alloc_attr.attr,
	&anon_fault_fallback_attr.attr,
	&anon_fault_fallback_charge_attr.attr,
#ifndef CONFIG_SHMEM
	&zswpout_attr.attr,
	&swpin_attr.attr,
	&swpin_fallback_attr.attr,
	&swpin_fallback_charge_attr.attr,
	&swpout_attr.attr,
	&swpout_fallback_attr.attr,
#endif
	&split_deferred_attr.attr,
	&nr_anon_attr.attr,
	&nr_anon_partially_mapped_attr.attr,
	NULL,
};

static struct attribute_group anon_stats_attr_grp = {
	.name = "stats",
	.attrs = anon_stats_attrs,
};

static struct attribute *file_stats_attrs[] = {
#ifdef CONFIG_SHMEM
	&shmem_alloc_attr.attr,
	&shmem_fallback_attr.attr,
	&shmem_fallback_charge_attr.attr,
#endif
	NULL,
};

static struct attribute_group file_stats_attr_grp = {
	.name = "stats",
	.attrs = file_stats_attrs,
};

static struct attribute *any_stats_attrs[] = {
#ifdef CONFIG_SHMEM
	&zswpout_attr.attr,
	&swpin_attr.attr,
	&swpin_fallback_attr.attr,
	&swpin_fallback_charge_attr.attr,
	&swpout_attr.attr,
	&swpout_fallback_attr.attr,
#endif
	&split_attr.attr,
	&split_failed_attr.attr,
	NULL,
};

static struct attribute_group any_stats_attr_grp = {
	.name = "stats",
	.attrs = any_stats_attrs,
};

static int sysfs_add_group(struct kobject *kobj,
			   const struct attribute_group *grp)
{
	int ret = -ENOENT;

	/*
	 * If the group is named, try to merge first, assuming the subdirectory
	 * was already created. This avoids the warning emitted by
	 * sysfs_create_group() if the directory already exists.
	 */
	if (grp->name)
		ret = sysfs_merge_group(kobj, grp);
	if (ret)
		ret = sysfs_create_group(kobj, grp);

	return ret;
}

static struct thpsize *thpsize_create(int order, struct kobject *parent)
{
	unsigned long size = (PAGE_SIZE << order) / SZ_1K;
	struct thpsize *thpsize;
	int ret = -ENOMEM;

	thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL);
	if (!thpsize)
		goto err;

	thpsize->order = order;

	ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent,
				   "hugepages-%lukB", size);
	if (ret) {
		kfree(thpsize);
		goto err;
	}


	ret = sysfs_add_group(&thpsize->kobj, &any_ctrl_attr_grp);
	if (ret)
		goto err_put;

	ret = sysfs_add_group(&thpsize->kobj, &any_stats_attr_grp);
	if (ret)
		goto err_put;

	if (BIT(order) & THP_ORDERS_ALL_ANON) {
		ret = sysfs_add_group(&thpsize->kobj, &anon_ctrl_attr_grp);
		if (ret)
			goto err_put;

		ret = sysfs_add_group(&thpsize->kobj, &anon_stats_attr_grp);
		if (ret)
			goto err_put;
	}

	if (BIT(order) & THP_ORDERS_ALL_FILE_DEFAULT) {
		ret = sysfs_add_group(&thpsize->kobj, &file_ctrl_attr_grp);
		if (ret)
			goto err_put;

		ret = sysfs_add_group(&thpsize->kobj, &file_stats_attr_grp);
		if (ret)
			goto err_put;
	}

	return thpsize;
err_put:
	kobject_put(&thpsize->kobj);
err:
	return ERR_PTR(ret);
}

static void thpsize_release(struct kobject *kobj)
{
	kfree(to_thpsize(kobj));
}

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;
	struct thpsize *thpsize;
	unsigned long orders;
	int order;

	/*
	 * Default to setting PMD-sized THP to inherit the global setting and
	 * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time
	 * constant so we have to do this here.
	 */
	if (!anon_orders_configured)
		huge_anon_orders_inherit = BIT(PMD_ORDER);

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	orders = THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DEFAULT;
	order = highest_order(orders);
	while (orders) {
		thpsize = thpsize_create(order, *hugepage_kobj);
		if (IS_ERR(thpsize)) {
			pr_err("failed to create thpsize for order %d\n", order);
			err = PTR_ERR(thpsize);
			goto remove_all;
		}
		list_add(&thpsize->node, &thpsize_list);
		order = next_order(&orders, order);
	}

	return 0;

remove_all:
	hugepage_exit_sysfs(*hugepage_kobj);
	return err;
remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	struct thpsize *thpsize, *tmp;

	list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) {
		list_del(&thpsize->node);
		kobject_put(&thpsize->kobj);
	}

	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init thp_shrinker_init(void)
{
	huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero");
	if (!huge_zero_page_shrinker)
		return -ENOMEM;

	deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
						 SHRINKER_MEMCG_AWARE |
						 SHRINKER_NONSLAB,
						 "thp-deferred_split");
	if (!deferred_split_shrinker) {
		shrinker_free(huge_zero_page_shrinker);
		return -ENOMEM;
	}

	huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count;
	huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan;
	shrinker_register(huge_zero_page_shrinker);

	deferred_split_shrinker->count_objects = deferred_split_count;
	deferred_split_shrinker->scan_objects = deferred_split_scan;
	shrinker_register(deferred_split_shrinker);

	return 0;
}

static void __init thp_shrinker_exit(void)
{
	shrinker_free(huge_zero_page_shrinker);
	shrinker_free(deferred_split_shrinker);
}

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_PAGE_ORDER);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = thp_shrinker_init();
	if (err)
		goto err_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save.  The admin can still enable it through /sys.
	 */
	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	thp_shrinker_exit();
err_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

static char str_dup[PAGE_SIZE] __initdata;
static int __init setup_thp_anon(char *str)
{
	char *token, *range, *policy, *subtoken;
	unsigned long always, inherit, madvise;
	char *start_size, *end_size;
	int start, end, nr;
	char *p;

	if (!str || strlen(str) + 1 > PAGE_SIZE)
		goto err;
	strscpy(str_dup, str);

	always = huge_anon_orders_always;
	madvise = huge_anon_orders_madvise;
	inherit = huge_anon_orders_inherit;
	p = str_dup;
	while ((token = strsep(&p, ";")) != NULL) {
		range = strsep(&token, ":");
		policy = token;

		if (!policy)
			goto err;

		while ((subtoken = strsep(&range, ",")) != NULL) {
			if (strchr(subtoken, '-')) {
				start_size = strsep(&subtoken, "-");
				end_size = subtoken;

				start = get_order_from_str(start_size, THP_ORDERS_ALL_ANON);
				end = get_order_from_str(end_size, THP_ORDERS_ALL_ANON);
			} else {
				start_size = end_size = subtoken;
				start = end = get_order_from_str(subtoken,
								 THP_ORDERS_ALL_ANON);
			}

			if (start == -EINVAL) {
				pr_err("invalid size %s in thp_anon boot parameter\n", start_size);
				goto err;
			}

			if (end == -EINVAL) {
				pr_err("invalid size %s in thp_anon boot parameter\n", end_size);
				goto err;
			}

			if (start < 0 || end < 0 || start > end)
				goto err;

			nr = end - start + 1;
			if (!strcmp(policy, "always")) {
				bitmap_set(&always, start, nr);
				bitmap_clear(&inherit, start, nr);
				bitmap_clear(&madvise, start, nr);
			} else if (!strcmp(policy, "madvise")) {
				bitmap_set(&madvise, start, nr);
				bitmap_clear(&inherit, start, nr);
				bitmap_clear(&always, start, nr);
			} else if (!strcmp(policy, "inherit")) {
				bitmap_set(&inherit, start, nr);
				bitmap_clear(&madvise, start, nr);
				bitmap_clear(&always, start, nr);
			} else if (!strcmp(policy, "never")) {
				bitmap_clear(&inherit, start, nr);
				bitmap_clear(&madvise, start, nr);
				bitmap_clear(&always, start, nr);
			} else {
				pr_err("invalid policy %s in thp_anon boot parameter\n", policy);
				goto err;
			}
		}
	}

	huge_anon_orders_always = always;
	huge_anon_orders_madvise = madvise;
	huge_anon_orders_inherit = inherit;
	anon_orders_configured = true;
	return 1;

err:
	pr_warn("thp_anon=%s: error parsing string, ignoring setting\n", str);
	return 0;
}
__setup("thp_anon=", setup_thp_anon);

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd, vma);
	return pmd;
}

#ifdef CONFIG_MEMCG
static inline
struct deferred_split *get_deferred_split_queue(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);
	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));

	if (memcg)
		return &memcg->deferred_split_queue;
	else
		return &pgdat->deferred_split_queue;
}
#else
static inline
struct deferred_split *get_deferred_split_queue(struct folio *folio)
{
	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));

	return &pgdat->deferred_split_queue;
}
#endif

static inline bool is_transparent_hugepage(const struct folio *folio)
{
	if (!folio_test_large(folio))
		return false;

	return is_huge_zero_folio(folio) ||
	       folio_test_large_rmappable(folio);
}

static unsigned long __thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size,
		vm_flags_t vm_flags)
{
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad, ret, off_sub;

	if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall())
		return 0;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	ret = mm_get_unmapped_area_vmflags(current->mm, filp, addr, len_pad,
					   off >> PAGE_SHIFT, flags, vm_flags);

	/*
	 * The failure might be due to length padding. The caller will retry
	 * without the padding.
	 */
	if (IS_ERR_VALUE(ret))
		return 0;

	/*
	 * Do not try to align to THP boundary if allocation at the address
	 * hint succeeds.
	 */
	if (ret == addr)
		return addr;

	off_sub = (off - ret) & (size - 1);

	if (test_bit(MMF_TOPDOWN, &current->mm->flags) && !off_sub)
		return ret + size;

	ret += off_sub;
	return ret;
}

unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags,
		vm_flags_t vm_flags)
{
	unsigned long ret;
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE, vm_flags);
	if (ret)
		return ret;

	return mm_get_unmapped_area_vmflags(current->mm, filp, addr, len, pgoff, flags,
					    vm_flags);
}

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return thp_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, 0);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);

static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
		unsigned long addr)
{
	gfp_t gfp = vma_thp_gfp_mask(vma);
	const int order = HPAGE_PMD_ORDER;
	struct folio *folio;

	folio = vma_alloc_folio(gfp, order, vma, addr & HPAGE_PMD_MASK);

	if (unlikely(!folio)) {
		count_vm_event(THP_FAULT_FALLBACK);
		count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
		return NULL;
	}

	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
	if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
		folio_put(folio);
		count_vm_event(THP_FAULT_FALLBACK);
		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
		count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
		count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
		return NULL;
	}
	folio_throttle_swaprate(folio, gfp);

	/*
	 * When a folio is not zeroed during allocation (__GFP_ZERO not used)
	 * or user folios require special handling, folio_zero_user() is used to
	 * make sure that the page corresponding to the faulting address will be
	 * hot in the cache after zeroing.
	 */
	if (user_alloc_needs_zeroing())
		folio_zero_user(folio, addr);
	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * folio_zero_user writes become visible before the set_pmd_at()
	 * write.
	 */
	__folio_mark_uptodate(folio);
	return folio;
}

static void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd,
		struct vm_area_struct *vma, unsigned long haddr)
{
	pmd_t entry;

	entry = folio_mk_pmd(folio, vma->vm_page_prot);
	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
	folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
	folio_add_lru_vma(folio, vma);
	set_pmd_at(vma->vm_mm, haddr, pmd, entry);
	update_mmu_cache_pmd(vma, haddr, pmd);
	add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
	count_vm_event(THP_FAULT_ALLOC);
	count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
	count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
}

static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio;
	pgtable_t pgtable;
	vm_fault_t ret = 0;

	folio = vma_alloc_anon_folio_pmd(vma, vmf->address);
	if (unlikely(!folio))
		return VM_FAULT_FALLBACK;

	pgtable = pte_alloc_one(vma->vm_mm);
	if (unlikely(!pgtable)) {
		ret = VM_FAULT_OOM;
		goto release;
	}

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_none(*vmf->pmd))) {
		goto unlock_release;
	} else {
		ret = check_stable_address_space(vma->vm_mm);
		if (ret)
			goto unlock_release;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			spin_unlock(vmf->ptl);
			folio_put(folio);
			pte_free(vma->vm_mm, pgtable);
			ret = handle_userfault(vmf, VM_UFFD_MISSING);
			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			return ret;
		}
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		map_anon_folio_pmd(folio, vmf->pmd, vma, haddr);
		mm_inc_nr_ptes(vma->vm_mm);
		deferred_split_folio(folio, false);
		spin_unlock(vmf->ptl);
	}

	return 0;
unlock_release:
	spin_unlock(vmf->ptl);
release:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	folio_put(folio);
	return ret;

}

/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */
gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
{
	const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);

	/* Always do synchronous compaction */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);

	/* Kick kcompactd and fail quickly */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;

	/* Synchronous compaction if madvised, otherwise kick kcompactd */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM :
					__GFP_KSWAPD_RECLAIM);

	/* Only do synchronous compaction if madvised */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
		       (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);

	return GFP_TRANSHUGE_LIGHT;
}

/* Caller must hold page table lock. */
static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct folio *zero_folio)
{
	pmd_t entry;
	entry = folio_mk_pmd(zero_folio, vma->vm_page_prot);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
}

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	vm_fault_t ret;

	if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
		return VM_FAULT_FALLBACK;
	ret = vmf_anon_prepare(vmf);
	if (ret)
		return ret;
	khugepaged_enter_vma(vma, vma->vm_flags);

	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
			!mm_forbids_zeropage(vma->vm_mm) &&
			transparent_hugepage_use_zero_page()) {
		pgtable_t pgtable;
		struct folio *zero_folio;
		vm_fault_t ret;

		pgtable = pte_alloc_one(vma->vm_mm);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_folio = mm_get_huge_zero_folio(vma->vm_mm);
		if (unlikely(!zero_folio)) {
			pte_free(vma->vm_mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		ret = 0;
		if (pmd_none(*vmf->pmd)) {
			ret = check_stable_address_space(vma->vm_mm);
			if (ret) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
			} else if (userfaultfd_missing(vma)) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
				ret = handle_userfault(vmf, VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_folio(pgtable, vma->vm_mm, vma,
						    haddr, vmf->pmd, zero_folio);
				update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
				spin_unlock(vmf->ptl);
			}
		} else {
			spin_unlock(vmf->ptl);
			pte_free(vma->vm_mm, pgtable);
		}
		return ret;
	}

	return __do_huge_pmd_anonymous_page(vmf);
}

static int insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
		pgtable_t pgtable)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;

	lockdep_assert_held(pmd_lockptr(mm, pmd));

	if (!pmd_none(*pmd)) {
		if (write) {
			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
				return -EEXIST;
			}
			entry = pmd_mkyoung(*pmd);
			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
				update_mmu_cache_pmd(vma, addr, pmd);
		}

		return -EEXIST;
	}

	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pmd_mkdevmap(entry);
	else
		entry = pmd_mkspecial(entry);
	if (write) {
		entry = pmd_mkyoung(pmd_mkdirty(entry));
		entry = maybe_pmd_mkwrite(entry, vma);
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		mm_inc_nr_ptes(mm);
	}

	set_pmd_at(mm, addr, pmd, entry);
	update_mmu_cache_pmd(vma, addr, pmd);
	return 0;
}

/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
{
	unsigned long addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t pgprot = vma->vm_page_prot;
	pgtable_t pgtable = NULL;
	spinlock_t *ptl;
	int error;

	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
			(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	track_pfn_insert(vma, &pgprot, pfn);
	ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	error = insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write,
			pgtable);
	spin_unlock(ptl);
	if (error && pgtable)
		pte_free(vma->vm_mm, pgtable);

	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);

vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
				bool write)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address & PMD_MASK;
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pgtable_t pgtable = NULL;
	int error;

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	if (WARN_ON_ONCE(folio_order(folio) != PMD_ORDER))
		return VM_FAULT_SIGBUS;

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	ptl = pmd_lock(mm, vmf->pmd);
	if (pmd_none(*vmf->pmd)) {
		folio_get(folio);
		folio_add_file_rmap_pmd(folio, &folio->page, vma);
		add_mm_counter(mm, mm_counter_file(folio), HPAGE_PMD_NR);
	}
	error = insert_pfn_pmd(vma, addr, vmf->pmd,
			pfn_to_pfn_t(folio_pfn(folio)), vma->vm_page_prot,
			write, pgtable);
	spin_unlock(ptl);
	if (error && pgtable)
		pte_free(mm, pgtable);

	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_folio_pmd);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);
	return pud;
}

static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, pfn_t pfn, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	pgprot_t prot = vma->vm_page_prot;
	pud_t entry;

	if (!pud_none(*pud)) {
		if (write) {
			if (WARN_ON_ONCE(pud_pfn(*pud) != pfn_t_to_pfn(pfn)))
				return;
			entry = pud_mkyoung(*pud);
			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
				update_mmu_cache_pud(vma, addr, pud);
		}
		return;
	}

	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pud_mkdevmap(entry);
	else
		entry = pud_mkspecial(entry);
	if (write) {
		entry = pud_mkyoung(pud_mkdirty(entry));
		entry = maybe_pud_mkwrite(entry, vma);
	}
	set_pud_at(mm, addr, pud, entry);
	update_mmu_cache_pud(vma, addr, pud);
}

/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
{
	unsigned long addr = vmf->address & PUD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t pgprot = vma->vm_page_prot;
	spinlock_t *ptl;

	/*
	 * If we had pud_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
			(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	track_pfn_insert(vma, &pgprot, pfn);

	ptl = pud_lock(vma->vm_mm, vmf->pud);
	insert_pfn_pud(vma, addr, vmf->pud, pfn, write);
	spin_unlock(ptl);

	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);

/**
 * vmf_insert_folio_pud - insert a pud size folio mapped by a pud entry
 * @vmf: Structure describing the fault
 * @folio: folio to insert
 * @write: whether it's a write fault
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
				bool write)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address & PUD_MASK;
	pud_t *pud = vmf->pud;
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	if (WARN_ON_ONCE(folio_order(folio) != PUD_ORDER))
		return VM_FAULT_SIGBUS;

	ptl = pud_lock(mm, pud);

	/*
	 * If there is already an entry present we assume the folio is
	 * already mapped, hence no need to take another reference. We
	 * still call insert_pfn_pud() though in case the mapping needs
	 * upgrading to writeable.
	 */
	if (pud_none(*vmf->pud)) {
		folio_get(folio);
		folio_add_file_rmap_pud(folio, &folio->page, vma);
		add_mm_counter(mm, mm_counter_file(folio), HPAGE_PUD_NR);
	}
	insert_pfn_pud(vma, addr, vmf->pud, pfn_to_pfn_t(folio_pfn(folio)),
		write);
	spin_unlock(ptl);

	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_folio_pud);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
	       pmd_t *pmd, bool write)
{
	pmd_t _pmd;

	_pmd = pmd_mkyoung(*pmd);
	if (write)
		_pmd = pmd_mkdirty(_pmd);
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				  pmd, _pmd, write))
		update_mmu_cache_pmd(vma, addr, pmd);
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pmd_pfn(*pmd);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int ret;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		return NULL;

	if (pmd_present(*pmd) && pmd_devmap(*pmd))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	ret = try_grab_folio(page_folio(page), 1, flags);
	if (ret)
		page = ERR_PTR(ret);

	return page;
}

int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	struct folio *src_folio;
	pmd_t pmd;
	pgtable_t pgtable = NULL;
	int ret = -ENOMEM;

	pmd = pmdp_get_lockless(src_pmd);
	if (unlikely(pmd_present(pmd) && pmd_special(pmd))) {
		dst_ptl = pmd_lock(dst_mm, dst_pmd);
		src_ptl = pmd_lockptr(src_mm, src_pmd);
		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
		/*
		 * No need to recheck the pmd, it can't change with write
		 * mmap lock held here.
		 *
		 * Meanwhile, making sure it's not a CoW VMA with writable
		 * mapping, otherwise it means either the anon page wrongly
		 * applied special bit, or we made the PRIVATE mapping be
		 * able to wrongly write to the backend MMIO.
		 */
		VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd));
		goto set_pmd;
	}

	/* Skip if can be re-fill on fault */
	if (!vma_is_anonymous(dst_vma))
		return 0;

	pgtable = pte_alloc_one(dst_mm);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
	src_ptl = pmd_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	if (unlikely(is_swap_pmd(pmd))) {
		swp_entry_t entry = pmd_to_swp_entry(pmd);

		VM_BUG_ON(!is_pmd_migration_entry(pmd));
		if (!is_readable_migration_entry(entry)) {
			entry = make_readable_migration_entry(
							swp_offset(entry));
			pmd = swp_entry_to_pmd(entry);
			if (pmd_swp_soft_dirty(*src_pmd))
				pmd = pmd_swp_mksoft_dirty(pmd);
			if (pmd_swp_uffd_wp(*src_pmd))
				pmd = pmd_swp_mkuffd_wp(pmd);
			set_pmd_at(src_mm, addr, src_pmd, pmd);
		}
		add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(dst_mm);
		pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
		if (!userfaultfd_wp(dst_vma))
			pmd = pmd_swp_clear_uffd_wp(pmd);
		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
		ret = 0;
		goto out_unlock;
	}
#endif

	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	/*
	 * When page table lock is held, the huge zero pmd should not be
	 * under splitting since we don't split the page itself, only pmd to
	 * a page table.
	 */
	if (is_huge_zero_pmd(pmd)) {
		/*
		 * mm_get_huge_zero_folio() will never allocate a new
		 * folio here, since we already have a zero page to
		 * copy. It just takes a reference.
		 */
		mm_get_huge_zero_folio(dst_mm);
		goto out_zero_page;
	}

	src_page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
	src_folio = page_folio(src_page);

	folio_get(src_folio);
	if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, dst_vma, src_vma))) {
		/* Page maybe pinned: split and retry the fault on PTEs. */
		folio_put(src_folio);
		pte_free(dst_mm, pgtable);
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
		__split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
		return -EAGAIN;
	}
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
out_zero_page:
	mm_inc_nr_ptes(dst_mm);
	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	if (!userfaultfd_wp(dst_vma))
		pmd = pmd_clear_uffd_wp(pmd);
	pmd = pmd_wrprotect(pmd);
set_pmd:
	pmd = pmd_mkold(pmd);
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
out:
	return ret;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void touch_pud(struct vm_area_struct *vma, unsigned long addr,
	       pud_t *pud, bool write)
{
	pud_t _pud;

	_pud = pud_mkyoung(*pud);
	if (write)
		_pud = pud_mkdirty(_pud);
	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
				  pud, _pud, write))
		update_mmu_cache_pud(vma, addr, pud);
}

int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	pud_t pud;
	int ret;

	dst_ptl = pud_lock(dst_mm, dst_pud);
	src_ptl = pud_lockptr(src_mm, src_pud);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pud = *src_pud;
	if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
		goto out_unlock;

	/*
	 * TODO: once we support anonymous pages, use
	 * folio_try_dup_anon_rmap_*() and split if duplicating fails.
	 */
	if (is_cow_mapping(vma->vm_flags) && pud_write(pud)) {
		pudp_set_wrprotect(src_mm, addr, src_pud);
		pud = pud_wrprotect(pud);
	}
	pud = pud_mkold(pud);
	set_pud_at(dst_mm, addr, dst_pud, pud);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
	return ret;
}

void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
		goto unlock;

	touch_pud(vmf->vma, vmf->address, vmf->pud, write);
unlock:
	spin_unlock(vmf->ptl);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

void huge_pmd_set_accessed(struct vm_fault *vmf)
{
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
		goto unlock;

	touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);

unlock:
	spin_unlock(vmf->ptl);
}

static vm_fault_t do_huge_zero_wp_pmd(struct vm_fault *vmf)
{
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	struct mmu_notifier_range range;
	struct folio *folio;
	vm_fault_t ret = 0;

	folio = vma_alloc_anon_folio_pmd(vma, vmf->address);
	if (unlikely(!folio))
		return VM_FAULT_FALLBACK;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, haddr,
				haddr + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);
	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd)))
vmf->orig_pmd))) 1905 goto release; 1906 ret = check_stable_address_space(vma->vm_mm); 1907 if (ret) 1908 goto release; 1909 (void)pmdp_huge_clear_flush(vma, haddr, vmf->pmd); 1910 map_anon_folio_pmd(folio, vmf->pmd, vma, haddr); 1911 goto unlock; 1912 release: 1913 folio_put(folio); 1914 unlock: 1915 spin_unlock(vmf->ptl); 1916 mmu_notifier_invalidate_range_end(&range); 1917 return ret; 1918 } 1919 1920 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf) 1921 { 1922 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 1923 struct vm_area_struct *vma = vmf->vma; 1924 struct folio *folio; 1925 struct page *page; 1926 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 1927 pmd_t orig_pmd = vmf->orig_pmd; 1928 1929 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); 1930 VM_BUG_ON_VMA(!vma->anon_vma, vma); 1931 1932 if (is_huge_zero_pmd(orig_pmd)) { 1933 vm_fault_t ret = do_huge_zero_wp_pmd(vmf); 1934 1935 if (!(ret & VM_FAULT_FALLBACK)) 1936 return ret; 1937 1938 /* Fallback to splitting PMD if THP cannot be allocated */ 1939 goto fallback; 1940 } 1941 1942 spin_lock(vmf->ptl); 1943 1944 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { 1945 spin_unlock(vmf->ptl); 1946 return 0; 1947 } 1948 1949 page = pmd_page(orig_pmd); 1950 folio = page_folio(page); 1951 VM_BUG_ON_PAGE(!PageHead(page), page); 1952 1953 /* Early check when only holding the PT lock. */ 1954 if (PageAnonExclusive(page)) 1955 goto reuse; 1956 1957 if (!folio_trylock(folio)) { 1958 folio_get(folio); 1959 spin_unlock(vmf->ptl); 1960 folio_lock(folio); 1961 spin_lock(vmf->ptl); 1962 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { 1963 spin_unlock(vmf->ptl); 1964 folio_unlock(folio); 1965 folio_put(folio); 1966 return 0; 1967 } 1968 folio_put(folio); 1969 } 1970 1971 /* Recheck after temporarily dropping the PT lock. */ 1972 if (PageAnonExclusive(page)) { 1973 folio_unlock(folio); 1974 goto reuse; 1975 } 1976 1977 /* 1978 * See do_wp_page(): we can only reuse the folio exclusively if 1979 * there are no additional references. Note that we always drain 1980 * the LRU cache immediately after adding a THP. 1981 */ 1982 if (folio_ref_count(folio) > 1983 1 + folio_test_swapcache(folio) * folio_nr_pages(folio)) 1984 goto unlock_fallback; 1985 if (folio_test_swapcache(folio)) 1986 folio_free_swap(folio); 1987 if (folio_ref_count(folio) == 1) { 1988 pmd_t entry; 1989 1990 folio_move_anon_rmap(folio, vma); 1991 SetPageAnonExclusive(page); 1992 folio_unlock(folio); 1993 reuse: 1994 if (unlikely(unshare)) { 1995 spin_unlock(vmf->ptl); 1996 return 0; 1997 } 1998 entry = pmd_mkyoung(orig_pmd); 1999 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 2000 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) 2001 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 2002 spin_unlock(vmf->ptl); 2003 return 0; 2004 } 2005 2006 unlock_fallback: 2007 folio_unlock(folio); 2008 spin_unlock(vmf->ptl); 2009 fallback: 2010 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); 2011 return VM_FAULT_FALLBACK; 2012 } 2013 2014 static inline bool can_change_pmd_writable(struct vm_area_struct *vma, 2015 unsigned long addr, pmd_t pmd) 2016 { 2017 struct page *page; 2018 2019 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE))) 2020 return false; 2021 2022 /* Don't touch entries that are not even readable (NUMA hinting). */ 2023 if (pmd_protnone(pmd)) 2024 return false; 2025 2026 /* Do we need write faults for softdirty tracking? */ 2027 if (pmd_needs_soft_dirty_wp(vma, pmd)) 2028 return false; 2029 2030 /* Do we need write faults for uffd-wp tracking? 
*/ 2031 if (userfaultfd_huge_pmd_wp(vma, pmd)) 2032 return false; 2033 2034 if (!(vma->vm_flags & VM_SHARED)) { 2035 /* See can_change_pte_writable(). */ 2036 page = vm_normal_page_pmd(vma, addr, pmd); 2037 return page && PageAnon(page) && PageAnonExclusive(page); 2038 } 2039 2040 /* See can_change_pte_writable(). */ 2041 return pmd_dirty(pmd); 2042 } 2043 2044 /* NUMA hinting page fault entry point for trans huge pmds */ 2045 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) 2046 { 2047 struct vm_area_struct *vma = vmf->vma; 2048 struct folio *folio; 2049 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 2050 int nid = NUMA_NO_NODE; 2051 int target_nid, last_cpupid; 2052 pmd_t pmd, old_pmd; 2053 bool writable = false; 2054 int flags = 0; 2055 2056 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 2057 old_pmd = pmdp_get(vmf->pmd); 2058 2059 if (unlikely(!pmd_same(old_pmd, vmf->orig_pmd))) { 2060 spin_unlock(vmf->ptl); 2061 return 0; 2062 } 2063 2064 pmd = pmd_modify(old_pmd, vma->vm_page_prot); 2065 2066 /* 2067 * Detect now whether the PMD could be writable; this information 2068 * is only valid while holding the PT lock. 2069 */ 2070 writable = pmd_write(pmd); 2071 if (!writable && vma_wants_manual_pte_write_upgrade(vma) && 2072 can_change_pmd_writable(vma, vmf->address, pmd)) 2073 writable = true; 2074 2075 folio = vm_normal_folio_pmd(vma, haddr, pmd); 2076 if (!folio) 2077 goto out_map; 2078 2079 nid = folio_nid(folio); 2080 2081 target_nid = numa_migrate_check(folio, vmf, haddr, &flags, writable, 2082 &last_cpupid); 2083 if (target_nid == NUMA_NO_NODE) 2084 goto out_map; 2085 if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) { 2086 flags |= TNF_MIGRATE_FAIL; 2087 goto out_map; 2088 } 2089 /* The folio is isolated and isolation code holds a folio reference. */ 2090 spin_unlock(vmf->ptl); 2091 writable = false; 2092 2093 if (!migrate_misplaced_folio(folio, target_nid)) { 2094 flags |= TNF_MIGRATED; 2095 nid = target_nid; 2096 task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags); 2097 return 0; 2098 } 2099 2100 flags |= TNF_MIGRATE_FAIL; 2101 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 2102 if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) { 2103 spin_unlock(vmf->ptl); 2104 return 0; 2105 } 2106 out_map: 2107 /* Restore the PMD */ 2108 pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot); 2109 pmd = pmd_mkyoung(pmd); 2110 if (writable) 2111 pmd = pmd_mkwrite(pmd, vma); 2112 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); 2113 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 2114 spin_unlock(vmf->ptl); 2115 2116 if (nid != NUMA_NO_NODE) 2117 task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags); 2118 return 0; 2119 } 2120 2121 /* 2122 * Return true if we do MADV_FREE successfully on entire pmd page. 2123 * Otherwise, return false. 
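 *
 * Summary of the cases handled below: the huge zero PMD and folios that
 * appear to be mapped by more than one process are left untouched; if the
 * advised range does not cover the whole PMD, the folio is split instead so
 * that only the covered subpages are affected; otherwise the folio is
 * cleaned, the PMD made old and clean, and the folio marked lazyfree.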
2124 */ 2125 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 2126 pmd_t *pmd, unsigned long addr, unsigned long next) 2127 { 2128 spinlock_t *ptl; 2129 pmd_t orig_pmd; 2130 struct folio *folio; 2131 struct mm_struct *mm = tlb->mm; 2132 bool ret = false; 2133 2134 tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 2135 2136 ptl = pmd_trans_huge_lock(pmd, vma); 2137 if (!ptl) 2138 goto out_unlocked; 2139 2140 orig_pmd = *pmd; 2141 if (is_huge_zero_pmd(orig_pmd)) 2142 goto out; 2143 2144 if (unlikely(!pmd_present(orig_pmd))) { 2145 VM_BUG_ON(thp_migration_supported() && 2146 !is_pmd_migration_entry(orig_pmd)); 2147 goto out; 2148 } 2149 2150 folio = pmd_folio(orig_pmd); 2151 /* 2152 * If other processes are mapping this folio, we couldn't discard 2153 * the folio unless they all do MADV_FREE so let's skip the folio. 2154 */ 2155 if (folio_maybe_mapped_shared(folio)) 2156 goto out; 2157 2158 if (!folio_trylock(folio)) 2159 goto out; 2160 2161 /* 2162 * If user want to discard part-pages of THP, split it so MADV_FREE 2163 * will deactivate only them. 2164 */ 2165 if (next - addr != HPAGE_PMD_SIZE) { 2166 folio_get(folio); 2167 spin_unlock(ptl); 2168 split_folio(folio); 2169 folio_unlock(folio); 2170 folio_put(folio); 2171 goto out_unlocked; 2172 } 2173 2174 if (folio_test_dirty(folio)) 2175 folio_clear_dirty(folio); 2176 folio_unlock(folio); 2177 2178 if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) { 2179 pmdp_invalidate(vma, addr, pmd); 2180 orig_pmd = pmd_mkold(orig_pmd); 2181 orig_pmd = pmd_mkclean(orig_pmd); 2182 2183 set_pmd_at(mm, addr, pmd, orig_pmd); 2184 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 2185 } 2186 2187 folio_mark_lazyfree(folio); 2188 ret = true; 2189 out: 2190 spin_unlock(ptl); 2191 out_unlocked: 2192 return ret; 2193 } 2194 2195 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) 2196 { 2197 pgtable_t pgtable; 2198 2199 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2200 pte_free(mm, pgtable); 2201 mm_dec_nr_ptes(mm); 2202 } 2203 2204 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 2205 pmd_t *pmd, unsigned long addr) 2206 { 2207 pmd_t orig_pmd; 2208 spinlock_t *ptl; 2209 2210 tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 2211 2212 ptl = __pmd_trans_huge_lock(pmd, vma); 2213 if (!ptl) 2214 return 0; 2215 /* 2216 * For architectures like ppc64 we look at deposited pgtable 2217 * when calling pmdp_huge_get_and_clear. So do the 2218 * pgtable_trans_huge_withdraw after finishing pmdp related 2219 * operations. 
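 * In other words: pmdp_huge_get_and_clear_full() first, and only then
 * zap_deposited_table(), which performs the withdraw.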
2220 */ 2221 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd, 2222 tlb->fullmm); 2223 arch_check_zapped_pmd(vma, orig_pmd); 2224 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 2225 if (!vma_is_dax(vma) && vma_is_special_huge(vma)) { 2226 if (arch_needs_pgtable_deposit()) 2227 zap_deposited_table(tlb->mm, pmd); 2228 spin_unlock(ptl); 2229 } else if (is_huge_zero_pmd(orig_pmd)) { 2230 if (!vma_is_dax(vma) || arch_needs_pgtable_deposit()) 2231 zap_deposited_table(tlb->mm, pmd); 2232 spin_unlock(ptl); 2233 } else { 2234 struct folio *folio = NULL; 2235 int flush_needed = 1; 2236 2237 if (pmd_present(orig_pmd)) { 2238 struct page *page = pmd_page(orig_pmd); 2239 2240 folio = page_folio(page); 2241 folio_remove_rmap_pmd(folio, page, vma); 2242 WARN_ON_ONCE(folio_mapcount(folio) < 0); 2243 VM_BUG_ON_PAGE(!PageHead(page), page); 2244 } else if (thp_migration_supported()) { 2245 swp_entry_t entry; 2246 2247 VM_BUG_ON(!is_pmd_migration_entry(orig_pmd)); 2248 entry = pmd_to_swp_entry(orig_pmd); 2249 folio = pfn_swap_entry_folio(entry); 2250 flush_needed = 0; 2251 } else 2252 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); 2253 2254 if (folio_test_anon(folio)) { 2255 zap_deposited_table(tlb->mm, pmd); 2256 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); 2257 } else { 2258 if (arch_needs_pgtable_deposit()) 2259 zap_deposited_table(tlb->mm, pmd); 2260 add_mm_counter(tlb->mm, mm_counter_file(folio), 2261 -HPAGE_PMD_NR); 2262 } 2263 2264 spin_unlock(ptl); 2265 if (flush_needed) 2266 tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE); 2267 } 2268 return 1; 2269 } 2270 2271 #ifndef pmd_move_must_withdraw 2272 static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl, 2273 spinlock_t *old_pmd_ptl, 2274 struct vm_area_struct *vma) 2275 { 2276 /* 2277 * With split pmd lock we also need to move preallocated 2278 * PTE page table if new_pmd is on different PMD page table. 2279 * 2280 * We also don't deposit and withdraw tables for file pages. 2281 */ 2282 return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); 2283 } 2284 #endif 2285 2286 static pmd_t move_soft_dirty_pmd(pmd_t pmd) 2287 { 2288 #ifdef CONFIG_MEM_SOFT_DIRTY 2289 if (unlikely(is_pmd_migration_entry(pmd))) 2290 pmd = pmd_swp_mksoft_dirty(pmd); 2291 else if (pmd_present(pmd)) 2292 pmd = pmd_mksoft_dirty(pmd); 2293 #endif 2294 return pmd; 2295 } 2296 2297 static pmd_t clear_uffd_wp_pmd(pmd_t pmd) 2298 { 2299 if (pmd_present(pmd)) 2300 pmd = pmd_clear_uffd_wp(pmd); 2301 else if (is_swap_pmd(pmd)) 2302 pmd = pmd_swp_clear_uffd_wp(pmd); 2303 2304 return pmd; 2305 } 2306 2307 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, 2308 unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd) 2309 { 2310 spinlock_t *old_ptl, *new_ptl; 2311 pmd_t pmd; 2312 struct mm_struct *mm = vma->vm_mm; 2313 bool force_flush = false; 2314 2315 /* 2316 * The destination pmd shouldn't be established, free_pgtables() 2317 * should have released it; but move_page_tables() might have already 2318 * inserted a page table, if racing against shmem/file collapse. 2319 */ 2320 if (!pmd_none(*new_pmd)) { 2321 VM_BUG_ON(pmd_trans_huge(*new_pmd)); 2322 return false; 2323 } 2324 2325 /* 2326 * We don't have to worry about the ordering of src and dst 2327 * ptlocks because exclusive mmap_lock prevents deadlock. 
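 * (All callers hold mmap_lock for writing, so no other task can be taking
 * these two page table locks in the opposite order concurrently.)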
2328 */ 2329 old_ptl = __pmd_trans_huge_lock(old_pmd, vma); 2330 if (old_ptl) { 2331 new_ptl = pmd_lockptr(mm, new_pmd); 2332 if (new_ptl != old_ptl) 2333 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); 2334 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); 2335 if (pmd_present(pmd)) 2336 force_flush = true; 2337 VM_BUG_ON(!pmd_none(*new_pmd)); 2338 2339 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) { 2340 pgtable_t pgtable; 2341 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); 2342 pgtable_trans_huge_deposit(mm, new_pmd, pgtable); 2343 } 2344 pmd = move_soft_dirty_pmd(pmd); 2345 if (vma_has_uffd_without_event_remap(vma)) 2346 pmd = clear_uffd_wp_pmd(pmd); 2347 set_pmd_at(mm, new_addr, new_pmd, pmd); 2348 if (force_flush) 2349 flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE); 2350 if (new_ptl != old_ptl) 2351 spin_unlock(new_ptl); 2352 spin_unlock(old_ptl); 2353 return true; 2354 } 2355 return false; 2356 } 2357 2358 /* 2359 * Returns 2360 * - 0 if PMD could not be locked 2361 * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary 2362 * or if prot_numa but THP migration is not supported 2363 * - HPAGE_PMD_NR if protections changed and TLB flush necessary 2364 */ 2365 int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 2366 pmd_t *pmd, unsigned long addr, pgprot_t newprot, 2367 unsigned long cp_flags) 2368 { 2369 struct mm_struct *mm = vma->vm_mm; 2370 spinlock_t *ptl; 2371 pmd_t oldpmd, entry; 2372 bool prot_numa = cp_flags & MM_CP_PROT_NUMA; 2373 bool uffd_wp = cp_flags & MM_CP_UFFD_WP; 2374 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; 2375 int ret = 1; 2376 2377 tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 2378 2379 if (prot_numa && !thp_migration_supported()) 2380 return 1; 2381 2382 ptl = __pmd_trans_huge_lock(pmd, vma); 2383 if (!ptl) 2384 return 0; 2385 2386 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 2387 if (is_swap_pmd(*pmd)) { 2388 swp_entry_t entry = pmd_to_swp_entry(*pmd); 2389 struct folio *folio = pfn_swap_entry_folio(entry); 2390 pmd_t newpmd; 2391 2392 VM_BUG_ON(!is_pmd_migration_entry(*pmd)); 2393 if (is_writable_migration_entry(entry)) { 2394 /* 2395 * A protection check is difficult so 2396 * just be safe and disable write 2397 */ 2398 if (folio_test_anon(folio)) 2399 entry = make_readable_exclusive_migration_entry(swp_offset(entry)); 2400 else 2401 entry = make_readable_migration_entry(swp_offset(entry)); 2402 newpmd = swp_entry_to_pmd(entry); 2403 if (pmd_swp_soft_dirty(*pmd)) 2404 newpmd = pmd_swp_mksoft_dirty(newpmd); 2405 } else { 2406 newpmd = *pmd; 2407 } 2408 2409 if (uffd_wp) 2410 newpmd = pmd_swp_mkuffd_wp(newpmd); 2411 else if (uffd_wp_resolve) 2412 newpmd = pmd_swp_clear_uffd_wp(newpmd); 2413 if (!pmd_same(*pmd, newpmd)) 2414 set_pmd_at(mm, addr, pmd, newpmd); 2415 goto unlock; 2416 } 2417 #endif 2418 2419 if (prot_numa) { 2420 struct folio *folio; 2421 bool toptier; 2422 /* 2423 * Avoid trapping faults against the zero page. The read-only 2424 * data is likely to be read-cached on the local CPU and 2425 * local/remote hits to the zero page are not interesting. 
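 * A PMD that is already pmd_protnone() is skipped below as well: it is
 * already set up to take a NUMA hinting fault, so there is nothing to do.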
2426 */ 2427 if (is_huge_zero_pmd(*pmd)) 2428 goto unlock; 2429 2430 if (pmd_protnone(*pmd)) 2431 goto unlock; 2432 2433 folio = pmd_folio(*pmd); 2434 toptier = node_is_toptier(folio_nid(folio)); 2435 /* 2436 * Skip scanning top tier node if normal numa 2437 * balancing is disabled 2438 */ 2439 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) && 2440 toptier) 2441 goto unlock; 2442 2443 if (folio_use_access_time(folio)) 2444 folio_xchg_access_time(folio, 2445 jiffies_to_msecs(jiffies)); 2446 } 2447 /* 2448 * In case prot_numa, we are under mmap_read_lock(mm). It's critical 2449 * to not clear pmd intermittently to avoid race with MADV_DONTNEED 2450 * which is also under mmap_read_lock(mm): 2451 * 2452 * CPU0: CPU1: 2453 * change_huge_pmd(prot_numa=1) 2454 * pmdp_huge_get_and_clear_notify() 2455 * madvise_dontneed() 2456 * zap_pmd_range() 2457 * pmd_trans_huge(*pmd) == 0 (without ptl) 2458 * // skip the pmd 2459 * set_pmd_at(); 2460 * // pmd is re-established 2461 * 2462 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it 2463 * which may break userspace. 2464 * 2465 * pmdp_invalidate_ad() is required to make sure we don't miss 2466 * dirty/young flags set by hardware. 2467 */ 2468 oldpmd = pmdp_invalidate_ad(vma, addr, pmd); 2469 2470 entry = pmd_modify(oldpmd, newprot); 2471 if (uffd_wp) 2472 entry = pmd_mkuffd_wp(entry); 2473 else if (uffd_wp_resolve) 2474 /* 2475 * Leave the write bit to be handled by PF interrupt 2476 * handler, then things like COW could be properly 2477 * handled. 2478 */ 2479 entry = pmd_clear_uffd_wp(entry); 2480 2481 /* See change_pte_range(). */ 2482 if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) && 2483 can_change_pmd_writable(vma, addr, entry)) 2484 entry = pmd_mkwrite(entry, vma); 2485 2486 ret = HPAGE_PMD_NR; 2487 set_pmd_at(mm, addr, pmd, entry); 2488 2489 if (huge_pmd_needs_flush(oldpmd, entry)) 2490 tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE); 2491 unlock: 2492 spin_unlock(ptl); 2493 return ret; 2494 } 2495 2496 /* 2497 * Returns: 2498 * 2499 * - 0: if pud leaf changed from under us 2500 * - 1: if pud can be skipped 2501 * - HPAGE_PUD_NR: if pud was successfully processed 2502 */ 2503 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 2504 int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, 2505 pud_t *pudp, unsigned long addr, pgprot_t newprot, 2506 unsigned long cp_flags) 2507 { 2508 struct mm_struct *mm = vma->vm_mm; 2509 pud_t oldpud, entry; 2510 spinlock_t *ptl; 2511 2512 tlb_change_page_size(tlb, HPAGE_PUD_SIZE); 2513 2514 /* NUMA balancing doesn't apply to dax */ 2515 if (cp_flags & MM_CP_PROT_NUMA) 2516 return 1; 2517 2518 /* 2519 * Huge entries on userfault-wp only works with anonymous, while we 2520 * don't have anonymous PUDs yet. 2521 */ 2522 if (WARN_ON_ONCE(cp_flags & MM_CP_UFFD_WP_ALL)) 2523 return 1; 2524 2525 ptl = __pud_trans_huge_lock(pudp, vma); 2526 if (!ptl) 2527 return 0; 2528 2529 /* 2530 * Can't clear PUD or it can race with concurrent zapping. See 2531 * change_huge_pmd(). 2532 */ 2533 oldpud = pudp_invalidate(vma, addr, pudp); 2534 entry = pud_modify(oldpud, newprot); 2535 set_pud_at(mm, addr, pudp, entry); 2536 tlb_flush_pud_range(tlb, addr, HPAGE_PUD_SIZE); 2537 2538 spin_unlock(ptl); 2539 return HPAGE_PUD_NR; 2540 } 2541 #endif 2542 2543 #ifdef CONFIG_USERFAULTFD 2544 /* 2545 * The PT lock for src_pmd and dst_vma/src_vma (for reading) are locked by 2546 * the caller, but it must return after releasing the page_table_lock. 
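 * Internally, src_ptl is dropped after the initial checks and re-taken
 * together with the destination lock via double_pt_lock(); both are
 * released again before returning.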
2547 * Just move the page from src_pmd to dst_pmd if possible. 2548 * Return zero if succeeded in moving the page, -EAGAIN if it needs to be 2549 * repeated by the caller, or other errors in case of failure. 2550 */ 2551 int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval, 2552 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, 2553 unsigned long dst_addr, unsigned long src_addr) 2554 { 2555 pmd_t _dst_pmd, src_pmdval; 2556 struct page *src_page; 2557 struct folio *src_folio; 2558 struct anon_vma *src_anon_vma; 2559 spinlock_t *src_ptl, *dst_ptl; 2560 pgtable_t src_pgtable; 2561 struct mmu_notifier_range range; 2562 int err = 0; 2563 2564 src_pmdval = *src_pmd; 2565 src_ptl = pmd_lockptr(mm, src_pmd); 2566 2567 lockdep_assert_held(src_ptl); 2568 vma_assert_locked(src_vma); 2569 vma_assert_locked(dst_vma); 2570 2571 /* Sanity checks before the operation */ 2572 if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) || 2573 WARN_ON_ONCE(dst_addr & ~HPAGE_PMD_MASK)) { 2574 spin_unlock(src_ptl); 2575 return -EINVAL; 2576 } 2577 2578 if (!pmd_trans_huge(src_pmdval)) { 2579 spin_unlock(src_ptl); 2580 if (is_pmd_migration_entry(src_pmdval)) { 2581 pmd_migration_entry_wait(mm, &src_pmdval); 2582 return -EAGAIN; 2583 } 2584 return -ENOENT; 2585 } 2586 2587 src_page = pmd_page(src_pmdval); 2588 2589 if (!is_huge_zero_pmd(src_pmdval)) { 2590 if (unlikely(!PageAnonExclusive(src_page))) { 2591 spin_unlock(src_ptl); 2592 return -EBUSY; 2593 } 2594 2595 src_folio = page_folio(src_page); 2596 folio_get(src_folio); 2597 } else 2598 src_folio = NULL; 2599 2600 spin_unlock(src_ptl); 2601 2602 flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE); 2603 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, src_addr, 2604 src_addr + HPAGE_PMD_SIZE); 2605 mmu_notifier_invalidate_range_start(&range); 2606 2607 if (src_folio) { 2608 folio_lock(src_folio); 2609 2610 /* 2611 * split_huge_page walks the anon_vma chain without the page 2612 * lock. Serialize against it with the anon_vma lock, the page 2613 * lock is not enough. 2614 */ 2615 src_anon_vma = folio_get_anon_vma(src_folio); 2616 if (!src_anon_vma) { 2617 err = -EAGAIN; 2618 goto unlock_folio; 2619 } 2620 anon_vma_lock_write(src_anon_vma); 2621 } else 2622 src_anon_vma = NULL; 2623 2624 dst_ptl = pmd_lockptr(mm, dst_pmd); 2625 double_pt_lock(src_ptl, dst_ptl); 2626 if (unlikely(!pmd_same(*src_pmd, src_pmdval) || 2627 !pmd_same(*dst_pmd, dst_pmdval))) { 2628 err = -EAGAIN; 2629 goto unlock_ptls; 2630 } 2631 if (src_folio) { 2632 if (folio_maybe_dma_pinned(src_folio) || 2633 !PageAnonExclusive(&src_folio->page)) { 2634 err = -EBUSY; 2635 goto unlock_ptls; 2636 } 2637 2638 if (WARN_ON_ONCE(!folio_test_head(src_folio)) || 2639 WARN_ON_ONCE(!folio_test_anon(src_folio))) { 2640 err = -EBUSY; 2641 goto unlock_ptls; 2642 } 2643 2644 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd); 2645 /* Folio got pinned from under us. Put it back and fail the move. 
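 * (GUP-fast may have pinned the folio after the earlier check; once the PMD
 * has been cleared and the TLB flushed, no new pins can be taken through
 * this mapping, so this recheck is conclusive.)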
*/ 2646 if (folio_maybe_dma_pinned(src_folio)) { 2647 set_pmd_at(mm, src_addr, src_pmd, src_pmdval); 2648 err = -EBUSY; 2649 goto unlock_ptls; 2650 } 2651 2652 folio_move_anon_rmap(src_folio, dst_vma); 2653 src_folio->index = linear_page_index(dst_vma, dst_addr); 2654 2655 _dst_pmd = folio_mk_pmd(src_folio, dst_vma->vm_page_prot); 2656 /* Follow mremap() behavior and treat the entry dirty after the move */ 2657 _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma); 2658 } else { 2659 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd); 2660 _dst_pmd = folio_mk_pmd(src_folio, dst_vma->vm_page_prot); 2661 } 2662 set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd); 2663 2664 src_pgtable = pgtable_trans_huge_withdraw(mm, src_pmd); 2665 pgtable_trans_huge_deposit(mm, dst_pmd, src_pgtable); 2666 unlock_ptls: 2667 double_pt_unlock(src_ptl, dst_ptl); 2668 if (src_anon_vma) { 2669 anon_vma_unlock_write(src_anon_vma); 2670 put_anon_vma(src_anon_vma); 2671 } 2672 unlock_folio: 2673 /* unblock rmap walks */ 2674 if (src_folio) 2675 folio_unlock(src_folio); 2676 mmu_notifier_invalidate_range_end(&range); 2677 if (src_folio) 2678 folio_put(src_folio); 2679 return err; 2680 } 2681 #endif /* CONFIG_USERFAULTFD */ 2682 2683 /* 2684 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise. 2685 * 2686 * Note that if it returns page table lock pointer, this routine returns without 2687 * unlocking page table lock. So callers must unlock it. 2688 */ 2689 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) 2690 { 2691 spinlock_t *ptl; 2692 ptl = pmd_lock(vma->vm_mm, pmd); 2693 if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || 2694 pmd_devmap(*pmd))) 2695 return ptl; 2696 spin_unlock(ptl); 2697 return NULL; 2698 } 2699 2700 /* 2701 * Returns page table lock pointer if a given pud maps a thp, NULL otherwise. 2702 * 2703 * Note that if it returns page table lock pointer, this routine returns without 2704 * unlocking page table lock. So callers must unlock it. 
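 *
 * Typical usage (as in zap_huge_pud() below):
 *
 *	ptl = __pud_trans_huge_lock(pud, vma);
 *	if (!ptl)
 *		return 0;
 *	... operate on the huge pud ...
 *	spin_unlock(ptl);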
2705 */ 2706 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) 2707 { 2708 spinlock_t *ptl; 2709 2710 ptl = pud_lock(vma->vm_mm, pud); 2711 if (likely(pud_trans_huge(*pud) || pud_devmap(*pud))) 2712 return ptl; 2713 spin_unlock(ptl); 2714 return NULL; 2715 } 2716 2717 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 2718 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, 2719 pud_t *pud, unsigned long addr) 2720 { 2721 spinlock_t *ptl; 2722 pud_t orig_pud; 2723 2724 ptl = __pud_trans_huge_lock(pud, vma); 2725 if (!ptl) 2726 return 0; 2727 2728 orig_pud = pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm); 2729 arch_check_zapped_pud(vma, orig_pud); 2730 tlb_remove_pud_tlb_entry(tlb, pud, addr); 2731 if (!vma_is_dax(vma) && vma_is_special_huge(vma)) { 2732 spin_unlock(ptl); 2733 /* No zero page support yet */ 2734 } else { 2735 struct page *page = NULL; 2736 struct folio *folio; 2737 2738 /* No support for anonymous PUD pages or migration yet */ 2739 VM_WARN_ON_ONCE(vma_is_anonymous(vma) || 2740 !pud_present(orig_pud)); 2741 2742 page = pud_page(orig_pud); 2743 folio = page_folio(page); 2744 folio_remove_rmap_pud(folio, page, vma); 2745 add_mm_counter(tlb->mm, mm_counter_file(folio), -HPAGE_PUD_NR); 2746 2747 spin_unlock(ptl); 2748 tlb_remove_page_size(tlb, page, HPAGE_PUD_SIZE); 2749 } 2750 return 1; 2751 } 2752 2753 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, 2754 unsigned long haddr) 2755 { 2756 struct folio *folio; 2757 struct page *page; 2758 pud_t old_pud; 2759 2760 VM_BUG_ON(haddr & ~HPAGE_PUD_MASK); 2761 VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 2762 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); 2763 VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud)); 2764 2765 count_vm_event(THP_SPLIT_PUD); 2766 2767 old_pud = pudp_huge_clear_flush(vma, haddr, pud); 2768 2769 if (!vma_is_dax(vma)) 2770 return; 2771 2772 page = pud_page(old_pud); 2773 folio = page_folio(page); 2774 2775 if (!folio_test_dirty(folio) && pud_dirty(old_pud)) 2776 folio_mark_dirty(folio); 2777 if (!folio_test_referenced(folio) && pud_young(old_pud)) 2778 folio_set_referenced(folio); 2779 folio_remove_rmap_pud(folio, page, vma); 2780 folio_put(folio); 2781 add_mm_counter(vma->vm_mm, mm_counter_file(folio), 2782 -HPAGE_PUD_NR); 2783 } 2784 2785 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, 2786 unsigned long address) 2787 { 2788 spinlock_t *ptl; 2789 struct mmu_notifier_range range; 2790 2791 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 2792 address & HPAGE_PUD_MASK, 2793 (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE); 2794 mmu_notifier_invalidate_range_start(&range); 2795 ptl = pud_lock(vma->vm_mm, pud); 2796 if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud))) 2797 goto out; 2798 __split_huge_pud_locked(vma, pud, range.start); 2799 2800 out: 2801 spin_unlock(ptl); 2802 mmu_notifier_invalidate_range_end(&range); 2803 } 2804 #else 2805 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, 2806 unsigned long address) 2807 { 2808 } 2809 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 2810 2811 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, 2812 unsigned long haddr, pmd_t *pmd) 2813 { 2814 struct mm_struct *mm = vma->vm_mm; 2815 pgtable_t pgtable; 2816 pmd_t _pmd, old_pmd; 2817 unsigned long addr; 2818 pte_t *pte; 2819 int i; 2820 2821 /* 2822 * Leave pmd empty until pte is filled note that it is fine to delay 2823 * notification until 
mmu_notifier_invalidate_range_end() as we are 2824 * replacing a zero pmd write protected page with a zero pte write 2825 * protected page. 2826 * 2827 * See Documentation/mm/mmu_notifier.rst 2828 */ 2829 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd); 2830 2831 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2832 pmd_populate(mm, &_pmd, pgtable); 2833 2834 pte = pte_offset_map(&_pmd, haddr); 2835 VM_BUG_ON(!pte); 2836 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { 2837 pte_t entry; 2838 2839 entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot); 2840 entry = pte_mkspecial(entry); 2841 if (pmd_uffd_wp(old_pmd)) 2842 entry = pte_mkuffd_wp(entry); 2843 VM_BUG_ON(!pte_none(ptep_get(pte))); 2844 set_pte_at(mm, addr, pte, entry); 2845 pte++; 2846 } 2847 pte_unmap(pte - 1); 2848 smp_wmb(); /* make pte visible before pmd */ 2849 pmd_populate(mm, pmd, pgtable); 2850 } 2851 2852 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, 2853 unsigned long haddr, bool freeze) 2854 { 2855 struct mm_struct *mm = vma->vm_mm; 2856 struct folio *folio; 2857 struct page *page; 2858 pgtable_t pgtable; 2859 pmd_t old_pmd, _pmd; 2860 bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false; 2861 bool anon_exclusive = false, dirty = false; 2862 unsigned long addr; 2863 pte_t *pte; 2864 int i; 2865 2866 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); 2867 VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 2868 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); 2869 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd) 2870 && !pmd_devmap(*pmd)); 2871 2872 count_vm_event(THP_SPLIT_PMD); 2873 2874 if (!vma_is_anonymous(vma)) { 2875 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd); 2876 /* 2877 * We are going to unmap this huge page. So 2878 * just go ahead and zap it 2879 */ 2880 if (arch_needs_pgtable_deposit()) 2881 zap_deposited_table(mm, pmd); 2882 if (!vma_is_dax(vma) && vma_is_special_huge(vma)) 2883 return; 2884 if (unlikely(is_pmd_migration_entry(old_pmd))) { 2885 swp_entry_t entry; 2886 2887 entry = pmd_to_swp_entry(old_pmd); 2888 folio = pfn_swap_entry_folio(entry); 2889 } else if (is_huge_zero_pmd(old_pmd)) { 2890 return; 2891 } else { 2892 page = pmd_page(old_pmd); 2893 folio = page_folio(page); 2894 if (!folio_test_dirty(folio) && pmd_dirty(old_pmd)) 2895 folio_mark_dirty(folio); 2896 if (!folio_test_referenced(folio) && pmd_young(old_pmd)) 2897 folio_set_referenced(folio); 2898 folio_remove_rmap_pmd(folio, page, vma); 2899 folio_put(folio); 2900 } 2901 add_mm_counter(mm, mm_counter_file(folio), -HPAGE_PMD_NR); 2902 return; 2903 } 2904 2905 if (is_huge_zero_pmd(*pmd)) { 2906 /* 2907 * FIXME: Do we want to invalidate secondary mmu by calling 2908 * mmu_notifier_arch_invalidate_secondary_tlbs() see comments below 2909 * inside __split_huge_pmd() ? 2910 * 2911 * We are going from a zero huge page write protected to zero 2912 * small page also write protected so it does not seems useful 2913 * to invalidate secondary mmu at this time. 
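 * __split_huge_zero_page_pmd() above repopulates the range with
 * pte_special() mappings of the small zero page, write protected, carrying
 * over the uffd-wp bit from the old PMD.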
2914 */ 2915 return __split_huge_zero_page_pmd(vma, haddr, pmd); 2916 } 2917 2918 pmd_migration = is_pmd_migration_entry(*pmd); 2919 if (unlikely(pmd_migration)) { 2920 swp_entry_t entry; 2921 2922 old_pmd = *pmd; 2923 entry = pmd_to_swp_entry(old_pmd); 2924 page = pfn_swap_entry_to_page(entry); 2925 write = is_writable_migration_entry(entry); 2926 if (PageAnon(page)) 2927 anon_exclusive = is_readable_exclusive_migration_entry(entry); 2928 young = is_migration_entry_young(entry); 2929 dirty = is_migration_entry_dirty(entry); 2930 soft_dirty = pmd_swp_soft_dirty(old_pmd); 2931 uffd_wp = pmd_swp_uffd_wp(old_pmd); 2932 } else { 2933 /* 2934 * Up to this point the pmd is present and huge and userland has 2935 * the whole access to the hugepage during the split (which 2936 * happens in place). If we overwrite the pmd with the not-huge 2937 * version pointing to the pte here (which of course we could if 2938 * all CPUs were bug free), userland could trigger a small page 2939 * size TLB miss on the small sized TLB while the hugepage TLB 2940 * entry is still established in the huge TLB. Some CPU doesn't 2941 * like that. See 2942 * http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum 2943 * 383 on page 105. Intel should be safe but is also warns that 2944 * it's only safe if the permission and cache attributes of the 2945 * two entries loaded in the two TLB is identical (which should 2946 * be the case here). But it is generally safer to never allow 2947 * small and huge TLB entries for the same virtual address to be 2948 * loaded simultaneously. So instead of doing "pmd_populate(); 2949 * flush_pmd_tlb_range();" we first mark the current pmd 2950 * notpresent (atomically because here the pmd_trans_huge must 2951 * remain set at all times on the pmd until the split is 2952 * complete for this pmd), then we flush the SMP TLB and finally 2953 * we write the non-huge version of the pmd entry with 2954 * pmd_populate. 2955 */ 2956 old_pmd = pmdp_invalidate(vma, haddr, pmd); 2957 page = pmd_page(old_pmd); 2958 folio = page_folio(page); 2959 if (pmd_dirty(old_pmd)) { 2960 dirty = true; 2961 folio_set_dirty(folio); 2962 } 2963 write = pmd_write(old_pmd); 2964 young = pmd_young(old_pmd); 2965 soft_dirty = pmd_soft_dirty(old_pmd); 2966 uffd_wp = pmd_uffd_wp(old_pmd); 2967 2968 VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio); 2969 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); 2970 2971 /* 2972 * Without "freeze", we'll simply split the PMD, propagating the 2973 * PageAnonExclusive() flag for each PTE by setting it for 2974 * each subpage -- no need to (temporarily) clear. 2975 * 2976 * With "freeze" we want to replace mapped pages by 2977 * migration entries right away. This is only possible if we 2978 * managed to clear PageAnonExclusive() -- see 2979 * set_pmd_migration_entry(). 2980 * 2981 * In case we cannot clear PageAnonExclusive(), split the PMD 2982 * only and let try_to_migrate_one() fail later. 2983 * 2984 * See folio_try_share_anon_rmap_pmd(): invalidate PMD first. 2985 */ 2986 anon_exclusive = PageAnonExclusive(page); 2987 if (freeze && anon_exclusive && 2988 folio_try_share_anon_rmap_pmd(folio, page)) 2989 freeze = false; 2990 if (!freeze) { 2991 rmap_t rmap_flags = RMAP_NONE; 2992 2993 folio_ref_add(folio, HPAGE_PMD_NR - 1); 2994 if (anon_exclusive) 2995 rmap_flags |= RMAP_EXCLUSIVE; 2996 folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR, 2997 vma, haddr, rmap_flags); 2998 } 2999 } 3000 3001 /* 3002 * Withdraw the table only after we mark the pmd entry invalid. 
3003 * This's critical for some architectures (Power). 3004 */ 3005 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 3006 pmd_populate(mm, &_pmd, pgtable); 3007 3008 pte = pte_offset_map(&_pmd, haddr); 3009 VM_BUG_ON(!pte); 3010 3011 /* 3012 * Note that NUMA hinting access restrictions are not transferred to 3013 * avoid any possibility of altering permissions across VMAs. 3014 */ 3015 if (freeze || pmd_migration) { 3016 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { 3017 pte_t entry; 3018 swp_entry_t swp_entry; 3019 3020 if (write) 3021 swp_entry = make_writable_migration_entry( 3022 page_to_pfn(page + i)); 3023 else if (anon_exclusive) 3024 swp_entry = make_readable_exclusive_migration_entry( 3025 page_to_pfn(page + i)); 3026 else 3027 swp_entry = make_readable_migration_entry( 3028 page_to_pfn(page + i)); 3029 if (young) 3030 swp_entry = make_migration_entry_young(swp_entry); 3031 if (dirty) 3032 swp_entry = make_migration_entry_dirty(swp_entry); 3033 entry = swp_entry_to_pte(swp_entry); 3034 if (soft_dirty) 3035 entry = pte_swp_mksoft_dirty(entry); 3036 if (uffd_wp) 3037 entry = pte_swp_mkuffd_wp(entry); 3038 3039 VM_WARN_ON(!pte_none(ptep_get(pte + i))); 3040 set_pte_at(mm, addr, pte + i, entry); 3041 } 3042 } else { 3043 pte_t entry; 3044 3045 entry = mk_pte(page, READ_ONCE(vma->vm_page_prot)); 3046 if (write) 3047 entry = pte_mkwrite(entry, vma); 3048 if (!young) 3049 entry = pte_mkold(entry); 3050 /* NOTE: this may set soft-dirty too on some archs */ 3051 if (dirty) 3052 entry = pte_mkdirty(entry); 3053 if (soft_dirty) 3054 entry = pte_mksoft_dirty(entry); 3055 if (uffd_wp) 3056 entry = pte_mkuffd_wp(entry); 3057 3058 for (i = 0; i < HPAGE_PMD_NR; i++) 3059 VM_WARN_ON(!pte_none(ptep_get(pte + i))); 3060 3061 set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR); 3062 } 3063 pte_unmap(pte); 3064 3065 if (!pmd_migration) 3066 folio_remove_rmap_pmd(folio, page, vma); 3067 if (freeze) 3068 put_page(page); 3069 3070 smp_wmb(); /* make pte visible before pmd */ 3071 pmd_populate(mm, pmd, pgtable); 3072 } 3073 3074 void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address, 3075 pmd_t *pmd, bool freeze, struct folio *folio) 3076 { 3077 bool pmd_migration = is_pmd_migration_entry(*pmd); 3078 3079 VM_WARN_ON_ONCE(folio && !folio_test_pmd_mappable(folio)); 3080 VM_WARN_ON_ONCE(!IS_ALIGNED(address, HPAGE_PMD_SIZE)); 3081 VM_WARN_ON_ONCE(folio && !folio_test_locked(folio)); 3082 VM_BUG_ON(freeze && !folio); 3083 3084 /* 3085 * When the caller requests to set up a migration entry, we 3086 * require a folio to check the PMD against. Otherwise, there 3087 * is a risk of replacing the wrong folio. 3088 */ 3089 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) || pmd_migration) { 3090 /* 3091 * Do not apply pmd_folio() to a migration entry; and folio lock 3092 * guarantees that it must be of the wrong folio anyway. 
3093 */ 3094 if (folio && (pmd_migration || folio != pmd_folio(*pmd))) 3095 return; 3096 __split_huge_pmd_locked(vma, pmd, address, freeze); 3097 } 3098 } 3099 3100 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 3101 unsigned long address, bool freeze, struct folio *folio) 3102 { 3103 spinlock_t *ptl; 3104 struct mmu_notifier_range range; 3105 3106 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 3107 address & HPAGE_PMD_MASK, 3108 (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE); 3109 mmu_notifier_invalidate_range_start(&range); 3110 ptl = pmd_lock(vma->vm_mm, pmd); 3111 split_huge_pmd_locked(vma, range.start, pmd, freeze, folio); 3112 spin_unlock(ptl); 3113 mmu_notifier_invalidate_range_end(&range); 3114 } 3115 3116 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, 3117 bool freeze, struct folio *folio) 3118 { 3119 pmd_t *pmd = mm_find_pmd(vma->vm_mm, address); 3120 3121 if (!pmd) 3122 return; 3123 3124 __split_huge_pmd(vma, pmd, address, freeze, folio); 3125 } 3126 3127 static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address) 3128 { 3129 /* 3130 * If the new address isn't hpage aligned and it could previously 3131 * contain an hugepage: check if we need to split an huge pmd. 3132 */ 3133 if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) && 3134 range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE), 3135 ALIGN(address, HPAGE_PMD_SIZE))) 3136 split_huge_pmd_address(vma, address, false, NULL); 3137 } 3138 3139 void vma_adjust_trans_huge(struct vm_area_struct *vma, 3140 unsigned long start, 3141 unsigned long end, 3142 struct vm_area_struct *next) 3143 { 3144 /* Check if we need to split start first. */ 3145 split_huge_pmd_if_needed(vma, start); 3146 3147 /* Check if we need to split end next. */ 3148 split_huge_pmd_if_needed(vma, end); 3149 3150 /* If we're incrementing next->vm_start, we might need to split it. */ 3151 if (next) 3152 split_huge_pmd_if_needed(next, end); 3153 } 3154 3155 static void unmap_folio(struct folio *folio) 3156 { 3157 enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SYNC | 3158 TTU_BATCH_FLUSH; 3159 3160 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); 3161 3162 if (folio_test_pmd_mappable(folio)) 3163 ttu_flags |= TTU_SPLIT_HUGE_PMD; 3164 3165 /* 3166 * Anon pages need migration entries to preserve them, but file 3167 * pages can simply be left unmapped, then faulted back on demand. 3168 * If that is ever changed (perhaps for mlock), update remap_page(). 
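 * TTU_BATCH_FLUSH defers the TLB flushes; try_to_unmap_flush() below issues
 * any pending flush before we return.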
3169 */ 3170 if (folio_test_anon(folio)) 3171 try_to_migrate(folio, ttu_flags); 3172 else 3173 try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK); 3174 3175 try_to_unmap_flush(); 3176 } 3177 3178 static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma, 3179 unsigned long addr, pmd_t *pmdp, 3180 struct folio *folio) 3181 { 3182 struct mm_struct *mm = vma->vm_mm; 3183 int ref_count, map_count; 3184 pmd_t orig_pmd = *pmdp; 3185 3186 if (pmd_dirty(orig_pmd)) 3187 folio_set_dirty(folio); 3188 if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) { 3189 folio_set_swapbacked(folio); 3190 return false; 3191 } 3192 3193 orig_pmd = pmdp_huge_clear_flush(vma, addr, pmdp); 3194 3195 /* 3196 * Syncing against concurrent GUP-fast: 3197 * - clear PMD; barrier; read refcount 3198 * - inc refcount; barrier; read PMD 3199 */ 3200 smp_mb(); 3201 3202 ref_count = folio_ref_count(folio); 3203 map_count = folio_mapcount(folio); 3204 3205 /* 3206 * Order reads for folio refcount and dirty flag 3207 * (see comments in __remove_mapping()). 3208 */ 3209 smp_rmb(); 3210 3211 /* 3212 * If the folio or its PMD is redirtied at this point, or if there 3213 * are unexpected references, we will give up to discard this folio 3214 * and remap it. 3215 * 3216 * The only folio refs must be one from isolation plus the rmap(s). 3217 */ 3218 if (pmd_dirty(orig_pmd)) 3219 folio_set_dirty(folio); 3220 if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) { 3221 folio_set_swapbacked(folio); 3222 set_pmd_at(mm, addr, pmdp, orig_pmd); 3223 return false; 3224 } 3225 3226 if (ref_count != map_count + 1) { 3227 set_pmd_at(mm, addr, pmdp, orig_pmd); 3228 return false; 3229 } 3230 3231 folio_remove_rmap_pmd(folio, pmd_page(orig_pmd), vma); 3232 zap_deposited_table(mm, pmdp); 3233 add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR); 3234 if (vma->vm_flags & VM_LOCKED) 3235 mlock_drain_local(); 3236 folio_put(folio); 3237 3238 return true; 3239 } 3240 3241 bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr, 3242 pmd_t *pmdp, struct folio *folio) 3243 { 3244 VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio); 3245 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); 3246 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); 3247 VM_WARN_ON_FOLIO(folio_test_swapbacked(folio), folio); 3248 VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE)); 3249 3250 return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio); 3251 } 3252 3253 static void remap_page(struct folio *folio, unsigned long nr, int flags) 3254 { 3255 int i = 0; 3256 3257 /* If unmap_folio() uses try_to_migrate() on file, remove this check */ 3258 if (!folio_test_anon(folio)) 3259 return; 3260 for (;;) { 3261 remove_migration_ptes(folio, folio, RMP_LOCKED | flags); 3262 i += folio_nr_pages(folio); 3263 if (i >= nr) 3264 break; 3265 folio = folio_next(folio); 3266 } 3267 } 3268 3269 static void lru_add_split_folio(struct folio *folio, struct folio *new_folio, 3270 struct lruvec *lruvec, struct list_head *list) 3271 { 3272 VM_BUG_ON_FOLIO(folio_test_lru(new_folio), folio); 3273 lockdep_assert_held(&lruvec->lru_lock); 3274 3275 if (list) { 3276 /* page reclaim is reclaiming a huge page */ 3277 VM_WARN_ON(folio_test_lru(folio)); 3278 folio_get(new_folio); 3279 list_add_tail(&new_folio->lru, list); 3280 } else { 3281 /* head is still on lru (and we have it frozen) */ 3282 VM_WARN_ON(!folio_test_lru(folio)); 3283 if (folio_test_unevictable(folio)) 3284 new_folio->mlock_count = 0; 3285 else 3286 list_add_tail(&new_folio->lru, 
&folio->lru); 3287 folio_set_lru(new_folio); 3288 } 3289 } 3290 3291 /* Racy check whether the huge page can be split */ 3292 bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins) 3293 { 3294 int extra_pins; 3295 3296 /* Additional pins from page cache */ 3297 if (folio_test_anon(folio)) 3298 extra_pins = folio_test_swapcache(folio) ? 3299 folio_nr_pages(folio) : 0; 3300 else 3301 extra_pins = folio_nr_pages(folio); 3302 if (pextra_pins) 3303 *pextra_pins = extra_pins; 3304 return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 3305 caller_pins; 3306 } 3307 3308 /* 3309 * It splits @folio into @new_order folios and copies the @folio metadata to 3310 * all the resulting folios. 3311 */ 3312 static void __split_folio_to_order(struct folio *folio, int old_order, 3313 int new_order) 3314 { 3315 long new_nr_pages = 1 << new_order; 3316 long nr_pages = 1 << old_order; 3317 long i; 3318 3319 /* 3320 * Skip the first new_nr_pages, since the new folio from them have all 3321 * the flags from the original folio. 3322 */ 3323 for (i = new_nr_pages; i < nr_pages; i += new_nr_pages) { 3324 struct page *new_head = &folio->page + i; 3325 3326 /* 3327 * Careful: new_folio is not a "real" folio before we cleared PageTail. 3328 * Don't pass it around before clear_compound_head(). 3329 */ 3330 struct folio *new_folio = (struct folio *)new_head; 3331 3332 VM_BUG_ON_PAGE(atomic_read(&new_folio->_mapcount) != -1, new_head); 3333 3334 /* 3335 * Clone page flags before unfreezing refcount. 3336 * 3337 * After successful get_page_unless_zero() might follow flags change, 3338 * for example lock_page() which set PG_waiters. 3339 * 3340 * Note that for mapped sub-pages of an anonymous THP, 3341 * PG_anon_exclusive has been cleared in unmap_folio() and is stored in 3342 * the migration entry instead from where remap_page() will restore it. 3343 * We can still have PG_anon_exclusive set on effectively unmapped and 3344 * unreferenced sub-pages of an anonymous THP: we can simply drop 3345 * PG_anon_exclusive (-> PG_mappedtodisk) for these here. 3346 */ 3347 new_folio->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 3348 new_folio->flags |= (folio->flags & 3349 ((1L << PG_referenced) | 3350 (1L << PG_swapbacked) | 3351 (1L << PG_swapcache) | 3352 (1L << PG_mlocked) | 3353 (1L << PG_uptodate) | 3354 (1L << PG_active) | 3355 (1L << PG_workingset) | 3356 (1L << PG_locked) | 3357 (1L << PG_unevictable) | 3358 #ifdef CONFIG_ARCH_USES_PG_ARCH_2 3359 (1L << PG_arch_2) | 3360 #endif 3361 #ifdef CONFIG_ARCH_USES_PG_ARCH_3 3362 (1L << PG_arch_3) | 3363 #endif 3364 (1L << PG_dirty) | 3365 LRU_GEN_MASK | LRU_REFS_MASK)); 3366 3367 new_folio->mapping = folio->mapping; 3368 new_folio->index = folio->index + i; 3369 3370 /* 3371 * page->private should not be set in tail pages. Fix up and warn once 3372 * if private is unexpectedly set. 3373 */ 3374 if (unlikely(new_folio->private)) { 3375 VM_WARN_ON_ONCE_PAGE(true, new_head); 3376 new_folio->private = NULL; 3377 } 3378 3379 if (folio_test_swapcache(folio)) 3380 new_folio->swap.val = folio->swap.val + i; 3381 3382 /* Page flags must be visible before we make the page non-compound. */ 3383 smp_wmb(); 3384 3385 /* 3386 * Clear PageTail before unfreezing page refcount. 3387 * 3388 * After successful get_page_unless_zero() might follow put_page() 3389 * which needs correct compound_head(). 
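 * The refcount itself stays frozen here; __split_unmapped_folio() unfreezes
 * each resulting folio only after the page cache or swap cache entries have
 * been updated.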
3390 */ 3391 clear_compound_head(new_head); 3392 if (new_order) { 3393 prep_compound_page(new_head, new_order); 3394 folio_set_large_rmappable(new_folio); 3395 } 3396 3397 if (folio_test_young(folio)) 3398 folio_set_young(new_folio); 3399 if (folio_test_idle(folio)) 3400 folio_set_idle(new_folio); 3401 #ifdef CONFIG_MEMCG 3402 new_folio->memcg_data = folio->memcg_data; 3403 #endif 3404 3405 folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio)); 3406 } 3407 3408 if (new_order) 3409 folio_set_order(folio, new_order); 3410 else 3411 ClearPageCompound(&folio->page); 3412 } 3413 3414 /* 3415 * It splits an unmapped @folio to lower order smaller folios in two ways. 3416 * @folio: the to-be-split folio 3417 * @new_order: the smallest order of the after split folios (since buddy 3418 * allocator like split generates folios with orders from @folio's 3419 * order - 1 to new_order). 3420 * @split_at: in buddy allocator like split, the folio containing @split_at 3421 * will be split until its order becomes @new_order. 3422 * @lock_at: the folio containing @lock_at is left locked for caller. 3423 * @list: the after split folios will be added to @list if it is not NULL, 3424 * otherwise to LRU lists. 3425 * @end: the end of the file @folio maps to. -1 if @folio is anonymous memory. 3426 * @xas: xa_state pointing to folio->mapping->i_pages and locked by caller 3427 * @mapping: @folio->mapping 3428 * @uniform_split: if the split is uniform or not (buddy allocator like split) 3429 * 3430 * 3431 * 1. uniform split: the given @folio into multiple @new_order small folios, 3432 * where all small folios have the same order. This is done when 3433 * uniform_split is true. 3434 * 2. buddy allocator like (non-uniform) split: the given @folio is split into 3435 * half and one of the half (containing the given page) is split into half 3436 * until the given @page's order becomes @new_order. This is done when 3437 * uniform_split is false. 3438 * 3439 * The high level flow for these two methods are: 3440 * 1. uniform split: a single __split_folio_to_order() is called to split the 3441 * @folio into @new_order, then we traverse all the resulting folios one by 3442 * one in PFN ascending order and perform stats, unfreeze, adding to list, 3443 * and file mapping index operations. 3444 * 2. non-uniform split: in general, folio_order - @new_order calls to 3445 * __split_folio_to_order() are made in a for loop to split the @folio 3446 * to one lower order at a time. The resulting small folios are processed 3447 * like what is done during the traversal in 1, except the one containing 3448 * @page, which is split in next for loop. 3449 * 3450 * After splitting, the caller's folio reference will be transferred to the 3451 * folio containing @page. The other folios may be freed if they are not mapped. 3452 * 3453 * In terms of locking, after splitting, 3454 * 1. uniform split leaves @page (or the folio contains it) locked; 3455 * 2. buddy allocator like (non-uniform) split leaves @folio locked. 3456 * 3457 * 3458 * For !uniform_split, when -ENOMEM is returned, the original folio might be 3459 * split. The caller needs to check the input folio. 
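 *
 * As an illustration of the non-uniform case: splitting an order-4 page
 * cache folio to @new_order = 0, with @split_at in its first page, leaves
 * folios of order 3, 2, 1 and two of order 0, one of which contains
 * @split_at.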
3460 */ 3461 static int __split_unmapped_folio(struct folio *folio, int new_order, 3462 struct page *split_at, struct page *lock_at, 3463 struct list_head *list, pgoff_t end, 3464 struct xa_state *xas, struct address_space *mapping, 3465 bool uniform_split) 3466 { 3467 struct lruvec *lruvec; 3468 struct address_space *swap_cache = NULL; 3469 struct folio *origin_folio = folio; 3470 struct folio *next_folio = folio_next(folio); 3471 struct folio *new_folio; 3472 struct folio *next; 3473 int order = folio_order(folio); 3474 int split_order; 3475 int start_order = uniform_split ? new_order : order - 1; 3476 int nr_dropped = 0; 3477 int ret = 0; 3478 bool stop_split = false; 3479 3480 if (folio_test_swapcache(folio)) { 3481 VM_BUG_ON(mapping); 3482 3483 /* a swapcache folio can only be uniformly split to order-0 */ 3484 if (!uniform_split || new_order != 0) 3485 return -EINVAL; 3486 3487 swap_cache = swap_address_space(folio->swap); 3488 xa_lock(&swap_cache->i_pages); 3489 } 3490 3491 if (folio_test_anon(folio)) 3492 mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1); 3493 3494 /* lock lru list/PageCompound, ref frozen by page_ref_freeze */ 3495 lruvec = folio_lruvec_lock(folio); 3496 3497 folio_clear_has_hwpoisoned(folio); 3498 3499 /* 3500 * split to new_order one order at a time. For uniform split, 3501 * folio is split to new_order directly. 3502 */ 3503 for (split_order = start_order; 3504 split_order >= new_order && !stop_split; 3505 split_order--) { 3506 int old_order = folio_order(folio); 3507 struct folio *release; 3508 struct folio *end_folio = folio_next(folio); 3509 3510 /* order-1 anonymous folio is not supported */ 3511 if (folio_test_anon(folio) && split_order == 1) 3512 continue; 3513 if (uniform_split && split_order != new_order) 3514 continue; 3515 3516 if (mapping) { 3517 /* 3518 * uniform split has xas_split_alloc() called before 3519 * irq is disabled to allocate enough memory, whereas 3520 * non-uniform split can handle ENOMEM. 3521 */ 3522 if (uniform_split) 3523 xas_split(xas, folio, old_order); 3524 else { 3525 xas_set_order(xas, folio->index, split_order); 3526 xas_try_split(xas, folio, old_order); 3527 if (xas_error(xas)) { 3528 ret = xas_error(xas); 3529 stop_split = true; 3530 goto after_split; 3531 } 3532 } 3533 } 3534 3535 folio_split_memcg_refs(folio, old_order, split_order); 3536 split_page_owner(&folio->page, old_order, split_order); 3537 pgalloc_tag_split(folio, old_order, split_order); 3538 3539 __split_folio_to_order(folio, old_order, split_order); 3540 3541 after_split: 3542 /* 3543 * Iterate through after-split folios and perform related 3544 * operations. But in buddy allocator like split, the folio 3545 * containing the specified page is skipped until its order 3546 * is new_order, since the folio will be worked on in next 3547 * iteration. 3548 */ 3549 for (release = folio; release != end_folio; release = next) { 3550 next = folio_next(release); 3551 /* 3552 * for buddy allocator like split, the folio containing 3553 * page will be split next and should not be released, 3554 * until the folio's order is new_order or stop_split 3555 * is set to true by the above xas_split() failure. 
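 * origin_folio is skipped here too; it is only unfrozen after the loop,
 * once all page cache entries have been updated.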
3556 */ 3557 if (release == page_folio(split_at)) { 3558 folio = release; 3559 if (split_order != new_order && !stop_split) 3560 continue; 3561 } 3562 if (folio_test_anon(release)) { 3563 mod_mthp_stat(folio_order(release), 3564 MTHP_STAT_NR_ANON, 1); 3565 } 3566 3567 /* 3568 * origin_folio should be kept frozon until page cache 3569 * entries are updated with all the other after-split 3570 * folios to prevent others seeing stale page cache 3571 * entries. 3572 */ 3573 if (release == origin_folio) 3574 continue; 3575 3576 folio_ref_unfreeze(release, 1 + 3577 ((mapping || swap_cache) ? 3578 folio_nr_pages(release) : 0)); 3579 3580 lru_add_split_folio(origin_folio, release, lruvec, 3581 list); 3582 3583 /* Some pages can be beyond EOF: drop them from cache */ 3584 if (release->index >= end) { 3585 if (shmem_mapping(mapping)) 3586 nr_dropped += folio_nr_pages(release); 3587 else if (folio_test_clear_dirty(release)) 3588 folio_account_cleaned(release, 3589 inode_to_wb(mapping->host)); 3590 __filemap_remove_folio(release, NULL); 3591 folio_put_refs(release, folio_nr_pages(release)); 3592 } else if (mapping) { 3593 __xa_store(&mapping->i_pages, 3594 release->index, release, 0); 3595 } else if (swap_cache) { 3596 __xa_store(&swap_cache->i_pages, 3597 swap_cache_index(release->swap), 3598 release, 0); 3599 } 3600 } 3601 } 3602 3603 /* 3604 * Unfreeze origin_folio only after all page cache entries, which used 3605 * to point to it, have been updated with new folios. Otherwise, 3606 * a parallel folio_try_get() can grab origin_folio and its caller can 3607 * see stale page cache entries. 3608 */ 3609 folio_ref_unfreeze(origin_folio, 1 + 3610 ((mapping || swap_cache) ? folio_nr_pages(origin_folio) : 0)); 3611 3612 unlock_page_lruvec(lruvec); 3613 3614 if (swap_cache) 3615 xa_unlock(&swap_cache->i_pages); 3616 if (mapping) 3617 xa_unlock(&mapping->i_pages); 3618 3619 /* Caller disabled irqs, so they are still disabled here */ 3620 local_irq_enable(); 3621 3622 if (nr_dropped) 3623 shmem_uncharge(mapping->host, nr_dropped); 3624 3625 remap_page(origin_folio, 1 << order, 3626 folio_test_anon(origin_folio) ? 3627 RMP_USE_SHARED_ZEROPAGE : 0); 3628 3629 /* 3630 * At this point, folio should contain the specified page. 3631 * For uniform split, it is left for caller to unlock. 3632 * For buddy allocator like split, the first after-split folio is left 3633 * for caller to unlock. 3634 */ 3635 for (new_folio = origin_folio; new_folio != next_folio; new_folio = next) { 3636 next = folio_next(new_folio); 3637 if (new_folio == page_folio(lock_at)) 3638 continue; 3639 3640 folio_unlock(new_folio); 3641 /* 3642 * Subpages may be freed if there wasn't any mapping 3643 * like if add_to_swap() is running on a lru page that 3644 * had its mapping zapped. And freeing these pages 3645 * requires taking the lru_lock so we do the put_page 3646 * of the tail pages after the split is complete. 3647 */ 3648 free_page_and_swap_cache(&new_folio->page); 3649 } 3650 return ret; 3651 } 3652 3653 bool non_uniform_split_supported(struct folio *folio, unsigned int new_order, 3654 bool warns) 3655 { 3656 if (folio_test_anon(folio)) { 3657 /* order-1 is not supported for anonymous THP. */ 3658 VM_WARN_ONCE(warns && new_order == 1, 3659 "Cannot split to order-1 folio"); 3660 return new_order != 1; 3661 } else if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && 3662 !mapping_large_folio_support(folio->mapping)) { 3663 /* 3664 * No split if the file system does not support large folio. 
3665 * Note that we might still have THPs in such mappings due to 3666 * CONFIG_READ_ONLY_THP_FOR_FS. But in that case, the mapping 3667 * does not actually support large folios properly. 3668 */ 3669 VM_WARN_ONCE(warns, 3670 "Cannot split file folio to non-0 order"); 3671 return false; 3672 } 3673 3674 /* Only swapping a whole PMD-mapped folio is supported */ 3675 if (folio_test_swapcache(folio)) { 3676 VM_WARN_ONCE(warns, 3677 "Cannot split swapcache folio to non-0 order"); 3678 return false; 3679 } 3680 3681 return true; 3682 } 3683 3684 /* See comments in non_uniform_split_supported() */ 3685 bool uniform_split_supported(struct folio *folio, unsigned int new_order, 3686 bool warns) 3687 { 3688 if (folio_test_anon(folio)) { 3689 VM_WARN_ONCE(warns && new_order == 1, 3690 "Cannot split to order-1 folio"); 3691 return new_order != 1; 3692 } else if (new_order) { 3693 if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && 3694 !mapping_large_folio_support(folio->mapping)) { 3695 VM_WARN_ONCE(warns, 3696 "Cannot split file folio to non-0 order"); 3697 return false; 3698 } 3699 } 3700 3701 if (new_order && folio_test_swapcache(folio)) { 3702 VM_WARN_ONCE(warns, 3703 "Cannot split swapcache folio to non-0 order"); 3704 return false; 3705 } 3706 3707 return true; 3708 } 3709 3710 /* 3711 * __folio_split: split a folio at @split_at to a @new_order folio 3712 * @folio: folio to split 3713 * @new_order: the order of the new folio 3714 * @split_at: a page within the new folio 3715 * @lock_at: a page within @folio to be left locked to caller 3716 * @list: after-split folios will be put on it if non NULL 3717 * @uniform_split: perform uniform split or not (non-uniform split) 3718 * 3719 * It calls __split_unmapped_folio() to perform uniform and non-uniform split. 3720 * It is in charge of checking whether the split is supported or not and 3721 * preparing @folio for __split_unmapped_folio(). 3722 * 3723 * return: 0: successful, <0 failed (if -ENOMEM is returned, @folio might be 3724 * split but not to @new_order, the caller needs to check) 3725 */ 3726 static int __folio_split(struct folio *folio, unsigned int new_order, 3727 struct page *split_at, struct page *lock_at, 3728 struct list_head *list, bool uniform_split) 3729 { 3730 struct deferred_split *ds_queue = get_deferred_split_queue(folio); 3731 XA_STATE(xas, &folio->mapping->i_pages, folio->index); 3732 bool is_anon = folio_test_anon(folio); 3733 struct address_space *mapping = NULL; 3734 struct anon_vma *anon_vma = NULL; 3735 int order = folio_order(folio); 3736 int extra_pins, ret; 3737 pgoff_t end; 3738 bool is_hzp; 3739 3740 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 3741 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); 3742 3743 if (folio != page_folio(split_at) || folio != page_folio(lock_at)) 3744 return -EINVAL; 3745 3746 if (new_order >= folio_order(folio)) 3747 return -EINVAL; 3748 3749 if (uniform_split && !uniform_split_supported(folio, new_order, true)) 3750 return -EINVAL; 3751 3752 if (!uniform_split && 3753 !non_uniform_split_supported(folio, new_order, true)) 3754 return -EINVAL; 3755 3756 is_hzp = is_huge_zero_folio(folio); 3757 if (is_hzp) { 3758 pr_warn_ratelimited("Called split_huge_page for huge zero page\n"); 3759 return -EBUSY; 3760 } 3761 3762 if (folio_test_writeback(folio)) 3763 return -EBUSY; 3764 3765 if (is_anon) { 3766 /* 3767 * The caller does not necessarily hold an mmap_lock that would 3768 * prevent the anon_vma disappearing so we first we take a 3769 * reference to it and then lock the anon_vma for write. 
This 3770 * is similar to folio_lock_anon_vma_read except the write lock 3771 * is taken to serialise against parallel split or collapse 3772 * operations. 3773 */ 3774 anon_vma = folio_get_anon_vma(folio); 3775 if (!anon_vma) { 3776 ret = -EBUSY; 3777 goto out; 3778 } 3779 end = -1; 3780 mapping = NULL; 3781 anon_vma_lock_write(anon_vma); 3782 } else { 3783 unsigned int min_order; 3784 gfp_t gfp; 3785 3786 mapping = folio->mapping; 3787 3788 /* Truncated ? */ 3789 /* 3790 * TODO: add support for large shmem folio in swap cache. 3791 * When shmem is in swap cache, mapping is NULL and 3792 * folio_test_swapcache() is true. 3793 */ 3794 if (!mapping) { 3795 ret = -EBUSY; 3796 goto out; 3797 } 3798 3799 min_order = mapping_min_folio_order(folio->mapping); 3800 if (new_order < min_order) { 3801 VM_WARN_ONCE(1, "Cannot split mapped folio below min-order: %u", 3802 min_order); 3803 ret = -EINVAL; 3804 goto out; 3805 } 3806 3807 gfp = current_gfp_context(mapping_gfp_mask(mapping) & 3808 GFP_RECLAIM_MASK); 3809 3810 if (!filemap_release_folio(folio, gfp)) { 3811 ret = -EBUSY; 3812 goto out; 3813 } 3814 3815 if (uniform_split) { 3816 xas_set_order(&xas, folio->index, new_order); 3817 xas_split_alloc(&xas, folio, folio_order(folio), gfp); 3818 if (xas_error(&xas)) { 3819 ret = xas_error(&xas); 3820 goto out; 3821 } 3822 } 3823 3824 anon_vma = NULL; 3825 i_mmap_lock_read(mapping); 3826 3827 /* 3828 *__split_unmapped_folio() may need to trim off pages beyond 3829 * EOF: but on 32-bit, i_size_read() takes an irq-unsafe 3830 * seqlock, which cannot be nested inside the page tree lock. 3831 * So note end now: i_size itself may be changed at any moment, 3832 * but folio lock is good enough to serialize the trimming. 3833 */ 3834 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); 3835 if (shmem_mapping(mapping)) 3836 end = shmem_fallocend(mapping->host, end); 3837 } 3838 3839 /* 3840 * Racy check if we can split the page, before unmap_folio() will 3841 * split PMDs 3842 */ 3843 if (!can_split_folio(folio, 1, &extra_pins)) { 3844 ret = -EAGAIN; 3845 goto out_unlock; 3846 } 3847 3848 unmap_folio(folio); 3849 3850 /* block interrupt reentry in xa_lock and spinlock */ 3851 local_irq_disable(); 3852 if (mapping) { 3853 /* 3854 * Check if the folio is present in page cache. 3855 * We assume all tail are present too, if folio is there. 3856 */ 3857 xas_lock(&xas); 3858 xas_reset(&xas); 3859 if (xas_load(&xas) != folio) 3860 goto fail; 3861 } 3862 3863 /* Prevent deferred_split_scan() touching ->_refcount */ 3864 spin_lock(&ds_queue->split_queue_lock); 3865 if (folio_ref_freeze(folio, 1 + extra_pins)) { 3866 if (folio_order(folio) > 1 && 3867 !list_empty(&folio->_deferred_list)) { 3868 ds_queue->split_queue_len--; 3869 if (folio_test_partially_mapped(folio)) { 3870 folio_clear_partially_mapped(folio); 3871 mod_mthp_stat(folio_order(folio), 3872 MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1); 3873 } 3874 /* 3875 * Reinitialize page_deferred_list after removing the 3876 * page from the split_queue, otherwise a subsequent 3877 * split will see list corruption when checking the 3878 * page_deferred_list. 
3879 */ 3880 list_del_init(&folio->_deferred_list); 3881 } 3882 spin_unlock(&ds_queue->split_queue_lock); 3883 if (mapping) { 3884 int nr = folio_nr_pages(folio); 3885 3886 if (folio_test_pmd_mappable(folio) && 3887 new_order < HPAGE_PMD_ORDER) { 3888 if (folio_test_swapbacked(folio)) { 3889 __lruvec_stat_mod_folio(folio, 3890 NR_SHMEM_THPS, -nr); 3891 } else { 3892 __lruvec_stat_mod_folio(folio, 3893 NR_FILE_THPS, -nr); 3894 filemap_nr_thps_dec(mapping); 3895 } 3896 } 3897 } 3898 3899 ret = __split_unmapped_folio(folio, new_order, 3900 split_at, lock_at, list, end, &xas, mapping, 3901 uniform_split); 3902 } else { 3903 spin_unlock(&ds_queue->split_queue_lock); 3904 fail: 3905 if (mapping) 3906 xas_unlock(&xas); 3907 local_irq_enable(); 3908 remap_page(folio, folio_nr_pages(folio), 0); 3909 ret = -EAGAIN; 3910 } 3911 3912 out_unlock: 3913 if (anon_vma) { 3914 anon_vma_unlock_write(anon_vma); 3915 put_anon_vma(anon_vma); 3916 } 3917 if (mapping) 3918 i_mmap_unlock_read(mapping); 3919 out: 3920 xas_destroy(&xas); 3921 if (order == HPAGE_PMD_ORDER) 3922 count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); 3923 count_mthp_stat(order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED); 3924 return ret; 3925 } 3926 3927 /* 3928 * This function splits a large folio into smaller folios of order @new_order. 3929 * @page can point to any page of the large folio to split. The split operation 3930 * does not change the position of @page. 3931 * 3932 * Prerequisites: 3933 * 3934 * 1) The caller must hold a reference on the @page's owning folio, also known 3935 * as the large folio. 3936 * 3937 * 2) The large folio must be locked. 3938 * 3939 * 3) The folio must not be pinned. Any unexpected folio references, including 3940 * GUP pins, will result in the folio not getting split; instead, the caller 3941 * will receive an -EAGAIN. 3942 * 3943 * 4) @new_order > 1, usually. Splitting to order-1 anonymous folios is not 3944 * supported for non-file-backed folios, because folio->_deferred_list, which 3945 * is used by partially mapped folios, is stored in subpage 2, but an order-1 3946 * folio only has subpages 0 and 1. File-backed order-1 folios are supported, 3947 * since they do not use _deferred_list. 3948 * 3949 * After splitting, the caller's folio reference will be transferred to @page, 3950 * resulting in a raised refcount of @page after this call. The other pages may 3951 * be freed if they are not mapped. 3952 * 3953 * If @list is null, tail pages will be added to LRU list, otherwise, to @list. 3954 * 3955 * Pages in @new_order will inherit the mapping, flags, and so on from the 3956 * huge page. 3957 * 3958 * Returns 0 if the huge page was split successfully. 3959 * 3960 * Returns -EAGAIN if the folio has unexpected reference (e.g., GUP) or if 3961 * the folio was concurrently removed from the page cache. 3962 * 3963 * Returns -EBUSY when trying to split the huge zeropage, if the folio is 3964 * under writeback, if fs-specific folio metadata cannot currently be 3965 * released, or if some unexpected race happened (e.g., anon VMA disappeared, 3966 * truncation). 3967 * 3968 * Callers should ensure that the order respects the address space mapping 3969 * min-order if one is set for non-anonymous folios. 3970 * 3971 * Returns -EINVAL when trying to split to an order that is incompatible 3972 * with the folio. Splitting to order 0 is compatible with all folios. 
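 *
 * Minimal usage sketch (editor's illustration, not lifted from any in-tree
 * caller; it simply follows the prerequisites above):
 *
 *	// the caller already holds a reference on @folio, and @folio is large
 *	folio_lock(folio);
 *	err = split_huge_page_to_list_to_order(&folio->page, NULL, 0);
 *	folio_unlock(folio);
 *
 * Whether or not the split succeeded, the folio containing @page is left
 * locked and referenced, so the folio_unlock() above pairs correctly in
 * both cases.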
3973 */ 3974 int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, 3975 unsigned int new_order) 3976 { 3977 struct folio *folio = page_folio(page); 3978 3979 return __folio_split(folio, new_order, &folio->page, page, list, true); 3980 } 3981 3982 /* 3983 * folio_split: split a folio at @split_at to a @new_order folio 3984 * @folio: folio to split 3985 * @new_order: the order of the new folio 3986 * @split_at: a page within the new folio 3987 * 3988 * return: 0: successful, <0 failed (if -ENOMEM is returned, @folio might be 3989 * split but not to @new_order, the caller needs to check) 3990 * 3991 * It has the same prerequisites and returns as 3992 * split_huge_page_to_list_to_order(). 3993 * 3994 * Split a folio at @split_at to a new_order folio, leave the 3995 * remaining subpages of the original folio as large as possible. For example, 3996 * in the case of splitting an order-9 folio at its third order-3 subpages to 3997 * an order-3 folio, there are 2^(9-3)=64 order-3 subpages in the order-9 folio. 3998 * After the split, there will be a group of folios with different orders and 3999 * the new folio containing @split_at is marked in bracket: 4000 * [order-4, {order-3}, order-3, order-5, order-6, order-7, order-8]. 4001 * 4002 * After split, folio is left locked for caller. 4003 */ 4004 int folio_split(struct folio *folio, unsigned int new_order, 4005 struct page *split_at, struct list_head *list) 4006 { 4007 return __folio_split(folio, new_order, split_at, &folio->page, list, 4008 false); 4009 } 4010 4011 int min_order_for_split(struct folio *folio) 4012 { 4013 if (folio_test_anon(folio)) 4014 return 0; 4015 4016 if (!folio->mapping) { 4017 if (folio_test_pmd_mappable(folio)) 4018 count_vm_event(THP_SPLIT_PAGE_FAILED); 4019 return -EBUSY; 4020 } 4021 4022 return mapping_min_folio_order(folio->mapping); 4023 } 4024 4025 int split_folio_to_list(struct folio *folio, struct list_head *list) 4026 { 4027 int ret = min_order_for_split(folio); 4028 4029 if (ret < 0) 4030 return ret; 4031 4032 return split_huge_page_to_list_to_order(&folio->page, list, ret); 4033 } 4034 4035 /* 4036 * __folio_unqueue_deferred_split() is not to be called directly: 4037 * the folio_unqueue_deferred_split() inline wrapper in mm/internal.h 4038 * limits its calls to those folios which may have a _deferred_list for 4039 * queueing THP splits, and that list is (racily observed to be) non-empty. 4040 * 4041 * It is unsafe to call folio_unqueue_deferred_split() until folio refcount is 4042 * zero: because even when split_queue_lock is held, a non-empty _deferred_list 4043 * might be in use on deferred_split_scan()'s unlocked on-stack list. 4044 * 4045 * If memory cgroups are enabled, split_queue_lock is in the mem_cgroup: it is 4046 * therefore important to unqueue deferred split before changing folio memcg. 
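 *
 * Otherwise the folio could end up accounted to the new memcg while still
 * sitting on the old memcg's split_queue, and later queue manipulation would
 * take the wrong split_queue_lock and corrupt the list.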
4047 */ 4048 bool __folio_unqueue_deferred_split(struct folio *folio) 4049 { 4050 struct deferred_split *ds_queue; 4051 unsigned long flags; 4052 bool unqueued = false; 4053 4054 WARN_ON_ONCE(folio_ref_count(folio)); 4055 WARN_ON_ONCE(!mem_cgroup_disabled() && !folio_memcg(folio)); 4056 4057 ds_queue = get_deferred_split_queue(folio); 4058 spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 4059 if (!list_empty(&folio->_deferred_list)) { 4060 ds_queue->split_queue_len--; 4061 if (folio_test_partially_mapped(folio)) { 4062 folio_clear_partially_mapped(folio); 4063 mod_mthp_stat(folio_order(folio), 4064 MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1); 4065 } 4066 list_del_init(&folio->_deferred_list); 4067 unqueued = true; 4068 } 4069 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 4070 4071 return unqueued; /* useful for debug warnings */ 4072 } 4073 4074 /* partially_mapped=false won't clear PG_partially_mapped folio flag */ 4075 void deferred_split_folio(struct folio *folio, bool partially_mapped) 4076 { 4077 struct deferred_split *ds_queue = get_deferred_split_queue(folio); 4078 #ifdef CONFIG_MEMCG 4079 struct mem_cgroup *memcg = folio_memcg(folio); 4080 #endif 4081 unsigned long flags; 4082 4083 /* 4084 * Order 1 folios have no space for a deferred list, but we also 4085 * won't waste much memory by not adding them to the deferred list. 4086 */ 4087 if (folio_order(folio) <= 1) 4088 return; 4089 4090 if (!partially_mapped && !split_underused_thp) 4091 return; 4092 4093 /* 4094 * Exclude swapcache: originally to avoid a corrupt deferred split 4095 * queue. Nowadays that is fully prevented by memcg1_swapout(); 4096 * but if page reclaim is already handling the same folio, it is 4097 * unnecessary to handle it again in the shrinker, so excluding 4098 * swapcache here may still be a useful optimization. 
4099 */ 4100 if (folio_test_swapcache(folio)) 4101 return; 4102 4103 spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 4104 if (partially_mapped) { 4105 if (!folio_test_partially_mapped(folio)) { 4106 folio_set_partially_mapped(folio); 4107 if (folio_test_pmd_mappable(folio)) 4108 count_vm_event(THP_DEFERRED_SPLIT_PAGE); 4109 count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED); 4110 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1); 4111 4112 } 4113 } else { 4114 /* partially mapped folios cannot become non-partially mapped */ 4115 VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio); 4116 } 4117 if (list_empty(&folio->_deferred_list)) { 4118 list_add_tail(&folio->_deferred_list, &ds_queue->split_queue); 4119 ds_queue->split_queue_len++; 4120 #ifdef CONFIG_MEMCG 4121 if (memcg) 4122 set_shrinker_bit(memcg, folio_nid(folio), 4123 deferred_split_shrinker->id); 4124 #endif 4125 } 4126 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 4127 } 4128 4129 static unsigned long deferred_split_count(struct shrinker *shrink, 4130 struct shrink_control *sc) 4131 { 4132 struct pglist_data *pgdata = NODE_DATA(sc->nid); 4133 struct deferred_split *ds_queue = &pgdata->deferred_split_queue; 4134 4135 #ifdef CONFIG_MEMCG 4136 if (sc->memcg) 4137 ds_queue = &sc->memcg->deferred_split_queue; 4138 #endif 4139 return READ_ONCE(ds_queue->split_queue_len); 4140 } 4141 4142 static bool thp_underused(struct folio *folio) 4143 { 4144 int num_zero_pages = 0, num_filled_pages = 0; 4145 void *kaddr; 4146 int i; 4147 4148 if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1) 4149 return false; 4150 4151 for (i = 0; i < folio_nr_pages(folio); i++) { 4152 kaddr = kmap_local_folio(folio, i * PAGE_SIZE); 4153 if (!memchr_inv(kaddr, 0, PAGE_SIZE)) { 4154 num_zero_pages++; 4155 if (num_zero_pages > khugepaged_max_ptes_none) { 4156 kunmap_local(kaddr); 4157 return true; 4158 } 4159 } else { 4160 /* 4161 * Another path for early exit once the number 4162 * of non-zero filled pages exceeds threshold. 
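			 *
			 * For example, with HPAGE_PMD_NR == 512 and
			 * khugepaged_max_ptes_none tuned down to 256, the scan
			 * returns "underused" once it has seen 257 zero-filled
			 * pages, and returns "used" once it has seen
			 * 512 - 256 = 256 non-zero pages. With the default
			 * max_ptes_none of HPAGE_PMD_NR - 1, the check at the
			 * top of the function bails out early and nothing is
			 * ever reported as underused.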
4163 */ 4164 num_filled_pages++; 4165 if (num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none) { 4166 kunmap_local(kaddr); 4167 return false; 4168 } 4169 } 4170 kunmap_local(kaddr); 4171 } 4172 return false; 4173 } 4174 4175 static unsigned long deferred_split_scan(struct shrinker *shrink, 4176 struct shrink_control *sc) 4177 { 4178 struct pglist_data *pgdata = NODE_DATA(sc->nid); 4179 struct deferred_split *ds_queue = &pgdata->deferred_split_queue; 4180 unsigned long flags; 4181 LIST_HEAD(list); 4182 struct folio *folio, *next, *prev = NULL; 4183 int split = 0, removed = 0; 4184 4185 #ifdef CONFIG_MEMCG 4186 if (sc->memcg) 4187 ds_queue = &sc->memcg->deferred_split_queue; 4188 #endif 4189 4190 spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 4191 /* Take pin on all head pages to avoid freeing them under us */ 4192 list_for_each_entry_safe(folio, next, &ds_queue->split_queue, 4193 _deferred_list) { 4194 if (folio_try_get(folio)) { 4195 list_move(&folio->_deferred_list, &list); 4196 } else { 4197 /* We lost race with folio_put() */ 4198 if (folio_test_partially_mapped(folio)) { 4199 folio_clear_partially_mapped(folio); 4200 mod_mthp_stat(folio_order(folio), 4201 MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1); 4202 } 4203 list_del_init(&folio->_deferred_list); 4204 ds_queue->split_queue_len--; 4205 } 4206 if (!--sc->nr_to_scan) 4207 break; 4208 } 4209 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 4210 4211 list_for_each_entry_safe(folio, next, &list, _deferred_list) { 4212 bool did_split = false; 4213 bool underused = false; 4214 4215 if (!folio_test_partially_mapped(folio)) { 4216 underused = thp_underused(folio); 4217 if (!underused) 4218 goto next; 4219 } 4220 if (!folio_trylock(folio)) 4221 goto next; 4222 if (!split_folio(folio)) { 4223 did_split = true; 4224 if (underused) 4225 count_vm_event(THP_UNDERUSED_SPLIT_PAGE); 4226 split++; 4227 } 4228 folio_unlock(folio); 4229 next: 4230 /* 4231 * split_folio() removes folio from list on success. 4232 * Only add back to the queue if folio is partially mapped. 4233 * If thp_underused returns false, or if split_folio fails 4234 * in the case it was underused, then consider it used and 4235 * don't add it back to split_queue. 4236 */ 4237 if (did_split) { 4238 ; /* folio already removed from list */ 4239 } else if (!folio_test_partially_mapped(folio)) { 4240 list_del_init(&folio->_deferred_list); 4241 removed++; 4242 } else { 4243 /* 4244 * That unlocked list_del_init() above would be unsafe, 4245 * unless its folio is separated from any earlier folios 4246 * left on the list (which may be concurrently unqueued) 4247 * by one safe folio with refcount still raised. 4248 */ 4249 swap(folio, prev); 4250 } 4251 if (folio) 4252 folio_put(folio); 4253 } 4254 4255 spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 4256 list_splice_tail(&list, &ds_queue->split_queue); 4257 ds_queue->split_queue_len -= removed; 4258 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 4259 4260 if (prev) 4261 folio_put(prev); 4262 4263 /* 4264 * Stop shrinker if we didn't split any page, but the queue is empty. 4265 * This can happen if pages were freed under us. 
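	 *
	 * Returning SHRINK_STOP tells the shrinker core there is nothing
	 * left to do for this node/memcg in this pass; otherwise the number
	 * of folios actually split is returned as this scan's progress.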
4266 */ 4267 if (!split && list_empty(&ds_queue->split_queue)) 4268 return SHRINK_STOP; 4269 return split; 4270 } 4271 4272 #ifdef CONFIG_DEBUG_FS 4273 static void split_huge_pages_all(void) 4274 { 4275 struct zone *zone; 4276 struct page *page; 4277 struct folio *folio; 4278 unsigned long pfn, max_zone_pfn; 4279 unsigned long total = 0, split = 0; 4280 4281 pr_debug("Split all THPs\n"); 4282 for_each_zone(zone) { 4283 if (!managed_zone(zone)) 4284 continue; 4285 max_zone_pfn = zone_end_pfn(zone); 4286 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { 4287 int nr_pages; 4288 4289 page = pfn_to_online_page(pfn); 4290 if (!page || PageTail(page)) 4291 continue; 4292 folio = page_folio(page); 4293 if (!folio_try_get(folio)) 4294 continue; 4295 4296 if (unlikely(page_folio(page) != folio)) 4297 goto next; 4298 4299 if (zone != folio_zone(folio)) 4300 goto next; 4301 4302 if (!folio_test_large(folio) 4303 || folio_test_hugetlb(folio) 4304 || !folio_test_lru(folio)) 4305 goto next; 4306 4307 total++; 4308 folio_lock(folio); 4309 nr_pages = folio_nr_pages(folio); 4310 if (!split_folio(folio)) 4311 split++; 4312 pfn += nr_pages - 1; 4313 folio_unlock(folio); 4314 next: 4315 folio_put(folio); 4316 cond_resched(); 4317 } 4318 } 4319 4320 pr_debug("%lu of %lu THP split\n", split, total); 4321 } 4322 4323 static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma) 4324 { 4325 return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) || 4326 is_vm_hugetlb_page(vma); 4327 } 4328 4329 static int split_huge_pages_pid(int pid, unsigned long vaddr_start, 4330 unsigned long vaddr_end, unsigned int new_order, 4331 long in_folio_offset) 4332 { 4333 int ret = 0; 4334 struct task_struct *task; 4335 struct mm_struct *mm; 4336 unsigned long total = 0, split = 0; 4337 unsigned long addr; 4338 4339 vaddr_start &= PAGE_MASK; 4340 vaddr_end &= PAGE_MASK; 4341 4342 task = find_get_task_by_vpid(pid); 4343 if (!task) { 4344 ret = -ESRCH; 4345 goto out; 4346 } 4347 4348 /* Find the mm_struct */ 4349 mm = get_task_mm(task); 4350 put_task_struct(task); 4351 4352 if (!mm) { 4353 ret = -EINVAL; 4354 goto out; 4355 } 4356 4357 pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n", 4358 pid, vaddr_start, vaddr_end); 4359 4360 mmap_read_lock(mm); 4361 /* 4362 * always increase addr by PAGE_SIZE, since we could have a PTE page 4363 * table filled with PTE-mapped THPs, each of which is distinct. 4364 */ 4365 for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) { 4366 struct vm_area_struct *vma = vma_lookup(mm, addr); 4367 struct folio_walk fw; 4368 struct folio *folio; 4369 struct address_space *mapping; 4370 unsigned int target_order = new_order; 4371 4372 if (!vma) 4373 break; 4374 4375 /* skip special VMA and hugetlb VMA */ 4376 if (vma_not_suitable_for_thp_split(vma)) { 4377 addr = vma->vm_end; 4378 continue; 4379 } 4380 4381 folio = folio_walk_start(&fw, vma, addr, 0); 4382 if (!folio) 4383 continue; 4384 4385 if (!is_transparent_hugepage(folio)) 4386 goto next; 4387 4388 if (!folio_test_anon(folio)) { 4389 mapping = folio->mapping; 4390 target_order = max(new_order, 4391 mapping_min_folio_order(mapping)); 4392 } 4393 4394 if (target_order >= folio_order(folio)) 4395 goto next; 4396 4397 total++; 4398 /* 4399 * For folios with private, split_huge_page_to_list_to_order() 4400 * will try to drop it before split and then check if the folio 4401 * can be split or not. So skip the check here. 
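		 *
		 * (For orientation: this path is driven from debugfs. Based
		 * on the parser in split_huge_pages_write(), something like
		 *
		 *	echo "1234,0x7f0000000000,0x7f0000400000,0" > \
		 *		/sys/kernel/debug/split_huge_pages
		 *
		 * asks for the THPs mapped in that range of pid 1234 to be
		 * split to order 0.)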
4402 */ 4403 if (!folio_test_private(folio) && 4404 !can_split_folio(folio, 0, NULL)) 4405 goto next; 4406 4407 if (!folio_trylock(folio)) 4408 goto next; 4409 folio_get(folio); 4410 folio_walk_end(&fw, vma); 4411 4412 if (!folio_test_anon(folio) && folio->mapping != mapping) 4413 goto unlock; 4414 4415 if (in_folio_offset < 0 || 4416 in_folio_offset >= folio_nr_pages(folio)) { 4417 if (!split_folio_to_order(folio, target_order)) 4418 split++; 4419 } else { 4420 struct page *split_at = folio_page(folio, 4421 in_folio_offset); 4422 if (!folio_split(folio, target_order, split_at, NULL)) 4423 split++; 4424 } 4425 4426 unlock: 4427 4428 folio_unlock(folio); 4429 folio_put(folio); 4430 4431 cond_resched(); 4432 continue; 4433 next: 4434 folio_walk_end(&fw, vma); 4435 cond_resched(); 4436 } 4437 mmap_read_unlock(mm); 4438 mmput(mm); 4439 4440 pr_debug("%lu of %lu THP split\n", split, total); 4441 4442 out: 4443 return ret; 4444 } 4445 4446 static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start, 4447 pgoff_t off_end, unsigned int new_order, 4448 long in_folio_offset) 4449 { 4450 struct filename *file; 4451 struct file *candidate; 4452 struct address_space *mapping; 4453 int ret = -EINVAL; 4454 pgoff_t index; 4455 int nr_pages = 1; 4456 unsigned long total = 0, split = 0; 4457 unsigned int min_order; 4458 unsigned int target_order; 4459 4460 file = getname_kernel(file_path); 4461 if (IS_ERR(file)) 4462 return ret; 4463 4464 candidate = file_open_name(file, O_RDONLY, 0); 4465 if (IS_ERR(candidate)) 4466 goto out; 4467 4468 pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n", 4469 file_path, off_start, off_end); 4470 4471 mapping = candidate->f_mapping; 4472 min_order = mapping_min_folio_order(mapping); 4473 target_order = max(new_order, min_order); 4474 4475 for (index = off_start; index < off_end; index += nr_pages) { 4476 struct folio *folio = filemap_get_folio(mapping, index); 4477 4478 nr_pages = 1; 4479 if (IS_ERR(folio)) 4480 continue; 4481 4482 if (!folio_test_large(folio)) 4483 goto next; 4484 4485 total++; 4486 nr_pages = folio_nr_pages(folio); 4487 4488 if (target_order >= folio_order(folio)) 4489 goto next; 4490 4491 if (!folio_trylock(folio)) 4492 goto next; 4493 4494 if (folio->mapping != mapping) 4495 goto unlock; 4496 4497 if (in_folio_offset < 0 || in_folio_offset >= nr_pages) { 4498 if (!split_folio_to_order(folio, target_order)) 4499 split++; 4500 } else { 4501 struct page *split_at = folio_page(folio, 4502 in_folio_offset); 4503 if (!folio_split(folio, target_order, split_at, NULL)) 4504 split++; 4505 } 4506 4507 unlock: 4508 folio_unlock(folio); 4509 next: 4510 folio_put(folio); 4511 cond_resched(); 4512 } 4513 4514 filp_close(candidate, NULL); 4515 ret = 0; 4516 4517 pr_debug("%lu of %lu file-backed THP split\n", split, total); 4518 out: 4519 putname(file); 4520 return ret; 4521 } 4522 4523 #define MAX_INPUT_BUF_SZ 255 4524 4525 static ssize_t split_huge_pages_write(struct file *file, const char __user *buf, 4526 size_t count, loff_t *ppops) 4527 { 4528 static DEFINE_MUTEX(split_debug_mutex); 4529 ssize_t ret; 4530 /* 4531 * hold pid, start_vaddr, end_vaddr, new_order or 4532 * file_path, off_start, off_end, new_order 4533 */ 4534 char input_buf[MAX_INPUT_BUF_SZ]; 4535 int pid; 4536 unsigned long vaddr_start, vaddr_end; 4537 unsigned int new_order = 0; 4538 long in_folio_offset = -1; 4539 4540 ret = mutex_lock_interruptible(&split_debug_mutex); 4541 if (ret) 4542 return ret; 4543 4544 ret = -EFAULT; 4545 4546 memset(input_buf, 0, 
MAX_INPUT_BUF_SZ); 4547 if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ))) 4548 goto out; 4549 4550 input_buf[MAX_INPUT_BUF_SZ - 1] = '\0'; 4551 4552 if (input_buf[0] == '/') { 4553 char *tok; 4554 char *tok_buf = input_buf; 4555 char file_path[MAX_INPUT_BUF_SZ]; 4556 pgoff_t off_start = 0, off_end = 0; 4557 size_t input_len = strlen(input_buf); 4558 4559 tok = strsep(&tok_buf, ","); 4560 if (tok && tok_buf) { 4561 strscpy(file_path, tok); 4562 } else { 4563 ret = -EINVAL; 4564 goto out; 4565 } 4566 4567 ret = sscanf(tok_buf, "0x%lx,0x%lx,%d,%ld", &off_start, &off_end, 4568 &new_order, &in_folio_offset); 4569 if (ret != 2 && ret != 3 && ret != 4) { 4570 ret = -EINVAL; 4571 goto out; 4572 } 4573 ret = split_huge_pages_in_file(file_path, off_start, off_end, 4574 new_order, in_folio_offset); 4575 if (!ret) 4576 ret = input_len; 4577 4578 goto out; 4579 } 4580 4581 ret = sscanf(input_buf, "%d,0x%lx,0x%lx,%d,%ld", &pid, &vaddr_start, 4582 &vaddr_end, &new_order, &in_folio_offset); 4583 if (ret == 1 && pid == 1) { 4584 split_huge_pages_all(); 4585 ret = strlen(input_buf); 4586 goto out; 4587 } else if (ret != 3 && ret != 4 && ret != 5) { 4588 ret = -EINVAL; 4589 goto out; 4590 } 4591 4592 ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end, new_order, 4593 in_folio_offset); 4594 if (!ret) 4595 ret = strlen(input_buf); 4596 out: 4597 mutex_unlock(&split_debug_mutex); 4598 return ret; 4599 4600 } 4601 4602 static const struct file_operations split_huge_pages_fops = { 4603 .owner = THIS_MODULE, 4604 .write = split_huge_pages_write, 4605 }; 4606 4607 static int __init split_huge_pages_debugfs(void) 4608 { 4609 debugfs_create_file("split_huge_pages", 0200, NULL, NULL, 4610 &split_huge_pages_fops); 4611 return 0; 4612 } 4613 late_initcall(split_huge_pages_debugfs); 4614 #endif 4615 4616 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 4617 int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, 4618 struct page *page) 4619 { 4620 struct folio *folio = page_folio(page); 4621 struct vm_area_struct *vma = pvmw->vma; 4622 struct mm_struct *mm = vma->vm_mm; 4623 unsigned long address = pvmw->address; 4624 bool anon_exclusive; 4625 pmd_t pmdval; 4626 swp_entry_t entry; 4627 pmd_t pmdswp; 4628 4629 if (!(pvmw->pmd && !pvmw->pte)) 4630 return 0; 4631 4632 flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); 4633 pmdval = pmdp_invalidate(vma, address, pvmw->pmd); 4634 4635 /* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. 
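	 * If sharing the anon-exclusive mapping fails below (the folio may be
	 * concurrently pinned), the original PMD is put back and -EBUSY tells
	 * the caller that this mapping cannot be turned into a migration
	 * entry.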
*/ 4636 anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page); 4637 if (anon_exclusive && folio_try_share_anon_rmap_pmd(folio, page)) { 4638 set_pmd_at(mm, address, pvmw->pmd, pmdval); 4639 return -EBUSY; 4640 } 4641 4642 if (pmd_dirty(pmdval)) 4643 folio_mark_dirty(folio); 4644 if (pmd_write(pmdval)) 4645 entry = make_writable_migration_entry(page_to_pfn(page)); 4646 else if (anon_exclusive) 4647 entry = make_readable_exclusive_migration_entry(page_to_pfn(page)); 4648 else 4649 entry = make_readable_migration_entry(page_to_pfn(page)); 4650 if (pmd_young(pmdval)) 4651 entry = make_migration_entry_young(entry); 4652 if (pmd_dirty(pmdval)) 4653 entry = make_migration_entry_dirty(entry); 4654 pmdswp = swp_entry_to_pmd(entry); 4655 if (pmd_soft_dirty(pmdval)) 4656 pmdswp = pmd_swp_mksoft_dirty(pmdswp); 4657 if (pmd_uffd_wp(pmdval)) 4658 pmdswp = pmd_swp_mkuffd_wp(pmdswp); 4659 set_pmd_at(mm, address, pvmw->pmd, pmdswp); 4660 folio_remove_rmap_pmd(folio, page, vma); 4661 folio_put(folio); 4662 trace_set_migration_pmd(address, pmd_val(pmdswp)); 4663 4664 return 0; 4665 } 4666 4667 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) 4668 { 4669 struct folio *folio = page_folio(new); 4670 struct vm_area_struct *vma = pvmw->vma; 4671 struct mm_struct *mm = vma->vm_mm; 4672 unsigned long address = pvmw->address; 4673 unsigned long haddr = address & HPAGE_PMD_MASK; 4674 pmd_t pmde; 4675 swp_entry_t entry; 4676 4677 if (!(pvmw->pmd && !pvmw->pte)) 4678 return; 4679 4680 entry = pmd_to_swp_entry(*pvmw->pmd); 4681 folio_get(folio); 4682 pmde = folio_mk_pmd(folio, READ_ONCE(vma->vm_page_prot)); 4683 if (pmd_swp_soft_dirty(*pvmw->pmd)) 4684 pmde = pmd_mksoft_dirty(pmde); 4685 if (is_writable_migration_entry(entry)) 4686 pmde = pmd_mkwrite(pmde, vma); 4687 if (pmd_swp_uffd_wp(*pvmw->pmd)) 4688 pmde = pmd_mkuffd_wp(pmde); 4689 if (!is_migration_entry_young(entry)) 4690 pmde = pmd_mkold(pmde); 4691 /* NOTE: this may contain setting soft-dirty on some archs */ 4692 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry)) 4693 pmde = pmd_mkdirty(pmde); 4694 4695 if (folio_test_anon(folio)) { 4696 rmap_t rmap_flags = RMAP_NONE; 4697 4698 if (!is_readable_migration_entry(entry)) 4699 rmap_flags |= RMAP_EXCLUSIVE; 4700 4701 folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags); 4702 } else { 4703 folio_add_file_rmap_pmd(folio, new, vma); 4704 } 4705 VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new)); 4706 set_pmd_at(mm, haddr, pvmw->pmd, pmde); 4707 4708 /* No need to invalidate - it was non-present before */ 4709 update_mmu_cache_pmd(vma, address, pvmw->pmd); 4710 trace_remove_migration_pmd(address, pmd_val(pmde)); 4711 } 4712 #endif 4713
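
/*
 * Editor's sketch of how the two helpers above pair up during THP migration
 * (simplified; the callers live in mm/rmap.c and mm/migrate.c):
 *
 *	try_to_migrate()          // rmap walk replaces each PMD mapping
 *	  -> set_pmd_migration_entry()
 *	... folio contents are copied to the destination folio ...
 *	remove_migration_ptes()   // rmap walk on the destination folio
 *	  -> remove_migration_pmd()
 */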