// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>
#include <linux/migrate.h>
#include <linux/nospec.h>
#include <linux/delayacct.h>
#include <linux/memory.h>
#include <linux/mm_inline.h>
#include <linux/padata.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/page_owner.h>
#include "internal.h"
#include "hugetlb_vmemmap.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
#endif
static unsigned long hugetlb_cma_size __initdata;

__initdata struct list_head huge_boot_pages[MAX_NUMNODES];

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static bool __initdata parsed_valid_hugepagesz = true;
static bool __initdata parsed_default_hugepagesz;
static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes __ro_after_init;
struct mutex *hugetlb_fault_mutex_table __ro_after_init;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
static struct resv_map *vma_resv_map(struct vm_area_struct *vma);

static void hugetlb_free_folio(struct folio *folio)
{
#ifdef CONFIG_CMA
	int nid = folio_nid(folio);

	if (cma_free_folio(hugetlb_cma[nid], folio))
		return;
#endif
	folio_put(folio);
}

static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
	if (spool->count)
		return false;
	if (spool->max_hpages != -1)
		return spool->used_hpages == 0;
	if (spool->min_hpages != -1)
		return spool->rsv_hpages == spool->min_hpages;

	return true;
}

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
						unsigned long irq_flags)
{
	spin_unlock_irqrestore(&spool->lock, irq_flags);

	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
	if (subpool_is_free(spool)) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	unsigned long flags;

	spin_lock_irqsave(&spool->lock, flags);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool, flags);
}
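/*
 * Illustrative sketch (not part of the original source): a typical subpool
 * lifetime, roughly as hugetlbfs drives it at mount/unmount time.  The
 * parameter values here are made up.
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(h, 1024, 16);	// max 1024, min 16 pages
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);	// freed once unused and count drops
 */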
/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				      long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock_irq(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock_irq(&spool->lock);
	return ret;
}

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;
	unsigned long flags;

	if (!spool)
		return delta;

	spin_lock_irqsave(&spool->lock, flags);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool, flags);

	return ret;
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/*
 * hugetlb vma_lock helper routines
 */
void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_read(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		down_read(&resv_map->rw_sema);
	}
}

void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		up_read(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		up_read(&resv_map->rw_sema);
	}
}

void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_write(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		down_write(&resv_map->rw_sema);
	}
}

void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		up_write(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		up_write(&resv_map->rw_sema);
	}
}
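/*
 * Illustrative sketch (not part of the original source): fault paths take
 * the vma lock for read, while paths that must exclude concurrent faults,
 * such as truncation, take it for write:
 *
 *	hugetlb_vma_lock_read(vma);
 *	... handle the fault ...
 *	hugetlb_vma_unlock_read(vma);
 */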
int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		return down_write_trylock(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		return down_write_trylock(&resv_map->rw_sema);
	}

	return 1;
}

void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		lockdep_assert_held(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		lockdep_assert_held(&resv_map->rw_sema);
	}
}

void hugetlb_vma_lock_release(struct kref *kref)
{
	struct hugetlb_vma_lock *vma_lock = container_of(kref,
			struct hugetlb_vma_lock, refs);

	kfree(vma_lock);
}

static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
{
	struct vm_area_struct *vma = vma_lock->vma;

	/*
	 * vma_lock structure may or may not be released as a result of put,
	 * it certainly will no longer be attached to vma so clear pointer.
	 * Semaphore synchronizes access to vma_lock->vma field.
	 */
	vma_lock->vma = NULL;
	vma->vm_private_data = NULL;
	up_write(&vma_lock->rw_sema);
	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
}

static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		__hugetlb_vma_unlock_write_put(vma_lock);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		/* no free for anon vmas, but still need to unlock */
		up_write(&resv_map->rw_sema);
	}
}

static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
{
	/*
	 * Only present in sharable vmas.
	 */
	if (!vma || !__vma_shareable_lock(vma))
		return;

	if (vma->vm_private_data) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_write(&vma_lock->rw_sema);
		__hugetlb_vma_unlock_write_put(vma_lock);
	}
}

static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
{
	struct hugetlb_vma_lock *vma_lock;

	/* Only establish in (flags) sharable vmas */
	if (!vma || !(vma->vm_flags & VM_MAYSHARE))
		return;

	/* Should never get here with non-NULL vm_private_data */
	if (vma->vm_private_data)
		return;

	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
	if (!vma_lock) {
		/*
		 * If we cannot allocate the structure, then the vma cannot
		 * participate in pmd sharing.  That is only a potential
		 * performance enhancement and memory saving.
		 * However, the lock is also used to synchronize page
		 * faults with truncation.  If the lock is not present,
		 * unlikely races could leave pages in a file past i_size
		 * until the file is removed.  Warn in the unlikely case of
		 * allocation failure.
		 */
		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
		return;
	}

	kref_init(&vma_lock->refs);
	init_rwsem(&vma_lock->rw_sema);
	vma_lock->vma = vma;
	vma->vm_private_data = vma_lock;
}
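/*
 * Illustrative note (not part of the original source): a resv_map tracks
 * reservations as a sorted list of non-overlapping [from, to) file_region
 * entries, in huge page units.  For example, a map holding [0, 2) and
 * [5, 7) means page offsets 0, 1, 5 and 6 carry reservations while
 * offsets 2, 3 and 4 do not.
 */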
/* Helper that removes a struct file_region from the resv_map cache and returns
 * it for use.
 */
static struct file_region *
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
{
	struct file_region *nrg;

	VM_BUG_ON(resv->region_cache_count <= 0);

	resv->region_cache_count--;
	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
	list_del(&nrg->link);

	nrg->from = from;
	nrg->to = to;

	return nrg;
}

static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
					      struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	nrg->reservation_counter = rg->reservation_counter;
	nrg->css = rg->css;
	if (rg->css)
		css_get(rg->css);
#endif
}

/* Helper that records hugetlb_cgroup uncharge info. */
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
						struct hstate *h,
						struct resv_map *resv,
						struct file_region *nrg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (h_cg) {
		nrg->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		nrg->css = &h_cg->css;
		/*
		 * The caller will hold exactly one h_cg->css reference for the
		 * whole contiguous reservation region.  But this area might be
		 * scattered when there are already some file_regions residing
		 * in it.  As a result, many file_regions may share only one
		 * css reference.  In order to ensure that one file_region
		 * holds exactly one h_cg->css reference, we should do css_get
		 * for each file_region and leave the reference held by the
		 * caller untouched.
		 */
		css_get(&h_cg->css);
		if (!resv->pages_per_hpage)
			resv->pages_per_hpage = pages_per_huge_page(h);
		/* pages_per_hpage should be the same for all entries in
		 * a resv_map.
		 */
		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
	} else {
		nrg->reservation_counter = NULL;
		nrg->css = NULL;
	}
#endif
}

static void put_uncharge_info(struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (rg->css)
		css_put(rg->css);
#endif
}

static bool has_same_uncharge_info(struct file_region *rg,
				   struct file_region *org)
{
#ifdef CONFIG_CGROUP_HUGETLB
	return rg->reservation_counter == org->reservation_counter &&
	       rg->css == org->css;
#else
	return true;
#endif
}

static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{
	struct file_region *nrg, *prg;

	prg = list_prev_entry(rg, link);
	if (&prg->link != &resv->regions && prg->to == rg->from &&
	    has_same_uncharge_info(prg, rg)) {
		prg->to = rg->to;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);

		rg = prg;
	}

	nrg = list_next_entry(rg, link);
	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
	    has_same_uncharge_info(nrg, rg)) {
		nrg->from = rg->from;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);
	}
}

static inline long
hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
		     long *regions_needed)
{
	struct file_region *nrg;

	if (!regions_needed) {
		nrg = get_file_region_entry_from_cache(map, from, to);
		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
		list_add(&nrg->link, rg);
		coalesce_file_region(map, nrg);
	} else
		*regions_needed += 1;

	return to - from;
}
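/*
 * Illustrative note (not part of the original source): coalesce_file_region()
 * merges the new entry with its neighbours when the ranges abut and the
 * cgroup uncharge info matches.  E.g. adding [2, 4) to a map holding
 * [0, 2) and [4, 6) collapses all three entries into a single [0, 6).
 */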
/*
 * Must be called with resv->lock held.
 *
 * Calling this with regions_needed != NULL will count the number of pages
 * to be added but will not modify the linked list.  In that case,
 * regions_needed will indicate the number of file_regions needed in the
 * cache to carry out the addition of regions for this range.
 */
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
				     struct hugetlb_cgroup *h_cg,
				     struct hstate *h, long *regions_needed)
{
	long add = 0;
	struct list_head *head = &resv->regions;
	long last_accounted_offset = f;
	struct file_region *iter, *trg = NULL;
	struct list_head *rg = NULL;

	if (regions_needed)
		*regions_needed = 0;

	/* In this loop, we essentially handle an entry for the range
	 * [last_accounted_offset, iter->from), at every iteration, with some
	 * bounds checking.
	 */
	list_for_each_entry_safe(iter, trg, head, link) {
		/* Skip irrelevant regions that start before our range. */
		if (iter->from < f) {
			/* If this region ends after the last accounted offset,
			 * then we need to update last_accounted_offset.
			 */
			if (iter->to > last_accounted_offset)
				last_accounted_offset = iter->to;
			continue;
		}

		/* When we find a region that starts beyond our range, we've
		 * finished.
		 */
		if (iter->from >= t) {
			rg = iter->link.prev;
			break;
		}

		/* Add an entry for last_accounted_offset -> iter->from, and
		 * update last_accounted_offset.
		 */
		if (iter->from > last_accounted_offset)
			add += hugetlb_resv_map_add(resv, iter->link.prev,
						    last_accounted_offset,
						    iter->from, h, h_cg,
						    regions_needed);

		last_accounted_offset = iter->to;
	}

	/* Handle the case where our range extends beyond
	 * last_accounted_offset.
	 */
	if (!rg)
		rg = head->prev;
	if (last_accounted_offset < t)
		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
					    t, h, h_cg, regions_needed);

	return add;
}
/* Must be called with resv->lock acquired.  Will drop lock to allocate entries.
 */
static int allocate_file_region_entries(struct resv_map *resv,
					int regions_needed)
	__must_hold(&resv->lock)
{
	LIST_HEAD(allocated_regions);
	int to_allocate = 0, i = 0;
	struct file_region *trg = NULL, *rg = NULL;

	VM_BUG_ON(regions_needed < 0);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations plus regions_needed.
	 *
	 * This is a while loop because when we drop the lock, some other call
	 * to region_add or region_del may have consumed some region_entries,
	 * so we keep looping here until we finally have enough entries for
	 * (adds_in_progress + regions_needed).
	 */
	while (resv->region_cache_count <
	       (resv->adds_in_progress + regions_needed)) {
		to_allocate = resv->adds_in_progress + regions_needed -
			      resv->region_cache_count;

		/* At this point, we should have enough entries in the cache
		 * for all the existing adds_in_progress.  We should only be
		 * needing to allocate for regions_needed.
		 */
		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);

		spin_unlock(&resv->lock);
		for (i = 0; i < to_allocate; i++) {
			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
			if (!trg)
				goto out_of_memory;
			list_add(&trg->link, &allocated_regions);
		}

		spin_lock(&resv->lock);

		list_splice(&allocated_regions, &resv->region_cache);
		resv->region_cache_count += to_allocate;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
		list_del(&rg->link);
		kfree(rg);
	}
	return -ENOMEM;
}

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  Regions will be taken from the cache to fill in this range.
 * Sufficient regions should exist in the cache due to the previous
 * call to region_chg with the same range, but in some cases the cache will not
 * have sufficient entries due to races with other code doing region_add or
 * region_del.  The extra needed entries will be allocated.
 *
 * regions_needed is the out value provided by a previous call to region_chg.
 *
 * Return the number of new huge pages added to the map.  This number is
 * greater than or equal to zero.  If file_region entries needed to be
 * allocated for this operation and we were not able to allocate, it returns
 * -ENOMEM.  region_add of regions of length 1 never allocates file_regions
 * and cannot fail; region_chg will always allocate at least 1 entry and a
 * region_add for 1 page will only require at most 1 entry.
 */
static long region_add(struct resv_map *resv, long f, long t,
		       long in_regions_needed, struct hstate *h,
		       struct hugetlb_cgroup *h_cg)
{
	long add = 0, actual_regions_needed = 0;

	spin_lock(&resv->lock);
retry:

	/* Count how many regions are actually needed to execute this add. */
	add_reservation_in_range(resv, f, t, NULL, NULL,
				 &actual_regions_needed);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * this add operation.  Note that actual_regions_needed may be greater
	 * than in_regions_needed, as the resv_map may have been modified since
	 * the region_chg call.  In this case, we need to make sure that we
	 * allocate extra entries, such that we have enough for all the
	 * existing adds_in_progress, plus the excess needed for this
	 * operation.
	 */
	if (actual_regions_needed > in_regions_needed &&
	    resv->region_cache_count <
		    resv->adds_in_progress +
			    (actual_regions_needed - in_regions_needed)) {
		/* region_add operation of range 1 should never need to
		 * allocate file_region entries.
		 */
		VM_BUG_ON(t - f <= 1);

		if (allocate_file_region_entries(
			    resv, actual_regions_needed - in_regions_needed)) {
			return -ENOMEM;
		}

		goto retry;
	}

	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);

	resv->adds_in_progress -= in_regions_needed;

	spin_unlock(&resv->lock);
	return add;
}
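/*
 * Illustrative sketch (not part of the original source): region_chg() and
 * region_add() form a two-phase protocol.  Roughly, callers later in this
 * file string them together like so (error handling elided;
 * "cannot_proceed" is a hypothetical condition):
 *
 *	long chg, regions_needed;
 *
 *	chg = region_chg(resv, f, t, &regions_needed);
 *	if (chg < 0)
 *		return chg;
 *	if (cannot_proceed)
 *		region_abort(resv, f, t, regions_needed);
 *	else
 *		region_add(resv, f, t, regions_needed, h, h_cg);
 */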
/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  A number of new file_region structures is added to the cache as a
 * placeholder, for the subsequent region_add call to use.  At least 1
 * file_region structure is added.
 *
 * out_regions_needed is the number of regions added to the
 * resv->adds_in_progress.  This value needs to be provided to a follow up call
 * to region_add or region_abort for proper accounting.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater than or
 * equal to zero.  -ENOMEM is returned if a new file_region structure or
 * cache entry is needed and cannot be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t,
		       long *out_regions_needed)
{
	long chg = 0;

	spin_lock(&resv->lock);

	/* Count how many hugepages in this range are NOT represented. */
	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
				       out_regions_needed);

	if (*out_regions_needed == 0)
		*out_regions_needed = 1;

	if (allocate_file_region_entries(resv, *out_regions_needed))
		return -ENOMEM;

	resv->adds_in_progress += *out_regions_needed;

	spin_unlock(&resv->lock);
	return chg;
}

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter.  regions_needed
 * is the value returned by the region_chg call; it is used to decrement
 * the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t,
			 long regions_needed)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress -= regions_needed;
	spin_unlock(&resv->lock);
}

/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to).  However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.  Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
						       struct file_region,
						       link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;
			hugetlb_cgroup_uncharge_file_region(
				resv, rg, t - f, false);

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;

			copy_hugetlb_cgroup_uncharge_info(nrg, rg);

			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - rg->from, true);
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {	/* Trim beginning of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    t - rg->from, false);

			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - f, false);

			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}

/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was freed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;
	bool reserved = false;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust > 0) {
		struct hstate *h = hstate_inode(inode);

		if (!hugetlb_acct_memory(h, 1))
			reserved = true;
	} else if (!rsv_adjust) {
		reserved = true;
	}

	if (!reserved)
		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
}
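/*
 * Illustrative note (not part of the original source): a worked region_del()
 * split.  Deleting [4, 6) from a map holding the single region [0, 10)
 * trims the existing entry to [0, 4) and inserts a new entry [6, 10),
 * returning 2 pages deleted.
 */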
/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in huge page units.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

/**
 * vma_kernel_pagesize - Page size granularity for this VMA.
 * @vma: The user mapping.
 *
 * Folios in this VMA will be aligned to, and at least as large as, the
 * number of bytes returned by this function.
 *
 * Return: The default size of the folios allocated when backing a VMA.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA.  In the majority
 * of cases, the page size used by the kernel matches the MMU size.  On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping.  Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of hugetlb_dup_vma_private() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held.  It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and this persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
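 *
 * For example (illustrative, not part of the original comment): in a shared
 * mapping, an entry [0, 1) means offset 0 was reserved at some point and
 * the entry remains after the page is faulted in; in a private mapping,
 * an entry [0, 1) instead records that offset 0 has already consumed its
 * reservation.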
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
					unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

static void
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
					  struct hugetlb_cgroup *h_cg,
					  struct hstate *h)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (!h_cg || !h) {
		resv_map->reservation_counter = NULL;
		resv_map->pages_per_hpage = 0;
		resv_map->css = NULL;
	} else {
		resv_map->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		resv_map->pages_per_hpage = pages_per_huge_page(h);
		resv_map->css = &h_cg->css;
	}
#endif
}

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);
	init_rwsem(&resv_map->rw_sema);

	resv_map->adds_in_progress = 0;
	/*
	 * Initialize these to 0.  On shared mappings, 0's here indicate these
	 * fields don't do cgroup accounting.  On private mappings, these will
	 * be re-initialized to the proper values, to indicate that hugetlb
	 * cgroup reservations are to be un-charged from here.
	 */
	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}
static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	/*
	 * At inode evict time, i_mapping may not point to the original
	 * address space within the inode.  This original address space
	 * contains the pointer to the resv_map.  So, always use the
	 * address space embedded within the inode.
	 * The VERY common case is inode->mapping == &inode->i_data, but
	 * this may not be true for device special inodes.
	 */
	return (struct resv_map *)(&inode->i_data)->i_private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}

bool __vma_private_lock(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & VM_MAYSHARE) &&
		get_vma_private_data(vma) & ~HPAGE_RESV_MASK &&
		is_vma_resv_set(vma, HPAGE_RESV_OWNER);
}

void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	/*
	 * Clear vm_private_data
	 * - For shared mappings this is a per-vma semaphore that may be
	 *   allocated in a subsequent call to hugetlb_vm_op_open.
	 *   Before clearing, make sure the pointer is not associated with
	 *   this vma, as that would leak the structure.  This is the case
	 *   when called via clear_vma_resv_huge_pages() and
	 *   hugetlb_vm_op_open has already been called to allocate a new
	 *   structure.
	 * - For MAP_PRIVATE mappings, this is the reserve map which does
	 *   not apply to children.  Faults generated by the children are
	 *   not guaranteed to succeed, even if read-only.
	 */
	if (vma->vm_flags & VM_MAYSHARE) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		if (vma_lock && vma_lock->vma != vma)
			vma->vm_private_data = NULL;
	} else
		vma->vm_private_data = NULL;
}
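/*
 * Illustrative note (not part of the original source): for MAP_PRIVATE vmas
 * the resv_map pointer and the HPAGE_RESV_* flags share vm_private_data;
 * the pointer is aligned, so the bottom two bits are free:
 *
 *	set_vma_resv_map(vma, map);
 *	set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
 *	// vm_private_data now equals (unsigned long)map | HPAGE_RESV_OWNER
 */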
/*
 * Reset and decrement one ref on hugepage private reservation.
 * Called with mm->mmap_lock writer semaphore held.
 * This function should only be used by move_vma() and operate on
 * same sized vma.  It should never come here with last ref on the
 * reservation.
 */
void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	/*
	 * Clear the old hugetlb private page reservation.
	 * It has already been transferred to new_vma.
	 *
	 * During a mremap() operation of a hugetlb vma we call move_vma()
	 * which copies vma into new_vma and unmaps vma.  After the copy
	 * operation both new_vma and vma share a reference to the resv_map
	 * struct, and at that point vma is about to be unmapped.  We don't
	 * want to return the reservation to the pool at unmap of vma because
	 * the reservation still lives on in new_vma, so simply decrement the
	 * ref here and remove the resv_map reference from this vma.
	 */
	struct resv_map *reservations = vma_resv_map(vma);

	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
		kref_put(&reservations->refs, resv_map_release);
	}

	hugetlb_dup_vma_private(vma);
}

/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by another process
		 * (chg == 0), so we should decrement the reserved count.
		 * Without decrementing, the reserve count would remain after
		 * releasing the inode, because the allocated page will go
		 * into the page cache and be regarded as coming from the
		 * reserved pool in the releasing step.  Currently, we don't
		 * have any other solution to deal with this situation
		 * properly, so add this work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return true;
		else
			return false;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE) {
		/*
		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
		 * be a region map for all pages.  The only situation where
		 * there is no region map is if a hole was punched via
		 * fallocate.  In this case, there really are no reserves to
		 * use.  This situation is indicated if chg != 0.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Like the shared case above, a hole punch or truncate
		 * could have been performed on the private mapping.
		 * Examine the value of chg to determine if reserves
		 * actually exist or were previously consumed.
		 * Very Subtle - The value of chg comes from a previous
		 * call to vma_needs_reserves().  The reserve map for
		 * private mappings has different (opposite) semantics
		 * than that of shared mappings.  vma_needs_reserves()
		 * has already taken this difference in semantics into
		 * account.  Therefore, the meaning of chg is the same
		 * as in the shared case above.  Code could easily be
		 * combined, but keeping it separate draws attention to
		 * subtle differences.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	return false;
}
static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
{
	int nid = folio_nid(folio);

	lockdep_assert_held(&hugetlb_lock);
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);

	list_move(&folio->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
	folio_set_hugetlb_freed(folio);
}

static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
								int nid)
{
	struct folio *folio;
	bool pin = !!(current->flags & PF_MEMALLOC_PIN);

	lockdep_assert_held(&hugetlb_lock);
	list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
		if (pin && !folio_is_longterm_pinnable(folio))
			continue;

		if (folio_test_hwpoison(folio))
			continue;

		list_move(&folio->lru, &h->hugepage_activelist);
		folio_ref_unfreeze(folio, 1);
		folio_clear_hugetlb_freed(folio);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		return folio;
	}

	return NULL;
}

static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
							int nid, nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = NUMA_NO_NODE;

	/* 'nid' should not be NUMA_NO_NODE.  Try to catch any misuse of it and rectify. */
	if (nid == NUMA_NO_NODE)
		nid = numa_node_id();

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct folio *folio;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * no need to ask again on the same node.  Pool is node rather
		 * than zone aware
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		folio = dequeue_hugetlb_folio_node_exact(h, node);
		if (folio)
			return folio;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}

static unsigned long available_huge_pages(struct hstate *h)
{
	return h->free_huge_pages - h->resv_huge_pages;
}
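/*
 * Illustrative note (not part of the original source): with
 * free_huge_pages == 10 and resv_huge_pages == 8, available_huge_pages()
 * returns 2, i.e. only two free pages may serve allocations that cannot
 * dip into reserves.
 */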
static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct folio *folio = NULL;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves.  This check ensures that reservations are
	 * not "stolen".  The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma, chg) && !available_huge_pages(h))
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && !available_huge_pages(h))
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);

	if (mpol_is_preferred_many(mpol)) {
		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
							nid, nodemask);

		/* Fallback to all nodes if page==NULL */
		nodemask = NULL;
	}

	if (!folio)
		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
							nid, nodemask);

	if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) {
		folio_set_hugetlb_restore_reserve(folio);
		h->resv_huge_pages--;
	}

	mpol_cond_put(mpol);
	return folio;

err:
	return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(int *next_node,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(*next_node, nodes_allowed);
	*next_node = next_node_allowed(nid, nodes_allowed);

	return nid;
}
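/*
 * Illustrative note (not part of the original source): with
 * nodes_allowed = {0, 2} and *next_node == 0, successive calls to
 * hstate_next_node_to_alloc() return 0, 2, 0, 2, ... advancing *next_node
 * each time and wrapping at the end of the mask, which is what spreads
 * pool allocations across nodes.
 */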
/*
 * helper for remove_pool_hugetlb_folio() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(next_node, nr_nodes, node, mask)	\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(next_node, mask)) || 1); \
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
#ifdef CONFIG_CONTIG_ALLOC
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	struct folio *folio;
	int order = huge_page_order(h);
	bool retried = false;

	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();
retry:
	folio = NULL;
#ifdef CONFIG_CMA
	{
		int node;

		if (hugetlb_cma[nid])
			folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask);

		if (!folio && !(gfp_mask & __GFP_THISNODE)) {
			for_each_node_mask(node, *nodemask) {
				if (node == nid || !hugetlb_cma[node])
					continue;

				folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask);
				if (folio)
					break;
			}
		}
	}
#endif
	if (!folio) {
		folio = folio_alloc_gigantic(order, gfp_mask, nid, nodemask);
		if (!folio)
			return NULL;
	}

	if (folio_ref_freeze(folio, 1))
		return folio;

	pr_warn("HugeTLB: unexpected refcount on PFN %lu\n", folio_pfn(folio));
	hugetlb_free_folio(folio);
	if (!retried) {
		retried = true;
		goto retry;
	}
	return NULL;
}

#else /* !CONFIG_CONTIG_ALLOC */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
#endif

/*
 * Remove hugetlb folio from lists.
 * If vmemmap exists for the folio, clear the hugetlb flag so that the
 * folio appears as just a compound page.  Otherwise, wait until after
 * allocating vmemmap to clear the flag.
 *
 * Must be called with hugetlb lock held.
 */
static void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
							bool adjust_surplus)
{
	int nid = folio_nid(folio);

	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);

	lockdep_assert_held(&hugetlb_lock);
	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	list_del(&folio->lru);

	if (folio_test_hugetlb_freed(folio)) {
		folio_clear_hugetlb_freed(folio);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
	}
	if (adjust_surplus) {
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	}

	/*
	 * We can only clear the hugetlb flag after allocating vmemmap
	 * pages.  Otherwise, someone (memory error handling) may try to write
	 * to tail struct pages.
	 */
	if (!folio_test_hugetlb_vmemmap_optimized(folio))
		__folio_clear_hugetlb(folio);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[nid]--;
}
static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
			     bool adjust_surplus)
{
	int nid = folio_nid(folio);

	VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio);

	lockdep_assert_held(&hugetlb_lock);

	INIT_LIST_HEAD(&folio->lru);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;

	if (adjust_surplus) {
		h->surplus_huge_pages++;
		h->surplus_huge_pages_node[nid]++;
	}

	__folio_set_hugetlb(folio);
	folio_change_private(folio, NULL);
	/*
	 * We have to set hugetlb_vmemmap_optimized again as above
	 * folio_change_private(folio, NULL) cleared it.
	 */
	folio_set_hugetlb_vmemmap_optimized(folio);

	arch_clear_hugetlb_flags(folio);
	enqueue_hugetlb_folio(h, folio);
}

static void __update_and_free_hugetlb_folio(struct hstate *h,
						struct folio *folio)
{
	bool clear_flag = folio_test_hugetlb_vmemmap_optimized(folio);

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	/*
	 * If we don't know which subpages are hwpoisoned, we can't free
	 * the hugepage, so it's leaked intentionally.
	 */
	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return;

	/*
	 * If the folio is not vmemmap optimized (!clear_flag), then the folio
	 * is no longer identified as a hugetlb page.  hugetlb_vmemmap_restore_folio
	 * can only be passed hugetlb pages and will BUG otherwise.
	 */
	if (clear_flag && hugetlb_vmemmap_restore_folio(h, folio)) {
		spin_lock_irq(&hugetlb_lock);
		/*
		 * If we cannot allocate vmemmap pages, just refuse to free
		 * the page, put it back on the hugetlb free list and treat
		 * it as a surplus page.
		 */
		add_hugetlb_folio(h, folio, true);
		spin_unlock_irq(&hugetlb_lock);
		return;
	}

	/*
	 * If vmemmap pages were allocated above, then we need to clear the
	 * hugetlb flag under the hugetlb lock.
	 */
	if (folio_test_hugetlb(folio)) {
		spin_lock_irq(&hugetlb_lock);
		__folio_clear_hugetlb(folio);
		spin_unlock_irq(&hugetlb_lock);
	}

	/*
	 * Move PageHWPoison flag from head page to the raw error pages,
	 * which makes any healthy subpages reusable.
	 */
	if (unlikely(folio_test_hwpoison(folio)))
		folio_clear_hugetlb_hwpoison(folio);

	folio_ref_unfreeze(folio, 1);

	INIT_LIST_HEAD(&folio->_deferred_list);
	hugetlb_free_folio(folio);
}

/*
 * As update_and_free_hugetlb_folio() can be called from any context, we
 * cannot use GFP_KERNEL to allocate vmemmap pages.  However, we can defer
 * the actual freeing to a workqueue to avoid having to use GFP_ATOMIC to
 * allocate the vmemmap pages.
 *
 * free_hpage_workfn() locklessly retrieves the linked list of pages to be
 * freed and frees them one-by-one.  As the page->mapping pointer is going
 * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
 * structure of a lockless linked list of huge pages to be freed.
 */
static LLIST_HEAD(hpage_freelist);

static void free_hpage_workfn(struct work_struct *work)
{
	struct llist_node *node;

	node = llist_del_all(&hpage_freelist);

	while (node) {
		struct folio *folio;
		struct hstate *h;

		folio = container_of((struct address_space **)node,
				     struct folio, mapping);
		node = node->next;
		folio->mapping = NULL;
		/*
		 * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
		 * folio_hstate() is going to trigger because a previous call to
		 * remove_hugetlb_folio() will clear the hugetlb bit, so do
		 * not use folio_hstate() directly.
		 */
		h = size_to_hstate(folio_size(folio));

		__update_and_free_hugetlb_folio(h, folio);

		cond_resched();
	}
}
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);

static inline void flush_free_hpage_work(struct hstate *h)
{
	if (hugetlb_vmemmap_optimizable(h))
		flush_work(&free_hpage_work);
}

static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
				 bool atomic)
{
	if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
		__update_and_free_hugetlb_folio(h, folio);
		return;
	}

	/*
	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
	 *
	 * Only call schedule_work() if hpage_freelist was previously
	 * empty.  Otherwise, schedule_work() has already been called but the
	 * workfn hasn't retrieved the list yet.
	 */
	if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
		schedule_work(&free_hpage_work);
}
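/*
 * Illustrative note (not part of the original source): llist_add() returns
 * true only when the list was previously empty.  So if two folios are
 * queued back to back from atomic context, the first llist_add() schedules
 * free_hpage_work and the second merely links onto the pending list; the
 * workfn then drains both.
 */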
static void bulk_vmemmap_restore_error(struct hstate *h,
				       struct list_head *folio_list,
				       struct list_head *non_hvo_folios)
{
	struct folio *folio, *t_folio;

	if (!list_empty(non_hvo_folios)) {
		/*
		 * Free any restored hugetlb pages so that restore of the
		 * entire list can be retried.
		 * The idea is that in the common case of ENOMEM errors freeing
		 * hugetlb pages with vmemmap we will free up memory so that we
		 * can allocate vmemmap for more hugetlb pages.
		 */
		list_for_each_entry_safe(folio, t_folio, non_hvo_folios, lru) {
			list_del(&folio->lru);
			spin_lock_irq(&hugetlb_lock);
			__folio_clear_hugetlb(folio);
			spin_unlock_irq(&hugetlb_lock);
			update_and_free_hugetlb_folio(h, folio, false);
			cond_resched();
		}
	} else {
		/*
		 * In the case where there are no folios which can be
		 * immediately freed, we loop through the list trying to restore
		 * vmemmap individually in the hope that someone elsewhere may
		 * have done something to cause success (such as freeing some
		 * memory).  If unable to restore a hugetlb page, the hugetlb
		 * page is made a surplus page and removed from the list.
		 * If we are able to restore vmemmap and free one hugetlb page,
		 * we quit processing the list to retry the bulk operation.
		 */
		list_for_each_entry_safe(folio, t_folio, folio_list, lru)
			if (hugetlb_vmemmap_restore_folio(h, folio)) {
				list_del(&folio->lru);
				spin_lock_irq(&hugetlb_lock);
				add_hugetlb_folio(h, folio, true);
				spin_unlock_irq(&hugetlb_lock);
			} else {
				list_del(&folio->lru);
				spin_lock_irq(&hugetlb_lock);
				__folio_clear_hugetlb(folio);
				spin_unlock_irq(&hugetlb_lock);
				update_and_free_hugetlb_folio(h, folio, false);
				cond_resched();
				break;
			}
	}
}

static void update_and_free_pages_bulk(struct hstate *h,
				       struct list_head *folio_list)
{
	long ret;
	struct folio *folio, *t_folio;
	LIST_HEAD(non_hvo_folios);

	/*
	 * First allocate required vmemmap (if necessary) for all folios.
	 * Carefully handle errors and free up any available hugetlb pages
	 * in an effort to make forward progress.
	 */
retry:
	ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
	if (ret < 0) {
		bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios);
		goto retry;
	}

	/*
	 * At this point, the list should be empty, ret should be >= 0 and
	 * there should only be pages on the non_hvo_folios list.
	 * Do note that the non_hvo_folios list could be empty.
	 * Without HVO enabled, ret will be 0 and there is no need to call
	 * __folio_clear_hugetlb as this was done previously.
	 */
	VM_WARN_ON(!list_empty(folio_list));
	VM_WARN_ON(ret < 0);
	if (!list_empty(&non_hvo_folios) && ret) {
		spin_lock_irq(&hugetlb_lock);
		list_for_each_entry(folio, &non_hvo_folios, lru)
			__folio_clear_hugetlb(folio);
		spin_unlock_irq(&hugetlb_lock);
	}

	list_for_each_entry_safe(folio, t_folio, &non_hvo_folios, lru) {
		update_and_free_hugetlb_folio(h, folio, false);
		cond_resched();
	}
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}
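/*
 * Illustrative note (not part of the original source): on a configuration
 * with 2 MiB huge pages registered, size_to_hstate(SZ_2M) would return the
 * corresponding hstate; for a size matching no registered hstate it
 * returns NULL.
 */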
1881 void free_huge_folio(struct folio *folio)
1882 {
1883 /*
1884 * Can't pass hstate in here because it is called from the
1885 * generic mm code.
1886 */
1887 struct hstate *h = folio_hstate(folio);
1888 int nid = folio_nid(folio);
1889 struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
1890 bool restore_reserve;
1891 unsigned long flags;
1892
1893 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1894 VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);
1895
1896 hugetlb_set_folio_subpool(folio, NULL);
1897 if (folio_test_anon(folio))
1898 __ClearPageAnonExclusive(&folio->page);
1899 folio->mapping = NULL;
1900 restore_reserve = folio_test_hugetlb_restore_reserve(folio);
1901 folio_clear_hugetlb_restore_reserve(folio);
1902
1903 /*
1904 * If HPageRestoreReserve was set on the page, page allocation consumed a
1905 * reservation. If the page was associated with a subpool, there
1906 * would have been a page reserved in the subpool before allocation
1907 * via hugepage_subpool_get_pages(). Since we are 'restoring' the
1908 * reservation, do not call hugepage_subpool_put_pages() as this will
1909 * remove the reserved page from the subpool.
1910 */
1911 if (!restore_reserve) {
1912 /*
1913 * A return code of zero implies that the subpool will be
1914 * under its minimum size if the reservation is not restored
1915 * after the page is freed. Therefore, force the restore_reserve
1916 * operation.
1917 */
1918 if (hugepage_subpool_put_pages(spool, 1) == 0)
1919 restore_reserve = true;
1920 }
1921
1922 spin_lock_irqsave(&hugetlb_lock, flags);
1923 folio_clear_hugetlb_migratable(folio);
1924 hugetlb_cgroup_uncharge_folio(hstate_index(h),
1925 pages_per_huge_page(h), folio);
1926 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
1927 pages_per_huge_page(h), folio);
1928 lruvec_stat_mod_folio(folio, NR_HUGETLB, -pages_per_huge_page(h));
1929 mem_cgroup_uncharge(folio);
1930 if (restore_reserve)
1931 h->resv_huge_pages++;
1932
1933 if (folio_test_hugetlb_temporary(folio)) {
1934 remove_hugetlb_folio(h, folio, false);
1935 spin_unlock_irqrestore(&hugetlb_lock, flags);
1936 update_and_free_hugetlb_folio(h, folio, true);
1937 } else if (h->surplus_huge_pages_node[nid]) {
1938 /* remove the page from active list */
1939 remove_hugetlb_folio(h, folio, true);
1940 spin_unlock_irqrestore(&hugetlb_lock, flags);
1941 update_and_free_hugetlb_folio(h, folio, true);
1942 } else {
1943 arch_clear_hugetlb_flags(folio);
1944 enqueue_hugetlb_folio(h, folio);
1945 spin_unlock_irqrestore(&hugetlb_lock, flags);
1946 }
1947 }
1948
1949 /*
1950 * Must be called with the hugetlb lock held.
1951 */
1952 static void __prep_account_new_huge_page(struct hstate *h, int nid)
1953 {
1954 lockdep_assert_held(&hugetlb_lock);
1955 h->nr_huge_pages++;
1956 h->nr_huge_pages_node[nid]++;
1957 }
1958
1959 static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio)
1960 {
1961 __folio_set_hugetlb(folio);
1962 INIT_LIST_HEAD(&folio->lru);
1963 hugetlb_set_folio_subpool(folio, NULL);
1964 set_hugetlb_cgroup(folio, NULL);
1965 set_hugetlb_cgroup_rsvd(folio, NULL);
1966 }
1967
1968 static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
1969 {
1970 init_new_hugetlb_folio(h, folio);
1971 hugetlb_vmemmap_optimize_folio(h, folio);
1972 }
1973
1974 static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid)
1975 {
1976 __prep_new_hugetlb_folio(h, folio);
1977 spin_lock_irq(&hugetlb_lock);
1978 __prep_account_new_huge_page(h, nid);
1979 spin_unlock_irq(&hugetlb_lock);
1980 }
1981
1982 /*
1983 * Find and lock address space (mapping) in write mode.
1984 *
1985 * Upon entry, the folio is locked which means that folio_mapping() is
1986 * stable. Due to locking order, we can only trylock_write. If we can
1987 * not get the lock, simply return NULL to the caller.
1988 */
1989 struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
1990 {
1991 struct address_space *mapping = folio_mapping(folio);
1992
1993 if (!mapping)
1994 return mapping;
1995
1996 if (i_mmap_trylock_write(mapping))
1997 return mapping;
1998
1999 return NULL;
2000 }
2001
2002 static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
2003 gfp_t gfp_mask, int nid, nodemask_t *nmask,
2004 nodemask_t *node_alloc_noretry)
2005 {
2006 int order = huge_page_order(h);
2007 struct folio *folio;
2008 bool alloc_try_hard = true;
2009 bool retry = true;
2010
2011 /*
2012 * By default we always try hard to allocate the folio with
2013 * __GFP_RETRY_MAYFAIL flag. However, if we are allocating folios in
2014 * a loop (to adjust global huge page counts) and previous allocation
2015 * failed, do not continue to try hard on the same node. Use the
2016 * node_alloc_noretry bitmap to manage this state information.
2017 */ 2018 if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry)) 2019 alloc_try_hard = false; 2020 if (alloc_try_hard) 2021 gfp_mask |= __GFP_RETRY_MAYFAIL; 2022 if (nid == NUMA_NO_NODE) 2023 nid = numa_mem_id(); 2024 retry: 2025 folio = __folio_alloc(gfp_mask, order, nid, nmask); 2026 /* Ensure hugetlb folio won't have large_rmappable flag set. */ 2027 if (folio) 2028 folio_clear_large_rmappable(folio); 2029 2030 if (folio && !folio_ref_freeze(folio, 1)) { 2031 folio_put(folio); 2032 if (retry) { /* retry once */ 2033 retry = false; 2034 goto retry; 2035 } 2036 /* WOW! twice in a row. */ 2037 pr_warn("HugeTLB unexpected inflated folio ref count\n"); 2038 folio = NULL; 2039 } 2040 2041 /* 2042 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a 2043 * folio this indicates an overall state change. Clear bit so 2044 * that we resume normal 'try hard' allocations. 2045 */ 2046 if (node_alloc_noretry && folio && !alloc_try_hard) 2047 node_clear(nid, *node_alloc_noretry); 2048 2049 /* 2050 * If we tried hard to get a folio but failed, set bit so that 2051 * subsequent attempts will not try as hard until there is an 2052 * overall state change. 2053 */ 2054 if (node_alloc_noretry && !folio && alloc_try_hard) 2055 node_set(nid, *node_alloc_noretry); 2056 2057 if (!folio) { 2058 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); 2059 return NULL; 2060 } 2061 2062 __count_vm_event(HTLB_BUDDY_PGALLOC); 2063 return folio; 2064 } 2065 2066 static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h, 2067 gfp_t gfp_mask, int nid, nodemask_t *nmask, 2068 nodemask_t *node_alloc_noretry) 2069 { 2070 struct folio *folio; 2071 2072 if (hstate_is_gigantic(h)) 2073 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask); 2074 else 2075 folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, node_alloc_noretry); 2076 if (folio) 2077 init_new_hugetlb_folio(h, folio); 2078 return folio; 2079 } 2080 2081 /* 2082 * Common helper to allocate a fresh hugetlb page. All specific allocators 2083 * should use this function to get new hugetlb pages 2084 * 2085 * Note that returned page is 'frozen': ref count of head page and all tail 2086 * pages is zero. 2087 */ 2088 static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h, 2089 gfp_t gfp_mask, int nid, nodemask_t *nmask) 2090 { 2091 struct folio *folio; 2092 2093 if (hstate_is_gigantic(h)) 2094 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask); 2095 else 2096 folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, NULL); 2097 if (!folio) 2098 return NULL; 2099 2100 prep_new_hugetlb_folio(h, folio, folio_nid(folio)); 2101 return folio; 2102 } 2103 2104 static void prep_and_add_allocated_folios(struct hstate *h, 2105 struct list_head *folio_list) 2106 { 2107 unsigned long flags; 2108 struct folio *folio, *tmp_f; 2109 2110 /* Send list for bulk vmemmap optimization processing */ 2111 hugetlb_vmemmap_optimize_folios(h, folio_list); 2112 2113 /* Add all new pool pages to free lists in one lock cycle */ 2114 spin_lock_irqsave(&hugetlb_lock, flags); 2115 list_for_each_entry_safe(folio, tmp_f, folio_list, lru) { 2116 __prep_account_new_huge_page(h, folio_nid(folio)); 2117 enqueue_hugetlb_folio(h, folio); 2118 } 2119 spin_unlock_irqrestore(&hugetlb_lock, flags); 2120 } 2121 2122 /* 2123 * Allocates a fresh hugetlb page in a node interleaved manner. The page 2124 * will later be added to the appropriate hugetlb pool. 
2125 */
2126 static struct folio *alloc_pool_huge_folio(struct hstate *h,
2127 nodemask_t *nodes_allowed,
2128 nodemask_t *node_alloc_noretry,
2129 int *next_node)
2130 {
2131 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2132 int nr_nodes, node;
2133
2134 for_each_node_mask_to_alloc(next_node, nr_nodes, node, nodes_allowed) {
2135 struct folio *folio;
2136
2137 folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, node,
2138 nodes_allowed, node_alloc_noretry);
2139 if (folio)
2140 return folio;
2141 }
2142
2143 return NULL;
2144 }
2145
2146 /*
2147 * Remove a huge page from the pool, from the next node to free. Attempt to
2148 * keep persistent huge pages more or less balanced over the allowed nodes.
2149 * This routine only 'removes' the hugetlb page. The caller must make
2150 * an additional call to free the page to low level allocators.
2151 * Called with hugetlb_lock locked.
2152 */
2153 static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
2154 nodemask_t *nodes_allowed, bool acct_surplus)
2155 {
2156 int nr_nodes, node;
2157 struct folio *folio = NULL;
2158
2159 lockdep_assert_held(&hugetlb_lock);
2160 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2161 /*
2162 * If we're returning unused surplus pages, only examine
2163 * nodes with surplus pages.
2164 */
2165 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
2166 !list_empty(&h->hugepage_freelists[node])) {
2167 folio = list_entry(h->hugepage_freelists[node].next,
2168 struct folio, lru);
2169 remove_hugetlb_folio(h, folio, acct_surplus);
2170 break;
2171 }
2172 }
2173
2174 return folio;
2175 }
2176
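/*
 * Editorial aside: the for_each_node_mask_to_alloc()/_to_free() iterators
 * used above implement simple round-robin balancing - remember the node used
 * last and resume from its successor, wrapping around the allowed mask. A
 * stand-alone sketch of that cursor logic (the demo_* name is hypothetical,
 * not part of this file):
 */
static int demo_next_node_round_robin(int *cursor, const nodemask_t *allowed)
{
	int nid = next_node_in(*cursor, *allowed);

	if (nid < MAX_NUMNODES)
		*cursor = nid;	/* resume after this node next time */
	return nid;		/* MAX_NUMNODES if the mask is empty */
}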
2177 /*
2178 * Dissolve a given free hugetlb folio into free buddy pages. This function
2179 * does nothing for in-use hugetlb folios and non-hugetlb folios.
2180 * This function returns values like below:
2181 *
2182 * -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
2183 * when the system is under memory pressure and the feature of
2184 * freeing unused vmemmap pages associated with each hugetlb page
2185 * is enabled.
2186 * -EBUSY: failed to dissolve free hugepages or the hugepage is in-use
2187 * (allocated or reserved.)
2188 * 0: successfully dissolved free hugepages or the page is not a
2189 * hugepage (considered as already dissolved)
2190 */
2191 int dissolve_free_hugetlb_folio(struct folio *folio)
2192 {
2193 int rc = -EBUSY;
2194
2195 retry:
2196 /* Not to disrupt normal path by vainly holding hugetlb_lock */
2197 if (!folio_test_hugetlb(folio))
2198 return 0;
2199
2200 spin_lock_irq(&hugetlb_lock);
2201 if (!folio_test_hugetlb(folio)) {
2202 rc = 0;
2203 goto out;
2204 }
2205
2206 if (!folio_ref_count(folio)) {
2207 struct hstate *h = folio_hstate(folio);
2208 if (!available_huge_pages(h))
2209 goto out;
2210
2211 /*
2212 * We should make sure that the page is already on the free list
2213 * when it is dissolved.
2214 */
2215 if (unlikely(!folio_test_hugetlb_freed(folio))) {
2216 spin_unlock_irq(&hugetlb_lock);
2217 cond_resched();
2218
2219 /*
2220 * Theoretically, we should return -EBUSY when we
2221 * encounter this race. In practice, we have a good
2222 * chance to successfully dissolve the page if we
2223 * retry, because the race window is quite small.
2224 * Seizing this opportunity is an optimization that
2225 * increases the success rate of dissolving the page.
2226 */
2227 goto retry;
2228 }
2229
2230 remove_hugetlb_folio(h, folio, false);
2231 h->max_huge_pages--;
2232 spin_unlock_irq(&hugetlb_lock);
2233
2234 /*
2235 * Normally update_and_free_hugetlb_folio will allocate the required
2236 * vmemmap before freeing the page. update_and_free_hugetlb_folio will
2237 * fail to free the page if it can not allocate the required vmemmap. We
2238 * need to adjust max_huge_pages if the page is not freed.
2239 * Attempt to allocate vmemmap here so that we can take
2240 * appropriate action on failure.
2241 *
2242 * The folio_test_hugetlb check here is because
2243 * remove_hugetlb_folio will clear the hugetlb folio flag for
2244 * non-vmemmap optimized hugetlb folios.
2245 */
2246 if (folio_test_hugetlb(folio)) {
2247 rc = hugetlb_vmemmap_restore_folio(h, folio);
2248 if (rc) {
2249 spin_lock_irq(&hugetlb_lock);
2250 add_hugetlb_folio(h, folio, false);
2251 h->max_huge_pages++;
2252 goto out;
2253 }
2254 } else
2255 rc = 0;
2256
2257 update_and_free_hugetlb_folio(h, folio, false);
2258 return rc;
2259 }
2260 out:
2261 spin_unlock_irq(&hugetlb_lock);
2262 return rc;
2263 }
2264
2265 /*
2266 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2267 * make specified memory blocks removable from the system.
2268 * Note that this will dissolve a free gigantic hugepage completely, if any
2269 * part of it lies within the given range.
2270 * Also note that if dissolve_free_hugetlb_folio() returns with an error, all
2271 * free hugetlb folios that were dissolved before that error are lost.
2272 */
2273 int dissolve_free_hugetlb_folios(unsigned long start_pfn, unsigned long end_pfn)
2274 {
2275 unsigned long pfn;
2276 struct folio *folio;
2277 int rc = 0;
2278 unsigned int order;
2279 struct hstate *h;
2280
2281 if (!hugepages_supported())
2282 return rc;
2283
2284 order = huge_page_order(&default_hstate);
2285 for_each_hstate(h)
2286 order = min(order, huge_page_order(h));
2287
2288 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
2289 folio = pfn_folio(pfn);
2290 rc = dissolve_free_hugetlb_folio(folio);
2291 if (rc)
2292 break;
2293 }
2294
2295 return rc;
2296 }
2297
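/*
 * Editorial aside on the stride used by dissolve_free_hugetlb_folios()
 * above: every hugepage head is aligned to its own order, and every order is
 * at least the minimum order across all hstates, so stepping by
 * 1 << min_order visits every possible head in the range (assuming start_pfn
 * is at least that aligned, as the memory-hotplug callers arrange). A
 * stand-alone sketch of the same walk (the demo_* name is hypothetical, not
 * part of this file):
 */
static int demo_dissolve_range(unsigned long start_pfn, unsigned long end_pfn,
			       unsigned int min_order)
{
	unsigned long pfn;
	int rc = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += 1UL << min_order) {
		rc = dissolve_free_hugetlb_folio(pfn_folio(pfn));
		if (rc)
			break;	/* earlier dissolutions are not undone */
	}
	return rc;
}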
2298 /*
2299 * Allocates a fresh surplus page from the page allocator.
2300 */
2301 static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
2302 gfp_t gfp_mask, int nid, nodemask_t *nmask)
2303 {
2304 struct folio *folio = NULL;
2305
2306 if (hstate_is_gigantic(h))
2307 return NULL;
2308
2309 spin_lock_irq(&hugetlb_lock);
2310 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
2311 goto out_unlock;
2312 spin_unlock_irq(&hugetlb_lock);
2313
2314 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
2315 if (!folio)
2316 return NULL;
2317
2318 spin_lock_irq(&hugetlb_lock);
2319 /*
2320 * We could have raced with the pool size change.
2321 * Double check that and simply deallocate the new page
2322 * if we would end up overcommitting the surpluses. Abuse the
2323 * temporary page flag to work around the nasty free_huge_folio
2324 * code flow.
2325 */
2326 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
2327 folio_set_hugetlb_temporary(folio);
2328 spin_unlock_irq(&hugetlb_lock);
2329 free_huge_folio(folio);
2330 return NULL;
2331 }
2332
2333 h->surplus_huge_pages++;
2334 h->surplus_huge_pages_node[folio_nid(folio)]++;
2335
2336 out_unlock:
2337 spin_unlock_irq(&hugetlb_lock);
2338
2339 return folio;
2340 }
2341
2342 static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
2343 int nid, nodemask_t *nmask)
2344 {
2345 struct folio *folio;
2346
2347 if (hstate_is_gigantic(h))
2348 return NULL;
2349
2350 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
2351 if (!folio)
2352 return NULL;
2353
2354 /* fresh huge pages are frozen */
2355 folio_ref_unfreeze(folio, 1);
2356 /*
2357 * We do not account these pages as surplus because they are only
2358 * temporary and will be released properly on the last reference.
2359 */
2360 folio_set_hugetlb_temporary(folio);
2361
2362 return folio;
2363 }
2364
2365 /*
2366 * Use the VMA's mpolicy to allocate a huge page from the buddy.
2367 */
2368 static
2369 struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
2370 struct vm_area_struct *vma, unsigned long addr)
2371 {
2372 struct folio *folio = NULL;
2373 struct mempolicy *mpol;
2374 gfp_t gfp_mask = htlb_alloc_mask(h);
2375 int nid;
2376 nodemask_t *nodemask;
2377
2378 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
2379 if (mpol_is_preferred_many(mpol)) {
2380 gfp_t gfp = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2381
2382 folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);
2383
2384 /* Fall back to all nodes if folio == NULL */
2385 nodemask = NULL;
2386 }
2387
2388 if (!folio)
2389 folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
2390 mpol_cond_put(mpol);
2391 return folio;
2392 }
2393
2394 struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
2395 nodemask_t *nmask, gfp_t gfp_mask)
2396 {
2397 struct folio *folio;
2398
2399 spin_lock_irq(&hugetlb_lock);
2400 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, preferred_nid,
2401 nmask);
2402 if (folio) {
2403 VM_BUG_ON(!h->resv_huge_pages);
2404 h->resv_huge_pages--;
2405 }
2406
2407 spin_unlock_irq(&hugetlb_lock);
2408 return folio;
2409 }
2410
2411 /* folio migration callback function */
2412 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
2413 nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback)
2414 {
2415 spin_lock_irq(&hugetlb_lock);
2416 if (available_huge_pages(h)) {
2417 struct folio *folio;
2418
2419 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
2420 preferred_nid, nmask);
2421 if (folio) {
2422 spin_unlock_irq(&hugetlb_lock);
2423 return folio;
2424 }
2425 }
2426 spin_unlock_irq(&hugetlb_lock);
2427
2428 /* We cannot fall back to other nodes, as we could break the per-node pool.
*/ 2429 if (!allow_alloc_fallback) 2430 gfp_mask |= __GFP_THISNODE; 2431 2432 return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask); 2433 } 2434 2435 static nodemask_t *policy_mbind_nodemask(gfp_t gfp) 2436 { 2437 #ifdef CONFIG_NUMA 2438 struct mempolicy *mpol = get_task_policy(current); 2439 2440 /* 2441 * Only enforce MPOL_BIND policy which overlaps with cpuset policy 2442 * (from policy_nodemask) specifically for hugetlb case 2443 */ 2444 if (mpol->mode == MPOL_BIND && 2445 (apply_policy_zone(mpol, gfp_zone(gfp)) && 2446 cpuset_nodemask_valid_mems_allowed(&mpol->nodes))) 2447 return &mpol->nodes; 2448 #endif 2449 return NULL; 2450 } 2451 2452 /* 2453 * Increase the hugetlb pool such that it can accommodate a reservation 2454 * of size 'delta'. 2455 */ 2456 static int gather_surplus_pages(struct hstate *h, long delta) 2457 __must_hold(&hugetlb_lock) 2458 { 2459 LIST_HEAD(surplus_list); 2460 struct folio *folio, *tmp; 2461 int ret; 2462 long i; 2463 long needed, allocated; 2464 bool alloc_ok = true; 2465 int node; 2466 nodemask_t *mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h)); 2467 2468 lockdep_assert_held(&hugetlb_lock); 2469 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; 2470 if (needed <= 0) { 2471 h->resv_huge_pages += delta; 2472 return 0; 2473 } 2474 2475 allocated = 0; 2476 2477 ret = -ENOMEM; 2478 retry: 2479 spin_unlock_irq(&hugetlb_lock); 2480 for (i = 0; i < needed; i++) { 2481 folio = NULL; 2482 for_each_node_mask(node, cpuset_current_mems_allowed) { 2483 if (!mbind_nodemask || node_isset(node, *mbind_nodemask)) { 2484 folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), 2485 node, NULL); 2486 if (folio) 2487 break; 2488 } 2489 } 2490 if (!folio) { 2491 alloc_ok = false; 2492 break; 2493 } 2494 list_add(&folio->lru, &surplus_list); 2495 cond_resched(); 2496 } 2497 allocated += i; 2498 2499 /* 2500 * After retaking hugetlb_lock, we need to recalculate 'needed' 2501 * because either resv_huge_pages or free_huge_pages may have changed. 2502 */ 2503 spin_lock_irq(&hugetlb_lock); 2504 needed = (h->resv_huge_pages + delta) - 2505 (h->free_huge_pages + allocated); 2506 if (needed > 0) { 2507 if (alloc_ok) 2508 goto retry; 2509 /* 2510 * We were not able to allocate enough pages to 2511 * satisfy the entire reservation so we free what 2512 * we've allocated so far. 2513 */ 2514 goto free; 2515 } 2516 /* 2517 * The surplus_list now contains _at_least_ the number of extra pages 2518 * needed to accommodate the reservation. Add the appropriate number 2519 * of pages to the hugetlb pool and free the extras back to the buddy 2520 * allocator. Commit the entire reservation here to prevent another 2521 * process from stealing the pages as they are added to the pool but 2522 * before they are reserved. 2523 */ 2524 needed += allocated; 2525 h->resv_huge_pages += delta; 2526 ret = 0; 2527 2528 /* Free the needed pages to the hugetlb pool */ 2529 list_for_each_entry_safe(folio, tmp, &surplus_list, lru) { 2530 if ((--needed) < 0) 2531 break; 2532 /* Add the page to the hugetlb allocator */ 2533 enqueue_hugetlb_folio(h, folio); 2534 } 2535 free: 2536 spin_unlock_irq(&hugetlb_lock); 2537 2538 /* 2539 * Free unnecessary surplus pages to the buddy allocator. 2540 * Pages have no ref count, call free_huge_folio directly. 
2541 */ 2542 list_for_each_entry_safe(folio, tmp, &surplus_list, lru) 2543 free_huge_folio(folio); 2544 spin_lock_irq(&hugetlb_lock); 2545 2546 return ret; 2547 } 2548 2549 /* 2550 * This routine has two main purposes: 2551 * 1) Decrement the reservation count (resv_huge_pages) by the value passed 2552 * in unused_resv_pages. This corresponds to the prior adjustments made 2553 * to the associated reservation map. 2554 * 2) Free any unused surplus pages that may have been allocated to satisfy 2555 * the reservation. As many as unused_resv_pages may be freed. 2556 */ 2557 static void return_unused_surplus_pages(struct hstate *h, 2558 unsigned long unused_resv_pages) 2559 { 2560 unsigned long nr_pages; 2561 LIST_HEAD(page_list); 2562 2563 lockdep_assert_held(&hugetlb_lock); 2564 /* Uncommit the reservation */ 2565 h->resv_huge_pages -= unused_resv_pages; 2566 2567 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 2568 goto out; 2569 2570 /* 2571 * Part (or even all) of the reservation could have been backed 2572 * by pre-allocated pages. Only free surplus pages. 2573 */ 2574 nr_pages = min(unused_resv_pages, h->surplus_huge_pages); 2575 2576 /* 2577 * We want to release as many surplus pages as possible, spread 2578 * evenly across all nodes with memory. Iterate across these nodes 2579 * until we can no longer free unreserved surplus pages. This occurs 2580 * when the nodes with surplus pages have no free pages. 2581 * remove_pool_hugetlb_folio() will balance the freed pages across the 2582 * on-line nodes with memory and will handle the hstate accounting. 2583 */ 2584 while (nr_pages--) { 2585 struct folio *folio; 2586 2587 folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], 1); 2588 if (!folio) 2589 goto out; 2590 2591 list_add(&folio->lru, &page_list); 2592 } 2593 2594 out: 2595 spin_unlock_irq(&hugetlb_lock); 2596 update_and_free_pages_bulk(h, &page_list); 2597 spin_lock_irq(&hugetlb_lock); 2598 } 2599 2600 2601 /* 2602 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation 2603 * are used by the huge page allocation routines to manage reservations. 2604 * 2605 * vma_needs_reservation is called to determine if the huge page at addr 2606 * within the vma has an associated reservation. If a reservation is 2607 * needed, the value 1 is returned. The caller is then responsible for 2608 * managing the global reservation and subpool usage counts. After 2609 * the huge page has been allocated, vma_commit_reservation is called 2610 * to add the page to the reservation map. If the page allocation fails, 2611 * the reservation must be ended instead of committed. vma_end_reservation 2612 * is called in such cases. 2613 * 2614 * In the normal case, vma_commit_reservation returns the same value 2615 * as the preceding vma_needs_reservation call. The only time this 2616 * is not the case is if a reserve map was changed between calls. It 2617 * is the responsibility of the caller to notice the difference and 2618 * take appropriate action. 2619 * 2620 * vma_add_reservation is used in error paths where a reservation must 2621 * be restored when a newly allocated huge page must be freed. It is 2622 * to be called after calling vma_needs_reservation to determine if a 2623 * reservation exists. 2624 * 2625 * vma_del_reservation is used in error paths where an entry in the reserve 2626 * map was created during huge page allocation and must be removed. It is to 2627 * be called after calling vma_needs_reservation to determine if a reservation 2628 * exists. 
2629 */
2630 enum vma_resv_mode {
2631 VMA_NEEDS_RESV,
2632 VMA_COMMIT_RESV,
2633 VMA_END_RESV,
2634 VMA_ADD_RESV,
2635 VMA_DEL_RESV,
2636 };
2637 static long __vma_reservation_common(struct hstate *h,
2638 struct vm_area_struct *vma, unsigned long addr,
2639 enum vma_resv_mode mode)
2640 {
2641 struct resv_map *resv;
2642 pgoff_t idx;
2643 long ret;
2644 long dummy_out_regions_needed;
2645
2646 resv = vma_resv_map(vma);
2647 if (!resv)
2648 return 1;
2649
2650 idx = vma_hugecache_offset(h, vma, addr);
2651 switch (mode) {
2652 case VMA_NEEDS_RESV:
2653 ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2654 /* We assume that vma_reservation_* routines always operate on
2655 * 1 page, and that adding to resv map a 1 page entry can only
2656 * ever require 1 region.
2657 */
2658 VM_BUG_ON(dummy_out_regions_needed != 1);
2659 break;
2660 case VMA_COMMIT_RESV:
2661 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2662 /* region_add calls of range 1 should never fail. */
2663 VM_BUG_ON(ret < 0);
2664 break;
2665 case VMA_END_RESV:
2666 region_abort(resv, idx, idx + 1, 1);
2667 ret = 0;
2668 break;
2669 case VMA_ADD_RESV:
2670 if (vma->vm_flags & VM_MAYSHARE) {
2671 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2672 /* region_add calls of range 1 should never fail. */
2673 VM_BUG_ON(ret < 0);
2674 } else {
2675 region_abort(resv, idx, idx + 1, 1);
2676 ret = region_del(resv, idx, idx + 1);
2677 }
2678 break;
2679 case VMA_DEL_RESV:
2680 if (vma->vm_flags & VM_MAYSHARE) {
2681 region_abort(resv, idx, idx + 1, 1);
2682 ret = region_del(resv, idx, idx + 1);
2683 } else {
2684 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2685 /* region_add calls of range 1 should never fail. */
2686 VM_BUG_ON(ret < 0);
2687 }
2688 break;
2689 default:
2690 BUG();
2691 }
2692
2693 if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
2694 return ret;
2695 /*
2696 * We know a private mapping must have HPAGE_RESV_OWNER set.
2697 *
2698 * In most cases, reserves always exist for private mappings.
2699 * However, a file associated with the mapping could have been
2700 * hole punched or truncated after reserves were consumed, and a
2701 * subsequent fault on such a range will not use reserves.
2702 * Subtle - the reserve map for private mappings has the
2703 * opposite meaning from that of shared mappings. If NO
2704 * entry is in the reserve map, it means a reservation exists.
2705 * If an entry exists in the reserve map, it means the
2706 * reservation has already been consumed. As a result, the
2707 * return value of this routine is the opposite of the
2708 * value returned from reserve map manipulation routines above.
2709 */ 2710 if (ret > 0) 2711 return 0; 2712 if (ret == 0) 2713 return 1; 2714 return ret; 2715 } 2716 2717 static long vma_needs_reservation(struct hstate *h, 2718 struct vm_area_struct *vma, unsigned long addr) 2719 { 2720 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); 2721 } 2722 2723 static long vma_commit_reservation(struct hstate *h, 2724 struct vm_area_struct *vma, unsigned long addr) 2725 { 2726 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); 2727 } 2728 2729 static void vma_end_reservation(struct hstate *h, 2730 struct vm_area_struct *vma, unsigned long addr) 2731 { 2732 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); 2733 } 2734 2735 static long vma_add_reservation(struct hstate *h, 2736 struct vm_area_struct *vma, unsigned long addr) 2737 { 2738 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); 2739 } 2740 2741 static long vma_del_reservation(struct hstate *h, 2742 struct vm_area_struct *vma, unsigned long addr) 2743 { 2744 return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV); 2745 } 2746 2747 /* 2748 * This routine is called to restore reservation information on error paths. 2749 * It should ONLY be called for folios allocated via alloc_hugetlb_folio(), 2750 * and the hugetlb mutex should remain held when calling this routine. 2751 * 2752 * It handles two specific cases: 2753 * 1) A reservation was in place and the folio consumed the reservation. 2754 * hugetlb_restore_reserve is set in the folio. 2755 * 2) No reservation was in place for the page, so hugetlb_restore_reserve is 2756 * not set. However, alloc_hugetlb_folio always updates the reserve map. 2757 * 2758 * In case 1, free_huge_folio later in the error path will increment the 2759 * global reserve count. But, free_huge_folio does not have enough context 2760 * to adjust the reservation map. This case deals primarily with private 2761 * mappings. Adjust the reserve map here to be consistent with global 2762 * reserve count adjustments to be made by free_huge_folio. Make sure the 2763 * reserve map indicates there is a reservation present. 2764 * 2765 * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio. 2766 */ 2767 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, 2768 unsigned long address, struct folio *folio) 2769 { 2770 long rc = vma_needs_reservation(h, vma, address); 2771 2772 if (folio_test_hugetlb_restore_reserve(folio)) { 2773 if (unlikely(rc < 0)) 2774 /* 2775 * Rare out of memory condition in reserve map 2776 * manipulation. Clear hugetlb_restore_reserve so 2777 * that global reserve count will not be incremented 2778 * by free_huge_folio. This will make it appear 2779 * as though the reservation for this folio was 2780 * consumed. This may prevent the task from 2781 * faulting in the folio at a later time. This 2782 * is better than inconsistent global huge page 2783 * accounting of reserve counts. 2784 */ 2785 folio_clear_hugetlb_restore_reserve(folio); 2786 else if (rc) 2787 (void)vma_add_reservation(h, vma, address); 2788 else 2789 vma_end_reservation(h, vma, address); 2790 } else { 2791 if (!rc) { 2792 /* 2793 * This indicates there is an entry in the reserve map 2794 * not added by alloc_hugetlb_folio. We know it was added 2795 * before the alloc_hugetlb_folio call, otherwise 2796 * hugetlb_restore_reserve would be set on the folio. 2797 * Remove the entry so that a subsequent allocation 2798 * does not consume a reservation. 
2799 */ 2800 rc = vma_del_reservation(h, vma, address); 2801 if (rc < 0) 2802 /* 2803 * VERY rare out of memory condition. Since 2804 * we can not delete the entry, set 2805 * hugetlb_restore_reserve so that the reserve 2806 * count will be incremented when the folio 2807 * is freed. This reserve will be consumed 2808 * on a subsequent allocation. 2809 */ 2810 folio_set_hugetlb_restore_reserve(folio); 2811 } else if (rc < 0) { 2812 /* 2813 * Rare out of memory condition from 2814 * vma_needs_reservation call. Memory allocation is 2815 * only attempted if a new entry is needed. Therefore, 2816 * this implies there is not an entry in the 2817 * reserve map. 2818 * 2819 * For shared mappings, no entry in the map indicates 2820 * no reservation. We are done. 2821 */ 2822 if (!(vma->vm_flags & VM_MAYSHARE)) 2823 /* 2824 * For private mappings, no entry indicates 2825 * a reservation is present. Since we can 2826 * not add an entry, set hugetlb_restore_reserve 2827 * on the folio so reserve count will be 2828 * incremented when freed. This reserve will 2829 * be consumed on a subsequent allocation. 2830 */ 2831 folio_set_hugetlb_restore_reserve(folio); 2832 } else 2833 /* 2834 * No reservation present, do nothing 2835 */ 2836 vma_end_reservation(h, vma, address); 2837 } 2838 } 2839 2840 /* 2841 * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve 2842 * the old one 2843 * @h: struct hstate old page belongs to 2844 * @old_folio: Old folio to dissolve 2845 * @list: List to isolate the page in case we need to 2846 * Returns 0 on success, otherwise negated error. 2847 */ 2848 static int alloc_and_dissolve_hugetlb_folio(struct hstate *h, 2849 struct folio *old_folio, struct list_head *list) 2850 { 2851 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 2852 int nid = folio_nid(old_folio); 2853 struct folio *new_folio = NULL; 2854 int ret = 0; 2855 2856 retry: 2857 spin_lock_irq(&hugetlb_lock); 2858 if (!folio_test_hugetlb(old_folio)) { 2859 /* 2860 * Freed from under us. Drop new_folio too. 2861 */ 2862 goto free_new; 2863 } else if (folio_ref_count(old_folio)) { 2864 bool isolated; 2865 2866 /* 2867 * Someone has grabbed the folio, try to isolate it here. 2868 * Fail with -EBUSY if not possible. 2869 */ 2870 spin_unlock_irq(&hugetlb_lock); 2871 isolated = isolate_hugetlb(old_folio, list); 2872 ret = isolated ? 0 : -EBUSY; 2873 spin_lock_irq(&hugetlb_lock); 2874 goto free_new; 2875 } else if (!folio_test_hugetlb_freed(old_folio)) { 2876 /* 2877 * Folio's refcount is 0 but it has not been enqueued in the 2878 * freelist yet. Race window is small, so we can succeed here if 2879 * we retry. 2880 */ 2881 spin_unlock_irq(&hugetlb_lock); 2882 cond_resched(); 2883 goto retry; 2884 } else { 2885 if (!new_folio) { 2886 spin_unlock_irq(&hugetlb_lock); 2887 new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, 2888 NULL, NULL); 2889 if (!new_folio) 2890 return -ENOMEM; 2891 __prep_new_hugetlb_folio(h, new_folio); 2892 goto retry; 2893 } 2894 2895 /* 2896 * Ok, old_folio is still a genuine free hugepage. Remove it from 2897 * the freelist and decrease the counters. These will be 2898 * incremented again when calling __prep_account_new_huge_page() 2899 * and enqueue_hugetlb_folio() for new_folio. The counters will 2900 * remain stable since this happens under the lock. 2901 */ 2902 remove_hugetlb_folio(h, old_folio, false); 2903 2904 /* 2905 * Ref count on new_folio is already zero as it was dropped 2906 * earlier. It can be directly added to the pool free list. 
2907 */ 2908 __prep_account_new_huge_page(h, nid); 2909 enqueue_hugetlb_folio(h, new_folio); 2910 2911 /* 2912 * Folio has been replaced, we can safely free the old one. 2913 */ 2914 spin_unlock_irq(&hugetlb_lock); 2915 update_and_free_hugetlb_folio(h, old_folio, false); 2916 } 2917 2918 return ret; 2919 2920 free_new: 2921 spin_unlock_irq(&hugetlb_lock); 2922 if (new_folio) 2923 update_and_free_hugetlb_folio(h, new_folio, false); 2924 2925 return ret; 2926 } 2927 2928 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) 2929 { 2930 struct hstate *h; 2931 struct folio *folio = page_folio(page); 2932 int ret = -EBUSY; 2933 2934 /* 2935 * The page might have been dissolved from under our feet, so make sure 2936 * to carefully check the state under the lock. 2937 * Return success when racing as if we dissolved the page ourselves. 2938 */ 2939 spin_lock_irq(&hugetlb_lock); 2940 if (folio_test_hugetlb(folio)) { 2941 h = folio_hstate(folio); 2942 } else { 2943 spin_unlock_irq(&hugetlb_lock); 2944 return 0; 2945 } 2946 spin_unlock_irq(&hugetlb_lock); 2947 2948 /* 2949 * Fence off gigantic pages as there is a cyclic dependency between 2950 * alloc_contig_range and them. Return -ENOMEM as this has the effect 2951 * of bailing out right away without further retrying. 2952 */ 2953 if (hstate_is_gigantic(h)) 2954 return -ENOMEM; 2955 2956 if (folio_ref_count(folio) && isolate_hugetlb(folio, list)) 2957 ret = 0; 2958 else if (!folio_ref_count(folio)) 2959 ret = alloc_and_dissolve_hugetlb_folio(h, folio, list); 2960 2961 return ret; 2962 } 2963 2964 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, 2965 unsigned long addr, int avoid_reserve) 2966 { 2967 struct hugepage_subpool *spool = subpool_vma(vma); 2968 struct hstate *h = hstate_vma(vma); 2969 struct folio *folio; 2970 long map_chg, map_commit, nr_pages = pages_per_huge_page(h); 2971 long gbl_chg; 2972 int memcg_charge_ret, ret, idx; 2973 struct hugetlb_cgroup *h_cg = NULL; 2974 struct mem_cgroup *memcg; 2975 bool deferred_reserve; 2976 gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL; 2977 2978 memcg = get_mem_cgroup_from_current(); 2979 memcg_charge_ret = mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages); 2980 if (memcg_charge_ret == -ENOMEM) { 2981 mem_cgroup_put(memcg); 2982 return ERR_PTR(-ENOMEM); 2983 } 2984 2985 idx = hstate_index(h); 2986 /* 2987 * Examine the region/reserve map to determine if the process 2988 * has a reservation for the page to be allocated. A return 2989 * code of zero indicates a reservation exists (no change). 2990 */ 2991 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); 2992 if (map_chg < 0) { 2993 if (!memcg_charge_ret) 2994 mem_cgroup_cancel_charge(memcg, nr_pages); 2995 mem_cgroup_put(memcg); 2996 return ERR_PTR(-ENOMEM); 2997 } 2998 2999 /* 3000 * Processes that did not create the mapping will have no 3001 * reserves as indicated by the region/reserve map. Check 3002 * that the allocation will not exceed the subpool limit. 3003 * Allocations for MAP_NORESERVE mappings also need to be 3004 * checked against any subpool limit. 3005 */ 3006 if (map_chg || avoid_reserve) { 3007 gbl_chg = hugepage_subpool_get_pages(spool, 1); 3008 if (gbl_chg < 0) 3009 goto out_end_reservation; 3010 3011 /* 3012 * Even though there was no reservation in the region/reserve 3013 * map, there could be reservations associated with the 3014 * subpool that can be used. This would be indicated if the 3015 * return value of hugepage_subpool_get_pages() is zero. 
3016 * However, if avoid_reserve is specified we still avoid even
3017 * the subpool reservations.
3018 */
3019 if (avoid_reserve)
3020 gbl_chg = 1;
3021 }
3022
3023 /* If this allocation is not consuming a reservation, charge it now.
3024 */
3025 deferred_reserve = map_chg || avoid_reserve;
3026 if (deferred_reserve) {
3027 ret = hugetlb_cgroup_charge_cgroup_rsvd(
3028 idx, pages_per_huge_page(h), &h_cg);
3029 if (ret)
3030 goto out_subpool_put;
3031 }
3032
3033 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
3034 if (ret)
3035 goto out_uncharge_cgroup_reservation;
3036
3037 spin_lock_irq(&hugetlb_lock);
3038 /*
3039 * gbl_chg is passed to indicate whether or not a page must be taken
3040 * from the global free pool (global change). gbl_chg == 0 indicates
3041 * a reservation exists for the allocation.
3042 */
3043 folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
3044 if (!folio) {
3045 spin_unlock_irq(&hugetlb_lock);
3046 folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
3047 if (!folio)
3048 goto out_uncharge_cgroup;
3049 spin_lock_irq(&hugetlb_lock);
3050 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
3051 folio_set_hugetlb_restore_reserve(folio);
3052 h->resv_huge_pages--;
3053 }
3054 list_add(&folio->lru, &h->hugepage_activelist);
3055 folio_ref_unfreeze(folio, 1);
3056 /* Fall through */
3057 }
3058
3059 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
3060 /* If allocation is not consuming a reservation, also store the
3061 * hugetlb_cgroup pointer on the page.
3062 */
3063 if (deferred_reserve) {
3064 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
3065 h_cg, folio);
3066 }
3067
3068 spin_unlock_irq(&hugetlb_lock);
3069
3070 hugetlb_set_folio_subpool(folio, spool);
3071
3072 map_commit = vma_commit_reservation(h, vma, addr);
3073 if (unlikely(map_chg > map_commit)) {
3074 /*
3075 * The page was added to the reservation map between
3076 * vma_needs_reservation and vma_commit_reservation.
3077 * This indicates a race with hugetlb_reserve_pages.
3078 * Adjust for the subpool count incremented above AND
3079 * in hugetlb_reserve_pages for the same page. Also,
3080 * the reservation count added in hugetlb_reserve_pages
3081 * no longer applies.
3082 */
3083 long rsv_adjust;
3084
3085 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
3086 hugetlb_acct_memory(h, -rsv_adjust);
3087 if (deferred_reserve) {
3088 spin_lock_irq(&hugetlb_lock);
3089 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
3090 pages_per_huge_page(h), folio);
3091 spin_unlock_irq(&hugetlb_lock);
3092 }
3093 }
3094
3095 if (!memcg_charge_ret)
3096 mem_cgroup_commit_charge(folio, memcg);
3097 lruvec_stat_mod_folio(folio, NR_HUGETLB, pages_per_huge_page(h));
3098 mem_cgroup_put(memcg);
3099
3100 return folio;
3101
3102 out_uncharge_cgroup:
3103 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
3104 out_uncharge_cgroup_reservation:
3105 if (deferred_reserve)
3106 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
3107 h_cg);
3108 out_subpool_put:
3109 if (map_chg || avoid_reserve)
3110 hugepage_subpool_put_pages(spool, 1);
3111 out_end_reservation:
3112 vma_end_reservation(h, vma, addr);
3113 if (!memcg_charge_ret)
3114 mem_cgroup_cancel_charge(memcg, nr_pages);
3115 mem_cgroup_put(memcg);
3116 return ERR_PTR(-ENOSPC);
3117 }
3118
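/*
 * Editorial aside summarizing the two charge values computed in
 * alloc_hugetlb_folio() above (a reading aid, not kernel documentation):
 *
 *   map_chg  gbl_chg  meaning
 *   -------  -------  -------
 *      0        0     reserve map entry exists; dequeue may consume an
 *                     existing reservation
 *      1        0     no map reservation, but the subpool's minimum size
 *                     already covers the page
 *      1        1     no reserve anywhere; a page must come out of the
 *                     global free pool
 *
 * avoid_reserve forces gbl_chg to 1 so that even an available subpool
 * reserve is left untouched. A hypothetical one-line predicate for the
 * decision (not part of this file):
 */
static inline bool demo_needs_global_page(long gbl_chg)
{
	return gbl_chg != 0;	/* gbl_chg == 0 means a reserve backs us */
}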
3119 int alloc_bootmem_huge_page(struct hstate *h, int nid)
3120 __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
3121 int __alloc_bootmem_huge_page(struct hstate *h, int nid)
3122 {
3123 struct huge_bootmem_page *m = NULL; /* initialize for clang */
3124 int nr_nodes, node = nid;
3125
3126 /* do node specific alloc */
3127 if (nid != NUMA_NO_NODE) {
3128 m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h),
3129 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
3130 if (!m)
3131 return 0;
3132 goto found;
3133 }
3134 /* allocate from next node when distributing huge pages */
3135 for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, &node_states[N_MEMORY]) {
3136 m = memblock_alloc_try_nid_raw(
3137 huge_page_size(h), huge_page_size(h),
3138 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
3139 /*
3140 * Use the beginning of the huge page to store the
3141 * huge_bootmem_page struct (until gather_bootmem
3142 * puts them into the mem_map).
3143 */
3144 if (!m)
3145 return 0;
3146 goto found;
3147 }
3148
3149 found:
3150
3151 /*
3152 * Only initialize the head struct page in memmap_init_reserved_pages;
3153 * the rest of the struct pages will be initialized by the HugeTLB
3154 * subsystem itself.
3155 * The head struct page is used to get folio information by the HugeTLB
3156 * subsystem like zone id and node id.
3157 */ 3158 memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE), 3159 huge_page_size(h) - PAGE_SIZE); 3160 /* Put them into a private list first because mem_map is not up yet */ 3161 INIT_LIST_HEAD(&m->list); 3162 list_add(&m->list, &huge_boot_pages[node]); 3163 m->hstate = h; 3164 return 1; 3165 } 3166 3167 /* Initialize [start_page:end_page_number] tail struct pages of a hugepage */ 3168 static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio, 3169 unsigned long start_page_number, 3170 unsigned long end_page_number) 3171 { 3172 enum zone_type zone = zone_idx(folio_zone(folio)); 3173 int nid = folio_nid(folio); 3174 unsigned long head_pfn = folio_pfn(folio); 3175 unsigned long pfn, end_pfn = head_pfn + end_page_number; 3176 int ret; 3177 3178 for (pfn = head_pfn + start_page_number; pfn < end_pfn; pfn++) { 3179 struct page *page = pfn_to_page(pfn); 3180 3181 __ClearPageReserved(folio_page(folio, pfn - head_pfn)); 3182 __init_single_page(page, pfn, zone, nid); 3183 prep_compound_tail((struct page *)folio, pfn - head_pfn); 3184 ret = page_ref_freeze(page, 1); 3185 VM_BUG_ON(!ret); 3186 } 3187 } 3188 3189 static void __init hugetlb_folio_init_vmemmap(struct folio *folio, 3190 struct hstate *h, 3191 unsigned long nr_pages) 3192 { 3193 int ret; 3194 3195 /* Prepare folio head */ 3196 __folio_clear_reserved(folio); 3197 __folio_set_head(folio); 3198 ret = folio_ref_freeze(folio, 1); 3199 VM_BUG_ON(!ret); 3200 /* Initialize the necessary tail struct pages */ 3201 hugetlb_folio_init_tail_vmemmap(folio, 1, nr_pages); 3202 prep_compound_head((struct page *)folio, huge_page_order(h)); 3203 } 3204 3205 static void __init prep_and_add_bootmem_folios(struct hstate *h, 3206 struct list_head *folio_list) 3207 { 3208 unsigned long flags; 3209 struct folio *folio, *tmp_f; 3210 3211 /* Send list for bulk vmemmap optimization processing */ 3212 hugetlb_vmemmap_optimize_folios(h, folio_list); 3213 3214 list_for_each_entry_safe(folio, tmp_f, folio_list, lru) { 3215 if (!folio_test_hugetlb_vmemmap_optimized(folio)) { 3216 /* 3217 * If HVO fails, initialize all tail struct pages 3218 * We do not worry about potential long lock hold 3219 * time as this is early in boot and there should 3220 * be no contention. 3221 */ 3222 hugetlb_folio_init_tail_vmemmap(folio, 3223 HUGETLB_VMEMMAP_RESERVE_PAGES, 3224 pages_per_huge_page(h)); 3225 } 3226 /* Subdivide locks to achieve better parallel performance */ 3227 spin_lock_irqsave(&hugetlb_lock, flags); 3228 __prep_account_new_huge_page(h, folio_nid(folio)); 3229 enqueue_hugetlb_folio(h, folio); 3230 spin_unlock_irqrestore(&hugetlb_lock, flags); 3231 } 3232 } 3233 3234 /* 3235 * Put bootmem huge pages into the standard lists after mem_map is up. 3236 * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages. 3237 */ 3238 static void __init gather_bootmem_prealloc_node(unsigned long nid) 3239 { 3240 LIST_HEAD(folio_list); 3241 struct huge_bootmem_page *m; 3242 struct hstate *h = NULL, *prev_h = NULL; 3243 3244 list_for_each_entry(m, &huge_boot_pages[nid], list) { 3245 struct page *page = virt_to_page(m); 3246 struct folio *folio = (void *)page; 3247 3248 h = m->hstate; 3249 /* 3250 * It is possible to have multiple huge page sizes (hstates) 3251 * in this list. If so, process each size separately. 
3252 */ 3253 if (h != prev_h && prev_h != NULL) 3254 prep_and_add_bootmem_folios(prev_h, &folio_list); 3255 prev_h = h; 3256 3257 VM_BUG_ON(!hstate_is_gigantic(h)); 3258 WARN_ON(folio_ref_count(folio) != 1); 3259 3260 hugetlb_folio_init_vmemmap(folio, h, 3261 HUGETLB_VMEMMAP_RESERVE_PAGES); 3262 init_new_hugetlb_folio(h, folio); 3263 list_add(&folio->lru, &folio_list); 3264 3265 /* 3266 * We need to restore the 'stolen' pages to totalram_pages 3267 * in order to fix confusing memory reports from free(1) and 3268 * other side-effects, like CommitLimit going negative. 3269 */ 3270 adjust_managed_page_count(page, pages_per_huge_page(h)); 3271 cond_resched(); 3272 } 3273 3274 prep_and_add_bootmem_folios(h, &folio_list); 3275 } 3276 3277 static void __init gather_bootmem_prealloc_parallel(unsigned long start, 3278 unsigned long end, void *arg) 3279 { 3280 int nid; 3281 3282 for (nid = start; nid < end; nid++) 3283 gather_bootmem_prealloc_node(nid); 3284 } 3285 3286 static void __init gather_bootmem_prealloc(void) 3287 { 3288 struct padata_mt_job job = { 3289 .thread_fn = gather_bootmem_prealloc_parallel, 3290 .fn_arg = NULL, 3291 .start = 0, 3292 .size = num_node_state(N_MEMORY), 3293 .align = 1, 3294 .min_chunk = 1, 3295 .max_threads = num_node_state(N_MEMORY), 3296 .numa_aware = true, 3297 }; 3298 3299 padata_do_multithreaded(&job); 3300 } 3301 3302 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid) 3303 { 3304 unsigned long i; 3305 char buf[32]; 3306 LIST_HEAD(folio_list); 3307 3308 for (i = 0; i < h->max_huge_pages_node[nid]; ++i) { 3309 if (hstate_is_gigantic(h)) { 3310 if (!alloc_bootmem_huge_page(h, nid)) 3311 break; 3312 } else { 3313 struct folio *folio; 3314 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 3315 3316 folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, 3317 &node_states[N_MEMORY], NULL); 3318 if (!folio) 3319 break; 3320 list_add(&folio->lru, &folio_list); 3321 } 3322 cond_resched(); 3323 } 3324 3325 if (!list_empty(&folio_list)) 3326 prep_and_add_allocated_folios(h, &folio_list); 3327 3328 if (i == h->max_huge_pages_node[nid]) 3329 return; 3330 3331 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3332 pr_warn("HugeTLB: allocating %u of page size %s failed node%d. Only allocated %lu hugepages.\n", 3333 h->max_huge_pages_node[nid], buf, nid, i); 3334 h->max_huge_pages -= (h->max_huge_pages_node[nid] - i); 3335 h->max_huge_pages_node[nid] = i; 3336 } 3337 3338 static bool __init hugetlb_hstate_alloc_pages_specific_nodes(struct hstate *h) 3339 { 3340 int i; 3341 bool node_specific_alloc = false; 3342 3343 for_each_online_node(i) { 3344 if (h->max_huge_pages_node[i] > 0) { 3345 hugetlb_hstate_alloc_pages_onenode(h, i); 3346 node_specific_alloc = true; 3347 } 3348 } 3349 3350 return node_specific_alloc; 3351 } 3352 3353 static void __init hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated, struct hstate *h) 3354 { 3355 if (allocated < h->max_huge_pages) { 3356 char buf[32]; 3357 3358 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3359 pr_warn("HugeTLB: allocating %lu of page size %s failed. 
Only allocated %lu hugepages.\n",
3360 h->max_huge_pages, buf, allocated);
3361 h->max_huge_pages = allocated;
3362 }
3363 }
3364
3365 static void __init hugetlb_pages_alloc_boot_node(unsigned long start, unsigned long end, void *arg)
3366 {
3367 struct hstate *h = (struct hstate *)arg;
3368 int i, num = end - start;
3369 nodemask_t node_alloc_noretry;
3370 LIST_HEAD(folio_list);
3371 int next_node = first_online_node;
3372
3373 /* Bit mask controlling how hard we retry per-node allocations. */
3374 nodes_clear(node_alloc_noretry);
3375
3376 for (i = 0; i < num; ++i) {
3377 struct folio *folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
3378 &node_alloc_noretry, &next_node);
3379 if (!folio)
3380 break;
3381
3382 list_move(&folio->lru, &folio_list);
3383 cond_resched();
3384 }
3385
3386 prep_and_add_allocated_folios(h, &folio_list);
3387 }
3388
3389 static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h)
3390 {
3391 unsigned long i;
3392
3393 for (i = 0; i < h->max_huge_pages; ++i) {
3394 if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
3395 break;
3396 cond_resched();
3397 }
3398
3399 return i;
3400 }
3401
3402 static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
3403 {
3404 struct padata_mt_job job = {
3405 .fn_arg = h,
3406 .align = 1,
3407 .numa_aware = true
3408 };
3409
3410 job.thread_fn = hugetlb_pages_alloc_boot_node;
3411 job.start = 0;
3412 job.size = h->max_huge_pages;
3413
3414 /*
3415 * job.max_threads is twice num_node_state(N_MEMORY).
3416 *
3417 * Tests below indicate that a multiplier of 2 significantly improves
3418 * performance, and although larger values also provide improvements,
3419 * the gains are marginal.
3420 *
3421 * Therefore, choosing 2 as the multiplier strikes a good balance between
3422 * enhancing parallel processing capabilities and maintaining efficient
3423 * resource management.
3424 *
3425 * +------------+-------+-------+-------+-------+-------+
3426 * | multiplier | 1 | 2 | 3 | 4 | 5 |
3427 * +------------+-------+-------+-------+-------+-------+
3428 * | 256G 2node | 358ms | 215ms | 157ms | 134ms | 126ms |
3429 * | 2T 4node | 979ms | 679ms | 543ms | 489ms | 481ms |
3430 * | 50G 2node | 71ms | 44ms | 37ms | 30ms | 31ms |
3431 * +------------+-------+-------+-------+-------+-------+
3432 */
3433 job.max_threads = num_node_state(N_MEMORY) * 2;
3434 job.min_chunk = h->max_huge_pages / num_node_state(N_MEMORY) / 2;
3435 padata_do_multithreaded(&job);
3436
3437 return h->nr_huge_pages;
3438 }
3439
3440 /*
3441 * NOTE: this routine is called in different contexts for gigantic and
3442 * non-gigantic pages.
3443 * - For gigantic pages, this is called early in the boot process and
3444 * pages are allocated from memblock or a similar boot-time allocator.
3445 * Gigantic pages are actually added to pools later with the routine
3446 * gather_bootmem_prealloc.
3447 * - For non-gigantic pages, this is called later in the boot process after
3448 * all of mm is up and functional. Pages are allocated from buddy and
3449 * then added to hugetlb pools.
3450 */ 3451 static void __init hugetlb_hstate_alloc_pages(struct hstate *h) 3452 { 3453 unsigned long allocated; 3454 static bool initialized __initdata; 3455 3456 /* skip gigantic hugepages allocation if hugetlb_cma enabled */ 3457 if (hstate_is_gigantic(h) && hugetlb_cma_size) { 3458 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n"); 3459 return; 3460 } 3461 3462 /* hugetlb_hstate_alloc_pages will be called many times, initialize huge_boot_pages once */ 3463 if (!initialized) { 3464 int i = 0; 3465 3466 for (i = 0; i < MAX_NUMNODES; i++) 3467 INIT_LIST_HEAD(&huge_boot_pages[i]); 3468 initialized = true; 3469 } 3470 3471 /* do node specific alloc */ 3472 if (hugetlb_hstate_alloc_pages_specific_nodes(h)) 3473 return; 3474 3475 /* below will do all node balanced alloc */ 3476 if (hstate_is_gigantic(h)) 3477 allocated = hugetlb_gigantic_pages_alloc_boot(h); 3478 else 3479 allocated = hugetlb_pages_alloc_boot(h); 3480 3481 hugetlb_hstate_alloc_pages_errcheck(allocated, h); 3482 } 3483 3484 static void __init hugetlb_init_hstates(void) 3485 { 3486 struct hstate *h, *h2; 3487 3488 for_each_hstate(h) { 3489 /* oversize hugepages were init'ed in early boot */ 3490 if (!hstate_is_gigantic(h)) 3491 hugetlb_hstate_alloc_pages(h); 3492 3493 /* 3494 * Set demote order for each hstate. Note that 3495 * h->demote_order is initially 0. 3496 * - We can not demote gigantic pages if runtime freeing 3497 * is not supported, so skip this. 3498 * - If CMA allocation is possible, we can not demote 3499 * HUGETLB_PAGE_ORDER or smaller size pages. 3500 */ 3501 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 3502 continue; 3503 if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER) 3504 continue; 3505 for_each_hstate(h2) { 3506 if (h2 == h) 3507 continue; 3508 if (h2->order < h->order && 3509 h2->order > h->demote_order) 3510 h->demote_order = h2->order; 3511 } 3512 } 3513 } 3514 3515 static void __init report_hugepages(void) 3516 { 3517 struct hstate *h; 3518 3519 for_each_hstate(h) { 3520 char buf[32]; 3521 3522 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3523 pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n", 3524 buf, h->free_huge_pages); 3525 pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n", 3526 hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf); 3527 } 3528 } 3529 3530 #ifdef CONFIG_HIGHMEM 3531 static void try_to_free_low(struct hstate *h, unsigned long count, 3532 nodemask_t *nodes_allowed) 3533 { 3534 int i; 3535 LIST_HEAD(page_list); 3536 3537 lockdep_assert_held(&hugetlb_lock); 3538 if (hstate_is_gigantic(h)) 3539 return; 3540 3541 /* 3542 * Collect pages to be freed on a list, and free after dropping lock 3543 */ 3544 for_each_node_mask(i, *nodes_allowed) { 3545 struct folio *folio, *next; 3546 struct list_head *freel = &h->hugepage_freelists[i]; 3547 list_for_each_entry_safe(folio, next, freel, lru) { 3548 if (count >= h->nr_huge_pages) 3549 goto out; 3550 if (folio_test_highmem(folio)) 3551 continue; 3552 remove_hugetlb_folio(h, folio, false); 3553 list_add(&folio->lru, &page_list); 3554 } 3555 } 3556 3557 out: 3558 spin_unlock_irq(&hugetlb_lock); 3559 update_and_free_pages_bulk(h, &page_list); 3560 spin_lock_irq(&hugetlb_lock); 3561 } 3562 #else 3563 static inline void try_to_free_low(struct hstate *h, unsigned long count, 3564 nodemask_t *nodes_allowed) 3565 { 3566 } 3567 #endif 3568 3569 /* 3570 * Increment or decrement surplus_huge_pages. 
Keep node-specific counters
3571 * balanced by operating on them in a round-robin fashion.
3572 * Returns 1 if an adjustment was made.
3573 */
3574 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
3575 int delta)
3576 {
3577 int nr_nodes, node;
3578
3579 lockdep_assert_held(&hugetlb_lock);
3580 VM_BUG_ON(delta != -1 && delta != 1);
3581
3582 if (delta < 0) {
3583 for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, nodes_allowed) {
3584 if (h->surplus_huge_pages_node[node])
3585 goto found;
3586 }
3587 } else {
3588 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3589 if (h->surplus_huge_pages_node[node] <
3590 h->nr_huge_pages_node[node])
3591 goto found;
3592 }
3593 }
3594 return 0;
3595
3596 found:
3597 h->surplus_huge_pages += delta;
3598 h->surplus_huge_pages_node[node] += delta;
3599 return 1;
3600 }
3601
3602 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
3603 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
3604 nodemask_t *nodes_allowed)
3605 {
3606 unsigned long min_count;
3607 unsigned long allocated;
3608 struct folio *folio;
3609 LIST_HEAD(page_list);
3610 NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
3611
3612 /*
3613 * Bit mask controlling how hard we retry per-node allocations.
3614 * If we can not allocate the bit mask, do not attempt to allocate
3615 * the requested huge pages.
3616 */
3617 if (node_alloc_noretry)
3618 nodes_clear(*node_alloc_noretry);
3619 else
3620 return -ENOMEM;
3621
3622 /*
3623 * The resize_lock mutex prevents concurrent adjustments to the number
3624 * of pages in the hstate via the proc/sysfs interfaces.
3625 */
3626 mutex_lock(&h->resize_lock);
3627 flush_free_hpage_work(h);
3628 spin_lock_irq(&hugetlb_lock);
3629
3630 /*
3631 * Check for a node specific request.
3632 * Changing node specific huge page count may require a corresponding
3633 * change to the global count. In any case, the passed node mask
3634 * (nodes_allowed) will restrict alloc/free to the specified node.
3635 */
3636 if (nid != NUMA_NO_NODE) {
3637 unsigned long old_count = count;
3638
3639 count += persistent_huge_pages(h) -
3640 (h->nr_huge_pages_node[nid] -
3641 h->surplus_huge_pages_node[nid]);
3642 /*
3643 * The user may have specified a large count value which caused the
3644 * above calculation to overflow. In this case, they wanted
3645 * to allocate as many huge pages as possible. Set count to
3646 * the largest possible value to align with their intention.
3647 */
3648 if (count < old_count)
3649 count = ULONG_MAX;
3650 }
3651
3652 /*
3653 * Runtime allocation of gigantic pages depends on the ability to
3654 * allocate large contiguous page ranges.
3655 * If the system does not provide this feature, return an error when
3656 * the user tries to allocate gigantic pages, but let the user free
3657 * boot-time allocated gigantic pages.
3658 */
3659 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
3660 if (count > persistent_huge_pages(h)) {
3661 spin_unlock_irq(&hugetlb_lock);
3662 mutex_unlock(&h->resize_lock);
3663 NODEMASK_FREE(node_alloc_noretry);
3664 return -EINVAL;
3665 }
3666 /* Fall through to decrease pool */
3667 }
3668
3669 /*
3670 * Increase the pool size
3671 * First take pages out of surplus state. Then make up the
3672 * remaining difference by allocating fresh huge pages.
3673 *
3674 * We might race with alloc_surplus_hugetlb_folio() here and be unable
3675 * to convert a surplus huge page to a normal huge page.
That is 3676 * not critical, though, it just means the overall size of the 3677 * pool might be one hugepage larger than it needs to be, but 3678 * within all the constraints specified by the sysctls. 3679 */ 3680 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { 3681 if (!adjust_pool_surplus(h, nodes_allowed, -1)) 3682 break; 3683 } 3684 3685 allocated = 0; 3686 while (count > (persistent_huge_pages(h) + allocated)) { 3687 /* 3688 * If this allocation races such that we no longer need the 3689 * page, free_huge_folio will handle it by freeing the page 3690 * and reducing the surplus. 3691 */ 3692 spin_unlock_irq(&hugetlb_lock); 3693 3694 /* yield cpu to avoid soft lockup */ 3695 cond_resched(); 3696 3697 folio = alloc_pool_huge_folio(h, nodes_allowed, 3698 node_alloc_noretry, 3699 &h->next_nid_to_alloc); 3700 if (!folio) { 3701 prep_and_add_allocated_folios(h, &page_list); 3702 spin_lock_irq(&hugetlb_lock); 3703 goto out; 3704 } 3705 3706 list_add(&folio->lru, &page_list); 3707 allocated++; 3708 3709 /* Bail for signals. Probably ctrl-c from user */ 3710 if (signal_pending(current)) { 3711 prep_and_add_allocated_folios(h, &page_list); 3712 spin_lock_irq(&hugetlb_lock); 3713 goto out; 3714 } 3715 3716 spin_lock_irq(&hugetlb_lock); 3717 } 3718 3719 /* Add allocated pages to the pool */ 3720 if (!list_empty(&page_list)) { 3721 spin_unlock_irq(&hugetlb_lock); 3722 prep_and_add_allocated_folios(h, &page_list); 3723 spin_lock_irq(&hugetlb_lock); 3724 } 3725 3726 /* 3727 * Decrease the pool size 3728 * First return free pages to the buddy allocator (being careful 3729 * to keep enough around to satisfy reservations). Then place 3730 * pages into surplus state as needed so the pool will shrink 3731 * to the desired size as pages become free. 3732 * 3733 * By placing pages into the surplus state independent of the 3734 * overcommit value, we are allowing the surplus pool size to 3735 * exceed overcommit. There are few sane options here. Since 3736 * alloc_surplus_hugetlb_folio() is checking the global counter, 3737 * though, we'll note that we're not allowed to exceed surplus 3738 * and won't grow the pool anywhere else. Not until one of the 3739 * sysctls are changed, or the surplus pages go out of use. 
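 *
 * Illustrative example (made-up numbers): with nr_huge_pages = 100,
 * free_huge_pages = 20 and resv_huge_pages = 10, a request to shrink to
 * count = 50 computes min_count = max(50, 10 + 100 - 20) = 90, so only
 * ten pages can be freed right away; the other forty are moved to
 * surplus below and leave the pool as they become free.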
3740 */ 3741 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; 3742 min_count = max(count, min_count); 3743 try_to_free_low(h, min_count, nodes_allowed); 3744 3745 /* 3746 * Collect pages to be removed on list without dropping lock 3747 */ 3748 while (min_count < persistent_huge_pages(h)) { 3749 folio = remove_pool_hugetlb_folio(h, nodes_allowed, 0); 3750 if (!folio) 3751 break; 3752 3753 list_add(&folio->lru, &page_list); 3754 } 3755 /* free the pages after dropping lock */ 3756 spin_unlock_irq(&hugetlb_lock); 3757 update_and_free_pages_bulk(h, &page_list); 3758 flush_free_hpage_work(h); 3759 spin_lock_irq(&hugetlb_lock); 3760 3761 while (count < persistent_huge_pages(h)) { 3762 if (!adjust_pool_surplus(h, nodes_allowed, 1)) 3763 break; 3764 } 3765 out: 3766 h->max_huge_pages = persistent_huge_pages(h); 3767 spin_unlock_irq(&hugetlb_lock); 3768 mutex_unlock(&h->resize_lock); 3769 3770 NODEMASK_FREE(node_alloc_noretry); 3771 3772 return 0; 3773 } 3774 3775 static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst, 3776 struct list_head *src_list) 3777 { 3778 long rc; 3779 struct folio *folio, *next; 3780 LIST_HEAD(dst_list); 3781 LIST_HEAD(ret_list); 3782 3783 rc = hugetlb_vmemmap_restore_folios(src, src_list, &ret_list); 3784 list_splice_init(&ret_list, src_list); 3785 3786 /* 3787 * Taking target hstate mutex synchronizes with set_max_huge_pages. 3788 * Without the mutex, pages added to target hstate could be marked 3789 * as surplus. 3790 * 3791 * Note that we already hold src->resize_lock. To prevent deadlock, 3792 * use the convention of always taking larger size hstate mutex first. 3793 */ 3794 mutex_lock(&dst->resize_lock); 3795 3796 list_for_each_entry_safe(folio, next, src_list, lru) { 3797 int i; 3798 3799 if (folio_test_hugetlb_vmemmap_optimized(folio)) 3800 continue; 3801 3802 list_del(&folio->lru); 3803 3804 split_page_owner(&folio->page, huge_page_order(src), huge_page_order(dst)); 3805 pgalloc_tag_split(folio, huge_page_order(src), huge_page_order(dst)); 3806 3807 for (i = 0; i < pages_per_huge_page(src); i += pages_per_huge_page(dst)) { 3808 struct page *page = folio_page(folio, i); 3809 3810 page->mapping = NULL; 3811 clear_compound_head(page); 3812 prep_compound_page(page, dst->order); 3813 3814 init_new_hugetlb_folio(dst, page_folio(page)); 3815 list_add(&page->lru, &dst_list); 3816 } 3817 } 3818 3819 prep_and_add_allocated_folios(dst, &dst_list); 3820 3821 mutex_unlock(&dst->resize_lock); 3822 3823 return rc; 3824 } 3825 3826 static long demote_pool_huge_page(struct hstate *src, nodemask_t *nodes_allowed, 3827 unsigned long nr_to_demote) 3828 __must_hold(&hugetlb_lock) 3829 { 3830 int nr_nodes, node; 3831 struct hstate *dst; 3832 long rc = 0; 3833 long nr_demoted = 0; 3834 3835 lockdep_assert_held(&hugetlb_lock); 3836 3837 /* We should never get here if no demote order */ 3838 if (!src->demote_order) { 3839 pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n"); 3840 return -EINVAL; /* internal error */ 3841 } 3842 dst = size_to_hstate(PAGE_SIZE << src->demote_order); 3843 3844 for_each_node_mask_to_free(src, nr_nodes, node, nodes_allowed) { 3845 LIST_HEAD(list); 3846 struct folio *folio, *next; 3847 3848 list_for_each_entry_safe(folio, next, &src->hugepage_freelists[node], lru) { 3849 if (folio_test_hwpoison(folio)) 3850 continue; 3851 3852 remove_hugetlb_folio(src, folio, false); 3853 list_add(&folio->lru, &list); 3854 3855 if (++nr_demoted == nr_to_demote) 3856 break; 3857 } 3858 3859 
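		/*
		 * Demotion itself may sleep (e.g. when vmemmap pages are
		 * re-allocated for the source folios), so hugetlb_lock is
		 * dropped around demote_free_hugetlb_folios() and retaken
		 * before the list is scanned again.
		 */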
spin_unlock_irq(&hugetlb_lock); 3860 3861 rc = demote_free_hugetlb_folios(src, dst, &list); 3862 3863 spin_lock_irq(&hugetlb_lock); 3864 3865 list_for_each_entry_safe(folio, next, &list, lru) { 3866 list_del(&folio->lru); 3867 add_hugetlb_folio(src, folio, false); 3868 3869 nr_demoted--; 3870 } 3871 3872 if (rc < 0 || nr_demoted == nr_to_demote) 3873 break; 3874 } 3875 3876 /* 3877 * Not absolutely necessary, but for consistency update max_huge_pages 3878 * based on pool changes for the demoted page. 3879 */ 3880 src->max_huge_pages -= nr_demoted; 3881 dst->max_huge_pages += nr_demoted << (huge_page_order(src) - huge_page_order(dst)); 3882 3883 if (rc < 0) 3884 return rc; 3885 3886 if (nr_demoted) 3887 return nr_demoted; 3888 /* 3889 * Only way to get here is if all pages on free lists are poisoned. 3890 * Return -EBUSY so that caller will not retry. 3891 */ 3892 return -EBUSY; 3893 } 3894 3895 #define HSTATE_ATTR_RO(_name) \ 3896 static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 3897 3898 #define HSTATE_ATTR_WO(_name) \ 3899 static struct kobj_attribute _name##_attr = __ATTR_WO(_name) 3900 3901 #define HSTATE_ATTR(_name) \ 3902 static struct kobj_attribute _name##_attr = __ATTR_RW(_name) 3903 3904 static struct kobject *hugepages_kobj; 3905 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 3906 3907 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); 3908 3909 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) 3910 { 3911 int i; 3912 3913 for (i = 0; i < HUGE_MAX_HSTATE; i++) 3914 if (hstate_kobjs[i] == kobj) { 3915 if (nidp) 3916 *nidp = NUMA_NO_NODE; 3917 return &hstates[i]; 3918 } 3919 3920 return kobj_to_node_hstate(kobj, nidp); 3921 } 3922 3923 static ssize_t nr_hugepages_show_common(struct kobject *kobj, 3924 struct kobj_attribute *attr, char *buf) 3925 { 3926 struct hstate *h; 3927 unsigned long nr_huge_pages; 3928 int nid; 3929 3930 h = kobj_to_hstate(kobj, &nid); 3931 if (nid == NUMA_NO_NODE) 3932 nr_huge_pages = h->nr_huge_pages; 3933 else 3934 nr_huge_pages = h->nr_huge_pages_node[nid]; 3935 3936 return sysfs_emit(buf, "%lu\n", nr_huge_pages); 3937 } 3938 3939 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy, 3940 struct hstate *h, int nid, 3941 unsigned long count, size_t len) 3942 { 3943 int err; 3944 nodemask_t nodes_allowed, *n_mask; 3945 3946 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 3947 return -EINVAL; 3948 3949 if (nid == NUMA_NO_NODE) { 3950 /* 3951 * global hstate attribute 3952 */ 3953 if (!(obey_mempolicy && 3954 init_nodemask_of_mempolicy(&nodes_allowed))) 3955 n_mask = &node_states[N_MEMORY]; 3956 else 3957 n_mask = &nodes_allowed; 3958 } else { 3959 /* 3960 * Node specific request. count adjustment happens in 3961 * set_max_huge_pages() after acquiring hugetlb_lock. 3962 */ 3963 init_nodemask_of_node(&nodes_allowed, nid); 3964 n_mask = &nodes_allowed; 3965 } 3966 3967 err = set_max_huge_pages(h, count, nid, n_mask); 3968 3969 return err ? 
err : len; 3970 } 3971 3972 static ssize_t nr_hugepages_store_common(bool obey_mempolicy, 3973 struct kobject *kobj, const char *buf, 3974 size_t len) 3975 { 3976 struct hstate *h; 3977 unsigned long count; 3978 int nid; 3979 int err; 3980 3981 err = kstrtoul(buf, 10, &count); 3982 if (err) 3983 return err; 3984 3985 h = kobj_to_hstate(kobj, &nid); 3986 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); 3987 } 3988 3989 static ssize_t nr_hugepages_show(struct kobject *kobj, 3990 struct kobj_attribute *attr, char *buf) 3991 { 3992 return nr_hugepages_show_common(kobj, attr, buf); 3993 } 3994 3995 static ssize_t nr_hugepages_store(struct kobject *kobj, 3996 struct kobj_attribute *attr, const char *buf, size_t len) 3997 { 3998 return nr_hugepages_store_common(false, kobj, buf, len); 3999 } 4000 HSTATE_ATTR(nr_hugepages); 4001 4002 #ifdef CONFIG_NUMA 4003 4004 /* 4005 * hstate attribute for optionally mempolicy-based constraint on persistent 4006 * huge page alloc/free. 4007 */ 4008 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, 4009 struct kobj_attribute *attr, 4010 char *buf) 4011 { 4012 return nr_hugepages_show_common(kobj, attr, buf); 4013 } 4014 4015 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, 4016 struct kobj_attribute *attr, const char *buf, size_t len) 4017 { 4018 return nr_hugepages_store_common(true, kobj, buf, len); 4019 } 4020 HSTATE_ATTR(nr_hugepages_mempolicy); 4021 #endif 4022 4023 4024 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 4025 struct kobj_attribute *attr, char *buf) 4026 { 4027 struct hstate *h = kobj_to_hstate(kobj, NULL); 4028 return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages); 4029 } 4030 4031 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 4032 struct kobj_attribute *attr, const char *buf, size_t count) 4033 { 4034 int err; 4035 unsigned long input; 4036 struct hstate *h = kobj_to_hstate(kobj, NULL); 4037 4038 if (hstate_is_gigantic(h)) 4039 return -EINVAL; 4040 4041 err = kstrtoul(buf, 10, &input); 4042 if (err) 4043 return err; 4044 4045 spin_lock_irq(&hugetlb_lock); 4046 h->nr_overcommit_huge_pages = input; 4047 spin_unlock_irq(&hugetlb_lock); 4048 4049 return count; 4050 } 4051 HSTATE_ATTR(nr_overcommit_hugepages); 4052 4053 static ssize_t free_hugepages_show(struct kobject *kobj, 4054 struct kobj_attribute *attr, char *buf) 4055 { 4056 struct hstate *h; 4057 unsigned long free_huge_pages; 4058 int nid; 4059 4060 h = kobj_to_hstate(kobj, &nid); 4061 if (nid == NUMA_NO_NODE) 4062 free_huge_pages = h->free_huge_pages; 4063 else 4064 free_huge_pages = h->free_huge_pages_node[nid]; 4065 4066 return sysfs_emit(buf, "%lu\n", free_huge_pages); 4067 } 4068 HSTATE_ATTR_RO(free_hugepages); 4069 4070 static ssize_t resv_hugepages_show(struct kobject *kobj, 4071 struct kobj_attribute *attr, char *buf) 4072 { 4073 struct hstate *h = kobj_to_hstate(kobj, NULL); 4074 return sysfs_emit(buf, "%lu\n", h->resv_huge_pages); 4075 } 4076 HSTATE_ATTR_RO(resv_hugepages); 4077 4078 static ssize_t surplus_hugepages_show(struct kobject *kobj, 4079 struct kobj_attribute *attr, char *buf) 4080 { 4081 struct hstate *h; 4082 unsigned long surplus_huge_pages; 4083 int nid; 4084 4085 h = kobj_to_hstate(kobj, &nid); 4086 if (nid == NUMA_NO_NODE) 4087 surplus_huge_pages = h->surplus_huge_pages; 4088 else 4089 surplus_huge_pages = h->surplus_huge_pages_node[nid]; 4090 4091 return sysfs_emit(buf, "%lu\n", surplus_huge_pages); 4092 } 4093 HSTATE_ATTR_RO(surplus_hugepages); 4094 4095 static 
ssize_t demote_store(struct kobject *kobj,
4096 		     struct kobj_attribute *attr, const char *buf, size_t len)
4097 {
4098 	unsigned long nr_demote;
4099 	unsigned long nr_available;
4100 	nodemask_t nodes_allowed, *n_mask;
4101 	struct hstate *h;
4102 	int err;
4103 	int nid;
4104 
4105 	err = kstrtoul(buf, 10, &nr_demote);
4106 	if (err)
4107 		return err;
4108 	h = kobj_to_hstate(kobj, &nid);
4109 
4110 	if (nid != NUMA_NO_NODE) {
4111 		init_nodemask_of_node(&nodes_allowed, nid);
4112 		n_mask = &nodes_allowed;
4113 	} else {
4114 		n_mask = &node_states[N_MEMORY];
4115 	}
4116 
4117 	/* Synchronize with other sysfs operations modifying huge pages */
4118 	mutex_lock(&h->resize_lock);
4119 	spin_lock_irq(&hugetlb_lock);
4120 
4121 	while (nr_demote) {
4122 		long rc;
4123 
4124 		/*
4125 		 * Check for available pages to demote each time through the
4126 		 * loop as demote_pool_huge_page will drop hugetlb_lock.
4127 		 */
4128 		if (nid != NUMA_NO_NODE)
4129 			nr_available = h->free_huge_pages_node[nid];
4130 		else
4131 			nr_available = h->free_huge_pages;
4132 		nr_available -= h->resv_huge_pages;
4133 		if (!nr_available)
4134 			break;
4135 
4136 		rc = demote_pool_huge_page(h, n_mask, nr_demote);
4137 		if (rc < 0) {
4138 			err = rc;
4139 			break;
4140 		}
4141 
4142 		nr_demote -= rc;
4143 	}
4144 
4145 	spin_unlock_irq(&hugetlb_lock);
4146 	mutex_unlock(&h->resize_lock);
4147 
4148 	if (err)
4149 		return err;
4150 	return len;
4151 }
4152 HSTATE_ATTR_WO(demote);
4153 
4154 static ssize_t demote_size_show(struct kobject *kobj,
4155 				struct kobj_attribute *attr, char *buf)
4156 {
4157 	struct hstate *h = kobj_to_hstate(kobj, NULL);
4158 	unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K;
4159 
4160 	return sysfs_emit(buf, "%lukB\n", demote_size);
4161 }
4162 
4163 static ssize_t demote_size_store(struct kobject *kobj,
4164 				 struct kobj_attribute *attr,
4165 				 const char *buf, size_t count)
4166 {
4167 	struct hstate *h, *demote_hstate;
4168 	unsigned long demote_size;
4169 	unsigned int demote_order;
4170 
4171 	demote_size = (unsigned long)memparse(buf, NULL);
4172 
4173 	demote_hstate = size_to_hstate(demote_size);
4174 	if (!demote_hstate)
4175 		return -EINVAL;
4176 	demote_order = demote_hstate->order;
4177 	if (demote_order < HUGETLB_PAGE_ORDER)
4178 		return -EINVAL;
4179 
4180 	/* demote order must be smaller than hstate order */
4181 	h = kobj_to_hstate(kobj, NULL);
4182 	if (demote_order >= h->order)
4183 		return -EINVAL;
4184 
4185 	/* resize_lock synchronizes concurrent updates of demote_order */
4186 	mutex_lock(&h->resize_lock);
4187 	h->demote_order = demote_order;
4188 	mutex_unlock(&h->resize_lock);
4189 
4190 	return count;
4191 }
4192 HSTATE_ATTR(demote_size);
4193 
4194 static struct attribute *hstate_attrs[] = {
4195 	&nr_hugepages_attr.attr,
4196 	&nr_overcommit_hugepages_attr.attr,
4197 	&free_hugepages_attr.attr,
4198 	&resv_hugepages_attr.attr,
4199 	&surplus_hugepages_attr.attr,
4200 #ifdef CONFIG_NUMA
4201 	&nr_hugepages_mempolicy_attr.attr,
4202 #endif
4203 	NULL,
4204 };
4205 
4206 static const struct attribute_group hstate_attr_group = {
4207 	.attrs = hstate_attrs,
4208 };
4209 
4210 static struct attribute *hstate_demote_attrs[] = {
4211 	&demote_size_attr.attr,
4212 	&demote_attr.attr,
4213 	NULL,
4214 };
4215 
4216 static const struct attribute_group hstate_demote_attr_group = {
4217 	.attrs = hstate_demote_attrs,
4218 };
4219 
4220 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
4221 				    struct kobject **hstate_kobjs,
4222 				    const struct attribute_group *hstate_attr_group)
4223 {
4224 	int retval;
4225 	int hi = hstate_index(h);
4226 
4227 
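	/*
	 * Create the per-hstate directory, e.g.
	 * /sys/kernel/mm/hugepages/hugepages-2048kB (or the same name under
	 * a node device), and populate it with the attribute group passed in.
	 */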
hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); 4228 if (!hstate_kobjs[hi]) 4229 return -ENOMEM; 4230 4231 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); 4232 if (retval) { 4233 kobject_put(hstate_kobjs[hi]); 4234 hstate_kobjs[hi] = NULL; 4235 return retval; 4236 } 4237 4238 if (h->demote_order) { 4239 retval = sysfs_create_group(hstate_kobjs[hi], 4240 &hstate_demote_attr_group); 4241 if (retval) { 4242 pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name); 4243 sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group); 4244 kobject_put(hstate_kobjs[hi]); 4245 hstate_kobjs[hi] = NULL; 4246 return retval; 4247 } 4248 } 4249 4250 return 0; 4251 } 4252 4253 #ifdef CONFIG_NUMA 4254 static bool hugetlb_sysfs_initialized __ro_after_init; 4255 4256 /* 4257 * node_hstate/s - associate per node hstate attributes, via their kobjects, 4258 * with node devices in node_devices[] using a parallel array. The array 4259 * index of a node device or _hstate == node id. 4260 * This is here to avoid any static dependency of the node device driver, in 4261 * the base kernel, on the hugetlb module. 4262 */ 4263 struct node_hstate { 4264 struct kobject *hugepages_kobj; 4265 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 4266 }; 4267 static struct node_hstate node_hstates[MAX_NUMNODES]; 4268 4269 /* 4270 * A subset of global hstate attributes for node devices 4271 */ 4272 static struct attribute *per_node_hstate_attrs[] = { 4273 &nr_hugepages_attr.attr, 4274 &free_hugepages_attr.attr, 4275 &surplus_hugepages_attr.attr, 4276 NULL, 4277 }; 4278 4279 static const struct attribute_group per_node_hstate_attr_group = { 4280 .attrs = per_node_hstate_attrs, 4281 }; 4282 4283 /* 4284 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. 4285 * Returns node id via non-NULL nidp. 4286 */ 4287 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 4288 { 4289 int nid; 4290 4291 for (nid = 0; nid < nr_node_ids; nid++) { 4292 struct node_hstate *nhs = &node_hstates[nid]; 4293 int i; 4294 for (i = 0; i < HUGE_MAX_HSTATE; i++) 4295 if (nhs->hstate_kobjs[i] == kobj) { 4296 if (nidp) 4297 *nidp = nid; 4298 return &hstates[i]; 4299 } 4300 } 4301 4302 BUG(); 4303 return NULL; 4304 } 4305 4306 /* 4307 * Unregister hstate attributes from a single node device. 4308 * No-op if no hstate attributes attached. 4309 */ 4310 void hugetlb_unregister_node(struct node *node) 4311 { 4312 struct hstate *h; 4313 struct node_hstate *nhs = &node_hstates[node->dev.id]; 4314 4315 if (!nhs->hugepages_kobj) 4316 return; /* no hstate attributes */ 4317 4318 for_each_hstate(h) { 4319 int idx = hstate_index(h); 4320 struct kobject *hstate_kobj = nhs->hstate_kobjs[idx]; 4321 4322 if (!hstate_kobj) 4323 continue; 4324 if (h->demote_order) 4325 sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group); 4326 sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group); 4327 kobject_put(hstate_kobj); 4328 nhs->hstate_kobjs[idx] = NULL; 4329 } 4330 4331 kobject_put(nhs->hugepages_kobj); 4332 nhs->hugepages_kobj = NULL; 4333 } 4334 4335 4336 /* 4337 * Register hstate attributes for a single node device. 4338 * No-op if attributes already registered. 
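 * The resulting layout mirrors the global one, e.g.
 * /sys/devices/system/node/node0/hugepages/hugepages-2048kB/.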
4339 */ 4340 void hugetlb_register_node(struct node *node) 4341 { 4342 struct hstate *h; 4343 struct node_hstate *nhs = &node_hstates[node->dev.id]; 4344 int err; 4345 4346 if (!hugetlb_sysfs_initialized) 4347 return; 4348 4349 if (nhs->hugepages_kobj) 4350 return; /* already allocated */ 4351 4352 nhs->hugepages_kobj = kobject_create_and_add("hugepages", 4353 &node->dev.kobj); 4354 if (!nhs->hugepages_kobj) 4355 return; 4356 4357 for_each_hstate(h) { 4358 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, 4359 nhs->hstate_kobjs, 4360 &per_node_hstate_attr_group); 4361 if (err) { 4362 pr_err("HugeTLB: Unable to add hstate %s for node %d\n", 4363 h->name, node->dev.id); 4364 hugetlb_unregister_node(node); 4365 break; 4366 } 4367 } 4368 } 4369 4370 /* 4371 * hugetlb init time: register hstate attributes for all registered node 4372 * devices of nodes that have memory. All on-line nodes should have 4373 * registered their associated device by this time. 4374 */ 4375 static void __init hugetlb_register_all_nodes(void) 4376 { 4377 int nid; 4378 4379 for_each_online_node(nid) 4380 hugetlb_register_node(node_devices[nid]); 4381 } 4382 #else /* !CONFIG_NUMA */ 4383 4384 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 4385 { 4386 BUG(); 4387 if (nidp) 4388 *nidp = -1; 4389 return NULL; 4390 } 4391 4392 static void hugetlb_register_all_nodes(void) { } 4393 4394 #endif 4395 4396 #ifdef CONFIG_CMA 4397 static void __init hugetlb_cma_check(void); 4398 #else 4399 static inline __init void hugetlb_cma_check(void) 4400 { 4401 } 4402 #endif 4403 4404 static void __init hugetlb_sysfs_init(void) 4405 { 4406 struct hstate *h; 4407 int err; 4408 4409 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 4410 if (!hugepages_kobj) 4411 return; 4412 4413 for_each_hstate(h) { 4414 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, 4415 hstate_kobjs, &hstate_attr_group); 4416 if (err) 4417 pr_err("HugeTLB: Unable to add hstate %s", h->name); 4418 } 4419 4420 #ifdef CONFIG_NUMA 4421 hugetlb_sysfs_initialized = true; 4422 #endif 4423 hugetlb_register_all_nodes(); 4424 } 4425 4426 #ifdef CONFIG_SYSCTL 4427 static void hugetlb_sysctl_init(void); 4428 #else 4429 static inline void hugetlb_sysctl_init(void) { } 4430 #endif 4431 4432 static int __init hugetlb_init(void) 4433 { 4434 int i; 4435 4436 BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE < 4437 __NR_HPAGEFLAGS); 4438 4439 if (!hugepages_supported()) { 4440 if (hugetlb_max_hstate || default_hstate_max_huge_pages) 4441 pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n"); 4442 return 0; 4443 } 4444 4445 /* 4446 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some 4447 * architectures depend on setup being done here. 4448 */ 4449 hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 4450 if (!parsed_default_hugepagesz) { 4451 /* 4452 * If we did not parse a default huge page size, set 4453 * default_hstate_idx to HPAGE_SIZE hstate. And, if the 4454 * number of huge pages for this default size was implicitly 4455 * specified, set that here as well. 4456 * Note that the implicit setting will overwrite an explicit 4457 * setting. A warning will be printed in this case. 
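 * For example, if 2M is the architecture default, booting with
 * "hugepages=16 hugepagesz=2M hugepages=8" keeps the implicit 16 and
 * warns that the explicit 8 is ignored.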
4458 		 */
4459 		default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
4460 		if (default_hstate_max_huge_pages) {
4461 			if (default_hstate.max_huge_pages) {
4462 				char buf[32];
4463 
4464 				string_get_size(huge_page_size(&default_hstate),
4465 					1, STRING_UNITS_2, buf, 32);
4466 				pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
4467 					default_hstate.max_huge_pages, buf);
4468 				pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
4469 					default_hstate_max_huge_pages);
4470 			}
4471 			default_hstate.max_huge_pages =
4472 				default_hstate_max_huge_pages;
4473 
4474 			for_each_online_node(i)
4475 				default_hstate.max_huge_pages_node[i] =
4476 					default_hugepages_in_node[i];
4477 		}
4478 	}
4479 
4480 	hugetlb_cma_check();
4481 	hugetlb_init_hstates();
4482 	gather_bootmem_prealloc();
4483 	report_hugepages();
4484 
4485 	hugetlb_sysfs_init();
4486 	hugetlb_cgroup_file_init();
4487 	hugetlb_sysctl_init();
4488 
4489 #ifdef CONFIG_SMP
4490 	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
4491 #else
4492 	num_fault_mutexes = 1;
4493 #endif
4494 	hugetlb_fault_mutex_table =
4495 		kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
4496 			      GFP_KERNEL);
4497 	BUG_ON(!hugetlb_fault_mutex_table);
4498 
4499 	for (i = 0; i < num_fault_mutexes; i++)
4500 		mutex_init(&hugetlb_fault_mutex_table[i]);
4501 	return 0;
4502 }
4503 subsys_initcall(hugetlb_init);
4504 
4505 /* Overwritten by architectures with more huge page sizes */
4506 bool __init __weak arch_hugetlb_valid_size(unsigned long size)
4507 {
4508 	return size == HPAGE_SIZE;
4509 }
4510 
4511 void __init hugetlb_add_hstate(unsigned int order)
4512 {
4513 	struct hstate *h;
4514 	unsigned long i;
4515 
4516 	if (size_to_hstate(PAGE_SIZE << order)) {
4517 		return;
4518 	}
4519 	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
4520 	BUG_ON(order < order_base_2(__NR_USED_SUBPAGE));
4521 	h = &hstates[hugetlb_max_hstate++];
4522 	__mutex_init(&h->resize_lock, "resize mutex", &h->resize_key);
4523 	h->order = order;
4524 	h->mask = ~(huge_page_size(h) - 1);
4525 	for (i = 0; i < MAX_NUMNODES; ++i)
4526 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
4527 	INIT_LIST_HEAD(&h->hugepage_activelist);
4528 	h->next_nid_to_alloc = first_memory_node;
4529 	h->next_nid_to_free = first_memory_node;
4530 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
4531 		 huge_page_size(h)/SZ_1K);
4532 
4533 	parsed_hstate = h;
4534 }
4535 
4536 bool __init __weak hugetlb_node_alloc_supported(void)
4537 {
4538 	return true;
4539 }
4540 
4541 static void __init hugepages_clear_pages_in_node(void)
4542 {
4543 	if (!hugetlb_max_hstate) {
4544 		default_hstate_max_huge_pages = 0;
4545 		memset(default_hugepages_in_node, 0,
4546 			sizeof(default_hugepages_in_node));
4547 	} else {
4548 		parsed_hstate->max_huge_pages = 0;
4549 		memset(parsed_hstate->max_huge_pages_node, 0,
4550 			sizeof(parsed_hstate->max_huge_pages_node));
4551 	}
4552 }
4553 
4554 /*
4555  * hugepages command line processing
4556  * hugepages normally follows a valid hugepagesz or default_hugepagesz
4557  * specification.  If not, ignore the hugepages value.  hugepages can also
4558  * be the first huge page command line option in which case it implicitly
4559  * specifies the number of huge pages for the default size.
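 * Examples: "hugepagesz=1G hugepages=4" requests four 1G pages, while
 * the node-aware form "hugepages=0:2,1:2" (parsed below) spreads the
 * request across nodes 0 and 1.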
4560 */ 4561 static int __init hugepages_setup(char *s) 4562 { 4563 unsigned long *mhp; 4564 static unsigned long *last_mhp; 4565 int node = NUMA_NO_NODE; 4566 int count; 4567 unsigned long tmp; 4568 char *p = s; 4569 4570 if (!parsed_valid_hugepagesz) { 4571 pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s); 4572 parsed_valid_hugepagesz = true; 4573 return 1; 4574 } 4575 4576 /* 4577 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter 4578 * yet, so this hugepages= parameter goes to the "default hstate". 4579 * Otherwise, it goes with the previously parsed hugepagesz or 4580 * default_hugepagesz. 4581 */ 4582 else if (!hugetlb_max_hstate) 4583 mhp = &default_hstate_max_huge_pages; 4584 else 4585 mhp = &parsed_hstate->max_huge_pages; 4586 4587 if (mhp == last_mhp) { 4588 pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s); 4589 return 1; 4590 } 4591 4592 while (*p) { 4593 count = 0; 4594 if (sscanf(p, "%lu%n", &tmp, &count) != 1) 4595 goto invalid; 4596 /* Parameter is node format */ 4597 if (p[count] == ':') { 4598 if (!hugetlb_node_alloc_supported()) { 4599 pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n"); 4600 return 1; 4601 } 4602 if (tmp >= MAX_NUMNODES || !node_online(tmp)) 4603 goto invalid; 4604 node = array_index_nospec(tmp, MAX_NUMNODES); 4605 p += count + 1; 4606 /* Parse hugepages */ 4607 if (sscanf(p, "%lu%n", &tmp, &count) != 1) 4608 goto invalid; 4609 if (!hugetlb_max_hstate) 4610 default_hugepages_in_node[node] = tmp; 4611 else 4612 parsed_hstate->max_huge_pages_node[node] = tmp; 4613 *mhp += tmp; 4614 /* Go to parse next node*/ 4615 if (p[count] == ',') 4616 p += count + 1; 4617 else 4618 break; 4619 } else { 4620 if (p != s) 4621 goto invalid; 4622 *mhp = tmp; 4623 break; 4624 } 4625 } 4626 4627 /* 4628 * Global state is always initialized later in hugetlb_init. 4629 * But we need to allocate gigantic hstates here early to still 4630 * use the bootmem allocator. 4631 */ 4632 if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate)) 4633 hugetlb_hstate_alloc_pages(parsed_hstate); 4634 4635 last_mhp = mhp; 4636 4637 return 1; 4638 4639 invalid: 4640 pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p); 4641 hugepages_clear_pages_in_node(); 4642 return 1; 4643 } 4644 __setup("hugepages=", hugepages_setup); 4645 4646 /* 4647 * hugepagesz command line processing 4648 * A specific huge page size can only be specified once with hugepagesz. 4649 * hugepagesz is followed by hugepages on the command line. The global 4650 * variable 'parsed_valid_hugepagesz' is used to determine if prior 4651 * hugepagesz argument was valid. 4652 */ 4653 static int __init hugepagesz_setup(char *s) 4654 { 4655 unsigned long size; 4656 struct hstate *h; 4657 4658 parsed_valid_hugepagesz = false; 4659 size = (unsigned long)memparse(s, NULL); 4660 4661 if (!arch_hugetlb_valid_size(size)) { 4662 pr_err("HugeTLB: unsupported hugepagesz=%s\n", s); 4663 return 1; 4664 } 4665 4666 h = size_to_hstate(size); 4667 if (h) { 4668 /* 4669 * hstate for this size already exists. This is normally 4670 * an error, but is allowed if the existing hstate is the 4671 * default hstate. More specifically, it is only allowed if 4672 * the number of huge pages for the default hstate was not 4673 * previously specified. 
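 * For example, "hugepagesz=2M hugepages=512 hugepagesz=1G hugepages=4"
 * sets up two pools, whereas repeating "hugepagesz=2M" later on the same
 * command line is normally rejected as a duplicate (see the one
 * exception below).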
4674 		 */
4675 		if (!parsed_default_hugepagesz ||  h != &default_hstate ||
4676 		    default_hstate.max_huge_pages) {
4677 			pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
4678 			return 1;
4679 		}
4680 
4681 		/*
4682 		 * No need to call hugetlb_add_hstate() as hstate already
4683 		 * exists.  But, do set parsed_hstate so that a following
4684 		 * hugepages= parameter will be applied to this hstate.
4685 		 */
4686 		parsed_hstate = h;
4687 		parsed_valid_hugepagesz = true;
4688 		return 1;
4689 	}
4690 
4691 	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4692 	parsed_valid_hugepagesz = true;
4693 	return 1;
4694 }
4695 __setup("hugepagesz=", hugepagesz_setup);
4696 
4697 /*
4698  * default_hugepagesz command line input
4699  * Only one instance of default_hugepagesz allowed on command line.
4700  */
4701 static int __init default_hugepagesz_setup(char *s)
4702 {
4703 	unsigned long size;
4704 	int i;
4705 
4706 	parsed_valid_hugepagesz = false;
4707 	if (parsed_default_hugepagesz) {
4708 		pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
4709 		return 1;
4710 	}
4711 
4712 	size = (unsigned long)memparse(s, NULL);
4713 
4714 	if (!arch_hugetlb_valid_size(size)) {
4715 		pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
4716 		return 1;
4717 	}
4718 
4719 	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4720 	parsed_valid_hugepagesz = true;
4721 	parsed_default_hugepagesz = true;
4722 	default_hstate_idx = hstate_index(size_to_hstate(size));
4723 
4724 	/*
4725 	 * The number of default huge pages (for this size) could have been
4726 	 * specified as the first hugetlb parameter: hugepages=X.  If so,
4727 	 * then default_hstate_max_huge_pages is set.  If the default huge
4728 	 * page size is gigantic (> MAX_PAGE_ORDER), then the pages must be
4729 	 * allocated here from the bootmem allocator.
4730 	 */
4731 	if (default_hstate_max_huge_pages) {
4732 		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
4733 		for_each_online_node(i)
4734 			default_hstate.max_huge_pages_node[i] =
4735 				default_hugepages_in_node[i];
4736 		if (hstate_is_gigantic(&default_hstate))
4737 			hugetlb_hstate_alloc_pages(&default_hstate);
4738 		default_hstate_max_huge_pages = 0;
4739 	}
4740 
4741 	return 1;
4742 }
4743 __setup("default_hugepagesz=", default_hugepagesz_setup);
4744 
4745 static unsigned int allowed_mems_nr(struct hstate *h)
4746 {
4747 	int node;
4748 	unsigned int nr = 0;
4749 	nodemask_t *mbind_nodemask;
4750 	unsigned int *array = h->free_huge_pages_node;
4751 	gfp_t gfp_mask = htlb_alloc_mask(h);
4752 
4753 	mbind_nodemask = policy_mbind_nodemask(gfp_mask);
4754 	for_each_node_mask(node, cpuset_current_mems_allowed) {
4755 		if (!mbind_nodemask || node_isset(node, *mbind_nodemask))
4756 			nr += array[node];
4757 	}
4758 
4759 	return nr;
4760 }
4761 
4762 #ifdef CONFIG_SYSCTL
4763 static int proc_hugetlb_doulongvec_minmax(const struct ctl_table *table, int write,
4764 					  void *buffer, size_t *length,
4765 					  loff_t *ppos, unsigned long *out)
4766 {
4767 	struct ctl_table dup_table;
4768 
4769 	/*
4770 	 * To avoid races with __do_proc_doulongvec_minmax(), duplicate
4771 	 * @table and operate only on the copy.
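	 * (The shared table's ->data stays NULL; pointing it at a stack
	 * variable directly would be visible to concurrent users of the
	 * table.)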
4772 */ 4773 dup_table = *table; 4774 dup_table.data = out; 4775 4776 return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos); 4777 } 4778 4779 static int hugetlb_sysctl_handler_common(bool obey_mempolicy, 4780 const struct ctl_table *table, int write, 4781 void *buffer, size_t *length, loff_t *ppos) 4782 { 4783 struct hstate *h = &default_hstate; 4784 unsigned long tmp = h->max_huge_pages; 4785 int ret; 4786 4787 if (!hugepages_supported()) 4788 return -EOPNOTSUPP; 4789 4790 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 4791 &tmp); 4792 if (ret) 4793 goto out; 4794 4795 if (write) 4796 ret = __nr_hugepages_store_common(obey_mempolicy, h, 4797 NUMA_NO_NODE, tmp, *length); 4798 out: 4799 return ret; 4800 } 4801 4802 static int hugetlb_sysctl_handler(const struct ctl_table *table, int write, 4803 void *buffer, size_t *length, loff_t *ppos) 4804 { 4805 4806 return hugetlb_sysctl_handler_common(false, table, write, 4807 buffer, length, ppos); 4808 } 4809 4810 #ifdef CONFIG_NUMA 4811 static int hugetlb_mempolicy_sysctl_handler(const struct ctl_table *table, int write, 4812 void *buffer, size_t *length, loff_t *ppos) 4813 { 4814 return hugetlb_sysctl_handler_common(true, table, write, 4815 buffer, length, ppos); 4816 } 4817 #endif /* CONFIG_NUMA */ 4818 4819 static int hugetlb_overcommit_handler(const struct ctl_table *table, int write, 4820 void *buffer, size_t *length, loff_t *ppos) 4821 { 4822 struct hstate *h = &default_hstate; 4823 unsigned long tmp; 4824 int ret; 4825 4826 if (!hugepages_supported()) 4827 return -EOPNOTSUPP; 4828 4829 tmp = h->nr_overcommit_huge_pages; 4830 4831 if (write && hstate_is_gigantic(h)) 4832 return -EINVAL; 4833 4834 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 4835 &tmp); 4836 if (ret) 4837 goto out; 4838 4839 if (write) { 4840 spin_lock_irq(&hugetlb_lock); 4841 h->nr_overcommit_huge_pages = tmp; 4842 spin_unlock_irq(&hugetlb_lock); 4843 } 4844 out: 4845 return ret; 4846 } 4847 4848 static struct ctl_table hugetlb_table[] = { 4849 { 4850 .procname = "nr_hugepages", 4851 .data = NULL, 4852 .maxlen = sizeof(unsigned long), 4853 .mode = 0644, 4854 .proc_handler = hugetlb_sysctl_handler, 4855 }, 4856 #ifdef CONFIG_NUMA 4857 { 4858 .procname = "nr_hugepages_mempolicy", 4859 .data = NULL, 4860 .maxlen = sizeof(unsigned long), 4861 .mode = 0644, 4862 .proc_handler = &hugetlb_mempolicy_sysctl_handler, 4863 }, 4864 #endif 4865 { 4866 .procname = "hugetlb_shm_group", 4867 .data = &sysctl_hugetlb_shm_group, 4868 .maxlen = sizeof(gid_t), 4869 .mode = 0644, 4870 .proc_handler = proc_dointvec, 4871 }, 4872 { 4873 .procname = "nr_overcommit_hugepages", 4874 .data = NULL, 4875 .maxlen = sizeof(unsigned long), 4876 .mode = 0644, 4877 .proc_handler = hugetlb_overcommit_handler, 4878 }, 4879 }; 4880 4881 static void hugetlb_sysctl_init(void) 4882 { 4883 register_sysctl_init("vm", hugetlb_table); 4884 } 4885 #endif /* CONFIG_SYSCTL */ 4886 4887 void hugetlb_report_meminfo(struct seq_file *m) 4888 { 4889 struct hstate *h; 4890 unsigned long total = 0; 4891 4892 if (!hugepages_supported()) 4893 return; 4894 4895 for_each_hstate(h) { 4896 unsigned long count = h->nr_huge_pages; 4897 4898 total += huge_page_size(h) * count; 4899 4900 if (h == &default_hstate) 4901 seq_printf(m, 4902 "HugePages_Total: %5lu\n" 4903 "HugePages_Free: %5lu\n" 4904 "HugePages_Rsvd: %5lu\n" 4905 "HugePages_Surp: %5lu\n" 4906 "Hugepagesize: %8lu kB\n", 4907 count, 4908 h->free_huge_pages, 4909 h->resv_huge_pages, 4910 h->surplus_huge_pages, 
4911 				   huge_page_size(h) / SZ_1K);
4912 	}
4913 
4914 	seq_printf(m, "Hugetlb:        %8lu kB\n", total / SZ_1K);
4915 }
4916 
4917 int hugetlb_report_node_meminfo(char *buf, int len, int nid)
4918 {
4919 	struct hstate *h = &default_hstate;
4920 
4921 	if (!hugepages_supported())
4922 		return 0;
4923 
4924 	return sysfs_emit_at(buf, len,
4925 			     "Node %d HugePages_Total: %5u\n"
4926 			     "Node %d HugePages_Free:  %5u\n"
4927 			     "Node %d HugePages_Surp:  %5u\n",
4928 			     nid, h->nr_huge_pages_node[nid],
4929 			     nid, h->free_huge_pages_node[nid],
4930 			     nid, h->surplus_huge_pages_node[nid]);
4931 }
4932 
4933 void hugetlb_show_meminfo_node(int nid)
4934 {
4935 	struct hstate *h;
4936 
4937 	if (!hugepages_supported())
4938 		return;
4939 
4940 	for_each_hstate(h)
4941 		printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
4942 			nid,
4943 			h->nr_huge_pages_node[nid],
4944 			h->free_huge_pages_node[nid],
4945 			h->surplus_huge_pages_node[nid],
4946 			huge_page_size(h) / SZ_1K);
4947 }
4948 
4949 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
4950 {
4951 	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
4952 		   K(atomic_long_read(&mm->hugetlb_usage)));
4953 }
4954 
4955 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
4956 unsigned long hugetlb_total_pages(void)
4957 {
4958 	struct hstate *h;
4959 	unsigned long nr_total_pages = 0;
4960 
4961 	for_each_hstate(h)
4962 		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
4963 	return nr_total_pages;
4964 }
4965 
4966 static int hugetlb_acct_memory(struct hstate *h, long delta)
4967 {
4968 	int ret = -ENOMEM;
4969 
4970 	if (!delta)
4971 		return 0;
4972 
4973 	spin_lock_irq(&hugetlb_lock);
4974 	/*
4975 	 * When cpuset is configured, it breaks strict hugetlb page
4976 	 * reservation as the accounting is done on a global variable. Such
4977 	 * a reservation is meaningless in the presence of cpusets because
4978 	 * it is not checked against page availability for the task's
4979 	 * current cpuset. The kernel can still OOM-kill an application if
4980 	 * the cpuset it runs in lacks free hugetlb pages.
4981 	 * Enforcing strict accounting per cpuset is all but
4982 	 * impossible (or too ugly) because cpusets are fluid:
4983 	 * tasks and memory nodes can be moved between cpusets dynamically.
4984 	 *
4985 	 * Changing the semantics of shared hugetlb mappings under cpuset is
4986 	 * undesirable. However, to preserve some of those semantics,
4987 	 * we fall back to checking against current free page availability,
4988 	 * as a best effort, to minimize the impact of cpuset's
4989 	 * semantic changes.
4990 	 *
4991 	 * Apart from cpuset, the memory policy mechanism also
4992 	 * determines from which node the kernel allocates memory
4993 	 * in a NUMA system, so the memory policy of the current task
4994 	 * must be considered in the same way as the cpuset constraints
4995 	 * above.
4996 	 */
4997 	if (delta > 0) {
4998 		if (gather_surplus_pages(h, delta) < 0)
4999 			goto out;
5000 
5001 		if (delta > allowed_mems_nr(h)) {
5002 			return_unused_surplus_pages(h, delta);
5003 			goto out;
5004 		}
5005 	}
5006 
5007 	ret = 0;
5008 	if (delta < 0)
5009 		return_unused_surplus_pages(h, (unsigned long) -delta);
5010 
5011 out:
5012 	spin_unlock_irq(&hugetlb_lock);
5013 	return ret;
5014 }
5015 
5016 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
5017 {
5018 	struct resv_map *resv = vma_resv_map(vma);
5019 
5020 	/*
5021 	 * HPAGE_RESV_OWNER indicates a private mapping.
5022 	 * This new VMA should share its sibling's reservation map if present.
5023 	 * The VMA will only ever have a valid reservation map pointer where
5024 	 * it is being copied for another still existing VMA.  As that VMA
5025 	 * has a reference to the reservation map it cannot disappear until
5026 	 * after this open call completes.  It is therefore safe to take a
5027 	 * new reference here without additional locking.
5028 	 */
5029 	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
5030 		resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
5031 		kref_get(&resv->refs);
5032 	}
5033 
5034 	/*
5035 	 * vma_lock structure for sharable mappings is vma specific.
5036 	 * Clear old pointer (if copied via vm_area_dup) and allocate
5037 	 * new structure.  Before clearing, make sure vma_lock is not
5038 	 * for this vma.
5039 	 */
5040 	if (vma->vm_flags & VM_MAYSHARE) {
5041 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
5042 
5043 		if (vma_lock) {
5044 			if (vma_lock->vma != vma) {
5045 				vma->vm_private_data = NULL;
5046 				hugetlb_vma_lock_alloc(vma);
5047 			} else
5048 				pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
5049 		} else
5050 			hugetlb_vma_lock_alloc(vma);
5051 	}
5052 }
5053 
5054 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
5055 {
5056 	struct hstate *h = hstate_vma(vma);
5057 	struct resv_map *resv;
5058 	struct hugepage_subpool *spool = subpool_vma(vma);
5059 	unsigned long reserve, start, end;
5060 	long gbl_reserve;
5061 
5062 	hugetlb_vma_lock_free(vma);
5063 
5064 	resv = vma_resv_map(vma);
5065 	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
5066 		return;
5067 
5068 	start = vma_hugecache_offset(h, vma, vma->vm_start);
5069 	end = vma_hugecache_offset(h, vma, vma->vm_end);
5070 
5071 	reserve = (end - start) - region_count(resv, start, end);
5072 	hugetlb_cgroup_uncharge_counter(resv, start, end);
5073 	if (reserve) {
5074 		/*
5075 		 * Decrement reserve counts.  The global reserve count may be
5076 		 * adjusted if the subpool has a minimum size.
5077 		 */
5078 		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
5079 		hugetlb_acct_memory(h, -gbl_reserve);
5080 	}
5081 
5082 	kref_put(&resv->refs, resv_map_release);
5083 }
5084 
5085 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
5086 {
5087 	if (addr & ~(huge_page_mask(hstate_vma(vma))))
5088 		return -EINVAL;
5089 
5090 	/*
5091 	 * PMD sharing is only possible for PUD_SIZE-aligned address ranges
5092 	 * in HugeTLB VMAs.  If we will lose PUD_SIZE alignment due to this
5093 	 * split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
5094 	 */
5095 	if (addr & ~PUD_MASK) {
5096 		/*
5097 		 * hugetlb_vm_op_split is called right before we attempt to
5098 		 * split the VMA.  We will need to unshare PMDs in the old and
5099 		 * new VMAs, so let's unshare before we split.
5100 		 */
5101 		unsigned long floor = addr & PUD_MASK;
5102 		unsigned long ceil = floor + PUD_SIZE;
5103 
5104 		if (floor >= vma->vm_start && ceil <= vma->vm_end)
5105 			hugetlb_unshare_pmds(vma, floor, ceil);
5106 	}
5107 
5108 	return 0;
5109 }
5110 
5111 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
5112 {
5113 	return huge_page_size(hstate_vma(vma));
5114 }
5115 
5116 /*
5117  * We cannot handle pagefaults against hugetlb pages at all.  They cause
5118  * handle_mm_fault() to try to instantiate regular-sized pages in the
5119  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
5120  * this far.
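 * (Hugetlb faults are routed to hugetlb_fault() by handle_mm_fault(),
 * so reaching this handler indicates a kernel bug.)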
5121 */ 5122 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf) 5123 { 5124 BUG(); 5125 return 0; 5126 } 5127 5128 /* 5129 * When a new function is introduced to vm_operations_struct and added 5130 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops. 5131 * This is because under System V memory model, mappings created via 5132 * shmget/shmat with "huge page" specified are backed by hugetlbfs files, 5133 * their original vm_ops are overwritten with shm_vm_ops. 5134 */ 5135 const struct vm_operations_struct hugetlb_vm_ops = { 5136 .fault = hugetlb_vm_op_fault, 5137 .open = hugetlb_vm_op_open, 5138 .close = hugetlb_vm_op_close, 5139 .may_split = hugetlb_vm_op_split, 5140 .pagesize = hugetlb_vm_op_pagesize, 5141 }; 5142 5143 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 5144 int writable) 5145 { 5146 pte_t entry; 5147 unsigned int shift = huge_page_shift(hstate_vma(vma)); 5148 5149 if (writable) { 5150 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, 5151 vma->vm_page_prot))); 5152 } else { 5153 entry = huge_pte_wrprotect(mk_huge_pte(page, 5154 vma->vm_page_prot)); 5155 } 5156 entry = pte_mkyoung(entry); 5157 entry = arch_make_huge_pte(entry, shift, vma->vm_flags); 5158 5159 return entry; 5160 } 5161 5162 static void set_huge_ptep_writable(struct vm_area_struct *vma, 5163 unsigned long address, pte_t *ptep) 5164 { 5165 pte_t entry; 5166 5167 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(vma->vm_mm, address, ptep))); 5168 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) 5169 update_mmu_cache(vma, address, ptep); 5170 } 5171 5172 bool is_hugetlb_entry_migration(pte_t pte) 5173 { 5174 swp_entry_t swp; 5175 5176 if (huge_pte_none(pte) || pte_present(pte)) 5177 return false; 5178 swp = pte_to_swp_entry(pte); 5179 if (is_migration_entry(swp)) 5180 return true; 5181 else 5182 return false; 5183 } 5184 5185 bool is_hugetlb_entry_hwpoisoned(pte_t pte) 5186 { 5187 swp_entry_t swp; 5188 5189 if (huge_pte_none(pte) || pte_present(pte)) 5190 return false; 5191 swp = pte_to_swp_entry(pte); 5192 if (is_hwpoison_entry(swp)) 5193 return true; 5194 else 5195 return false; 5196 } 5197 5198 static void 5199 hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr, 5200 struct folio *new_folio, pte_t old, unsigned long sz) 5201 { 5202 pte_t newpte = make_huge_pte(vma, &new_folio->page, 1); 5203 5204 __folio_mark_uptodate(new_folio); 5205 hugetlb_add_new_anon_rmap(new_folio, vma, addr); 5206 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old)) 5207 newpte = huge_pte_mkuffd_wp(newpte); 5208 set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz); 5209 hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm); 5210 folio_set_hugetlb_migratable(new_folio); 5211 } 5212 5213 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 5214 struct vm_area_struct *dst_vma, 5215 struct vm_area_struct *src_vma) 5216 { 5217 pte_t *src_pte, *dst_pte, entry; 5218 struct folio *pte_folio; 5219 unsigned long addr; 5220 bool cow = is_cow_mapping(src_vma->vm_flags); 5221 struct hstate *h = hstate_vma(src_vma); 5222 unsigned long sz = huge_page_size(h); 5223 unsigned long npages = pages_per_huge_page(h); 5224 struct mmu_notifier_range range; 5225 unsigned long last_addr_mask; 5226 int ret = 0; 5227 5228 if (cow) { 5229 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src, 5230 src_vma->vm_start, 5231 src_vma->vm_end); 5232 mmu_notifier_invalidate_range_start(&range); 5233 
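		/*
		 * The seqcount write section below pairs with the lockless
		 * read in GUP-fast, preventing it from racing with the
		 * write-protection of src PTEs during fork (see the
		 * write_protect_seq usage in mm/gup.c).
		 */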
vma_assert_write_locked(src_vma); 5234 raw_write_seqcount_begin(&src->write_protect_seq); 5235 } else { 5236 /* 5237 * For shared mappings the vma lock must be held before 5238 * calling hugetlb_walk() in the src vma. Otherwise, the 5239 * returned ptep could go away if part of a shared pmd and 5240 * another thread calls huge_pmd_unshare. 5241 */ 5242 hugetlb_vma_lock_read(src_vma); 5243 } 5244 5245 last_addr_mask = hugetlb_mask_last_page(h); 5246 for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) { 5247 spinlock_t *src_ptl, *dst_ptl; 5248 src_pte = hugetlb_walk(src_vma, addr, sz); 5249 if (!src_pte) { 5250 addr |= last_addr_mask; 5251 continue; 5252 } 5253 dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz); 5254 if (!dst_pte) { 5255 ret = -ENOMEM; 5256 break; 5257 } 5258 5259 /* 5260 * If the pagetables are shared don't copy or take references. 5261 * 5262 * dst_pte == src_pte is the common case of src/dest sharing. 5263 * However, src could have 'unshared' and dst shares with 5264 * another vma. So page_count of ptep page is checked instead 5265 * to reliably determine whether pte is shared. 5266 */ 5267 if (page_count(virt_to_page(dst_pte)) > 1) { 5268 addr |= last_addr_mask; 5269 continue; 5270 } 5271 5272 dst_ptl = huge_pte_lock(h, dst, dst_pte); 5273 src_ptl = huge_pte_lockptr(h, src, src_pte); 5274 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5275 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); 5276 again: 5277 if (huge_pte_none(entry)) { 5278 /* 5279 * Skip if src entry none. 5280 */ 5281 ; 5282 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) { 5283 if (!userfaultfd_wp(dst_vma)) 5284 entry = huge_pte_clear_uffd_wp(entry); 5285 set_huge_pte_at(dst, addr, dst_pte, entry, sz); 5286 } else if (unlikely(is_hugetlb_entry_migration(entry))) { 5287 swp_entry_t swp_entry = pte_to_swp_entry(entry); 5288 bool uffd_wp = pte_swp_uffd_wp(entry); 5289 5290 if (!is_readable_migration_entry(swp_entry) && cow) { 5291 /* 5292 * COW mappings require pages in both 5293 * parent and child to be set to read. 5294 */ 5295 swp_entry = make_readable_migration_entry( 5296 swp_offset(swp_entry)); 5297 entry = swp_entry_to_pte(swp_entry); 5298 if (userfaultfd_wp(src_vma) && uffd_wp) 5299 entry = pte_swp_mkuffd_wp(entry); 5300 set_huge_pte_at(src, addr, src_pte, entry, sz); 5301 } 5302 if (!userfaultfd_wp(dst_vma)) 5303 entry = huge_pte_clear_uffd_wp(entry); 5304 set_huge_pte_at(dst, addr, dst_pte, entry, sz); 5305 } else if (unlikely(is_pte_marker(entry))) { 5306 pte_marker marker = copy_pte_marker( 5307 pte_to_swp_entry(entry), dst_vma); 5308 5309 if (marker) 5310 set_huge_pte_at(dst, addr, dst_pte, 5311 make_pte_marker(marker), sz); 5312 } else { 5313 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); 5314 pte_folio = page_folio(pte_page(entry)); 5315 folio_get(pte_folio); 5316 5317 /* 5318 * Failing to duplicate the anon rmap is a rare case 5319 * where we see pinned hugetlb pages while they're 5320 * prone to COW. We need to do the COW earlier during 5321 * fork. 5322 * 5323 * When pre-allocating the page or copying data, we 5324 * need to be without the pgtable locks since we could 5325 * sleep during the process. 
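 * (Hence the unlock/re-lock sequence below around
 * alloc_hugetlb_folio() and copy_user_large_folio().)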
5326 			 */
5327 			if (!folio_test_anon(pte_folio)) {
5328 				hugetlb_add_file_rmap(pte_folio);
5329 			} else if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) {
5330 				pte_t src_pte_old = entry;
5331 				struct folio *new_folio;
5332 
5333 				spin_unlock(src_ptl);
5334 				spin_unlock(dst_ptl);
5335 				/* Do not use the reserve, as it is privately owned */
5336 				new_folio = alloc_hugetlb_folio(dst_vma, addr, 1);
5337 				if (IS_ERR(new_folio)) {
5338 					folio_put(pte_folio);
5339 					ret = PTR_ERR(new_folio);
5340 					break;
5341 				}
5342 				ret = copy_user_large_folio(new_folio, pte_folio,
5343 							    addr, dst_vma);
5344 				folio_put(pte_folio);
5345 				if (ret) {
5346 					folio_put(new_folio);
5347 					break;
5348 				}
5349 
5350 				/* Install the new hugetlb folio if the src pte is stable */
5351 				dst_ptl = huge_pte_lock(h, dst, dst_pte);
5352 				src_ptl = huge_pte_lockptr(h, src, src_pte);
5353 				spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5354 				entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
5355 				if (!pte_same(src_pte_old, entry)) {
5356 					restore_reserve_on_error(h, dst_vma, addr,
5357 								new_folio);
5358 					folio_put(new_folio);
5359 					/* dst_pte lives in the child and cannot have changed */
5360 					goto again;
5361 				}
5362 				hugetlb_install_folio(dst_vma, dst_pte, addr,
5363 						      new_folio, src_pte_old, sz);
5364 				spin_unlock(src_ptl);
5365 				spin_unlock(dst_ptl);
5366 				continue;
5367 			}
5368 
5369 			if (cow) {
5370 				/*
5371 				 * No need to notify as we are downgrading page
5372 				 * table protection not changing it to point
5373 				 * to a new page.
5374 				 *
5375 				 * See Documentation/mm/mmu_notifier.rst
5376 				 */
5377 				huge_ptep_set_wrprotect(src, addr, src_pte);
5378 				entry = huge_pte_wrprotect(entry);
5379 			}
5380 
5381 			if (!userfaultfd_wp(dst_vma))
5382 				entry = huge_pte_clear_uffd_wp(entry);
5383 
5384 			set_huge_pte_at(dst, addr, dst_pte, entry, sz);
5385 			hugetlb_count_add(npages, dst);
5386 		}
5387 		spin_unlock(src_ptl);
5388 		spin_unlock(dst_ptl);
5389 	}
5390 
5391 	if (cow) {
5392 		raw_write_seqcount_end(&src->write_protect_seq);
5393 		mmu_notifier_invalidate_range_end(&range);
5394 	} else {
5395 		hugetlb_vma_unlock_read(src_vma);
5396 	}
5397 
5398 	return ret;
5399 }
5400 
5401 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
5402 			  unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
5403 			  unsigned long sz)
5404 {
5405 	bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
5406 	struct hstate *h = hstate_vma(vma);
5407 	struct mm_struct *mm = vma->vm_mm;
5408 	spinlock_t *src_ptl, *dst_ptl;
5409 	pte_t pte;
5410 
5411 	dst_ptl = huge_pte_lock(h, mm, dst_pte);
5412 	src_ptl = huge_pte_lockptr(h, mm, src_pte);
5413 
5414 	/*
5415 	 * We don't have to worry about the ordering of src and dst ptlocks
5416 	 * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock.
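	 * (mremap() of hugetlb ranges runs with mmap_lock held for write, so
	 * no second mover can take these two locks in the opposite order.)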
5417 */ 5418 if (src_ptl != dst_ptl) 5419 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5420 5421 pte = huge_ptep_get_and_clear(mm, old_addr, src_pte); 5422 5423 if (need_clear_uffd_wp && pte_marker_uffd_wp(pte)) 5424 huge_pte_clear(mm, new_addr, dst_pte, sz); 5425 else { 5426 if (need_clear_uffd_wp) { 5427 if (pte_present(pte)) 5428 pte = huge_pte_clear_uffd_wp(pte); 5429 else if (is_swap_pte(pte)) 5430 pte = pte_swp_clear_uffd_wp(pte); 5431 } 5432 set_huge_pte_at(mm, new_addr, dst_pte, pte, sz); 5433 } 5434 5435 if (src_ptl != dst_ptl) 5436 spin_unlock(src_ptl); 5437 spin_unlock(dst_ptl); 5438 } 5439 5440 int move_hugetlb_page_tables(struct vm_area_struct *vma, 5441 struct vm_area_struct *new_vma, 5442 unsigned long old_addr, unsigned long new_addr, 5443 unsigned long len) 5444 { 5445 struct hstate *h = hstate_vma(vma); 5446 struct address_space *mapping = vma->vm_file->f_mapping; 5447 unsigned long sz = huge_page_size(h); 5448 struct mm_struct *mm = vma->vm_mm; 5449 unsigned long old_end = old_addr + len; 5450 unsigned long last_addr_mask; 5451 pte_t *src_pte, *dst_pte; 5452 struct mmu_notifier_range range; 5453 bool shared_pmd = false; 5454 5455 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr, 5456 old_end); 5457 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 5458 /* 5459 * In case of shared PMDs, we should cover the maximum possible 5460 * range. 5461 */ 5462 flush_cache_range(vma, range.start, range.end); 5463 5464 mmu_notifier_invalidate_range_start(&range); 5465 last_addr_mask = hugetlb_mask_last_page(h); 5466 /* Prevent race with file truncation */ 5467 hugetlb_vma_lock_write(vma); 5468 i_mmap_lock_write(mapping); 5469 for (; old_addr < old_end; old_addr += sz, new_addr += sz) { 5470 src_pte = hugetlb_walk(vma, old_addr, sz); 5471 if (!src_pte) { 5472 old_addr |= last_addr_mask; 5473 new_addr |= last_addr_mask; 5474 continue; 5475 } 5476 if (huge_pte_none(huge_ptep_get(mm, old_addr, src_pte))) 5477 continue; 5478 5479 if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) { 5480 shared_pmd = true; 5481 old_addr |= last_addr_mask; 5482 new_addr |= last_addr_mask; 5483 continue; 5484 } 5485 5486 dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz); 5487 if (!dst_pte) 5488 break; 5489 5490 move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz); 5491 } 5492 5493 if (shared_pmd) 5494 flush_hugetlb_tlb_range(vma, range.start, range.end); 5495 else 5496 flush_hugetlb_tlb_range(vma, old_end - len, old_end); 5497 mmu_notifier_invalidate_range_end(&range); 5498 i_mmap_unlock_write(mapping); 5499 hugetlb_vma_unlock_write(vma); 5500 5501 return len + old_addr - old_end; 5502 } 5503 5504 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 5505 unsigned long start, unsigned long end, 5506 struct page *ref_page, zap_flags_t zap_flags) 5507 { 5508 struct mm_struct *mm = vma->vm_mm; 5509 unsigned long address; 5510 pte_t *ptep; 5511 pte_t pte; 5512 spinlock_t *ptl; 5513 struct page *page; 5514 struct hstate *h = hstate_vma(vma); 5515 unsigned long sz = huge_page_size(h); 5516 bool adjust_reservation = false; 5517 unsigned long last_addr_mask; 5518 bool force_flush = false; 5519 5520 WARN_ON(!is_vm_hugetlb_page(vma)); 5521 BUG_ON(start & ~huge_page_mask(h)); 5522 BUG_ON(end & ~huge_page_mask(h)); 5523 5524 /* 5525 * This is a hugetlb vma, all the pte entries should point 5526 * to huge page. 
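 * (The walk below therefore advances in huge_page_size(h) strides and
 * looks entries up with hugetlb_walk().)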
5527 */ 5528 tlb_change_page_size(tlb, sz); 5529 tlb_start_vma(tlb, vma); 5530 5531 last_addr_mask = hugetlb_mask_last_page(h); 5532 address = start; 5533 for (; address < end; address += sz) { 5534 ptep = hugetlb_walk(vma, address, sz); 5535 if (!ptep) { 5536 address |= last_addr_mask; 5537 continue; 5538 } 5539 5540 ptl = huge_pte_lock(h, mm, ptep); 5541 if (huge_pmd_unshare(mm, vma, address, ptep)) { 5542 spin_unlock(ptl); 5543 tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE); 5544 force_flush = true; 5545 address |= last_addr_mask; 5546 continue; 5547 } 5548 5549 pte = huge_ptep_get(mm, address, ptep); 5550 if (huge_pte_none(pte)) { 5551 spin_unlock(ptl); 5552 continue; 5553 } 5554 5555 /* 5556 * Migrating hugepage or HWPoisoned hugepage is already 5557 * unmapped and its refcount is dropped, so just clear pte here. 5558 */ 5559 if (unlikely(!pte_present(pte))) { 5560 /* 5561 * If the pte was wr-protected by uffd-wp in any of the 5562 * swap forms, meanwhile the caller does not want to 5563 * drop the uffd-wp bit in this zap, then replace the 5564 * pte with a marker. 5565 */ 5566 if (pte_swp_uffd_wp_any(pte) && 5567 !(zap_flags & ZAP_FLAG_DROP_MARKER)) 5568 set_huge_pte_at(mm, address, ptep, 5569 make_pte_marker(PTE_MARKER_UFFD_WP), 5570 sz); 5571 else 5572 huge_pte_clear(mm, address, ptep, sz); 5573 spin_unlock(ptl); 5574 continue; 5575 } 5576 5577 page = pte_page(pte); 5578 /* 5579 * If a reference page is supplied, it is because a specific 5580 * page is being unmapped, not a range. Ensure the page we 5581 * are about to unmap is the actual page of interest. 5582 */ 5583 if (ref_page) { 5584 if (page != ref_page) { 5585 spin_unlock(ptl); 5586 continue; 5587 } 5588 /* 5589 * Mark the VMA as having unmapped its page so that 5590 * future faults in this VMA will fail rather than 5591 * looking like data was lost 5592 */ 5593 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 5594 } 5595 5596 pte = huge_ptep_get_and_clear(mm, address, ptep); 5597 tlb_remove_huge_tlb_entry(h, tlb, ptep, address); 5598 if (huge_pte_dirty(pte)) 5599 set_page_dirty(page); 5600 /* Leave a uffd-wp pte marker if needed */ 5601 if (huge_pte_uffd_wp(pte) && 5602 !(zap_flags & ZAP_FLAG_DROP_MARKER)) 5603 set_huge_pte_at(mm, address, ptep, 5604 make_pte_marker(PTE_MARKER_UFFD_WP), 5605 sz); 5606 hugetlb_count_sub(pages_per_huge_page(h), mm); 5607 hugetlb_remove_rmap(page_folio(page)); 5608 5609 /* 5610 * Restore the reservation for anonymous page, otherwise the 5611 * backing page could be stolen by someone. 5612 * If there we are freeing a surplus, do not set the restore 5613 * reservation bit. 5614 */ 5615 if (!h->surplus_huge_pages && __vma_private_lock(vma) && 5616 folio_test_anon(page_folio(page))) { 5617 folio_set_hugetlb_restore_reserve(page_folio(page)); 5618 /* Reservation to be adjusted after the spin lock */ 5619 adjust_reservation = true; 5620 } 5621 5622 spin_unlock(ptl); 5623 5624 /* 5625 * Adjust the reservation for the region that will have the 5626 * reserve restored. Keep in mind that vma_needs_reservation() changes 5627 * resv->adds_in_progress if it succeeds. If this is not done, 5628 * do_exit() will not see it, and will keep the reservation 5629 * forever. 5630 */ 5631 if (adjust_reservation) { 5632 int rc = vma_needs_reservation(h, vma, address); 5633 5634 if (rc < 0) 5635 /* Pressumably allocate_file_region_entries failed 5636 * to allocate a file_region struct. Clear 5637 * hugetlb_restore_reserve so that global reserve 5638 * count will not be incremented by free_huge_folio. 
5639 * Act as if we consumed the reservation.
5640 */
5641 folio_clear_hugetlb_restore_reserve(page_folio(page));
5642 else if (rc)
5643 vma_add_reservation(h, vma, address);
5644 }
5645
5646 tlb_remove_page_size(tlb, page, huge_page_size(h));
5647 /*
5648 * Bail out after unmapping the reference page, if one was supplied
5649 */
5650 if (ref_page)
5651 break;
5652 }
5653 tlb_end_vma(tlb, vma);
5654
5655 /*
5656 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
5657 * could defer the flush until now, since by holding i_mmap_rwsem we
5658 * guaranteed that the last reference would not be dropped. But we must
5659 * do the flushing before we return, as otherwise i_mmap_rwsem will be
5660 * dropped and the last reference to the shared PMDs page might be
5661 * dropped as well.
5662 *
5663 * In theory we could defer the freeing of the PMD pages as well, but
5664 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
5665 * detect sharing, so we cannot defer the release of the page either.
5666 * Instead, do the flush now.
5667 */
5668 if (force_flush)
5669 tlb_flush_mmu_tlbonly(tlb);
5670 }
5671
5672 void __hugetlb_zap_begin(struct vm_area_struct *vma,
5673 unsigned long *start, unsigned long *end)
5674 {
5675 if (!vma->vm_file) /* hugetlbfs_file_mmap error */
5676 return;
5677
5678 adjust_range_if_pmd_sharing_possible(vma, start, end);
5679 hugetlb_vma_lock_write(vma);
5680 if (vma->vm_file)
5681 i_mmap_lock_write(vma->vm_file->f_mapping);
5682 }
5683
5684 void __hugetlb_zap_end(struct vm_area_struct *vma,
5685 struct zap_details *details)
5686 {
5687 zap_flags_t zap_flags = details ? details->zap_flags : 0;
5688
5689 if (!vma->vm_file) /* hugetlbfs_file_mmap error */
5690 return;
5691
5692 if (zap_flags & ZAP_FLAG_UNMAP) { /* final unmap */
5693 /*
5694 * Unlock and free the vma lock before releasing i_mmap_rwsem.
5695 * When the vma_lock is freed, this makes the vma ineligible
5696 * for pmd sharing. And, i_mmap_rwsem is required to set up
5697 * pmd sharing. This is important as page tables for this
5698 * unmapped range will be asynchronously deleted. If the page
5699 * tables are shared, there will be issues when accessed by
5700 * someone else.
5701 */
5702 __hugetlb_vma_unlock_write_free(vma);
5703 } else {
5704 hugetlb_vma_unlock_write(vma);
5705 }
5706
5707 if (vma->vm_file)
5708 i_mmap_unlock_write(vma->vm_file->f_mapping);
5709 }
5710
5711 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
5712 unsigned long end, struct page *ref_page,
5713 zap_flags_t zap_flags)
5714 {
5715 struct mmu_notifier_range range;
5716 struct mmu_gather tlb;
5717
5718 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
5719 start, end);
5720 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5721 mmu_notifier_invalidate_range_start(&range);
5722 tlb_gather_mmu(&tlb, vma->vm_mm);
5723
5724 __unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);
5725
5726 mmu_notifier_invalidate_range_end(&range);
5727 tlb_finish_mmu(&tlb);
5728 }
5729
5730 /*
5731 * This is called when the original mapper is failing to COW a MAP_PRIVATE
5732 * mapping it owns the reserve page for. The intention is to unmap the page
5733 * from other VMAs and let the children be SIGKILLed if they are faulting the
5734 * same region.
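* (The HPAGE_RESV_UNMAPPED flag set during that unmap makes a later
* fault in an affected VMA fail with SIGBUS rather than silently lose
* data; see hugetlb_no_page().)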
5735 */ 5736 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, 5737 struct page *page, unsigned long address) 5738 { 5739 struct hstate *h = hstate_vma(vma); 5740 struct vm_area_struct *iter_vma; 5741 struct address_space *mapping; 5742 pgoff_t pgoff; 5743 5744 /* 5745 * vm_pgoff is in PAGE_SIZE units, hence the different calculation 5746 * from page cache lookup which is in HPAGE_SIZE units. 5747 */ 5748 address = address & huge_page_mask(h); 5749 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + 5750 vma->vm_pgoff; 5751 mapping = vma->vm_file->f_mapping; 5752 5753 /* 5754 * Take the mapping lock for the duration of the table walk. As 5755 * this mapping should be shared between all the VMAs, 5756 * __unmap_hugepage_range() is called as the lock is already held 5757 */ 5758 i_mmap_lock_write(mapping); 5759 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { 5760 /* Do not unmap the current VMA */ 5761 if (iter_vma == vma) 5762 continue; 5763 5764 /* 5765 * Shared VMAs have their own reserves and do not affect 5766 * MAP_PRIVATE accounting but it is possible that a shared 5767 * VMA is using the same page so check and skip such VMAs. 5768 */ 5769 if (iter_vma->vm_flags & VM_MAYSHARE) 5770 continue; 5771 5772 /* 5773 * Unmap the page from other VMAs without their own reserves. 5774 * They get marked to be SIGKILLed if they fault in these 5775 * areas. This is because a future no-page fault on this VMA 5776 * could insert a zeroed page instead of the data existing 5777 * from the time of fork. This would look like data corruption 5778 */ 5779 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 5780 unmap_hugepage_range(iter_vma, address, 5781 address + huge_page_size(h), page, 0); 5782 } 5783 i_mmap_unlock_write(mapping); 5784 } 5785 5786 /* 5787 * hugetlb_wp() should be called with page lock of the original hugepage held. 5788 * Called with hugetlb_fault_mutex_table held and pte_page locked so we 5789 * cannot race with other handlers or page migration. 5790 * Keep the pte_same checks anyway to make transition from the mutex easier. 5791 */ 5792 static vm_fault_t hugetlb_wp(struct folio *pagecache_folio, 5793 struct vm_fault *vmf) 5794 { 5795 struct vm_area_struct *vma = vmf->vma; 5796 struct mm_struct *mm = vma->vm_mm; 5797 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 5798 pte_t pte = huge_ptep_get(mm, vmf->address, vmf->pte); 5799 struct hstate *h = hstate_vma(vma); 5800 struct folio *old_folio; 5801 struct folio *new_folio; 5802 int outside_reserve = 0; 5803 vm_fault_t ret = 0; 5804 struct mmu_notifier_range range; 5805 5806 /* 5807 * Never handle CoW for uffd-wp protected pages. It should be only 5808 * handled when the uffd-wp protection is removed. 5809 * 5810 * Note that only the CoW optimization path (in hugetlb_no_page()) 5811 * can trigger this, because hugetlb_fault() will always resolve 5812 * uffd-wp bit first. 5813 */ 5814 if (!unshare && huge_pte_uffd_wp(pte)) 5815 return 0; 5816 5817 /* 5818 * hugetlb does not support FOLL_FORCE-style write faults that keep the 5819 * PTE mapped R/O such as maybe_mkwrite() would do. 5820 */ 5821 if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE))) 5822 return VM_FAULT_SIGSEGV; 5823 5824 /* Let's take out MAP_SHARED mappings first. 
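* A write fault on a shared mapping never needs a page copy: all
* mappers share the same page, so it is enough to make the pte
* writable in place.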
*/
5825 if (vma->vm_flags & VM_MAYSHARE) {
5826 set_huge_ptep_writable(vma, vmf->address, vmf->pte);
5827 return 0;
5828 }
5829
5830 old_folio = page_folio(pte_page(pte));
5831
5832 delayacct_wpcopy_start();
5833
5834 retry_avoidcopy:
5835 /*
5836 * If no-one else is actually using this page, we're the exclusive
5837 * owner and can reuse this page.
5838 *
5839 * Note that we don't rely on the (safer) folio refcount here, because
5840 * copying the hugetlb folio when there are unexpected (temporary)
5841 * folio references could harm simple fork()+exit() users when
5842 * we run out of free hugetlb folios: we would have to kill processes
5843 * in scenarios that used to work. As a side effect, there can still
5844 * be leaks between processes, for example, with FOLL_GET users.
5845 */
5846 if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) {
5847 if (!PageAnonExclusive(&old_folio->page)) {
5848 folio_move_anon_rmap(old_folio, vma);
5849 SetPageAnonExclusive(&old_folio->page);
5850 }
5851 if (likely(!unshare))
5852 set_huge_ptep_writable(vma, vmf->address, vmf->pte);
5853
5854 delayacct_wpcopy_end();
5855 return 0;
5856 }
5857 VM_BUG_ON_PAGE(folio_test_anon(old_folio) &&
5858 PageAnonExclusive(&old_folio->page), &old_folio->page);
5859
5860 /*
5861 * If the process that created a MAP_PRIVATE mapping is about to
5862 * perform a COW due to a shared page count, attempt to satisfy
5863 * the allocation without using the existing reserves. The pagecache
5864 * page is used to determine if the reserve at this address was
5865 * consumed or not. If reserves were used, a partial faulted mapping
5866 * at the time of fork() could consume its reserves on COW instead
5867 * of the full address range.
5868 */
5869 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
5870 old_folio != pagecache_folio)
5871 outside_reserve = 1;
5872
5873 folio_get(old_folio);
5874
5875 /*
5876 * Drop page table lock as buddy allocator may be called. It will
5877 * be acquired again before returning to the caller, as expected.
5878 */
5879 spin_unlock(vmf->ptl);
5880 new_folio = alloc_hugetlb_folio(vma, vmf->address, outside_reserve);
5881
5882 if (IS_ERR(new_folio)) {
5883 /*
5884 * If a process owning a MAP_PRIVATE mapping fails to COW,
5885 * it is due to references held by a child and an insufficient
5886 * huge page pool. To guarantee the original mapper's
5887 * reliability, unmap the page from child processes. The child
5888 * may get SIGKILLed if it later faults.
5889 */
5890 if (outside_reserve) {
5891 struct address_space *mapping = vma->vm_file->f_mapping;
5892 pgoff_t idx;
5893 u32 hash;
5894
5895 folio_put(old_folio);
5896 /*
5897 * Drop hugetlb_fault_mutex and vma_lock before
5898 * unmapping. Unmapping needs to hold vma_lock
5899 * in write mode. Dropping vma_lock in read mode
5900 * here is OK as COW mappings do not interact with
5901 * PMD sharing.
5902 *
5903 * Reacquire both after unmap operation.
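* (The pte_same() check after reacquiring detects any change that
* happened while the locks were dropped.)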
5904 */
5905 idx = vma_hugecache_offset(h, vma, vmf->address);
5906 hash = hugetlb_fault_mutex_hash(mapping, idx);
5907 hugetlb_vma_unlock_read(vma);
5908 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5909
5910 unmap_ref_private(mm, vma, &old_folio->page,
5911 vmf->address);
5912
5913 mutex_lock(&hugetlb_fault_mutex_table[hash]);
5914 hugetlb_vma_lock_read(vma);
5915 spin_lock(vmf->ptl);
5916 vmf->pte = hugetlb_walk(vma, vmf->address,
5917 huge_page_size(h));
5918 if (likely(vmf->pte &&
5919 pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte)))
5920 goto retry_avoidcopy;
5921 /*
5922 * A race occurred while re-acquiring the page table
5923 * lock, and our job is done.
5924 */
5925 delayacct_wpcopy_end();
5926 return 0;
5927 }
5928
5929 ret = vmf_error(PTR_ERR(new_folio));
5930 goto out_release_old;
5931 }
5932
5933 /*
5934 * When the original hugepage is a shared one, it does not have
5935 * an anon_vma prepared.
5936 */
5937 ret = __vmf_anon_prepare(vmf);
5938 if (unlikely(ret))
5939 goto out_release_all;
5940
5941 if (copy_user_large_folio(new_folio, old_folio, vmf->real_address, vma)) {
5942 ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h));
5943 goto out_release_all;
5944 }
5945 __folio_mark_uptodate(new_folio);
5946
5947 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, vmf->address,
5948 vmf->address + huge_page_size(h));
5949 mmu_notifier_invalidate_range_start(&range);
5950
5951 /*
5952 * Retake the page table lock to check for racing updates
5953 * before the page tables are altered
5954 */
5955 spin_lock(vmf->ptl);
5956 vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h));
5957 if (likely(vmf->pte && pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) {
5958 pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare);
5959
5960 /* Break COW or unshare */
5961 huge_ptep_clear_flush(vma, vmf->address, vmf->pte);
5962 hugetlb_remove_rmap(old_folio);
5963 hugetlb_add_new_anon_rmap(new_folio, vma, vmf->address);
5964 if (huge_pte_uffd_wp(pte))
5965 newpte = huge_pte_mkuffd_wp(newpte);
5966 set_huge_pte_at(mm, vmf->address, vmf->pte, newpte,
5967 huge_page_size(h));
5968 folio_set_hugetlb_migratable(new_folio);
5969 /* Make the old page be freed below */
5970 new_folio = old_folio;
5971 }
5972 spin_unlock(vmf->ptl);
5973 mmu_notifier_invalidate_range_end(&range);
5974 out_release_all:
5975 /*
5976 * No restore in case of successful pagetable update (Break COW or
5977 * unshare)
5978 */
5979 if (new_folio != old_folio)
5980 restore_reserve_on_error(h, vma, vmf->address, new_folio);
5981 folio_put(new_folio);
5982 out_release_old:
5983 folio_put(old_folio);
5984
5985 spin_lock(vmf->ptl); /* Caller expects lock to be held */
5986
5987 delayacct_wpcopy_end();
5988 return ret;
5989 }
5990
5991 /*
5992 * Return whether there is a pagecache page to back a given address within the VMA.
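* (The reference taken by filemap_get_folio() is dropped again; only
* the presence of the folio is reported.)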
5993 */ 5994 bool hugetlbfs_pagecache_present(struct hstate *h, 5995 struct vm_area_struct *vma, unsigned long address) 5996 { 5997 struct address_space *mapping = vma->vm_file->f_mapping; 5998 pgoff_t idx = linear_page_index(vma, address); 5999 struct folio *folio; 6000 6001 folio = filemap_get_folio(mapping, idx); 6002 if (IS_ERR(folio)) 6003 return false; 6004 folio_put(folio); 6005 return true; 6006 } 6007 6008 int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping, 6009 pgoff_t idx) 6010 { 6011 struct inode *inode = mapping->host; 6012 struct hstate *h = hstate_inode(inode); 6013 int err; 6014 6015 idx <<= huge_page_order(h); 6016 __folio_set_locked(folio); 6017 err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL); 6018 6019 if (unlikely(err)) { 6020 __folio_clear_locked(folio); 6021 return err; 6022 } 6023 folio_clear_hugetlb_restore_reserve(folio); 6024 6025 /* 6026 * mark folio dirty so that it will not be removed from cache/file 6027 * by non-hugetlbfs specific code paths. 6028 */ 6029 folio_mark_dirty(folio); 6030 6031 spin_lock(&inode->i_lock); 6032 inode->i_blocks += blocks_per_huge_page(h); 6033 spin_unlock(&inode->i_lock); 6034 return 0; 6035 } 6036 6037 static inline vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf, 6038 struct address_space *mapping, 6039 unsigned long reason) 6040 { 6041 u32 hash; 6042 6043 /* 6044 * vma_lock and hugetlb_fault_mutex must be dropped before handling 6045 * userfault. Also mmap_lock could be dropped due to handling 6046 * userfault, any vma operation should be careful from here. 6047 */ 6048 hugetlb_vma_unlock_read(vmf->vma); 6049 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff); 6050 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6051 return handle_userfault(vmf, reason); 6052 } 6053 6054 /* 6055 * Recheck pte with pgtable lock. Returns true if pte didn't change, or 6056 * false if pte changed or is changing. 6057 */ 6058 static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, unsigned long addr, 6059 pte_t *ptep, pte_t old_pte) 6060 { 6061 spinlock_t *ptl; 6062 bool same; 6063 6064 ptl = huge_pte_lock(h, mm, ptep); 6065 same = pte_same(huge_ptep_get(mm, addr, ptep), old_pte); 6066 spin_unlock(ptl); 6067 6068 return same; 6069 } 6070 6071 static vm_fault_t hugetlb_no_page(struct address_space *mapping, 6072 struct vm_fault *vmf) 6073 { 6074 struct vm_area_struct *vma = vmf->vma; 6075 struct mm_struct *mm = vma->vm_mm; 6076 struct hstate *h = hstate_vma(vma); 6077 vm_fault_t ret = VM_FAULT_SIGBUS; 6078 int anon_rmap = 0; 6079 unsigned long size; 6080 struct folio *folio; 6081 pte_t new_pte; 6082 bool new_folio, new_pagecache_folio = false; 6083 u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff); 6084 6085 /* 6086 * Currently, we are forced to kill the process in the event the 6087 * original mapper has unmapped pages from the child due to a failed 6088 * COW/unsharing. Warn that such a situation has occurred as it may not 6089 * be obvious. 6090 */ 6091 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 6092 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n", 6093 current->pid); 6094 goto out; 6095 } 6096 6097 /* 6098 * Use page lock to guard against racing truncation 6099 * before we get page_table_lock. 
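* (filemap_lock_hugetlb_folio() below returns the folio locked; the
* lock is held until the new pte has been installed.)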
6100 */
6101 new_folio = false;
6102 folio = filemap_lock_hugetlb_folio(h, mapping, vmf->pgoff);
6103 if (IS_ERR(folio)) {
6104 size = i_size_read(mapping->host) >> huge_page_shift(h);
6105 if (vmf->pgoff >= size)
6106 goto out;
6107 /* Check for page in userfault range */
6108 if (userfaultfd_missing(vma)) {
6109 /*
6110 * Since hugetlb_no_page() was examining the pte
6111 * without the pgtable lock, we need to re-test under
6112 * the lock because the pte may not be stable and could
6113 * have changed from under us. Try to detect
6114 * either changed or still-changing ptes and retry
6115 * properly when needed.
6116 *
6117 * Note that userfaultfd is actually fine with
6118 * false positives (e.g. caused by a changed pte),
6119 * but not with wrong logical events (e.g. caused by
6120 * reading a pte while it is changing). The latter can
6121 * confuse userspace, so the strictness is very
6122 * much preferred. E.g., a MISSING event should
6123 * never happen on a page after UFFDIO_COPY has
6124 * correctly installed the page and returned.
6125 */
6126 if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) {
6127 ret = 0;
6128 goto out;
6129 }
6130
6131 return hugetlb_handle_userfault(vmf, mapping,
6132 VM_UFFD_MISSING);
6133 }
6134
6135 if (!(vma->vm_flags & VM_MAYSHARE)) {
6136 ret = __vmf_anon_prepare(vmf);
6137 if (unlikely(ret))
6138 goto out;
6139 }
6140
6141 folio = alloc_hugetlb_folio(vma, vmf->address, 0);
6142 if (IS_ERR(folio)) {
6143 /*
6144 * Returning an error will result in the faulting task
6145 * being sent SIGBUS. The hugetlb fault mutex prevents two
6146 * tasks from racing to fault in the same page, which
6147 * could result in spurious "unable to allocate" errors.
6148 * Page migration does not take the fault mutex, but
6149 * does a clear-then-write of ptes under the page table
6150 * lock. Page fault code could race with migration,
6151 * notice the cleared pte and try to allocate a page
6152 * here. Before returning an error, take the ptl and make
6153 * sure there really is no pte entry.
6154 */
6155 if (hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte))
6156 ret = vmf_error(PTR_ERR(folio));
6157 else
6158 ret = 0;
6159 goto out;
6160 }
6161 folio_zero_user(folio, vmf->real_address);
6162 __folio_mark_uptodate(folio);
6163 new_folio = true;
6164
6165 if (vma->vm_flags & VM_MAYSHARE) {
6166 int err = hugetlb_add_to_page_cache(folio, mapping,
6167 vmf->pgoff);
6168 if (err) {
6169 /*
6170 * err can't be -EEXIST, which would imply someone
6171 * else consumed the reservation, since the hugetlb
6172 * fault mutex is held when adding a hugetlb page
6173 * to the page cache. So it's safe to call
6174 * restore_reserve_on_error() here.
6175 */
6176 restore_reserve_on_error(h, vma, vmf->address,
6177 folio);
6178 folio_put(folio);
6179 ret = VM_FAULT_SIGBUS;
6180 goto out;
6181 }
6182 new_pagecache_folio = true;
6183 } else {
6184 folio_lock(folio);
6185 anon_rmap = 1;
6186 }
6187 } else {
6188 /*
6189 * If a memory error occurs between mmap() and fault, some
6190 * processes don't have a hwpoisoned swap entry for the errored
6191 * virtual address. So we need to block the hugepage fault by a
6192 * PG_hwpoison bit check.
6193 */
6194 if (unlikely(folio_test_hwpoison(folio))) {
6195 ret = VM_FAULT_HWPOISON_LARGE |
6196 VM_FAULT_SET_HINDEX(hstate_index(h));
6197 goto backout_unlocked;
6198 }
6199
6199 /* Check for page in userfault range.
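* (This is the minor-fault case: the folio already exists in the page
* cache but is not yet mapped into the page tables.)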
*/ 6200 if (userfaultfd_minor(vma)) { 6201 folio_unlock(folio); 6202 folio_put(folio); 6203 /* See comment in userfaultfd_missing() block above */ 6204 if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) { 6205 ret = 0; 6206 goto out; 6207 } 6208 return hugetlb_handle_userfault(vmf, mapping, 6209 VM_UFFD_MINOR); 6210 } 6211 } 6212 6213 /* 6214 * If we are going to COW a private mapping later, we examine the 6215 * pending reservations for this page now. This will ensure that 6216 * any allocations necessary to record that reservation occur outside 6217 * the spinlock. 6218 */ 6219 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 6220 if (vma_needs_reservation(h, vma, vmf->address) < 0) { 6221 ret = VM_FAULT_OOM; 6222 goto backout_unlocked; 6223 } 6224 /* Just decrements count, does not deallocate */ 6225 vma_end_reservation(h, vma, vmf->address); 6226 } 6227 6228 vmf->ptl = huge_pte_lock(h, mm, vmf->pte); 6229 ret = 0; 6230 /* If pte changed from under us, retry */ 6231 if (!pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), vmf->orig_pte)) 6232 goto backout; 6233 6234 if (anon_rmap) 6235 hugetlb_add_new_anon_rmap(folio, vma, vmf->address); 6236 else 6237 hugetlb_add_file_rmap(folio); 6238 new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE) 6239 && (vma->vm_flags & VM_SHARED))); 6240 /* 6241 * If this pte was previously wr-protected, keep it wr-protected even 6242 * if populated. 6243 */ 6244 if (unlikely(pte_marker_uffd_wp(vmf->orig_pte))) 6245 new_pte = huge_pte_mkuffd_wp(new_pte); 6246 set_huge_pte_at(mm, vmf->address, vmf->pte, new_pte, huge_page_size(h)); 6247 6248 hugetlb_count_add(pages_per_huge_page(h), mm); 6249 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 6250 /* Optimization, do the COW without a second fault */ 6251 ret = hugetlb_wp(folio, vmf); 6252 } 6253 6254 spin_unlock(vmf->ptl); 6255 6256 /* 6257 * Only set hugetlb_migratable in newly allocated pages. Existing pages 6258 * found in the pagecache may not have hugetlb_migratable if they have 6259 * been isolated for migration. 6260 */ 6261 if (new_folio) 6262 folio_set_hugetlb_migratable(folio); 6263 6264 folio_unlock(folio); 6265 out: 6266 hugetlb_vma_unlock_read(vma); 6267 6268 /* 6269 * We must check to release the per-VMA lock. __vmf_anon_prepare() is 6270 * the only way ret can be set to VM_FAULT_RETRY. 6271 */ 6272 if (unlikely(ret & VM_FAULT_RETRY)) 6273 vma_end_read(vma); 6274 6275 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6276 return ret; 6277 6278 backout: 6279 spin_unlock(vmf->ptl); 6280 backout_unlocked: 6281 if (new_folio && !new_pagecache_folio) 6282 restore_reserve_on_error(h, vma, vmf->address, folio); 6283 6284 folio_unlock(folio); 6285 folio_put(folio); 6286 goto out; 6287 } 6288 6289 #ifdef CONFIG_SMP 6290 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) 6291 { 6292 unsigned long key[2]; 6293 u32 hash; 6294 6295 key[0] = (unsigned long) mapping; 6296 key[1] = idx; 6297 6298 hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0); 6299 6300 return hash & (num_fault_mutexes - 1); 6301 } 6302 #else 6303 /* 6304 * For uniprocessor systems we always use a single mutex, so just 6305 * return 0 and avoid the hashing overhead. 
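* (num_fault_mutexes is set to 1 in this configuration, so slot 0 is
* the only mutex.)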
6306 */
6307 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
6308 {
6309 return 0;
6310 }
6311 #endif
6312
6313 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
6314 unsigned long address, unsigned int flags)
6315 {
6316 vm_fault_t ret;
6317 u32 hash;
6318 struct folio *folio = NULL;
6319 struct folio *pagecache_folio = NULL;
6320 struct hstate *h = hstate_vma(vma);
6321 struct address_space *mapping;
6322 int need_wait_lock = 0;
6323 struct vm_fault vmf = {
6324 .vma = vma,
6325 .address = address & huge_page_mask(h),
6326 .real_address = address,
6327 .flags = flags,
6328 .pgoff = vma_hugecache_offset(h, vma,
6329 address & huge_page_mask(h)),
6330 /* TODO: Track hugetlb faults using vm_fault */
6331
6332 /*
6333 * Some fields may not be initialized; be careful, as it may
6334 * be hard to debug if called functions make assumptions
6335 */
6336 };
6337
6338 /*
6339 * Serialize hugepage allocation and instantiation, so that we don't
6340 * get spurious allocation failures if two CPUs race to instantiate
6341 * the same page in the page cache.
6342 */
6343 mapping = vma->vm_file->f_mapping;
6344 hash = hugetlb_fault_mutex_hash(mapping, vmf.pgoff);
6345 mutex_lock(&hugetlb_fault_mutex_table[hash]);
6346
6347 /*
6348 * Acquire vma lock before calling huge_pte_alloc and hold
6349 * until finished with vmf.pte. This prevents huge_pmd_unshare from
6350 * being called elsewhere and making the vmf.pte no longer valid.
6351 */
6352 hugetlb_vma_lock_read(vma);
6353 vmf.pte = huge_pte_alloc(mm, vma, vmf.address, huge_page_size(h));
6354 if (!vmf.pte) {
6355 hugetlb_vma_unlock_read(vma);
6356 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6357 return VM_FAULT_OOM;
6358 }
6359
6360 vmf.orig_pte = huge_ptep_get(mm, vmf.address, vmf.pte);
6361 if (huge_pte_none_mostly(vmf.orig_pte)) {
6362 if (is_pte_marker(vmf.orig_pte)) {
6363 pte_marker marker =
6364 pte_marker_get(pte_to_swp_entry(vmf.orig_pte));
6365
6366 if (marker & PTE_MARKER_POISONED) {
6367 ret = VM_FAULT_HWPOISON_LARGE |
6368 VM_FAULT_SET_HINDEX(hstate_index(h));
6369 goto out_mutex;
6370 } else if (WARN_ON_ONCE(marker & PTE_MARKER_GUARD)) {
6371 /* This isn't supported in hugetlb. */
6372 ret = VM_FAULT_SIGSEGV;
6373 goto out_mutex;
6374 }
6375 }
6376
6377 /*
6378 * Other PTE markers should be handled the same way as a none PTE.
6379 *
6380 * hugetlb_no_page will drop the vma lock and hugetlb fault
6381 * mutex internally, which makes us return immediately.
6382 */
6383 return hugetlb_no_page(mapping, &vmf);
6384 }
6385
6386 ret = 0;
6387
6388 /*
6389 * vmf.orig_pte could be a migration or hwpoison entry at this
6390 * point, so this check prevents the kernel from going below assuming
6391 * that we have an active hugepage in the pagecache. This goto expects
6392 * the 2nd page fault, and the is_hugetlb_entry_(migration|hwpoisoned)
6393 * check will properly handle it.
6394 */
6395 if (!pte_present(vmf.orig_pte)) {
6396 if (unlikely(is_hugetlb_entry_migration(vmf.orig_pte))) {
6397 /*
6398 * Release the hugetlb fault lock now, but retain
6399 * the vma lock, because it is needed to guard the
6400 * huge_pte_lockptr() later in
6401 * migration_entry_wait_huge(). The vma lock will
6402 * be released there.
6403 */
6404 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6405 migration_entry_wait_huge(vma, vmf.address, vmf.pte);
6406 return 0;
6407 } else if (unlikely(is_hugetlb_entry_hwpoisoned(vmf.orig_pte)))
6408 ret = VM_FAULT_HWPOISON_LARGE |
6409 VM_FAULT_SET_HINDEX(hstate_index(h));
6410 goto out_mutex;
6411 }
6412
6413 /*
6414 * If we are going to COW/unshare the mapping later, we examine the
6415 * pending reservations for this page now. This will ensure that any
6416 * allocations necessary to record that reservation occur outside the
6417 * spinlock. Also lookup the pagecache page now as it is used to
6418 * determine if a reservation has been consumed.
6419 */
6420 if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
6421 !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(vmf.orig_pte)) {
6422 if (vma_needs_reservation(h, vma, vmf.address) < 0) {
6423 ret = VM_FAULT_OOM;
6424 goto out_mutex;
6425 }
6426 /* Just decrements count, does not deallocate */
6427 vma_end_reservation(h, vma, vmf.address);
6428
6429 pagecache_folio = filemap_lock_hugetlb_folio(h, mapping,
6430 vmf.pgoff);
6431 if (IS_ERR(pagecache_folio))
6432 pagecache_folio = NULL;
6433 }
6434
6435 vmf.ptl = huge_pte_lock(h, mm, vmf.pte);
6436
6437 /* Check for a racing update before calling hugetlb_wp() */
6438 if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(mm, vmf.address, vmf.pte))))
6439 goto out_ptl;
6440
6441 /* Handle userfault-wp first, before trying to lock more pages */
6442 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(mm, vmf.address, vmf.pte)) &&
6443 (flags & FAULT_FLAG_WRITE) && !huge_pte_write(vmf.orig_pte)) {
6444 if (!userfaultfd_wp_async(vma)) {
6445 spin_unlock(vmf.ptl);
6446 if (pagecache_folio) {
6447 folio_unlock(pagecache_folio);
6448 folio_put(pagecache_folio);
6449 }
6450 hugetlb_vma_unlock_read(vma);
6451 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6452 return handle_userfault(&vmf, VM_UFFD_WP);
6453 }
6454
6455 vmf.orig_pte = huge_pte_clear_uffd_wp(vmf.orig_pte);
6456 set_huge_pte_at(mm, vmf.address, vmf.pte, vmf.orig_pte,
6457 huge_page_size(hstate_vma(vma)));
6458 /* Fallthrough to CoW */
6459 }
6460
6461 /*
6462 * hugetlb_wp() requires page locks of pte_page(vmf.orig_pte) and
6463 * pagecache_folio, so here we need to take the former one
6464 * when folio != pagecache_folio or pagecache_folio is NULL.
6465 */
6466 folio = page_folio(pte_page(vmf.orig_pte));
6467 if (folio != pagecache_folio)
6468 if (!folio_trylock(folio)) {
6469 need_wait_lock = 1;
6470 goto out_ptl;
6471 }
6472
6473 folio_get(folio);
6474
6475 if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
6476 if (!huge_pte_write(vmf.orig_pte)) {
6477 ret = hugetlb_wp(pagecache_folio, &vmf);
6478 goto out_put_page;
6479 } else if (likely(flags & FAULT_FLAG_WRITE)) {
6480 vmf.orig_pte = huge_pte_mkdirty(vmf.orig_pte);
6481 }
6482 }
6483 vmf.orig_pte = pte_mkyoung(vmf.orig_pte);
6484 if (huge_ptep_set_access_flags(vma, vmf.address, vmf.pte, vmf.orig_pte,
6485 flags & FAULT_FLAG_WRITE))
6486 update_mmu_cache(vma, vmf.address, vmf.pte);
6487 out_put_page:
6488 if (folio != pagecache_folio)
6489 folio_unlock(folio);
6490 folio_put(folio);
6491 out_ptl:
6492 spin_unlock(vmf.ptl);
6493
6494 if (pagecache_folio) {
6495 folio_unlock(pagecache_folio);
6496 folio_put(pagecache_folio);
6497 }
6498 out_mutex:
6499 hugetlb_vma_unlock_read(vma);
6500
6501 /*
6502 * We must check to release the per-VMA lock. __vmf_anon_prepare() in
6503 * hugetlb_wp() is the only way ret can be set to VM_FAULT_RETRY.
6504 */
6505 if (unlikely(ret & VM_FAULT_RETRY))
6506 vma_end_read(vma);
6507
6508 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6509 /*
6510 * Generally it's safe to hold a refcount while waiting on a page lock.
6511 * But here we only wait to defer the next page fault and avoid a busy
6512 * loop; the page is not used after it is unlocked before the current
6513 * page fault returns. So we are safe from accessing a freed page, even
6514 * though we wait here without taking a refcount.
6515 */
6516 if (need_wait_lock)
6517 folio_wait_locked(folio);
6518 return ret;
6519 }
6520
6521 #ifdef CONFIG_USERFAULTFD
6522 /*
6523 * Can probably be eliminated, but still used by hugetlb_mfill_atomic_pte().
6524 */
6525 static struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
6526 struct vm_area_struct *vma, unsigned long address)
6527 {
6528 struct mempolicy *mpol;
6529 nodemask_t *nodemask;
6530 struct folio *folio;
6531 gfp_t gfp_mask;
6532 int node;
6533
6534 gfp_mask = htlb_alloc_mask(h);
6535 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
6536 /*
6537 * This is used to allocate a temporary hugetlb folio to hold the
6538 * copied content, which will then be copied again to the final hugetlb
6539 * folio consuming a reservation. Set alloc_fallback to false to
6540 * indicate that breaking the per-node hugetlb pool is not allowed in
6541 * this case.
6542 */
6542 folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask, false);
6543 mpol_cond_put(mpol);
6544
6545 return folio;
6546 }
6547
6548 /*
6549 * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte
6550 * with modifications for hugetlb pages.
6551 */
6552 int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
6553 struct vm_area_struct *dst_vma,
6554 unsigned long dst_addr,
6555 unsigned long src_addr,
6556 uffd_flags_t flags,
6557 struct folio **foliop)
6558 {
6559 struct mm_struct *dst_mm = dst_vma->vm_mm;
6560 bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
6561 bool wp_enabled = (flags & MFILL_ATOMIC_WP);
6562 struct hstate *h = hstate_vma(dst_vma);
6563 struct address_space *mapping = dst_vma->vm_file->f_mapping;
6564 pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
6565 unsigned long size = huge_page_size(h);
6566 int vm_shared = dst_vma->vm_flags & VM_SHARED;
6567 pte_t _dst_pte;
6568 spinlock_t *ptl;
6569 int ret = -ENOMEM;
6570 struct folio *folio;
6571 int writable;
6572 bool folio_in_pagecache = false;
6573
6574 if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
6575 ptl = huge_pte_lock(h, dst_mm, dst_pte);
6576
6577 /* Don't overwrite any existing PTEs (even markers) */
6578 if (!huge_pte_none(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
6579 spin_unlock(ptl);
6580 return -EEXIST;
6581 }
6582
6583 _dst_pte = make_pte_marker(PTE_MARKER_POISONED);
6584 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
6585
6586 /* No need to invalidate - it was non-present before */
6587 update_mmu_cache(dst_vma, dst_addr, dst_pte);
6588
6589 spin_unlock(ptl);
6590 return 0;
6591 }
6592
6593 if (is_continue) {
6594 ret = -EFAULT;
6595 folio = filemap_lock_hugetlb_folio(h, mapping, idx);
6596 if (IS_ERR(folio))
6597 goto out;
6598 folio_in_pagecache = true;
6599 } else if (!*foliop) {
6600 /* If a folio already exists, then it's UFFDIO_COPY for
6601 * a non-missing case. Return -EEXIST.
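* (For shared mappings, the hugetlbfs_pagecache_present() check below
* performs that non-missing test.)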
6602 */ 6603 if (vm_shared && 6604 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { 6605 ret = -EEXIST; 6606 goto out; 6607 } 6608 6609 folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0); 6610 if (IS_ERR(folio)) { 6611 ret = -ENOMEM; 6612 goto out; 6613 } 6614 6615 ret = copy_folio_from_user(folio, (const void __user *) src_addr, 6616 false); 6617 6618 /* fallback to copy_from_user outside mmap_lock */ 6619 if (unlikely(ret)) { 6620 ret = -ENOENT; 6621 /* Free the allocated folio which may have 6622 * consumed a reservation. 6623 */ 6624 restore_reserve_on_error(h, dst_vma, dst_addr, folio); 6625 folio_put(folio); 6626 6627 /* Allocate a temporary folio to hold the copied 6628 * contents. 6629 */ 6630 folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr); 6631 if (!folio) { 6632 ret = -ENOMEM; 6633 goto out; 6634 } 6635 *foliop = folio; 6636 /* Set the outparam foliop and return to the caller to 6637 * copy the contents outside the lock. Don't free the 6638 * folio. 6639 */ 6640 goto out; 6641 } 6642 } else { 6643 if (vm_shared && 6644 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { 6645 folio_put(*foliop); 6646 ret = -EEXIST; 6647 *foliop = NULL; 6648 goto out; 6649 } 6650 6651 folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0); 6652 if (IS_ERR(folio)) { 6653 folio_put(*foliop); 6654 ret = -ENOMEM; 6655 *foliop = NULL; 6656 goto out; 6657 } 6658 ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma); 6659 folio_put(*foliop); 6660 *foliop = NULL; 6661 if (ret) { 6662 folio_put(folio); 6663 goto out; 6664 } 6665 } 6666 6667 /* 6668 * If we just allocated a new page, we need a memory barrier to ensure 6669 * that preceding stores to the page become visible before the 6670 * set_pte_at() write. The memory barrier inside __folio_mark_uptodate 6671 * is what we need. 6672 * 6673 * In the case where we have not allocated a new page (is_continue), 6674 * the page must already be uptodate. UFFDIO_CONTINUE already includes 6675 * an earlier smp_wmb() to ensure that prior stores will be visible 6676 * before the set_pte_at() write. 6677 */ 6678 if (!is_continue) 6679 __folio_mark_uptodate(folio); 6680 else 6681 WARN_ON_ONCE(!folio_test_uptodate(folio)); 6682 6683 /* Add shared, newly allocated pages to the page cache. */ 6684 if (vm_shared && !is_continue) { 6685 ret = -EFAULT; 6686 if (idx >= (i_size_read(mapping->host) >> huge_page_shift(h))) 6687 goto out_release_nounlock; 6688 6689 /* 6690 * Serialization between remove_inode_hugepages() and 6691 * hugetlb_add_to_page_cache() below happens through the 6692 * hugetlb_fault_mutex_table that here must be hold by 6693 * the caller. 6694 */ 6695 ret = hugetlb_add_to_page_cache(folio, mapping, idx); 6696 if (ret) 6697 goto out_release_nounlock; 6698 folio_in_pagecache = true; 6699 } 6700 6701 ptl = huge_pte_lock(h, dst_mm, dst_pte); 6702 6703 ret = -EIO; 6704 if (folio_test_hwpoison(folio)) 6705 goto out_release_unlock; 6706 6707 /* 6708 * We allow to overwrite a pte marker: consider when both MISSING|WP 6709 * registered, we firstly wr-protect a none pte which has no page cache 6710 * page backing it, then access the page. 6711 */ 6712 ret = -EEXIST; 6713 if (!huge_pte_none_mostly(huge_ptep_get(dst_mm, dst_addr, dst_pte))) 6714 goto out_release_unlock; 6715 6716 if (folio_in_pagecache) 6717 hugetlb_add_file_rmap(folio); 6718 else 6719 hugetlb_add_new_anon_rmap(folio, dst_vma, dst_addr); 6720 6721 /* 6722 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY 6723 * with wp flag set, don't set pte write bit. 
6724 */
6725 if (wp_enabled || (is_continue && !vm_shared))
6726 writable = 0;
6727 else
6728 writable = dst_vma->vm_flags & VM_WRITE;
6729
6730 _dst_pte = make_huge_pte(dst_vma, &folio->page, writable);
6731 /*
6732 * Always mark UFFDIO_COPY page dirty; note that this may not be
6733 * extremely important for hugetlbfs for now since swapping is not
6734 * supported, but we should still make it clear that this page cannot
6735 * be thrown away at will, even if the write bit is not set.
6736 */
6737 _dst_pte = huge_pte_mkdirty(_dst_pte);
6738 _dst_pte = pte_mkyoung(_dst_pte);
6739
6740 if (wp_enabled)
6741 _dst_pte = huge_pte_mkuffd_wp(_dst_pte);
6742
6743 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
6744
6745 hugetlb_count_add(pages_per_huge_page(h), dst_mm);
6746
6747 /* No need to invalidate - it was non-present before */
6748 update_mmu_cache(dst_vma, dst_addr, dst_pte);
6749
6750 spin_unlock(ptl);
6751 if (!is_continue)
6752 folio_set_hugetlb_migratable(folio);
6753 if (vm_shared || is_continue)
6754 folio_unlock(folio);
6755 ret = 0;
6756 out:
6757 return ret;
6758 out_release_unlock:
6759 spin_unlock(ptl);
6760 if (vm_shared || is_continue)
6761 folio_unlock(folio);
6762 out_release_nounlock:
6763 if (!folio_in_pagecache)
6764 restore_reserve_on_error(h, dst_vma, dst_addr, folio);
6765 folio_put(folio);
6766 goto out;
6767 }
6768 #endif /* CONFIG_USERFAULTFD */
6769
6770 long hugetlb_change_protection(struct vm_area_struct *vma,
6771 unsigned long address, unsigned long end,
6772 pgprot_t newprot, unsigned long cp_flags)
6773 {
6774 struct mm_struct *mm = vma->vm_mm;
6775 unsigned long start = address;
6776 pte_t *ptep;
6777 pte_t pte;
6778 struct hstate *h = hstate_vma(vma);
6779 long pages = 0, psize = huge_page_size(h);
6780 bool shared_pmd = false;
6781 struct mmu_notifier_range range;
6782 unsigned long last_addr_mask;
6783 bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
6784 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
6785
6786 /*
6787 * In the case of shared PMDs, the area to flush could be beyond
6788 * start/end. Set range.start/range.end to cover the maximum possible
6789 * range if PMD sharing is possible.
6790 */
6791 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
6792 0, mm, start, end);
6793 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
6794
6795 BUG_ON(address >= end);
6796 flush_cache_range(vma, range.start, range.end);
6797
6798 mmu_notifier_invalidate_range_start(&range);
6799 hugetlb_vma_lock_write(vma);
6800 i_mmap_lock_write(vma->vm_file->f_mapping);
6801 last_addr_mask = hugetlb_mask_last_page(h);
6802 for (; address < end; address += psize) {
6803 spinlock_t *ptl;
6804 ptep = hugetlb_walk(vma, address, psize);
6805 if (!ptep) {
6806 if (!uffd_wp) {
6807 address |= last_addr_mask;
6808 continue;
6809 }
6810 /*
6811 * Userfaultfd wr-protect requires pgtable
6812 * pre-allocations to install pte markers.
6813 */
6814 ptep = huge_pte_alloc(mm, vma, address, psize);
6815 if (!ptep) {
6816 pages = -ENOMEM;
6817 break;
6818 }
6819 }
6820 ptl = huge_pte_lock(h, mm, ptep);
6821 if (huge_pmd_unshare(mm, vma, address, ptep)) {
6822 /*
6823 * When uffd-wp is enabled on the vma, unsharing
6824 * shouldn't happen at all. Warn if it happens
6825 * for any reason.
6826 */
6827 WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
6828 pages++;
6829 spin_unlock(ptl);
6830 shared_pmd = true;
6831 address |= last_addr_mask;
6832 continue;
6833 }
6834 pte = huge_ptep_get(mm, address, ptep);
6835 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
6836 /* Nothing to do. */
6837 } else if (unlikely(is_hugetlb_entry_migration(pte))) {
6838 swp_entry_t entry = pte_to_swp_entry(pte);
6839 struct page *page = pfn_swap_entry_to_page(entry);
6840 pte_t newpte = pte;
6841
6842 if (is_writable_migration_entry(entry)) {
6843 if (PageAnon(page))
6844 entry = make_readable_exclusive_migration_entry(
6845 swp_offset(entry));
6846 else
6847 entry = make_readable_migration_entry(
6848 swp_offset(entry));
6849 newpte = swp_entry_to_pte(entry);
6850 pages++;
6851 }
6852
6853 if (uffd_wp)
6854 newpte = pte_swp_mkuffd_wp(newpte);
6855 else if (uffd_wp_resolve)
6856 newpte = pte_swp_clear_uffd_wp(newpte);
6857 if (!pte_same(pte, newpte))
6858 set_huge_pte_at(mm, address, ptep, newpte, psize);
6859 } else if (unlikely(is_pte_marker(pte))) {
6860 /*
6861 * Do nothing on a poison marker; the page is
6862 * corrupted, permissions do not apply. Here
6863 * pte_marker_uffd_wp()==true implies !poison
6864 * because they're mutually exclusive.
6865 */
6866 if (pte_marker_uffd_wp(pte) && uffd_wp_resolve)
6867 /* Safe to modify directly (non-present->none). */
6868 huge_pte_clear(mm, address, ptep, psize);
6869 } else if (!huge_pte_none(pte)) {
6870 pte_t old_pte;
6871 unsigned int shift = huge_page_shift(hstate_vma(vma));
6872
6873 old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
6874 pte = huge_pte_modify(old_pte, newprot);
6875 pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
6876 if (uffd_wp)
6877 pte = huge_pte_mkuffd_wp(pte);
6878 else if (uffd_wp_resolve)
6879 pte = huge_pte_clear_uffd_wp(pte);
6880 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
6881 pages++;
6882 } else {
6883 /* None pte */
6884 if (unlikely(uffd_wp))
6885 /* Safe to modify directly (none->non-present). */
6886 set_huge_pte_at(mm, address, ptep,
6887 make_pte_marker(PTE_MARKER_UFFD_WP),
6888 psize);
6889 }
6890 spin_unlock(ptl);
6891 }
6892 /*
6893 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
6894 * may have cleared our pud entry and done put_page on the page table:
6895 * once we release i_mmap_rwsem, another task can do the final put_page
6896 * and the page table may be reused and filled with junk. If we actually
6897 * did unshare a page of pmds, flush the range corresponding to the pud.
6898 */
6899 if (shared_pmd)
6900 flush_hugetlb_tlb_range(vma, range.start, range.end);
6901 else
6902 flush_hugetlb_tlb_range(vma, start, end);
6903 /*
6904 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(); we are
6905 * downgrading page table protection, not changing it to point to a new
6906 * page.
6907 *
6908 * See Documentation/mm/mmu_notifier.rst
6909 */
6910 i_mmap_unlock_write(vma->vm_file->f_mapping);
6911 hugetlb_vma_unlock_write(vma);
6912 mmu_notifier_invalidate_range_end(&range);
6913
6914 return pages > 0 ? (pages << h->order) : pages;
6915 }
6916
6917 /* Return true if reservation was successful, false otherwise.
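* Typically called from the mmap() and SysV shm setup paths to reserve
* pages for the whole mapping up front.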
*/
6918 bool hugetlb_reserve_pages(struct inode *inode,
6919 long from, long to,
6920 struct vm_area_struct *vma,
6921 vm_flags_t vm_flags)
6922 {
6923 long chg = -1, add = -1;
6924 struct hstate *h = hstate_inode(inode);
6925 struct hugepage_subpool *spool = subpool_inode(inode);
6926 struct resv_map *resv_map;
6927 struct hugetlb_cgroup *h_cg = NULL;
6928 long gbl_reserve, regions_needed = 0;
6929
6930 /* This should never happen */
6931 if (from > to) {
6932 VM_WARN(1, "%s called with a negative range\n", __func__);
6933 return false;
6934 }
6935
6936 /*
6937 * vma specific semaphore used for pmd sharing and fault/truncation
6938 * synchronization
6939 */
6940 hugetlb_vma_lock_alloc(vma);
6941
6942 /*
6943 * Only apply the hugepage reservation if asked. At fault time, an
6944 * attempt will be made for VM_NORESERVE mappings to allocate a page
6945 * without using reserves
6946 */
6947 if (vm_flags & VM_NORESERVE)
6948 return true;
6949
6950 /*
6951 * Shared mappings base their reservation on the number of pages that
6952 * are already allocated on behalf of the file. Private mappings need
6953 * to reserve the full area even if read-only as mprotect() may be
6954 * called to make the mapping read-write. Assume !vma is a shm mapping.
6955 */
6956 if (!vma || vma->vm_flags & VM_MAYSHARE) {
6957 /*
6958 * resv_map cannot be NULL as hugetlb_reserve_pages is only
6959 * called for inodes for which resv_maps were created (see
6960 * hugetlbfs_get_inode).
6961 */
6962 resv_map = inode_resv_map(inode);
6963
6964 chg = region_chg(resv_map, from, to, &regions_needed);
6965 } else {
6966 /* Private mapping. */
6967 resv_map = resv_map_alloc();
6968 if (!resv_map)
6969 goto out_err;
6970
6971 chg = to - from;
6972
6973 set_vma_resv_map(vma, resv_map);
6974 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
6975 }
6976
6977 if (chg < 0)
6978 goto out_err;
6979
6980 if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
6981 chg * pages_per_huge_page(h), &h_cg) < 0)
6982 goto out_err;
6983
6984 if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
6985 /* For private mappings, the hugetlb_cgroup uncharge info hangs
6986 * off the resv_map.
6987 */
6988 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
6989 }
6990
6991 /*
6992 * There must be enough pages in the subpool for the mapping. If
6993 * the subpool has a minimum size, there may be some global
6994 * reservations already in place (gbl_reserve).
6995 */
6996 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
6997 if (gbl_reserve < 0)
6998 goto out_uncharge_cgroup;
6999
7000 /*
7001 * Check that enough hugepages are available for the reservation.
7002 * Hand the pages back to the subpool if there are not
7003 */
7004 if (hugetlb_acct_memory(h, gbl_reserve) < 0)
7005 goto out_put_pages;
7006
7007 /*
7008 * Account for the reservations made. Shared mappings record regions
7009 * that have reservations as they are shared by multiple VMAs.
7010 * When the last VMA disappears, the region map says how much
7011 * the reservation was and the page cache tells how much of
7012 * the reservation was consumed. Private mappings are per-VMA and
7013 * only the consumed reservations are tracked. When the VMA
7014 * disappears, the original reservation is the VMA size and the
7015 * consumed reservations are stored in the map.
Hence, nothing 7016 * else has to be done for private mappings here 7017 */ 7018 if (!vma || vma->vm_flags & VM_MAYSHARE) { 7019 add = region_add(resv_map, from, to, regions_needed, h, h_cg); 7020 7021 if (unlikely(add < 0)) { 7022 hugetlb_acct_memory(h, -gbl_reserve); 7023 goto out_put_pages; 7024 } else if (unlikely(chg > add)) { 7025 /* 7026 * pages in this range were added to the reserve 7027 * map between region_chg and region_add. This 7028 * indicates a race with alloc_hugetlb_folio. Adjust 7029 * the subpool and reserve counts modified above 7030 * based on the difference. 7031 */ 7032 long rsv_adjust; 7033 7034 /* 7035 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the 7036 * reference to h_cg->css. See comment below for detail. 7037 */ 7038 hugetlb_cgroup_uncharge_cgroup_rsvd( 7039 hstate_index(h), 7040 (chg - add) * pages_per_huge_page(h), h_cg); 7041 7042 rsv_adjust = hugepage_subpool_put_pages(spool, 7043 chg - add); 7044 hugetlb_acct_memory(h, -rsv_adjust); 7045 } else if (h_cg) { 7046 /* 7047 * The file_regions will hold their own reference to 7048 * h_cg->css. So we should release the reference held 7049 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are 7050 * done. 7051 */ 7052 hugetlb_cgroup_put_rsvd_cgroup(h_cg); 7053 } 7054 } 7055 return true; 7056 7057 out_put_pages: 7058 /* put back original number of pages, chg */ 7059 (void)hugepage_subpool_put_pages(spool, chg); 7060 out_uncharge_cgroup: 7061 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h), 7062 chg * pages_per_huge_page(h), h_cg); 7063 out_err: 7064 hugetlb_vma_lock_free(vma); 7065 if (!vma || vma->vm_flags & VM_MAYSHARE) 7066 /* Only call region_abort if the region_chg succeeded but the 7067 * region_add failed or didn't run. 7068 */ 7069 if (chg >= 0 && add < 0) 7070 region_abort(resv_map, from, to, regions_needed); 7071 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 7072 kref_put(&resv_map->refs, resv_map_release); 7073 set_vma_resv_map(vma, NULL); 7074 } 7075 return false; 7076 } 7077 7078 long hugetlb_unreserve_pages(struct inode *inode, long start, long end, 7079 long freed) 7080 { 7081 struct hstate *h = hstate_inode(inode); 7082 struct resv_map *resv_map = inode_resv_map(inode); 7083 long chg = 0; 7084 struct hugepage_subpool *spool = subpool_inode(inode); 7085 long gbl_reserve; 7086 7087 /* 7088 * Since this routine can be called in the evict inode path for all 7089 * hugetlbfs inodes, resv_map could be NULL. 7090 */ 7091 if (resv_map) { 7092 chg = region_del(resv_map, start, end); 7093 /* 7094 * region_del() can fail in the rare case where a region 7095 * must be split and another region descriptor can not be 7096 * allocated. If end == LONG_MAX, it will not fail. 7097 */ 7098 if (chg < 0) 7099 return chg; 7100 } 7101 7102 spin_lock(&inode->i_lock); 7103 inode->i_blocks -= (blocks_per_huge_page(h) * freed); 7104 spin_unlock(&inode->i_lock); 7105 7106 /* 7107 * If the subpool has a minimum size, the number of global 7108 * reservations to be released may be adjusted. 7109 * 7110 * Note that !resv_map implies freed == 0. So (chg - freed) 7111 * won't go negative. 
7112 */
7113 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
7114 hugetlb_acct_memory(h, -gbl_reserve);
7115
7116 return 0;
7117 }
7118
7119 #ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
7120 static unsigned long page_table_shareable(struct vm_area_struct *svma,
7121 struct vm_area_struct *vma,
7122 unsigned long addr, pgoff_t idx)
7123 {
7124 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
7125 svma->vm_start;
7126 unsigned long sbase = saddr & PUD_MASK;
7127 unsigned long s_end = sbase + PUD_SIZE;
7128
7129 /* Allow segments to share if only one is marked locked */
7130 unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED_MASK;
7131 unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED_MASK;
7132
7133 /*
7134 * Match the virtual addresses, permissions and the alignment of the
7135 * page table page.
7136 *
7137 * Also, vma_lock (vm_private_data) is required for sharing.
7138 */
7139 if (pmd_index(addr) != pmd_index(saddr) ||
7140 vm_flags != svm_flags ||
7141 !range_in_vma(svma, sbase, s_end) ||
7142 !svma->vm_private_data)
7143 return 0;
7144
7145 return saddr;
7146 }
7147
7148 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
7149 {
7150 unsigned long start = addr & PUD_MASK;
7151 unsigned long end = start + PUD_SIZE;
7152
7153 #ifdef CONFIG_USERFAULTFD
7154 if (uffd_disable_huge_pmd_share(vma))
7155 return false;
7156 #endif
7157 /*
7158 * Check for proper vm_flags and page table alignment
7159 */
7160 if (!(vma->vm_flags & VM_MAYSHARE))
7161 return false;
7162 if (!vma->vm_private_data) /* vma lock required for sharing */
7163 return false;
7164 if (!range_in_vma(vma, start, end))
7165 return false;
7166 return true;
7167 }
7168
7169 /*
7170 * Determine if the start,end range within vma could be mapped by shared pmd.
7171 * If yes, adjust start and end to cover the range associated with possible
7172 * shared pmd mappings.
7173 */
7174 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
7175 unsigned long *start, unsigned long *end)
7176 {
7177 unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
7178 v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
7179
7180 /*
7181 * vma needs to span at least one aligned PUD size, and the range
7182 * must be at least partially within it.
7183 */
7184 if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
7185 (*end <= v_start) || (*start >= v_end))
7186 return;
7187
7188 /* Extend the range to be PUD aligned for a worst case scenario */
7189 if (*start > v_start)
7190 *start = ALIGN_DOWN(*start, PUD_SIZE);
7191
7192 if (*end < v_end)
7193 *end = ALIGN(*end, PUD_SIZE);
7194 }
7195
7196 /*
7197 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
7198 * and returns the corresponding pte. While this is not necessary for the
7199 * !shared pmd case because we can allocate the pmd later as well, it makes the
7200 * code much cleaner. pmd allocation is essential for the shared case because
7201 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
7202 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
7203 * bad pmd for sharing.
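* The sharing count lives in the ptdesc of the PMD page
* (ptdesc_pmd_pts_inc()/ptdesc_pmd_pts_dec()); huge_pmd_unshare()
* consults it to detect a shared PMD page.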
7204 */ 7205 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, 7206 unsigned long addr, pud_t *pud) 7207 { 7208 struct address_space *mapping = vma->vm_file->f_mapping; 7209 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + 7210 vma->vm_pgoff; 7211 struct vm_area_struct *svma; 7212 unsigned long saddr; 7213 pte_t *spte = NULL; 7214 pte_t *pte; 7215 7216 i_mmap_lock_read(mapping); 7217 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { 7218 if (svma == vma) 7219 continue; 7220 7221 saddr = page_table_shareable(svma, vma, addr, idx); 7222 if (saddr) { 7223 spte = hugetlb_walk(svma, saddr, 7224 vma_mmu_pagesize(svma)); 7225 if (spte) { 7226 ptdesc_pmd_pts_inc(virt_to_ptdesc(spte)); 7227 break; 7228 } 7229 } 7230 } 7231 7232 if (!spte) 7233 goto out; 7234 7235 spin_lock(&mm->page_table_lock); 7236 if (pud_none(*pud)) { 7237 pud_populate(mm, pud, 7238 (pmd_t *)((unsigned long)spte & PAGE_MASK)); 7239 mm_inc_nr_pmds(mm); 7240 } else { 7241 ptdesc_pmd_pts_dec(virt_to_ptdesc(spte)); 7242 } 7243 spin_unlock(&mm->page_table_lock); 7244 out: 7245 pte = (pte_t *)pmd_alloc(mm, pud, addr); 7246 i_mmap_unlock_read(mapping); 7247 return pte; 7248 } 7249 7250 /* 7251 * unmap huge page backed by shared pte. 7252 * 7253 * Called with page table lock held. 7254 * 7255 * returns: 1 successfully unmapped a shared pte page 7256 * 0 the underlying pte page is not shared, or it is the last user 7257 */ 7258 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, 7259 unsigned long addr, pte_t *ptep) 7260 { 7261 unsigned long sz = huge_page_size(hstate_vma(vma)); 7262 pgd_t *pgd = pgd_offset(mm, addr); 7263 p4d_t *p4d = p4d_offset(pgd, addr); 7264 pud_t *pud = pud_offset(p4d, addr); 7265 7266 i_mmap_assert_write_locked(vma->vm_file->f_mapping); 7267 hugetlb_vma_assert_locked(vma); 7268 if (sz != PMD_SIZE) 7269 return 0; 7270 if (!ptdesc_pmd_pts_count(virt_to_ptdesc(ptep))) 7271 return 0; 7272 7273 pud_clear(pud); 7274 ptdesc_pmd_pts_dec(virt_to_ptdesc(ptep)); 7275 mm_dec_nr_pmds(mm); 7276 return 1; 7277 } 7278 7279 #else /* !CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */ 7280 7281 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, 7282 unsigned long addr, pud_t *pud) 7283 { 7284 return NULL; 7285 } 7286 7287 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, 7288 unsigned long addr, pte_t *ptep) 7289 { 7290 return 0; 7291 } 7292 7293 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, 7294 unsigned long *start, unsigned long *end) 7295 { 7296 } 7297 7298 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) 7299 { 7300 return false; 7301 } 7302 #endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */ 7303 7304 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB 7305 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, 7306 unsigned long addr, unsigned long sz) 7307 { 7308 pgd_t *pgd; 7309 p4d_t *p4d; 7310 pud_t *pud; 7311 pte_t *pte = NULL; 7312 7313 pgd = pgd_offset(mm, addr); 7314 p4d = p4d_alloc(mm, pgd, addr); 7315 if (!p4d) 7316 return NULL; 7317 pud = pud_alloc(mm, p4d, addr); 7318 if (pud) { 7319 if (sz == PUD_SIZE) { 7320 pte = (pte_t *)pud; 7321 } else { 7322 BUG_ON(sz != PMD_SIZE); 7323 if (want_pmd_share(vma, addr) && pud_none(*pud)) 7324 pte = huge_pmd_share(mm, vma, addr, pud); 7325 else 7326 pte = (pte_t *)pmd_alloc(mm, pud, addr); 7327 } 7328 } 7329 7330 if (pte) { 7331 pte_t pteval = ptep_get_lockless(pte); 7332 7333 BUG_ON(pte_present(pteval) && !pte_huge(pteval)); 7334 } 7335 
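/* At this point pte points at a PUD, at a (possibly shared) PMD, or is NULL if allocation failed. */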
7336 return pte; 7337 } 7338 7339 /* 7340 * huge_pte_offset() - Walk the page table to resolve the hugepage 7341 * entry at address @addr 7342 * 7343 * Return: Pointer to page table entry (PUD or PMD) for 7344 * address @addr, or NULL if a !p*d_present() entry is encountered and the 7345 * size @sz doesn't match the hugepage size at this level of the page 7346 * table. 7347 */ 7348 pte_t *huge_pte_offset(struct mm_struct *mm, 7349 unsigned long addr, unsigned long sz) 7350 { 7351 pgd_t *pgd; 7352 p4d_t *p4d; 7353 pud_t *pud; 7354 pmd_t *pmd; 7355 7356 pgd = pgd_offset(mm, addr); 7357 if (!pgd_present(*pgd)) 7358 return NULL; 7359 p4d = p4d_offset(pgd, addr); 7360 if (!p4d_present(*p4d)) 7361 return NULL; 7362 7363 pud = pud_offset(p4d, addr); 7364 if (sz == PUD_SIZE) 7365 /* must be pud huge, non-present or none */ 7366 return (pte_t *)pud; 7367 if (!pud_present(*pud)) 7368 return NULL; 7369 /* must have a valid entry and size to go further */ 7370 7371 pmd = pmd_offset(pud, addr); 7372 /* must be pmd huge, non-present or none */ 7373 return (pte_t *)pmd; 7374 } 7375 7376 /* 7377 * Return a mask that can be used to update an address to the last huge 7378 * page in a page table page mapping size. Used to skip non-present 7379 * page table entries when linearly scanning address ranges. Architectures 7380 * with unique huge page to page table relationships can define their own 7381 * version of this routine. 7382 */ 7383 unsigned long hugetlb_mask_last_page(struct hstate *h) 7384 { 7385 unsigned long hp_size = huge_page_size(h); 7386 7387 if (hp_size == PUD_SIZE) 7388 return P4D_SIZE - PUD_SIZE; 7389 else if (hp_size == PMD_SIZE) 7390 return PUD_SIZE - PMD_SIZE; 7391 else 7392 return 0UL; 7393 } 7394 7395 #else 7396 7397 /* See description above. Architectures can provide their own version. 
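* (The __weak annotation lets an architecture supply its own
* hugetlb_mask_last_page() that overrides this generic fallback.)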
 */
__weak unsigned long hugetlb_mask_last_page(struct hstate *h)
{
#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
	if (huge_page_size(h) == PMD_SIZE)
		return PUD_SIZE - PMD_SIZE;
#endif
	return 0UL;
}

#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */

/*
 * Isolate an in-use hugetlb folio in preparation for migration: take a
 * reference, clear the migratable flag and move the folio from the active
 * list to @list.  Returns false if the folio is not an isolatable hugetlb
 * folio.
 */
bool isolate_hugetlb(struct folio *folio, struct list_head *list)
{
	bool ret = true;

	spin_lock_irq(&hugetlb_lock);
	if (!folio_test_hugetlb(folio) ||
	    !folio_test_hugetlb_migratable(folio) ||
	    !folio_try_get(folio)) {
		ret = false;
		goto unlock;
	}
	folio_clear_hugetlb_migratable(folio);
	list_move_tail(&folio->lru, list);
unlock:
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}

/*
 * Used by memory-failure handling to pin a hugetlb folio.  *hugetlb tells
 * the caller whether @folio is a hugetlb folio at all.  Returns 0 for a
 * freed hugetlb folio, the result of folio_try_get() for a migratable
 * folio (or when unpoisoning), and -EBUSY otherwise.
 */
int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{
	int ret = 0;

	*hugetlb = false;
	spin_lock_irq(&hugetlb_lock);
	if (folio_test_hugetlb(folio)) {
		*hugetlb = true;
		if (folio_test_hugetlb_freed(folio))
			ret = 0;
		else if (folio_test_hugetlb_migratable(folio) || unpoison)
			ret = folio_try_get(folio);
		else
			ret = -EBUSY;
	}
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}

/* Wrapper that takes hugetlb_lock around __get_huge_page_for_hwpoison(). */
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
				bool *migratable_cleared)
{
	int ret;

	spin_lock_irq(&hugetlb_lock);
	ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared);
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}

/*
 * Put an isolated hugetlb folio back on the active list, mark it
 * migratable again and drop the reference taken at isolation time.
 */
void folio_putback_active_hugetlb(struct folio *folio)
{
	spin_lock_irq(&hugetlb_lock);
	folio_set_hugetlb_migratable(folio);
	list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist);
	spin_unlock_irq(&hugetlb_lock);
	folio_put(folio);
}

/*
 * Transfer cgroup charge, page owner and temporary/surplus state from
 * @old_folio to @new_folio during migration.
 */
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
{
	struct hstate *h = folio_hstate(old_folio);

	hugetlb_cgroup_migrate(old_folio, new_folio);
	set_page_owner_migrate_reason(&new_folio->page, reason);

	/*
	 * Transfer the temporary state of the new hugetlb folio.  This is
	 * the reverse of other transitions because the new folio is going
	 * to be final while the old one will be freed, so it takes over
	 * the temporary status.
	 *
	 * Also note that we have to transfer the per-node surplus state
	 * here as well, otherwise the global surplus count will not match
	 * the per-node ones.
	 */
	if (folio_test_hugetlb_temporary(new_folio)) {
		int old_nid = folio_nid(old_folio);
		int new_nid = folio_nid(new_folio);

		folio_set_hugetlb_temporary(old_folio);
		folio_clear_hugetlb_temporary(new_folio);

		/*
		 * There is no need to transfer the per-node surplus state
		 * when we do not cross the node.
		 */
		if (new_nid == old_nid)
			return;
		spin_lock_irq(&hugetlb_lock);
		if (h->surplus_huge_pages_node[old_nid]) {
			h->surplus_huge_pages_node[old_nid]--;
			h->surplus_huge_pages_node[new_nid]++;
		}
		spin_unlock_irq(&hugetlb_lock);
	}
}
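/*
 * Unshare any shared pmd page tables in the range [start, end) of a
 * VM_MAYSHARE hugetlb vma.  Callers must pass @start and @end already
 * aligned to PUD_SIZE.
 */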
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
				 unsigned long start,
				 unsigned long end)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	unsigned long address;
	spinlock_t *ptl;
	pte_t *ptep;

	if (!(vma->vm_flags & VM_MAYSHARE))
		return;

	if (start >= end)
		return;

	flush_cache_range(vma, start, end);
	/*
	 * No need to call adjust_range_if_pmd_sharing_possible(), because
	 * we have already done the PUD_SIZE alignment.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
				start, end);
	mmu_notifier_invalidate_range_start(&range);
	hugetlb_vma_lock_write(vma);
	i_mmap_lock_write(vma->vm_file->f_mapping);
	for (address = start; address < end; address += PUD_SIZE) {
		ptep = hugetlb_walk(vma, address, sz);
		if (!ptep)
			continue;
		ptl = huge_pte_lock(h, mm, ptep);
		huge_pmd_unshare(mm, vma, address, ptep);
		spin_unlock(ptl);
	}
	flush_hugetlb_tlb_range(vma, start, end);
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	hugetlb_vma_unlock_write(vma);
	/*
	 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see
	 * Documentation/mm/mmu_notifier.rst.
	 */
	mmu_notifier_invalidate_range_end(&range);
}

/*
 * Unconditionally remove all the shared pmd pgtable entries within the
 * given hugetlbfs vma.
 */
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
{
	hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
			ALIGN_DOWN(vma->vm_end, PUD_SIZE));
}

#ifdef CONFIG_CMA
static bool cma_reserve_called __initdata;

static int __init cmdline_parse_hugetlb_cma(char *p)
{
	int nid, count = 0;
	unsigned long tmp;
	char *s = p;

	while (*s) {
		if (sscanf(s, "%lu%n", &tmp, &count) != 1)
			break;

		if (s[count] == ':') {
			if (tmp >= MAX_NUMNODES)
				break;
			nid = array_index_nospec(tmp, MAX_NUMNODES);

			s += count + 1;
			tmp = memparse(s, &s);
			hugetlb_cma_size_in_node[nid] = tmp;
			hugetlb_cma_size += tmp;

			/*
			 * Skip the separator if there is one, otherwise
			 * stop parsing.
			 */
			if (*s == ',')
				s++;
			else
				break;
		} else {
			hugetlb_cma_size = memparse(p, &p);
			break;
		}
	}

	return 0;
}

early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
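/*
 * Illustrative forms accepted by the parser above (sizes take the usual
 * memparse() suffixes K/M/G):
 *
 *	hugetlb_cma=4G		one global request, spread over online nodes
 *	hugetlb_cma=0:1G,2:2G	per-node requests of the form <nid>:<size>
 */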
void __init hugetlb_cma_reserve(int order)
{
	unsigned long size, reserved, per_node;
	bool node_specific_cma_alloc = false;
	int nid;

	/*
	 * HugeTLB CMA reservation is required for gigantic
	 * huge pages which could not be allocated via the
	 * page allocator.  Just warn if there is any change
	 * breaking this assumption.
	 */
	VM_WARN_ON(order <= MAX_PAGE_ORDER);
	cma_reserve_called = true;

	if (!hugetlb_cma_size)
		return;

	for (nid = 0; nid < MAX_NUMNODES; nid++) {
		if (hugetlb_cma_size_in_node[nid] == 0)
			continue;

		if (!node_online(nid)) {
			pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
			hugetlb_cma_size_in_node[nid] = 0;
			continue;
		}

		if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
			pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
				nid, (PAGE_SIZE << order) / SZ_1M);
			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
			hugetlb_cma_size_in_node[nid] = 0;
		} else {
			node_specific_cma_alloc = true;
		}
	}

	/* Validate the CMA size again in case invalid nodes were specified. */
	if (!hugetlb_cma_size)
		return;

	if (hugetlb_cma_size < (PAGE_SIZE << order)) {
		pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
			(PAGE_SIZE << order) / SZ_1M);
		hugetlb_cma_size = 0;
		return;
	}

	if (!node_specific_cma_alloc) {
		/*
		 * If a 3 GB area is requested on a machine with 4 numa nodes,
		 * let's allocate 1 GB on the first three nodes and ignore the
		 * last one.
		 */
		per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
		pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
			hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
	}

	reserved = 0;
	for_each_online_node(nid) {
		int res;
		char name[CMA_MAX_NAME];

		if (node_specific_cma_alloc) {
			if (hugetlb_cma_size_in_node[nid] == 0)
				continue;

			size = hugetlb_cma_size_in_node[nid];
		} else {
			size = min(per_node, hugetlb_cma_size - reserved);
		}

		size = round_up(size, PAGE_SIZE << order);

		snprintf(name, sizeof(name), "hugetlb%d", nid);
		/*
		 * Note that 'order per bit' is based on the smallest size
		 * that may be returned to the CMA allocator in the case of
		 * huge page demotion.
		 */
		res = cma_declare_contiguous_nid(0, size, 0,
					PAGE_SIZE << order,
					HUGETLB_PAGE_ORDER, false, name,
					&hugetlb_cma[nid], nid);
		if (res) {
			pr_warn("hugetlb_cma: reservation failed: err %d, node %d\n",
				res, nid);
			continue;
		}

		reserved += size;
		pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
			size / SZ_1M, nid);

		if (reserved >= hugetlb_cma_size)
			break;
	}

	if (!reserved)
		/*
		 * hugetlb_cma_size is used to determine if allocations from
		 * cma are possible.  Set to zero if no cma regions are set up.
		 */
		hugetlb_cma_size = 0;
}

/*
 * Warn if hugetlb_cma= was given on the command line but the architecture
 * never called hugetlb_cma_reserve().
 */
static void __init hugetlb_cma_check(void)
{
	if (!hugetlb_cma_size || cma_reserve_called)
		return;

	pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
}

#endif /* CONFIG_CMA */