1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Generic hugetlb support. 4 * (C) Nadia Yvette Chambers, April 2004 5 */ 6 #include <linux/list.h> 7 #include <linux/init.h> 8 #include <linux/mm.h> 9 #include <linux/seq_file.h> 10 #include <linux/sysctl.h> 11 #include <linux/highmem.h> 12 #include <linux/mmu_notifier.h> 13 #include <linux/nodemask.h> 14 #include <linux/pagemap.h> 15 #include <linux/mempolicy.h> 16 #include <linux/compiler.h> 17 #include <linux/cpuset.h> 18 #include <linux/mutex.h> 19 #include <linux/memblock.h> 20 #include <linux/sysfs.h> 21 #include <linux/slab.h> 22 #include <linux/sched/mm.h> 23 #include <linux/mmdebug.h> 24 #include <linux/sched/signal.h> 25 #include <linux/rmap.h> 26 #include <linux/string_helpers.h> 27 #include <linux/swap.h> 28 #include <linux/swapops.h> 29 #include <linux/jhash.h> 30 #include <linux/numa.h> 31 #include <linux/llist.h> 32 #include <linux/cma.h> 33 #include <linux/migrate.h> 34 #include <linux/nospec.h> 35 #include <linux/delayacct.h> 36 #include <linux/memory.h> 37 #include <linux/mm_inline.h> 38 #include <linux/padata.h> 39 40 #include <asm/page.h> 41 #include <asm/pgalloc.h> 42 #include <asm/tlb.h> 43 44 #include <linux/io.h> 45 #include <linux/hugetlb.h> 46 #include <linux/hugetlb_cgroup.h> 47 #include <linux/node.h> 48 #include <linux/page_owner.h> 49 #include "internal.h" 50 #include "hugetlb_vmemmap.h" 51 52 int hugetlb_max_hstate __read_mostly; 53 unsigned int default_hstate_idx; 54 struct hstate hstates[HUGE_MAX_HSTATE]; 55 56 #ifdef CONFIG_CMA 57 static struct cma *hugetlb_cma[MAX_NUMNODES]; 58 static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata; 59 #endif 60 static unsigned long hugetlb_cma_size __initdata; 61 62 __initdata struct list_head huge_boot_pages[MAX_NUMNODES]; 63 64 /* for command line parsing */ 65 static struct hstate * __initdata parsed_hstate; 66 static unsigned long __initdata default_hstate_max_huge_pages; 67 static bool __initdata parsed_valid_hugepagesz = true; 68 static bool __initdata parsed_default_hugepagesz; 69 static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata; 70 71 /* 72 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages, 73 * free_huge_pages, and surplus_huge_pages. 74 */ 75 __cacheline_aligned_in_smp DEFINE_SPINLOCK(hugetlb_lock); 76 77 /* 78 * Serializes faults on the same logical page. This is used to 79 * prevent spurious OOMs when the hugepage pool is fully utilized. 
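 *
 * For illustration, a fault-path caller would typically serialize like
 * this (hugetlb_fault_mutex_hash() is defined later in this file; the
 * mapping and idx variables are assumed to identify the faulting page):
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... handle the fault ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);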
80 */ 81 static int num_fault_mutexes __ro_after_init; 82 struct mutex *hugetlb_fault_mutex_table __ro_after_init; 83 84 /* Forward declaration */ 85 static int hugetlb_acct_memory(struct hstate *h, long delta); 86 static void hugetlb_vma_lock_free(struct vm_area_struct *vma); 87 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma); 88 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma); 89 static void hugetlb_unshare_pmds(struct vm_area_struct *vma, 90 unsigned long start, unsigned long end); 91 static struct resv_map *vma_resv_map(struct vm_area_struct *vma); 92 93 static void hugetlb_free_folio(struct folio *folio) 94 { 95 #ifdef CONFIG_CMA 96 int nid = folio_nid(folio); 97 98 if (cma_free_folio(hugetlb_cma[nid], folio)) 99 return; 100 #endif 101 folio_put(folio); 102 } 103 104 static inline bool subpool_is_free(struct hugepage_subpool *spool) 105 { 106 if (spool->count) 107 return false; 108 if (spool->max_hpages != -1) 109 return spool->used_hpages == 0; 110 if (spool->min_hpages != -1) 111 return spool->rsv_hpages == spool->min_hpages; 112 113 return true; 114 } 115 116 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool, 117 unsigned long irq_flags) 118 { 119 spin_unlock_irqrestore(&spool->lock, irq_flags); 120 121 /* If no pages are used, and no other handles to the subpool 122 * remain, give up any reservations based on minimum size and 123 * free the subpool */ 124 if (subpool_is_free(spool)) { 125 if (spool->min_hpages != -1) 126 hugetlb_acct_memory(spool->hstate, 127 -spool->min_hpages); 128 kfree(spool); 129 } 130 } 131 132 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, 133 long min_hpages) 134 { 135 struct hugepage_subpool *spool; 136 137 spool = kzalloc(sizeof(*spool), GFP_KERNEL); 138 if (!spool) 139 return NULL; 140 141 spin_lock_init(&spool->lock); 142 spool->count = 1; 143 spool->max_hpages = max_hpages; 144 spool->hstate = h; 145 spool->min_hpages = min_hpages; 146 147 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) { 148 kfree(spool); 149 return NULL; 150 } 151 spool->rsv_hpages = min_hpages; 152 153 return spool; 154 } 155 156 void hugepage_put_subpool(struct hugepage_subpool *spool) 157 { 158 unsigned long flags; 159 160 spin_lock_irqsave(&spool->lock, flags); 161 BUG_ON(!spool->count); 162 spool->count--; 163 unlock_or_release_subpool(spool, flags); 164 } 165 166 /* 167 * Subpool accounting for allocating and reserving pages. 168 * Return -ENOMEM if there are not enough resources to satisfy the 169 * request. Otherwise, return the number of pages by which the 170 * global pools must be adjusted (upward). The returned value may 171 * only be different than the passed value (delta) in the case where 172 * a subpool minimum size must be maintained. 173 */ 174 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool, 175 long delta) 176 { 177 long ret = delta; 178 179 if (!spool) 180 return ret; 181 182 spin_lock_irq(&spool->lock); 183 184 if (spool->max_hpages != -1) { /* maximum size accounting */ 185 if ((spool->used_hpages + delta) <= spool->max_hpages) 186 spool->used_hpages += delta; 187 else { 188 ret = -ENOMEM; 189 goto unlock_ret; 190 } 191 } 192 193 /* minimum size accounting */ 194 if (spool->min_hpages != -1 && spool->rsv_hpages) { 195 if (delta > spool->rsv_hpages) { 196 /* 197 * Asking for more reserves than those already taken on 198 * behalf of subpool. Return difference. 
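			 *
			 * For example (numbers assumed for illustration): with
			 * rsv_hpages == 2 and delta == 5, only two of the five
			 * pages are covered by the subpool reserve, so ret
			 * becomes 3, rsv_hpages drops to 0, and the caller must
			 * take the remaining 3 pages from the global pool.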
199 */ 200 ret = delta - spool->rsv_hpages; 201 spool->rsv_hpages = 0; 202 } else { 203 ret = 0; /* reserves already accounted for */ 204 spool->rsv_hpages -= delta; 205 } 206 } 207 208 unlock_ret: 209 spin_unlock_irq(&spool->lock); 210 return ret; 211 } 212 213 /* 214 * Subpool accounting for freeing and unreserving pages. 215 * Return the number of global page reservations that must be dropped. 216 * The return value may only be different than the passed value (delta) 217 * in the case where a subpool minimum size must be maintained. 218 */ 219 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool, 220 long delta) 221 { 222 long ret = delta; 223 unsigned long flags; 224 225 if (!spool) 226 return delta; 227 228 spin_lock_irqsave(&spool->lock, flags); 229 230 if (spool->max_hpages != -1) /* maximum size accounting */ 231 spool->used_hpages -= delta; 232 233 /* minimum size accounting */ 234 if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) { 235 if (spool->rsv_hpages + delta <= spool->min_hpages) 236 ret = 0; 237 else 238 ret = spool->rsv_hpages + delta - spool->min_hpages; 239 240 spool->rsv_hpages += delta; 241 if (spool->rsv_hpages > spool->min_hpages) 242 spool->rsv_hpages = spool->min_hpages; 243 } 244 245 /* 246 * If hugetlbfs_put_super couldn't free spool due to an outstanding 247 * quota reference, free it now. 248 */ 249 unlock_or_release_subpool(spool, flags); 250 251 return ret; 252 } 253 254 static inline struct hugepage_subpool *subpool_inode(struct inode *inode) 255 { 256 return HUGETLBFS_SB(inode->i_sb)->spool; 257 } 258 259 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma) 260 { 261 return subpool_inode(file_inode(vma->vm_file)); 262 } 263 264 /* 265 * hugetlb vma_lock helper routines 266 */ 267 void hugetlb_vma_lock_read(struct vm_area_struct *vma) 268 { 269 if (__vma_shareable_lock(vma)) { 270 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 271 272 down_read(&vma_lock->rw_sema); 273 } else if (__vma_private_lock(vma)) { 274 struct resv_map *resv_map = vma_resv_map(vma); 275 276 down_read(&resv_map->rw_sema); 277 } 278 } 279 280 void hugetlb_vma_unlock_read(struct vm_area_struct *vma) 281 { 282 if (__vma_shareable_lock(vma)) { 283 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 284 285 up_read(&vma_lock->rw_sema); 286 } else if (__vma_private_lock(vma)) { 287 struct resv_map *resv_map = vma_resv_map(vma); 288 289 up_read(&resv_map->rw_sema); 290 } 291 } 292 293 void hugetlb_vma_lock_write(struct vm_area_struct *vma) 294 { 295 if (__vma_shareable_lock(vma)) { 296 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 297 298 down_write(&vma_lock->rw_sema); 299 } else if (__vma_private_lock(vma)) { 300 struct resv_map *resv_map = vma_resv_map(vma); 301 302 down_write(&resv_map->rw_sema); 303 } 304 } 305 306 void hugetlb_vma_unlock_write(struct vm_area_struct *vma) 307 { 308 if (__vma_shareable_lock(vma)) { 309 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 310 311 up_write(&vma_lock->rw_sema); 312 } else if (__vma_private_lock(vma)) { 313 struct resv_map *resv_map = vma_resv_map(vma); 314 315 up_write(&resv_map->rw_sema); 316 } 317 } 318 319 int hugetlb_vma_trylock_write(struct vm_area_struct *vma) 320 { 321 322 if (__vma_shareable_lock(vma)) { 323 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 324 325 return down_write_trylock(&vma_lock->rw_sema); 326 } else if (__vma_private_lock(vma)) { 327 struct resv_map *resv_map = vma_resv_map(vma); 328 329 return 
down_write_trylock(&resv_map->rw_sema); 330 } 331 332 return 1; 333 } 334 335 void hugetlb_vma_assert_locked(struct vm_area_struct *vma) 336 { 337 if (__vma_shareable_lock(vma)) { 338 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 339 340 lockdep_assert_held(&vma_lock->rw_sema); 341 } else if (__vma_private_lock(vma)) { 342 struct resv_map *resv_map = vma_resv_map(vma); 343 344 lockdep_assert_held(&resv_map->rw_sema); 345 } 346 } 347 348 void hugetlb_vma_lock_release(struct kref *kref) 349 { 350 struct hugetlb_vma_lock *vma_lock = container_of(kref, 351 struct hugetlb_vma_lock, refs); 352 353 kfree(vma_lock); 354 } 355 356 static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock) 357 { 358 struct vm_area_struct *vma = vma_lock->vma; 359 360 /* 361 * vma_lock structure may or not be released as a result of put, 362 * it certainly will no longer be attached to vma so clear pointer. 363 * Semaphore synchronizes access to vma_lock->vma field. 364 */ 365 vma_lock->vma = NULL; 366 vma->vm_private_data = NULL; 367 up_write(&vma_lock->rw_sema); 368 kref_put(&vma_lock->refs, hugetlb_vma_lock_release); 369 } 370 371 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma) 372 { 373 if (__vma_shareable_lock(vma)) { 374 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 375 376 __hugetlb_vma_unlock_write_put(vma_lock); 377 } else if (__vma_private_lock(vma)) { 378 struct resv_map *resv_map = vma_resv_map(vma); 379 380 /* no free for anon vmas, but still need to unlock */ 381 up_write(&resv_map->rw_sema); 382 } 383 } 384 385 static void hugetlb_vma_lock_free(struct vm_area_struct *vma) 386 { 387 /* 388 * Only present in sharable vmas. 389 */ 390 if (!vma || !__vma_shareable_lock(vma)) 391 return; 392 393 if (vma->vm_private_data) { 394 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 395 396 down_write(&vma_lock->rw_sema); 397 __hugetlb_vma_unlock_write_put(vma_lock); 398 } 399 } 400 401 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma) 402 { 403 struct hugetlb_vma_lock *vma_lock; 404 405 /* Only establish in (flags) sharable vmas */ 406 if (!vma || !(vma->vm_flags & VM_MAYSHARE)) 407 return; 408 409 /* Should never get here with non-NULL vm_private_data */ 410 if (vma->vm_private_data) 411 return; 412 413 vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL); 414 if (!vma_lock) { 415 /* 416 * If we can not allocate structure, then vma can not 417 * participate in pmd sharing. This is only a possible 418 * performance enhancement and memory saving issue. 419 * However, the lock is also used to synchronize page 420 * faults with truncation. If the lock is not present, 421 * unlikely races could leave pages in a file past i_size 422 * until the file is removed. Warn in the unlikely case of 423 * allocation failure. 424 */ 425 pr_warn_once("HugeTLB: unable to allocate vma specific lock\n"); 426 return; 427 } 428 429 kref_init(&vma_lock->refs); 430 init_rwsem(&vma_lock->rw_sema); 431 vma_lock->vma = vma; 432 vma->vm_private_data = vma_lock; 433 } 434 435 /* Helper that removes a struct file_region from the resv_map cache and returns 436 * it for use. 
437 */ 438 static struct file_region * 439 get_file_region_entry_from_cache(struct resv_map *resv, long from, long to) 440 { 441 struct file_region *nrg; 442 443 VM_BUG_ON(resv->region_cache_count <= 0); 444 445 resv->region_cache_count--; 446 nrg = list_first_entry(&resv->region_cache, struct file_region, link); 447 list_del(&nrg->link); 448 449 nrg->from = from; 450 nrg->to = to; 451 452 return nrg; 453 } 454 455 static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg, 456 struct file_region *rg) 457 { 458 #ifdef CONFIG_CGROUP_HUGETLB 459 nrg->reservation_counter = rg->reservation_counter; 460 nrg->css = rg->css; 461 if (rg->css) 462 css_get(rg->css); 463 #endif 464 } 465 466 /* Helper that records hugetlb_cgroup uncharge info. */ 467 static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg, 468 struct hstate *h, 469 struct resv_map *resv, 470 struct file_region *nrg) 471 { 472 #ifdef CONFIG_CGROUP_HUGETLB 473 if (h_cg) { 474 nrg->reservation_counter = 475 &h_cg->rsvd_hugepage[hstate_index(h)]; 476 nrg->css = &h_cg->css; 477 /* 478 * The caller will hold exactly one h_cg->css reference for the 479 * whole contiguous reservation region. But this area might be 480 * scattered when there are already some file_regions reside in 481 * it. As a result, many file_regions may share only one css 482 * reference. In order to ensure that one file_region must hold 483 * exactly one h_cg->css reference, we should do css_get for 484 * each file_region and leave the reference held by caller 485 * untouched. 486 */ 487 css_get(&h_cg->css); 488 if (!resv->pages_per_hpage) 489 resv->pages_per_hpage = pages_per_huge_page(h); 490 /* pages_per_hpage should be the same for all entries in 491 * a resv_map. 492 */ 493 VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h)); 494 } else { 495 nrg->reservation_counter = NULL; 496 nrg->css = NULL; 497 } 498 #endif 499 } 500 501 static void put_uncharge_info(struct file_region *rg) 502 { 503 #ifdef CONFIG_CGROUP_HUGETLB 504 if (rg->css) 505 css_put(rg->css); 506 #endif 507 } 508 509 static bool has_same_uncharge_info(struct file_region *rg, 510 struct file_region *org) 511 { 512 #ifdef CONFIG_CGROUP_HUGETLB 513 return rg->reservation_counter == org->reservation_counter && 514 rg->css == org->css; 515 516 #else 517 return true; 518 #endif 519 } 520 521 static void coalesce_file_region(struct resv_map *resv, struct file_region *rg) 522 { 523 struct file_region *nrg, *prg; 524 525 prg = list_prev_entry(rg, link); 526 if (&prg->link != &resv->regions && prg->to == rg->from && 527 has_same_uncharge_info(prg, rg)) { 528 prg->to = rg->to; 529 530 list_del(&rg->link); 531 put_uncharge_info(rg); 532 kfree(rg); 533 534 rg = prg; 535 } 536 537 nrg = list_next_entry(rg, link); 538 if (&nrg->link != &resv->regions && nrg->from == rg->to && 539 has_same_uncharge_info(nrg, rg)) { 540 nrg->from = rg->from; 541 542 list_del(&rg->link); 543 put_uncharge_info(rg); 544 kfree(rg); 545 } 546 } 547 548 static inline long 549 hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from, 550 long to, struct hstate *h, struct hugetlb_cgroup *cg, 551 long *regions_needed) 552 { 553 struct file_region *nrg; 554 555 if (!regions_needed) { 556 nrg = get_file_region_entry_from_cache(map, from, to); 557 record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg); 558 list_add(&nrg->link, rg); 559 coalesce_file_region(map, nrg); 560 } else 561 *regions_needed += 1; 562 563 return to - from; 564 } 565 566 /* 567 * Must be called with resv->lock held. 
568 * 569 * Calling this with regions_needed != NULL will count the number of pages 570 * to be added but will not modify the linked list. And regions_needed will 571 * indicate the number of file_regions needed in the cache to carry out to add 572 * the regions for this range. 573 */ 574 static long add_reservation_in_range(struct resv_map *resv, long f, long t, 575 struct hugetlb_cgroup *h_cg, 576 struct hstate *h, long *regions_needed) 577 { 578 long add = 0; 579 struct list_head *head = &resv->regions; 580 long last_accounted_offset = f; 581 struct file_region *iter, *trg = NULL; 582 struct list_head *rg = NULL; 583 584 if (regions_needed) 585 *regions_needed = 0; 586 587 /* In this loop, we essentially handle an entry for the range 588 * [last_accounted_offset, iter->from), at every iteration, with some 589 * bounds checking. 590 */ 591 list_for_each_entry_safe(iter, trg, head, link) { 592 /* Skip irrelevant regions that start before our range. */ 593 if (iter->from < f) { 594 /* If this region ends after the last accounted offset, 595 * then we need to update last_accounted_offset. 596 */ 597 if (iter->to > last_accounted_offset) 598 last_accounted_offset = iter->to; 599 continue; 600 } 601 602 /* When we find a region that starts beyond our range, we've 603 * finished. 604 */ 605 if (iter->from >= t) { 606 rg = iter->link.prev; 607 break; 608 } 609 610 /* Add an entry for last_accounted_offset -> iter->from, and 611 * update last_accounted_offset. 612 */ 613 if (iter->from > last_accounted_offset) 614 add += hugetlb_resv_map_add(resv, iter->link.prev, 615 last_accounted_offset, 616 iter->from, h, h_cg, 617 regions_needed); 618 619 last_accounted_offset = iter->to; 620 } 621 622 /* Handle the case where our range extends beyond 623 * last_accounted_offset. 624 */ 625 if (!rg) 626 rg = head->prev; 627 if (last_accounted_offset < t) 628 add += hugetlb_resv_map_add(resv, rg, last_accounted_offset, 629 t, h, h_cg, regions_needed); 630 631 return add; 632 } 633 634 /* Must be called with resv->lock acquired. Will drop lock to allocate entries. 635 */ 636 static int allocate_file_region_entries(struct resv_map *resv, 637 int regions_needed) 638 __must_hold(&resv->lock) 639 { 640 LIST_HEAD(allocated_regions); 641 int to_allocate = 0, i = 0; 642 struct file_region *trg = NULL, *rg = NULL; 643 644 VM_BUG_ON(regions_needed < 0); 645 646 /* 647 * Check for sufficient descriptors in the cache to accommodate 648 * the number of in progress add operations plus regions_needed. 649 * 650 * This is a while loop because when we drop the lock, some other call 651 * to region_add or region_del may have consumed some region_entries, 652 * so we keep looping here until we finally have enough entries for 653 * (adds_in_progress + regions_needed). 654 */ 655 while (resv->region_cache_count < 656 (resv->adds_in_progress + regions_needed)) { 657 to_allocate = resv->adds_in_progress + regions_needed - 658 resv->region_cache_count; 659 660 /* At this point, we should have enough entries in the cache 661 * for all the existing adds_in_progress. We should only be 662 * needing to allocate for regions_needed. 
663 */ 664 VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress); 665 666 spin_unlock(&resv->lock); 667 for (i = 0; i < to_allocate; i++) { 668 trg = kmalloc(sizeof(*trg), GFP_KERNEL); 669 if (!trg) 670 goto out_of_memory; 671 list_add(&trg->link, &allocated_regions); 672 } 673 674 spin_lock(&resv->lock); 675 676 list_splice(&allocated_regions, &resv->region_cache); 677 resv->region_cache_count += to_allocate; 678 } 679 680 return 0; 681 682 out_of_memory: 683 list_for_each_entry_safe(rg, trg, &allocated_regions, link) { 684 list_del(&rg->link); 685 kfree(rg); 686 } 687 return -ENOMEM; 688 } 689 690 /* 691 * Add the huge page range represented by [f, t) to the reserve 692 * map. Regions will be taken from the cache to fill in this range. 693 * Sufficient regions should exist in the cache due to the previous 694 * call to region_chg with the same range, but in some cases the cache will not 695 * have sufficient entries due to races with other code doing region_add or 696 * region_del. The extra needed entries will be allocated. 697 * 698 * regions_needed is the out value provided by a previous call to region_chg. 699 * 700 * Return the number of new huge pages added to the map. This number is greater 701 * than or equal to zero. If file_region entries needed to be allocated for 702 * this operation and we were not able to allocate, it returns -ENOMEM. 703 * region_add of regions of length 1 never allocate file_regions and cannot 704 * fail; region_chg will always allocate at least 1 entry and a region_add for 705 * 1 page will only require at most 1 entry. 706 */ 707 static long region_add(struct resv_map *resv, long f, long t, 708 long in_regions_needed, struct hstate *h, 709 struct hugetlb_cgroup *h_cg) 710 { 711 long add = 0, actual_regions_needed = 0; 712 713 spin_lock(&resv->lock); 714 retry: 715 716 /* Count how many regions are actually needed to execute this add. */ 717 add_reservation_in_range(resv, f, t, NULL, NULL, 718 &actual_regions_needed); 719 720 /* 721 * Check for sufficient descriptors in the cache to accommodate 722 * this add operation. Note that actual_regions_needed may be greater 723 * than in_regions_needed, as the resv_map may have been modified since 724 * the region_chg call. In this case, we need to make sure that we 725 * allocate extra entries, such that we have enough for all the 726 * existing adds_in_progress, plus the excess needed for this 727 * operation. 728 */ 729 if (actual_regions_needed > in_regions_needed && 730 resv->region_cache_count < 731 resv->adds_in_progress + 732 (actual_regions_needed - in_regions_needed)) { 733 /* region_add operation of range 1 should never need to 734 * allocate file_region entries. 735 */ 736 VM_BUG_ON(t - f <= 1); 737 738 if (allocate_file_region_entries( 739 resv, actual_regions_needed - in_regions_needed)) { 740 return -ENOMEM; 741 } 742 743 goto retry; 744 } 745 746 add = add_reservation_in_range(resv, f, t, h_cg, h, NULL); 747 748 resv->adds_in_progress -= in_regions_needed; 749 750 spin_unlock(&resv->lock); 751 return add; 752 } 753 754 /* 755 * Examine the existing reserve map and determine how many 756 * huge pages in the specified range [f, t) are NOT currently 757 * represented. This routine is called before a subsequent 758 * call to region_add that will actually modify the reserve 759 * map to add the specified range [f, t). region_chg does 760 * not change the number of huge pages represented by the 761 * map. 
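 *
 * For illustration, callers pair these routines roughly as follows (the
 * "rest of the reservation succeeded" condition below is a stand-in for
 * the caller's own error handling):
 *
 *	long regions_needed;
 *	long chg = region_chg(resv, f, t, &regions_needed);
 *
 *	if (chg < 0)
 *		return chg;
 *	...
 *	if (the rest of the reservation succeeded)
 *		region_add(resv, f, t, regions_needed, h, h_cg);
 *	else
 *		region_abort(resv, f, t, regions_needed);
 *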
A number of new file_region structures is added to the cache as a 762 * placeholder, for the subsequent region_add call to use. At least 1 763 * file_region structure is added. 764 * 765 * out_regions_needed is the number of regions added to the 766 * resv->adds_in_progress. This value needs to be provided to a follow up call 767 * to region_add or region_abort for proper accounting. 768 * 769 * Returns the number of huge pages that need to be added to the existing 770 * reservation map for the range [f, t). This number is greater or equal to 771 * zero. -ENOMEM is returned if a new file_region structure or cache entry 772 * is needed and can not be allocated. 773 */ 774 static long region_chg(struct resv_map *resv, long f, long t, 775 long *out_regions_needed) 776 { 777 long chg = 0; 778 779 spin_lock(&resv->lock); 780 781 /* Count how many hugepages in this range are NOT represented. */ 782 chg = add_reservation_in_range(resv, f, t, NULL, NULL, 783 out_regions_needed); 784 785 if (*out_regions_needed == 0) 786 *out_regions_needed = 1; 787 788 if (allocate_file_region_entries(resv, *out_regions_needed)) 789 return -ENOMEM; 790 791 resv->adds_in_progress += *out_regions_needed; 792 793 spin_unlock(&resv->lock); 794 return chg; 795 } 796 797 /* 798 * Abort the in progress add operation. The adds_in_progress field 799 * of the resv_map keeps track of the operations in progress between 800 * calls to region_chg and region_add. Operations are sometimes 801 * aborted after the call to region_chg. In such cases, region_abort 802 * is called to decrement the adds_in_progress counter. regions_needed 803 * is the value returned by the region_chg call, it is used to decrement 804 * the adds_in_progress counter. 805 * 806 * NOTE: The range arguments [f, t) are not needed or used in this 807 * routine. They are kept to make reading the calling code easier as 808 * arguments will match the associated region_chg call. 809 */ 810 static void region_abort(struct resv_map *resv, long f, long t, 811 long regions_needed) 812 { 813 spin_lock(&resv->lock); 814 VM_BUG_ON(!resv->region_cache_count); 815 resv->adds_in_progress -= regions_needed; 816 spin_unlock(&resv->lock); 817 } 818 819 /* 820 * Delete the specified range [f, t) from the reserve map. If the 821 * t parameter is LONG_MAX, this indicates that ALL regions after f 822 * should be deleted. Locate the regions which intersect [f, t) 823 * and either trim, delete or split the existing regions. 824 * 825 * Returns the number of huge pages deleted from the reserve map. 826 * In the normal case, the return value is zero or more. In the 827 * case where a region must be split, a new region descriptor must 828 * be allocated. If the allocation fails, -ENOMEM will be returned. 829 * NOTE: If the parameter t == LONG_MAX, then we will never split 830 * a region and possibly return -ENOMEM. Callers specifying 831 * t == LONG_MAX do not need to check for -ENOMEM error. 832 */ 833 static long region_del(struct resv_map *resv, long f, long t) 834 { 835 struct list_head *head = &resv->regions; 836 struct file_region *rg, *trg; 837 struct file_region *nrg = NULL; 838 long del = 0; 839 840 retry: 841 spin_lock(&resv->lock); 842 list_for_each_entry_safe(rg, trg, head, link) { 843 /* 844 * Skip regions before the range to be deleted. file_region 845 * ranges are normally of the form [from, to). However, there 846 * may be a "placeholder" entry in the map which is of the form 847 * (from, to) with from == to. 
Check for placeholder entries 848 * at the beginning of the range to be deleted. 849 */ 850 if (rg->to <= f && (rg->to != rg->from || rg->to != f)) 851 continue; 852 853 if (rg->from >= t) 854 break; 855 856 if (f > rg->from && t < rg->to) { /* Must split region */ 857 /* 858 * Check for an entry in the cache before dropping 859 * lock and attempting allocation. 860 */ 861 if (!nrg && 862 resv->region_cache_count > resv->adds_in_progress) { 863 nrg = list_first_entry(&resv->region_cache, 864 struct file_region, 865 link); 866 list_del(&nrg->link); 867 resv->region_cache_count--; 868 } 869 870 if (!nrg) { 871 spin_unlock(&resv->lock); 872 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); 873 if (!nrg) 874 return -ENOMEM; 875 goto retry; 876 } 877 878 del += t - f; 879 hugetlb_cgroup_uncharge_file_region( 880 resv, rg, t - f, false); 881 882 /* New entry for end of split region */ 883 nrg->from = t; 884 nrg->to = rg->to; 885 886 copy_hugetlb_cgroup_uncharge_info(nrg, rg); 887 888 INIT_LIST_HEAD(&nrg->link); 889 890 /* Original entry is trimmed */ 891 rg->to = f; 892 893 list_add(&nrg->link, &rg->link); 894 nrg = NULL; 895 break; 896 } 897 898 if (f <= rg->from && t >= rg->to) { /* Remove entire region */ 899 del += rg->to - rg->from; 900 hugetlb_cgroup_uncharge_file_region(resv, rg, 901 rg->to - rg->from, true); 902 list_del(&rg->link); 903 kfree(rg); 904 continue; 905 } 906 907 if (f <= rg->from) { /* Trim beginning of region */ 908 hugetlb_cgroup_uncharge_file_region(resv, rg, 909 t - rg->from, false); 910 911 del += t - rg->from; 912 rg->from = t; 913 } else { /* Trim end of region */ 914 hugetlb_cgroup_uncharge_file_region(resv, rg, 915 rg->to - f, false); 916 917 del += rg->to - f; 918 rg->to = f; 919 } 920 } 921 922 spin_unlock(&resv->lock); 923 kfree(nrg); 924 return del; 925 } 926 927 /* 928 * A rare out of memory error was encountered which prevented removal of 929 * the reserve map region for a page. The huge page itself was free'ed 930 * and removed from the page cache. This routine will adjust the subpool 931 * usage count, and the global reserve count if needed. By incrementing 932 * these counts, the reserve map entry which could not be deleted will 933 * appear as a "reserved" entry instead of simply dangling with incorrect 934 * counts. 935 */ 936 void hugetlb_fix_reserve_counts(struct inode *inode) 937 { 938 struct hugepage_subpool *spool = subpool_inode(inode); 939 long rsv_adjust; 940 bool reserved = false; 941 942 rsv_adjust = hugepage_subpool_get_pages(spool, 1); 943 if (rsv_adjust > 0) { 944 struct hstate *h = hstate_inode(inode); 945 946 if (!hugetlb_acct_memory(h, 1)) 947 reserved = true; 948 } else if (!rsv_adjust) { 949 reserved = true; 950 } 951 952 if (!reserved) 953 pr_warn("hugetlb: Huge Page Reserved count may go negative.\n"); 954 } 955 956 /* 957 * Count and return the number of huge pages in the reserve map 958 * that intersect with the range [f, t). 959 */ 960 static long region_count(struct resv_map *resv, long f, long t) 961 { 962 struct list_head *head = &resv->regions; 963 struct file_region *rg; 964 long chg = 0; 965 966 spin_lock(&resv->lock); 967 /* Locate each segment we overlap with, and count that overlap. 
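	 * For example (numbers made up): with regions [0, 4) and [6, 10) in
	 * the map, region_count(resv, 2, 8) sees overlaps [2, 4) and [6, 8)
	 * and returns 4.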
*/ 968 list_for_each_entry(rg, head, link) { 969 long seg_from; 970 long seg_to; 971 972 if (rg->to <= f) 973 continue; 974 if (rg->from >= t) 975 break; 976 977 seg_from = max(rg->from, f); 978 seg_to = min(rg->to, t); 979 980 chg += seg_to - seg_from; 981 } 982 spin_unlock(&resv->lock); 983 984 return chg; 985 } 986 987 /* 988 * Convert the address within this vma to the page offset within 989 * the mapping, huge page units here. 990 */ 991 static pgoff_t vma_hugecache_offset(struct hstate *h, 992 struct vm_area_struct *vma, unsigned long address) 993 { 994 return ((address - vma->vm_start) >> huge_page_shift(h)) + 995 (vma->vm_pgoff >> huge_page_order(h)); 996 } 997 998 /** 999 * vma_kernel_pagesize - Page size granularity for this VMA. 1000 * @vma: The user mapping. 1001 * 1002 * Folios in this VMA will be aligned to, and at least the size of the 1003 * number of bytes returned by this function. 1004 * 1005 * Return: The default size of the folios allocated when backing a VMA. 1006 */ 1007 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) 1008 { 1009 if (vma->vm_ops && vma->vm_ops->pagesize) 1010 return vma->vm_ops->pagesize(vma); 1011 return PAGE_SIZE; 1012 } 1013 EXPORT_SYMBOL_GPL(vma_kernel_pagesize); 1014 1015 /* 1016 * Return the page size being used by the MMU to back a VMA. In the majority 1017 * of cases, the page size used by the kernel matches the MMU size. On 1018 * architectures where it differs, an architecture-specific 'strong' 1019 * version of this symbol is required. 1020 */ 1021 __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) 1022 { 1023 return vma_kernel_pagesize(vma); 1024 } 1025 1026 /* 1027 * Flags for MAP_PRIVATE reservations. These are stored in the bottom 1028 * bits of the reservation map pointer, which are always clear due to 1029 * alignment. 1030 */ 1031 #define HPAGE_RESV_OWNER (1UL << 0) 1032 #define HPAGE_RESV_UNMAPPED (1UL << 1) 1033 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED) 1034 1035 /* 1036 * These helpers are used to track how many pages are reserved for 1037 * faults in a MAP_PRIVATE mapping. Only the process that called mmap() 1038 * is guaranteed to have their future faults succeed. 1039 * 1040 * With the exception of hugetlb_dup_vma_private() which is called at fork(), 1041 * the reserve counters are updated with the hugetlb_lock held. It is safe 1042 * to reset the VMA at fork() time as it is not in use yet and there is no 1043 * chance of the global counters getting corrupted as a result of the values. 1044 * 1045 * The private mapping reservation is represented in a subtly different 1046 * manner to a shared mapping. A shared mapping has a region map associated 1047 * with the underlying file, this region map represents the backing file 1048 * pages which have ever had a reservation assigned which this persists even 1049 * after the page is instantiated. A private mapping has a region map 1050 * associated with the original mmap which is attached to all VMAs which 1051 * reference it, this region map represents those offsets which have consumed 1052 * reservation ie. where pages have been instantiated. 
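 *
 * As an illustration (offsets made up): in a shared mapping, once page
 * offset 3 has ever been reserved, the file's region map keeps an entry
 * [3, 4) even after the page is instantiated; the entry means "a
 * reservation exists for this offset". In a private mapping, the region
 * map gains [3, 4) only when offset 3 is faulted in; the entry means
 * "the reservation for this offset has been consumed".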
1053 */ 1054 static unsigned long get_vma_private_data(struct vm_area_struct *vma) 1055 { 1056 return (unsigned long)vma->vm_private_data; 1057 } 1058 1059 static void set_vma_private_data(struct vm_area_struct *vma, 1060 unsigned long value) 1061 { 1062 vma->vm_private_data = (void *)value; 1063 } 1064 1065 static void 1066 resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map, 1067 struct hugetlb_cgroup *h_cg, 1068 struct hstate *h) 1069 { 1070 #ifdef CONFIG_CGROUP_HUGETLB 1071 if (!h_cg || !h) { 1072 resv_map->reservation_counter = NULL; 1073 resv_map->pages_per_hpage = 0; 1074 resv_map->css = NULL; 1075 } else { 1076 resv_map->reservation_counter = 1077 &h_cg->rsvd_hugepage[hstate_index(h)]; 1078 resv_map->pages_per_hpage = pages_per_huge_page(h); 1079 resv_map->css = &h_cg->css; 1080 } 1081 #endif 1082 } 1083 1084 struct resv_map *resv_map_alloc(void) 1085 { 1086 struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL); 1087 struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL); 1088 1089 if (!resv_map || !rg) { 1090 kfree(resv_map); 1091 kfree(rg); 1092 return NULL; 1093 } 1094 1095 kref_init(&resv_map->refs); 1096 spin_lock_init(&resv_map->lock); 1097 INIT_LIST_HEAD(&resv_map->regions); 1098 init_rwsem(&resv_map->rw_sema); 1099 1100 resv_map->adds_in_progress = 0; 1101 /* 1102 * Initialize these to 0. On shared mappings, 0's here indicate these 1103 * fields don't do cgroup accounting. On private mappings, these will be 1104 * re-initialized to the proper values, to indicate that hugetlb cgroup 1105 * reservations are to be un-charged from here. 1106 */ 1107 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL); 1108 1109 INIT_LIST_HEAD(&resv_map->region_cache); 1110 list_add(&rg->link, &resv_map->region_cache); 1111 resv_map->region_cache_count = 1; 1112 1113 return resv_map; 1114 } 1115 1116 void resv_map_release(struct kref *ref) 1117 { 1118 struct resv_map *resv_map = container_of(ref, struct resv_map, refs); 1119 struct list_head *head = &resv_map->region_cache; 1120 struct file_region *rg, *trg; 1121 1122 /* Clear out any active regions before we release the map. */ 1123 region_del(resv_map, 0, LONG_MAX); 1124 1125 /* ... and any entries left in the cache */ 1126 list_for_each_entry_safe(rg, trg, head, link) { 1127 list_del(&rg->link); 1128 kfree(rg); 1129 } 1130 1131 VM_BUG_ON(resv_map->adds_in_progress); 1132 1133 kfree(resv_map); 1134 } 1135 1136 static inline struct resv_map *inode_resv_map(struct inode *inode) 1137 { 1138 /* 1139 * At inode evict time, i_mapping may not point to the original 1140 * address space within the inode. This original address space 1141 * contains the pointer to the resv_map. So, always use the 1142 * address space embedded within the inode. 1143 * The VERY common case is inode->mapping == &inode->i_data but, 1144 * this may not be true for device special inodes. 
1145 */ 1146 return (struct resv_map *)(&inode->i_data)->i_private_data; 1147 } 1148 1149 static struct resv_map *vma_resv_map(struct vm_area_struct *vma) 1150 { 1151 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); 1152 if (vma->vm_flags & VM_MAYSHARE) { 1153 struct address_space *mapping = vma->vm_file->f_mapping; 1154 struct inode *inode = mapping->host; 1155 1156 return inode_resv_map(inode); 1157 1158 } else { 1159 return (struct resv_map *)(get_vma_private_data(vma) & 1160 ~HPAGE_RESV_MASK); 1161 } 1162 } 1163 1164 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) 1165 { 1166 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); 1167 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); 1168 1169 set_vma_private_data(vma, (unsigned long)map); 1170 } 1171 1172 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) 1173 { 1174 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); 1175 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); 1176 1177 set_vma_private_data(vma, get_vma_private_data(vma) | flags); 1178 } 1179 1180 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) 1181 { 1182 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); 1183 1184 return (get_vma_private_data(vma) & flag) != 0; 1185 } 1186 1187 bool __vma_private_lock(struct vm_area_struct *vma) 1188 { 1189 return !(vma->vm_flags & VM_MAYSHARE) && 1190 get_vma_private_data(vma) & ~HPAGE_RESV_MASK && 1191 is_vma_resv_set(vma, HPAGE_RESV_OWNER); 1192 } 1193 1194 void hugetlb_dup_vma_private(struct vm_area_struct *vma) 1195 { 1196 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); 1197 /* 1198 * Clear vm_private_data 1199 * - For shared mappings this is a per-vma semaphore that may be 1200 * allocated in a subsequent call to hugetlb_vm_op_open. 1201 * Before clearing, make sure pointer is not associated with vma 1202 * as this will leak the structure. This is the case when called 1203 * via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already 1204 * been called to allocate a new structure. 1205 * - For MAP_PRIVATE mappings, this is the reserve map which does 1206 * not apply to children. Faults generated by the children are 1207 * not guaranteed to succeed, even if read-only. 1208 */ 1209 if (vma->vm_flags & VM_MAYSHARE) { 1210 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 1211 1212 if (vma_lock && vma_lock->vma != vma) 1213 vma->vm_private_data = NULL; 1214 } else 1215 vma->vm_private_data = NULL; 1216 } 1217 1218 /* 1219 * Reset and decrement one ref on hugepage private reservation. 1220 * Called with mm->mmap_lock writer semaphore held. 1221 * This function should be only used by move_vma() and operate on 1222 * same sized vma. It should never come here with last ref on the 1223 * reservation. 1224 */ 1225 void clear_vma_resv_huge_pages(struct vm_area_struct *vma) 1226 { 1227 /* 1228 * Clear the old hugetlb private page reservation. 1229 * It has already been transferred to new_vma. 1230 * 1231 * During a mremap() operation of a hugetlb vma we call move_vma() 1232 * which copies vma into new_vma and unmaps vma. After the copy 1233 * operation both new_vma and vma share a reference to the resv_map 1234 * struct, and at that point vma is about to be unmapped. We don't 1235 * want to return the reservation to the pool at unmap of vma because 1236 * the reservation still lives on in new_vma, so simply decrement the 1237 * ref here and remove the resv_map reference from this vma. 
1238 */ 1239 struct resv_map *reservations = vma_resv_map(vma); 1240 1241 if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 1242 resv_map_put_hugetlb_cgroup_uncharge_info(reservations); 1243 kref_put(&reservations->refs, resv_map_release); 1244 } 1245 1246 hugetlb_dup_vma_private(vma); 1247 } 1248 1249 /* Returns true if the VMA has associated reserve pages */ 1250 static bool vma_has_reserves(struct vm_area_struct *vma, long chg) 1251 { 1252 if (vma->vm_flags & VM_NORESERVE) { 1253 /* 1254 * This address is already reserved by other process(chg == 0), 1255 * so, we should decrement reserved count. Without decrementing, 1256 * reserve count remains after releasing inode, because this 1257 * allocated page will go into page cache and is regarded as 1258 * coming from reserved pool in releasing step. Currently, we 1259 * don't have any other solution to deal with this situation 1260 * properly, so add work-around here. 1261 */ 1262 if (vma->vm_flags & VM_MAYSHARE && chg == 0) 1263 return true; 1264 else 1265 return false; 1266 } 1267 1268 /* Shared mappings always use reserves */ 1269 if (vma->vm_flags & VM_MAYSHARE) { 1270 /* 1271 * We know VM_NORESERVE is not set. Therefore, there SHOULD 1272 * be a region map for all pages. The only situation where 1273 * there is no region map is if a hole was punched via 1274 * fallocate. In this case, there really are no reserves to 1275 * use. This situation is indicated if chg != 0. 1276 */ 1277 if (chg) 1278 return false; 1279 else 1280 return true; 1281 } 1282 1283 /* 1284 * Only the process that called mmap() has reserves for 1285 * private mappings. 1286 */ 1287 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 1288 /* 1289 * Like the shared case above, a hole punch or truncate 1290 * could have been performed on the private mapping. 1291 * Examine the value of chg to determine if reserves 1292 * actually exist or were previously consumed. 1293 * Very Subtle - The value of chg comes from a previous 1294 * call to vma_needs_reserves(). The reserve map for 1295 * private mappings has different (opposite) semantics 1296 * than that of shared mappings. vma_needs_reserves() 1297 * has already taken this difference in semantics into 1298 * account. Therefore, the meaning of chg is the same 1299 * as in the shared case above. Code could easily be 1300 * combined, but keeping it separate draws attention to 1301 * subtle differences. 
		 */
		if (chg)
			return false;
		else
			return true;
	}

	return false;
}

static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
{
	int nid = folio_nid(folio);

	lockdep_assert_held(&hugetlb_lock);
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);

	list_move(&folio->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
	folio_set_hugetlb_freed(folio);
}

static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
							int nid)
{
	struct folio *folio;
	bool pin = !!(current->flags & PF_MEMALLOC_PIN);

	lockdep_assert_held(&hugetlb_lock);
	list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
		if (pin && !folio_is_longterm_pinnable(folio))
			continue;

		if (folio_test_hwpoison(folio))
			continue;

		list_move(&folio->lru, &h->hugepage_activelist);
		folio_ref_unfreeze(folio, 1);
		folio_clear_hugetlb_freed(folio);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		return folio;
	}

	return NULL;
}

static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
						int nid, nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = NUMA_NO_NODE;

	/* 'nid' should not be NUMA_NO_NODE. Try to catch any misuse of it and rectify. */
	if (nid == NUMA_NO_NODE)
		nid = numa_node_id();

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct folio *folio;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * No need to ask again on the same node. The pool is node
		 * rather than zone aware.
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		folio = dequeue_hugetlb_folio_node_exact(h, node);
		if (folio)
			return folio;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}

static unsigned long available_huge_pages(struct hstate *h)
{
	return h->free_huge_pages - h->resv_huge_pages;
}

static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct folio *folio = NULL;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen".
The child may still get SIGKILLed 1410 */ 1411 if (!vma_has_reserves(vma, chg) && !available_huge_pages(h)) 1412 goto err; 1413 1414 /* If reserves cannot be used, ensure enough pages are in the pool */ 1415 if (avoid_reserve && !available_huge_pages(h)) 1416 goto err; 1417 1418 gfp_mask = htlb_alloc_mask(h); 1419 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask); 1420 1421 if (mpol_is_preferred_many(mpol)) { 1422 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, 1423 nid, nodemask); 1424 1425 /* Fallback to all nodes if page==NULL */ 1426 nodemask = NULL; 1427 } 1428 1429 if (!folio) 1430 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, 1431 nid, nodemask); 1432 1433 if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) { 1434 folio_set_hugetlb_restore_reserve(folio); 1435 h->resv_huge_pages--; 1436 } 1437 1438 mpol_cond_put(mpol); 1439 return folio; 1440 1441 err: 1442 return NULL; 1443 } 1444 1445 /* 1446 * common helper functions for hstate_next_node_to_{alloc|free}. 1447 * We may have allocated or freed a huge page based on a different 1448 * nodes_allowed previously, so h->next_node_to_{alloc|free} might 1449 * be outside of *nodes_allowed. Ensure that we use an allowed 1450 * node for alloc or free. 1451 */ 1452 static int next_node_allowed(int nid, nodemask_t *nodes_allowed) 1453 { 1454 nid = next_node_in(nid, *nodes_allowed); 1455 VM_BUG_ON(nid >= MAX_NUMNODES); 1456 1457 return nid; 1458 } 1459 1460 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed) 1461 { 1462 if (!node_isset(nid, *nodes_allowed)) 1463 nid = next_node_allowed(nid, nodes_allowed); 1464 return nid; 1465 } 1466 1467 /* 1468 * returns the previously saved node ["this node"] from which to 1469 * allocate a persistent huge page for the pool and advance the 1470 * next node from which to allocate, handling wrap at end of node 1471 * mask. 1472 */ 1473 static int hstate_next_node_to_alloc(int *next_node, 1474 nodemask_t *nodes_allowed) 1475 { 1476 int nid; 1477 1478 VM_BUG_ON(!nodes_allowed); 1479 1480 nid = get_valid_node_allowed(*next_node, nodes_allowed); 1481 *next_node = next_node_allowed(nid, nodes_allowed); 1482 1483 return nid; 1484 } 1485 1486 /* 1487 * helper for remove_pool_hugetlb_folio() - return the previously saved 1488 * node ["this node"] from which to free a huge page. Advance the 1489 * next node id whether or not we find a free huge page to free so 1490 * that the next attempt to free addresses the next node. 
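 *
 * For illustration, callers typically walk nodes with the
 * for_each_node_mask_to_free() helper defined below (the "free one page
 * from 'node'" step stands in for the caller's real work):
 *
 *	int nr_nodes, node;
 *
 *	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
 *		if (free one page from 'node')
 *			break;
 *	}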
1491 */ 1492 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed) 1493 { 1494 int nid; 1495 1496 VM_BUG_ON(!nodes_allowed); 1497 1498 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed); 1499 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed); 1500 1501 return nid; 1502 } 1503 1504 #define for_each_node_mask_to_alloc(next_node, nr_nodes, node, mask) \ 1505 for (nr_nodes = nodes_weight(*mask); \ 1506 nr_nodes > 0 && \ 1507 ((node = hstate_next_node_to_alloc(next_node, mask)) || 1); \ 1508 nr_nodes--) 1509 1510 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \ 1511 for (nr_nodes = nodes_weight(*mask); \ 1512 nr_nodes > 0 && \ 1513 ((node = hstate_next_node_to_free(hs, mask)) || 1); \ 1514 nr_nodes--) 1515 1516 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE 1517 #ifdef CONFIG_CONTIG_ALLOC 1518 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, 1519 int nid, nodemask_t *nodemask) 1520 { 1521 struct folio *folio; 1522 int order = huge_page_order(h); 1523 bool retried = false; 1524 1525 if (nid == NUMA_NO_NODE) 1526 nid = numa_mem_id(); 1527 retry: 1528 folio = NULL; 1529 #ifdef CONFIG_CMA 1530 { 1531 int node; 1532 1533 if (hugetlb_cma[nid]) 1534 folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask); 1535 1536 if (!folio && !(gfp_mask & __GFP_THISNODE)) { 1537 for_each_node_mask(node, *nodemask) { 1538 if (node == nid || !hugetlb_cma[node]) 1539 continue; 1540 1541 folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask); 1542 if (folio) 1543 break; 1544 } 1545 } 1546 } 1547 #endif 1548 if (!folio) { 1549 folio = folio_alloc_gigantic(order, gfp_mask, nid, nodemask); 1550 if (!folio) 1551 return NULL; 1552 } 1553 1554 if (folio_ref_freeze(folio, 1)) 1555 return folio; 1556 1557 pr_warn("HugeTLB: unexpected refcount on PFN %lu\n", folio_pfn(folio)); 1558 hugetlb_free_folio(folio); 1559 if (!retried) { 1560 retried = true; 1561 goto retry; 1562 } 1563 return NULL; 1564 } 1565 1566 #else /* !CONFIG_CONTIG_ALLOC */ 1567 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, 1568 int nid, nodemask_t *nodemask) 1569 { 1570 return NULL; 1571 } 1572 #endif /* CONFIG_CONTIG_ALLOC */ 1573 1574 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */ 1575 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, 1576 int nid, nodemask_t *nodemask) 1577 { 1578 return NULL; 1579 } 1580 #endif 1581 1582 /* 1583 * Remove hugetlb folio from lists. 1584 * If vmemmap exists for the folio, clear the hugetlb flag so that the 1585 * folio appears as just a compound page. Otherwise, wait until after 1586 * allocating vmemmap to clear the flag. 1587 * 1588 * Must be called with hugetlb lock held. 1589 */ 1590 static void remove_hugetlb_folio(struct hstate *h, struct folio *folio, 1591 bool adjust_surplus) 1592 { 1593 int nid = folio_nid(folio); 1594 1595 VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio); 1596 VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio); 1597 1598 lockdep_assert_held(&hugetlb_lock); 1599 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 1600 return; 1601 1602 list_del(&folio->lru); 1603 1604 if (folio_test_hugetlb_freed(folio)) { 1605 folio_clear_hugetlb_freed(folio); 1606 h->free_huge_pages--; 1607 h->free_huge_pages_node[nid]--; 1608 } 1609 if (adjust_surplus) { 1610 h->surplus_huge_pages--; 1611 h->surplus_huge_pages_node[nid]--; 1612 } 1613 1614 /* 1615 * We can only clear the hugetlb flag after allocating vmemmap 1616 * pages. 
Otherwise, someone (memory error handling) may try to write 1617 * to tail struct pages. 1618 */ 1619 if (!folio_test_hugetlb_vmemmap_optimized(folio)) 1620 __folio_clear_hugetlb(folio); 1621 1622 h->nr_huge_pages--; 1623 h->nr_huge_pages_node[nid]--; 1624 } 1625 1626 static void add_hugetlb_folio(struct hstate *h, struct folio *folio, 1627 bool adjust_surplus) 1628 { 1629 int nid = folio_nid(folio); 1630 1631 VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio); 1632 1633 lockdep_assert_held(&hugetlb_lock); 1634 1635 INIT_LIST_HEAD(&folio->lru); 1636 h->nr_huge_pages++; 1637 h->nr_huge_pages_node[nid]++; 1638 1639 if (adjust_surplus) { 1640 h->surplus_huge_pages++; 1641 h->surplus_huge_pages_node[nid]++; 1642 } 1643 1644 __folio_set_hugetlb(folio); 1645 folio_change_private(folio, NULL); 1646 /* 1647 * We have to set hugetlb_vmemmap_optimized again as above 1648 * folio_change_private(folio, NULL) cleared it. 1649 */ 1650 folio_set_hugetlb_vmemmap_optimized(folio); 1651 1652 arch_clear_hugetlb_flags(folio); 1653 enqueue_hugetlb_folio(h, folio); 1654 } 1655 1656 static void __update_and_free_hugetlb_folio(struct hstate *h, 1657 struct folio *folio) 1658 { 1659 bool clear_flag = folio_test_hugetlb_vmemmap_optimized(folio); 1660 1661 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 1662 return; 1663 1664 /* 1665 * If we don't know which subpages are hwpoisoned, we can't free 1666 * the hugepage, so it's leaked intentionally. 1667 */ 1668 if (folio_test_hugetlb_raw_hwp_unreliable(folio)) 1669 return; 1670 1671 /* 1672 * If folio is not vmemmap optimized (!clear_flag), then the folio 1673 * is no longer identified as a hugetlb page. hugetlb_vmemmap_restore_folio 1674 * can only be passed hugetlb pages and will BUG otherwise. 1675 */ 1676 if (clear_flag && hugetlb_vmemmap_restore_folio(h, folio)) { 1677 spin_lock_irq(&hugetlb_lock); 1678 /* 1679 * If we cannot allocate vmemmap pages, just refuse to free the 1680 * page and put the page back on the hugetlb free list and treat 1681 * as a surplus page. 1682 */ 1683 add_hugetlb_folio(h, folio, true); 1684 spin_unlock_irq(&hugetlb_lock); 1685 return; 1686 } 1687 1688 /* 1689 * If vmemmap pages were allocated above, then we need to clear the 1690 * hugetlb flag under the hugetlb lock. 1691 */ 1692 if (folio_test_hugetlb(folio)) { 1693 spin_lock_irq(&hugetlb_lock); 1694 __folio_clear_hugetlb(folio); 1695 spin_unlock_irq(&hugetlb_lock); 1696 } 1697 1698 /* 1699 * Move PageHWPoison flag from head page to the raw error pages, 1700 * which makes any healthy subpages reusable. 1701 */ 1702 if (unlikely(folio_test_hwpoison(folio))) 1703 folio_clear_hugetlb_hwpoison(folio); 1704 1705 folio_ref_unfreeze(folio, 1); 1706 1707 INIT_LIST_HEAD(&folio->_deferred_list); 1708 hugetlb_free_folio(folio); 1709 } 1710 1711 /* 1712 * As update_and_free_hugetlb_folio() can be called under any context, so we cannot 1713 * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the 1714 * actual freeing in a workqueue to prevent from using GFP_ATOMIC to allocate 1715 * the vmemmap pages. 1716 * 1717 * free_hpage_workfn() locklessly retrieves the linked list of pages to be 1718 * freed and frees them one-by-one. As the page->mapping pointer is going 1719 * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node 1720 * structure of a lockless linked list of huge pages to be freed. 
 */
static LLIST_HEAD(hpage_freelist);

static void free_hpage_workfn(struct work_struct *work)
{
	struct llist_node *node;

	node = llist_del_all(&hpage_freelist);

	while (node) {
		struct folio *folio;
		struct hstate *h;

		folio = container_of((struct address_space **)node,
				     struct folio, mapping);
		node = node->next;
		folio->mapping = NULL;
		/*
		 * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
		 * folio_hstate() is going to trigger because a previous call to
		 * remove_hugetlb_folio() will clear the hugetlb bit, so do
		 * not use folio_hstate() directly.
		 */
		h = size_to_hstate(folio_size(folio));

		__update_and_free_hugetlb_folio(h, folio);

		cond_resched();
	}
}
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);

static inline void flush_free_hpage_work(struct hstate *h)
{
	if (hugetlb_vmemmap_optimizable(h))
		flush_work(&free_hpage_work);
}

static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
				 bool atomic)
{
	if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
		__update_and_free_hugetlb_folio(h, folio);
		return;
	}

	/*
	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
	 *
	 * Only call schedule_work() if hpage_freelist was previously
	 * empty. Otherwise, schedule_work() had been called but the workfn
	 * hasn't retrieved the list yet.
	 */
	if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
		schedule_work(&free_hpage_work);
}

static void bulk_vmemmap_restore_error(struct hstate *h,
				       struct list_head *folio_list,
				       struct list_head *non_hvo_folios)
{
	struct folio *folio, *t_folio;

	if (!list_empty(non_hvo_folios)) {
		/*
		 * Free any restored hugetlb pages so that restore of the
		 * entire list can be retried.
		 * The idea is that in the common case of ENOMEM errors,
		 * freeing hugetlb pages with vmemmap will free up memory so
		 * that we can allocate vmemmap for more hugetlb pages.
		 */
		list_for_each_entry_safe(folio, t_folio, non_hvo_folios, lru) {
			list_del(&folio->lru);
			spin_lock_irq(&hugetlb_lock);
			__folio_clear_hugetlb(folio);
			spin_unlock_irq(&hugetlb_lock);
			update_and_free_hugetlb_folio(h, folio, false);
			cond_resched();
		}
	} else {
		/*
		 * In the case where there are no folios which can be
		 * immediately freed, we loop through the list trying to restore
		 * vmemmap individually in the hope that someone elsewhere may
		 * have done something to cause success (such as freeing some
		 * memory). If unable to restore a hugetlb page, the hugetlb
		 * page is made a surplus page and removed from the list.
		 * If we are able to restore vmemmap and free one hugetlb page,
		 * we quit processing the list to retry the bulk operation.
		 */
		list_for_each_entry_safe(folio, t_folio, folio_list, lru)
			if (hugetlb_vmemmap_restore_folio(h, folio)) {
				list_del(&folio->lru);
				spin_lock_irq(&hugetlb_lock);
				add_hugetlb_folio(h, folio, true);
				spin_unlock_irq(&hugetlb_lock);
			} else {
				list_del(&folio->lru);
				spin_lock_irq(&hugetlb_lock);
				__folio_clear_hugetlb(folio);
				spin_unlock_irq(&hugetlb_lock);
				update_and_free_hugetlb_folio(h, folio, false);
				cond_resched();
				break;
			}
	}
}

static void update_and_free_pages_bulk(struct hstate *h,
					struct list_head *folio_list)
{
	long ret;
	struct folio *folio, *t_folio;
	LIST_HEAD(non_hvo_folios);

	/*
	 * First allocate required vmemmap (if necessary) for all folios.
	 * Carefully handle errors and free up any available hugetlb pages
	 * in an effort to make forward progress.
	 */
retry:
	ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
	if (ret < 0) {
		bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios);
		goto retry;
	}

	/*
	 * At this point, the list should be empty, ret should be >= 0 and
	 * there should only be pages on the non_hvo_folios list.
	 * Do note that the non_hvo_folios list could be empty.
	 * Without HVO enabled, ret will be 0 and there is no need to call
	 * __folio_clear_hugetlb as this was done previously.
	 */
	VM_WARN_ON(!list_empty(folio_list));
	VM_WARN_ON(ret < 0);
	if (!list_empty(&non_hvo_folios) && ret) {
		spin_lock_irq(&hugetlb_lock);
		list_for_each_entry(folio, &non_hvo_folios, lru)
			__folio_clear_hugetlb(folio);
		spin_unlock_irq(&hugetlb_lock);
	}

	list_for_each_entry_safe(folio, t_folio, &non_hvo_folios, lru) {
		update_and_free_hugetlb_folio(h, folio, false);
		cond_resched();
	}
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

void free_huge_folio(struct folio *folio)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * generic mm code.
	 */
	struct hstate *h = folio_hstate(folio);
	int nid = folio_nid(folio);
	struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
	bool restore_reserve;
	unsigned long flags;

	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
	VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);

	hugetlb_set_folio_subpool(folio, NULL);
	if (folio_test_anon(folio))
		__ClearPageAnonExclusive(&folio->page);
	folio->mapping = NULL;
	restore_reserve = folio_test_hugetlb_restore_reserve(folio);
	folio_clear_hugetlb_restore_reserve(folio);

	/*
	 * If HPageRestoreReserve was set on the page, page allocation consumed
	 * a reservation. If the page was associated with a subpool, there
	 * would have been a page reserved in the subpool before allocation
	 * via hugepage_subpool_get_pages(). Since we are 'restoring' the
	 * reservation, do not call hugepage_subpool_put_pages() as this will
	 * remove the reserved page from the subpool.
	 */
	if (!restore_reserve) {
		/*
		 * A return code of zero implies that the subpool will be
		 * under its minimum size if the reservation is not restored
		 * after the page is freed. Therefore, force the
		 * restore_reserve operation.
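		 * For example (sizes assumed): if the subpool was created with
		 * min_hpages == 2 and freeing this page unreserved would leave
		 * it backed by only 1 page, hugepage_subpool_put_pages()
		 * returns 0, so the page is returned to the pool as a reserved
		 * page instead.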
1917 */ 1918 if (hugepage_subpool_put_pages(spool, 1) == 0) 1919 restore_reserve = true; 1920 } 1921 1922 spin_lock_irqsave(&hugetlb_lock, flags); 1923 folio_clear_hugetlb_migratable(folio); 1924 hugetlb_cgroup_uncharge_folio(hstate_index(h), 1925 pages_per_huge_page(h), folio); 1926 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), 1927 pages_per_huge_page(h), folio); 1928 mem_cgroup_uncharge(folio); 1929 if (restore_reserve) 1930 h->resv_huge_pages++; 1931 1932 if (folio_test_hugetlb_temporary(folio)) { 1933 remove_hugetlb_folio(h, folio, false); 1934 spin_unlock_irqrestore(&hugetlb_lock, flags); 1935 update_and_free_hugetlb_folio(h, folio, true); 1936 } else if (h->surplus_huge_pages_node[nid]) { 1937 /* remove the page from active list */ 1938 remove_hugetlb_folio(h, folio, true); 1939 spin_unlock_irqrestore(&hugetlb_lock, flags); 1940 update_and_free_hugetlb_folio(h, folio, true); 1941 } else { 1942 arch_clear_hugetlb_flags(folio); 1943 enqueue_hugetlb_folio(h, folio); 1944 spin_unlock_irqrestore(&hugetlb_lock, flags); 1945 } 1946 } 1947 1948 /* 1949 * Must be called with the hugetlb lock held 1950 */ 1951 static void __prep_account_new_huge_page(struct hstate *h, int nid) 1952 { 1953 lockdep_assert_held(&hugetlb_lock); 1954 h->nr_huge_pages++; 1955 h->nr_huge_pages_node[nid]++; 1956 } 1957 1958 static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio) 1959 { 1960 __folio_set_hugetlb(folio); 1961 INIT_LIST_HEAD(&folio->lru); 1962 hugetlb_set_folio_subpool(folio, NULL); 1963 set_hugetlb_cgroup(folio, NULL); 1964 set_hugetlb_cgroup_rsvd(folio, NULL); 1965 } 1966 1967 static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio) 1968 { 1969 init_new_hugetlb_folio(h, folio); 1970 hugetlb_vmemmap_optimize_folio(h, folio); 1971 } 1972 1973 static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid) 1974 { 1975 __prep_new_hugetlb_folio(h, folio); 1976 spin_lock_irq(&hugetlb_lock); 1977 __prep_account_new_huge_page(h, nid); 1978 spin_unlock_irq(&hugetlb_lock); 1979 } 1980 1981 /* 1982 * Find and lock address space (mapping) in write mode. 1983 * 1984 * Upon entry, the folio is locked which means that folio_mapping() is 1985 * stable. Due to locking order, we can only trylock_write. If we can 1986 * not get the lock, simply return NULL to caller. 1987 */ 1988 struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio) 1989 { 1990 struct address_space *mapping = folio_mapping(folio); 1991 1992 if (!mapping) 1993 return mapping; 1994 1995 if (i_mmap_trylock_write(mapping)) 1996 return mapping; 1997 1998 return NULL; 1999 } 2000 2001 static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h, 2002 gfp_t gfp_mask, int nid, nodemask_t *nmask, 2003 nodemask_t *node_alloc_noretry) 2004 { 2005 int order = huge_page_order(h); 2006 struct folio *folio; 2007 bool alloc_try_hard = true; 2008 bool retry = true; 2009 2010 /* 2011 * By default we always try hard to allocate the folio with 2012 * __GFP_RETRY_MAYFAIL flag. However, if we are allocating folios in 2013 * a loop (to adjust global huge page counts) and previous allocation 2014 * failed, do not continue to try hard on the same node. Use the 2015 * node_alloc_noretry bitmap to manage this state information. 
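 *
 * E.g.: once an allocation on node 1 fails while trying hard, node 1
 * is set in *node_alloc_noretry and later attempts on that node drop
 * __GFP_RETRY_MAYFAIL; the bit is cleared again as soon as an
 * allocation on node 1 succeeds without trying hard.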
2016 */ 2017 if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry)) 2018 alloc_try_hard = false; 2019 if (alloc_try_hard) 2020 gfp_mask |= __GFP_RETRY_MAYFAIL; 2021 if (nid == NUMA_NO_NODE) 2022 nid = numa_mem_id(); 2023 retry: 2024 folio = __folio_alloc(gfp_mask, order, nid, nmask); 2025 /* Ensure hugetlb folio won't have large_rmappable flag set. */ 2026 if (folio) 2027 folio_clear_large_rmappable(folio); 2028 2029 if (folio && !folio_ref_freeze(folio, 1)) { 2030 folio_put(folio); 2031 if (retry) { /* retry once */ 2032 retry = false; 2033 goto retry; 2034 } 2035 /* WOW! twice in a row. */ 2036 pr_warn("HugeTLB unexpected inflated folio ref count\n"); 2037 folio = NULL; 2038 } 2039 2040 /* 2041 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a 2042 * folio this indicates an overall state change. Clear bit so 2043 * that we resume normal 'try hard' allocations. 2044 */ 2045 if (node_alloc_noretry && folio && !alloc_try_hard) 2046 node_clear(nid, *node_alloc_noretry); 2047 2048 /* 2049 * If we tried hard to get a folio but failed, set bit so that 2050 * subsequent attempts will not try as hard until there is an 2051 * overall state change. 2052 */ 2053 if (node_alloc_noretry && !folio && alloc_try_hard) 2054 node_set(nid, *node_alloc_noretry); 2055 2056 if (!folio) { 2057 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); 2058 return NULL; 2059 } 2060 2061 __count_vm_event(HTLB_BUDDY_PGALLOC); 2062 return folio; 2063 } 2064 2065 static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h, 2066 gfp_t gfp_mask, int nid, nodemask_t *nmask, 2067 nodemask_t *node_alloc_noretry) 2068 { 2069 struct folio *folio; 2070 2071 if (hstate_is_gigantic(h)) 2072 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask); 2073 else 2074 folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, node_alloc_noretry); 2075 if (folio) 2076 init_new_hugetlb_folio(h, folio); 2077 return folio; 2078 } 2079 2080 /* 2081 * Common helper to allocate a fresh hugetlb page. All specific allocators 2082 * should use this function to get new hugetlb pages 2083 * 2084 * Note that returned page is 'frozen': ref count of head page and all tail 2085 * pages is zero. 2086 */ 2087 static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h, 2088 gfp_t gfp_mask, int nid, nodemask_t *nmask) 2089 { 2090 struct folio *folio; 2091 2092 if (hstate_is_gigantic(h)) 2093 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask); 2094 else 2095 folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, NULL); 2096 if (!folio) 2097 return NULL; 2098 2099 prep_new_hugetlb_folio(h, folio, folio_nid(folio)); 2100 return folio; 2101 } 2102 2103 static void prep_and_add_allocated_folios(struct hstate *h, 2104 struct list_head *folio_list) 2105 { 2106 unsigned long flags; 2107 struct folio *folio, *tmp_f; 2108 2109 /* Send list for bulk vmemmap optimization processing */ 2110 hugetlb_vmemmap_optimize_folios(h, folio_list); 2111 2112 /* Add all new pool pages to free lists in one lock cycle */ 2113 spin_lock_irqsave(&hugetlb_lock, flags); 2114 list_for_each_entry_safe(folio, tmp_f, folio_list, lru) { 2115 __prep_account_new_huge_page(h, folio_nid(folio)); 2116 enqueue_hugetlb_folio(h, folio); 2117 } 2118 spin_unlock_irqrestore(&hugetlb_lock, flags); 2119 } 2120 2121 /* 2122 * Allocates a fresh hugetlb page in a node interleaved manner. The page 2123 * will later be added to the appropriate hugetlb pool. 
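 *
 * The interleaving is driven by *next_node: for_each_node_mask_to_alloc()
 * advances it on each use, so successive calls are intended to spread
 * new pool pages across nodes_allowed instead of filling one node first.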
2124 */ 2125 static struct folio *alloc_pool_huge_folio(struct hstate *h, 2126 nodemask_t *nodes_allowed, 2127 nodemask_t *node_alloc_noretry, 2128 int *next_node) 2129 { 2130 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 2131 int nr_nodes, node; 2132 2133 for_each_node_mask_to_alloc(next_node, nr_nodes, node, nodes_allowed) { 2134 struct folio *folio; 2135 2136 folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, node, 2137 nodes_allowed, node_alloc_noretry); 2138 if (folio) 2139 return folio; 2140 } 2141 2142 return NULL; 2143 } 2144 2145 /* 2146 * Remove huge page from pool from next node to free. Attempt to keep 2147 * persistent huge pages more or less balanced over allowed nodes. 2148 * This routine only 'removes' the hugetlb page. The caller must make 2149 * an additional call to free the page to low level allocators. 2150 * Called with hugetlb_lock locked. 2151 */ 2152 static struct folio *remove_pool_hugetlb_folio(struct hstate *h, 2153 nodemask_t *nodes_allowed, bool acct_surplus) 2154 { 2155 int nr_nodes, node; 2156 struct folio *folio = NULL; 2157 2158 lockdep_assert_held(&hugetlb_lock); 2159 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 2160 /* 2161 * If we're returning unused surplus pages, only examine 2162 * nodes with surplus pages. 2163 */ 2164 if ((!acct_surplus || h->surplus_huge_pages_node[node]) && 2165 !list_empty(&h->hugepage_freelists[node])) { 2166 folio = list_entry(h->hugepage_freelists[node].next, 2167 struct folio, lru); 2168 remove_hugetlb_folio(h, folio, acct_surplus); 2169 break; 2170 } 2171 } 2172 2173 return folio; 2174 } 2175 2176 /* 2177 * Dissolve a given free hugetlb folio into free buddy pages. This function 2178 * does nothing for in-use hugetlb folios and non-hugetlb folios. 2179 * This function returns values like below: 2180 * 2181 * -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages 2182 * when the system is under memory pressure and the feature of 2183 * freeing unused vmemmap pages associated with each hugetlb page 2184 * is enabled. 2185 * -EBUSY: failed to dissolved free hugepages or the hugepage is in-use 2186 * (allocated or reserved.) 2187 * 0: successfully dissolved free hugepages or the page is not a 2188 * hugepage (considered as already dissolved) 2189 */ 2190 int dissolve_free_hugetlb_folio(struct folio *folio) 2191 { 2192 int rc = -EBUSY; 2193 2194 retry: 2195 /* Not to disrupt normal path by vainly holding hugetlb_lock */ 2196 if (!folio_test_hugetlb(folio)) 2197 return 0; 2198 2199 spin_lock_irq(&hugetlb_lock); 2200 if (!folio_test_hugetlb(folio)) { 2201 rc = 0; 2202 goto out; 2203 } 2204 2205 if (!folio_ref_count(folio)) { 2206 struct hstate *h = folio_hstate(folio); 2207 if (!available_huge_pages(h)) 2208 goto out; 2209 2210 /* 2211 * We should make sure that the page is already on the free list 2212 * when it is dissolved. 2213 */ 2214 if (unlikely(!folio_test_hugetlb_freed(folio))) { 2215 spin_unlock_irq(&hugetlb_lock); 2216 cond_resched(); 2217 2218 /* 2219 * Theoretically, we should return -EBUSY when we 2220 * encounter this race. In fact, we have a chance 2221 * to successfully dissolve the page if we do a 2222 * retry. Because the race window is quite small. 2223 * If we seize this opportunity, it is an optimization 2224 * for increasing the success rate of dissolving page. 
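 *
 * (At this point the folio's refcount is already zero; it simply has
 * not been marked "freed" and placed on a free list yet, so after the
 * cond_resched() above the retry will normally find it there.)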
2225 */
2226 goto retry;
2227 }
2228
2229 remove_hugetlb_folio(h, folio, false);
2230 h->max_huge_pages--;
2231 spin_unlock_irq(&hugetlb_lock);
2232
2233 /*
2234 * Normally update_and_free_hugetlb_folio will allocate required vmemmap
2235 * before freeing the page. update_and_free_hugetlb_folio will fail to
2236 * free the page if it cannot allocate required vmemmap. We
2237 * need to adjust max_huge_pages if the page is not freed.
2238 * Attempt to allocate vmemmap here so that we can take
2239 * appropriate action on failure.
2240 *
2241 * The folio_test_hugetlb check here is because
2242 * remove_hugetlb_folio will clear hugetlb folio flag for
2243 * non-vmemmap optimized hugetlb folios.
2244 */
2245 if (folio_test_hugetlb(folio)) {
2246 rc = hugetlb_vmemmap_restore_folio(h, folio);
2247 if (rc) {
2248 spin_lock_irq(&hugetlb_lock);
2249 add_hugetlb_folio(h, folio, false);
2250 h->max_huge_pages++;
2251 goto out;
2252 }
2253 } else
2254 rc = 0;
2255
2256 update_and_free_hugetlb_folio(h, folio, false);
2257 return rc;
2258 }
2259 out:
2260 spin_unlock_irq(&hugetlb_lock);
2261 return rc;
2262 }
2263
2264 /*
2265 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2266 * make specified memory blocks removable from the system.
2267 * Note that this will dissolve a free gigantic hugepage completely, if any
2268 * part of it lies within the given range.
2269 * Also note that if dissolve_free_hugetlb_folio() returns with an error, all
2270 * free hugetlb folios that were dissolved before that error are lost.
2271 */
2272 int dissolve_free_hugetlb_folios(unsigned long start_pfn, unsigned long end_pfn)
2273 {
2274 unsigned long pfn;
2275 struct folio *folio;
2276 int rc = 0;
2277 unsigned int order;
2278 struct hstate *h;
2279
2280 if (!hugepages_supported())
2281 return rc;
2282
2283 order = huge_page_order(&default_hstate);
2284 for_each_hstate(h)
2285 order = min(order, huge_page_order(h));
2286
2287 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
2288 folio = pfn_folio(pfn);
2289 rc = dissolve_free_hugetlb_folio(folio);
2290 if (rc)
2291 break;
2292 }
2293
2294 return rc;
2295 }
2296
2297 /*
2298 * Allocates a fresh surplus page from the page allocator.
2299 */
2300 static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
2301 gfp_t gfp_mask, int nid, nodemask_t *nmask)
2302 {
2303 struct folio *folio = NULL;
2304
2305 if (hstate_is_gigantic(h))
2306 return NULL;
2307
2308 spin_lock_irq(&hugetlb_lock);
2309 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
2310 goto out_unlock;
2311 spin_unlock_irq(&hugetlb_lock);
2312
2313 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
2314 if (!folio)
2315 return NULL;
2316
2317 spin_lock_irq(&hugetlb_lock);
2318 /*
2319 * We could have raced with the pool size change.
2320 * Double check that and simply deallocate the new page
2321 * if we would end up overcommitting the surpluses.
Abuse 2322 * temporary page to workaround the nasty free_huge_folio 2323 * codeflow 2324 */ 2325 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { 2326 folio_set_hugetlb_temporary(folio); 2327 spin_unlock_irq(&hugetlb_lock); 2328 free_huge_folio(folio); 2329 return NULL; 2330 } 2331 2332 h->surplus_huge_pages++; 2333 h->surplus_huge_pages_node[folio_nid(folio)]++; 2334 2335 out_unlock: 2336 spin_unlock_irq(&hugetlb_lock); 2337 2338 return folio; 2339 } 2340 2341 static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, 2342 int nid, nodemask_t *nmask) 2343 { 2344 struct folio *folio; 2345 2346 if (hstate_is_gigantic(h)) 2347 return NULL; 2348 2349 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask); 2350 if (!folio) 2351 return NULL; 2352 2353 /* fresh huge pages are frozen */ 2354 folio_ref_unfreeze(folio, 1); 2355 /* 2356 * We do not account these pages as surplus because they are only 2357 * temporary and will be released properly on the last reference 2358 */ 2359 folio_set_hugetlb_temporary(folio); 2360 2361 return folio; 2362 } 2363 2364 /* 2365 * Use the VMA's mpolicy to allocate a huge page from the buddy. 2366 */ 2367 static 2368 struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h, 2369 struct vm_area_struct *vma, unsigned long addr) 2370 { 2371 struct folio *folio = NULL; 2372 struct mempolicy *mpol; 2373 gfp_t gfp_mask = htlb_alloc_mask(h); 2374 int nid; 2375 nodemask_t *nodemask; 2376 2377 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask); 2378 if (mpol_is_preferred_many(mpol)) { 2379 gfp_t gfp = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 2380 2381 folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask); 2382 2383 /* Fallback to all nodes if page==NULL */ 2384 nodemask = NULL; 2385 } 2386 2387 if (!folio) 2388 folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask); 2389 mpol_cond_put(mpol); 2390 return folio; 2391 } 2392 2393 /* folio migration callback function */ 2394 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, 2395 nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback) 2396 { 2397 spin_lock_irq(&hugetlb_lock); 2398 if (available_huge_pages(h)) { 2399 struct folio *folio; 2400 2401 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, 2402 preferred_nid, nmask); 2403 if (folio) { 2404 spin_unlock_irq(&hugetlb_lock); 2405 return folio; 2406 } 2407 } 2408 spin_unlock_irq(&hugetlb_lock); 2409 2410 /* We cannot fallback to other nodes, as we could break the per-node pool. */ 2411 if (!allow_alloc_fallback) 2412 gfp_mask |= __GFP_THISNODE; 2413 2414 return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask); 2415 } 2416 2417 static nodemask_t *policy_mbind_nodemask(gfp_t gfp) 2418 { 2419 #ifdef CONFIG_NUMA 2420 struct mempolicy *mpol = get_task_policy(current); 2421 2422 /* 2423 * Only enforce MPOL_BIND policy which overlaps with cpuset policy 2424 * (from policy_nodemask) specifically for hugetlb case 2425 */ 2426 if (mpol->mode == MPOL_BIND && 2427 (apply_policy_zone(mpol, gfp_zone(gfp)) && 2428 cpuset_nodemask_valid_mems_allowed(&mpol->nodes))) 2429 return &mpol->nodes; 2430 #endif 2431 return NULL; 2432 } 2433 2434 /* 2435 * Increase the hugetlb pool such that it can accommodate a reservation 2436 * of size 'delta'. 
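 *
 * Illustrative example (numbers made up): with resv_huge_pages = 10,
 * free_huge_pages = 4 and delta = 3, needed = (10 + 3) - 4 = 9, so
 * nine surplus pages must be allocated before the additional
 * reservation of three pages can be committed.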
2437 */ 2438 static int gather_surplus_pages(struct hstate *h, long delta) 2439 __must_hold(&hugetlb_lock) 2440 { 2441 LIST_HEAD(surplus_list); 2442 struct folio *folio, *tmp; 2443 int ret; 2444 long i; 2445 long needed, allocated; 2446 bool alloc_ok = true; 2447 int node; 2448 nodemask_t *mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h)); 2449 2450 lockdep_assert_held(&hugetlb_lock); 2451 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; 2452 if (needed <= 0) { 2453 h->resv_huge_pages += delta; 2454 return 0; 2455 } 2456 2457 allocated = 0; 2458 2459 ret = -ENOMEM; 2460 retry: 2461 spin_unlock_irq(&hugetlb_lock); 2462 for (i = 0; i < needed; i++) { 2463 folio = NULL; 2464 for_each_node_mask(node, cpuset_current_mems_allowed) { 2465 if (!mbind_nodemask || node_isset(node, *mbind_nodemask)) { 2466 folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), 2467 node, NULL); 2468 if (folio) 2469 break; 2470 } 2471 } 2472 if (!folio) { 2473 alloc_ok = false; 2474 break; 2475 } 2476 list_add(&folio->lru, &surplus_list); 2477 cond_resched(); 2478 } 2479 allocated += i; 2480 2481 /* 2482 * After retaking hugetlb_lock, we need to recalculate 'needed' 2483 * because either resv_huge_pages or free_huge_pages may have changed. 2484 */ 2485 spin_lock_irq(&hugetlb_lock); 2486 needed = (h->resv_huge_pages + delta) - 2487 (h->free_huge_pages + allocated); 2488 if (needed > 0) { 2489 if (alloc_ok) 2490 goto retry; 2491 /* 2492 * We were not able to allocate enough pages to 2493 * satisfy the entire reservation so we free what 2494 * we've allocated so far. 2495 */ 2496 goto free; 2497 } 2498 /* 2499 * The surplus_list now contains _at_least_ the number of extra pages 2500 * needed to accommodate the reservation. Add the appropriate number 2501 * of pages to the hugetlb pool and free the extras back to the buddy 2502 * allocator. Commit the entire reservation here to prevent another 2503 * process from stealing the pages as they are added to the pool but 2504 * before they are reserved. 2505 */ 2506 needed += allocated; 2507 h->resv_huge_pages += delta; 2508 ret = 0; 2509 2510 /* Free the needed pages to the hugetlb pool */ 2511 list_for_each_entry_safe(folio, tmp, &surplus_list, lru) { 2512 if ((--needed) < 0) 2513 break; 2514 /* Add the page to the hugetlb allocator */ 2515 enqueue_hugetlb_folio(h, folio); 2516 } 2517 free: 2518 spin_unlock_irq(&hugetlb_lock); 2519 2520 /* 2521 * Free unnecessary surplus pages to the buddy allocator. 2522 * Pages have no ref count, call free_huge_folio directly. 2523 */ 2524 list_for_each_entry_safe(folio, tmp, &surplus_list, lru) 2525 free_huge_folio(folio); 2526 spin_lock_irq(&hugetlb_lock); 2527 2528 return ret; 2529 } 2530 2531 /* 2532 * This routine has two main purposes: 2533 * 1) Decrement the reservation count (resv_huge_pages) by the value passed 2534 * in unused_resv_pages. This corresponds to the prior adjustments made 2535 * to the associated reservation map. 2536 * 2) Free any unused surplus pages that may have been allocated to satisfy 2537 * the reservation. As many as unused_resv_pages may be freed. 
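 *
 * Illustrative example (numbers made up): with unused_resv_pages = 5
 * and surplus_huge_pages = 2, the reserve count drops by 5 but only
 * min(5, 2) = 2 pages can actually be freed back to the buddy
 * allocator; the remainder of the reservation was backed by
 * persistent pool pages.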
2538 */ 2539 static void return_unused_surplus_pages(struct hstate *h, 2540 unsigned long unused_resv_pages) 2541 { 2542 unsigned long nr_pages; 2543 LIST_HEAD(page_list); 2544 2545 lockdep_assert_held(&hugetlb_lock); 2546 /* Uncommit the reservation */ 2547 h->resv_huge_pages -= unused_resv_pages; 2548 2549 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 2550 goto out; 2551 2552 /* 2553 * Part (or even all) of the reservation could have been backed 2554 * by pre-allocated pages. Only free surplus pages. 2555 */ 2556 nr_pages = min(unused_resv_pages, h->surplus_huge_pages); 2557 2558 /* 2559 * We want to release as many surplus pages as possible, spread 2560 * evenly across all nodes with memory. Iterate across these nodes 2561 * until we can no longer free unreserved surplus pages. This occurs 2562 * when the nodes with surplus pages have no free pages. 2563 * remove_pool_hugetlb_folio() will balance the freed pages across the 2564 * on-line nodes with memory and will handle the hstate accounting. 2565 */ 2566 while (nr_pages--) { 2567 struct folio *folio; 2568 2569 folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], 1); 2570 if (!folio) 2571 goto out; 2572 2573 list_add(&folio->lru, &page_list); 2574 } 2575 2576 out: 2577 spin_unlock_irq(&hugetlb_lock); 2578 update_and_free_pages_bulk(h, &page_list); 2579 spin_lock_irq(&hugetlb_lock); 2580 } 2581 2582 2583 /* 2584 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation 2585 * are used by the huge page allocation routines to manage reservations. 2586 * 2587 * vma_needs_reservation is called to determine if the huge page at addr 2588 * within the vma has an associated reservation. If a reservation is 2589 * needed, the value 1 is returned. The caller is then responsible for 2590 * managing the global reservation and subpool usage counts. After 2591 * the huge page has been allocated, vma_commit_reservation is called 2592 * to add the page to the reservation map. If the page allocation fails, 2593 * the reservation must be ended instead of committed. vma_end_reservation 2594 * is called in such cases. 2595 * 2596 * In the normal case, vma_commit_reservation returns the same value 2597 * as the preceding vma_needs_reservation call. The only time this 2598 * is not the case is if a reserve map was changed between calls. It 2599 * is the responsibility of the caller to notice the difference and 2600 * take appropriate action. 2601 * 2602 * vma_add_reservation is used in error paths where a reservation must 2603 * be restored when a newly allocated huge page must be freed. It is 2604 * to be called after calling vma_needs_reservation to determine if a 2605 * reservation exists. 2606 * 2607 * vma_del_reservation is used in error paths where an entry in the reserve 2608 * map was created during huge page allocation and must be removed. It is to 2609 * be called after calling vma_needs_reservation to determine if a reservation 2610 * exists. 
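 *
 * Rough sketch of the expected calling pattern (see
 * alloc_hugetlb_folio() for the real usage):
 *
 *	chg = vma_needs_reservation(h, vma, addr);
 *	if (chg < 0)
 *		return -ENOMEM;
 *	folio = ...allocate huge page, charging the subpool if chg...;
 *	if (!folio) {
 *		vma_end_reservation(h, vma, addr);
 *		return -ENOSPC;
 *	}
 *	vma_commit_reservation(h, vma, addr);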
2611 */ 2612 enum vma_resv_mode { 2613 VMA_NEEDS_RESV, 2614 VMA_COMMIT_RESV, 2615 VMA_END_RESV, 2616 VMA_ADD_RESV, 2617 VMA_DEL_RESV, 2618 }; 2619 static long __vma_reservation_common(struct hstate *h, 2620 struct vm_area_struct *vma, unsigned long addr, 2621 enum vma_resv_mode mode) 2622 { 2623 struct resv_map *resv; 2624 pgoff_t idx; 2625 long ret; 2626 long dummy_out_regions_needed; 2627 2628 resv = vma_resv_map(vma); 2629 if (!resv) 2630 return 1; 2631 2632 idx = vma_hugecache_offset(h, vma, addr); 2633 switch (mode) { 2634 case VMA_NEEDS_RESV: 2635 ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed); 2636 /* We assume that vma_reservation_* routines always operate on 2637 * 1 page, and that adding to resv map a 1 page entry can only 2638 * ever require 1 region. 2639 */ 2640 VM_BUG_ON(dummy_out_regions_needed != 1); 2641 break; 2642 case VMA_COMMIT_RESV: 2643 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2644 /* region_add calls of range 1 should never fail. */ 2645 VM_BUG_ON(ret < 0); 2646 break; 2647 case VMA_END_RESV: 2648 region_abort(resv, idx, idx + 1, 1); 2649 ret = 0; 2650 break; 2651 case VMA_ADD_RESV: 2652 if (vma->vm_flags & VM_MAYSHARE) { 2653 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2654 /* region_add calls of range 1 should never fail. */ 2655 VM_BUG_ON(ret < 0); 2656 } else { 2657 region_abort(resv, idx, idx + 1, 1); 2658 ret = region_del(resv, idx, idx + 1); 2659 } 2660 break; 2661 case VMA_DEL_RESV: 2662 if (vma->vm_flags & VM_MAYSHARE) { 2663 region_abort(resv, idx, idx + 1, 1); 2664 ret = region_del(resv, idx, idx + 1); 2665 } else { 2666 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2667 /* region_add calls of range 1 should never fail. */ 2668 VM_BUG_ON(ret < 0); 2669 } 2670 break; 2671 default: 2672 BUG(); 2673 } 2674 2675 if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV) 2676 return ret; 2677 /* 2678 * We know private mapping must have HPAGE_RESV_OWNER set. 2679 * 2680 * In most cases, reserves always exist for private mappings. 2681 * However, a file associated with mapping could have been 2682 * hole punched or truncated after reserves were consumed. 2683 * As subsequent fault on such a range will not use reserves. 2684 * Subtle - The reserve map for private mappings has the 2685 * opposite meaning than that of shared mappings. If NO 2686 * entry is in the reserve map, it means a reservation exists. 2687 * If an entry exists in the reserve map, it means the 2688 * reservation has already been consumed. As a result, the 2689 * return value of this routine is the opposite of the 2690 * value returned from reserve map manipulation routines above. 
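 *
 * E.g. for VMA_NEEDS_RESV on a private mapping: region_chg()
 * returning 1 means no entry was present, i.e. the mmap-time
 * reservation has not been consumed, so 0 ("no reservation needed")
 * is returned below; region_chg() returning 0 means an entry exists
 * and the reservation was already consumed, so 1 is returned.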
2691 */ 2692 if (ret > 0) 2693 return 0; 2694 if (ret == 0) 2695 return 1; 2696 return ret; 2697 } 2698 2699 static long vma_needs_reservation(struct hstate *h, 2700 struct vm_area_struct *vma, unsigned long addr) 2701 { 2702 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); 2703 } 2704 2705 static long vma_commit_reservation(struct hstate *h, 2706 struct vm_area_struct *vma, unsigned long addr) 2707 { 2708 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); 2709 } 2710 2711 static void vma_end_reservation(struct hstate *h, 2712 struct vm_area_struct *vma, unsigned long addr) 2713 { 2714 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); 2715 } 2716 2717 static long vma_add_reservation(struct hstate *h, 2718 struct vm_area_struct *vma, unsigned long addr) 2719 { 2720 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); 2721 } 2722 2723 static long vma_del_reservation(struct hstate *h, 2724 struct vm_area_struct *vma, unsigned long addr) 2725 { 2726 return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV); 2727 } 2728 2729 /* 2730 * This routine is called to restore reservation information on error paths. 2731 * It should ONLY be called for folios allocated via alloc_hugetlb_folio(), 2732 * and the hugetlb mutex should remain held when calling this routine. 2733 * 2734 * It handles two specific cases: 2735 * 1) A reservation was in place and the folio consumed the reservation. 2736 * hugetlb_restore_reserve is set in the folio. 2737 * 2) No reservation was in place for the page, so hugetlb_restore_reserve is 2738 * not set. However, alloc_hugetlb_folio always updates the reserve map. 2739 * 2740 * In case 1, free_huge_folio later in the error path will increment the 2741 * global reserve count. But, free_huge_folio does not have enough context 2742 * to adjust the reservation map. This case deals primarily with private 2743 * mappings. Adjust the reserve map here to be consistent with global 2744 * reserve count adjustments to be made by free_huge_folio. Make sure the 2745 * reserve map indicates there is a reservation present. 2746 * 2747 * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio. 2748 */ 2749 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, 2750 unsigned long address, struct folio *folio) 2751 { 2752 long rc = vma_needs_reservation(h, vma, address); 2753 2754 if (folio_test_hugetlb_restore_reserve(folio)) { 2755 if (unlikely(rc < 0)) 2756 /* 2757 * Rare out of memory condition in reserve map 2758 * manipulation. Clear hugetlb_restore_reserve so 2759 * that global reserve count will not be incremented 2760 * by free_huge_folio. This will make it appear 2761 * as though the reservation for this folio was 2762 * consumed. This may prevent the task from 2763 * faulting in the folio at a later time. This 2764 * is better than inconsistent global huge page 2765 * accounting of reserve counts. 2766 */ 2767 folio_clear_hugetlb_restore_reserve(folio); 2768 else if (rc) 2769 (void)vma_add_reservation(h, vma, address); 2770 else 2771 vma_end_reservation(h, vma, address); 2772 } else { 2773 if (!rc) { 2774 /* 2775 * This indicates there is an entry in the reserve map 2776 * not added by alloc_hugetlb_folio. We know it was added 2777 * before the alloc_hugetlb_folio call, otherwise 2778 * hugetlb_restore_reserve would be set on the folio. 2779 * Remove the entry so that a subsequent allocation 2780 * does not consume a reservation. 
2781 */ 2782 rc = vma_del_reservation(h, vma, address); 2783 if (rc < 0) 2784 /* 2785 * VERY rare out of memory condition. Since 2786 * we can not delete the entry, set 2787 * hugetlb_restore_reserve so that the reserve 2788 * count will be incremented when the folio 2789 * is freed. This reserve will be consumed 2790 * on a subsequent allocation. 2791 */ 2792 folio_set_hugetlb_restore_reserve(folio); 2793 } else if (rc < 0) { 2794 /* 2795 * Rare out of memory condition from 2796 * vma_needs_reservation call. Memory allocation is 2797 * only attempted if a new entry is needed. Therefore, 2798 * this implies there is not an entry in the 2799 * reserve map. 2800 * 2801 * For shared mappings, no entry in the map indicates 2802 * no reservation. We are done. 2803 */ 2804 if (!(vma->vm_flags & VM_MAYSHARE)) 2805 /* 2806 * For private mappings, no entry indicates 2807 * a reservation is present. Since we can 2808 * not add an entry, set hugetlb_restore_reserve 2809 * on the folio so reserve count will be 2810 * incremented when freed. This reserve will 2811 * be consumed on a subsequent allocation. 2812 */ 2813 folio_set_hugetlb_restore_reserve(folio); 2814 } else 2815 /* 2816 * No reservation present, do nothing 2817 */ 2818 vma_end_reservation(h, vma, address); 2819 } 2820 } 2821 2822 /* 2823 * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve 2824 * the old one 2825 * @h: struct hstate old page belongs to 2826 * @old_folio: Old folio to dissolve 2827 * @list: List to isolate the page in case we need to 2828 * Returns 0 on success, otherwise negated error. 2829 */ 2830 static int alloc_and_dissolve_hugetlb_folio(struct hstate *h, 2831 struct folio *old_folio, struct list_head *list) 2832 { 2833 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 2834 int nid = folio_nid(old_folio); 2835 struct folio *new_folio = NULL; 2836 int ret = 0; 2837 2838 retry: 2839 spin_lock_irq(&hugetlb_lock); 2840 if (!folio_test_hugetlb(old_folio)) { 2841 /* 2842 * Freed from under us. Drop new_folio too. 2843 */ 2844 goto free_new; 2845 } else if (folio_ref_count(old_folio)) { 2846 bool isolated; 2847 2848 /* 2849 * Someone has grabbed the folio, try to isolate it here. 2850 * Fail with -EBUSY if not possible. 2851 */ 2852 spin_unlock_irq(&hugetlb_lock); 2853 isolated = isolate_hugetlb(old_folio, list); 2854 ret = isolated ? 0 : -EBUSY; 2855 spin_lock_irq(&hugetlb_lock); 2856 goto free_new; 2857 } else if (!folio_test_hugetlb_freed(old_folio)) { 2858 /* 2859 * Folio's refcount is 0 but it has not been enqueued in the 2860 * freelist yet. Race window is small, so we can succeed here if 2861 * we retry. 2862 */ 2863 spin_unlock_irq(&hugetlb_lock); 2864 cond_resched(); 2865 goto retry; 2866 } else { 2867 if (!new_folio) { 2868 spin_unlock_irq(&hugetlb_lock); 2869 new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, 2870 NULL, NULL); 2871 if (!new_folio) 2872 return -ENOMEM; 2873 __prep_new_hugetlb_folio(h, new_folio); 2874 goto retry; 2875 } 2876 2877 /* 2878 * Ok, old_folio is still a genuine free hugepage. Remove it from 2879 * the freelist and decrease the counters. These will be 2880 * incremented again when calling __prep_account_new_huge_page() 2881 * and enqueue_hugetlb_folio() for new_folio. The counters will 2882 * remain stable since this happens under the lock. 2883 */ 2884 remove_hugetlb_folio(h, old_folio, false); 2885 2886 /* 2887 * Ref count on new_folio is already zero as it was dropped 2888 * earlier. It can be directly added to the pool free list. 
2889 */ 2890 __prep_account_new_huge_page(h, nid); 2891 enqueue_hugetlb_folio(h, new_folio); 2892 2893 /* 2894 * Folio has been replaced, we can safely free the old one. 2895 */ 2896 spin_unlock_irq(&hugetlb_lock); 2897 update_and_free_hugetlb_folio(h, old_folio, false); 2898 } 2899 2900 return ret; 2901 2902 free_new: 2903 spin_unlock_irq(&hugetlb_lock); 2904 if (new_folio) 2905 update_and_free_hugetlb_folio(h, new_folio, false); 2906 2907 return ret; 2908 } 2909 2910 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) 2911 { 2912 struct hstate *h; 2913 struct folio *folio = page_folio(page); 2914 int ret = -EBUSY; 2915 2916 /* 2917 * The page might have been dissolved from under our feet, so make sure 2918 * to carefully check the state under the lock. 2919 * Return success when racing as if we dissolved the page ourselves. 2920 */ 2921 spin_lock_irq(&hugetlb_lock); 2922 if (folio_test_hugetlb(folio)) { 2923 h = folio_hstate(folio); 2924 } else { 2925 spin_unlock_irq(&hugetlb_lock); 2926 return 0; 2927 } 2928 spin_unlock_irq(&hugetlb_lock); 2929 2930 /* 2931 * Fence off gigantic pages as there is a cyclic dependency between 2932 * alloc_contig_range and them. Return -ENOMEM as this has the effect 2933 * of bailing out right away without further retrying. 2934 */ 2935 if (hstate_is_gigantic(h)) 2936 return -ENOMEM; 2937 2938 if (folio_ref_count(folio) && isolate_hugetlb(folio, list)) 2939 ret = 0; 2940 else if (!folio_ref_count(folio)) 2941 ret = alloc_and_dissolve_hugetlb_folio(h, folio, list); 2942 2943 return ret; 2944 } 2945 2946 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, 2947 unsigned long addr, int avoid_reserve) 2948 { 2949 struct hugepage_subpool *spool = subpool_vma(vma); 2950 struct hstate *h = hstate_vma(vma); 2951 struct folio *folio; 2952 long map_chg, map_commit, nr_pages = pages_per_huge_page(h); 2953 long gbl_chg; 2954 int memcg_charge_ret, ret, idx; 2955 struct hugetlb_cgroup *h_cg = NULL; 2956 struct mem_cgroup *memcg; 2957 bool deferred_reserve; 2958 gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL; 2959 2960 memcg = get_mem_cgroup_from_current(); 2961 memcg_charge_ret = mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages); 2962 if (memcg_charge_ret == -ENOMEM) { 2963 mem_cgroup_put(memcg); 2964 return ERR_PTR(-ENOMEM); 2965 } 2966 2967 idx = hstate_index(h); 2968 /* 2969 * Examine the region/reserve map to determine if the process 2970 * has a reservation for the page to be allocated. A return 2971 * code of zero indicates a reservation exists (no change). 2972 */ 2973 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); 2974 if (map_chg < 0) { 2975 if (!memcg_charge_ret) 2976 mem_cgroup_cancel_charge(memcg, nr_pages); 2977 mem_cgroup_put(memcg); 2978 return ERR_PTR(-ENOMEM); 2979 } 2980 2981 /* 2982 * Processes that did not create the mapping will have no 2983 * reserves as indicated by the region/reserve map. Check 2984 * that the allocation will not exceed the subpool limit. 2985 * Allocations for MAP_NORESERVE mappings also need to be 2986 * checked against any subpool limit. 2987 */ 2988 if (map_chg || avoid_reserve) { 2989 gbl_chg = hugepage_subpool_get_pages(spool, 1); 2990 if (gbl_chg < 0) 2991 goto out_end_reservation; 2992 2993 /* 2994 * Even though there was no reservation in the region/reserve 2995 * map, there could be reservations associated with the 2996 * subpool that can be used. This would be indicated if the 2997 * return value of hugepage_subpool_get_pages() is zero. 
2998 * However, if avoid_reserve is specified we still avoid even 2999 * the subpool reservations. 3000 */ 3001 if (avoid_reserve) 3002 gbl_chg = 1; 3003 } 3004 3005 /* If this allocation is not consuming a reservation, charge it now. 3006 */ 3007 deferred_reserve = map_chg || avoid_reserve; 3008 if (deferred_reserve) { 3009 ret = hugetlb_cgroup_charge_cgroup_rsvd( 3010 idx, pages_per_huge_page(h), &h_cg); 3011 if (ret) 3012 goto out_subpool_put; 3013 } 3014 3015 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); 3016 if (ret) 3017 goto out_uncharge_cgroup_reservation; 3018 3019 spin_lock_irq(&hugetlb_lock); 3020 /* 3021 * glb_chg is passed to indicate whether or not a page must be taken 3022 * from the global free pool (global change). gbl_chg == 0 indicates 3023 * a reservation exists for the allocation. 3024 */ 3025 folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg); 3026 if (!folio) { 3027 spin_unlock_irq(&hugetlb_lock); 3028 folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr); 3029 if (!folio) 3030 goto out_uncharge_cgroup; 3031 spin_lock_irq(&hugetlb_lock); 3032 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) { 3033 folio_set_hugetlb_restore_reserve(folio); 3034 h->resv_huge_pages--; 3035 } 3036 list_add(&folio->lru, &h->hugepage_activelist); 3037 folio_ref_unfreeze(folio, 1); 3038 /* Fall through */ 3039 } 3040 3041 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio); 3042 /* If allocation is not consuming a reservation, also store the 3043 * hugetlb_cgroup pointer on the page. 3044 */ 3045 if (deferred_reserve) { 3046 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h), 3047 h_cg, folio); 3048 } 3049 3050 spin_unlock_irq(&hugetlb_lock); 3051 3052 hugetlb_set_folio_subpool(folio, spool); 3053 3054 map_commit = vma_commit_reservation(h, vma, addr); 3055 if (unlikely(map_chg > map_commit)) { 3056 /* 3057 * The page was added to the reservation map between 3058 * vma_needs_reservation and vma_commit_reservation. 3059 * This indicates a race with hugetlb_reserve_pages. 3060 * Adjust for the subpool count incremented above AND 3061 * in hugetlb_reserve_pages for the same page. Also, 3062 * the reservation count added in hugetlb_reserve_pages 3063 * no longer applies. 
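 *
 * Illustrative race: vma_needs_reservation() returned 1, then a
 * concurrent hugetlb_reserve_pages() added an entry covering this
 * offset, so vma_commit_reservation() returns 0.  One page ends up
 * charged twice against the subpool and global reserves, which the
 * adjustment below backs out.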
3064 */ 3065 long rsv_adjust; 3066 3067 rsv_adjust = hugepage_subpool_put_pages(spool, 1); 3068 hugetlb_acct_memory(h, -rsv_adjust); 3069 if (deferred_reserve) { 3070 spin_lock_irq(&hugetlb_lock); 3071 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), 3072 pages_per_huge_page(h), folio); 3073 spin_unlock_irq(&hugetlb_lock); 3074 } 3075 } 3076 3077 if (!memcg_charge_ret) 3078 mem_cgroup_commit_charge(folio, memcg); 3079 mem_cgroup_put(memcg); 3080 3081 return folio; 3082 3083 out_uncharge_cgroup: 3084 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); 3085 out_uncharge_cgroup_reservation: 3086 if (deferred_reserve) 3087 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h), 3088 h_cg); 3089 out_subpool_put: 3090 if (map_chg || avoid_reserve) 3091 hugepage_subpool_put_pages(spool, 1); 3092 out_end_reservation: 3093 vma_end_reservation(h, vma, addr); 3094 if (!memcg_charge_ret) 3095 mem_cgroup_cancel_charge(memcg, nr_pages); 3096 mem_cgroup_put(memcg); 3097 return ERR_PTR(-ENOSPC); 3098 } 3099 3100 int alloc_bootmem_huge_page(struct hstate *h, int nid) 3101 __attribute__ ((weak, alias("__alloc_bootmem_huge_page"))); 3102 int __alloc_bootmem_huge_page(struct hstate *h, int nid) 3103 { 3104 struct huge_bootmem_page *m = NULL; /* initialize for clang */ 3105 int nr_nodes, node = nid; 3106 3107 /* do node specific alloc */ 3108 if (nid != NUMA_NO_NODE) { 3109 m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h), 3110 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid); 3111 if (!m) 3112 return 0; 3113 goto found; 3114 } 3115 /* allocate from next node when distributing huge pages */ 3116 for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, &node_states[N_MEMORY]) { 3117 m = memblock_alloc_try_nid_raw( 3118 huge_page_size(h), huge_page_size(h), 3119 0, MEMBLOCK_ALLOC_ACCESSIBLE, node); 3120 /* 3121 * Use the beginning of the huge page to store the 3122 * huge_bootmem_page struct (until gather_bootmem 3123 * puts them into the mem_map). 3124 */ 3125 if (!m) 3126 return 0; 3127 goto found; 3128 } 3129 3130 found: 3131 3132 /* 3133 * Only initialize the head struct page in memmap_init_reserved_pages, 3134 * rest of the struct pages will be initialized by the HugeTLB 3135 * subsystem itself. 3136 * The head struct page is used to get folio information by the HugeTLB 3137 * subsystem like zone id and node id. 
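 *
 * (The tail struct pages are initialized later, once mem_map is up,
 * by hugetlb_folio_init_vmemmap()/hugetlb_folio_init_tail_vmemmap();
 * hence everything past the first PAGE_SIZE of this allocation is
 * marked noinit below.)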
3138 */ 3139 memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE), 3140 huge_page_size(h) - PAGE_SIZE); 3141 /* Put them into a private list first because mem_map is not up yet */ 3142 INIT_LIST_HEAD(&m->list); 3143 list_add(&m->list, &huge_boot_pages[node]); 3144 m->hstate = h; 3145 return 1; 3146 } 3147 3148 /* Initialize [start_page:end_page_number] tail struct pages of a hugepage */ 3149 static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio, 3150 unsigned long start_page_number, 3151 unsigned long end_page_number) 3152 { 3153 enum zone_type zone = zone_idx(folio_zone(folio)); 3154 int nid = folio_nid(folio); 3155 unsigned long head_pfn = folio_pfn(folio); 3156 unsigned long pfn, end_pfn = head_pfn + end_page_number; 3157 int ret; 3158 3159 for (pfn = head_pfn + start_page_number; pfn < end_pfn; pfn++) { 3160 struct page *page = pfn_to_page(pfn); 3161 3162 __ClearPageReserved(folio_page(folio, pfn - head_pfn)); 3163 __init_single_page(page, pfn, zone, nid); 3164 prep_compound_tail((struct page *)folio, pfn - head_pfn); 3165 ret = page_ref_freeze(page, 1); 3166 VM_BUG_ON(!ret); 3167 } 3168 } 3169 3170 static void __init hugetlb_folio_init_vmemmap(struct folio *folio, 3171 struct hstate *h, 3172 unsigned long nr_pages) 3173 { 3174 int ret; 3175 3176 /* Prepare folio head */ 3177 __folio_clear_reserved(folio); 3178 __folio_set_head(folio); 3179 ret = folio_ref_freeze(folio, 1); 3180 VM_BUG_ON(!ret); 3181 /* Initialize the necessary tail struct pages */ 3182 hugetlb_folio_init_tail_vmemmap(folio, 1, nr_pages); 3183 prep_compound_head((struct page *)folio, huge_page_order(h)); 3184 } 3185 3186 static void __init prep_and_add_bootmem_folios(struct hstate *h, 3187 struct list_head *folio_list) 3188 { 3189 unsigned long flags; 3190 struct folio *folio, *tmp_f; 3191 3192 /* Send list for bulk vmemmap optimization processing */ 3193 hugetlb_vmemmap_optimize_folios(h, folio_list); 3194 3195 list_for_each_entry_safe(folio, tmp_f, folio_list, lru) { 3196 if (!folio_test_hugetlb_vmemmap_optimized(folio)) { 3197 /* 3198 * If HVO fails, initialize all tail struct pages 3199 * We do not worry about potential long lock hold 3200 * time as this is early in boot and there should 3201 * be no contention. 3202 */ 3203 hugetlb_folio_init_tail_vmemmap(folio, 3204 HUGETLB_VMEMMAP_RESERVE_PAGES, 3205 pages_per_huge_page(h)); 3206 } 3207 /* Subdivide locks to achieve better parallel performance */ 3208 spin_lock_irqsave(&hugetlb_lock, flags); 3209 __prep_account_new_huge_page(h, folio_nid(folio)); 3210 enqueue_hugetlb_folio(h, folio); 3211 spin_unlock_irqrestore(&hugetlb_lock, flags); 3212 } 3213 } 3214 3215 /* 3216 * Put bootmem huge pages into the standard lists after mem_map is up. 3217 * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages. 3218 */ 3219 static void __init gather_bootmem_prealloc_node(unsigned long nid) 3220 { 3221 LIST_HEAD(folio_list); 3222 struct huge_bootmem_page *m; 3223 struct hstate *h = NULL, *prev_h = NULL; 3224 3225 list_for_each_entry(m, &huge_boot_pages[nid], list) { 3226 struct page *page = virt_to_page(m); 3227 struct folio *folio = (void *)page; 3228 3229 h = m->hstate; 3230 /* 3231 * It is possible to have multiple huge page sizes (hstates) 3232 * in this list. If so, process each size separately. 
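 *
 * (Possible, e.g., on architectures that support more than one
 * gigantic page size when boot-time pages of several such sizes
 * were requested on the command line.)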
3233 */ 3234 if (h != prev_h && prev_h != NULL) 3235 prep_and_add_bootmem_folios(prev_h, &folio_list); 3236 prev_h = h; 3237 3238 VM_BUG_ON(!hstate_is_gigantic(h)); 3239 WARN_ON(folio_ref_count(folio) != 1); 3240 3241 hugetlb_folio_init_vmemmap(folio, h, 3242 HUGETLB_VMEMMAP_RESERVE_PAGES); 3243 init_new_hugetlb_folio(h, folio); 3244 list_add(&folio->lru, &folio_list); 3245 3246 /* 3247 * We need to restore the 'stolen' pages to totalram_pages 3248 * in order to fix confusing memory reports from free(1) and 3249 * other side-effects, like CommitLimit going negative. 3250 */ 3251 adjust_managed_page_count(page, pages_per_huge_page(h)); 3252 cond_resched(); 3253 } 3254 3255 prep_and_add_bootmem_folios(h, &folio_list); 3256 } 3257 3258 static void __init gather_bootmem_prealloc_parallel(unsigned long start, 3259 unsigned long end, void *arg) 3260 { 3261 int nid; 3262 3263 for (nid = start; nid < end; nid++) 3264 gather_bootmem_prealloc_node(nid); 3265 } 3266 3267 static void __init gather_bootmem_prealloc(void) 3268 { 3269 struct padata_mt_job job = { 3270 .thread_fn = gather_bootmem_prealloc_parallel, 3271 .fn_arg = NULL, 3272 .start = 0, 3273 .size = num_node_state(N_MEMORY), 3274 .align = 1, 3275 .min_chunk = 1, 3276 .max_threads = num_node_state(N_MEMORY), 3277 .numa_aware = true, 3278 }; 3279 3280 padata_do_multithreaded(&job); 3281 } 3282 3283 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid) 3284 { 3285 unsigned long i; 3286 char buf[32]; 3287 3288 for (i = 0; i < h->max_huge_pages_node[nid]; ++i) { 3289 if (hstate_is_gigantic(h)) { 3290 if (!alloc_bootmem_huge_page(h, nid)) 3291 break; 3292 } else { 3293 struct folio *folio; 3294 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 3295 3296 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, 3297 &node_states[N_MEMORY]); 3298 if (!folio) 3299 break; 3300 free_huge_folio(folio); /* free it into the hugepage allocator */ 3301 } 3302 cond_resched(); 3303 } 3304 if (i == h->max_huge_pages_node[nid]) 3305 return; 3306 3307 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3308 pr_warn("HugeTLB: allocating %u of page size %s failed node%d. Only allocated %lu hugepages.\n", 3309 h->max_huge_pages_node[nid], buf, nid, i); 3310 h->max_huge_pages -= (h->max_huge_pages_node[nid] - i); 3311 h->max_huge_pages_node[nid] = i; 3312 } 3313 3314 static bool __init hugetlb_hstate_alloc_pages_specific_nodes(struct hstate *h) 3315 { 3316 int i; 3317 bool node_specific_alloc = false; 3318 3319 for_each_online_node(i) { 3320 if (h->max_huge_pages_node[i] > 0) { 3321 hugetlb_hstate_alloc_pages_onenode(h, i); 3322 node_specific_alloc = true; 3323 } 3324 } 3325 3326 return node_specific_alloc; 3327 } 3328 3329 static void __init hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated, struct hstate *h) 3330 { 3331 if (allocated < h->max_huge_pages) { 3332 char buf[32]; 3333 3334 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3335 pr_warn("HugeTLB: allocating %lu of page size %s failed. 
Only allocated %lu hugepages.\n", 3336 h->max_huge_pages, buf, allocated); 3337 h->max_huge_pages = allocated; 3338 } 3339 } 3340 3341 static void __init hugetlb_pages_alloc_boot_node(unsigned long start, unsigned long end, void *arg) 3342 { 3343 struct hstate *h = (struct hstate *)arg; 3344 int i, num = end - start; 3345 nodemask_t node_alloc_noretry; 3346 LIST_HEAD(folio_list); 3347 int next_node = first_online_node; 3348 3349 /* Bit mask controlling how hard we retry per-node allocations.*/ 3350 nodes_clear(node_alloc_noretry); 3351 3352 for (i = 0; i < num; ++i) { 3353 struct folio *folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY], 3354 &node_alloc_noretry, &next_node); 3355 if (!folio) 3356 break; 3357 3358 list_move(&folio->lru, &folio_list); 3359 cond_resched(); 3360 } 3361 3362 prep_and_add_allocated_folios(h, &folio_list); 3363 } 3364 3365 static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h) 3366 { 3367 unsigned long i; 3368 3369 for (i = 0; i < h->max_huge_pages; ++i) { 3370 if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE)) 3371 break; 3372 cond_resched(); 3373 } 3374 3375 return i; 3376 } 3377 3378 static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h) 3379 { 3380 struct padata_mt_job job = { 3381 .fn_arg = h, 3382 .align = 1, 3383 .numa_aware = true 3384 }; 3385 3386 job.thread_fn = hugetlb_pages_alloc_boot_node; 3387 job.start = 0; 3388 job.size = h->max_huge_pages; 3389 3390 /* 3391 * job.max_threads is twice the num_node_state(N_MEMORY), 3392 * 3393 * Tests below indicate that a multiplier of 2 significantly improves 3394 * performance, and although larger values also provide improvements, 3395 * the gains are marginal. 3396 * 3397 * Therefore, choosing 2 as the multiplier strikes a good balance between 3398 * enhancing parallel processing capabilities and maintaining efficient 3399 * resource management. 3400 * 3401 * +------------+-------+-------+-------+-------+-------+ 3402 * | multiplier | 1 | 2 | 3 | 4 | 5 | 3403 * +------------+-------+-------+-------+-------+-------+ 3404 * | 256G 2node | 358ms | 215ms | 157ms | 134ms | 126ms | 3405 * | 2T 4node | 979ms | 679ms | 543ms | 489ms | 481ms | 3406 * | 50G 2node | 71ms | 44ms | 37ms | 30ms | 31ms | 3407 * +------------+-------+-------+-------+-------+-------+ 3408 */ 3409 job.max_threads = num_node_state(N_MEMORY) * 2; 3410 job.min_chunk = h->max_huge_pages / num_node_state(N_MEMORY) / 2; 3411 padata_do_multithreaded(&job); 3412 3413 return h->nr_huge_pages; 3414 } 3415 3416 /* 3417 * NOTE: this routine is called in different contexts for gigantic and 3418 * non-gigantic pages. 3419 * - For gigantic pages, this is called early in the boot process and 3420 * pages are allocated from memblock allocated or something similar. 3421 * Gigantic pages are actually added to pools later with the routine 3422 * gather_bootmem_prealloc. 3423 * - For non-gigantic pages, this is called later in the boot process after 3424 * all of mm is up and functional. Pages are allocated from buddy and 3425 * then added to hugetlb pools. 
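 *
 * (For instance, on x86-64 booting with
 * "hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=512" takes the
 * early memblock path for the two 1G pages and the later buddy
 * allocator path for the 2M pages.)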
3426 */ 3427 static void __init hugetlb_hstate_alloc_pages(struct hstate *h) 3428 { 3429 unsigned long allocated; 3430 static bool initialized __initdata; 3431 3432 /* skip gigantic hugepages allocation if hugetlb_cma enabled */ 3433 if (hstate_is_gigantic(h) && hugetlb_cma_size) { 3434 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n"); 3435 return; 3436 } 3437 3438 /* hugetlb_hstate_alloc_pages will be called many times, initialize huge_boot_pages once */ 3439 if (!initialized) { 3440 int i = 0; 3441 3442 for (i = 0; i < MAX_NUMNODES; i++) 3443 INIT_LIST_HEAD(&huge_boot_pages[i]); 3444 initialized = true; 3445 } 3446 3447 /* do node specific alloc */ 3448 if (hugetlb_hstate_alloc_pages_specific_nodes(h)) 3449 return; 3450 3451 /* below will do all node balanced alloc */ 3452 if (hstate_is_gigantic(h)) 3453 allocated = hugetlb_gigantic_pages_alloc_boot(h); 3454 else 3455 allocated = hugetlb_pages_alloc_boot(h); 3456 3457 hugetlb_hstate_alloc_pages_errcheck(allocated, h); 3458 } 3459 3460 static void __init hugetlb_init_hstates(void) 3461 { 3462 struct hstate *h, *h2; 3463 3464 for_each_hstate(h) { 3465 /* oversize hugepages were init'ed in early boot */ 3466 if (!hstate_is_gigantic(h)) 3467 hugetlb_hstate_alloc_pages(h); 3468 3469 /* 3470 * Set demote order for each hstate. Note that 3471 * h->demote_order is initially 0. 3472 * - We can not demote gigantic pages if runtime freeing 3473 * is not supported, so skip this. 3474 * - If CMA allocation is possible, we can not demote 3475 * HUGETLB_PAGE_ORDER or smaller size pages. 3476 */ 3477 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 3478 continue; 3479 if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER) 3480 continue; 3481 for_each_hstate(h2) { 3482 if (h2 == h) 3483 continue; 3484 if (h2->order < h->order && 3485 h2->order > h->demote_order) 3486 h->demote_order = h2->order; 3487 } 3488 } 3489 } 3490 3491 static void __init report_hugepages(void) 3492 { 3493 struct hstate *h; 3494 3495 for_each_hstate(h) { 3496 char buf[32]; 3497 3498 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3499 pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n", 3500 buf, h->free_huge_pages); 3501 pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n", 3502 hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf); 3503 } 3504 } 3505 3506 #ifdef CONFIG_HIGHMEM 3507 static void try_to_free_low(struct hstate *h, unsigned long count, 3508 nodemask_t *nodes_allowed) 3509 { 3510 int i; 3511 LIST_HEAD(page_list); 3512 3513 lockdep_assert_held(&hugetlb_lock); 3514 if (hstate_is_gigantic(h)) 3515 return; 3516 3517 /* 3518 * Collect pages to be freed on a list, and free after dropping lock 3519 */ 3520 for_each_node_mask(i, *nodes_allowed) { 3521 struct folio *folio, *next; 3522 struct list_head *freel = &h->hugepage_freelists[i]; 3523 list_for_each_entry_safe(folio, next, freel, lru) { 3524 if (count >= h->nr_huge_pages) 3525 goto out; 3526 if (folio_test_highmem(folio)) 3527 continue; 3528 remove_hugetlb_folio(h, folio, false); 3529 list_add(&folio->lru, &page_list); 3530 } 3531 } 3532 3533 out: 3534 spin_unlock_irq(&hugetlb_lock); 3535 update_and_free_pages_bulk(h, &page_list); 3536 spin_lock_irq(&hugetlb_lock); 3537 } 3538 #else 3539 static inline void try_to_free_low(struct hstate *h, unsigned long count, 3540 nodemask_t *nodes_allowed) 3541 { 3542 } 3543 #endif 3544 3545 /* 3546 * Increment or decrement surplus_huge_pages. 
Keep node-specific counters 3547 * balanced by operating on them in a round-robin fashion. 3548 * Returns 1 if an adjustment was made. 3549 */ 3550 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, 3551 int delta) 3552 { 3553 int nr_nodes, node; 3554 3555 lockdep_assert_held(&hugetlb_lock); 3556 VM_BUG_ON(delta != -1 && delta != 1); 3557 3558 if (delta < 0) { 3559 for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, nodes_allowed) { 3560 if (h->surplus_huge_pages_node[node]) 3561 goto found; 3562 } 3563 } else { 3564 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 3565 if (h->surplus_huge_pages_node[node] < 3566 h->nr_huge_pages_node[node]) 3567 goto found; 3568 } 3569 } 3570 return 0; 3571 3572 found: 3573 h->surplus_huge_pages += delta; 3574 h->surplus_huge_pages_node[node] += delta; 3575 return 1; 3576 } 3577 3578 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) 3579 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, 3580 nodemask_t *nodes_allowed) 3581 { 3582 unsigned long min_count; 3583 unsigned long allocated; 3584 struct folio *folio; 3585 LIST_HEAD(page_list); 3586 NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL); 3587 3588 /* 3589 * Bit mask controlling how hard we retry per-node allocations. 3590 * If we can not allocate the bit mask, do not attempt to allocate 3591 * the requested huge pages. 3592 */ 3593 if (node_alloc_noretry) 3594 nodes_clear(*node_alloc_noretry); 3595 else 3596 return -ENOMEM; 3597 3598 /* 3599 * resize_lock mutex prevents concurrent adjustments to number of 3600 * pages in hstate via the proc/sysfs interfaces. 3601 */ 3602 mutex_lock(&h->resize_lock); 3603 flush_free_hpage_work(h); 3604 spin_lock_irq(&hugetlb_lock); 3605 3606 /* 3607 * Check for a node specific request. 3608 * Changing node specific huge page count may require a corresponding 3609 * change to the global count. In any case, the passed node mask 3610 * (nodes_allowed) will restrict alloc/free to the specified node. 3611 */ 3612 if (nid != NUMA_NO_NODE) { 3613 unsigned long old_count = count; 3614 3615 count += persistent_huge_pages(h) - 3616 (h->nr_huge_pages_node[nid] - 3617 h->surplus_huge_pages_node[nid]); 3618 /* 3619 * User may have specified a large count value which caused the 3620 * above calculation to overflow. In this case, they wanted 3621 * to allocate as many huge pages as possible. Set count to 3622 * largest possible value to align with their intention. 3623 */ 3624 if (count < old_count) 3625 count = ULONG_MAX; 3626 } 3627 3628 /* 3629 * Gigantic pages runtime allocation depend on the capability for large 3630 * page range allocation. 3631 * If the system does not provide this feature, return an error when 3632 * the user tries to allocate gigantic pages but let the user free the 3633 * boottime allocated gigantic pages. 3634 */ 3635 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) { 3636 if (count > persistent_huge_pages(h)) { 3637 spin_unlock_irq(&hugetlb_lock); 3638 mutex_unlock(&h->resize_lock); 3639 NODEMASK_FREE(node_alloc_noretry); 3640 return -EINVAL; 3641 } 3642 /* Fall through to decrease pool */ 3643 } 3644 3645 /* 3646 * Increase the pool size 3647 * First take pages out of surplus state. Then make up the 3648 * remaining difference by allocating fresh huge pages. 3649 * 3650 * We might race with alloc_surplus_hugetlb_folio() here and be unable 3651 * to convert a surplus huge page to a normal huge page. 
That is 3652 * not critical, though, it just means the overall size of the 3653 * pool might be one hugepage larger than it needs to be, but 3654 * within all the constraints specified by the sysctls. 3655 */ 3656 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { 3657 if (!adjust_pool_surplus(h, nodes_allowed, -1)) 3658 break; 3659 } 3660 3661 allocated = 0; 3662 while (count > (persistent_huge_pages(h) + allocated)) { 3663 /* 3664 * If this allocation races such that we no longer need the 3665 * page, free_huge_folio will handle it by freeing the page 3666 * and reducing the surplus. 3667 */ 3668 spin_unlock_irq(&hugetlb_lock); 3669 3670 /* yield cpu to avoid soft lockup */ 3671 cond_resched(); 3672 3673 folio = alloc_pool_huge_folio(h, nodes_allowed, 3674 node_alloc_noretry, 3675 &h->next_nid_to_alloc); 3676 if (!folio) { 3677 prep_and_add_allocated_folios(h, &page_list); 3678 spin_lock_irq(&hugetlb_lock); 3679 goto out; 3680 } 3681 3682 list_add(&folio->lru, &page_list); 3683 allocated++; 3684 3685 /* Bail for signals. Probably ctrl-c from user */ 3686 if (signal_pending(current)) { 3687 prep_and_add_allocated_folios(h, &page_list); 3688 spin_lock_irq(&hugetlb_lock); 3689 goto out; 3690 } 3691 3692 spin_lock_irq(&hugetlb_lock); 3693 } 3694 3695 /* Add allocated pages to the pool */ 3696 if (!list_empty(&page_list)) { 3697 spin_unlock_irq(&hugetlb_lock); 3698 prep_and_add_allocated_folios(h, &page_list); 3699 spin_lock_irq(&hugetlb_lock); 3700 } 3701 3702 /* 3703 * Decrease the pool size 3704 * First return free pages to the buddy allocator (being careful 3705 * to keep enough around to satisfy reservations). Then place 3706 * pages into surplus state as needed so the pool will shrink 3707 * to the desired size as pages become free. 3708 * 3709 * By placing pages into the surplus state independent of the 3710 * overcommit value, we are allowing the surplus pool size to 3711 * exceed overcommit. There are few sane options here. Since 3712 * alloc_surplus_hugetlb_folio() is checking the global counter, 3713 * though, we'll note that we're not allowed to exceed surplus 3714 * and won't grow the pool anywhere else. Not until one of the 3715 * sysctls are changed, or the surplus pages go out of use. 
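 *
 * (Illustrative: if 10 huge pages are in use and nr_hugepages is
 * written down to 0, those 10 in-use pages become surplus even when
 * nr_overcommit_hugepages is smaller; the surplus count falls back
 * within the limit as the pages are eventually freed.)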
3716 */ 3717 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; 3718 min_count = max(count, min_count); 3719 try_to_free_low(h, min_count, nodes_allowed); 3720 3721 /* 3722 * Collect pages to be removed on list without dropping lock 3723 */ 3724 while (min_count < persistent_huge_pages(h)) { 3725 folio = remove_pool_hugetlb_folio(h, nodes_allowed, 0); 3726 if (!folio) 3727 break; 3728 3729 list_add(&folio->lru, &page_list); 3730 } 3731 /* free the pages after dropping lock */ 3732 spin_unlock_irq(&hugetlb_lock); 3733 update_and_free_pages_bulk(h, &page_list); 3734 flush_free_hpage_work(h); 3735 spin_lock_irq(&hugetlb_lock); 3736 3737 while (count < persistent_huge_pages(h)) { 3738 if (!adjust_pool_surplus(h, nodes_allowed, 1)) 3739 break; 3740 } 3741 out: 3742 h->max_huge_pages = persistent_huge_pages(h); 3743 spin_unlock_irq(&hugetlb_lock); 3744 mutex_unlock(&h->resize_lock); 3745 3746 NODEMASK_FREE(node_alloc_noretry); 3747 3748 return 0; 3749 } 3750 3751 static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst, 3752 struct list_head *src_list) 3753 { 3754 long rc; 3755 struct folio *folio, *next; 3756 LIST_HEAD(dst_list); 3757 LIST_HEAD(ret_list); 3758 3759 rc = hugetlb_vmemmap_restore_folios(src, src_list, &ret_list); 3760 list_splice_init(&ret_list, src_list); 3761 3762 /* 3763 * Taking target hstate mutex synchronizes with set_max_huge_pages. 3764 * Without the mutex, pages added to target hstate could be marked 3765 * as surplus. 3766 * 3767 * Note that we already hold src->resize_lock. To prevent deadlock, 3768 * use the convention of always taking larger size hstate mutex first. 3769 */ 3770 mutex_lock(&dst->resize_lock); 3771 3772 list_for_each_entry_safe(folio, next, src_list, lru) { 3773 int i; 3774 3775 if (folio_test_hugetlb_vmemmap_optimized(folio)) 3776 continue; 3777 3778 list_del(&folio->lru); 3779 3780 split_page_owner(&folio->page, huge_page_order(src), huge_page_order(dst)); 3781 pgalloc_tag_split(folio, huge_page_order(src), huge_page_order(dst)); 3782 3783 for (i = 0; i < pages_per_huge_page(src); i += pages_per_huge_page(dst)) { 3784 struct page *page = folio_page(folio, i); 3785 3786 page->mapping = NULL; 3787 clear_compound_head(page); 3788 prep_compound_page(page, dst->order); 3789 3790 init_new_hugetlb_folio(dst, page_folio(page)); 3791 list_add(&page->lru, &dst_list); 3792 } 3793 } 3794 3795 prep_and_add_allocated_folios(dst, &dst_list); 3796 3797 mutex_unlock(&dst->resize_lock); 3798 3799 return rc; 3800 } 3801 3802 static long demote_pool_huge_page(struct hstate *src, nodemask_t *nodes_allowed, 3803 unsigned long nr_to_demote) 3804 __must_hold(&hugetlb_lock) 3805 { 3806 int nr_nodes, node; 3807 struct hstate *dst; 3808 long rc = 0; 3809 long nr_demoted = 0; 3810 3811 lockdep_assert_held(&hugetlb_lock); 3812 3813 /* We should never get here if no demote order */ 3814 if (!src->demote_order) { 3815 pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n"); 3816 return -EINVAL; /* internal error */ 3817 } 3818 dst = size_to_hstate(PAGE_SIZE << src->demote_order); 3819 3820 for_each_node_mask_to_free(src, nr_nodes, node, nodes_allowed) { 3821 LIST_HEAD(list); 3822 struct folio *folio, *next; 3823 3824 list_for_each_entry_safe(folio, next, &src->hugepage_freelists[node], lru) { 3825 if (folio_test_hwpoison(folio)) 3826 continue; 3827 3828 remove_hugetlb_folio(src, folio, false); 3829 list_add(&folio->lru, &list); 3830 3831 if (++nr_demoted == nr_to_demote) 3832 break; 3833 } 3834 3835 
spin_unlock_irq(&hugetlb_lock); 3836 3837 rc = demote_free_hugetlb_folios(src, dst, &list); 3838 3839 spin_lock_irq(&hugetlb_lock); 3840 3841 list_for_each_entry_safe(folio, next, &list, lru) { 3842 list_del(&folio->lru); 3843 add_hugetlb_folio(src, folio, false); 3844 3845 nr_demoted--; 3846 } 3847 3848 if (rc < 0 || nr_demoted == nr_to_demote) 3849 break; 3850 } 3851 3852 /* 3853 * Not absolutely necessary, but for consistency update max_huge_pages 3854 * based on pool changes for the demoted page. 3855 */ 3856 src->max_huge_pages -= nr_demoted; 3857 dst->max_huge_pages += nr_demoted << (huge_page_order(src) - huge_page_order(dst)); 3858 3859 if (rc < 0) 3860 return rc; 3861 3862 if (nr_demoted) 3863 return nr_demoted; 3864 /* 3865 * Only way to get here is if all pages on free lists are poisoned. 3866 * Return -EBUSY so that caller will not retry. 3867 */ 3868 return -EBUSY; 3869 } 3870 3871 #define HSTATE_ATTR_RO(_name) \ 3872 static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 3873 3874 #define HSTATE_ATTR_WO(_name) \ 3875 static struct kobj_attribute _name##_attr = __ATTR_WO(_name) 3876 3877 #define HSTATE_ATTR(_name) \ 3878 static struct kobj_attribute _name##_attr = __ATTR_RW(_name) 3879 3880 static struct kobject *hugepages_kobj; 3881 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 3882 3883 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); 3884 3885 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) 3886 { 3887 int i; 3888 3889 for (i = 0; i < HUGE_MAX_HSTATE; i++) 3890 if (hstate_kobjs[i] == kobj) { 3891 if (nidp) 3892 *nidp = NUMA_NO_NODE; 3893 return &hstates[i]; 3894 } 3895 3896 return kobj_to_node_hstate(kobj, nidp); 3897 } 3898 3899 static ssize_t nr_hugepages_show_common(struct kobject *kobj, 3900 struct kobj_attribute *attr, char *buf) 3901 { 3902 struct hstate *h; 3903 unsigned long nr_huge_pages; 3904 int nid; 3905 3906 h = kobj_to_hstate(kobj, &nid); 3907 if (nid == NUMA_NO_NODE) 3908 nr_huge_pages = h->nr_huge_pages; 3909 else 3910 nr_huge_pages = h->nr_huge_pages_node[nid]; 3911 3912 return sysfs_emit(buf, "%lu\n", nr_huge_pages); 3913 } 3914 3915 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy, 3916 struct hstate *h, int nid, 3917 unsigned long count, size_t len) 3918 { 3919 int err; 3920 nodemask_t nodes_allowed, *n_mask; 3921 3922 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 3923 return -EINVAL; 3924 3925 if (nid == NUMA_NO_NODE) { 3926 /* 3927 * global hstate attribute 3928 */ 3929 if (!(obey_mempolicy && 3930 init_nodemask_of_mempolicy(&nodes_allowed))) 3931 n_mask = &node_states[N_MEMORY]; 3932 else 3933 n_mask = &nodes_allowed; 3934 } else { 3935 /* 3936 * Node specific request. count adjustment happens in 3937 * set_max_huge_pages() after acquiring hugetlb_lock. 3938 */ 3939 init_nodemask_of_node(&nodes_allowed, nid); 3940 n_mask = &nodes_allowed; 3941 } 3942 3943 err = set_max_huge_pages(h, count, nid, n_mask); 3944 3945 return err ? 
err : len; 3946 } 3947 3948 static ssize_t nr_hugepages_store_common(bool obey_mempolicy, 3949 struct kobject *kobj, const char *buf, 3950 size_t len) 3951 { 3952 struct hstate *h; 3953 unsigned long count; 3954 int nid; 3955 int err; 3956 3957 err = kstrtoul(buf, 10, &count); 3958 if (err) 3959 return err; 3960 3961 h = kobj_to_hstate(kobj, &nid); 3962 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); 3963 } 3964 3965 static ssize_t nr_hugepages_show(struct kobject *kobj, 3966 struct kobj_attribute *attr, char *buf) 3967 { 3968 return nr_hugepages_show_common(kobj, attr, buf); 3969 } 3970 3971 static ssize_t nr_hugepages_store(struct kobject *kobj, 3972 struct kobj_attribute *attr, const char *buf, size_t len) 3973 { 3974 return nr_hugepages_store_common(false, kobj, buf, len); 3975 } 3976 HSTATE_ATTR(nr_hugepages); 3977 3978 #ifdef CONFIG_NUMA 3979 3980 /* 3981 * hstate attribute for optionally mempolicy-based constraint on persistent 3982 * huge page alloc/free. 3983 */ 3984 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, 3985 struct kobj_attribute *attr, 3986 char *buf) 3987 { 3988 return nr_hugepages_show_common(kobj, attr, buf); 3989 } 3990 3991 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, 3992 struct kobj_attribute *attr, const char *buf, size_t len) 3993 { 3994 return nr_hugepages_store_common(true, kobj, buf, len); 3995 } 3996 HSTATE_ATTR(nr_hugepages_mempolicy); 3997 #endif 3998 3999 4000 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 4001 struct kobj_attribute *attr, char *buf) 4002 { 4003 struct hstate *h = kobj_to_hstate(kobj, NULL); 4004 return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages); 4005 } 4006 4007 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 4008 struct kobj_attribute *attr, const char *buf, size_t count) 4009 { 4010 int err; 4011 unsigned long input; 4012 struct hstate *h = kobj_to_hstate(kobj, NULL); 4013 4014 if (hstate_is_gigantic(h)) 4015 return -EINVAL; 4016 4017 err = kstrtoul(buf, 10, &input); 4018 if (err) 4019 return err; 4020 4021 spin_lock_irq(&hugetlb_lock); 4022 h->nr_overcommit_huge_pages = input; 4023 spin_unlock_irq(&hugetlb_lock); 4024 4025 return count; 4026 } 4027 HSTATE_ATTR(nr_overcommit_hugepages); 4028 4029 static ssize_t free_hugepages_show(struct kobject *kobj, 4030 struct kobj_attribute *attr, char *buf) 4031 { 4032 struct hstate *h; 4033 unsigned long free_huge_pages; 4034 int nid; 4035 4036 h = kobj_to_hstate(kobj, &nid); 4037 if (nid == NUMA_NO_NODE) 4038 free_huge_pages = h->free_huge_pages; 4039 else 4040 free_huge_pages = h->free_huge_pages_node[nid]; 4041 4042 return sysfs_emit(buf, "%lu\n", free_huge_pages); 4043 } 4044 HSTATE_ATTR_RO(free_hugepages); 4045 4046 static ssize_t resv_hugepages_show(struct kobject *kobj, 4047 struct kobj_attribute *attr, char *buf) 4048 { 4049 struct hstate *h = kobj_to_hstate(kobj, NULL); 4050 return sysfs_emit(buf, "%lu\n", h->resv_huge_pages); 4051 } 4052 HSTATE_ATTR_RO(resv_hugepages); 4053 4054 static ssize_t surplus_hugepages_show(struct kobject *kobj, 4055 struct kobj_attribute *attr, char *buf) 4056 { 4057 struct hstate *h; 4058 unsigned long surplus_huge_pages; 4059 int nid; 4060 4061 h = kobj_to_hstate(kobj, &nid); 4062 if (nid == NUMA_NO_NODE) 4063 surplus_huge_pages = h->surplus_huge_pages; 4064 else 4065 surplus_huge_pages = h->surplus_huge_pages_node[nid]; 4066 4067 return sysfs_emit(buf, "%lu\n", surplus_huge_pages); 4068 } 4069 HSTATE_ATTR_RO(surplus_hugepages); 4070 4071 static 
ssize_t demote_store(struct kobject *kobj, 4072 struct kobj_attribute *attr, const char *buf, size_t len) 4073 { 4074 unsigned long nr_demote;
4075 unsigned long nr_available; 4076 nodemask_t nodes_allowed, *n_mask; 4077 struct hstate *h; 4078 int err; 4079 int nid; 4080
4081 err = kstrtoul(buf, 10, &nr_demote); 4082 if (err) 4083 return err; 4084 h = kobj_to_hstate(kobj, &nid); 4085
4086 if (nid != NUMA_NO_NODE) { 4087 init_nodemask_of_node(&nodes_allowed, nid); 4088 n_mask = &nodes_allowed; 4089 } else { 4090 n_mask = &node_states[N_MEMORY]; 4091 } 4092
4093 /* Synchronize with other sysfs operations modifying huge pages */ 4094 mutex_lock(&h->resize_lock); 4095 spin_lock_irq(&hugetlb_lock); 4096
4097 while (nr_demote) { 4098 long rc; 4099
4100 /* 4101 * Check for available pages to demote each time through the 4102 * loop as demote_pool_huge_page will drop hugetlb_lock. 4103 */
4104 if (nid != NUMA_NO_NODE) 4105 nr_available = h->free_huge_pages_node[nid]; 4106 else 4107 nr_available = h->free_huge_pages;
4108 nr_available -= h->resv_huge_pages; 4109 if (!nr_available) 4110 break; 4111
4112 rc = demote_pool_huge_page(h, n_mask, nr_demote); 4113 if (rc < 0) { 4114 err = rc; 4115 break; 4116 } 4117
4118 nr_demote -= rc; 4119 } 4120
4121 spin_unlock_irq(&hugetlb_lock); 4122 mutex_unlock(&h->resize_lock); 4123
4124 if (err) 4125 return err; 4126 return len; 4127 } 4128 HSTATE_ATTR_WO(demote); 4129
4130 static ssize_t demote_size_show(struct kobject *kobj, 4131 struct kobj_attribute *attr, char *buf) 4132 { 4133 struct hstate *h = kobj_to_hstate(kobj, NULL);
4134 unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K; 4135
4136 return sysfs_emit(buf, "%lukB\n", demote_size); 4137 } 4138
4139 static ssize_t demote_size_store(struct kobject *kobj, 4140 struct kobj_attribute *attr, 4141 const char *buf, size_t count) 4142 { 4143 struct hstate *h, *demote_hstate;
4144 unsigned long demote_size; 4145 unsigned int demote_order; 4146
4147 demote_size = (unsigned long)memparse(buf, NULL); 4148
4149 demote_hstate = size_to_hstate(demote_size); 4150 if (!demote_hstate) 4151 return -EINVAL; 4152 demote_order = demote_hstate->order;
4153 if (demote_order < HUGETLB_PAGE_ORDER) 4154 return -EINVAL; 4155
4156 /* demote order must be smaller than hstate order */ 4157 h = kobj_to_hstate(kobj, NULL); 4158 if (demote_order >= h->order) 4159 return -EINVAL; 4160
4161 /* resize_lock synchronizes access to demote size and writes */ 4162 mutex_lock(&h->resize_lock); 4163 h->demote_order = demote_order; 4164 mutex_unlock(&h->resize_lock); 4165
4166 return count; 4167 } 4168 HSTATE_ATTR(demote_size); 4169
4170 static struct attribute *hstate_attrs[] = { 4171 &nr_hugepages_attr.attr, 4172 &nr_overcommit_hugepages_attr.attr, 4173 &free_hugepages_attr.attr,
4174 &resv_hugepages_attr.attr, 4175 &surplus_hugepages_attr.attr, 4176 #ifdef CONFIG_NUMA 4177 &nr_hugepages_mempolicy_attr.attr, 4178 #endif 4179 NULL, 4180 }; 4181
4182 static const struct attribute_group hstate_attr_group = { 4183 .attrs = hstate_attrs, 4184 }; 4185
4186 static struct attribute *hstate_demote_attrs[] = { 4187 &demote_size_attr.attr, 4188 &demote_attr.attr, 4189 NULL, 4190 }; 4191
4192 static const struct attribute_group hstate_demote_attr_group = { 4193 .attrs = hstate_demote_attrs, 4194 }; 4195
4196 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, 4197 struct kobject **hstate_kobjs, 4198 const struct attribute_group *hstate_attr_group) 4199 { 4200 int retval; 4201 int hi = hstate_index(h); 4202 4203
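/*
 * Create the "hugepages-<size>kB" directory for this hstate under @parent
 * (the global /sys/kernel/mm/hugepages kobject, or a node device's
 * "hugepages" kobject for the per-node variant) and populate it with
 * @hstate_attr_group.
 */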
hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); 4204 if (!hstate_kobjs[hi]) 4205 return -ENOMEM; 4206 4207 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); 4208 if (retval) { 4209 kobject_put(hstate_kobjs[hi]); 4210 hstate_kobjs[hi] = NULL; 4211 return retval; 4212 } 4213 4214 if (h->demote_order) { 4215 retval = sysfs_create_group(hstate_kobjs[hi], 4216 &hstate_demote_attr_group); 4217 if (retval) { 4218 pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name); 4219 sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group); 4220 kobject_put(hstate_kobjs[hi]); 4221 hstate_kobjs[hi] = NULL; 4222 return retval; 4223 } 4224 } 4225 4226 return 0; 4227 } 4228 4229 #ifdef CONFIG_NUMA 4230 static bool hugetlb_sysfs_initialized __ro_after_init; 4231 4232 /* 4233 * node_hstate/s - associate per node hstate attributes, via their kobjects, 4234 * with node devices in node_devices[] using a parallel array. The array 4235 * index of a node device or _hstate == node id. 4236 * This is here to avoid any static dependency of the node device driver, in 4237 * the base kernel, on the hugetlb module. 4238 */ 4239 struct node_hstate { 4240 struct kobject *hugepages_kobj; 4241 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 4242 }; 4243 static struct node_hstate node_hstates[MAX_NUMNODES]; 4244 4245 /* 4246 * A subset of global hstate attributes for node devices 4247 */ 4248 static struct attribute *per_node_hstate_attrs[] = { 4249 &nr_hugepages_attr.attr, 4250 &free_hugepages_attr.attr, 4251 &surplus_hugepages_attr.attr, 4252 NULL, 4253 }; 4254 4255 static const struct attribute_group per_node_hstate_attr_group = { 4256 .attrs = per_node_hstate_attrs, 4257 }; 4258 4259 /* 4260 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. 4261 * Returns node id via non-NULL nidp. 4262 */ 4263 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 4264 { 4265 int nid; 4266 4267 for (nid = 0; nid < nr_node_ids; nid++) { 4268 struct node_hstate *nhs = &node_hstates[nid]; 4269 int i; 4270 for (i = 0; i < HUGE_MAX_HSTATE; i++) 4271 if (nhs->hstate_kobjs[i] == kobj) { 4272 if (nidp) 4273 *nidp = nid; 4274 return &hstates[i]; 4275 } 4276 } 4277 4278 BUG(); 4279 return NULL; 4280 } 4281 4282 /* 4283 * Unregister hstate attributes from a single node device. 4284 * No-op if no hstate attributes attached. 4285 */ 4286 void hugetlb_unregister_node(struct node *node) 4287 { 4288 struct hstate *h; 4289 struct node_hstate *nhs = &node_hstates[node->dev.id]; 4290 4291 if (!nhs->hugepages_kobj) 4292 return; /* no hstate attributes */ 4293 4294 for_each_hstate(h) { 4295 int idx = hstate_index(h); 4296 struct kobject *hstate_kobj = nhs->hstate_kobjs[idx]; 4297 4298 if (!hstate_kobj) 4299 continue; 4300 if (h->demote_order) 4301 sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group); 4302 sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group); 4303 kobject_put(hstate_kobj); 4304 nhs->hstate_kobjs[idx] = NULL; 4305 } 4306 4307 kobject_put(nhs->hugepages_kobj); 4308 nhs->hugepages_kobj = NULL; 4309 } 4310 4311 4312 /* 4313 * Register hstate attributes for a single node device. 4314 * No-op if attributes already registered. 
4315 */ 4316 void hugetlb_register_node(struct node *node) 4317 { 4318 struct hstate *h; 4319 struct node_hstate *nhs = &node_hstates[node->dev.id]; 4320 int err; 4321 4322 if (!hugetlb_sysfs_initialized) 4323 return; 4324 4325 if (nhs->hugepages_kobj) 4326 return; /* already allocated */ 4327 4328 nhs->hugepages_kobj = kobject_create_and_add("hugepages", 4329 &node->dev.kobj); 4330 if (!nhs->hugepages_kobj) 4331 return; 4332 4333 for_each_hstate(h) { 4334 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, 4335 nhs->hstate_kobjs, 4336 &per_node_hstate_attr_group); 4337 if (err) { 4338 pr_err("HugeTLB: Unable to add hstate %s for node %d\n", 4339 h->name, node->dev.id); 4340 hugetlb_unregister_node(node); 4341 break; 4342 } 4343 } 4344 } 4345 4346 /* 4347 * hugetlb init time: register hstate attributes for all registered node 4348 * devices of nodes that have memory. All on-line nodes should have 4349 * registered their associated device by this time. 4350 */ 4351 static void __init hugetlb_register_all_nodes(void) 4352 { 4353 int nid; 4354 4355 for_each_online_node(nid) 4356 hugetlb_register_node(node_devices[nid]); 4357 } 4358 #else /* !CONFIG_NUMA */ 4359 4360 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 4361 { 4362 BUG(); 4363 if (nidp) 4364 *nidp = -1; 4365 return NULL; 4366 } 4367 4368 static void hugetlb_register_all_nodes(void) { } 4369 4370 #endif 4371 4372 #ifdef CONFIG_CMA 4373 static void __init hugetlb_cma_check(void); 4374 #else 4375 static inline __init void hugetlb_cma_check(void) 4376 { 4377 } 4378 #endif 4379 4380 static void __init hugetlb_sysfs_init(void) 4381 { 4382 struct hstate *h; 4383 int err; 4384 4385 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 4386 if (!hugepages_kobj) 4387 return; 4388 4389 for_each_hstate(h) { 4390 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, 4391 hstate_kobjs, &hstate_attr_group); 4392 if (err) 4393 pr_err("HugeTLB: Unable to add hstate %s", h->name); 4394 } 4395 4396 #ifdef CONFIG_NUMA 4397 hugetlb_sysfs_initialized = true; 4398 #endif 4399 hugetlb_register_all_nodes(); 4400 } 4401 4402 #ifdef CONFIG_SYSCTL 4403 static void hugetlb_sysctl_init(void); 4404 #else 4405 static inline void hugetlb_sysctl_init(void) { } 4406 #endif 4407 4408 static int __init hugetlb_init(void) 4409 { 4410 int i; 4411 4412 BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE < 4413 __NR_HPAGEFLAGS); 4414 4415 if (!hugepages_supported()) { 4416 if (hugetlb_max_hstate || default_hstate_max_huge_pages) 4417 pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n"); 4418 return 0; 4419 } 4420 4421 /* 4422 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some 4423 * architectures depend on setup being done here. 4424 */ 4425 hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 4426 if (!parsed_default_hugepagesz) { 4427 /* 4428 * If we did not parse a default huge page size, set 4429 * default_hstate_idx to HPAGE_SIZE hstate. And, if the 4430 * number of huge pages for this default size was implicitly 4431 * specified, set that here as well. 4432 * Note that the implicit setting will overwrite an explicit 4433 * setting. A warning will be printed in this case. 
4434 */ 4435 default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE)); 4436 if (default_hstate_max_huge_pages) { 4437 if (default_hstate.max_huge_pages) { 4438 char buf[32]; 4439
4440 string_get_size(huge_page_size(&default_hstate), 4441 1, STRING_UNITS_2, buf, 32);
4442 pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n", 4443 default_hstate.max_huge_pages, buf);
4444 pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n", 4445 default_hstate_max_huge_pages); 4446 }
4447 default_hstate.max_huge_pages = 4448 default_hstate_max_huge_pages; 4449
4450 for_each_online_node(i) 4451 default_hstate.max_huge_pages_node[i] = 4452 default_hugepages_in_node[i]; 4453 } 4454 } 4455
4456 hugetlb_cma_check(); 4457 hugetlb_init_hstates(); 4458 gather_bootmem_prealloc(); 4459 report_hugepages(); 4460
4461 hugetlb_sysfs_init(); 4462 hugetlb_cgroup_file_init(); 4463 hugetlb_sysctl_init(); 4464
4465 #ifdef CONFIG_SMP 4466 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus()); 4467 #else 4468 num_fault_mutexes = 1; 4469 #endif
4470 hugetlb_fault_mutex_table = 4471 kmalloc_array(num_fault_mutexes, sizeof(struct mutex), 4472 GFP_KERNEL); 4473 BUG_ON(!hugetlb_fault_mutex_table); 4474
4475 for (i = 0; i < num_fault_mutexes; i++) 4476 mutex_init(&hugetlb_fault_mutex_table[i]); 4477 return 0; 4478 } 4479 subsys_initcall(hugetlb_init); 4480
4481 /* Overwritten by architectures with more huge page sizes */ 4482 bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size) 4483 { 4484 return size == HPAGE_SIZE; 4485 } 4486
4487 void __init hugetlb_add_hstate(unsigned int order) 4488 { 4489 struct hstate *h; 4490 unsigned long i; 4491
4492 if (size_to_hstate(PAGE_SIZE << order)) { 4493 return; 4494 } 4495 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE); 4496 BUG_ON(order < order_base_2(__NR_USED_SUBPAGE));
4497 h = &hstates[hugetlb_max_hstate++]; 4498 __mutex_init(&h->resize_lock, "resize mutex", &h->resize_key); 4499 h->order = order; 4500 h->mask = ~(huge_page_size(h) - 1);
4501 for (i = 0; i < MAX_NUMNODES; ++i) 4502 INIT_LIST_HEAD(&h->hugepage_freelists[i]); 4503 INIT_LIST_HEAD(&h->hugepage_activelist);
4504 h->next_nid_to_alloc = first_memory_node; 4505 h->next_nid_to_free = first_memory_node; 4506 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", 4507 huge_page_size(h)/SZ_1K); 4508
4509 parsed_hstate = h; 4510 } 4511
4512 bool __init __weak hugetlb_node_alloc_supported(void) 4513 { 4514 return true; 4515 } 4516
4517 static void __init hugepages_clear_pages_in_node(void) 4518 { 4519 if (!hugetlb_max_hstate) { 4520 default_hstate_max_huge_pages = 0;
4521 memset(default_hugepages_in_node, 0, 4522 sizeof(default_hugepages_in_node)); 4523 } else { 4524 parsed_hstate->max_huge_pages = 0;
4525 memset(parsed_hstate->max_huge_pages_node, 0, 4526 sizeof(parsed_hstate->max_huge_pages_node)); 4527 } 4528 } 4529
4530 /* 4531 * hugepages command line processing 4532 * hugepages normally follows a valid hugepagesz or default_hugepagesz 4533 * specification. If not, ignore the hugepages value. hugepages can also 4534 * be the first huge page command line option in which case it implicitly 4535 * specifies the number of huge pages for the default size.
4536 */ 4537 static int __init hugepages_setup(char *s) 4538 { 4539 unsigned long *mhp; 4540 static unsigned long *last_mhp; 4541 int node = NUMA_NO_NODE; 4542 int count; 4543 unsigned long tmp; 4544 char *p = s; 4545 4546 if (!parsed_valid_hugepagesz) { 4547 pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s); 4548 parsed_valid_hugepagesz = true; 4549 return 1; 4550 } 4551 4552 /* 4553 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter 4554 * yet, so this hugepages= parameter goes to the "default hstate". 4555 * Otherwise, it goes with the previously parsed hugepagesz or 4556 * default_hugepagesz. 4557 */ 4558 else if (!hugetlb_max_hstate) 4559 mhp = &default_hstate_max_huge_pages; 4560 else 4561 mhp = &parsed_hstate->max_huge_pages; 4562 4563 if (mhp == last_mhp) { 4564 pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s); 4565 return 1; 4566 } 4567 4568 while (*p) { 4569 count = 0; 4570 if (sscanf(p, "%lu%n", &tmp, &count) != 1) 4571 goto invalid; 4572 /* Parameter is node format */ 4573 if (p[count] == ':') { 4574 if (!hugetlb_node_alloc_supported()) { 4575 pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n"); 4576 return 1; 4577 } 4578 if (tmp >= MAX_NUMNODES || !node_online(tmp)) 4579 goto invalid; 4580 node = array_index_nospec(tmp, MAX_NUMNODES); 4581 p += count + 1; 4582 /* Parse hugepages */ 4583 if (sscanf(p, "%lu%n", &tmp, &count) != 1) 4584 goto invalid; 4585 if (!hugetlb_max_hstate) 4586 default_hugepages_in_node[node] = tmp; 4587 else 4588 parsed_hstate->max_huge_pages_node[node] = tmp; 4589 *mhp += tmp; 4590 /* Go to parse next node*/ 4591 if (p[count] == ',') 4592 p += count + 1; 4593 else 4594 break; 4595 } else { 4596 if (p != s) 4597 goto invalid; 4598 *mhp = tmp; 4599 break; 4600 } 4601 } 4602 4603 /* 4604 * Global state is always initialized later in hugetlb_init. 4605 * But we need to allocate gigantic hstates here early to still 4606 * use the bootmem allocator. 4607 */ 4608 if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate)) 4609 hugetlb_hstate_alloc_pages(parsed_hstate); 4610 4611 last_mhp = mhp; 4612 4613 return 1; 4614 4615 invalid: 4616 pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p); 4617 hugepages_clear_pages_in_node(); 4618 return 1; 4619 } 4620 __setup("hugepages=", hugepages_setup); 4621 4622 /* 4623 * hugepagesz command line processing 4624 * A specific huge page size can only be specified once with hugepagesz. 4625 * hugepagesz is followed by hugepages on the command line. The global 4626 * variable 'parsed_valid_hugepagesz' is used to determine if prior 4627 * hugepagesz argument was valid. 4628 */ 4629 static int __init hugepagesz_setup(char *s) 4630 { 4631 unsigned long size; 4632 struct hstate *h; 4633 4634 parsed_valid_hugepagesz = false; 4635 size = (unsigned long)memparse(s, NULL); 4636 4637 if (!arch_hugetlb_valid_size(size)) { 4638 pr_err("HugeTLB: unsupported hugepagesz=%s\n", s); 4639 return 1; 4640 } 4641 4642 h = size_to_hstate(size); 4643 if (h) { 4644 /* 4645 * hstate for this size already exists. This is normally 4646 * an error, but is allowed if the existing hstate is the 4647 * default hstate. More specifically, it is only allowed if 4648 * the number of huge pages for the default hstate was not 4649 * previously specified. 
4650 */ 4651 if (!parsed_default_hugepagesz || h != &default_hstate || 4652 default_hstate.max_huge_pages) { 4653 pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s); 4654 return 1; 4655 } 4656 4657 /* 4658 * No need to call hugetlb_add_hstate() as hstate already 4659 * exists. But, do set parsed_hstate so that a following 4660 * hugepages= parameter will be applied to this hstate. 4661 */ 4662 parsed_hstate = h; 4663 parsed_valid_hugepagesz = true; 4664 return 1; 4665 } 4666 4667 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); 4668 parsed_valid_hugepagesz = true; 4669 return 1; 4670 } 4671 __setup("hugepagesz=", hugepagesz_setup); 4672 4673 /* 4674 * default_hugepagesz command line input 4675 * Only one instance of default_hugepagesz allowed on command line. 4676 */ 4677 static int __init default_hugepagesz_setup(char *s) 4678 { 4679 unsigned long size; 4680 int i; 4681 4682 parsed_valid_hugepagesz = false; 4683 if (parsed_default_hugepagesz) { 4684 pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s); 4685 return 1; 4686 } 4687 4688 size = (unsigned long)memparse(s, NULL); 4689 4690 if (!arch_hugetlb_valid_size(size)) { 4691 pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s); 4692 return 1; 4693 } 4694 4695 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); 4696 parsed_valid_hugepagesz = true; 4697 parsed_default_hugepagesz = true; 4698 default_hstate_idx = hstate_index(size_to_hstate(size)); 4699 4700 /* 4701 * The number of default huge pages (for this size) could have been 4702 * specified as the first hugetlb parameter: hugepages=X. If so, 4703 * then default_hstate_max_huge_pages is set. If the default huge 4704 * page size is gigantic (> MAX_PAGE_ORDER), then the pages must be 4705 * allocated here from bootmem allocator. 4706 */ 4707 if (default_hstate_max_huge_pages) { 4708 default_hstate.max_huge_pages = default_hstate_max_huge_pages; 4709 for_each_online_node(i) 4710 default_hstate.max_huge_pages_node[i] = 4711 default_hugepages_in_node[i]; 4712 if (hstate_is_gigantic(&default_hstate)) 4713 hugetlb_hstate_alloc_pages(&default_hstate); 4714 default_hstate_max_huge_pages = 0; 4715 } 4716 4717 return 1; 4718 } 4719 __setup("default_hugepagesz=", default_hugepagesz_setup); 4720 4721 static unsigned int allowed_mems_nr(struct hstate *h) 4722 { 4723 int node; 4724 unsigned int nr = 0; 4725 nodemask_t *mbind_nodemask; 4726 unsigned int *array = h->free_huge_pages_node; 4727 gfp_t gfp_mask = htlb_alloc_mask(h); 4728 4729 mbind_nodemask = policy_mbind_nodemask(gfp_mask); 4730 for_each_node_mask(node, cpuset_current_mems_allowed) { 4731 if (!mbind_nodemask || node_isset(node, *mbind_nodemask)) 4732 nr += array[node]; 4733 } 4734 4735 return nr; 4736 } 4737 4738 #ifdef CONFIG_SYSCTL 4739 static int proc_hugetlb_doulongvec_minmax(const struct ctl_table *table, int write, 4740 void *buffer, size_t *length, 4741 loff_t *ppos, unsigned long *out) 4742 { 4743 struct ctl_table dup_table; 4744 4745 /* 4746 * In order to avoid races with __do_proc_doulongvec_minmax(), we 4747 * can duplicate the @table and alter the duplicate of it. 
4748 */ 4749 dup_table = *table; 4750 dup_table.data = out; 4751 4752 return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos); 4753 } 4754 4755 static int hugetlb_sysctl_handler_common(bool obey_mempolicy, 4756 const struct ctl_table *table, int write, 4757 void *buffer, size_t *length, loff_t *ppos) 4758 { 4759 struct hstate *h = &default_hstate; 4760 unsigned long tmp = h->max_huge_pages; 4761 int ret; 4762 4763 if (!hugepages_supported()) 4764 return -EOPNOTSUPP; 4765 4766 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 4767 &tmp); 4768 if (ret) 4769 goto out; 4770 4771 if (write) 4772 ret = __nr_hugepages_store_common(obey_mempolicy, h, 4773 NUMA_NO_NODE, tmp, *length); 4774 out: 4775 return ret; 4776 } 4777 4778 static int hugetlb_sysctl_handler(const struct ctl_table *table, int write, 4779 void *buffer, size_t *length, loff_t *ppos) 4780 { 4781 4782 return hugetlb_sysctl_handler_common(false, table, write, 4783 buffer, length, ppos); 4784 } 4785 4786 #ifdef CONFIG_NUMA 4787 static int hugetlb_mempolicy_sysctl_handler(const struct ctl_table *table, int write, 4788 void *buffer, size_t *length, loff_t *ppos) 4789 { 4790 return hugetlb_sysctl_handler_common(true, table, write, 4791 buffer, length, ppos); 4792 } 4793 #endif /* CONFIG_NUMA */ 4794 4795 static int hugetlb_overcommit_handler(const struct ctl_table *table, int write, 4796 void *buffer, size_t *length, loff_t *ppos) 4797 { 4798 struct hstate *h = &default_hstate; 4799 unsigned long tmp; 4800 int ret; 4801 4802 if (!hugepages_supported()) 4803 return -EOPNOTSUPP; 4804 4805 tmp = h->nr_overcommit_huge_pages; 4806 4807 if (write && hstate_is_gigantic(h)) 4808 return -EINVAL; 4809 4810 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 4811 &tmp); 4812 if (ret) 4813 goto out; 4814 4815 if (write) { 4816 spin_lock_irq(&hugetlb_lock); 4817 h->nr_overcommit_huge_pages = tmp; 4818 spin_unlock_irq(&hugetlb_lock); 4819 } 4820 out: 4821 return ret; 4822 } 4823 4824 static struct ctl_table hugetlb_table[] = { 4825 { 4826 .procname = "nr_hugepages", 4827 .data = NULL, 4828 .maxlen = sizeof(unsigned long), 4829 .mode = 0644, 4830 .proc_handler = hugetlb_sysctl_handler, 4831 }, 4832 #ifdef CONFIG_NUMA 4833 { 4834 .procname = "nr_hugepages_mempolicy", 4835 .data = NULL, 4836 .maxlen = sizeof(unsigned long), 4837 .mode = 0644, 4838 .proc_handler = &hugetlb_mempolicy_sysctl_handler, 4839 }, 4840 #endif 4841 { 4842 .procname = "hugetlb_shm_group", 4843 .data = &sysctl_hugetlb_shm_group, 4844 .maxlen = sizeof(gid_t), 4845 .mode = 0644, 4846 .proc_handler = proc_dointvec, 4847 }, 4848 { 4849 .procname = "nr_overcommit_hugepages", 4850 .data = NULL, 4851 .maxlen = sizeof(unsigned long), 4852 .mode = 0644, 4853 .proc_handler = hugetlb_overcommit_handler, 4854 }, 4855 }; 4856 4857 static void hugetlb_sysctl_init(void) 4858 { 4859 register_sysctl_init("vm", hugetlb_table); 4860 } 4861 #endif /* CONFIG_SYSCTL */ 4862 4863 void hugetlb_report_meminfo(struct seq_file *m) 4864 { 4865 struct hstate *h; 4866 unsigned long total = 0; 4867 4868 if (!hugepages_supported()) 4869 return; 4870 4871 for_each_hstate(h) { 4872 unsigned long count = h->nr_huge_pages; 4873 4874 total += huge_page_size(h) * count; 4875 4876 if (h == &default_hstate) 4877 seq_printf(m, 4878 "HugePages_Total: %5lu\n" 4879 "HugePages_Free: %5lu\n" 4880 "HugePages_Rsvd: %5lu\n" 4881 "HugePages_Surp: %5lu\n" 4882 "Hugepagesize: %8lu kB\n", 4883 count, 4884 h->free_huge_pages, 4885 h->resv_huge_pages, 4886 h->surplus_huge_pages, 
4887 huge_page_size(h) / SZ_1K); 4888 } 4889
4890 seq_printf(m, "Hugetlb: %8lu kB\n", total / SZ_1K); 4891 } 4892
4893 int hugetlb_report_node_meminfo(char *buf, int len, int nid) 4894 { 4895 struct hstate *h = &default_hstate; 4896
4897 if (!hugepages_supported()) 4898 return 0; 4899
4900 return sysfs_emit_at(buf, len, 4901 "Node %d HugePages_Total: %5u\n" 4902 "Node %d HugePages_Free: %5u\n" 4903 "Node %d HugePages_Surp: %5u\n",
4904 nid, h->nr_huge_pages_node[nid], 4905 nid, h->free_huge_pages_node[nid], 4906 nid, h->surplus_huge_pages_node[nid]); 4907 } 4908
4909 void hugetlb_show_meminfo_node(int nid) 4910 { 4911 struct hstate *h; 4912
4913 if (!hugepages_supported()) 4914 return; 4915
4916 for_each_hstate(h) 4917 printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
4918 nid, 4919 h->nr_huge_pages_node[nid], 4920 h->free_huge_pages_node[nid], 4921 h->surplus_huge_pages_node[nid], 4922 huge_page_size(h) / SZ_1K); 4923 } 4924
4925 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm) 4926 { 4927 seq_printf(m, "HugetlbPages:\t%8lu kB\n", 4928 K(atomic_long_read(&mm->hugetlb_usage))); 4929 } 4930
4931 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */ 4932 unsigned long hugetlb_total_pages(void) 4933 { 4934 struct hstate *h; 4935 unsigned long nr_total_pages = 0; 4936
4937 for_each_hstate(h) 4938 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); 4939 return nr_total_pages; 4940 } 4941
4942 static int hugetlb_acct_memory(struct hstate *h, long delta) 4943 { 4944 int ret = -ENOMEM; 4945
4946 if (!delta) 4947 return 0; 4948
4949 spin_lock_irq(&hugetlb_lock);
4950 /* 4951 * When cpuset is configured, it breaks the strict hugetlb page 4952 * reservation as the accounting is done on a global variable. Such 4953 * reservation is completely rubbish in the presence of cpuset because 4954 * the reservation is not checked against page availability for the 4955 * current cpuset. An application can still potentially be OOM'ed by the 4956 * kernel due to a lack of free hugetlb pages in the cpuset that the task is in. 4957 * Attempting to enforce strict accounting with cpuset is almost 4958 * impossible (or too ugly) because cpusets are so fluid that 4959 * tasks or memory nodes can be dynamically moved between cpusets. 4960 *
4961 * The change of semantics for shared hugetlb mapping with cpuset is 4962 * undesirable. However, in order to preserve some of the semantics, 4963 * we fall back to check against current free page availability as 4964 * a best attempt and hopefully to minimize the impact of changing 4965 * semantics that cpuset has. 4966 *
4967 * Apart from cpuset, we also have the memory policy mechanism that 4968 * determines from which node the kernel will allocate memory 4969 * in a NUMA system. So, similar to cpuset, we should also consider 4970 * the memory policy of the current task, as described 4971 * above. 4972 */
4973 if (delta > 0) { 4974 if (gather_surplus_pages(h, delta) < 0) 4975 goto out; 4976
4977 if (delta > allowed_mems_nr(h)) { 4978 return_unused_surplus_pages(h, delta); 4979 goto out; 4980 } 4981 } 4982
4983 ret = 0; 4984 if (delta < 0) 4985 return_unused_surplus_pages(h, (unsigned long) -delta); 4986
4987 out: 4988 spin_unlock_irq(&hugetlb_lock); 4989 return ret; 4990 } 4991
4992 static void hugetlb_vm_op_open(struct vm_area_struct *vma) 4993 { 4994 struct resv_map *resv = vma_resv_map(vma); 4995
4996 /* 4997 * HPAGE_RESV_OWNER indicates a private mapping.
4998 * This new VMA should share its sibling's reservation map if present. 4999 * The VMA will only ever have a valid reservation map pointer where 5000 * it is being copied for another still existing VMA. As that VMA 5001 * has a reference to the reservation map it cannot disappear until 5002 * after this open call completes. It is therefore safe to take a 5003 * new reference here without additional locking. 5004 */
5005 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 5006 resv_map_dup_hugetlb_cgroup_uncharge_info(resv); 5007 kref_get(&resv->refs); 5008 } 5009
5010 /* 5011 * vma_lock structure for sharable mappings is vma specific. 5012 * Clear old pointer (if copied via vm_area_dup) and allocate 5013 * new structure. Before clearing, make sure vma_lock is not 5014 * for this vma. 5015 */
5016 if (vma->vm_flags & VM_MAYSHARE) { 5017 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 5018
5019 if (vma_lock) { 5020 if (vma_lock->vma != vma) { 5021 vma->vm_private_data = NULL; 5022 hugetlb_vma_lock_alloc(vma); 5023 } else
5024 pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__); 5025 } else 5026 hugetlb_vma_lock_alloc(vma); 5027 } 5028 } 5029
5030 static void hugetlb_vm_op_close(struct vm_area_struct *vma) 5031 { 5032 struct hstate *h = hstate_vma(vma); 5033 struct resv_map *resv; 5034 struct hugepage_subpool *spool = subpool_vma(vma);
5035 unsigned long reserve, start, end; 5036 long gbl_reserve; 5037
5038 hugetlb_vma_lock_free(vma); 5039
5040 resv = vma_resv_map(vma); 5041 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 5042 return; 5043
5044 start = vma_hugecache_offset(h, vma, vma->vm_start); 5045 end = vma_hugecache_offset(h, vma, vma->vm_end); 5046
5047 reserve = (end - start) - region_count(resv, start, end); 5048 hugetlb_cgroup_uncharge_counter(resv, start, end); 5049 if (reserve) {
5050 /* 5051 * Decrement reserve counts. The global reserve count may be 5052 * adjusted if the subpool has a minimum size. 5053 */
5054 gbl_reserve = hugepage_subpool_put_pages(spool, reserve); 5055 hugetlb_acct_memory(h, -gbl_reserve); 5056 } 5057
5058 kref_put(&resv->refs, resv_map_release); 5059 } 5060
5061 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr) 5062 { 5063 if (addr & ~(huge_page_mask(hstate_vma(vma)))) 5064 return -EINVAL; 5065
5066 /* 5067 * PMD sharing is only possible for PUD_SIZE-aligned address ranges 5068 * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this 5069 * split, unshare PMDs in the PUD_SIZE interval surrounding addr now. 5070 */
5071 if (addr & ~PUD_MASK) { 5072 /* 5073 * hugetlb_vm_op_split is called right before we attempt to 5074 * split the VMA. We will need to unshare PMDs in the old and 5075 * new VMAs, so let's unshare before we split. 5076 */
5077 unsigned long floor = addr & PUD_MASK; 5078 unsigned long ceil = floor + PUD_SIZE; 5079
5080 if (floor >= vma->vm_start && ceil <= vma->vm_end) 5081 hugetlb_unshare_pmds(vma, floor, ceil); 5082 } 5083
5084 return 0; 5085 } 5086
5087 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma) 5088 { 5089 return huge_page_size(hstate_vma(vma)); 5090 } 5091
5092 /* 5093 * We cannot handle pagefaults against hugetlb pages at all. They cause 5094 * handle_mm_fault() to try to instantiate regular-sized pages in the 5095 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get 5096 * this far.
5097 */ 5098 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf) 5099 { 5100 BUG(); 5101 return 0; 5102 } 5103 5104 /* 5105 * When a new function is introduced to vm_operations_struct and added 5106 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops. 5107 * This is because under System V memory model, mappings created via 5108 * shmget/shmat with "huge page" specified are backed by hugetlbfs files, 5109 * their original vm_ops are overwritten with shm_vm_ops. 5110 */ 5111 const struct vm_operations_struct hugetlb_vm_ops = { 5112 .fault = hugetlb_vm_op_fault, 5113 .open = hugetlb_vm_op_open, 5114 .close = hugetlb_vm_op_close, 5115 .may_split = hugetlb_vm_op_split, 5116 .pagesize = hugetlb_vm_op_pagesize, 5117 }; 5118 5119 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 5120 int writable) 5121 { 5122 pte_t entry; 5123 unsigned int shift = huge_page_shift(hstate_vma(vma)); 5124 5125 if (writable) { 5126 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, 5127 vma->vm_page_prot))); 5128 } else { 5129 entry = huge_pte_wrprotect(mk_huge_pte(page, 5130 vma->vm_page_prot)); 5131 } 5132 entry = pte_mkyoung(entry); 5133 entry = arch_make_huge_pte(entry, shift, vma->vm_flags); 5134 5135 return entry; 5136 } 5137 5138 static void set_huge_ptep_writable(struct vm_area_struct *vma, 5139 unsigned long address, pte_t *ptep) 5140 { 5141 pte_t entry; 5142 5143 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(vma->vm_mm, address, ptep))); 5144 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) 5145 update_mmu_cache(vma, address, ptep); 5146 } 5147 5148 bool is_hugetlb_entry_migration(pte_t pte) 5149 { 5150 swp_entry_t swp; 5151 5152 if (huge_pte_none(pte) || pte_present(pte)) 5153 return false; 5154 swp = pte_to_swp_entry(pte); 5155 if (is_migration_entry(swp)) 5156 return true; 5157 else 5158 return false; 5159 } 5160 5161 bool is_hugetlb_entry_hwpoisoned(pte_t pte) 5162 { 5163 swp_entry_t swp; 5164 5165 if (huge_pte_none(pte) || pte_present(pte)) 5166 return false; 5167 swp = pte_to_swp_entry(pte); 5168 if (is_hwpoison_entry(swp)) 5169 return true; 5170 else 5171 return false; 5172 } 5173 5174 static void 5175 hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr, 5176 struct folio *new_folio, pte_t old, unsigned long sz) 5177 { 5178 pte_t newpte = make_huge_pte(vma, &new_folio->page, 1); 5179 5180 __folio_mark_uptodate(new_folio); 5181 hugetlb_add_new_anon_rmap(new_folio, vma, addr); 5182 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old)) 5183 newpte = huge_pte_mkuffd_wp(newpte); 5184 set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz); 5185 hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm); 5186 folio_set_hugetlb_migratable(new_folio); 5187 } 5188 5189 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 5190 struct vm_area_struct *dst_vma, 5191 struct vm_area_struct *src_vma) 5192 { 5193 pte_t *src_pte, *dst_pte, entry; 5194 struct folio *pte_folio; 5195 unsigned long addr; 5196 bool cow = is_cow_mapping(src_vma->vm_flags); 5197 struct hstate *h = hstate_vma(src_vma); 5198 unsigned long sz = huge_page_size(h); 5199 unsigned long npages = pages_per_huge_page(h); 5200 struct mmu_notifier_range range; 5201 unsigned long last_addr_mask; 5202 int ret = 0; 5203 5204 if (cow) { 5205 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src, 5206 src_vma->vm_start, 5207 src_vma->vm_end); 5208 mmu_notifier_invalidate_range_start(&range); 5209 
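/*
 * For fork()/COW the caller must already hold the source VMA write-locked
 * (asserted below). Bumping mm->write_protect_seq mirrors what
 * copy_page_range() does for normal pages: it lets lockless GUP-fast
 * detect that entries are concurrently being write-protected and bail
 * out to the slow path.
 */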
vma_assert_write_locked(src_vma); 5210 raw_write_seqcount_begin(&src->write_protect_seq); 5211 } else { 5212 /* 5213 * For shared mappings the vma lock must be held before 5214 * calling hugetlb_walk() in the src vma. Otherwise, the 5215 * returned ptep could go away if part of a shared pmd and 5216 * another thread calls huge_pmd_unshare. 5217 */ 5218 hugetlb_vma_lock_read(src_vma); 5219 } 5220 5221 last_addr_mask = hugetlb_mask_last_page(h); 5222 for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) { 5223 spinlock_t *src_ptl, *dst_ptl; 5224 src_pte = hugetlb_walk(src_vma, addr, sz); 5225 if (!src_pte) { 5226 addr |= last_addr_mask; 5227 continue; 5228 } 5229 dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz); 5230 if (!dst_pte) { 5231 ret = -ENOMEM; 5232 break; 5233 } 5234 5235 /* 5236 * If the pagetables are shared don't copy or take references. 5237 * 5238 * dst_pte == src_pte is the common case of src/dest sharing. 5239 * However, src could have 'unshared' and dst shares with 5240 * another vma. So page_count of ptep page is checked instead 5241 * to reliably determine whether pte is shared. 5242 */ 5243 if (page_count(virt_to_page(dst_pte)) > 1) { 5244 addr |= last_addr_mask; 5245 continue; 5246 } 5247 5248 dst_ptl = huge_pte_lock(h, dst, dst_pte); 5249 src_ptl = huge_pte_lockptr(h, src, src_pte); 5250 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5251 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); 5252 again: 5253 if (huge_pte_none(entry)) { 5254 /* 5255 * Skip if src entry none. 5256 */ 5257 ; 5258 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) { 5259 if (!userfaultfd_wp(dst_vma)) 5260 entry = huge_pte_clear_uffd_wp(entry); 5261 set_huge_pte_at(dst, addr, dst_pte, entry, sz); 5262 } else if (unlikely(is_hugetlb_entry_migration(entry))) { 5263 swp_entry_t swp_entry = pte_to_swp_entry(entry); 5264 bool uffd_wp = pte_swp_uffd_wp(entry); 5265 5266 if (!is_readable_migration_entry(swp_entry) && cow) { 5267 /* 5268 * COW mappings require pages in both 5269 * parent and child to be set to read. 5270 */ 5271 swp_entry = make_readable_migration_entry( 5272 swp_offset(swp_entry)); 5273 entry = swp_entry_to_pte(swp_entry); 5274 if (userfaultfd_wp(src_vma) && uffd_wp) 5275 entry = pte_swp_mkuffd_wp(entry); 5276 set_huge_pte_at(src, addr, src_pte, entry, sz); 5277 } 5278 if (!userfaultfd_wp(dst_vma)) 5279 entry = huge_pte_clear_uffd_wp(entry); 5280 set_huge_pte_at(dst, addr, dst_pte, entry, sz); 5281 } else if (unlikely(is_pte_marker(entry))) { 5282 pte_marker marker = copy_pte_marker( 5283 pte_to_swp_entry(entry), dst_vma); 5284 5285 if (marker) 5286 set_huge_pte_at(dst, addr, dst_pte, 5287 make_pte_marker(marker), sz); 5288 } else { 5289 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); 5290 pte_folio = page_folio(pte_page(entry)); 5291 folio_get(pte_folio); 5292 5293 /* 5294 * Failing to duplicate the anon rmap is a rare case 5295 * where we see pinned hugetlb pages while they're 5296 * prone to COW. We need to do the COW earlier during 5297 * fork. 5298 * 5299 * When pre-allocating the page or copying data, we 5300 * need to be without the pgtable locks since we could 5301 * sleep during the process. 
5302 */ 5303 if (!folio_test_anon(pte_folio)) { 5304 hugetlb_add_file_rmap(pte_folio); 5305 } else if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) { 5306 pte_t src_pte_old = entry; 5307 struct folio *new_folio; 5308 5309 spin_unlock(src_ptl); 5310 spin_unlock(dst_ptl); 5311 /* Do not use reserve as it's private owned */ 5312 new_folio = alloc_hugetlb_folio(dst_vma, addr, 1); 5313 if (IS_ERR(new_folio)) { 5314 folio_put(pte_folio); 5315 ret = PTR_ERR(new_folio); 5316 break; 5317 } 5318 ret = copy_user_large_folio(new_folio, pte_folio, 5319 ALIGN_DOWN(addr, sz), dst_vma); 5320 folio_put(pte_folio); 5321 if (ret) { 5322 folio_put(new_folio); 5323 break; 5324 } 5325 5326 /* Install the new hugetlb folio if src pte stable */ 5327 dst_ptl = huge_pte_lock(h, dst, dst_pte); 5328 src_ptl = huge_pte_lockptr(h, src, src_pte); 5329 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5330 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); 5331 if (!pte_same(src_pte_old, entry)) { 5332 restore_reserve_on_error(h, dst_vma, addr, 5333 new_folio); 5334 folio_put(new_folio); 5335 /* huge_ptep of dst_pte won't change as in child */ 5336 goto again; 5337 } 5338 hugetlb_install_folio(dst_vma, dst_pte, addr, 5339 new_folio, src_pte_old, sz); 5340 spin_unlock(src_ptl); 5341 spin_unlock(dst_ptl); 5342 continue; 5343 } 5344 5345 if (cow) { 5346 /* 5347 * No need to notify as we are downgrading page 5348 * table protection not changing it to point 5349 * to a new page. 5350 * 5351 * See Documentation/mm/mmu_notifier.rst 5352 */ 5353 huge_ptep_set_wrprotect(src, addr, src_pte); 5354 entry = huge_pte_wrprotect(entry); 5355 } 5356 5357 if (!userfaultfd_wp(dst_vma)) 5358 entry = huge_pte_clear_uffd_wp(entry); 5359 5360 set_huge_pte_at(dst, addr, dst_pte, entry, sz); 5361 hugetlb_count_add(npages, dst); 5362 } 5363 spin_unlock(src_ptl); 5364 spin_unlock(dst_ptl); 5365 } 5366 5367 if (cow) { 5368 raw_write_seqcount_end(&src->write_protect_seq); 5369 mmu_notifier_invalidate_range_end(&range); 5370 } else { 5371 hugetlb_vma_unlock_read(src_vma); 5372 } 5373 5374 return ret; 5375 } 5376 5377 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr, 5378 unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte, 5379 unsigned long sz) 5380 { 5381 struct hstate *h = hstate_vma(vma); 5382 struct mm_struct *mm = vma->vm_mm; 5383 spinlock_t *src_ptl, *dst_ptl; 5384 pte_t pte; 5385 5386 dst_ptl = huge_pte_lock(h, mm, dst_pte); 5387 src_ptl = huge_pte_lockptr(h, mm, src_pte); 5388 5389 /* 5390 * We don't have to worry about the ordering of src and dst ptlocks 5391 * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock. 
5392 */ 5393 if (src_ptl != dst_ptl) 5394 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5395 5396 pte = huge_ptep_get_and_clear(mm, old_addr, src_pte); 5397 set_huge_pte_at(mm, new_addr, dst_pte, pte, sz); 5398 5399 if (src_ptl != dst_ptl) 5400 spin_unlock(src_ptl); 5401 spin_unlock(dst_ptl); 5402 } 5403 5404 int move_hugetlb_page_tables(struct vm_area_struct *vma, 5405 struct vm_area_struct *new_vma, 5406 unsigned long old_addr, unsigned long new_addr, 5407 unsigned long len) 5408 { 5409 struct hstate *h = hstate_vma(vma); 5410 struct address_space *mapping = vma->vm_file->f_mapping; 5411 unsigned long sz = huge_page_size(h); 5412 struct mm_struct *mm = vma->vm_mm; 5413 unsigned long old_end = old_addr + len; 5414 unsigned long last_addr_mask; 5415 pte_t *src_pte, *dst_pte; 5416 struct mmu_notifier_range range; 5417 bool shared_pmd = false; 5418 5419 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr, 5420 old_end); 5421 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 5422 /* 5423 * In case of shared PMDs, we should cover the maximum possible 5424 * range. 5425 */ 5426 flush_cache_range(vma, range.start, range.end); 5427 5428 mmu_notifier_invalidate_range_start(&range); 5429 last_addr_mask = hugetlb_mask_last_page(h); 5430 /* Prevent race with file truncation */ 5431 hugetlb_vma_lock_write(vma); 5432 i_mmap_lock_write(mapping); 5433 for (; old_addr < old_end; old_addr += sz, new_addr += sz) { 5434 src_pte = hugetlb_walk(vma, old_addr, sz); 5435 if (!src_pte) { 5436 old_addr |= last_addr_mask; 5437 new_addr |= last_addr_mask; 5438 continue; 5439 } 5440 if (huge_pte_none(huge_ptep_get(mm, old_addr, src_pte))) 5441 continue; 5442 5443 if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) { 5444 shared_pmd = true; 5445 old_addr |= last_addr_mask; 5446 new_addr |= last_addr_mask; 5447 continue; 5448 } 5449 5450 dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz); 5451 if (!dst_pte) 5452 break; 5453 5454 move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz); 5455 } 5456 5457 if (shared_pmd) 5458 flush_hugetlb_tlb_range(vma, range.start, range.end); 5459 else 5460 flush_hugetlb_tlb_range(vma, old_end - len, old_end); 5461 mmu_notifier_invalidate_range_end(&range); 5462 i_mmap_unlock_write(mapping); 5463 hugetlb_vma_unlock_write(vma); 5464 5465 return len + old_addr - old_end; 5466 } 5467 5468 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 5469 unsigned long start, unsigned long end, 5470 struct page *ref_page, zap_flags_t zap_flags) 5471 { 5472 struct mm_struct *mm = vma->vm_mm; 5473 unsigned long address; 5474 pte_t *ptep; 5475 pte_t pte; 5476 spinlock_t *ptl; 5477 struct page *page; 5478 struct hstate *h = hstate_vma(vma); 5479 unsigned long sz = huge_page_size(h); 5480 bool adjust_reservation = false; 5481 unsigned long last_addr_mask; 5482 bool force_flush = false; 5483 5484 WARN_ON(!is_vm_hugetlb_page(vma)); 5485 BUG_ON(start & ~huge_page_mask(h)); 5486 BUG_ON(end & ~huge_page_mask(h)); 5487 5488 /* 5489 * This is a hugetlb vma, all the pte entries should point 5490 * to huge page. 
5491 */ 5492 tlb_change_page_size(tlb, sz); 5493 tlb_start_vma(tlb, vma); 5494
5495 last_addr_mask = hugetlb_mask_last_page(h); 5496 address = start; 5497 for (; address < end; address += sz) { 5498 ptep = hugetlb_walk(vma, address, sz);
5499 if (!ptep) { 5500 address |= last_addr_mask; 5501 continue; 5502 } 5503
5504 ptl = huge_pte_lock(h, mm, ptep); 5505 if (huge_pmd_unshare(mm, vma, address, ptep)) { 5506 spin_unlock(ptl);
5507 tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE); 5508 force_flush = true; 5509 address |= last_addr_mask; 5510 continue; 5511 } 5512
5513 pte = huge_ptep_get(mm, address, ptep); 5514 if (huge_pte_none(pte)) { 5515 spin_unlock(ptl); 5516 continue; 5517 } 5518
5519 /* 5520 * Migrating hugepage or HWPoisoned hugepage is already 5521 * unmapped and its refcount is dropped, so just clear pte here. 5522 */
5523 if (unlikely(!pte_present(pte))) { 5524 /* 5525 * If the pte was wr-protected by uffd-wp in any of the 5526 * swap forms, meanwhile the caller does not want to 5527 * drop the uffd-wp bit in this zap, then replace the 5528 * pte with a marker. 5529 */
5530 if (pte_swp_uffd_wp_any(pte) && 5531 !(zap_flags & ZAP_FLAG_DROP_MARKER)) 5532 set_huge_pte_at(mm, address, ptep, 5533 make_pte_marker(PTE_MARKER_UFFD_WP), 5534 sz);
5535 else 5536 huge_pte_clear(mm, address, ptep, sz); 5537 spin_unlock(ptl); 5538 continue; 5539 } 5540
5541 page = pte_page(pte);
5542 /* 5543 * If a reference page is supplied, it is because a specific 5544 * page is being unmapped, not a range. Ensure the page we 5545 * are about to unmap is the actual page of interest. 5546 */
5547 if (ref_page) { 5548 if (page != ref_page) { 5549 spin_unlock(ptl); 5550 continue; 5551 }
5552 /* 5553 * Mark the VMA as having unmapped its page so that 5554 * future faults in this VMA will fail rather than 5555 * looking like data was lost 5556 */
5557 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 5558 } 5559
5560 pte = huge_ptep_get_and_clear(mm, address, ptep); 5561 tlb_remove_huge_tlb_entry(h, tlb, ptep, address); 5562 if (huge_pte_dirty(pte)) 5563 set_page_dirty(page);
5564 /* Leave a uffd-wp pte marker if needed */ 5565 if (huge_pte_uffd_wp(pte) && 5566 !(zap_flags & ZAP_FLAG_DROP_MARKER)) 5567 set_huge_pte_at(mm, address, ptep, 5568 make_pte_marker(PTE_MARKER_UFFD_WP), 5569 sz);
5570 hugetlb_count_sub(pages_per_huge_page(h), mm); 5571 hugetlb_remove_rmap(page_folio(page)); 5572
5573 /* 5574 * Restore the reservation for an anonymous page, otherwise the 5575 * backing page could be stolen by someone. 5576 * If we are freeing a surplus page, do not set the restore 5577 * reservation bit. 5578 */
5579 if (!h->surplus_huge_pages && __vma_private_lock(vma) && 5580 folio_test_anon(page_folio(page))) { 5581 folio_set_hugetlb_restore_reserve(page_folio(page)); 5582 /* Reservation to be adjusted after the spin lock */ 5583 adjust_reservation = true; 5584 } 5585
5586 spin_unlock(ptl); 5587
5588 /* 5589 * Adjust the reservation for the region that will have the 5590 * reserve restored. Keep in mind that vma_needs_reservation() changes 5591 * resv->adds_in_progress if it succeeds. If this is not done, 5592 * do_exit() will not see it, and will keep the reservation 5593 * forever. 5594 */
5595 if (adjust_reservation) { 5596 int rc = vma_needs_reservation(h, vma, address); 5597
5598 if (rc < 0) 5599 /* Presumably allocate_file_region_entries failed 5600 * to allocate a file_region struct. Clear 5601 * hugetlb_restore_reserve so that global reserve 5602 * count will not be incremented by free_huge_folio.
5603 * Act as if we consumed the reservation. 5604 */ 5605 folio_clear_hugetlb_restore_reserve(page_folio(page)); 5606 else if (rc) 5607 vma_add_reservation(h, vma, address); 5608 } 5609
5610 tlb_remove_page_size(tlb, page, huge_page_size(h));
5611 /* 5612 * Bail out after unmapping reference page if supplied 5613 */ 5614 if (ref_page) 5615 break; 5616 } 5617 tlb_end_vma(tlb, vma); 5618
5619 /* 5620 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We 5621 * could defer the flush until now, since by holding i_mmap_rwsem we 5622 * guaranteed that the last reference would not be dropped. But we must 5623 * do the flushing before we return, as otherwise i_mmap_rwsem will be 5624 * dropped and the last reference to the shared PMDs page might be 5625 * dropped as well. 5626 *
5627 * In theory we could defer the freeing of the PMD pages as well, but 5628 * huge_pmd_unshare() relies on the exact page_count for the PMD page to 5629 * detect sharing, so we cannot defer the release of the page either. 5630 * Instead, do flush now. 5631 */
5632 if (force_flush) 5633 tlb_flush_mmu_tlbonly(tlb); 5634 } 5635
5636 void __hugetlb_zap_begin(struct vm_area_struct *vma, 5637 unsigned long *start, unsigned long *end) 5638 { 5639 if (!vma->vm_file) /* hugetlbfs_file_mmap error */ 5640 return; 5641
5642 adjust_range_if_pmd_sharing_possible(vma, start, end); 5643 hugetlb_vma_lock_write(vma); 5644 if (vma->vm_file) 5645 i_mmap_lock_write(vma->vm_file->f_mapping); 5646 } 5647
5648 void __hugetlb_zap_end(struct vm_area_struct *vma, 5649 struct zap_details *details) 5650 { 5651 zap_flags_t zap_flags = details ? details->zap_flags : 0; 5652
5653 if (!vma->vm_file) /* hugetlbfs_file_mmap error */ 5654 return; 5655
5656 if (zap_flags & ZAP_FLAG_UNMAP) { /* final unmap */
5657 /* 5658 * Unlock and free the vma lock before releasing i_mmap_rwsem. 5659 * When the vma_lock is freed, this makes the vma ineligible 5660 * for pmd sharing. And, i_mmap_rwsem is required to set up 5661 * pmd sharing. This is important as page tables for this 5662 * unmapped range will be asynchronously deleted. If the page 5663 * tables are shared, there will be issues when accessed by 5664 * someone else. 5665 */
5666 __hugetlb_vma_unlock_write_free(vma); 5667 } else { 5668 hugetlb_vma_unlock_write(vma); 5669 } 5670
5671 if (vma->vm_file) 5672 i_mmap_unlock_write(vma->vm_file->f_mapping); 5673 } 5674
5675 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 5676 unsigned long end, struct page *ref_page, 5677 zap_flags_t zap_flags) 5678 { 5679 struct mmu_notifier_range range; 5680 struct mmu_gather tlb; 5681
5682 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 5683 start, end); 5684 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5685 mmu_notifier_invalidate_range_start(&range); 5686 tlb_gather_mmu(&tlb, vma->vm_mm); 5687
5688 __unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags); 5689
5690 mmu_notifier_invalidate_range_end(&range); 5691 tlb_finish_mmu(&tlb); 5692 } 5693
5694 /* 5695 * This is called when the original mapper is failing to COW a MAP_PRIVATE 5696 * mapping it owns the reserve page for. The intention is to unmap the page 5697 * from other VMAs and let the children be SIGKILLed if they are faulting the 5698 * same region.
5699 */ 5700 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, 5701 struct page *page, unsigned long address) 5702 { 5703 struct hstate *h = hstate_vma(vma); 5704 struct vm_area_struct *iter_vma; 5705 struct address_space *mapping; 5706 pgoff_t pgoff; 5707 5708 /* 5709 * vm_pgoff is in PAGE_SIZE units, hence the different calculation 5710 * from page cache lookup which is in HPAGE_SIZE units. 5711 */ 5712 address = address & huge_page_mask(h); 5713 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + 5714 vma->vm_pgoff; 5715 mapping = vma->vm_file->f_mapping; 5716 5717 /* 5718 * Take the mapping lock for the duration of the table walk. As 5719 * this mapping should be shared between all the VMAs, 5720 * __unmap_hugepage_range() is called as the lock is already held 5721 */ 5722 i_mmap_lock_write(mapping); 5723 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { 5724 /* Do not unmap the current VMA */ 5725 if (iter_vma == vma) 5726 continue; 5727 5728 /* 5729 * Shared VMAs have their own reserves and do not affect 5730 * MAP_PRIVATE accounting but it is possible that a shared 5731 * VMA is using the same page so check and skip such VMAs. 5732 */ 5733 if (iter_vma->vm_flags & VM_MAYSHARE) 5734 continue; 5735 5736 /* 5737 * Unmap the page from other VMAs without their own reserves. 5738 * They get marked to be SIGKILLed if they fault in these 5739 * areas. This is because a future no-page fault on this VMA 5740 * could insert a zeroed page instead of the data existing 5741 * from the time of fork. This would look like data corruption 5742 */ 5743 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 5744 unmap_hugepage_range(iter_vma, address, 5745 address + huge_page_size(h), page, 0); 5746 } 5747 i_mmap_unlock_write(mapping); 5748 } 5749 5750 /* 5751 * hugetlb_wp() should be called with page lock of the original hugepage held. 5752 * Called with hugetlb_fault_mutex_table held and pte_page locked so we 5753 * cannot race with other handlers or page migration. 5754 * Keep the pte_same checks anyway to make transition from the mutex easier. 5755 */ 5756 static vm_fault_t hugetlb_wp(struct folio *pagecache_folio, 5757 struct vm_fault *vmf) 5758 { 5759 struct vm_area_struct *vma = vmf->vma; 5760 struct mm_struct *mm = vma->vm_mm; 5761 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 5762 pte_t pte = huge_ptep_get(mm, vmf->address, vmf->pte); 5763 struct hstate *h = hstate_vma(vma); 5764 struct folio *old_folio; 5765 struct folio *new_folio; 5766 int outside_reserve = 0; 5767 vm_fault_t ret = 0; 5768 struct mmu_notifier_range range; 5769 5770 /* 5771 * Never handle CoW for uffd-wp protected pages. It should be only 5772 * handled when the uffd-wp protection is removed. 5773 * 5774 * Note that only the CoW optimization path (in hugetlb_no_page()) 5775 * can trigger this, because hugetlb_fault() will always resolve 5776 * uffd-wp bit first. 5777 */ 5778 if (!unshare && huge_pte_uffd_wp(pte)) 5779 return 0; 5780 5781 /* 5782 * hugetlb does not support FOLL_FORCE-style write faults that keep the 5783 * PTE mapped R/O such as maybe_mkwrite() would do. 5784 */ 5785 if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE))) 5786 return VM_FAULT_SIGSEGV; 5787 5788 /* Let's take out MAP_SHARED mappings first. 
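 * Every mapper of a MAP_SHARED mapping references the same page, so there is nothing to copy: the existing PTE is simply made writable.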
*/ 5789 if (vma->vm_flags & VM_MAYSHARE) { 5790 set_huge_ptep_writable(vma, vmf->address, vmf->pte); 5791 return 0; 5792 } 5793 5794 old_folio = page_folio(pte_page(pte)); 5795 5796 delayacct_wpcopy_start(); 5797 5798 retry_avoidcopy: 5799 /* 5800 * If no-one else is actually using this page, we're the exclusive 5801 * owner and can reuse this page. 5802 * 5803 * Note that we don't rely on the (safer) folio refcount here, because 5804 * copying the hugetlb folio when there are unexpected (temporary) 5805 * folio references could harm simple fork()+exit() users when 5806 * we run out of free hugetlb folios: we would have to kill processes 5807 * in scenarios that used to work. As a side effect, there can still 5808 * be leaks between processes, for example, with FOLL_GET users. 5809 */ 5810 if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) { 5811 if (!PageAnonExclusive(&old_folio->page)) { 5812 folio_move_anon_rmap(old_folio, vma); 5813 SetPageAnonExclusive(&old_folio->page); 5814 } 5815 if (likely(!unshare)) 5816 set_huge_ptep_writable(vma, vmf->address, vmf->pte); 5817 5818 delayacct_wpcopy_end(); 5819 return 0; 5820 } 5821 VM_BUG_ON_PAGE(folio_test_anon(old_folio) && 5822 PageAnonExclusive(&old_folio->page), &old_folio->page); 5823 5824 /* 5825 * If the process that created a MAP_PRIVATE mapping is about to 5826 * perform a COW due to a shared page count, attempt to satisfy 5827 * the allocation without using the existing reserves. The pagecache 5828 * page is used to determine if the reserve at this address was 5829 * consumed or not. If reserves were used, a partial faulted mapping 5830 * at the time of fork() could consume its reserves on COW instead 5831 * of the full address range. 5832 */ 5833 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 5834 old_folio != pagecache_folio) 5835 outside_reserve = 1; 5836 5837 folio_get(old_folio); 5838 5839 /* 5840 * Drop page table lock as buddy allocator may be called. It will 5841 * be acquired again before returning to the caller, as expected. 5842 */ 5843 spin_unlock(vmf->ptl); 5844 new_folio = alloc_hugetlb_folio(vma, vmf->address, outside_reserve); 5845 5846 if (IS_ERR(new_folio)) { 5847 /* 5848 * If a process owning a MAP_PRIVATE mapping fails to COW, 5849 * it is due to references held by a child and an insufficient 5850 * huge page pool. To guarantee the original mappers 5851 * reliability, unmap the page from child processes. The child 5852 * may get SIGKILLed if it later faults. 5853 */ 5854 if (outside_reserve) { 5855 struct address_space *mapping = vma->vm_file->f_mapping; 5856 pgoff_t idx; 5857 u32 hash; 5858 5859 folio_put(old_folio); 5860 /* 5861 * Drop hugetlb_fault_mutex and vma_lock before 5862 * unmapping. unmapping needs to hold vma_lock 5863 * in write mode. Dropping vma_lock in read mode 5864 * here is OK as COW mappings do not interact with 5865 * PMD sharing. 5866 * 5867 * Reacquire both after unmap operation. 
5868 */ 5869 idx = vma_hugecache_offset(h, vma, vmf->address); 5870 hash = hugetlb_fault_mutex_hash(mapping, idx); 5871 hugetlb_vma_unlock_read(vma); 5872 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 5873 5874 unmap_ref_private(mm, vma, &old_folio->page, 5875 vmf->address); 5876 5877 mutex_lock(&hugetlb_fault_mutex_table[hash]); 5878 hugetlb_vma_lock_read(vma); 5879 spin_lock(vmf->ptl); 5880 vmf->pte = hugetlb_walk(vma, vmf->address, 5881 huge_page_size(h)); 5882 if (likely(vmf->pte && 5883 pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) 5884 goto retry_avoidcopy; 5885 /* 5886 * race occurs while re-acquiring page table 5887 * lock, and our job is done. 5888 */ 5889 delayacct_wpcopy_end(); 5890 return 0; 5891 } 5892 5893 ret = vmf_error(PTR_ERR(new_folio)); 5894 goto out_release_old; 5895 } 5896 5897 /* 5898 * When the original hugepage is shared one, it does not have 5899 * anon_vma prepared. 5900 */ 5901 ret = __vmf_anon_prepare(vmf); 5902 if (unlikely(ret)) 5903 goto out_release_all; 5904 5905 if (copy_user_large_folio(new_folio, old_folio, vmf->real_address, vma)) { 5906 ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h)); 5907 goto out_release_all; 5908 } 5909 __folio_mark_uptodate(new_folio); 5910 5911 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, vmf->address, 5912 vmf->address + huge_page_size(h)); 5913 mmu_notifier_invalidate_range_start(&range); 5914 5915 /* 5916 * Retake the page table lock to check for racing updates 5917 * before the page tables are altered 5918 */ 5919 spin_lock(vmf->ptl); 5920 vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h)); 5921 if (likely(vmf->pte && pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) { 5922 pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare); 5923 5924 /* Break COW or unshare */ 5925 huge_ptep_clear_flush(vma, vmf->address, vmf->pte); 5926 hugetlb_remove_rmap(old_folio); 5927 hugetlb_add_new_anon_rmap(new_folio, vma, vmf->address); 5928 if (huge_pte_uffd_wp(pte)) 5929 newpte = huge_pte_mkuffd_wp(newpte); 5930 set_huge_pte_at(mm, vmf->address, vmf->pte, newpte, 5931 huge_page_size(h)); 5932 folio_set_hugetlb_migratable(new_folio); 5933 /* Make the old page be freed below */ 5934 new_folio = old_folio; 5935 } 5936 spin_unlock(vmf->ptl); 5937 mmu_notifier_invalidate_range_end(&range); 5938 out_release_all: 5939 /* 5940 * No restore in case of successful pagetable update (Break COW or 5941 * unshare) 5942 */ 5943 if (new_folio != old_folio) 5944 restore_reserve_on_error(h, vma, vmf->address, new_folio); 5945 folio_put(new_folio); 5946 out_release_old: 5947 folio_put(old_folio); 5948 5949 spin_lock(vmf->ptl); /* Caller expects lock to be held */ 5950 5951 delayacct_wpcopy_end(); 5952 return ret; 5953 } 5954 5955 /* 5956 * Return whether there is a pagecache page to back given address within VMA. 
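 * The reference taken by filemap_get_folio() is dropped before returning; only the folio's presence is reported.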
5957 */ 5958 bool hugetlbfs_pagecache_present(struct hstate *h, 5959 struct vm_area_struct *vma, unsigned long address) 5960 { 5961 struct address_space *mapping = vma->vm_file->f_mapping; 5962 pgoff_t idx = linear_page_index(vma, address); 5963 struct folio *folio; 5964 5965 folio = filemap_get_folio(mapping, idx); 5966 if (IS_ERR(folio)) 5967 return false; 5968 folio_put(folio); 5969 return true; 5970 } 5971 5972 int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping, 5973 pgoff_t idx) 5974 { 5975 struct inode *inode = mapping->host; 5976 struct hstate *h = hstate_inode(inode); 5977 int err; 5978 5979 idx <<= huge_page_order(h); 5980 __folio_set_locked(folio); 5981 err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL); 5982 5983 if (unlikely(err)) { 5984 __folio_clear_locked(folio); 5985 return err; 5986 } 5987 folio_clear_hugetlb_restore_reserve(folio); 5988 5989 /* 5990 * mark folio dirty so that it will not be removed from cache/file 5991 * by non-hugetlbfs specific code paths. 5992 */ 5993 folio_mark_dirty(folio); 5994 5995 spin_lock(&inode->i_lock); 5996 inode->i_blocks += blocks_per_huge_page(h); 5997 spin_unlock(&inode->i_lock); 5998 return 0; 5999 } 6000 6001 static inline vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf, 6002 struct address_space *mapping, 6003 unsigned long reason) 6004 { 6005 u32 hash; 6006 6007 /* 6008 * vma_lock and hugetlb_fault_mutex must be dropped before handling 6009 * userfault. Also mmap_lock could be dropped due to handling 6010 * userfault, any vma operation should be careful from here. 6011 */ 6012 hugetlb_vma_unlock_read(vmf->vma); 6013 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff); 6014 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6015 return handle_userfault(vmf, reason); 6016 } 6017 6018 /* 6019 * Recheck pte with pgtable lock. Returns true if pte didn't change, or 6020 * false if pte changed or is changing. 6021 */ 6022 static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, unsigned long addr, 6023 pte_t *ptep, pte_t old_pte) 6024 { 6025 spinlock_t *ptl; 6026 bool same; 6027 6028 ptl = huge_pte_lock(h, mm, ptep); 6029 same = pte_same(huge_ptep_get(mm, addr, ptep), old_pte); 6030 spin_unlock(ptl); 6031 6032 return same; 6033 } 6034 6035 static vm_fault_t hugetlb_no_page(struct address_space *mapping, 6036 struct vm_fault *vmf) 6037 { 6038 struct vm_area_struct *vma = vmf->vma; 6039 struct mm_struct *mm = vma->vm_mm; 6040 struct hstate *h = hstate_vma(vma); 6041 vm_fault_t ret = VM_FAULT_SIGBUS; 6042 int anon_rmap = 0; 6043 unsigned long size; 6044 struct folio *folio; 6045 pte_t new_pte; 6046 bool new_folio, new_pagecache_folio = false; 6047 u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff); 6048 6049 /* 6050 * Currently, we are forced to kill the process in the event the 6051 * original mapper has unmapped pages from the child due to a failed 6052 * COW/unsharing. Warn that such a situation has occurred as it may not 6053 * be obvious. 6054 */ 6055 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 6056 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n", 6057 current->pid); 6058 goto out; 6059 } 6060 6061 /* 6062 * Use page lock to guard against racing truncation 6063 * before we get page_table_lock. 
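 * filemap_lock_hugetlb_folio() returns the folio locked; it stays locked until the fault is resolved or backed out below.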
6064 */ 6065 new_folio = false; 6066 folio = filemap_lock_hugetlb_folio(h, mapping, vmf->pgoff); 6067 if (IS_ERR(folio)) { 6068 size = i_size_read(mapping->host) >> huge_page_shift(h); 6069 if (vmf->pgoff >= size) 6070 goto out; 6071 /* Check for page in userfault range */ 6072 if (userfaultfd_missing(vma)) { 6073 /* 6074 * Since hugetlb_no_page() was examining pte 6075 * without pgtable lock, we need to re-test under 6076 * lock because the pte may not be stable and could 6077 * have changed from under us. Try to detect 6078 * either changed or during-changing ptes and retry 6079 * properly when needed. 6080 * 6081 * Note that userfaultfd is actually fine with 6082 * false positives (e.g. caused by pte changed), 6083 * but not wrong logical events (e.g. caused by 6084 * reading a pte during changing). The latter can 6085 * confuse the userspace, so the strictness is very 6086 * much preferred. E.g., MISSING event should 6087 * never happen on the page after UFFDIO_COPY has 6088 * correctly installed the page and returned. 6089 */ 6090 if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) { 6091 ret = 0; 6092 goto out; 6093 } 6094 6095 return hugetlb_handle_userfault(vmf, mapping, 6096 VM_UFFD_MISSING); 6097 } 6098 6099 if (!(vma->vm_flags & VM_MAYSHARE)) { 6100 ret = __vmf_anon_prepare(vmf); 6101 if (unlikely(ret)) 6102 goto out; 6103 } 6104 6105 folio = alloc_hugetlb_folio(vma, vmf->address, 0); 6106 if (IS_ERR(folio)) { 6107 /* 6108 * Returning error will result in faulting task being 6109 * sent SIGBUS. The hugetlb fault mutex prevents two 6110 * tasks from racing to fault in the same page which 6111 * could result in false unable to allocate errors. 6112 * Page migration does not take the fault mutex, but 6113 * does a clear then write of pte's under page table 6114 * lock. Page fault code could race with migration, 6115 * notice the clear pte and try to allocate a page 6116 * here. Before returning error, get ptl and make 6117 * sure there really is no pte entry. 6118 */ 6119 if (hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) 6120 ret = vmf_error(PTR_ERR(folio)); 6121 else 6122 ret = 0; 6123 goto out; 6124 } 6125 folio_zero_user(folio, vmf->real_address); 6126 __folio_mark_uptodate(folio); 6127 new_folio = true; 6128 6129 if (vma->vm_flags & VM_MAYSHARE) { 6130 int err = hugetlb_add_to_page_cache(folio, mapping, 6131 vmf->pgoff); 6132 if (err) { 6133 /* 6134 * err can't be -EEXIST which implies someone 6135 * else consumed the reservation since hugetlb 6136 * fault mutex is held when add a hugetlb page 6137 * to the page cache. So it's safe to call 6138 * restore_reserve_on_error() here. 6139 */ 6140 restore_reserve_on_error(h, vma, vmf->address, 6141 folio); 6142 folio_put(folio); 6143 ret = VM_FAULT_SIGBUS; 6144 goto out; 6145 } 6146 new_pagecache_folio = true; 6147 } else { 6148 folio_lock(folio); 6149 anon_rmap = 1; 6150 } 6151 } else { 6152 /* 6153 * If memory error occurs between mmap() and fault, some process 6154 * don't have hwpoisoned swap entry for errored virtual address. 6155 * So we need to block hugepage fault by PG_hwpoison bit check. 6156 */ 6157 if (unlikely(folio_test_hwpoison(folio))) { 6158 ret = VM_FAULT_HWPOISON_LARGE | 6159 VM_FAULT_SET_HINDEX(hstate_index(h)); 6160 goto backout_unlocked; 6161 } 6162 6163 /* Check for page in userfault range. 
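 * A folio that is present in the page cache but not yet mapped at this address is a userfaultfd minor fault.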
*/ 6164 if (userfaultfd_minor(vma)) { 6165 folio_unlock(folio); 6166 folio_put(folio); 6167 /* See comment in userfaultfd_missing() block above */ 6168 if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) { 6169 ret = 0; 6170 goto out; 6171 } 6172 return hugetlb_handle_userfault(vmf, mapping, 6173 VM_UFFD_MINOR); 6174 } 6175 } 6176 6177 /* 6178 * If we are going to COW a private mapping later, we examine the 6179 * pending reservations for this page now. This will ensure that 6180 * any allocations necessary to record that reservation occur outside 6181 * the spinlock. 6182 */ 6183 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 6184 if (vma_needs_reservation(h, vma, vmf->address) < 0) { 6185 ret = VM_FAULT_OOM; 6186 goto backout_unlocked; 6187 } 6188 /* Just decrements count, does not deallocate */ 6189 vma_end_reservation(h, vma, vmf->address); 6190 } 6191 6192 vmf->ptl = huge_pte_lock(h, mm, vmf->pte); 6193 ret = 0; 6194 /* If pte changed from under us, retry */ 6195 if (!pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), vmf->orig_pte)) 6196 goto backout; 6197 6198 if (anon_rmap) 6199 hugetlb_add_new_anon_rmap(folio, vma, vmf->address); 6200 else 6201 hugetlb_add_file_rmap(folio); 6202 new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE) 6203 && (vma->vm_flags & VM_SHARED))); 6204 /* 6205 * If this pte was previously wr-protected, keep it wr-protected even 6206 * if populated. 6207 */ 6208 if (unlikely(pte_marker_uffd_wp(vmf->orig_pte))) 6209 new_pte = huge_pte_mkuffd_wp(new_pte); 6210 set_huge_pte_at(mm, vmf->address, vmf->pte, new_pte, huge_page_size(h)); 6211 6212 hugetlb_count_add(pages_per_huge_page(h), mm); 6213 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 6214 /* Optimization, do the COW without a second fault */ 6215 ret = hugetlb_wp(folio, vmf); 6216 } 6217 6218 spin_unlock(vmf->ptl); 6219 6220 /* 6221 * Only set hugetlb_migratable in newly allocated pages. Existing pages 6222 * found in the pagecache may not have hugetlb_migratable if they have 6223 * been isolated for migration. 6224 */ 6225 if (new_folio) 6226 folio_set_hugetlb_migratable(folio); 6227 6228 folio_unlock(folio); 6229 out: 6230 hugetlb_vma_unlock_read(vma); 6231 6232 /* 6233 * We must check to release the per-VMA lock. __vmf_anon_prepare() is 6234 * the only way ret can be set to VM_FAULT_RETRY. 6235 */ 6236 if (unlikely(ret & VM_FAULT_RETRY)) 6237 vma_end_read(vma); 6238 6239 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6240 return ret; 6241 6242 backout: 6243 spin_unlock(vmf->ptl); 6244 backout_unlocked: 6245 if (new_folio && !new_pagecache_folio) 6246 restore_reserve_on_error(h, vma, vmf->address, folio); 6247 6248 folio_unlock(folio); 6249 folio_put(folio); 6250 goto out; 6251 } 6252 6253 #ifdef CONFIG_SMP 6254 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) 6255 { 6256 unsigned long key[2]; 6257 u32 hash; 6258 6259 key[0] = (unsigned long) mapping; 6260 key[1] = idx; 6261 6262 hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0); 6263 6264 return hash & (num_fault_mutexes - 1); 6265 } 6266 #else 6267 /* 6268 * For uniprocessor systems we always use a single mutex, so just 6269 * return 0 and avoid the hashing overhead. 
6270 */ 6271 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) 6272 { 6273 return 0; 6274 } 6275 #endif 6276 6277 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 6278 unsigned long address, unsigned int flags) 6279 { 6280 vm_fault_t ret; 6281 u32 hash; 6282 struct folio *folio = NULL; 6283 struct folio *pagecache_folio = NULL; 6284 struct hstate *h = hstate_vma(vma); 6285 struct address_space *mapping; 6286 int need_wait_lock = 0; 6287 struct vm_fault vmf = { 6288 .vma = vma, 6289 .address = address & huge_page_mask(h), 6290 .real_address = address, 6291 .flags = flags, 6292 .pgoff = vma_hugecache_offset(h, vma, 6293 address & huge_page_mask(h)), 6294 /* TODO: Track hugetlb faults using vm_fault */ 6295 6296 /* 6297 * Some fields may not be initialized, be careful as it may 6298 * be hard to debug if called functions make assumptions 6299 */ 6300 }; 6301 6302 /* 6303 * Serialize hugepage allocation and instantiation, so that we don't 6304 * get spurious allocation failures if two CPUs race to instantiate 6305 * the same page in the page cache. 6306 */ 6307 mapping = vma->vm_file->f_mapping; 6308 hash = hugetlb_fault_mutex_hash(mapping, vmf.pgoff); 6309 mutex_lock(&hugetlb_fault_mutex_table[hash]); 6310 6311 /* 6312 * Acquire vma lock before calling huge_pte_alloc and hold 6313 * until finished with vmf.pte. This prevents huge_pmd_unshare from 6314 * being called elsewhere and making the vmf.pte no longer valid. 6315 */ 6316 hugetlb_vma_lock_read(vma); 6317 vmf.pte = huge_pte_alloc(mm, vma, vmf.address, huge_page_size(h)); 6318 if (!vmf.pte) { 6319 hugetlb_vma_unlock_read(vma); 6320 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6321 return VM_FAULT_OOM; 6322 } 6323 6324 vmf.orig_pte = huge_ptep_get(mm, vmf.address, vmf.pte); 6325 if (huge_pte_none_mostly(vmf.orig_pte)) { 6326 if (is_pte_marker(vmf.orig_pte)) { 6327 pte_marker marker = 6328 pte_marker_get(pte_to_swp_entry(vmf.orig_pte)); 6329 6330 if (marker & PTE_MARKER_POISONED) { 6331 ret = VM_FAULT_HWPOISON_LARGE | 6332 VM_FAULT_SET_HINDEX(hstate_index(h)); 6333 goto out_mutex; 6334 } 6335 } 6336 6337 /* 6338 * Other PTE markers should be handled the same way as none PTE. 6339 * 6340 * hugetlb_no_page will drop vma lock and hugetlb fault 6341 * mutex internally, which make us return immediately. 6342 */ 6343 return hugetlb_no_page(mapping, &vmf); 6344 } 6345 6346 ret = 0; 6347 6348 /* 6349 * vmf.orig_pte could be a migration/hwpoison vmf.orig_pte at this 6350 * point, so this check prevents the kernel from going below assuming 6351 * that we have an active hugepage in pagecache. This goto expects 6352 * the 2nd page fault, and is_hugetlb_entry_(migration|hwpoisoned) 6353 * check will properly handle it. 6354 */ 6355 if (!pte_present(vmf.orig_pte)) { 6356 if (unlikely(is_hugetlb_entry_migration(vmf.orig_pte))) { 6357 /* 6358 * Release the hugetlb fault lock now, but retain 6359 * the vma lock, because it is needed to guard the 6360 * huge_pte_lockptr() later in 6361 * migration_entry_wait_huge(). The vma lock will 6362 * be released there. 6363 */ 6364 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6365 migration_entry_wait_huge(vma, vmf.address, vmf.pte); 6366 return 0; 6367 } else if (unlikely(is_hugetlb_entry_hwpoisoned(vmf.orig_pte))) 6368 ret = VM_FAULT_HWPOISON_LARGE | 6369 VM_FAULT_SET_HINDEX(hstate_index(h)); 6370 goto out_mutex; 6371 } 6372 6373 /* 6374 * If we are going to COW/unshare the mapping later, we examine the 6375 * pending reservations for this page now. 
This will ensure that any 6376 * allocations necessary to record that reservation occur outside the 6377 * spinlock. Also lookup the pagecache page now as it is used to 6378 * determine if a reservation has been consumed. 6379 */ 6380 if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) && 6381 !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(vmf.orig_pte)) { 6382 if (vma_needs_reservation(h, vma, vmf.address) < 0) { 6383 ret = VM_FAULT_OOM; 6384 goto out_mutex; 6385 } 6386 /* Just decrements count, does not deallocate */ 6387 vma_end_reservation(h, vma, vmf.address); 6388 6389 pagecache_folio = filemap_lock_hugetlb_folio(h, mapping, 6390 vmf.pgoff); 6391 if (IS_ERR(pagecache_folio)) 6392 pagecache_folio = NULL; 6393 } 6394 6395 vmf.ptl = huge_pte_lock(h, mm, vmf.pte); 6396 6397 /* Check for a racing update before calling hugetlb_wp() */ 6398 if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(mm, vmf.address, vmf.pte)))) 6399 goto out_ptl; 6400 6401 /* Handle userfault-wp first, before trying to lock more pages */ 6402 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(mm, vmf.address, vmf.pte)) && 6403 (flags & FAULT_FLAG_WRITE) && !huge_pte_write(vmf.orig_pte)) { 6404 if (!userfaultfd_wp_async(vma)) { 6405 spin_unlock(vmf.ptl); 6406 if (pagecache_folio) { 6407 folio_unlock(pagecache_folio); 6408 folio_put(pagecache_folio); 6409 } 6410 hugetlb_vma_unlock_read(vma); 6411 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6412 return handle_userfault(&vmf, VM_UFFD_WP); 6413 } 6414 6415 vmf.orig_pte = huge_pte_clear_uffd_wp(vmf.orig_pte); 6416 set_huge_pte_at(mm, vmf.address, vmf.pte, vmf.orig_pte, 6417 huge_page_size(hstate_vma(vma))); 6418 /* Fallthrough to CoW */ 6419 } 6420 6421 /* 6422 * hugetlb_wp() requires page locks of pte_page(vmf.orig_pte) and 6423 * pagecache_folio, so here we need take the former one 6424 * when folio != pagecache_folio or !pagecache_folio. 6425 */ 6426 folio = page_folio(pte_page(vmf.orig_pte)); 6427 if (folio != pagecache_folio) 6428 if (!folio_trylock(folio)) { 6429 need_wait_lock = 1; 6430 goto out_ptl; 6431 } 6432 6433 folio_get(folio); 6434 6435 if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { 6436 if (!huge_pte_write(vmf.orig_pte)) { 6437 ret = hugetlb_wp(pagecache_folio, &vmf); 6438 goto out_put_page; 6439 } else if (likely(flags & FAULT_FLAG_WRITE)) { 6440 vmf.orig_pte = huge_pte_mkdirty(vmf.orig_pte); 6441 } 6442 } 6443 vmf.orig_pte = pte_mkyoung(vmf.orig_pte); 6444 if (huge_ptep_set_access_flags(vma, vmf.address, vmf.pte, vmf.orig_pte, 6445 flags & FAULT_FLAG_WRITE)) 6446 update_mmu_cache(vma, vmf.address, vmf.pte); 6447 out_put_page: 6448 if (folio != pagecache_folio) 6449 folio_unlock(folio); 6450 folio_put(folio); 6451 out_ptl: 6452 spin_unlock(vmf.ptl); 6453 6454 if (pagecache_folio) { 6455 folio_unlock(pagecache_folio); 6456 folio_put(pagecache_folio); 6457 } 6458 out_mutex: 6459 hugetlb_vma_unlock_read(vma); 6460 6461 /* 6462 * We must check to release the per-VMA lock. __vmf_anon_prepare() in 6463 * hugetlb_wp() is the only way ret can be set to VM_FAULT_RETRY. 6464 */ 6465 if (unlikely(ret & VM_FAULT_RETRY)) 6466 vma_end_read(vma); 6467 6468 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6469 /* 6470 * Generally it's safe to hold refcount during waiting page lock. But 6471 * here we just wait to defer the next page fault to avoid busy loop and 6472 * the page is not used after unlocked before returning from the current 6473 * page fault. So we are safe from accessing freed page, even if we wait 6474 * here without taking refcount. 
6475 */ 6476 if (need_wait_lock) 6477 folio_wait_locked(folio); 6478 return ret; 6479 } 6480 6481 #ifdef CONFIG_USERFAULTFD 6482 /* 6483 * Can probably be eliminated, but still used by hugetlb_mfill_atomic_pte(). 6484 */ 6485 static struct folio *alloc_hugetlb_folio_vma(struct hstate *h, 6486 struct vm_area_struct *vma, unsigned long address) 6487 { 6488 struct mempolicy *mpol; 6489 nodemask_t *nodemask; 6490 struct folio *folio; 6491 gfp_t gfp_mask; 6492 int node; 6493 6494 gfp_mask = htlb_alloc_mask(h); 6495 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask); 6496 /* 6497 * This is used to allocate a temporary hugetlb to hold the copied 6498 * content, which will then be copied again to the final hugetlb 6499 * consuming a reservation. Set the alloc_fallback to false to indicate 6500 * that breaking the per-node hugetlb pool is not allowed in this case. 6501 */ 6502 folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask, false); 6503 mpol_cond_put(mpol); 6504 6505 return folio; 6506 } 6507 6508 /* 6509 * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte 6510 * with modifications for hugetlb pages. 6511 */ 6512 int hugetlb_mfill_atomic_pte(pte_t *dst_pte, 6513 struct vm_area_struct *dst_vma, 6514 unsigned long dst_addr, 6515 unsigned long src_addr, 6516 uffd_flags_t flags, 6517 struct folio **foliop) 6518 { 6519 struct mm_struct *dst_mm = dst_vma->vm_mm; 6520 bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE); 6521 bool wp_enabled = (flags & MFILL_ATOMIC_WP); 6522 struct hstate *h = hstate_vma(dst_vma); 6523 struct address_space *mapping = dst_vma->vm_file->f_mapping; 6524 pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr); 6525 unsigned long size = huge_page_size(h); 6526 int vm_shared = dst_vma->vm_flags & VM_SHARED; 6527 pte_t _dst_pte; 6528 spinlock_t *ptl; 6529 int ret = -ENOMEM; 6530 struct folio *folio; 6531 int writable; 6532 bool folio_in_pagecache = false; 6533 6534 if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) { 6535 ptl = huge_pte_lock(h, dst_mm, dst_pte); 6536 6537 /* Don't overwrite any existing PTEs (even markers) */ 6538 if (!huge_pte_none(huge_ptep_get(dst_mm, dst_addr, dst_pte))) { 6539 spin_unlock(ptl); 6540 return -EEXIST; 6541 } 6542 6543 _dst_pte = make_pte_marker(PTE_MARKER_POISONED); 6544 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size); 6545 6546 /* No need to invalidate - it was non-present before */ 6547 update_mmu_cache(dst_vma, dst_addr, dst_pte); 6548 6549 spin_unlock(ptl); 6550 return 0; 6551 } 6552 6553 if (is_continue) { 6554 ret = -EFAULT; 6555 folio = filemap_lock_hugetlb_folio(h, mapping, idx); 6556 if (IS_ERR(folio)) 6557 goto out; 6558 folio_in_pagecache = true; 6559 } else if (!*foliop) { 6560 /* If a folio already exists, then it's UFFDIO_COPY for 6561 * a non-missing case. Return -EEXIST. 6562 */ 6563 if (vm_shared && 6564 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { 6565 ret = -EEXIST; 6566 goto out; 6567 } 6568 6569 folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0); 6570 if (IS_ERR(folio)) { 6571 ret = -ENOMEM; 6572 goto out; 6573 } 6574 6575 ret = copy_folio_from_user(folio, (const void __user *) src_addr, 6576 false); 6577 6578 /* fallback to copy_from_user outside mmap_lock */ 6579 if (unlikely(ret)) { 6580 ret = -ENOENT; 6581 /* Free the allocated folio which may have 6582 * consumed a reservation. 
6583 */ 6584 restore_reserve_on_error(h, dst_vma, dst_addr, folio); 6585 folio_put(folio); 6586 6587 /* Allocate a temporary folio to hold the copied 6588 * contents. 6589 */ 6590 folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr); 6591 if (!folio) { 6592 ret = -ENOMEM; 6593 goto out; 6594 } 6595 *foliop = folio; 6596 /* Set the outparam foliop and return to the caller to 6597 * copy the contents outside the lock. Don't free the 6598 * folio. 6599 */ 6600 goto out; 6601 } 6602 } else { 6603 if (vm_shared && 6604 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { 6605 folio_put(*foliop); 6606 ret = -EEXIST; 6607 *foliop = NULL; 6608 goto out; 6609 } 6610 6611 folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0); 6612 if (IS_ERR(folio)) { 6613 folio_put(*foliop); 6614 ret = -ENOMEM; 6615 *foliop = NULL; 6616 goto out; 6617 } 6618 ret = copy_user_large_folio(folio, *foliop, 6619 ALIGN_DOWN(dst_addr, size), dst_vma); 6620 folio_put(*foliop); 6621 *foliop = NULL; 6622 if (ret) { 6623 folio_put(folio); 6624 goto out; 6625 } 6626 } 6627 6628 /* 6629 * If we just allocated a new page, we need a memory barrier to ensure 6630 * that preceding stores to the page become visible before the 6631 * set_pte_at() write. The memory barrier inside __folio_mark_uptodate 6632 * is what we need. 6633 * 6634 * In the case where we have not allocated a new page (is_continue), 6635 * the page must already be uptodate. UFFDIO_CONTINUE already includes 6636 * an earlier smp_wmb() to ensure that prior stores will be visible 6637 * before the set_pte_at() write. 6638 */ 6639 if (!is_continue) 6640 __folio_mark_uptodate(folio); 6641 else 6642 WARN_ON_ONCE(!folio_test_uptodate(folio)); 6643 6644 /* Add shared, newly allocated pages to the page cache. */ 6645 if (vm_shared && !is_continue) { 6646 ret = -EFAULT; 6647 if (idx >= (i_size_read(mapping->host) >> huge_page_shift(h))) 6648 goto out_release_nounlock; 6649 6650 /* 6651 * Serialization between remove_inode_hugepages() and 6652 * hugetlb_add_to_page_cache() below happens through the 6653 * hugetlb_fault_mutex_table that here must be hold by 6654 * the caller. 6655 */ 6656 ret = hugetlb_add_to_page_cache(folio, mapping, idx); 6657 if (ret) 6658 goto out_release_nounlock; 6659 folio_in_pagecache = true; 6660 } 6661 6662 ptl = huge_pte_lock(h, dst_mm, dst_pte); 6663 6664 ret = -EIO; 6665 if (folio_test_hwpoison(folio)) 6666 goto out_release_unlock; 6667 6668 /* 6669 * We allow to overwrite a pte marker: consider when both MISSING|WP 6670 * registered, we firstly wr-protect a none pte which has no page cache 6671 * page backing it, then access the page. 6672 */ 6673 ret = -EEXIST; 6674 if (!huge_pte_none_mostly(huge_ptep_get(dst_mm, dst_addr, dst_pte))) 6675 goto out_release_unlock; 6676 6677 if (folio_in_pagecache) 6678 hugetlb_add_file_rmap(folio); 6679 else 6680 hugetlb_add_new_anon_rmap(folio, dst_vma, dst_addr); 6681 6682 /* 6683 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY 6684 * with wp flag set, don't set pte write bit. 6685 */ 6686 if (wp_enabled || (is_continue && !vm_shared)) 6687 writable = 0; 6688 else 6689 writable = dst_vma->vm_flags & VM_WRITE; 6690 6691 _dst_pte = make_huge_pte(dst_vma, &folio->page, writable); 6692 /* 6693 * Always mark UFFDIO_COPY page dirty; note that this may not be 6694 * extremely important for hugetlbfs for now since swapping is not 6695 * supported, but we should still be clear in that this page cannot be 6696 * thrown away at will, even if write bit not set. 
6697 */ 6698 _dst_pte = huge_pte_mkdirty(_dst_pte); 6699 _dst_pte = pte_mkyoung(_dst_pte); 6700 6701 if (wp_enabled) 6702 _dst_pte = huge_pte_mkuffd_wp(_dst_pte); 6703 6704 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size); 6705 6706 hugetlb_count_add(pages_per_huge_page(h), dst_mm); 6707 6708 /* No need to invalidate - it was non-present before */ 6709 update_mmu_cache(dst_vma, dst_addr, dst_pte); 6710 6711 spin_unlock(ptl); 6712 if (!is_continue) 6713 folio_set_hugetlb_migratable(folio); 6714 if (vm_shared || is_continue) 6715 folio_unlock(folio); 6716 ret = 0; 6717 out: 6718 return ret; 6719 out_release_unlock: 6720 spin_unlock(ptl); 6721 if (vm_shared || is_continue) 6722 folio_unlock(folio); 6723 out_release_nounlock: 6724 if (!folio_in_pagecache) 6725 restore_reserve_on_error(h, dst_vma, dst_addr, folio); 6726 folio_put(folio); 6727 goto out; 6728 } 6729 #endif /* CONFIG_USERFAULTFD */ 6730 6731 long hugetlb_change_protection(struct vm_area_struct *vma, 6732 unsigned long address, unsigned long end, 6733 pgprot_t newprot, unsigned long cp_flags) 6734 { 6735 struct mm_struct *mm = vma->vm_mm; 6736 unsigned long start = address; 6737 pte_t *ptep; 6738 pte_t pte; 6739 struct hstate *h = hstate_vma(vma); 6740 long pages = 0, psize = huge_page_size(h); 6741 bool shared_pmd = false; 6742 struct mmu_notifier_range range; 6743 unsigned long last_addr_mask; 6744 bool uffd_wp = cp_flags & MM_CP_UFFD_WP; 6745 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; 6746 6747 /* 6748 * In the case of shared PMDs, the area to flush could be beyond 6749 * start/end. Set range.start/range.end to cover the maximum possible 6750 * range if PMD sharing is possible. 6751 */ 6752 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 6753 0, mm, start, end); 6754 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 6755 6756 BUG_ON(address >= end); 6757 flush_cache_range(vma, range.start, range.end); 6758 6759 mmu_notifier_invalidate_range_start(&range); 6760 hugetlb_vma_lock_write(vma); 6761 i_mmap_lock_write(vma->vm_file->f_mapping); 6762 last_addr_mask = hugetlb_mask_last_page(h); 6763 for (; address < end; address += psize) { 6764 spinlock_t *ptl; 6765 ptep = hugetlb_walk(vma, address, psize); 6766 if (!ptep) { 6767 if (!uffd_wp) { 6768 address |= last_addr_mask; 6769 continue; 6770 } 6771 /* 6772 * Userfaultfd wr-protect requires pgtable 6773 * pre-allocations to install pte markers. 6774 */ 6775 ptep = huge_pte_alloc(mm, vma, address, psize); 6776 if (!ptep) { 6777 pages = -ENOMEM; 6778 break; 6779 } 6780 } 6781 ptl = huge_pte_lock(h, mm, ptep); 6782 if (huge_pmd_unshare(mm, vma, address, ptep)) { 6783 /* 6784 * When uffd-wp is enabled on the vma, unshare 6785 * shouldn't happen at all. Warn about it if it 6786 * happened due to some reason. 6787 */ 6788 WARN_ON_ONCE(uffd_wp || uffd_wp_resolve); 6789 pages++; 6790 spin_unlock(ptl); 6791 shared_pmd = true; 6792 address |= last_addr_mask; 6793 continue; 6794 } 6795 pte = huge_ptep_get(mm, address, ptep); 6796 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { 6797 /* Nothing to do. 
*/ 6798 } else if (unlikely(is_hugetlb_entry_migration(pte))) { 6799 swp_entry_t entry = pte_to_swp_entry(pte); 6800 struct page *page = pfn_swap_entry_to_page(entry); 6801 pte_t newpte = pte; 6802 6803 if (is_writable_migration_entry(entry)) { 6804 if (PageAnon(page)) 6805 entry = make_readable_exclusive_migration_entry( 6806 swp_offset(entry)); 6807 else 6808 entry = make_readable_migration_entry( 6809 swp_offset(entry)); 6810 newpte = swp_entry_to_pte(entry); 6811 pages++; 6812 } 6813 6814 if (uffd_wp) 6815 newpte = pte_swp_mkuffd_wp(newpte); 6816 else if (uffd_wp_resolve) 6817 newpte = pte_swp_clear_uffd_wp(newpte); 6818 if (!pte_same(pte, newpte)) 6819 set_huge_pte_at(mm, address, ptep, newpte, psize); 6820 } else if (unlikely(is_pte_marker(pte))) { 6821 /* 6822 * Do nothing on a poison marker; page is 6823 * corrupted, permissions do not apply. Here 6824 * pte_marker_uffd_wp()==true implies !poison 6825 * because they're mutually exclusive. 6826 */ 6827 if (pte_marker_uffd_wp(pte) && uffd_wp_resolve) 6828 /* Safe to modify directly (non-present->none). */ 6829 huge_pte_clear(mm, address, ptep, psize); 6830 } else if (!huge_pte_none(pte)) { 6831 pte_t old_pte; 6832 unsigned int shift = huge_page_shift(hstate_vma(vma)); 6833 6834 old_pte = huge_ptep_modify_prot_start(vma, address, ptep); 6835 pte = huge_pte_modify(old_pte, newprot); 6836 pte = arch_make_huge_pte(pte, shift, vma->vm_flags); 6837 if (uffd_wp) 6838 pte = huge_pte_mkuffd_wp(pte); 6839 else if (uffd_wp_resolve) 6840 pte = huge_pte_clear_uffd_wp(pte); 6841 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte); 6842 pages++; 6843 } else { 6844 /* None pte */ 6845 if (unlikely(uffd_wp)) 6846 /* Safe to modify directly (none->non-present). */ 6847 set_huge_pte_at(mm, address, ptep, 6848 make_pte_marker(PTE_MARKER_UFFD_WP), 6849 psize); 6850 } 6851 spin_unlock(ptl); 6852 } 6853 /* 6854 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare 6855 * may have cleared our pud entry and done put_page on the page table: 6856 * once we release i_mmap_rwsem, another task can do the final put_page 6857 * and that page table be reused and filled with junk. If we actually 6858 * did unshare a page of pmds, flush the range corresponding to the pud. 6859 */ 6860 if (shared_pmd) 6861 flush_hugetlb_tlb_range(vma, range.start, range.end); 6862 else 6863 flush_hugetlb_tlb_range(vma, start, end); 6864 /* 6865 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs() as we are 6866 * downgrading page table protection, not changing it to point to a new 6867 * page. 6868 * 6869 * See Documentation/mm/mmu_notifier.rst 6870 */ 6871 i_mmap_unlock_write(vma->vm_file->f_mapping); 6872 hugetlb_vma_unlock_write(vma); 6873 mmu_notifier_invalidate_range_end(&range); 6874 6875 return pages > 0 ? (pages << h->order) : pages; 6876 } 6877 6878 /* Return true if reservation was successful, false otherwise.
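 * The range [from, to) is in units of huge pages.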
*/ 6879 bool hugetlb_reserve_pages(struct inode *inode, 6880 long from, long to, 6881 struct vm_area_struct *vma, 6882 vm_flags_t vm_flags) 6883 { 6884 long chg = -1, add = -1; 6885 struct hstate *h = hstate_inode(inode); 6886 struct hugepage_subpool *spool = subpool_inode(inode); 6887 struct resv_map *resv_map; 6888 struct hugetlb_cgroup *h_cg = NULL; 6889 long gbl_reserve, regions_needed = 0; 6890 6891 /* This should never happen */ 6892 if (from > to) { 6893 VM_WARN(1, "%s called with a negative range\n", __func__); 6894 return false; 6895 } 6896 6897 /* 6898 * vma specific semaphore used for pmd sharing and fault/truncation 6899 * synchronization 6900 */ 6901 hugetlb_vma_lock_alloc(vma); 6902 6903 /* 6904 * Only apply hugepage reservation if asked. At fault time, an 6905 * attempt will be made for VM_NORESERVE to allocate a page 6906 * without using reserves 6907 */ 6908 if (vm_flags & VM_NORESERVE) 6909 return true; 6910 6911 /* 6912 * Shared mappings base their reservation on the number of pages that 6913 * are already allocated on behalf of the file. Private mappings need 6914 * to reserve the full area even if read-only as mprotect() may be 6915 * called to make the mapping read-write. Assume !vma is a shm mapping 6916 */ 6917 if (!vma || vma->vm_flags & VM_MAYSHARE) { 6918 /* 6919 * resv_map can not be NULL as hugetlb_reserve_pages is only 6920 * called for inodes for which resv_maps were created (see 6921 * hugetlbfs_get_inode). 6922 */ 6923 resv_map = inode_resv_map(inode); 6924 6925 chg = region_chg(resv_map, from, to, ®ions_needed); 6926 } else { 6927 /* Private mapping. */ 6928 resv_map = resv_map_alloc(); 6929 if (!resv_map) 6930 goto out_err; 6931 6932 chg = to - from; 6933 6934 set_vma_resv_map(vma, resv_map); 6935 set_vma_resv_flags(vma, HPAGE_RESV_OWNER); 6936 } 6937 6938 if (chg < 0) 6939 goto out_err; 6940 6941 if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h), 6942 chg * pages_per_huge_page(h), &h_cg) < 0) 6943 goto out_err; 6944 6945 if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) { 6946 /* For private mappings, the hugetlb_cgroup uncharge info hangs 6947 * of the resv_map. 6948 */ 6949 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h); 6950 } 6951 6952 /* 6953 * There must be enough pages in the subpool for the mapping. If 6954 * the subpool has a minimum size, there may be some global 6955 * reservations already in place (gbl_reserve). 6956 */ 6957 gbl_reserve = hugepage_subpool_get_pages(spool, chg); 6958 if (gbl_reserve < 0) 6959 goto out_uncharge_cgroup; 6960 6961 /* 6962 * Check enough hugepages are available for the reservation. 6963 * Hand the pages back to the subpool if there are not 6964 */ 6965 if (hugetlb_acct_memory(h, gbl_reserve) < 0) 6966 goto out_put_pages; 6967 6968 /* 6969 * Account for the reservations made. Shared mappings record regions 6970 * that have reservations as they are shared by multiple VMAs. 6971 * When the last VMA disappears, the region map says how much 6972 * the reservation was and the page cache tells how much of 6973 * the reservation was consumed. Private mappings are per-VMA and 6974 * only the consumed reservations are tracked. When the VMA 6975 * disappears, the original reservation is the VMA size and the 6976 * consumed reservations are stored in the map. 
Hence, nothing 6977 * else has to be done for private mappings here 6978 */ 6979 if (!vma || vma->vm_flags & VM_MAYSHARE) { 6980 add = region_add(resv_map, from, to, regions_needed, h, h_cg); 6981 6982 if (unlikely(add < 0)) { 6983 hugetlb_acct_memory(h, -gbl_reserve); 6984 goto out_put_pages; 6985 } else if (unlikely(chg > add)) { 6986 /* 6987 * pages in this range were added to the reserve 6988 * map between region_chg and region_add. This 6989 * indicates a race with alloc_hugetlb_folio. Adjust 6990 * the subpool and reserve counts modified above 6991 * based on the difference. 6992 */ 6993 long rsv_adjust; 6994 6995 /* 6996 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the 6997 * reference to h_cg->css. See comment below for detail. 6998 */ 6999 hugetlb_cgroup_uncharge_cgroup_rsvd( 7000 hstate_index(h), 7001 (chg - add) * pages_per_huge_page(h), h_cg); 7002 7003 rsv_adjust = hugepage_subpool_put_pages(spool, 7004 chg - add); 7005 hugetlb_acct_memory(h, -rsv_adjust); 7006 } else if (h_cg) { 7007 /* 7008 * The file_regions will hold their own reference to 7009 * h_cg->css. So we should release the reference held 7010 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are 7011 * done. 7012 */ 7013 hugetlb_cgroup_put_rsvd_cgroup(h_cg); 7014 } 7015 } 7016 return true; 7017 7018 out_put_pages: 7019 /* put back original number of pages, chg */ 7020 (void)hugepage_subpool_put_pages(spool, chg); 7021 out_uncharge_cgroup: 7022 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h), 7023 chg * pages_per_huge_page(h), h_cg); 7024 out_err: 7025 hugetlb_vma_lock_free(vma); 7026 if (!vma || vma->vm_flags & VM_MAYSHARE) 7027 /* Only call region_abort if the region_chg succeeded but the 7028 * region_add failed or didn't run. 7029 */ 7030 if (chg >= 0 && add < 0) 7031 region_abort(resv_map, from, to, regions_needed); 7032 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 7033 kref_put(&resv_map->refs, resv_map_release); 7034 set_vma_resv_map(vma, NULL); 7035 } 7036 return false; 7037 } 7038 7039 long hugetlb_unreserve_pages(struct inode *inode, long start, long end, 7040 long freed) 7041 { 7042 struct hstate *h = hstate_inode(inode); 7043 struct resv_map *resv_map = inode_resv_map(inode); 7044 long chg = 0; 7045 struct hugepage_subpool *spool = subpool_inode(inode); 7046 long gbl_reserve; 7047 7048 /* 7049 * Since this routine can be called in the evict inode path for all 7050 * hugetlbfs inodes, resv_map could be NULL. 7051 */ 7052 if (resv_map) { 7053 chg = region_del(resv_map, start, end); 7054 /* 7055 * region_del() can fail in the rare case where a region 7056 * must be split and another region descriptor can not be 7057 * allocated. If end == LONG_MAX, it will not fail. 7058 */ 7059 if (chg < 0) 7060 return chg; 7061 } 7062 7063 spin_lock(&inode->i_lock); 7064 inode->i_blocks -= (blocks_per_huge_page(h) * freed); 7065 spin_unlock(&inode->i_lock); 7066 7067 /* 7068 * If the subpool has a minimum size, the number of global 7069 * reservations to be released may be adjusted. 7070 * 7071 * Note that !resv_map implies freed == 0. So (chg - freed) 7072 * won't go negative. 
7073 */ 7074 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed)); 7075 hugetlb_acct_memory(h, -gbl_reserve); 7076 7077 return 0; 7078 } 7079 7080 #ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING 7081 static unsigned long page_table_shareable(struct vm_area_struct *svma, 7082 struct vm_area_struct *vma, 7083 unsigned long addr, pgoff_t idx) 7084 { 7085 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) + 7086 svma->vm_start; 7087 unsigned long sbase = saddr & PUD_MASK; 7088 unsigned long s_end = sbase + PUD_SIZE; 7089 7090 /* Allow segments to share if only one is marked locked */ 7091 unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED_MASK; 7092 unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED_MASK; 7093 7094 /* 7095 * match the virtual addresses, permissions and the alignment of the 7096 * page table page. 7097 * 7098 * Also, vma_lock (vm_private_data) is required for sharing. 7099 */ 7100 if (pmd_index(addr) != pmd_index(saddr) || 7101 vm_flags != svm_flags || 7102 !range_in_vma(svma, sbase, s_end) || 7103 !svma->vm_private_data) 7104 return 0; 7105 7106 return saddr; 7107 } 7108 7109 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) 7110 { 7111 unsigned long start = addr & PUD_MASK; 7112 unsigned long end = start + PUD_SIZE; 7113 7114 #ifdef CONFIG_USERFAULTFD 7115 if (uffd_disable_huge_pmd_share(vma)) 7116 return false; 7117 #endif 7118 /* 7119 * check on proper vm_flags and page table alignment 7120 */ 7121 if (!(vma->vm_flags & VM_MAYSHARE)) 7122 return false; 7123 if (!vma->vm_private_data) /* vma lock required for sharing */ 7124 return false; 7125 if (!range_in_vma(vma, start, end)) 7126 return false; 7127 return true; 7128 } 7129 7130 /* 7131 * Determine if start,end range within vma could be mapped by shared pmd. 7132 * If yes, adjust start and end to cover range associated with possible 7133 * shared pmd mappings. 7134 */ 7135 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, 7136 unsigned long *start, unsigned long *end) 7137 { 7138 unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE), 7139 v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE); 7140 7141 /* 7142 * vma needs to span at least one aligned PUD size, and the range 7143 * must be at least partially within it. 7144 */ 7145 if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) || 7146 (*end <= v_start) || (*start >= v_end)) 7147 return; 7148 7149 /* Extend the range to be PUD aligned for a worst case scenario */ 7150 if (*start > v_start) 7151 *start = ALIGN_DOWN(*start, PUD_SIZE); 7152 7153 if (*end < v_end) 7154 *end = ALIGN(*end, PUD_SIZE); 7155 } 7156 7157 /* 7158 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() 7159 * and returns the corresponding pte. While this is not necessary for the 7160 * !shared pmd case because we can allocate the pmd later as well, it makes the 7161 * code much cleaner. pmd allocation is essential for the shared case because 7162 * pud has to be populated inside the same i_mmap_rwsem section - otherwise 7163 * racing tasks could either miss the sharing (see huge_pte_offset) or select a 7164 * bad pmd for sharing.
7165 */ 7166 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, 7167 unsigned long addr, pud_t *pud) 7168 { 7169 struct address_space *mapping = vma->vm_file->f_mapping; 7170 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + 7171 vma->vm_pgoff; 7172 struct vm_area_struct *svma; 7173 unsigned long saddr; 7174 pte_t *spte = NULL; 7175 pte_t *pte; 7176 7177 i_mmap_lock_read(mapping); 7178 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { 7179 if (svma == vma) 7180 continue; 7181 7182 saddr = page_table_shareable(svma, vma, addr, idx); 7183 if (saddr) { 7184 spte = hugetlb_walk(svma, saddr, 7185 vma_mmu_pagesize(svma)); 7186 if (spte) { 7187 get_page(virt_to_page(spte)); 7188 break; 7189 } 7190 } 7191 } 7192 7193 if (!spte) 7194 goto out; 7195 7196 spin_lock(&mm->page_table_lock); 7197 if (pud_none(*pud)) { 7198 pud_populate(mm, pud, 7199 (pmd_t *)((unsigned long)spte & PAGE_MASK)); 7200 mm_inc_nr_pmds(mm); 7201 } else { 7202 put_page(virt_to_page(spte)); 7203 } 7204 spin_unlock(&mm->page_table_lock); 7205 out: 7206 pte = (pte_t *)pmd_alloc(mm, pud, addr); 7207 i_mmap_unlock_read(mapping); 7208 return pte; 7209 } 7210 7211 /* 7212 * unmap huge page backed by shared pte. 7213 * 7214 * Hugetlb pte page is ref counted at the time of mapping. If pte is shared 7215 * indicated by page_count > 1, unmap is achieved by clearing pud and 7216 * decrementing the ref count. If count == 1, the pte page is not shared. 7217 * 7218 * Called with page table lock held. 7219 * 7220 * returns: 1 successfully unmapped a shared pte page 7221 * 0 the underlying pte page is not shared, or it is the last user 7222 */ 7223 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, 7224 unsigned long addr, pte_t *ptep) 7225 { 7226 pgd_t *pgd = pgd_offset(mm, addr); 7227 p4d_t *p4d = p4d_offset(pgd, addr); 7228 pud_t *pud = pud_offset(p4d, addr); 7229 7230 i_mmap_assert_write_locked(vma->vm_file->f_mapping); 7231 hugetlb_vma_assert_locked(vma); 7232 BUG_ON(page_count(virt_to_page(ptep)) == 0); 7233 if (page_count(virt_to_page(ptep)) == 1) 7234 return 0; 7235 7236 pud_clear(pud); 7237 put_page(virt_to_page(ptep)); 7238 mm_dec_nr_pmds(mm); 7239 return 1; 7240 } 7241 7242 #else /* !CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */ 7243 7244 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, 7245 unsigned long addr, pud_t *pud) 7246 { 7247 return NULL; 7248 } 7249 7250 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, 7251 unsigned long addr, pte_t *ptep) 7252 { 7253 return 0; 7254 } 7255 7256 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, 7257 unsigned long *start, unsigned long *end) 7258 { 7259 } 7260 7261 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) 7262 { 7263 return false; 7264 } 7265 #endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */ 7266 7267 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB 7268 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, 7269 unsigned long addr, unsigned long sz) 7270 { 7271 pgd_t *pgd; 7272 p4d_t *p4d; 7273 pud_t *pud; 7274 pte_t *pte = NULL; 7275 7276 pgd = pgd_offset(mm, addr); 7277 p4d = p4d_alloc(mm, pgd, addr); 7278 if (!p4d) 7279 return NULL; 7280 pud = pud_alloc(mm, p4d, addr); 7281 if (pud) { 7282 if (sz == PUD_SIZE) { 7283 pte = (pte_t *)pud; 7284 } else { 7285 BUG_ON(sz != PMD_SIZE); 7286 if (want_pmd_share(vma, addr) && pud_none(*pud)) 7287 pte = huge_pmd_share(mm, vma, addr, pud); 7288 else 7289 pte = (pte_t *)pmd_alloc(mm, pud, addr); 7290 
} 7291 } 7292 7293 if (pte) { 7294 pte_t pteval = ptep_get_lockless(pte); 7295 7296 BUG_ON(pte_present(pteval) && !pte_huge(pteval)); 7297 } 7298 7299 return pte; 7300 } 7301 7302 /* 7303 * huge_pte_offset() - Walk the page table to resolve the hugepage 7304 * entry at address @addr 7305 * 7306 * Return: Pointer to page table entry (PUD or PMD) for 7307 * address @addr, or NULL if a !p*d_present() entry is encountered and the 7308 * size @sz doesn't match the hugepage size at this level of the page 7309 * table. 7310 */ 7311 pte_t *huge_pte_offset(struct mm_struct *mm, 7312 unsigned long addr, unsigned long sz) 7313 { 7314 pgd_t *pgd; 7315 p4d_t *p4d; 7316 pud_t *pud; 7317 pmd_t *pmd; 7318 7319 pgd = pgd_offset(mm, addr); 7320 if (!pgd_present(*pgd)) 7321 return NULL; 7322 p4d = p4d_offset(pgd, addr); 7323 if (!p4d_present(*p4d)) 7324 return NULL; 7325 7326 pud = pud_offset(p4d, addr); 7327 if (sz == PUD_SIZE) 7328 /* must be pud huge, non-present or none */ 7329 return (pte_t *)pud; 7330 if (!pud_present(*pud)) 7331 return NULL; 7332 /* must have a valid entry and size to go further */ 7333 7334 pmd = pmd_offset(pud, addr); 7335 /* must be pmd huge, non-present or none */ 7336 return (pte_t *)pmd; 7337 } 7338 7339 /* 7340 * Return a mask that can be used to update an address to the last huge 7341 * page in a page table page mapping size. Used to skip non-present 7342 * page table entries when linearly scanning address ranges. Architectures 7343 * with unique huge page to page table relationships can define their own 7344 * version of this routine. 7345 */ 7346 unsigned long hugetlb_mask_last_page(struct hstate *h) 7347 { 7348 unsigned long hp_size = huge_page_size(h); 7349 7350 if (hp_size == PUD_SIZE) 7351 return P4D_SIZE - PUD_SIZE; 7352 else if (hp_size == PMD_SIZE) 7353 return PUD_SIZE - PMD_SIZE; 7354 else 7355 return 0UL; 7356 } 7357 7358 #else 7359 7360 /* See description above. Architectures can provide their own version. 
*/ 7361 __weak unsigned long hugetlb_mask_last_page(struct hstate *h) 7362 { 7363 #ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING 7364 if (huge_page_size(h) == PMD_SIZE) 7365 return PUD_SIZE - PMD_SIZE; 7366 #endif 7367 return 0UL; 7368 } 7369 7370 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ 7371 7372 bool isolate_hugetlb(struct folio *folio, struct list_head *list) 7373 { 7374 bool ret = true; 7375 7376 spin_lock_irq(&hugetlb_lock); 7377 if (!folio_test_hugetlb(folio) || 7378 !folio_test_hugetlb_migratable(folio) || 7379 !folio_try_get(folio)) { 7380 ret = false; 7381 goto unlock; 7382 } 7383 folio_clear_hugetlb_migratable(folio); 7384 list_move_tail(&folio->lru, list); 7385 unlock: 7386 spin_unlock_irq(&hugetlb_lock); 7387 return ret; 7388 } 7389 7390 int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison) 7391 { 7392 int ret = 0; 7393 7394 *hugetlb = false; 7395 spin_lock_irq(&hugetlb_lock); 7396 if (folio_test_hugetlb(folio)) { 7397 *hugetlb = true; 7398 if (folio_test_hugetlb_freed(folio)) 7399 ret = 0; 7400 else if (folio_test_hugetlb_migratable(folio) || unpoison) 7401 ret = folio_try_get(folio); 7402 else 7403 ret = -EBUSY; 7404 } 7405 spin_unlock_irq(&hugetlb_lock); 7406 return ret; 7407 } 7408 7409 int get_huge_page_for_hwpoison(unsigned long pfn, int flags, 7410 bool *migratable_cleared) 7411 { 7412 int ret; 7413 7414 spin_lock_irq(&hugetlb_lock); 7415 ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared); 7416 spin_unlock_irq(&hugetlb_lock); 7417 return ret; 7418 } 7419 7420 void folio_putback_active_hugetlb(struct folio *folio) 7421 { 7422 spin_lock_irq(&hugetlb_lock); 7423 folio_set_hugetlb_migratable(folio); 7424 list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist); 7425 spin_unlock_irq(&hugetlb_lock); 7426 folio_put(folio); 7427 } 7428 7429 void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason) 7430 { 7431 struct hstate *h = folio_hstate(old_folio); 7432 7433 hugetlb_cgroup_migrate(old_folio, new_folio); 7434 set_page_owner_migrate_reason(&new_folio->page, reason); 7435 7436 /* 7437 * transfer temporary state of the new hugetlb folio. This is 7438 * reverse to other transitions because the newpage is going to 7439 * be final while the old one will be freed so it takes over 7440 * the temporary status. 7441 * 7442 * Also note that we have to transfer the per-node surplus state 7443 * here as well otherwise the global surplus count will not match 7444 * the per-node's. 7445 */ 7446 if (folio_test_hugetlb_temporary(new_folio)) { 7447 int old_nid = folio_nid(old_folio); 7448 int new_nid = folio_nid(new_folio); 7449 7450 folio_set_hugetlb_temporary(old_folio); 7451 folio_clear_hugetlb_temporary(new_folio); 7452 7453 7454 /* 7455 * There is no need to transfer the per-node surplus state 7456 * when we do not cross the node. 

void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
{
	struct hstate *h = folio_hstate(old_folio);

	hugetlb_cgroup_migrate(old_folio, new_folio);
	set_page_owner_migrate_reason(&new_folio->page, reason);

	/*
	 * Transfer the temporary state of the new hugetlb folio.  This is
	 * the reverse of other transitions because the new folio is going
	 * to be the final one while the old one will be freed, so the old
	 * folio takes over the temporary status.
	 *
	 * Also note that we have to transfer the per-node surplus state
	 * here as well, otherwise the global surplus count will not match
	 * the per-node counts.
	 */
	if (folio_test_hugetlb_temporary(new_folio)) {
		int old_nid = folio_nid(old_folio);
		int new_nid = folio_nid(new_folio);

		folio_set_hugetlb_temporary(old_folio);
		folio_clear_hugetlb_temporary(new_folio);

		/*
		 * There is no need to transfer the per-node surplus state
		 * when we do not cross the node.
		 */
		if (new_nid == old_nid)
			return;
		spin_lock_irq(&hugetlb_lock);
		if (h->surplus_huge_pages_node[old_nid]) {
			h->surplus_huge_pages_node[old_nid]--;
			h->surplus_huge_pages_node[new_nid]++;
		}
		spin_unlock_irq(&hugetlb_lock);
	}
}

static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
				 unsigned long start,
				 unsigned long end)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	unsigned long address;
	spinlock_t *ptl;
	pte_t *ptep;

	if (!(vma->vm_flags & VM_MAYSHARE))
		return;

	if (start >= end)
		return;

	flush_cache_range(vma, start, end);
	/*
	 * No need to call adjust_range_if_pmd_sharing_possible(), because
	 * we have already done the PUD_SIZE alignment.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
				start, end);
	mmu_notifier_invalidate_range_start(&range);
	hugetlb_vma_lock_write(vma);
	i_mmap_lock_write(vma->vm_file->f_mapping);
	for (address = start; address < end; address += PUD_SIZE) {
		ptep = hugetlb_walk(vma, address, sz);
		if (!ptep)
			continue;
		ptl = huge_pte_lock(h, mm, ptep);
		huge_pmd_unshare(mm, vma, address, ptep);
		spin_unlock(ptl);
	}
	flush_hugetlb_tlb_range(vma, start, end);
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	hugetlb_vma_unlock_write(vma);
	/*
	 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see
	 * Documentation/mm/mmu_notifier.rst.
	 */
	mmu_notifier_invalidate_range_end(&range);
}

/*
 * This function will unconditionally remove all the shared pmd pgtable entries
 * within the specific vma for a hugetlbfs memory range.
 */
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
{
	hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
			     ALIGN_DOWN(vma->vm_end, PUD_SIZE));
}

#ifdef CONFIG_CMA
static bool cma_reserve_called __initdata;

static int __init cmdline_parse_hugetlb_cma(char *p)
{
	int nid, count = 0;
	unsigned long tmp;
	char *s = p;

	while (*s) {
		if (sscanf(s, "%lu%n", &tmp, &count) != 1)
			break;

		if (s[count] == ':') {
			if (tmp >= MAX_NUMNODES)
				break;
			nid = array_index_nospec(tmp, MAX_NUMNODES);

			s += count + 1;
			tmp = memparse(s, &s);
			hugetlb_cma_size_in_node[nid] = tmp;
			hugetlb_cma_size += tmp;

			/*
			 * Skip the separator if there is one, otherwise
			 * stop parsing.
			 */
			if (*s == ',')
				s++;
			else
				break;
		} else {
			hugetlb_cma_size = memparse(p, &p);
			break;
		}
	}

	return 0;
}

early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
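
/*
 * Illustrative examples of the accepted syntax (drawn from the parser above,
 * not an exhaustive specification): either one global size, or a
 * comma-separated list of <node>:<size> pairs, with the usual memparse()
 * suffixes:
 *
 *	hugetlb_cma=4G
 *	hugetlb_cma=0:2G,1:2G
 *
 * Per-node requests that name an offline node, or that are smaller than one
 * gigantic page, are dropped with a warning by hugetlb_cma_reserve() below.
 */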
7578 */ 7579 VM_WARN_ON(order <= MAX_PAGE_ORDER); 7580 cma_reserve_called = true; 7581 7582 if (!hugetlb_cma_size) 7583 return; 7584 7585 for (nid = 0; nid < MAX_NUMNODES; nid++) { 7586 if (hugetlb_cma_size_in_node[nid] == 0) 7587 continue; 7588 7589 if (!node_online(nid)) { 7590 pr_warn("hugetlb_cma: invalid node %d specified\n", nid); 7591 hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; 7592 hugetlb_cma_size_in_node[nid] = 0; 7593 continue; 7594 } 7595 7596 if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) { 7597 pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n", 7598 nid, (PAGE_SIZE << order) / SZ_1M); 7599 hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; 7600 hugetlb_cma_size_in_node[nid] = 0; 7601 } else { 7602 node_specific_cma_alloc = true; 7603 } 7604 } 7605 7606 /* Validate the CMA size again in case some invalid nodes specified. */ 7607 if (!hugetlb_cma_size) 7608 return; 7609 7610 if (hugetlb_cma_size < (PAGE_SIZE << order)) { 7611 pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n", 7612 (PAGE_SIZE << order) / SZ_1M); 7613 hugetlb_cma_size = 0; 7614 return; 7615 } 7616 7617 if (!node_specific_cma_alloc) { 7618 /* 7619 * If 3 GB area is requested on a machine with 4 numa nodes, 7620 * let's allocate 1 GB on first three nodes and ignore the last one. 7621 */ 7622 per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes); 7623 pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n", 7624 hugetlb_cma_size / SZ_1M, per_node / SZ_1M); 7625 } 7626 7627 reserved = 0; 7628 for_each_online_node(nid) { 7629 int res; 7630 char name[CMA_MAX_NAME]; 7631 7632 if (node_specific_cma_alloc) { 7633 if (hugetlb_cma_size_in_node[nid] == 0) 7634 continue; 7635 7636 size = hugetlb_cma_size_in_node[nid]; 7637 } else { 7638 size = min(per_node, hugetlb_cma_size - reserved); 7639 } 7640 7641 size = round_up(size, PAGE_SIZE << order); 7642 7643 snprintf(name, sizeof(name), "hugetlb%d", nid); 7644 /* 7645 * Note that 'order per bit' is based on smallest size that 7646 * may be returned to CMA allocator in the case of 7647 * huge page demotion. 7648 */ 7649 res = cma_declare_contiguous_nid(0, size, 0, 7650 PAGE_SIZE << order, 7651 HUGETLB_PAGE_ORDER, false, name, 7652 &hugetlb_cma[nid], nid); 7653 if (res) { 7654 pr_warn("hugetlb_cma: reservation failed: err %d, node %d", 7655 res, nid); 7656 continue; 7657 } 7658 7659 reserved += size; 7660 pr_info("hugetlb_cma: reserved %lu MiB on node %d\n", 7661 size / SZ_1M, nid); 7662 7663 if (reserved >= hugetlb_cma_size) 7664 break; 7665 } 7666 7667 if (!reserved) 7668 /* 7669 * hugetlb_cma_size is used to determine if allocations from 7670 * cma are possible. Set to zero if no cma regions are set up. 7671 */ 7672 hugetlb_cma_size = 0; 7673 } 7674 7675 static void __init hugetlb_cma_check(void) 7676 { 7677 if (!hugetlb_cma_size || cma_reserve_called) 7678 return; 7679 7680 pr_warn("hugetlb_cma: the option isn't supported by current arch\n"); 7681 } 7682 7683 #endif /* CONFIG_CMA */ 7684