// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>
#include <linux/migrate.h>
#include <linux/nospec.h>
#include <linux/delayacct.h>
#include <linux/memory.h>
#include <linux/mm_inline.h>
#include <linux/padata.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/page_owner.h>
#include "internal.h"
#include "hugetlb_vmemmap.h"
#include <linux/page-isolation.h>

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
#endif
static unsigned long hugetlb_cma_size __initdata;

__initdata struct list_head huge_boot_pages[MAX_NUMNODES];

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static bool __initdata parsed_valid_hugepagesz = true;
static bool __initdata parsed_default_hugepagesz;
static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes __ro_after_init;
struct mutex *hugetlb_fault_mutex_table __ro_after_init;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
static struct resv_map *vma_resv_map(struct vm_area_struct *vma);

static void hugetlb_free_folio(struct folio *folio)
{
#ifdef CONFIG_CMA
	int nid = folio_nid(folio);

	if (cma_free_folio(hugetlb_cma[nid], folio))
		return;
#endif
	folio_put(folio);
}

static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
	if (spool->count)
		return false;
	if (spool->max_hpages != -1)
		return spool->used_hpages == 0;
	if (spool->min_hpages != -1)
		return spool->rsv_hpages == spool->min_hpages;

	return true;
}

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
						unsigned long irq_flags)
{
	spin_unlock_irqrestore(&spool->lock, irq_flags);

	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
	if (subpool_is_free(spool)) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	unsigned long flags;

	spin_lock_irqsave(&spool->lock, flags);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool, flags);
}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock_irq(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock_irq(&spool->lock);
	return ret;
}
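/*
 * Illustrative example (added commentary, not from the original source):
 * for a subpool created with max_hpages == -1 and min_hpages == 2 (so
 * rsv_hpages starts at 2), hugepage_subpool_get_pages(spool, 1) returns 0
 * because the request is fully covered by the subpool reserve, and a
 * subsequent hugepage_subpool_get_pages(spool, 3) returns 2 because only
 * one reserved page remains and the global pool must supply the rest.
 * The symmetric hugepage_subpool_put_pages() below returns how many global
 * reservations must be dropped when pages are returned.
 */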
/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;
	unsigned long flags;

	if (!spool)
		return delta;

	spin_lock_irqsave(&spool->lock, flags);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool, flags);

	return ret;
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/*
 * hugetlb vma_lock helper routines
 */
void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_read(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		down_read(&resv_map->rw_sema);
	}
}

void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		up_read(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		up_read(&resv_map->rw_sema);
	}
}

void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_write(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		down_write(&resv_map->rw_sema);
	}
}

void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		up_write(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		up_write(&resv_map->rw_sema);
	}
}

int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{

	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		return down_write_trylock(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		return down_write_trylock(&resv_map->rw_sema);
	}

	return 1;
}
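/*
 * Illustrative note (added commentary, not from the original source): fault
 * handling paths are expected to take this lock in read mode via
 * hugetlb_vma_lock_read(), while operations such as truncation or PMD
 * unsharing that must exclude concurrent faults take it in write mode via
 * hugetlb_vma_lock_write().  A VMA that has neither a shared vma_lock nor a
 * private resv_map is not subject to this locking, which is why the helpers
 * above silently do nothing in that case.
 */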
void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		lockdep_assert_held(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		lockdep_assert_held(&resv_map->rw_sema);
	}
}

void hugetlb_vma_lock_release(struct kref *kref)
{
	struct hugetlb_vma_lock *vma_lock = container_of(kref,
			struct hugetlb_vma_lock, refs);

	kfree(vma_lock);
}

static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
{
	struct vm_area_struct *vma = vma_lock->vma;

	/*
	 * vma_lock structure may or may not be released as a result of put,
	 * it certainly will no longer be attached to vma so clear pointer.
	 * Semaphore synchronizes access to vma_lock->vma field.
	 */
	vma_lock->vma = NULL;
	vma->vm_private_data = NULL;
	up_write(&vma_lock->rw_sema);
	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
}

static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		__hugetlb_vma_unlock_write_put(vma_lock);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		/* no free for anon vmas, but still need to unlock */
		up_write(&resv_map->rw_sema);
	}
}

static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
{
	/*
	 * Only present in sharable vmas.
	 */
	if (!vma || !__vma_shareable_lock(vma))
		return;

	if (vma->vm_private_data) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_write(&vma_lock->rw_sema);
		__hugetlb_vma_unlock_write_put(vma_lock);
	}
}

static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
{
	struct hugetlb_vma_lock *vma_lock;

	/* Only establish in (flags) sharable vmas */
	if (!vma || !(vma->vm_flags & VM_MAYSHARE))
		return;

	/* Should never get here with non-NULL vm_private_data */
	if (vma->vm_private_data)
		return;

	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
	if (!vma_lock) {
		/*
		 * If we can not allocate structure, then vma can not
		 * participate in pmd sharing.  This is only a possible
		 * performance enhancement and memory saving issue.
		 * However, the lock is also used to synchronize page
		 * faults with truncation.  If the lock is not present,
		 * unlikely races could leave pages in a file past i_size
		 * until the file is removed.  Warn in the unlikely case of
		 * allocation failure.
		 */
		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
		return;
	}

	kref_init(&vma_lock->refs);
	init_rwsem(&vma_lock->rw_sema);
	vma_lock->vma = vma;
	vma->vm_private_data = vma_lock;
}
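/*
 * Added overview (editorial, not from the original source): the reserve map
 * (struct resv_map) tracks huge page reservations as a list of struct
 * file_region entries, each covering the half-open range [from, to) in huge
 * page units and kept sorted by offset.  Because entries may have to be
 * produced while resv->lock is held, a small cache of preallocated
 * file_region structures (region_cache) is maintained; the helpers below
 * take entries from and return entries to that cache.
 */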
/* Helper that removes a struct file_region from the resv_map cache and returns
 * it for use.
 */
static struct file_region *
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
{
	struct file_region *nrg;

	VM_BUG_ON(resv->region_cache_count <= 0);

	resv->region_cache_count--;
	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
	list_del(&nrg->link);

	nrg->from = from;
	nrg->to = to;

	return nrg;
}

static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
					      struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	nrg->reservation_counter = rg->reservation_counter;
	nrg->css = rg->css;
	if (rg->css)
		css_get(rg->css);
#endif
}

/* Helper that records hugetlb_cgroup uncharge info. */
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
						struct hstate *h,
						struct resv_map *resv,
						struct file_region *nrg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (h_cg) {
		nrg->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		nrg->css = &h_cg->css;
		/*
		 * The caller will hold exactly one h_cg->css reference for the
		 * whole contiguous reservation region. But this area might be
		 * scattered when there are already some file_regions residing
		 * in it. As a result, many file_regions may share only one css
		 * reference. In order to ensure that one file_region must hold
		 * exactly one h_cg->css reference, we should do css_get for
		 * each file_region and leave the reference held by caller
		 * untouched.
		 */
		css_get(&h_cg->css);
		if (!resv->pages_per_hpage)
			resv->pages_per_hpage = pages_per_huge_page(h);
		/* pages_per_hpage should be the same for all entries in
		 * a resv_map.
		 */
		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
	} else {
		nrg->reservation_counter = NULL;
		nrg->css = NULL;
	}
#endif
}

static void put_uncharge_info(struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (rg->css)
		css_put(rg->css);
#endif
}

static bool has_same_uncharge_info(struct file_region *rg,
				   struct file_region *org)
{
#ifdef CONFIG_CGROUP_HUGETLB
	return rg->reservation_counter == org->reservation_counter &&
	       rg->css == org->css;

#else
	return true;
#endif
}

static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{
	struct file_region *nrg, *prg;

	prg = list_prev_entry(rg, link);
	if (&prg->link != &resv->regions && prg->to == rg->from &&
	    has_same_uncharge_info(prg, rg)) {
		prg->to = rg->to;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);

		rg = prg;
	}

	nrg = list_next_entry(rg, link);
	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
	    has_same_uncharge_info(nrg, rg)) {
		nrg->from = rg->from;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);
	}
}

static inline long
hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
		     long *regions_needed)
{
	struct file_region *nrg;

	if (!regions_needed) {
		nrg = get_file_region_entry_from_cache(map, from, to);
		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
		list_add(&nrg->link, rg);
		coalesce_file_region(map, nrg);
	} else
		*regions_needed += 1;

	return to - from;
}

/*
 * Must be called with resv->lock held.
 *
 * Calling this with regions_needed != NULL will count the number of pages
 * to be added but will not modify the linked list. Instead, regions_needed
 * will indicate the number of file_regions needed in the cache to carry out
 * the addition of regions for this range.
 */
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
				     struct hugetlb_cgroup *h_cg,
				     struct hstate *h, long *regions_needed)
{
	long add = 0;
	struct list_head *head = &resv->regions;
	long last_accounted_offset = f;
	struct file_region *iter, *trg = NULL;
	struct list_head *rg = NULL;

	if (regions_needed)
		*regions_needed = 0;

	/* In this loop, we essentially handle an entry for the range
	 * [last_accounted_offset, iter->from), at every iteration, with some
	 * bounds checking.
	 */
	list_for_each_entry_safe(iter, trg, head, link) {
		/* Skip irrelevant regions that start before our range. */
		if (iter->from < f) {
			/* If this region ends after the last accounted offset,
			 * then we need to update last_accounted_offset.
			 */
			if (iter->to > last_accounted_offset)
				last_accounted_offset = iter->to;
			continue;
		}

		/* When we find a region that starts beyond our range, we've
		 * finished.
		 */
		if (iter->from >= t) {
			rg = iter->link.prev;
			break;
		}

		/* Add an entry for last_accounted_offset -> iter->from, and
		 * update last_accounted_offset.
		 */
		if (iter->from > last_accounted_offset)
			add += hugetlb_resv_map_add(resv, iter->link.prev,
						    last_accounted_offset,
						    iter->from, h, h_cg,
						    regions_needed);

		last_accounted_offset = iter->to;
	}

	/* Handle the case where our range extends beyond
	 * last_accounted_offset.
	 */
	if (!rg)
		rg = head->prev;
	if (last_accounted_offset < t)
		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
					    t, h, h_cg, regions_needed);

	return add;
}
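/*
 * Worked example (editorial illustration, not from the original source):
 * with existing regions [0, 2) and [4, 6) in the map, calling
 * add_reservation_in_range(resv, 1, 7, ...) covers the gaps [2, 4) and
 * [6, 7), inserting file_region entries for them when called to actually
 * modify the map, and returns 3, the number of huge pages in [1, 7) that
 * were not already represented.  With regions_needed != NULL the same walk
 * only reports that two file_region entries would be required.
 */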
/* Must be called with resv->lock acquired. Will drop lock to allocate entries.
 */
static int allocate_file_region_entries(struct resv_map *resv,
					int regions_needed)
	__must_hold(&resv->lock)
{
	LIST_HEAD(allocated_regions);
	int to_allocate = 0, i = 0;
	struct file_region *trg = NULL, *rg = NULL;

	VM_BUG_ON(regions_needed < 0);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations plus regions_needed.
	 *
	 * This is a while loop because when we drop the lock, some other call
	 * to region_add or region_del may have consumed some region_entries,
	 * so we keep looping here until we finally have enough entries for
	 * (adds_in_progress + regions_needed).
	 */
	while (resv->region_cache_count <
	       (resv->adds_in_progress + regions_needed)) {
		to_allocate = resv->adds_in_progress + regions_needed -
			      resv->region_cache_count;

		/* At this point, we should have enough entries in the cache
		 * for all the existing adds_in_progress. We should only be
		 * needing to allocate for regions_needed.
		 */
		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);

		spin_unlock(&resv->lock);
		for (i = 0; i < to_allocate; i++) {
			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
			if (!trg)
				goto out_of_memory;
			list_add(&trg->link, &allocated_regions);
		}

		spin_lock(&resv->lock);

		list_splice(&allocated_regions, &resv->region_cache);
		resv->region_cache_count += to_allocate;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
		list_del(&rg->link);
		kfree(rg);
	}
	return -ENOMEM;
}

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  Regions will be taken from the cache to fill in this range.
 * Sufficient regions should exist in the cache due to the previous
 * call to region_chg with the same range, but in some cases the cache will not
 * have sufficient entries due to races with other code doing region_add or
 * region_del.  The extra needed entries will be allocated.
 *
 * regions_needed is the out value provided by a previous call to region_chg.
 *
 * Return the number of new huge pages added to the map.  This number is
 * greater than or equal to zero.  If file_region entries needed to be
 * allocated for this operation and we were not able to allocate, it returns
 * -ENOMEM.  region_add of regions of length 1 never allocates file_regions
 * and cannot fail; region_chg will always allocate at least 1 entry and a
 * region_add for 1 page will only require at most 1 entry.
 */
static long region_add(struct resv_map *resv, long f, long t,
		       long in_regions_needed, struct hstate *h,
		       struct hugetlb_cgroup *h_cg)
{
	long add = 0, actual_regions_needed = 0;

	spin_lock(&resv->lock);
retry:

	/* Count how many regions are actually needed to execute this add. */
	add_reservation_in_range(resv, f, t, NULL, NULL,
				 &actual_regions_needed);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * this add operation. Note that actual_regions_needed may be greater
	 * than in_regions_needed, as the resv_map may have been modified since
	 * the region_chg call. In this case, we need to make sure that we
	 * allocate extra entries, such that we have enough for all the
	 * existing adds_in_progress, plus the excess needed for this
	 * operation.
	 */
	if (actual_regions_needed > in_regions_needed &&
	    resv->region_cache_count <
		    resv->adds_in_progress +
			    (actual_regions_needed - in_regions_needed)) {
		/* region_add operation of range 1 should never need to
		 * allocate file_region entries.
		 */
		VM_BUG_ON(t - f <= 1);

		if (allocate_file_region_entries(
			    resv, actual_regions_needed - in_regions_needed)) {
			return -ENOMEM;
		}

		goto retry;
	}

	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);

	resv->adds_in_progress -= in_regions_needed;

	spin_unlock(&resv->lock);
	return add;
}
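/*
 * Typical reservation sequence (editorial illustration, not from the
 * original source): a caller first invokes region_chg() to learn how many
 * huge pages in [f, t) still need a reservation and to pre-commit cache
 * entries, then either completes the operation with region_add() using the
 * regions_needed value returned by region_chg(), or backs out with
 * region_abort() if the reservation is abandoned, for example because a
 * later allocation step failed.
 */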
/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  A number of new file_region structures are added to the cache as a
 * placeholder, for the subsequent region_add call to use.  At least 1
 * file_region structure is added.
 *
 * out_regions_needed is the number of regions added to the
 * resv->adds_in_progress.  This value needs to be provided to a follow up call
 * to region_add or region_abort for proper accounting.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater than or
 * equal to zero.  -ENOMEM is returned if a new file_region structure or
 * cache entry is needed and can not be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t,
		       long *out_regions_needed)
{
	long chg = 0;

	spin_lock(&resv->lock);

	/* Count how many hugepages in this range are NOT represented. */
	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
				       out_regions_needed);

	if (*out_regions_needed == 0)
		*out_regions_needed = 1;

	if (allocate_file_region_entries(resv, *out_regions_needed))
		return -ENOMEM;

	resv->adds_in_progress += *out_regions_needed;

	spin_unlock(&resv->lock);
	return chg;
}

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter. regions_needed
 * is the value returned by the region_chg call, it is used to decrement
 * the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t,
			 long regions_needed)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress -= regions_needed;
	spin_unlock(&resv->lock);
}

/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to).  However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.
		 * Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
						       struct file_region,
						       link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;
			hugetlb_cgroup_uncharge_file_region(
				resv, rg, t - f, false);

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;

			copy_hugetlb_cgroup_uncharge_info(nrg, rg);

			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - rg->from, true);
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {		/* Trim beginning of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    t - rg->from, false);

			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - f, false);

			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}

/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was freed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;
	bool reserved = false;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust > 0) {
		struct hstate *h = hstate_inode(inode);

		if (!hugetlb_acct_memory(h, 1))
			reserved = true;
	} else if (!rsv_adjust) {
		reserved = true;
	}

	if (!reserved)
		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, huge page units here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

/**
 * vma_kernel_pagesize - Page size granularity for this VMA.
 * @vma: The user mapping.
 *
 * Folios in this VMA will be aligned to, and at least the size of, the
 * number of bytes returned by this function.
 *
 * Return: The default size of the folios allocated when backing a VMA.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have its future faults succeed.
 *
 * With the exception of hugetlb_dup_vma_private() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and it persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have
 * consumed a reservation, i.e. where pages have been instantiated.
 */
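/*
 * Illustrative note (added commentary, not from the original source): for a
 * MAP_PRIVATE mapping, vma->vm_private_data holds the resv_map pointer with
 * HPAGE_RESV_OWNER and/or HPAGE_RESV_UNMAPPED or'ed into its low bits, e.g.
 * set_vma_resv_map(vma, map) followed by
 * set_vma_resv_flags(vma, HPAGE_RESV_OWNER).  vma_resv_map() recovers the
 * pointer by masking with ~HPAGE_RESV_MASK, and is_vma_resv_set() tests the
 * flag bits.
 */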
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
					unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

static void
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
					  struct hugetlb_cgroup *h_cg,
					  struct hstate *h)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (!h_cg || !h) {
		resv_map->reservation_counter = NULL;
		resv_map->pages_per_hpage = 0;
		resv_map->css = NULL;
	} else {
		resv_map->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		resv_map->pages_per_hpage = pages_per_huge_page(h);
		resv_map->css = &h_cg->css;
	}
#endif
}

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);
	init_rwsem(&resv_map->rw_sema);

	resv_map->adds_in_progress = 0;
	/*
	 * Initialize these to 0. On shared mappings, 0's here indicate these
	 * fields don't do cgroup accounting. On private mappings, these will be
	 * re-initialized to the proper values, to indicate that hugetlb cgroup
	 * reservations are to be un-charged from here.
	 */
	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	/*
	 * At inode evict time, i_mapping may not point to the original
	 * address space within the inode.  This original address space
	 * contains the pointer to the resv_map.  So, always use the
	 * address space embedded within the inode.
	 * The VERY common case is inode->mapping == &inode->i_data but
	 * this may not be true for device special inodes.
	 */
	return (struct resv_map *)(&inode->i_data)->i_private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}

bool __vma_private_lock(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & VM_MAYSHARE) &&
		get_vma_private_data(vma) & ~HPAGE_RESV_MASK &&
		is_vma_resv_set(vma, HPAGE_RESV_OWNER);
}

void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	/*
	 * Clear vm_private_data
	 * - For shared mappings this is a per-vma semaphore that may be
	 *   allocated in a subsequent call to hugetlb_vm_op_open.
	 *   Before clearing, make sure pointer is not associated with vma
	 *   as this will leak the structure.  This is the case when called
	 *   via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
	 *   been called to allocate a new structure.
	 * - For MAP_PRIVATE mappings, this is the reserve map which does
	 *   not apply to children.  Faults generated by the children are
	 *   not guaranteed to succeed, even if read-only.
	 */
	if (vma->vm_flags & VM_MAYSHARE) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		if (vma_lock && vma_lock->vma != vma)
			vma->vm_private_data = NULL;
	} else
		vma->vm_private_data = NULL;
}

/*
 * Reset and decrement one ref on hugepage private reservation.
 * Called with mm->mmap_lock writer semaphore held.
 * This function should only be used by move_vma() and operates on VMAs of
 * the same size.  It should never be called with the last reference to the
 * reservation.
 */
void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	/*
	 * Clear the old hugetlb private page reservation.
	 * It has already been transferred to new_vma.
	 *
	 * During a mremap() operation of a hugetlb vma we call move_vma()
	 * which copies vma into new_vma and unmaps vma. After the copy
	 * operation both new_vma and vma share a reference to the resv_map
	 * struct, and at that point vma is about to be unmapped. We don't
	 * want to return the reservation to the pool at unmap of vma because
	 * the reservation still lives on in new_vma, so simply decrement the
	 * ref here and remove the resv_map reference from this vma.
	 */
	struct resv_map *reservations = vma_resv_map(vma);

	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
		kref_put(&reservations->refs, resv_map_release);
	}

	hugetlb_dup_vma_private(vma);
}

static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
{
	int nid = folio_nid(folio);

	lockdep_assert_held(&hugetlb_lock);
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);

	list_move(&folio->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
	folio_set_hugetlb_freed(folio);
}

static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
								int nid)
{
	struct folio *folio;
	bool pin = !!(current->flags & PF_MEMALLOC_PIN);

	lockdep_assert_held(&hugetlb_lock);
	list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
		if (pin && !folio_is_longterm_pinnable(folio))
			continue;

		if (folio_test_hwpoison(folio))
			continue;

		if (is_migrate_isolate_page(&folio->page))
			continue;

		list_move(&folio->lru, &h->hugepage_activelist);
		folio_ref_unfreeze(folio, 1);
		folio_clear_hugetlb_freed(folio);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		return folio;
	}

	return NULL;
}

static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
							int nid, nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = NUMA_NO_NODE;

	/* 'nid' should not be NUMA_NO_NODE. Try to catch any misuse of it and rectify. */
	if (nid == NUMA_NO_NODE)
		nid = numa_node_id();

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct folio *folio;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * no need to ask again on the same node. Pool is node rather than
		 * zone aware
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		folio = dequeue_hugetlb_folio_node_exact(h, node);
		if (folio)
			return folio;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}

static unsigned long available_huge_pages(struct hstate *h)
{
	return h->free_huge_pages - h->resv_huge_pages;
}

static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, long gbl_chg)
{
	struct folio *folio = NULL;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * gbl_chg==1 means the allocation requires a new page that was not
	 * reserved before.  Make sure there is at least one free page.
	 */
	if (gbl_chg && !available_huge_pages(h))
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);

	if (mpol_is_preferred_many(mpol)) {
		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
							nid, nodemask);

		/* Fallback to all nodes if page==NULL */
		nodemask = NULL;
	}

	if (!folio)
		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
							nid, nodemask);

	mpol_cond_put(mpol);
	return folio;

err:
	return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(int *next_node,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(*next_node, nodes_allowed);
	*next_node = next_node_allowed(nid, nodes_allowed);

	return nid;
}
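/*
 * Illustrative example (added commentary, not from the original source):
 * with nodes_allowed = {0, 2} and *next_node == 1, the helper above first
 * rounds 1 up to the next allowed node, 2, returns it as the node to
 * allocate from, and advances *next_node to 0 (wrapping around the mask),
 * so that successive calls spread allocations across the allowed nodes in
 * round-robin fashion.
 */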
/*
 * helper for remove_pool_hugetlb_folio() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(next_node, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(next_node, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
#ifdef CONFIG_CONTIG_ALLOC
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	struct folio *folio;
	int order = huge_page_order(h);
	bool retried = false;

	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();
retry:
	folio = NULL;
#ifdef CONFIG_CMA
	{
		int node;

		if (hugetlb_cma[nid])
			folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask);

		if (!folio && !(gfp_mask & __GFP_THISNODE)) {
			for_each_node_mask(node, *nodemask) {
				if (node == nid || !hugetlb_cma[node])
					continue;

				folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask);
				if (folio)
					break;
			}
		}
	}
#endif
	if (!folio) {
		folio = folio_alloc_gigantic(order, gfp_mask, nid, nodemask);
		if (!folio)
			return NULL;
	}

	if (folio_ref_freeze(folio, 1))
		return folio;

	pr_warn("HugeTLB: unexpected refcount on PFN %lu\n", folio_pfn(folio));
	hugetlb_free_folio(folio);
	if (!retried) {
		retried = true;
		goto retry;
	}
	return NULL;
}

#else /* !CONFIG_CONTIG_ALLOC */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
#endif

/*
 * Remove hugetlb folio from lists.
 * If vmemmap exists for the folio, clear the hugetlb flag so that the
 * folio appears as just a compound page.  Otherwise, wait until after
 * allocating vmemmap to clear the flag.
 *
 * Must be called with hugetlb lock held.
 */
static void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
							bool adjust_surplus)
{
	int nid = folio_nid(folio);

	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);

	lockdep_assert_held(&hugetlb_lock);
	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	list_del(&folio->lru);

	if (folio_test_hugetlb_freed(folio)) {
		folio_clear_hugetlb_freed(folio);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
	}
	if (adjust_surplus) {
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	}

	/*
	 * We can only clear the hugetlb flag after allocating vmemmap
	 * pages.
	 * Otherwise, someone (memory error handling) may try to write
	 * to tail struct pages.
	 */
	if (!folio_test_hugetlb_vmemmap_optimized(folio))
		__folio_clear_hugetlb(folio);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[nid]--;
}

static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
			     bool adjust_surplus)
{
	int nid = folio_nid(folio);

	VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio);

	lockdep_assert_held(&hugetlb_lock);

	INIT_LIST_HEAD(&folio->lru);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;

	if (adjust_surplus) {
		h->surplus_huge_pages++;
		h->surplus_huge_pages_node[nid]++;
	}

	__folio_set_hugetlb(folio);
	folio_change_private(folio, NULL);
	/*
	 * We have to set hugetlb_vmemmap_optimized again as above
	 * folio_change_private(folio, NULL) cleared it.
	 */
	folio_set_hugetlb_vmemmap_optimized(folio);

	arch_clear_hugetlb_flags(folio);
	enqueue_hugetlb_folio(h, folio);
}

static void __update_and_free_hugetlb_folio(struct hstate *h,
						struct folio *folio)
{
	bool clear_flag = folio_test_hugetlb_vmemmap_optimized(folio);

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	/*
	 * If we don't know which subpages are hwpoisoned, we can't free
	 * the hugepage, so it's leaked intentionally.
	 */
	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return;

	/*
	 * If folio is not vmemmap optimized (!clear_flag), then the folio
	 * is no longer identified as a hugetlb page.  hugetlb_vmemmap_restore_folio
	 * can only be passed hugetlb pages and will BUG otherwise.
	 */
	if (clear_flag && hugetlb_vmemmap_restore_folio(h, folio)) {
		spin_lock_irq(&hugetlb_lock);
		/*
		 * If we cannot allocate vmemmap pages, just refuse to free the
		 * page and put the page back on the hugetlb free list and treat
		 * as a surplus page.
		 */
		add_hugetlb_folio(h, folio, true);
		spin_unlock_irq(&hugetlb_lock);
		return;
	}

	/*
	 * If vmemmap pages were allocated above, then we need to clear the
	 * hugetlb flag under the hugetlb lock.
	 */
	if (folio_test_hugetlb(folio)) {
		spin_lock_irq(&hugetlb_lock);
		__folio_clear_hugetlb(folio);
		spin_unlock_irq(&hugetlb_lock);
	}

	/*
	 * Move PageHWPoison flag from head page to the raw error pages,
	 * which makes any healthy subpages reusable.
	 */
	if (unlikely(folio_test_hwpoison(folio)))
		folio_clear_hugetlb_hwpoison(folio);

	folio_ref_unfreeze(folio, 1);

	INIT_LIST_HEAD(&folio->_deferred_list);
	hugetlb_free_folio(folio);
}

/*
 * Because update_and_free_hugetlb_folio() can be called under any context,
 * we cannot use GFP_KERNEL to allocate vmemmap pages. However, we can defer
 * the actual freeing to a workqueue so that GFP_ATOMIC is not needed to
 * allocate the vmemmap pages.
 *
 * free_hpage_workfn() locklessly retrieves the linked list of pages to be
 * freed and frees them one-by-one. As the page->mapping pointer is going
 * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
 * structure of a lockless linked list of huge pages to be freed.
 */
static LLIST_HEAD(hpage_freelist);

static void free_hpage_workfn(struct work_struct *work)
{
	struct llist_node *node;

	node = llist_del_all(&hpage_freelist);

	while (node) {
		struct folio *folio;
		struct hstate *h;

		folio = container_of((struct address_space **)node,
				     struct folio, mapping);
		node = node->next;
		folio->mapping = NULL;
		/*
		 * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
		 * folio_hstate() is going to trigger because a previous call to
		 * remove_hugetlb_folio() will clear the hugetlb bit, so do
		 * not use folio_hstate() directly.
		 */
		h = size_to_hstate(folio_size(folio));

		__update_and_free_hugetlb_folio(h, folio);

		cond_resched();
	}
}
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);

static inline void flush_free_hpage_work(struct hstate *h)
{
	if (hugetlb_vmemmap_optimizable(h))
		flush_work(&free_hpage_work);
}

static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
				 bool atomic)
{
	if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
		__update_and_free_hugetlb_folio(h, folio);
		return;
	}

	/*
	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
	 *
	 * Only call schedule_work() if hpage_freelist is previously
	 * empty. Otherwise, schedule_work() had been called but the workfn
	 * hasn't retrieved the list yet.
	 */
	if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
		schedule_work(&free_hpage_work);
}

static void bulk_vmemmap_restore_error(struct hstate *h,
					struct list_head *folio_list,
					struct list_head *non_hvo_folios)
{
	struct folio *folio, *t_folio;

	if (!list_empty(non_hvo_folios)) {
		/*
		 * Free any restored hugetlb pages so that restore of the
		 * entire list can be retried.
		 * The idea is that in the common case of ENOMEM errors freeing
		 * hugetlb pages with vmemmap we will free up memory so that we
		 * can allocate vmemmap for more hugetlb pages.
		 */
		list_for_each_entry_safe(folio, t_folio, non_hvo_folios, lru) {
			list_del(&folio->lru);
			spin_lock_irq(&hugetlb_lock);
			__folio_clear_hugetlb(folio);
			spin_unlock_irq(&hugetlb_lock);
			update_and_free_hugetlb_folio(h, folio, false);
			cond_resched();
		}
	} else {
		/*
		 * In the case where there are no folios which can be
		 * immediately freed, we loop through the list trying to restore
		 * vmemmap individually in the hope that someone elsewhere may
		 * have done something to cause success (such as freeing some
		 * memory).  If unable to restore a hugetlb page, the hugetlb
		 * page is made a surplus page and removed from the list.
		 * If we are able to restore vmemmap and free one hugetlb page,
		 * we quit processing the list to retry the bulk operation.
		 */
		list_for_each_entry_safe(folio, t_folio, folio_list, lru)
			if (hugetlb_vmemmap_restore_folio(h, folio)) {
				list_del(&folio->lru);
				spin_lock_irq(&hugetlb_lock);
				add_hugetlb_folio(h, folio, true);
				spin_unlock_irq(&hugetlb_lock);
			} else {
				list_del(&folio->lru);
				spin_lock_irq(&hugetlb_lock);
				__folio_clear_hugetlb(folio);
				spin_unlock_irq(&hugetlb_lock);
				update_and_free_hugetlb_folio(h, folio, false);
				cond_resched();
				break;
			}
	}
}

static void update_and_free_pages_bulk(struct hstate *h,
						struct list_head *folio_list)
{
	long ret;
	struct folio *folio, *t_folio;
	LIST_HEAD(non_hvo_folios);

	/*
	 * First allocate required vmemmap (if necessary) for all folios.
	 * Carefully handle errors and free up any available hugetlb pages
	 * in an effort to make forward progress.
	 */
retry:
	ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
	if (ret < 0) {
		bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios);
		goto retry;
	}

	/*
	 * At this point, list should be empty, ret should be >= 0 and there
	 * should only be pages on the non_hvo_folios list.
	 * Do note that the non_hvo_folios list could be empty.
	 * Without HVO enabled, ret will be 0 and there is no need to call
	 * __folio_clear_hugetlb as this was done previously.
	 */
	VM_WARN_ON(!list_empty(folio_list));
	VM_WARN_ON(ret < 0);
	if (!list_empty(&non_hvo_folios) && ret) {
		spin_lock_irq(&hugetlb_lock);
		list_for_each_entry(folio, &non_hvo_folios, lru)
			__folio_clear_hugetlb(folio);
		spin_unlock_irq(&hugetlb_lock);
	}

	list_for_each_entry_safe(folio, t_folio, &non_hvo_folios, lru) {
		update_and_free_hugetlb_folio(h, folio, false);
		cond_resched();
	}
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

void free_huge_folio(struct folio *folio)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * generic mm code.
	 */
	struct hstate *h = folio_hstate(folio);
	int nid = folio_nid(folio);
	struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
	bool restore_reserve;
	unsigned long flags;

	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
	VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);

	hugetlb_set_folio_subpool(folio, NULL);
	if (folio_test_anon(folio))
		__ClearPageAnonExclusive(&folio->page);
	folio->mapping = NULL;
	restore_reserve = folio_test_hugetlb_restore_reserve(folio);
	folio_clear_hugetlb_restore_reserve(folio);

	/*
	 * If HPageRestoreReserve was set on page, page allocation consumed a
	 * reservation.  If the page was associated with a subpool, there
	 * would have been a page reserved in the subpool before allocation
	 * via hugepage_subpool_get_pages().  Since we are 'restoring' the
	 * reservation, do not call hugepage_subpool_put_pages() as this will
	 * remove the reserved page from the subpool.
	 */
	if (!restore_reserve) {
		/*
		 * A return code of zero implies that the subpool will be
		 * under its minimum size if the reservation is not restored
		 * after the page is freed.  Therefore, force restore_reserve
		 * operation.
1847 */ 1848 if (hugepage_subpool_put_pages(spool, 1) == 0) 1849 restore_reserve = true; 1850 } 1851 1852 spin_lock_irqsave(&hugetlb_lock, flags); 1853 folio_clear_hugetlb_migratable(folio); 1854 hugetlb_cgroup_uncharge_folio(hstate_index(h), 1855 pages_per_huge_page(h), folio); 1856 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), 1857 pages_per_huge_page(h), folio); 1858 lruvec_stat_mod_folio(folio, NR_HUGETLB, -pages_per_huge_page(h)); 1859 mem_cgroup_uncharge(folio); 1860 if (restore_reserve) 1861 h->resv_huge_pages++; 1862 1863 if (folio_test_hugetlb_temporary(folio)) { 1864 remove_hugetlb_folio(h, folio, false); 1865 spin_unlock_irqrestore(&hugetlb_lock, flags); 1866 update_and_free_hugetlb_folio(h, folio, true); 1867 } else if (h->surplus_huge_pages_node[nid]) { 1868 /* remove the page from active list */ 1869 remove_hugetlb_folio(h, folio, true); 1870 spin_unlock_irqrestore(&hugetlb_lock, flags); 1871 update_and_free_hugetlb_folio(h, folio, true); 1872 } else { 1873 arch_clear_hugetlb_flags(folio); 1874 enqueue_hugetlb_folio(h, folio); 1875 spin_unlock_irqrestore(&hugetlb_lock, flags); 1876 } 1877 } 1878 1879 /* 1880 * Must be called with the hugetlb lock held 1881 */ 1882 static void __prep_account_new_huge_page(struct hstate *h, int nid) 1883 { 1884 lockdep_assert_held(&hugetlb_lock); 1885 h->nr_huge_pages++; 1886 h->nr_huge_pages_node[nid]++; 1887 } 1888 1889 static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio) 1890 { 1891 __folio_set_hugetlb(folio); 1892 INIT_LIST_HEAD(&folio->lru); 1893 hugetlb_set_folio_subpool(folio, NULL); 1894 set_hugetlb_cgroup(folio, NULL); 1895 set_hugetlb_cgroup_rsvd(folio, NULL); 1896 } 1897 1898 static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio) 1899 { 1900 init_new_hugetlb_folio(h, folio); 1901 hugetlb_vmemmap_optimize_folio(h, folio); 1902 } 1903 1904 static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid) 1905 { 1906 __prep_new_hugetlb_folio(h, folio); 1907 spin_lock_irq(&hugetlb_lock); 1908 __prep_account_new_huge_page(h, nid); 1909 spin_unlock_irq(&hugetlb_lock); 1910 } 1911 1912 /* 1913 * Find and lock address space (mapping) in write mode. 1914 * 1915 * Upon entry, the folio is locked which means that folio_mapping() is 1916 * stable. Due to locking order, we can only trylock_write. If we can 1917 * not get the lock, simply return NULL to caller. 1918 */ 1919 struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio) 1920 { 1921 struct address_space *mapping = folio_mapping(folio); 1922 1923 if (!mapping) 1924 return mapping; 1925 1926 if (i_mmap_trylock_write(mapping)) 1927 return mapping; 1928 1929 return NULL; 1930 } 1931 1932 static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h, 1933 gfp_t gfp_mask, int nid, nodemask_t *nmask, 1934 nodemask_t *node_alloc_noretry) 1935 { 1936 int order = huge_page_order(h); 1937 struct folio *folio; 1938 bool alloc_try_hard = true; 1939 bool retry = true; 1940 1941 /* 1942 * By default we always try hard to allocate the folio with 1943 * __GFP_RETRY_MAYFAIL flag. However, if we are allocating folios in 1944 * a loop (to adjust global huge page counts) and previous allocation 1945 * failed, do not continue to try hard on the same node. Use the 1946 * node_alloc_noretry bitmap to manage this state information. 
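 *
 * For example, the pool-resize path (set_max_huge_pages() via
 * alloc_pool_huge_folio()) passes a per-call nodemask here, while one-off
 * allocations such as alloc_fresh_hugetlb_folio() pass NULL and simply skip
 * this bookkeeping.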
1947 */ 1948 if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry)) 1949 alloc_try_hard = false; 1950 if (alloc_try_hard) 1951 gfp_mask |= __GFP_RETRY_MAYFAIL; 1952 if (nid == NUMA_NO_NODE) 1953 nid = numa_mem_id(); 1954 retry: 1955 folio = __folio_alloc(gfp_mask, order, nid, nmask); 1956 /* Ensure hugetlb folio won't have large_rmappable flag set. */ 1957 if (folio) 1958 folio_clear_large_rmappable(folio); 1959 1960 if (folio && !folio_ref_freeze(folio, 1)) { 1961 folio_put(folio); 1962 if (retry) { /* retry once */ 1963 retry = false; 1964 goto retry; 1965 } 1966 /* WOW! twice in a row. */ 1967 pr_warn("HugeTLB unexpected inflated folio ref count\n"); 1968 folio = NULL; 1969 } 1970 1971 /* 1972 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a 1973 * folio this indicates an overall state change. Clear bit so 1974 * that we resume normal 'try hard' allocations. 1975 */ 1976 if (node_alloc_noretry && folio && !alloc_try_hard) 1977 node_clear(nid, *node_alloc_noretry); 1978 1979 /* 1980 * If we tried hard to get a folio but failed, set bit so that 1981 * subsequent attempts will not try as hard until there is an 1982 * overall state change. 1983 */ 1984 if (node_alloc_noretry && !folio && alloc_try_hard) 1985 node_set(nid, *node_alloc_noretry); 1986 1987 if (!folio) { 1988 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); 1989 return NULL; 1990 } 1991 1992 __count_vm_event(HTLB_BUDDY_PGALLOC); 1993 return folio; 1994 } 1995 1996 static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h, 1997 gfp_t gfp_mask, int nid, nodemask_t *nmask, 1998 nodemask_t *node_alloc_noretry) 1999 { 2000 struct folio *folio; 2001 2002 if (hstate_is_gigantic(h)) 2003 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask); 2004 else 2005 folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, node_alloc_noretry); 2006 if (folio) 2007 init_new_hugetlb_folio(h, folio); 2008 return folio; 2009 } 2010 2011 /* 2012 * Common helper to allocate a fresh hugetlb page. All specific allocators 2013 * should use this function to get new hugetlb pages 2014 * 2015 * Note that returned page is 'frozen': ref count of head page and all tail 2016 * pages is zero. 2017 */ 2018 static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h, 2019 gfp_t gfp_mask, int nid, nodemask_t *nmask) 2020 { 2021 struct folio *folio; 2022 2023 if (hstate_is_gigantic(h)) 2024 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask); 2025 else 2026 folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, NULL); 2027 if (!folio) 2028 return NULL; 2029 2030 prep_new_hugetlb_folio(h, folio, folio_nid(folio)); 2031 return folio; 2032 } 2033 2034 static void prep_and_add_allocated_folios(struct hstate *h, 2035 struct list_head *folio_list) 2036 { 2037 unsigned long flags; 2038 struct folio *folio, *tmp_f; 2039 2040 /* Send list for bulk vmemmap optimization processing */ 2041 hugetlb_vmemmap_optimize_folios(h, folio_list); 2042 2043 /* Add all new pool pages to free lists in one lock cycle */ 2044 spin_lock_irqsave(&hugetlb_lock, flags); 2045 list_for_each_entry_safe(folio, tmp_f, folio_list, lru) { 2046 __prep_account_new_huge_page(h, folio_nid(folio)); 2047 enqueue_hugetlb_folio(h, folio); 2048 } 2049 spin_unlock_irqrestore(&hugetlb_lock, flags); 2050 } 2051 2052 /* 2053 * Allocates a fresh hugetlb page in a node interleaved manner. The page 2054 * will later be added to the appropriate hugetlb pool. 
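 *
 * For example, hugetlb_pages_alloc_boot_node() and set_max_huge_pages() call
 * this in a loop and collect the returned folios on a local list, which is
 * then handed to prep_and_add_allocated_folios() in one batch.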
2055 */ 2056 static struct folio *alloc_pool_huge_folio(struct hstate *h, 2057 nodemask_t *nodes_allowed, 2058 nodemask_t *node_alloc_noretry, 2059 int *next_node) 2060 { 2061 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 2062 int nr_nodes, node; 2063 2064 for_each_node_mask_to_alloc(next_node, nr_nodes, node, nodes_allowed) { 2065 struct folio *folio; 2066 2067 folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, node, 2068 nodes_allowed, node_alloc_noretry); 2069 if (folio) 2070 return folio; 2071 } 2072 2073 return NULL; 2074 } 2075 2076 /* 2077 * Remove huge page from pool from next node to free. Attempt to keep 2078 * persistent huge pages more or less balanced over allowed nodes. 2079 * This routine only 'removes' the hugetlb page. The caller must make 2080 * an additional call to free the page to low level allocators. 2081 * Called with hugetlb_lock locked. 2082 */ 2083 static struct folio *remove_pool_hugetlb_folio(struct hstate *h, 2084 nodemask_t *nodes_allowed, bool acct_surplus) 2085 { 2086 int nr_nodes, node; 2087 struct folio *folio = NULL; 2088 2089 lockdep_assert_held(&hugetlb_lock); 2090 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 2091 /* 2092 * If we're returning unused surplus pages, only examine 2093 * nodes with surplus pages. 2094 */ 2095 if ((!acct_surplus || h->surplus_huge_pages_node[node]) && 2096 !list_empty(&h->hugepage_freelists[node])) { 2097 folio = list_entry(h->hugepage_freelists[node].next, 2098 struct folio, lru); 2099 remove_hugetlb_folio(h, folio, acct_surplus); 2100 break; 2101 } 2102 } 2103 2104 return folio; 2105 } 2106 2107 /* 2108 * Dissolve a given free hugetlb folio into free buddy pages. This function 2109 * does nothing for in-use hugetlb folios and non-hugetlb folios. 2110 * This function returns values like below: 2111 * 2112 * -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages 2113 * when the system is under memory pressure and the feature of 2114 * freeing unused vmemmap pages associated with each hugetlb page 2115 * is enabled. 2116 * -EBUSY: failed to dissolve free hugepages or the hugepage is in-use 2117 * (allocated or reserved.) 2118 * 0: successfully dissolved free hugepages or the page is not a 2119 * hugepage (considered as already dissolved) 2120 */ 2121 int dissolve_free_hugetlb_folio(struct folio *folio) 2122 { 2123 int rc = -EBUSY; 2124 2125 retry: 2126 /* Not to disrupt normal path by vainly holding hugetlb_lock */ 2127 if (!folio_test_hugetlb(folio)) 2128 return 0; 2129 2130 spin_lock_irq(&hugetlb_lock); 2131 if (!folio_test_hugetlb(folio)) { 2132 rc = 0; 2133 goto out; 2134 } 2135 2136 if (!folio_ref_count(folio)) { 2137 struct hstate *h = folio_hstate(folio); 2138 bool adjust_surplus = false; 2139 2140 if (!available_huge_pages(h)) 2141 goto out; 2142 2143 /* 2144 * We should make sure that the page is already on the free list 2145 * when it is dissolved. 2146 */ 2147 if (unlikely(!folio_test_hugetlb_freed(folio))) { 2148 spin_unlock_irq(&hugetlb_lock); 2149 cond_resched(); 2150 2151 /* 2152 * Theoretically, we should return -EBUSY when we 2153 * encounter this race. In fact, we have a chance 2154 * to successfully dissolve the page if we do a 2155 * retry, because the race window is quite small. 2156 * If we seize this opportunity, it is an optimization 2157 * for increasing the success rate of dissolving the page.
2158 */ 2159 goto retry; 2160 } 2161 2162 if (h->surplus_huge_pages_node[folio_nid(folio)]) 2163 adjust_surplus = true; 2164 remove_hugetlb_folio(h, folio, adjust_surplus); 2165 h->max_huge_pages--; 2166 spin_unlock_irq(&hugetlb_lock); 2167 2168 /* 2169 * Normally update_and_free_hugetlb_folio will allocate required vmemmap 2170 * before freeing the page. update_and_free_hugetlb_folio will fail to 2171 * free the page if it can not allocate required vmemmap. We 2172 * need to adjust max_huge_pages if the page is not freed. 2173 * Attempt to allocate vmemmap here so that we can take 2174 * appropriate action on failure. 2175 * 2176 * The folio_test_hugetlb check here is because 2177 * remove_hugetlb_folio will clear hugetlb folio flag for 2178 * non-vmemmap optimized hugetlb folios. 2179 */ 2180 if (folio_test_hugetlb(folio)) { 2181 rc = hugetlb_vmemmap_restore_folio(h, folio); 2182 if (rc) { 2183 spin_lock_irq(&hugetlb_lock); 2184 add_hugetlb_folio(h, folio, adjust_surplus); 2185 h->max_huge_pages++; 2186 goto out; 2187 } 2188 } else 2189 rc = 0; 2190 2191 update_and_free_hugetlb_folio(h, folio, false); 2192 return rc; 2193 } 2194 out: 2195 spin_unlock_irq(&hugetlb_lock); 2196 return rc; 2197 } 2198 2199 /* 2200 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to 2201 * make specified memory blocks removable from the system. 2202 * Note that this will dissolve a free gigantic hugepage completely, if any 2203 * part of it lies within the given range. 2204 * Also note that if dissolve_free_hugetlb_folio() returns with an error, all 2205 * free hugetlb folios that were dissolved before that error are lost. 2206 */ 2207 int dissolve_free_hugetlb_folios(unsigned long start_pfn, unsigned long end_pfn) 2208 { 2209 unsigned long pfn; 2210 struct folio *folio; 2211 int rc = 0; 2212 unsigned int order; 2213 struct hstate *h; 2214 2215 if (!hugepages_supported()) 2216 return rc; 2217 2218 order = huge_page_order(&default_hstate); 2219 for_each_hstate(h) 2220 order = min(order, huge_page_order(h)); 2221 2222 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) { 2223 folio = pfn_folio(pfn); 2224 rc = dissolve_free_hugetlb_folio(folio); 2225 if (rc) 2226 break; 2227 } 2228 2229 return rc; 2230 } 2231 2232 /* 2233 * Allocates a fresh surplus page from the page allocator. 2234 */ 2235 static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h, 2236 gfp_t gfp_mask, int nid, nodemask_t *nmask) 2237 { 2238 struct folio *folio = NULL; 2239 2240 if (hstate_is_gigantic(h)) 2241 return NULL; 2242 2243 spin_lock_irq(&hugetlb_lock); 2244 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) 2245 goto out_unlock; 2246 spin_unlock_irq(&hugetlb_lock); 2247 2248 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask); 2249 if (!folio) 2250 return NULL; 2251 2252 spin_lock_irq(&hugetlb_lock); 2253 /* 2254 * We could have raced with the pool size change. 2255 * Double check that and simply deallocate the new page 2256 * if we would end up overcommitting the surpluses.
Abuse 2257 * temporary page to workaround the nasty free_huge_folio 2258 * codeflow 2259 */ 2260 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { 2261 folio_set_hugetlb_temporary(folio); 2262 spin_unlock_irq(&hugetlb_lock); 2263 free_huge_folio(folio); 2264 return NULL; 2265 } 2266 2267 h->surplus_huge_pages++; 2268 h->surplus_huge_pages_node[folio_nid(folio)]++; 2269 2270 out_unlock: 2271 spin_unlock_irq(&hugetlb_lock); 2272 2273 return folio; 2274 } 2275 2276 static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, 2277 int nid, nodemask_t *nmask) 2278 { 2279 struct folio *folio; 2280 2281 if (hstate_is_gigantic(h)) 2282 return NULL; 2283 2284 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask); 2285 if (!folio) 2286 return NULL; 2287 2288 /* fresh huge pages are frozen */ 2289 folio_ref_unfreeze(folio, 1); 2290 /* 2291 * We do not account these pages as surplus because they are only 2292 * temporary and will be released properly on the last reference 2293 */ 2294 folio_set_hugetlb_temporary(folio); 2295 2296 return folio; 2297 } 2298 2299 /* 2300 * Use the VMA's mpolicy to allocate a huge page from the buddy. 2301 */ 2302 static 2303 struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h, 2304 struct vm_area_struct *vma, unsigned long addr) 2305 { 2306 struct folio *folio = NULL; 2307 struct mempolicy *mpol; 2308 gfp_t gfp_mask = htlb_alloc_mask(h); 2309 int nid; 2310 nodemask_t *nodemask; 2311 2312 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask); 2313 if (mpol_is_preferred_many(mpol)) { 2314 gfp_t gfp = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 2315 2316 folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask); 2317 2318 /* Fallback to all nodes if page==NULL */ 2319 nodemask = NULL; 2320 } 2321 2322 if (!folio) 2323 folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask); 2324 mpol_cond_put(mpol); 2325 return folio; 2326 } 2327 2328 struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid, 2329 nodemask_t *nmask, gfp_t gfp_mask) 2330 { 2331 struct folio *folio; 2332 2333 spin_lock_irq(&hugetlb_lock); 2334 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, preferred_nid, 2335 nmask); 2336 if (folio) { 2337 VM_BUG_ON(!h->resv_huge_pages); 2338 h->resv_huge_pages--; 2339 } 2340 2341 spin_unlock_irq(&hugetlb_lock); 2342 return folio; 2343 } 2344 2345 /* folio migration callback function */ 2346 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, 2347 nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback) 2348 { 2349 spin_lock_irq(&hugetlb_lock); 2350 if (available_huge_pages(h)) { 2351 struct folio *folio; 2352 2353 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, 2354 preferred_nid, nmask); 2355 if (folio) { 2356 spin_unlock_irq(&hugetlb_lock); 2357 return folio; 2358 } 2359 } 2360 spin_unlock_irq(&hugetlb_lock); 2361 2362 /* We cannot fallback to other nodes, as we could break the per-node pool. 
*/ 2363 if (!allow_alloc_fallback) 2364 gfp_mask |= __GFP_THISNODE; 2365 2366 return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask); 2367 } 2368 2369 static nodemask_t *policy_mbind_nodemask(gfp_t gfp) 2370 { 2371 #ifdef CONFIG_NUMA 2372 struct mempolicy *mpol = get_task_policy(current); 2373 2374 /* 2375 * Only enforce MPOL_BIND policy which overlaps with cpuset policy 2376 * (from policy_nodemask) specifically for hugetlb case 2377 */ 2378 if (mpol->mode == MPOL_BIND && 2379 (apply_policy_zone(mpol, gfp_zone(gfp)) && 2380 cpuset_nodemask_valid_mems_allowed(&mpol->nodes))) 2381 return &mpol->nodes; 2382 #endif 2383 return NULL; 2384 } 2385 2386 /* 2387 * Increase the hugetlb pool such that it can accommodate a reservation 2388 * of size 'delta'. 2389 */ 2390 static int gather_surplus_pages(struct hstate *h, long delta) 2391 __must_hold(&hugetlb_lock) 2392 { 2393 LIST_HEAD(surplus_list); 2394 struct folio *folio, *tmp; 2395 int ret; 2396 long i; 2397 long needed, allocated; 2398 bool alloc_ok = true; 2399 int node; 2400 nodemask_t *mbind_nodemask, alloc_nodemask; 2401 2402 mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h)); 2403 if (mbind_nodemask) 2404 nodes_and(alloc_nodemask, *mbind_nodemask, cpuset_current_mems_allowed); 2405 else 2406 alloc_nodemask = cpuset_current_mems_allowed; 2407 2408 lockdep_assert_held(&hugetlb_lock); 2409 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; 2410 if (needed <= 0) { 2411 h->resv_huge_pages += delta; 2412 return 0; 2413 } 2414 2415 allocated = 0; 2416 2417 ret = -ENOMEM; 2418 retry: 2419 spin_unlock_irq(&hugetlb_lock); 2420 for (i = 0; i < needed; i++) { 2421 folio = NULL; 2422 2423 /* Prioritize current node */ 2424 if (node_isset(numa_mem_id(), alloc_nodemask)) 2425 folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), 2426 numa_mem_id(), NULL); 2427 2428 if (!folio) { 2429 for_each_node_mask(node, alloc_nodemask) { 2430 if (node == numa_mem_id()) 2431 continue; 2432 folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), 2433 node, NULL); 2434 if (folio) 2435 break; 2436 } 2437 } 2438 if (!folio) { 2439 alloc_ok = false; 2440 break; 2441 } 2442 list_add(&folio->lru, &surplus_list); 2443 cond_resched(); 2444 } 2445 allocated += i; 2446 2447 /* 2448 * After retaking hugetlb_lock, we need to recalculate 'needed' 2449 * because either resv_huge_pages or free_huge_pages may have changed. 2450 */ 2451 spin_lock_irq(&hugetlb_lock); 2452 needed = (h->resv_huge_pages + delta) - 2453 (h->free_huge_pages + allocated); 2454 if (needed > 0) { 2455 if (alloc_ok) 2456 goto retry; 2457 /* 2458 * We were not able to allocate enough pages to 2459 * satisfy the entire reservation so we free what 2460 * we've allocated so far. 2461 */ 2462 goto free; 2463 } 2464 /* 2465 * The surplus_list now contains _at_least_ the number of extra pages 2466 * needed to accommodate the reservation. Add the appropriate number 2467 * of pages to the hugetlb pool and free the extras back to the buddy 2468 * allocator. Commit the entire reservation here to prevent another 2469 * process from stealing the pages as they are added to the pool but 2470 * before they are reserved. 
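 *
 * For instance, if six surplus folios were allocated but only four are still
 * needed after retaking the lock, the first four list entries are enqueued
 * below and the remaining two are returned to the buddy allocator via
 * free_huge_folio().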
2471 */ 2472 needed += allocated; 2473 h->resv_huge_pages += delta; 2474 ret = 0; 2475 2476 /* Free the needed pages to the hugetlb pool */ 2477 list_for_each_entry_safe(folio, tmp, &surplus_list, lru) { 2478 if ((--needed) < 0) 2479 break; 2480 /* Add the page to the hugetlb allocator */ 2481 enqueue_hugetlb_folio(h, folio); 2482 } 2483 free: 2484 spin_unlock_irq(&hugetlb_lock); 2485 2486 /* 2487 * Free unnecessary surplus pages to the buddy allocator. 2488 * Pages have no ref count, call free_huge_folio directly. 2489 */ 2490 list_for_each_entry_safe(folio, tmp, &surplus_list, lru) 2491 free_huge_folio(folio); 2492 spin_lock_irq(&hugetlb_lock); 2493 2494 return ret; 2495 } 2496 2497 /* 2498 * This routine has two main purposes: 2499 * 1) Decrement the reservation count (resv_huge_pages) by the value passed 2500 * in unused_resv_pages. This corresponds to the prior adjustments made 2501 * to the associated reservation map. 2502 * 2) Free any unused surplus pages that may have been allocated to satisfy 2503 * the reservation. As many as unused_resv_pages may be freed. 2504 */ 2505 static void return_unused_surplus_pages(struct hstate *h, 2506 unsigned long unused_resv_pages) 2507 { 2508 unsigned long nr_pages; 2509 LIST_HEAD(page_list); 2510 2511 lockdep_assert_held(&hugetlb_lock); 2512 /* Uncommit the reservation */ 2513 h->resv_huge_pages -= unused_resv_pages; 2514 2515 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 2516 goto out; 2517 2518 /* 2519 * Part (or even all) of the reservation could have been backed 2520 * by pre-allocated pages. Only free surplus pages. 2521 */ 2522 nr_pages = min(unused_resv_pages, h->surplus_huge_pages); 2523 2524 /* 2525 * We want to release as many surplus pages as possible, spread 2526 * evenly across all nodes with memory. Iterate across these nodes 2527 * until we can no longer free unreserved surplus pages. This occurs 2528 * when the nodes with surplus pages have no free pages. 2529 * remove_pool_hugetlb_folio() will balance the freed pages across the 2530 * on-line nodes with memory and will handle the hstate accounting. 2531 */ 2532 while (nr_pages--) { 2533 struct folio *folio; 2534 2535 folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], 1); 2536 if (!folio) 2537 goto out; 2538 2539 list_add(&folio->lru, &page_list); 2540 } 2541 2542 out: 2543 spin_unlock_irq(&hugetlb_lock); 2544 update_and_free_pages_bulk(h, &page_list); 2545 spin_lock_irq(&hugetlb_lock); 2546 } 2547 2548 2549 /* 2550 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation 2551 * are used by the huge page allocation routines to manage reservations. 2552 * 2553 * vma_needs_reservation is called to determine if the huge page at addr 2554 * within the vma has an associated reservation. If a reservation is 2555 * needed, the value 1 is returned. The caller is then responsible for 2556 * managing the global reservation and subpool usage counts. After 2557 * the huge page has been allocated, vma_commit_reservation is called 2558 * to add the page to the reservation map. If the page allocation fails, 2559 * the reservation must be ended instead of committed. vma_end_reservation 2560 * is called in such cases. 2561 * 2562 * In the normal case, vma_commit_reservation returns the same value 2563 * as the preceding vma_needs_reservation call. The only time this 2564 * is not the case is if a reserve map was changed between calls. It 2565 * is the responsibility of the caller to notice the difference and 2566 * take appropriate action. 
2567 * 2568 * vma_add_reservation is used in error paths where a reservation must 2569 * be restored when a newly allocated huge page must be freed. It is 2570 * to be called after calling vma_needs_reservation to determine if a 2571 * reservation exists. 2572 * 2573 * vma_del_reservation is used in error paths where an entry in the reserve 2574 * map was created during huge page allocation and must be removed. It is to 2575 * be called after calling vma_needs_reservation to determine if a reservation 2576 * exists. 2577 */ 2578 enum vma_resv_mode { 2579 VMA_NEEDS_RESV, 2580 VMA_COMMIT_RESV, 2581 VMA_END_RESV, 2582 VMA_ADD_RESV, 2583 VMA_DEL_RESV, 2584 }; 2585 static long __vma_reservation_common(struct hstate *h, 2586 struct vm_area_struct *vma, unsigned long addr, 2587 enum vma_resv_mode mode) 2588 { 2589 struct resv_map *resv; 2590 pgoff_t idx; 2591 long ret; 2592 long dummy_out_regions_needed; 2593 2594 resv = vma_resv_map(vma); 2595 if (!resv) 2596 return 1; 2597 2598 idx = vma_hugecache_offset(h, vma, addr); 2599 switch (mode) { 2600 case VMA_NEEDS_RESV: 2601 ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed); 2602 /* We assume that vma_reservation_* routines always operate on 2603 * 1 page, and that adding to resv map a 1 page entry can only 2604 * ever require 1 region. 2605 */ 2606 VM_BUG_ON(dummy_out_regions_needed != 1); 2607 break; 2608 case VMA_COMMIT_RESV: 2609 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2610 /* region_add calls of range 1 should never fail. */ 2611 VM_BUG_ON(ret < 0); 2612 break; 2613 case VMA_END_RESV: 2614 region_abort(resv, idx, idx + 1, 1); 2615 ret = 0; 2616 break; 2617 case VMA_ADD_RESV: 2618 if (vma->vm_flags & VM_MAYSHARE) { 2619 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2620 /* region_add calls of range 1 should never fail. */ 2621 VM_BUG_ON(ret < 0); 2622 } else { 2623 region_abort(resv, idx, idx + 1, 1); 2624 ret = region_del(resv, idx, idx + 1); 2625 } 2626 break; 2627 case VMA_DEL_RESV: 2628 if (vma->vm_flags & VM_MAYSHARE) { 2629 region_abort(resv, idx, idx + 1, 1); 2630 ret = region_del(resv, idx, idx + 1); 2631 } else { 2632 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2633 /* region_add calls of range 1 should never fail. */ 2634 VM_BUG_ON(ret < 0); 2635 } 2636 break; 2637 default: 2638 BUG(); 2639 } 2640 2641 if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV) 2642 return ret; 2643 /* 2644 * We know private mapping must have HPAGE_RESV_OWNER set. 2645 * 2646 * In most cases, reserves always exist for private mappings. 2647 * However, a file associated with mapping could have been 2648 * hole punched or truncated after reserves were consumed. 2649 * As subsequent fault on such a range will not use reserves. 2650 * Subtle - The reserve map for private mappings has the 2651 * opposite meaning than that of shared mappings. If NO 2652 * entry is in the reserve map, it means a reservation exists. 2653 * If an entry exists in the reserve map, it means the 2654 * reservation has already been consumed. As a result, the 2655 * return value of this routine is the opposite of the 2656 * value returned from reserve map manipulation routines above. 
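 *
 * Concretely, for a private mapping: a positive return from the region
 * routines (no entry yet) is translated to 0 below ('a reservation exists'),
 * while a return of 0 (entry already present) is translated to 1 ('the
 * reservation was already consumed').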
2657 */ 2658 if (ret > 0) 2659 return 0; 2660 if (ret == 0) 2661 return 1; 2662 return ret; 2663 } 2664 2665 static long vma_needs_reservation(struct hstate *h, 2666 struct vm_area_struct *vma, unsigned long addr) 2667 { 2668 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); 2669 } 2670 2671 static long vma_commit_reservation(struct hstate *h, 2672 struct vm_area_struct *vma, unsigned long addr) 2673 { 2674 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); 2675 } 2676 2677 static void vma_end_reservation(struct hstate *h, 2678 struct vm_area_struct *vma, unsigned long addr) 2679 { 2680 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); 2681 } 2682 2683 static long vma_add_reservation(struct hstate *h, 2684 struct vm_area_struct *vma, unsigned long addr) 2685 { 2686 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); 2687 } 2688 2689 static long vma_del_reservation(struct hstate *h, 2690 struct vm_area_struct *vma, unsigned long addr) 2691 { 2692 return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV); 2693 } 2694 2695 /* 2696 * This routine is called to restore reservation information on error paths. 2697 * It should ONLY be called for folios allocated via alloc_hugetlb_folio(), 2698 * and the hugetlb mutex should remain held when calling this routine. 2699 * 2700 * It handles two specific cases: 2701 * 1) A reservation was in place and the folio consumed the reservation. 2702 * hugetlb_restore_reserve is set in the folio. 2703 * 2) No reservation was in place for the page, so hugetlb_restore_reserve is 2704 * not set. However, alloc_hugetlb_folio always updates the reserve map. 2705 * 2706 * In case 1, free_huge_folio later in the error path will increment the 2707 * global reserve count. But, free_huge_folio does not have enough context 2708 * to adjust the reservation map. This case deals primarily with private 2709 * mappings. Adjust the reserve map here to be consistent with global 2710 * reserve count adjustments to be made by free_huge_folio. Make sure the 2711 * reserve map indicates there is a reservation present. 2712 * 2713 * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio. 2714 */ 2715 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, 2716 unsigned long address, struct folio *folio) 2717 { 2718 long rc = vma_needs_reservation(h, vma, address); 2719 2720 if (folio_test_hugetlb_restore_reserve(folio)) { 2721 if (unlikely(rc < 0)) 2722 /* 2723 * Rare out of memory condition in reserve map 2724 * manipulation. Clear hugetlb_restore_reserve so 2725 * that global reserve count will not be incremented 2726 * by free_huge_folio. This will make it appear 2727 * as though the reservation for this folio was 2728 * consumed. This may prevent the task from 2729 * faulting in the folio at a later time. This 2730 * is better than inconsistent global huge page 2731 * accounting of reserve counts. 2732 */ 2733 folio_clear_hugetlb_restore_reserve(folio); 2734 else if (rc) 2735 (void)vma_add_reservation(h, vma, address); 2736 else 2737 vma_end_reservation(h, vma, address); 2738 } else { 2739 if (!rc) { 2740 /* 2741 * This indicates there is an entry in the reserve map 2742 * not added by alloc_hugetlb_folio. We know it was added 2743 * before the alloc_hugetlb_folio call, otherwise 2744 * hugetlb_restore_reserve would be set on the folio. 2745 * Remove the entry so that a subsequent allocation 2746 * does not consume a reservation. 
2747 */ 2748 rc = vma_del_reservation(h, vma, address); 2749 if (rc < 0) 2750 /* 2751 * VERY rare out of memory condition. Since 2752 * we can not delete the entry, set 2753 * hugetlb_restore_reserve so that the reserve 2754 * count will be incremented when the folio 2755 * is freed. This reserve will be consumed 2756 * on a subsequent allocation. 2757 */ 2758 folio_set_hugetlb_restore_reserve(folio); 2759 } else if (rc < 0) { 2760 /* 2761 * Rare out of memory condition from 2762 * vma_needs_reservation call. Memory allocation is 2763 * only attempted if a new entry is needed. Therefore, 2764 * this implies there is not an entry in the 2765 * reserve map. 2766 * 2767 * For shared mappings, no entry in the map indicates 2768 * no reservation. We are done. 2769 */ 2770 if (!(vma->vm_flags & VM_MAYSHARE)) 2771 /* 2772 * For private mappings, no entry indicates 2773 * a reservation is present. Since we can 2774 * not add an entry, set hugetlb_restore_reserve 2775 * on the folio so reserve count will be 2776 * incremented when freed. This reserve will 2777 * be consumed on a subsequent allocation. 2778 */ 2779 folio_set_hugetlb_restore_reserve(folio); 2780 } else 2781 /* 2782 * No reservation present, do nothing 2783 */ 2784 vma_end_reservation(h, vma, address); 2785 } 2786 } 2787 2788 /* 2789 * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve 2790 * the old one 2791 * @h: struct hstate old page belongs to 2792 * @old_folio: Old folio to dissolve 2793 * @list: List to isolate the page in case we need to 2794 * Returns 0 on success, otherwise negated error. 2795 */ 2796 static int alloc_and_dissolve_hugetlb_folio(struct hstate *h, 2797 struct folio *old_folio, struct list_head *list) 2798 { 2799 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 2800 int nid = folio_nid(old_folio); 2801 struct folio *new_folio = NULL; 2802 int ret = 0; 2803 2804 retry: 2805 spin_lock_irq(&hugetlb_lock); 2806 if (!folio_test_hugetlb(old_folio)) { 2807 /* 2808 * Freed from under us. Drop new_folio too. 2809 */ 2810 goto free_new; 2811 } else if (folio_ref_count(old_folio)) { 2812 bool isolated; 2813 2814 /* 2815 * Someone has grabbed the folio, try to isolate it here. 2816 * Fail with -EBUSY if not possible. 2817 */ 2818 spin_unlock_irq(&hugetlb_lock); 2819 isolated = folio_isolate_hugetlb(old_folio, list); 2820 ret = isolated ? 0 : -EBUSY; 2821 spin_lock_irq(&hugetlb_lock); 2822 goto free_new; 2823 } else if (!folio_test_hugetlb_freed(old_folio)) { 2824 /* 2825 * Folio's refcount is 0 but it has not been enqueued in the 2826 * freelist yet. Race window is small, so we can succeed here if 2827 * we retry. 2828 */ 2829 spin_unlock_irq(&hugetlb_lock); 2830 cond_resched(); 2831 goto retry; 2832 } else { 2833 if (!new_folio) { 2834 spin_unlock_irq(&hugetlb_lock); 2835 new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, 2836 NULL, NULL); 2837 if (!new_folio) 2838 return -ENOMEM; 2839 __prep_new_hugetlb_folio(h, new_folio); 2840 goto retry; 2841 } 2842 2843 /* 2844 * Ok, old_folio is still a genuine free hugepage. Remove it from 2845 * the freelist and decrease the counters. These will be 2846 * incremented again when calling __prep_account_new_huge_page() 2847 * and enqueue_hugetlb_folio() for new_folio. The counters will 2848 * remain stable since this happens under the lock. 2849 */ 2850 remove_hugetlb_folio(h, old_folio, false); 2851 2852 /* 2853 * Ref count on new_folio is already zero as it was dropped 2854 * earlier. It can be directly added to the pool free list. 
2855 */ 2856 __prep_account_new_huge_page(h, nid); 2857 enqueue_hugetlb_folio(h, new_folio); 2858 2859 /* 2860 * Folio has been replaced, we can safely free the old one. 2861 */ 2862 spin_unlock_irq(&hugetlb_lock); 2863 update_and_free_hugetlb_folio(h, old_folio, false); 2864 } 2865 2866 return ret; 2867 2868 free_new: 2869 spin_unlock_irq(&hugetlb_lock); 2870 if (new_folio) 2871 update_and_free_hugetlb_folio(h, new_folio, false); 2872 2873 return ret; 2874 } 2875 2876 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) 2877 { 2878 struct hstate *h; 2879 struct folio *folio = page_folio(page); 2880 int ret = -EBUSY; 2881 2882 /* 2883 * The page might have been dissolved from under our feet, so make sure 2884 * to carefully check the state under the lock. 2885 * Return success when racing as if we dissolved the page ourselves. 2886 */ 2887 spin_lock_irq(&hugetlb_lock); 2888 if (folio_test_hugetlb(folio)) { 2889 h = folio_hstate(folio); 2890 } else { 2891 spin_unlock_irq(&hugetlb_lock); 2892 return 0; 2893 } 2894 spin_unlock_irq(&hugetlb_lock); 2895 2896 /* 2897 * Fence off gigantic pages as there is a cyclic dependency between 2898 * alloc_contig_range and them. Return -ENOMEM as this has the effect 2899 * of bailing out right away without further retrying. 2900 */ 2901 if (hstate_is_gigantic(h)) 2902 return -ENOMEM; 2903 2904 if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list)) 2905 ret = 0; 2906 else if (!folio_ref_count(folio)) 2907 ret = alloc_and_dissolve_hugetlb_folio(h, folio, list); 2908 2909 return ret; 2910 } 2911 2912 /* 2913 * replace_free_hugepage_folios - Replace free hugepage folios in a given pfn 2914 * range with new folios. 2915 * @start_pfn: start pfn of the given pfn range 2916 * @end_pfn: end pfn of the given pfn range 2917 * Returns 0 on success, otherwise negated error. 2918 */ 2919 int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn) 2920 { 2921 struct hstate *h; 2922 struct folio *folio; 2923 int ret = 0; 2924 2925 LIST_HEAD(isolate_list); 2926 2927 while (start_pfn < end_pfn) { 2928 folio = pfn_folio(start_pfn); 2929 if (folio_test_hugetlb(folio)) { 2930 h = folio_hstate(folio); 2931 } else { 2932 start_pfn++; 2933 continue; 2934 } 2935 2936 if (!folio_ref_count(folio)) { 2937 ret = alloc_and_dissolve_hugetlb_folio(h, folio, 2938 &isolate_list); 2939 if (ret) 2940 break; 2941 2942 putback_movable_pages(&isolate_list); 2943 } 2944 start_pfn++; 2945 } 2946 2947 return ret; 2948 } 2949 2950 void wait_for_freed_hugetlb_folios(void) 2951 { 2952 if (llist_empty(&hpage_freelist)) 2953 return; 2954 2955 flush_work(&free_hpage_work); 2956 } 2957 2958 typedef enum { 2959 /* 2960 * For either 0/1: we checked the per-vma resv map, and one resv 2961 * count either can be reused (0), or an extra is needed (1). 2962 */ 2963 MAP_CHG_REUSE = 0, 2964 MAP_CHG_NEEDED = 1, 2965 /* 2966 * The per-vma resv count cannot be used, hence a new resv 2967 * count is enforced. 2968 * 2969 * NOTE: This is mostly identical to MAP_CHG_NEEDED, except 2970 * that currently vma_needs_reservation() has the unwanted side 2971 * effect of requiring either end() or commit() to complete the 2972 * transaction. Hence it needs to be differentiated from NEEDED. 2973 */ 2974 MAP_CHG_ENFORCED = 2, 2975 } map_chg_state; 2976 2977 /* 2978 * NOTE!
"cow_from_owner" represents a very hacky usage only used in CoW 2979 * faults of hugetlb private mappings on top of a non-page-cache folio (in 2980 * which case even if there's a private vma resv map it won't cover such 2981 * allocation). New call sites should (probably) never set it to true!! 2982 * When it's set, the allocation will bypass all vma level reservations. 2983 */ 2984 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, 2985 unsigned long addr, bool cow_from_owner) 2986 { 2987 struct hugepage_subpool *spool = subpool_vma(vma); 2988 struct hstate *h = hstate_vma(vma); 2989 struct folio *folio; 2990 long retval, gbl_chg; 2991 map_chg_state map_chg; 2992 int ret, idx; 2993 struct hugetlb_cgroup *h_cg = NULL; 2994 gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL; 2995 2996 idx = hstate_index(h); 2997 2998 /* Whether we need a separate per-vma reservation? */ 2999 if (cow_from_owner) { 3000 /* 3001 * Special case! Since it's a CoW on top of a reserved 3002 * page, the private resv map doesn't count. So it cannot 3003 * consume the per-vma resv map even if it's reserved. 3004 */ 3005 map_chg = MAP_CHG_ENFORCED; 3006 } else { 3007 /* 3008 * Examine the region/reserve map to determine if the process 3009 * has a reservation for the page to be allocated. A return 3010 * code of zero indicates a reservation exists (no change). 3011 */ 3012 retval = vma_needs_reservation(h, vma, addr); 3013 if (retval < 0) 3014 return ERR_PTR(-ENOMEM); 3015 map_chg = retval ? MAP_CHG_NEEDED : MAP_CHG_REUSE; 3016 } 3017 3018 /* 3019 * Whether we need a separate global reservation? 3020 * 3021 * Processes that did not create the mapping will have no 3022 * reserves as indicated by the region/reserve map. Check 3023 * that the allocation will not exceed the subpool limit. 3024 * Or if it can get one from the pool reservation directly. 3025 */ 3026 if (map_chg) { 3027 gbl_chg = hugepage_subpool_get_pages(spool, 1); 3028 if (gbl_chg < 0) 3029 goto out_end_reservation; 3030 } else { 3031 /* 3032 * If we have the vma reservation ready, no need for extra 3033 * global reservation. 3034 */ 3035 gbl_chg = 0; 3036 } 3037 3038 /* 3039 * If this allocation is not consuming a per-vma reservation, 3040 * charge the hugetlb cgroup now. 3041 */ 3042 if (map_chg) { 3043 ret = hugetlb_cgroup_charge_cgroup_rsvd( 3044 idx, pages_per_huge_page(h), &h_cg); 3045 if (ret) 3046 goto out_subpool_put; 3047 } 3048 3049 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); 3050 if (ret) 3051 goto out_uncharge_cgroup_reservation; 3052 3053 spin_lock_irq(&hugetlb_lock); 3054 /* 3055 * glb_chg is passed to indicate whether or not a page must be taken 3056 * from the global free pool (global change). gbl_chg == 0 indicates 3057 * a reservation exists for the allocation. 3058 */ 3059 folio = dequeue_hugetlb_folio_vma(h, vma, addr, gbl_chg); 3060 if (!folio) { 3061 spin_unlock_irq(&hugetlb_lock); 3062 folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr); 3063 if (!folio) 3064 goto out_uncharge_cgroup; 3065 spin_lock_irq(&hugetlb_lock); 3066 list_add(&folio->lru, &h->hugepage_activelist); 3067 folio_ref_unfreeze(folio, 1); 3068 /* Fall through */ 3069 } 3070 3071 /* 3072 * Either dequeued or buddy-allocated folio needs to add special 3073 * mark to the folio when it consumes a global reservation. 
3074 */ 3075 if (!gbl_chg) { 3076 folio_set_hugetlb_restore_reserve(folio); 3077 h->resv_huge_pages--; 3078 } 3079 3080 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio); 3081 /* If allocation is not consuming a reservation, also store the 3082 * hugetlb_cgroup pointer on the page. 3083 */ 3084 if (map_chg) { 3085 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h), 3086 h_cg, folio); 3087 } 3088 3089 spin_unlock_irq(&hugetlb_lock); 3090 3091 hugetlb_set_folio_subpool(folio, spool); 3092 3093 if (map_chg != MAP_CHG_ENFORCED) { 3094 /* commit() is only needed if the map_chg is not enforced */ 3095 retval = vma_commit_reservation(h, vma, addr); 3096 /* 3097 * Check for possible race conditions. When it happens.. 3098 * The page was added to the reservation map between 3099 * vma_needs_reservation and vma_commit_reservation. 3100 * This indicates a race with hugetlb_reserve_pages. 3101 * Adjust for the subpool count incremented above AND 3102 * in hugetlb_reserve_pages for the same page. Also, 3103 * the reservation count added in hugetlb_reserve_pages 3104 * no longer applies. 3105 */ 3106 if (unlikely(map_chg == MAP_CHG_NEEDED && retval == 0)) { 3107 long rsv_adjust; 3108 3109 rsv_adjust = hugepage_subpool_put_pages(spool, 1); 3110 hugetlb_acct_memory(h, -rsv_adjust); 3111 if (map_chg) { 3112 spin_lock_irq(&hugetlb_lock); 3113 hugetlb_cgroup_uncharge_folio_rsvd( 3114 hstate_index(h), pages_per_huge_page(h), 3115 folio); 3116 spin_unlock_irq(&hugetlb_lock); 3117 } 3118 } 3119 } 3120 3121 ret = mem_cgroup_charge_hugetlb(folio, gfp); 3122 /* 3123 * Unconditionally increment NR_HUGETLB here. If it turns out that 3124 * mem_cgroup_charge_hugetlb failed, then immediately free the page and 3125 * decrement NR_HUGETLB. 3126 */ 3127 lruvec_stat_mod_folio(folio, NR_HUGETLB, pages_per_huge_page(h)); 3128 3129 if (ret == -ENOMEM) { 3130 free_huge_folio(folio); 3131 return ERR_PTR(-ENOMEM); 3132 } 3133 3134 return folio; 3135 3136 out_uncharge_cgroup: 3137 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); 3138 out_uncharge_cgroup_reservation: 3139 if (map_chg) 3140 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h), 3141 h_cg); 3142 out_subpool_put: 3143 if (map_chg) 3144 hugepage_subpool_put_pages(spool, 1); 3145 out_end_reservation: 3146 if (map_chg != MAP_CHG_ENFORCED) 3147 vma_end_reservation(h, vma, addr); 3148 return ERR_PTR(-ENOSPC); 3149 } 3150 3151 int alloc_bootmem_huge_page(struct hstate *h, int nid) 3152 __attribute__ ((weak, alias("__alloc_bootmem_huge_page"))); 3153 int __alloc_bootmem_huge_page(struct hstate *h, int nid) 3154 { 3155 struct huge_bootmem_page *m = NULL; /* initialize for clang */ 3156 int nr_nodes, node = nid; 3157 3158 /* do node specific alloc */ 3159 if (nid != NUMA_NO_NODE) { 3160 m = memblock_alloc_exact_nid_raw(huge_page_size(h), huge_page_size(h), 3161 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid); 3162 if (!m) 3163 return 0; 3164 goto found; 3165 } 3166 /* allocate from next node when distributing huge pages */ 3167 for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, &node_states[N_MEMORY]) { 3168 m = memblock_alloc_try_nid_raw( 3169 huge_page_size(h), huge_page_size(h), 3170 0, MEMBLOCK_ALLOC_ACCESSIBLE, node); 3171 /* 3172 * Use the beginning of the huge page to store the 3173 * huge_bootmem_page struct (until gather_bootmem 3174 * puts them into the mem_map). 
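 * gather_bootmem_prealloc_node() later walks huge_boot_pages[node], turns
 * each entry back into a folio via hugetlb_folio_init_vmemmap() and
 * init_new_hugetlb_folio(), and enqueues it into the pool.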
3175 */ 3176 if (!m) 3177 return 0; 3178 goto found; 3179 } 3180 3181 found: 3182 3183 /* 3184 * Only initialize the head struct page in memmap_init_reserved_pages, 3185 * rest of the struct pages will be initialized by the HugeTLB 3186 * subsystem itself. 3187 * The head struct page is used to get folio information by the HugeTLB 3188 * subsystem like zone id and node id. 3189 */ 3190 memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE), 3191 huge_page_size(h) - PAGE_SIZE); 3192 /* Put them into a private list first because mem_map is not up yet */ 3193 INIT_LIST_HEAD(&m->list); 3194 list_add(&m->list, &huge_boot_pages[node]); 3195 m->hstate = h; 3196 return 1; 3197 } 3198 3199 /* Initialize [start_page:end_page_number] tail struct pages of a hugepage */ 3200 static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio, 3201 unsigned long start_page_number, 3202 unsigned long end_page_number) 3203 { 3204 enum zone_type zone = zone_idx(folio_zone(folio)); 3205 int nid = folio_nid(folio); 3206 unsigned long head_pfn = folio_pfn(folio); 3207 unsigned long pfn, end_pfn = head_pfn + end_page_number; 3208 int ret; 3209 3210 for (pfn = head_pfn + start_page_number; pfn < end_pfn; pfn++) { 3211 struct page *page = pfn_to_page(pfn); 3212 3213 __ClearPageReserved(folio_page(folio, pfn - head_pfn)); 3214 __init_single_page(page, pfn, zone, nid); 3215 prep_compound_tail((struct page *)folio, pfn - head_pfn); 3216 ret = page_ref_freeze(page, 1); 3217 VM_BUG_ON(!ret); 3218 } 3219 } 3220 3221 static void __init hugetlb_folio_init_vmemmap(struct folio *folio, 3222 struct hstate *h, 3223 unsigned long nr_pages) 3224 { 3225 int ret; 3226 3227 /* Prepare folio head */ 3228 __folio_clear_reserved(folio); 3229 __folio_set_head(folio); 3230 ret = folio_ref_freeze(folio, 1); 3231 VM_BUG_ON(!ret); 3232 /* Initialize the necessary tail struct pages */ 3233 hugetlb_folio_init_tail_vmemmap(folio, 1, nr_pages); 3234 prep_compound_head((struct page *)folio, huge_page_order(h)); 3235 } 3236 3237 static void __init prep_and_add_bootmem_folios(struct hstate *h, 3238 struct list_head *folio_list) 3239 { 3240 unsigned long flags; 3241 struct folio *folio, *tmp_f; 3242 3243 /* Send list for bulk vmemmap optimization processing */ 3244 hugetlb_vmemmap_optimize_folios(h, folio_list); 3245 3246 list_for_each_entry_safe(folio, tmp_f, folio_list, lru) { 3247 if (!folio_test_hugetlb_vmemmap_optimized(folio)) { 3248 /* 3249 * If HVO fails, initialize all tail struct pages 3250 * We do not worry about potential long lock hold 3251 * time as this is early in boot and there should 3252 * be no contention. 3253 */ 3254 hugetlb_folio_init_tail_vmemmap(folio, 3255 HUGETLB_VMEMMAP_RESERVE_PAGES, 3256 pages_per_huge_page(h)); 3257 } 3258 /* Subdivide locks to achieve better parallel performance */ 3259 spin_lock_irqsave(&hugetlb_lock, flags); 3260 __prep_account_new_huge_page(h, folio_nid(folio)); 3261 enqueue_hugetlb_folio(h, folio); 3262 spin_unlock_irqrestore(&hugetlb_lock, flags); 3263 } 3264 } 3265 3266 /* 3267 * Put bootmem huge pages into the standard lists after mem_map is up. 3268 * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages. 
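 * Smaller hugetlb sizes never take this path: hugetlb_hstate_alloc_pages()
 * sends them through the buddy-based hugetlb_pages_alloc_boot() instead.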
3269 */ 3270 static void __init gather_bootmem_prealloc_node(unsigned long nid) 3271 { 3272 LIST_HEAD(folio_list); 3273 struct huge_bootmem_page *m; 3274 struct hstate *h = NULL, *prev_h = NULL; 3275 3276 list_for_each_entry(m, &huge_boot_pages[nid], list) { 3277 struct page *page = virt_to_page(m); 3278 struct folio *folio = (void *)page; 3279 3280 h = m->hstate; 3281 /* 3282 * It is possible to have multiple huge page sizes (hstates) 3283 * in this list. If so, process each size separately. 3284 */ 3285 if (h != prev_h && prev_h != NULL) 3286 prep_and_add_bootmem_folios(prev_h, &folio_list); 3287 prev_h = h; 3288 3289 VM_BUG_ON(!hstate_is_gigantic(h)); 3290 WARN_ON(folio_ref_count(folio) != 1); 3291 3292 hugetlb_folio_init_vmemmap(folio, h, 3293 HUGETLB_VMEMMAP_RESERVE_PAGES); 3294 init_new_hugetlb_folio(h, folio); 3295 list_add(&folio->lru, &folio_list); 3296 3297 /* 3298 * We need to restore the 'stolen' pages to totalram_pages 3299 * in order to fix confusing memory reports from free(1) and 3300 * other side-effects, like CommitLimit going negative. 3301 */ 3302 adjust_managed_page_count(page, pages_per_huge_page(h)); 3303 cond_resched(); 3304 } 3305 3306 prep_and_add_bootmem_folios(h, &folio_list); 3307 } 3308 3309 static void __init gather_bootmem_prealloc_parallel(unsigned long start, 3310 unsigned long end, void *arg) 3311 { 3312 int nid; 3313 3314 for (nid = start; nid < end; nid++) 3315 gather_bootmem_prealloc_node(nid); 3316 } 3317 3318 static void __init gather_bootmem_prealloc(void) 3319 { 3320 struct padata_mt_job job = { 3321 .thread_fn = gather_bootmem_prealloc_parallel, 3322 .fn_arg = NULL, 3323 .start = 0, 3324 .size = nr_node_ids, 3325 .align = 1, 3326 .min_chunk = 1, 3327 .max_threads = num_node_state(N_MEMORY), 3328 .numa_aware = true, 3329 }; 3330 3331 padata_do_multithreaded(&job); 3332 } 3333 3334 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid) 3335 { 3336 unsigned long i; 3337 char buf[32]; 3338 LIST_HEAD(folio_list); 3339 3340 for (i = 0; i < h->max_huge_pages_node[nid]; ++i) { 3341 if (hstate_is_gigantic(h)) { 3342 if (!alloc_bootmem_huge_page(h, nid)) 3343 break; 3344 } else { 3345 struct folio *folio; 3346 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 3347 3348 folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, 3349 &node_states[N_MEMORY], NULL); 3350 if (!folio) 3351 break; 3352 list_add(&folio->lru, &folio_list); 3353 } 3354 cond_resched(); 3355 } 3356 3357 if (!list_empty(&folio_list)) 3358 prep_and_add_allocated_folios(h, &folio_list); 3359 3360 if (i == h->max_huge_pages_node[nid]) 3361 return; 3362 3363 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3364 pr_warn("HugeTLB: allocating %u of page size %s failed node%d. 
Only allocated %lu hugepages.\n", 3365 h->max_huge_pages_node[nid], buf, nid, i); 3366 h->max_huge_pages -= (h->max_huge_pages_node[nid] - i); 3367 h->max_huge_pages_node[nid] = i; 3368 } 3369 3370 static bool __init hugetlb_hstate_alloc_pages_specific_nodes(struct hstate *h) 3371 { 3372 int i; 3373 bool node_specific_alloc = false; 3374 3375 for_each_online_node(i) { 3376 if (h->max_huge_pages_node[i] > 0) { 3377 hugetlb_hstate_alloc_pages_onenode(h, i); 3378 node_specific_alloc = true; 3379 } 3380 } 3381 3382 return node_specific_alloc; 3383 } 3384 3385 static void __init hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated, struct hstate *h) 3386 { 3387 if (allocated < h->max_huge_pages) { 3388 char buf[32]; 3389 3390 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3391 pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n", 3392 h->max_huge_pages, buf, allocated); 3393 h->max_huge_pages = allocated; 3394 } 3395 } 3396 3397 static void __init hugetlb_pages_alloc_boot_node(unsigned long start, unsigned long end, void *arg) 3398 { 3399 struct hstate *h = (struct hstate *)arg; 3400 int i, num = end - start; 3401 nodemask_t node_alloc_noretry; 3402 LIST_HEAD(folio_list); 3403 int next_node = first_online_node; 3404 3405 /* Bit mask controlling how hard we retry per-node allocations.*/ 3406 nodes_clear(node_alloc_noretry); 3407 3408 for (i = 0; i < num; ++i) { 3409 struct folio *folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY], 3410 &node_alloc_noretry, &next_node); 3411 if (!folio) 3412 break; 3413 3414 list_move(&folio->lru, &folio_list); 3415 cond_resched(); 3416 } 3417 3418 prep_and_add_allocated_folios(h, &folio_list); 3419 } 3420 3421 static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h) 3422 { 3423 unsigned long i; 3424 3425 for (i = 0; i < h->max_huge_pages; ++i) { 3426 if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE)) 3427 break; 3428 cond_resched(); 3429 } 3430 3431 return i; 3432 } 3433 3434 static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h) 3435 { 3436 struct padata_mt_job job = { 3437 .fn_arg = h, 3438 .align = 1, 3439 .numa_aware = true 3440 }; 3441 3442 job.thread_fn = hugetlb_pages_alloc_boot_node; 3443 job.start = 0; 3444 job.size = h->max_huge_pages; 3445 3446 /* 3447 * job.max_threads is twice the num_node_state(N_MEMORY), 3448 * 3449 * Tests below indicate that a multiplier of 2 significantly improves 3450 * performance, and although larger values also provide improvements, 3451 * the gains are marginal. 3452 * 3453 * Therefore, choosing 2 as the multiplier strikes a good balance between 3454 * enhancing parallel processing capabilities and maintaining efficient 3455 * resource management. 3456 * 3457 * +------------+-------+-------+-------+-------+-------+ 3458 * | multiplier | 1 | 2 | 3 | 4 | 5 | 3459 * +------------+-------+-------+-------+-------+-------+ 3460 * | 256G 2node | 358ms | 215ms | 157ms | 134ms | 126ms | 3461 * | 2T 4node | 979ms | 679ms | 543ms | 489ms | 481ms | 3462 * | 50G 2node | 71ms | 44ms | 37ms | 30ms | 31ms | 3463 * +------------+-------+-------+-------+-------+-------+ 3464 */ 3465 job.max_threads = num_node_state(N_MEMORY) * 2; 3466 job.min_chunk = h->max_huge_pages / num_node_state(N_MEMORY) / 2; 3467 padata_do_multithreaded(&job); 3468 3469 return h->nr_huge_pages; 3470 } 3471 3472 /* 3473 * NOTE: this routine is called in different contexts for gigantic and 3474 * non-gigantic pages. 
3475 * - For gigantic pages, this is called early in the boot process and 3476 * pages are allocated from memblock or something similar. 3477 * Gigantic pages are actually added to pools later with the routine 3478 * gather_bootmem_prealloc. 3479 * - For non-gigantic pages, this is called later in the boot process after 3480 * all of mm is up and functional. Pages are allocated from buddy and 3481 * then added to hugetlb pools. 3482 */ 3483 static void __init hugetlb_hstate_alloc_pages(struct hstate *h) 3484 { 3485 unsigned long allocated; 3486 static bool initialized __initdata; 3487 3488 /* skip gigantic hugepages allocation if hugetlb_cma enabled */ 3489 if (hstate_is_gigantic(h) && hugetlb_cma_size) { 3490 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n"); 3491 return; 3492 } 3493 3494 /* hugetlb_hstate_alloc_pages will be called many times, initialize huge_boot_pages once */ 3495 if (!initialized) { 3496 int i = 0; 3497 3498 for (i = 0; i < MAX_NUMNODES; i++) 3499 INIT_LIST_HEAD(&huge_boot_pages[i]); 3500 initialized = true; 3501 } 3502 3503 /* do node specific alloc */ 3504 if (hugetlb_hstate_alloc_pages_specific_nodes(h)) 3505 return; 3506 3507 /* below will do all node balanced alloc */ 3508 if (hstate_is_gigantic(h)) 3509 allocated = hugetlb_gigantic_pages_alloc_boot(h); 3510 else 3511 allocated = hugetlb_pages_alloc_boot(h); 3512 3513 hugetlb_hstate_alloc_pages_errcheck(allocated, h); 3514 } 3515 3516 static void __init hugetlb_init_hstates(void) 3517 { 3518 struct hstate *h, *h2; 3519 3520 for_each_hstate(h) { 3521 /* oversize hugepages were init'ed in early boot */ 3522 if (!hstate_is_gigantic(h)) 3523 hugetlb_hstate_alloc_pages(h); 3524 3525 /* 3526 * Set demote order for each hstate. Note that 3527 * h->demote_order is initially 0. 3528 * - We can not demote gigantic pages if runtime freeing 3529 * is not supported, so skip this. 3530 * - If CMA allocation is possible, we can not demote 3531 * HUGETLB_PAGE_ORDER or smaller size pages.
3532 */ 3533 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 3534 continue; 3535 if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER) 3536 continue; 3537 for_each_hstate(h2) { 3538 if (h2 == h) 3539 continue; 3540 if (h2->order < h->order && 3541 h2->order > h->demote_order) 3542 h->demote_order = h2->order; 3543 } 3544 } 3545 } 3546 3547 static void __init report_hugepages(void) 3548 { 3549 struct hstate *h; 3550 3551 for_each_hstate(h) { 3552 char buf[32]; 3553 3554 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3555 pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n", 3556 buf, h->free_huge_pages); 3557 pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n", 3558 hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf); 3559 } 3560 } 3561 3562 #ifdef CONFIG_HIGHMEM 3563 static void try_to_free_low(struct hstate *h, unsigned long count, 3564 nodemask_t *nodes_allowed) 3565 { 3566 int i; 3567 LIST_HEAD(page_list); 3568 3569 lockdep_assert_held(&hugetlb_lock); 3570 if (hstate_is_gigantic(h)) 3571 return; 3572 3573 /* 3574 * Collect pages to be freed on a list, and free after dropping lock 3575 */ 3576 for_each_node_mask(i, *nodes_allowed) { 3577 struct folio *folio, *next; 3578 struct list_head *freel = &h->hugepage_freelists[i]; 3579 list_for_each_entry_safe(folio, next, freel, lru) { 3580 if (count >= h->nr_huge_pages) 3581 goto out; 3582 if (folio_test_highmem(folio)) 3583 continue; 3584 remove_hugetlb_folio(h, folio, false); 3585 list_add(&folio->lru, &page_list); 3586 } 3587 } 3588 3589 out: 3590 spin_unlock_irq(&hugetlb_lock); 3591 update_and_free_pages_bulk(h, &page_list); 3592 spin_lock_irq(&hugetlb_lock); 3593 } 3594 #else 3595 static inline void try_to_free_low(struct hstate *h, unsigned long count, 3596 nodemask_t *nodes_allowed) 3597 { 3598 } 3599 #endif 3600 3601 /* 3602 * Increment or decrement surplus_huge_pages. Keep node-specific counters 3603 * balanced by operating on them in a round-robin fashion. 3604 * Returns 1 if an adjustment was made. 3605 */ 3606 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, 3607 int delta) 3608 { 3609 int nr_nodes, node; 3610 3611 lockdep_assert_held(&hugetlb_lock); 3612 VM_BUG_ON(delta != -1 && delta != 1); 3613 3614 if (delta < 0) { 3615 for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, nodes_allowed) { 3616 if (h->surplus_huge_pages_node[node]) 3617 goto found; 3618 } 3619 } else { 3620 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 3621 if (h->surplus_huge_pages_node[node] < 3622 h->nr_huge_pages_node[node]) 3623 goto found; 3624 } 3625 } 3626 return 0; 3627 3628 found: 3629 h->surplus_huge_pages += delta; 3630 h->surplus_huge_pages_node[node] += delta; 3631 return 1; 3632 } 3633 3634 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) 3635 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, 3636 nodemask_t *nodes_allowed) 3637 { 3638 unsigned long min_count; 3639 unsigned long allocated; 3640 struct folio *folio; 3641 LIST_HEAD(page_list); 3642 NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL); 3643 3644 /* 3645 * Bit mask controlling how hard we retry per-node allocations. 3646 * If we can not allocate the bit mask, do not attempt to allocate 3647 * the requested huge pages. 
3648 */ 3649 if (node_alloc_noretry) 3650 nodes_clear(*node_alloc_noretry); 3651 else 3652 return -ENOMEM; 3653 3654 /* 3655 * resize_lock mutex prevents concurrent adjustments to number of 3656 * pages in hstate via the proc/sysfs interfaces. 3657 */ 3658 mutex_lock(&h->resize_lock); 3659 flush_free_hpage_work(h); 3660 spin_lock_irq(&hugetlb_lock); 3661 3662 /* 3663 * Check for a node specific request. 3664 * Changing node specific huge page count may require a corresponding 3665 * change to the global count. In any case, the passed node mask 3666 * (nodes_allowed) will restrict alloc/free to the specified node. 3667 */ 3668 if (nid != NUMA_NO_NODE) { 3669 unsigned long old_count = count; 3670 3671 count += persistent_huge_pages(h) - 3672 (h->nr_huge_pages_node[nid] - 3673 h->surplus_huge_pages_node[nid]); 3674 /* 3675 * User may have specified a large count value which caused the 3676 * above calculation to overflow. In this case, they wanted 3677 * to allocate as many huge pages as possible. Set count to 3678 * largest possible value to align with their intention. 3679 */ 3680 if (count < old_count) 3681 count = ULONG_MAX; 3682 } 3683 3684 /* 3685 * Gigantic pages runtime allocation depend on the capability for large 3686 * page range allocation. 3687 * If the system does not provide this feature, return an error when 3688 * the user tries to allocate gigantic pages but let the user free the 3689 * boottime allocated gigantic pages. 3690 */ 3691 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) { 3692 if (count > persistent_huge_pages(h)) { 3693 spin_unlock_irq(&hugetlb_lock); 3694 mutex_unlock(&h->resize_lock); 3695 NODEMASK_FREE(node_alloc_noretry); 3696 return -EINVAL; 3697 } 3698 /* Fall through to decrease pool */ 3699 } 3700 3701 /* 3702 * Increase the pool size 3703 * First take pages out of surplus state. Then make up the 3704 * remaining difference by allocating fresh huge pages. 3705 * 3706 * We might race with alloc_surplus_hugetlb_folio() here and be unable 3707 * to convert a surplus huge page to a normal huge page. That is 3708 * not critical, though, it just means the overall size of the 3709 * pool might be one hugepage larger than it needs to be, but 3710 * within all the constraints specified by the sysctls. 3711 */ 3712 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { 3713 if (!adjust_pool_surplus(h, nodes_allowed, -1)) 3714 break; 3715 } 3716 3717 allocated = 0; 3718 while (count > (persistent_huge_pages(h) + allocated)) { 3719 /* 3720 * If this allocation races such that we no longer need the 3721 * page, free_huge_folio will handle it by freeing the page 3722 * and reducing the surplus. 3723 */ 3724 spin_unlock_irq(&hugetlb_lock); 3725 3726 /* yield cpu to avoid soft lockup */ 3727 cond_resched(); 3728 3729 folio = alloc_pool_huge_folio(h, nodes_allowed, 3730 node_alloc_noretry, 3731 &h->next_nid_to_alloc); 3732 if (!folio) { 3733 prep_and_add_allocated_folios(h, &page_list); 3734 spin_lock_irq(&hugetlb_lock); 3735 goto out; 3736 } 3737 3738 list_add(&folio->lru, &page_list); 3739 allocated++; 3740 3741 /* Bail for signals. 
Probably ctrl-c from user */ 3742 if (signal_pending(current)) { 3743 prep_and_add_allocated_folios(h, &page_list); 3744 spin_lock_irq(&hugetlb_lock); 3745 goto out; 3746 } 3747 3748 spin_lock_irq(&hugetlb_lock); 3749 } 3750 3751 /* Add allocated pages to the pool */ 3752 if (!list_empty(&page_list)) { 3753 spin_unlock_irq(&hugetlb_lock); 3754 prep_and_add_allocated_folios(h, &page_list); 3755 spin_lock_irq(&hugetlb_lock); 3756 } 3757 3758 /* 3759 * Decrease the pool size 3760 * First return free pages to the buddy allocator (being careful 3761 * to keep enough around to satisfy reservations). Then place 3762 * pages into surplus state as needed so the pool will shrink 3763 * to the desired size as pages become free. 3764 * 3765 * By placing pages into the surplus state independent of the 3766 * overcommit value, we are allowing the surplus pool size to 3767 * exceed overcommit. There are few sane options here. Since 3768 * alloc_surplus_hugetlb_folio() is checking the global counter, 3769 * though, we'll note that we're not allowed to exceed surplus 3770 * and won't grow the pool anywhere else. Not until one of the 3771 * sysctls are changed, or the surplus pages go out of use. 3772 */ 3773 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; 3774 min_count = max(count, min_count); 3775 try_to_free_low(h, min_count, nodes_allowed); 3776 3777 /* 3778 * Collect pages to be removed on list without dropping lock 3779 */ 3780 while (min_count < persistent_huge_pages(h)) { 3781 folio = remove_pool_hugetlb_folio(h, nodes_allowed, 0); 3782 if (!folio) 3783 break; 3784 3785 list_add(&folio->lru, &page_list); 3786 } 3787 /* free the pages after dropping lock */ 3788 spin_unlock_irq(&hugetlb_lock); 3789 update_and_free_pages_bulk(h, &page_list); 3790 flush_free_hpage_work(h); 3791 spin_lock_irq(&hugetlb_lock); 3792 3793 while (count < persistent_huge_pages(h)) { 3794 if (!adjust_pool_surplus(h, nodes_allowed, 1)) 3795 break; 3796 } 3797 out: 3798 h->max_huge_pages = persistent_huge_pages(h); 3799 spin_unlock_irq(&hugetlb_lock); 3800 mutex_unlock(&h->resize_lock); 3801 3802 NODEMASK_FREE(node_alloc_noretry); 3803 3804 return 0; 3805 } 3806 3807 static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst, 3808 struct list_head *src_list) 3809 { 3810 long rc; 3811 struct folio *folio, *next; 3812 LIST_HEAD(dst_list); 3813 LIST_HEAD(ret_list); 3814 3815 rc = hugetlb_vmemmap_restore_folios(src, src_list, &ret_list); 3816 list_splice_init(&ret_list, src_list); 3817 3818 /* 3819 * Taking target hstate mutex synchronizes with set_max_huge_pages. 3820 * Without the mutex, pages added to target hstate could be marked 3821 * as surplus. 3822 * 3823 * Note that we already hold src->resize_lock. To prevent deadlock, 3824 * use the convention of always taking larger size hstate mutex first. 
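 * Demotion only ever goes from a larger hstate (src) to a smaller one (dst),
 * so the src->resize_lock our caller already holds is the larger-size mutex
 * and taking dst->resize_lock here preserves that ordering.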
3825 */ 3826 mutex_lock(&dst->resize_lock); 3827 3828 list_for_each_entry_safe(folio, next, src_list, lru) { 3829 int i; 3830 3831 if (folio_test_hugetlb_vmemmap_optimized(folio)) 3832 continue; 3833 3834 list_del(&folio->lru); 3835 3836 split_page_owner(&folio->page, huge_page_order(src), huge_page_order(dst)); 3837 pgalloc_tag_split(folio, huge_page_order(src), huge_page_order(dst)); 3838 3839 for (i = 0; i < pages_per_huge_page(src); i += pages_per_huge_page(dst)) { 3840 struct page *page = folio_page(folio, i); 3841 /* Careful: see __split_huge_page_tail() */ 3842 struct folio *new_folio = (struct folio *)page; 3843 3844 clear_compound_head(page); 3845 prep_compound_page(page, dst->order); 3846 3847 new_folio->mapping = NULL; 3848 init_new_hugetlb_folio(dst, new_folio); 3849 list_add(&new_folio->lru, &dst_list); 3850 } 3851 } 3852 3853 prep_and_add_allocated_folios(dst, &dst_list); 3854 3855 mutex_unlock(&dst->resize_lock); 3856 3857 return rc; 3858 } 3859 3860 static long demote_pool_huge_page(struct hstate *src, nodemask_t *nodes_allowed, 3861 unsigned long nr_to_demote) 3862 __must_hold(&hugetlb_lock) 3863 { 3864 int nr_nodes, node; 3865 struct hstate *dst; 3866 long rc = 0; 3867 long nr_demoted = 0; 3868 3869 lockdep_assert_held(&hugetlb_lock); 3870 3871 /* We should never get here if no demote order */ 3872 if (!src->demote_order) { 3873 pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n"); 3874 return -EINVAL; /* internal error */ 3875 } 3876 dst = size_to_hstate(PAGE_SIZE << src->demote_order); 3877 3878 for_each_node_mask_to_free(src, nr_nodes, node, nodes_allowed) { 3879 LIST_HEAD(list); 3880 struct folio *folio, *next; 3881 3882 list_for_each_entry_safe(folio, next, &src->hugepage_freelists[node], lru) { 3883 if (folio_test_hwpoison(folio)) 3884 continue; 3885 3886 remove_hugetlb_folio(src, folio, false); 3887 list_add(&folio->lru, &list); 3888 3889 if (++nr_demoted == nr_to_demote) 3890 break; 3891 } 3892 3893 spin_unlock_irq(&hugetlb_lock); 3894 3895 rc = demote_free_hugetlb_folios(src, dst, &list); 3896 3897 spin_lock_irq(&hugetlb_lock); 3898 3899 list_for_each_entry_safe(folio, next, &list, lru) { 3900 list_del(&folio->lru); 3901 add_hugetlb_folio(src, folio, false); 3902 3903 nr_demoted--; 3904 } 3905 3906 if (rc < 0 || nr_demoted == nr_to_demote) 3907 break; 3908 } 3909 3910 /* 3911 * Not absolutely necessary, but for consistency update max_huge_pages 3912 * based on pool changes for the demoted page. 3913 */ 3914 src->max_huge_pages -= nr_demoted; 3915 dst->max_huge_pages += nr_demoted << (huge_page_order(src) - huge_page_order(dst)); 3916 3917 if (rc < 0) 3918 return rc; 3919 3920 if (nr_demoted) 3921 return nr_demoted; 3922 /* 3923 * Only way to get here is if all pages on free lists are poisoned. 3924 * Return -EBUSY so that caller will not retry. 
3925 */ 3926 return -EBUSY; 3927 } 3928 3929 #define HSTATE_ATTR_RO(_name) \ 3930 static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 3931 3932 #define HSTATE_ATTR_WO(_name) \ 3933 static struct kobj_attribute _name##_attr = __ATTR_WO(_name) 3934 3935 #define HSTATE_ATTR(_name) \ 3936 static struct kobj_attribute _name##_attr = __ATTR_RW(_name) 3937 3938 static struct kobject *hugepages_kobj; 3939 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 3940 3941 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); 3942 3943 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) 3944 { 3945 int i; 3946 3947 for (i = 0; i < HUGE_MAX_HSTATE; i++) 3948 if (hstate_kobjs[i] == kobj) { 3949 if (nidp) 3950 *nidp = NUMA_NO_NODE; 3951 return &hstates[i]; 3952 } 3953 3954 return kobj_to_node_hstate(kobj, nidp); 3955 } 3956 3957 static ssize_t nr_hugepages_show_common(struct kobject *kobj, 3958 struct kobj_attribute *attr, char *buf) 3959 { 3960 struct hstate *h; 3961 unsigned long nr_huge_pages; 3962 int nid; 3963 3964 h = kobj_to_hstate(kobj, &nid); 3965 if (nid == NUMA_NO_NODE) 3966 nr_huge_pages = h->nr_huge_pages; 3967 else 3968 nr_huge_pages = h->nr_huge_pages_node[nid]; 3969 3970 return sysfs_emit(buf, "%lu\n", nr_huge_pages); 3971 } 3972 3973 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy, 3974 struct hstate *h, int nid, 3975 unsigned long count, size_t len) 3976 { 3977 int err; 3978 nodemask_t nodes_allowed, *n_mask; 3979 3980 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 3981 return -EINVAL; 3982 3983 if (nid == NUMA_NO_NODE) { 3984 /* 3985 * global hstate attribute 3986 */ 3987 if (!(obey_mempolicy && 3988 init_nodemask_of_mempolicy(&nodes_allowed))) 3989 n_mask = &node_states[N_MEMORY]; 3990 else 3991 n_mask = &nodes_allowed; 3992 } else { 3993 /* 3994 * Node specific request. count adjustment happens in 3995 * set_max_huge_pages() after acquiring hugetlb_lock. 3996 */ 3997 init_nodemask_of_node(&nodes_allowed, nid); 3998 n_mask = &nodes_allowed; 3999 } 4000 4001 err = set_max_huge_pages(h, count, nid, n_mask); 4002 4003 return err ? err : len; 4004 } 4005 4006 static ssize_t nr_hugepages_store_common(bool obey_mempolicy, 4007 struct kobject *kobj, const char *buf, 4008 size_t len) 4009 { 4010 struct hstate *h; 4011 unsigned long count; 4012 int nid; 4013 int err; 4014 4015 err = kstrtoul(buf, 10, &count); 4016 if (err) 4017 return err; 4018 4019 h = kobj_to_hstate(kobj, &nid); 4020 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); 4021 } 4022 4023 static ssize_t nr_hugepages_show(struct kobject *kobj, 4024 struct kobj_attribute *attr, char *buf) 4025 { 4026 return nr_hugepages_show_common(kobj, attr, buf); 4027 } 4028 4029 static ssize_t nr_hugepages_store(struct kobject *kobj, 4030 struct kobj_attribute *attr, const char *buf, size_t len) 4031 { 4032 return nr_hugepages_store_common(false, kobj, buf, len); 4033 } 4034 HSTATE_ATTR(nr_hugepages); 4035 4036 #ifdef CONFIG_NUMA 4037 4038 /* 4039 * hstate attribute for optionally mempolicy-based constraint on persistent 4040 * huge page alloc/free. 
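 *
 * Illustrative usage (hypothetical values): running
 *   numactl -m 0 sh -c "echo 16 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy"
 * constrains the allocation (or freeing) of persistent huge pages to the
 * nodes allowed by the caller's mempolicy, node 0 in this example.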
4041 */
4042 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
4043 struct kobj_attribute *attr,
4044 char *buf)
4045 {
4046 return nr_hugepages_show_common(kobj, attr, buf);
4047 }
4048
4049 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
4050 struct kobj_attribute *attr, const char *buf, size_t len)
4051 {
4052 return nr_hugepages_store_common(true, kobj, buf, len);
4053 }
4054 HSTATE_ATTR(nr_hugepages_mempolicy);
4055 #endif
4056
4057
4058 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
4059 struct kobj_attribute *attr, char *buf)
4060 {
4061 struct hstate *h = kobj_to_hstate(kobj, NULL);
4062 return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages);
4063 }
4064
4065 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
4066 struct kobj_attribute *attr, const char *buf, size_t count)
4067 {
4068 int err;
4069 unsigned long input;
4070 struct hstate *h = kobj_to_hstate(kobj, NULL);
4071
4072 if (hstate_is_gigantic(h))
4073 return -EINVAL;
4074
4075 err = kstrtoul(buf, 10, &input);
4076 if (err)
4077 return err;
4078
4079 spin_lock_irq(&hugetlb_lock);
4080 h->nr_overcommit_huge_pages = input;
4081 spin_unlock_irq(&hugetlb_lock);
4082
4083 return count;
4084 }
4085 HSTATE_ATTR(nr_overcommit_hugepages);
4086
4087 static ssize_t free_hugepages_show(struct kobject *kobj,
4088 struct kobj_attribute *attr, char *buf)
4089 {
4090 struct hstate *h;
4091 unsigned long free_huge_pages;
4092 int nid;
4093
4094 h = kobj_to_hstate(kobj, &nid);
4095 if (nid == NUMA_NO_NODE)
4096 free_huge_pages = h->free_huge_pages;
4097 else
4098 free_huge_pages = h->free_huge_pages_node[nid];
4099
4100 return sysfs_emit(buf, "%lu\n", free_huge_pages);
4101 }
4102 HSTATE_ATTR_RO(free_hugepages);
4103
4104 static ssize_t resv_hugepages_show(struct kobject *kobj,
4105 struct kobj_attribute *attr, char *buf)
4106 {
4107 struct hstate *h = kobj_to_hstate(kobj, NULL);
4108 return sysfs_emit(buf, "%lu\n", h->resv_huge_pages);
4109 }
4110 HSTATE_ATTR_RO(resv_hugepages);
4111
4112 static ssize_t surplus_hugepages_show(struct kobject *kobj,
4113 struct kobj_attribute *attr, char *buf)
4114 {
4115 struct hstate *h;
4116 unsigned long surplus_huge_pages;
4117 int nid;
4118
4119 h = kobj_to_hstate(kobj, &nid);
4120 if (nid == NUMA_NO_NODE)
4121 surplus_huge_pages = h->surplus_huge_pages;
4122 else
4123 surplus_huge_pages = h->surplus_huge_pages_node[nid];
4124
4125 return sysfs_emit(buf, "%lu\n", surplus_huge_pages);
4126 }
4127 HSTATE_ATTR_RO(surplus_hugepages);
4128
4129 static ssize_t demote_store(struct kobject *kobj,
4130 struct kobj_attribute *attr, const char *buf, size_t len)
4131 {
4132 unsigned long nr_demote;
4133 unsigned long nr_available;
4134 nodemask_t nodes_allowed, *n_mask;
4135 struct hstate *h;
4136 int err;
4137 int nid;
4138
4139 err = kstrtoul(buf, 10, &nr_demote);
4140 if (err)
4141 return err;
4142 h = kobj_to_hstate(kobj, &nid);
4143
4144 if (nid != NUMA_NO_NODE) {
4145 init_nodemask_of_node(&nodes_allowed, nid);
4146 n_mask = &nodes_allowed;
4147 } else {
4148 n_mask = &node_states[N_MEMORY];
4149 }
4150
4151 /* Synchronize with other sysfs operations modifying huge pages */
4152 mutex_lock(&h->resize_lock);
4153 spin_lock_irq(&hugetlb_lock);
4154
4155 while (nr_demote) {
4156 long rc;
4157
4158 /*
4159 * Check for available pages to demote each time through the
4160 * loop as demote_pool_huge_page will drop hugetlb_lock.
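 * nr_demote is reduced by the number actually demoted on each pass, and the
 * loop also stops early once no free, unreserved pages remain on the
 * selected node(s).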
4161 */ 4162 if (nid != NUMA_NO_NODE) 4163 nr_available = h->free_huge_pages_node[nid]; 4164 else 4165 nr_available = h->free_huge_pages; 4166 nr_available -= h->resv_huge_pages; 4167 if (!nr_available) 4168 break; 4169 4170 rc = demote_pool_huge_page(h, n_mask, nr_demote); 4171 if (rc < 0) { 4172 err = rc; 4173 break; 4174 } 4175 4176 nr_demote -= rc; 4177 } 4178 4179 spin_unlock_irq(&hugetlb_lock); 4180 mutex_unlock(&h->resize_lock); 4181 4182 if (err) 4183 return err; 4184 return len; 4185 } 4186 HSTATE_ATTR_WO(demote); 4187 4188 static ssize_t demote_size_show(struct kobject *kobj, 4189 struct kobj_attribute *attr, char *buf) 4190 { 4191 struct hstate *h = kobj_to_hstate(kobj, NULL); 4192 unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K; 4193 4194 return sysfs_emit(buf, "%lukB\n", demote_size); 4195 } 4196 4197 static ssize_t demote_size_store(struct kobject *kobj, 4198 struct kobj_attribute *attr, 4199 const char *buf, size_t count) 4200 { 4201 struct hstate *h, *demote_hstate; 4202 unsigned long demote_size; 4203 unsigned int demote_order; 4204 4205 demote_size = (unsigned long)memparse(buf, NULL); 4206 4207 demote_hstate = size_to_hstate(demote_size); 4208 if (!demote_hstate) 4209 return -EINVAL; 4210 demote_order = demote_hstate->order; 4211 if (demote_order < HUGETLB_PAGE_ORDER) 4212 return -EINVAL; 4213 4214 /* demote order must be smaller than hstate order */ 4215 h = kobj_to_hstate(kobj, NULL); 4216 if (demote_order >= h->order) 4217 return -EINVAL; 4218 4219 /* resize_lock synchronizes access to demote size and writes */ 4220 mutex_lock(&h->resize_lock); 4221 h->demote_order = demote_order; 4222 mutex_unlock(&h->resize_lock); 4223 4224 return count; 4225 } 4226 HSTATE_ATTR(demote_size); 4227 4228 static struct attribute *hstate_attrs[] = { 4229 &nr_hugepages_attr.attr, 4230 &nr_overcommit_hugepages_attr.attr, 4231 &free_hugepages_attr.attr, 4232 &resv_hugepages_attr.attr, 4233 &surplus_hugepages_attr.attr, 4234 #ifdef CONFIG_NUMA 4235 &nr_hugepages_mempolicy_attr.attr, 4236 #endif 4237 NULL, 4238 }; 4239 4240 static const struct attribute_group hstate_attr_group = { 4241 .attrs = hstate_attrs, 4242 }; 4243 4244 static struct attribute *hstate_demote_attrs[] = { 4245 &demote_size_attr.attr, 4246 &demote_attr.attr, 4247 NULL, 4248 }; 4249 4250 static const struct attribute_group hstate_demote_attr_group = { 4251 .attrs = hstate_demote_attrs, 4252 }; 4253 4254 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, 4255 struct kobject **hstate_kobjs, 4256 const struct attribute_group *hstate_attr_group) 4257 { 4258 int retval; 4259 int hi = hstate_index(h); 4260 4261 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); 4262 if (!hstate_kobjs[hi]) 4263 return -ENOMEM; 4264 4265 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); 4266 if (retval) { 4267 kobject_put(hstate_kobjs[hi]); 4268 hstate_kobjs[hi] = NULL; 4269 return retval; 4270 } 4271 4272 if (h->demote_order) { 4273 retval = sysfs_create_group(hstate_kobjs[hi], 4274 &hstate_demote_attr_group); 4275 if (retval) { 4276 pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name); 4277 sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group); 4278 kobject_put(hstate_kobjs[hi]); 4279 hstate_kobjs[hi] = NULL; 4280 return retval; 4281 } 4282 } 4283 4284 return 0; 4285 } 4286 4287 #ifdef CONFIG_NUMA 4288 static bool hugetlb_sysfs_initialized __ro_after_init; 4289 4290 /* 4291 * node_hstate/s - associate per node hstate attributes, via their kobjects, 4292 * 
with node devices in node_devices[] using a parallel array. The array 4293 * index of a node device or _hstate == node id. 4294 * This is here to avoid any static dependency of the node device driver, in 4295 * the base kernel, on the hugetlb module. 4296 */ 4297 struct node_hstate { 4298 struct kobject *hugepages_kobj; 4299 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 4300 }; 4301 static struct node_hstate node_hstates[MAX_NUMNODES]; 4302 4303 /* 4304 * A subset of global hstate attributes for node devices 4305 */ 4306 static struct attribute *per_node_hstate_attrs[] = { 4307 &nr_hugepages_attr.attr, 4308 &free_hugepages_attr.attr, 4309 &surplus_hugepages_attr.attr, 4310 NULL, 4311 }; 4312 4313 static const struct attribute_group per_node_hstate_attr_group = { 4314 .attrs = per_node_hstate_attrs, 4315 }; 4316 4317 /* 4318 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. 4319 * Returns node id via non-NULL nidp. 4320 */ 4321 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 4322 { 4323 int nid; 4324 4325 for (nid = 0; nid < nr_node_ids; nid++) { 4326 struct node_hstate *nhs = &node_hstates[nid]; 4327 int i; 4328 for (i = 0; i < HUGE_MAX_HSTATE; i++) 4329 if (nhs->hstate_kobjs[i] == kobj) { 4330 if (nidp) 4331 *nidp = nid; 4332 return &hstates[i]; 4333 } 4334 } 4335 4336 BUG(); 4337 return NULL; 4338 } 4339 4340 /* 4341 * Unregister hstate attributes from a single node device. 4342 * No-op if no hstate attributes attached. 4343 */ 4344 void hugetlb_unregister_node(struct node *node) 4345 { 4346 struct hstate *h; 4347 struct node_hstate *nhs = &node_hstates[node->dev.id]; 4348 4349 if (!nhs->hugepages_kobj) 4350 return; /* no hstate attributes */ 4351 4352 for_each_hstate(h) { 4353 int idx = hstate_index(h); 4354 struct kobject *hstate_kobj = nhs->hstate_kobjs[idx]; 4355 4356 if (!hstate_kobj) 4357 continue; 4358 if (h->demote_order) 4359 sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group); 4360 sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group); 4361 kobject_put(hstate_kobj); 4362 nhs->hstate_kobjs[idx] = NULL; 4363 } 4364 4365 kobject_put(nhs->hugepages_kobj); 4366 nhs->hugepages_kobj = NULL; 4367 } 4368 4369 4370 /* 4371 * Register hstate attributes for a single node device. 4372 * No-op if attributes already registered. 4373 */ 4374 void hugetlb_register_node(struct node *node) 4375 { 4376 struct hstate *h; 4377 struct node_hstate *nhs = &node_hstates[node->dev.id]; 4378 int err; 4379 4380 if (!hugetlb_sysfs_initialized) 4381 return; 4382 4383 if (nhs->hugepages_kobj) 4384 return; /* already allocated */ 4385 4386 nhs->hugepages_kobj = kobject_create_and_add("hugepages", 4387 &node->dev.kobj); 4388 if (!nhs->hugepages_kobj) 4389 return; 4390 4391 for_each_hstate(h) { 4392 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, 4393 nhs->hstate_kobjs, 4394 &per_node_hstate_attr_group); 4395 if (err) { 4396 pr_err("HugeTLB: Unable to add hstate %s for node %d\n", 4397 h->name, node->dev.id); 4398 hugetlb_unregister_node(node); 4399 break; 4400 } 4401 } 4402 } 4403 4404 /* 4405 * hugetlb init time: register hstate attributes for all registered node 4406 * devices of nodes that have memory. All on-line nodes should have 4407 * registered their associated device by this time. 
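 * Once registered, the per-node attributes (the subset listed in
 * per_node_hstate_attrs above) appear under paths of the form
 * /sys/devices/system/node/node<N>/hugepages/hugepages-<size>kB/.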
4408 */ 4409 static void __init hugetlb_register_all_nodes(void) 4410 { 4411 int nid; 4412 4413 for_each_online_node(nid) 4414 hugetlb_register_node(node_devices[nid]); 4415 } 4416 #else /* !CONFIG_NUMA */ 4417 4418 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 4419 { 4420 BUG(); 4421 if (nidp) 4422 *nidp = -1; 4423 return NULL; 4424 } 4425 4426 static void hugetlb_register_all_nodes(void) { } 4427 4428 #endif 4429 4430 #ifdef CONFIG_CMA 4431 static void __init hugetlb_cma_check(void); 4432 #else 4433 static inline __init void hugetlb_cma_check(void) 4434 { 4435 } 4436 #endif 4437 4438 static void __init hugetlb_sysfs_init(void) 4439 { 4440 struct hstate *h; 4441 int err; 4442 4443 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 4444 if (!hugepages_kobj) 4445 return; 4446 4447 for_each_hstate(h) { 4448 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, 4449 hstate_kobjs, &hstate_attr_group); 4450 if (err) 4451 pr_err("HugeTLB: Unable to add hstate %s", h->name); 4452 } 4453 4454 #ifdef CONFIG_NUMA 4455 hugetlb_sysfs_initialized = true; 4456 #endif 4457 hugetlb_register_all_nodes(); 4458 } 4459 4460 #ifdef CONFIG_SYSCTL 4461 static void hugetlb_sysctl_init(void); 4462 #else 4463 static inline void hugetlb_sysctl_init(void) { } 4464 #endif 4465 4466 static int __init hugetlb_init(void) 4467 { 4468 int i; 4469 4470 BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE < 4471 __NR_HPAGEFLAGS); 4472 4473 if (!hugepages_supported()) { 4474 if (hugetlb_max_hstate || default_hstate_max_huge_pages) 4475 pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n"); 4476 return 0; 4477 } 4478 4479 /* 4480 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some 4481 * architectures depend on setup being done here. 4482 */ 4483 hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 4484 if (!parsed_default_hugepagesz) { 4485 /* 4486 * If we did not parse a default huge page size, set 4487 * default_hstate_idx to HPAGE_SIZE hstate. And, if the 4488 * number of huge pages for this default size was implicitly 4489 * specified, set that here as well. 4490 * Note that the implicit setting will overwrite an explicit 4491 * setting. A warning will be printed in this case. 
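 *
 * For example (illustrative, assuming a 2 MB default huge page size):
 * booting with "hugepages=512 hugepagesz=2M hugepages=256" results in the
 * implicit 512 overriding the explicit 256, with a warning.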
4492 */
4493 default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
4494 if (default_hstate_max_huge_pages) {
4495 if (default_hstate.max_huge_pages) {
4496 char buf[32];
4497
4498 string_get_size(huge_page_size(&default_hstate),
4499 1, STRING_UNITS_2, buf, 32);
4500 pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
4501 default_hstate.max_huge_pages, buf);
4502 pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
4503 default_hstate_max_huge_pages);
4504 }
4505 default_hstate.max_huge_pages =
4506 default_hstate_max_huge_pages;
4507
4508 for_each_online_node(i)
4509 default_hstate.max_huge_pages_node[i] =
4510 default_hugepages_in_node[i];
4511 }
4512 }
4513
4514 hugetlb_cma_check();
4515 hugetlb_init_hstates();
4516 gather_bootmem_prealloc();
4517 report_hugepages();
4518
4519 hugetlb_sysfs_init();
4520 hugetlb_cgroup_file_init();
4521 hugetlb_sysctl_init();
4522
4523 #ifdef CONFIG_SMP
4524 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
4525 #else
4526 num_fault_mutexes = 1;
4527 #endif
4528 hugetlb_fault_mutex_table =
4529 kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
4530 GFP_KERNEL);
4531 BUG_ON(!hugetlb_fault_mutex_table);
4532
4533 for (i = 0; i < num_fault_mutexes; i++)
4534 mutex_init(&hugetlb_fault_mutex_table[i]);
4535 return 0;
4536 }
4537 subsys_initcall(hugetlb_init);
4538
4539 /* Overwritten by architectures with more huge page sizes */
4540 bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
4541 {
4542 return size == HPAGE_SIZE;
4543 }
4544
4545 void __init hugetlb_add_hstate(unsigned int order)
4546 {
4547 struct hstate *h;
4548 unsigned long i;
4549
4550 if (size_to_hstate(PAGE_SIZE << order)) {
4551 return;
4552 }
4553 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
4554 BUG_ON(order < order_base_2(__NR_USED_SUBPAGE));
4555 h = &hstates[hugetlb_max_hstate++];
4556 __mutex_init(&h->resize_lock, "resize mutex", &h->resize_key);
4557 h->order = order;
4558 h->mask = ~(huge_page_size(h) - 1);
4559 for (i = 0; i < MAX_NUMNODES; ++i)
4560 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
4561 INIT_LIST_HEAD(&h->hugepage_activelist);
4562 h->next_nid_to_alloc = first_memory_node;
4563 h->next_nid_to_free = first_memory_node;
4564 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
4565 huge_page_size(h)/SZ_1K);
4566
4567 parsed_hstate = h;
4568 }
4569
4570 bool __init __weak hugetlb_node_alloc_supported(void)
4571 {
4572 return true;
4573 }
4574
4575 static void __init hugepages_clear_pages_in_node(void)
4576 {
4577 if (!hugetlb_max_hstate) {
4578 default_hstate_max_huge_pages = 0;
4579 memset(default_hugepages_in_node, 0,
4580 sizeof(default_hugepages_in_node));
4581 } else {
4582 parsed_hstate->max_huge_pages = 0;
4583 memset(parsed_hstate->max_huge_pages_node, 0,
4584 sizeof(parsed_hstate->max_huge_pages_node));
4585 }
4586 }
4587
4588 /*
4589 * hugepages command line processing
4590 * hugepages normally follows a valid hugepagesz or default_hugepagesz
4591 * specification. If not, ignore the hugepages value. hugepages can also
4592 * be the first huge page command line option in which case it implicitly
4593 * specifies the number of huge pages for the default size.
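 * Examples (illustrative): "hugepagesz=1G hugepages=2" requests two 1 GB
 * pages balanced across nodes, while the per-node form
 * "hugepagesz=1G hugepages=0:1,1:1" places one page on node 0 and one on
 * node 1.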
4594 */ 4595 static int __init hugepages_setup(char *s) 4596 { 4597 unsigned long *mhp; 4598 static unsigned long *last_mhp; 4599 int node = NUMA_NO_NODE; 4600 int count; 4601 unsigned long tmp; 4602 char *p = s; 4603 4604 if (!parsed_valid_hugepagesz) { 4605 pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s); 4606 parsed_valid_hugepagesz = true; 4607 return 1; 4608 } 4609 4610 /* 4611 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter 4612 * yet, so this hugepages= parameter goes to the "default hstate". 4613 * Otherwise, it goes with the previously parsed hugepagesz or 4614 * default_hugepagesz. 4615 */ 4616 else if (!hugetlb_max_hstate) 4617 mhp = &default_hstate_max_huge_pages; 4618 else 4619 mhp = &parsed_hstate->max_huge_pages; 4620 4621 if (mhp == last_mhp) { 4622 pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s); 4623 return 1; 4624 } 4625 4626 while (*p) { 4627 count = 0; 4628 if (sscanf(p, "%lu%n", &tmp, &count) != 1) 4629 goto invalid; 4630 /* Parameter is node format */ 4631 if (p[count] == ':') { 4632 if (!hugetlb_node_alloc_supported()) { 4633 pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n"); 4634 return 1; 4635 } 4636 if (tmp >= MAX_NUMNODES || !node_online(tmp)) 4637 goto invalid; 4638 node = array_index_nospec(tmp, MAX_NUMNODES); 4639 p += count + 1; 4640 /* Parse hugepages */ 4641 if (sscanf(p, "%lu%n", &tmp, &count) != 1) 4642 goto invalid; 4643 if (!hugetlb_max_hstate) 4644 default_hugepages_in_node[node] = tmp; 4645 else 4646 parsed_hstate->max_huge_pages_node[node] = tmp; 4647 *mhp += tmp; 4648 /* Go to parse next node*/ 4649 if (p[count] == ',') 4650 p += count + 1; 4651 else 4652 break; 4653 } else { 4654 if (p != s) 4655 goto invalid; 4656 *mhp = tmp; 4657 break; 4658 } 4659 } 4660 4661 /* 4662 * Global state is always initialized later in hugetlb_init. 4663 * But we need to allocate gigantic hstates here early to still 4664 * use the bootmem allocator. 4665 */ 4666 if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate)) 4667 hugetlb_hstate_alloc_pages(parsed_hstate); 4668 4669 last_mhp = mhp; 4670 4671 return 1; 4672 4673 invalid: 4674 pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p); 4675 hugepages_clear_pages_in_node(); 4676 return 1; 4677 } 4678 __setup("hugepages=", hugepages_setup); 4679 4680 /* 4681 * hugepagesz command line processing 4682 * A specific huge page size can only be specified once with hugepagesz. 4683 * hugepagesz is followed by hugepages on the command line. The global 4684 * variable 'parsed_valid_hugepagesz' is used to determine if prior 4685 * hugepagesz argument was valid. 4686 */ 4687 static int __init hugepagesz_setup(char *s) 4688 { 4689 unsigned long size; 4690 struct hstate *h; 4691 4692 parsed_valid_hugepagesz = false; 4693 size = (unsigned long)memparse(s, NULL); 4694 4695 if (!arch_hugetlb_valid_size(size)) { 4696 pr_err("HugeTLB: unsupported hugepagesz=%s\n", s); 4697 return 1; 4698 } 4699 4700 h = size_to_hstate(size); 4701 if (h) { 4702 /* 4703 * hstate for this size already exists. This is normally 4704 * an error, but is allowed if the existing hstate is the 4705 * default hstate. More specifically, it is only allowed if 4706 * the number of huge pages for the default hstate was not 4707 * previously specified. 
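 *
 * For example (illustrative): "default_hugepagesz=1G hugepagesz=1G hugepages=2"
 * is accepted, because the 1 GB hstate created by default_hugepagesz= does
 * not yet have a page count; repeating "hugepagesz=1G" again after that
 * would be rejected as a duplicate.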
4708 */ 4709 if (!parsed_default_hugepagesz || h != &default_hstate || 4710 default_hstate.max_huge_pages) { 4711 pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s); 4712 return 1; 4713 } 4714 4715 /* 4716 * No need to call hugetlb_add_hstate() as hstate already 4717 * exists. But, do set parsed_hstate so that a following 4718 * hugepages= parameter will be applied to this hstate. 4719 */ 4720 parsed_hstate = h; 4721 parsed_valid_hugepagesz = true; 4722 return 1; 4723 } 4724 4725 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); 4726 parsed_valid_hugepagesz = true; 4727 return 1; 4728 } 4729 __setup("hugepagesz=", hugepagesz_setup); 4730 4731 /* 4732 * default_hugepagesz command line input 4733 * Only one instance of default_hugepagesz allowed on command line. 4734 */ 4735 static int __init default_hugepagesz_setup(char *s) 4736 { 4737 unsigned long size; 4738 int i; 4739 4740 parsed_valid_hugepagesz = false; 4741 if (parsed_default_hugepagesz) { 4742 pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s); 4743 return 1; 4744 } 4745 4746 size = (unsigned long)memparse(s, NULL); 4747 4748 if (!arch_hugetlb_valid_size(size)) { 4749 pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s); 4750 return 1; 4751 } 4752 4753 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); 4754 parsed_valid_hugepagesz = true; 4755 parsed_default_hugepagesz = true; 4756 default_hstate_idx = hstate_index(size_to_hstate(size)); 4757 4758 /* 4759 * The number of default huge pages (for this size) could have been 4760 * specified as the first hugetlb parameter: hugepages=X. If so, 4761 * then default_hstate_max_huge_pages is set. If the default huge 4762 * page size is gigantic (> MAX_PAGE_ORDER), then the pages must be 4763 * allocated here from bootmem allocator. 4764 */ 4765 if (default_hstate_max_huge_pages) { 4766 default_hstate.max_huge_pages = default_hstate_max_huge_pages; 4767 for_each_online_node(i) 4768 default_hstate.max_huge_pages_node[i] = 4769 default_hugepages_in_node[i]; 4770 if (hstate_is_gigantic(&default_hstate)) 4771 hugetlb_hstate_alloc_pages(&default_hstate); 4772 default_hstate_max_huge_pages = 0; 4773 } 4774 4775 return 1; 4776 } 4777 __setup("default_hugepagesz=", default_hugepagesz_setup); 4778 4779 static unsigned int allowed_mems_nr(struct hstate *h) 4780 { 4781 int node; 4782 unsigned int nr = 0; 4783 nodemask_t *mbind_nodemask; 4784 unsigned int *array = h->free_huge_pages_node; 4785 gfp_t gfp_mask = htlb_alloc_mask(h); 4786 4787 mbind_nodemask = policy_mbind_nodemask(gfp_mask); 4788 for_each_node_mask(node, cpuset_current_mems_allowed) { 4789 if (!mbind_nodemask || node_isset(node, *mbind_nodemask)) 4790 nr += array[node]; 4791 } 4792 4793 return nr; 4794 } 4795 4796 #ifdef CONFIG_SYSCTL 4797 static int proc_hugetlb_doulongvec_minmax(const struct ctl_table *table, int write, 4798 void *buffer, size_t *length, 4799 loff_t *ppos, unsigned long *out) 4800 { 4801 struct ctl_table dup_table; 4802 4803 /* 4804 * In order to avoid races with __do_proc_doulongvec_minmax(), we 4805 * can duplicate the @table and alter the duplicate of it. 
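 * The copy's .data is pointed at the caller's local variable, so concurrent
 * handlers never read or write a shared buffer through the global table.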
4806 */ 4807 dup_table = *table; 4808 dup_table.data = out; 4809 4810 return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos); 4811 } 4812 4813 static int hugetlb_sysctl_handler_common(bool obey_mempolicy, 4814 const struct ctl_table *table, int write, 4815 void *buffer, size_t *length, loff_t *ppos) 4816 { 4817 struct hstate *h = &default_hstate; 4818 unsigned long tmp = h->max_huge_pages; 4819 int ret; 4820 4821 if (!hugepages_supported()) 4822 return -EOPNOTSUPP; 4823 4824 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 4825 &tmp); 4826 if (ret) 4827 goto out; 4828 4829 if (write) 4830 ret = __nr_hugepages_store_common(obey_mempolicy, h, 4831 NUMA_NO_NODE, tmp, *length); 4832 out: 4833 return ret; 4834 } 4835 4836 static int hugetlb_sysctl_handler(const struct ctl_table *table, int write, 4837 void *buffer, size_t *length, loff_t *ppos) 4838 { 4839 4840 return hugetlb_sysctl_handler_common(false, table, write, 4841 buffer, length, ppos); 4842 } 4843 4844 #ifdef CONFIG_NUMA 4845 static int hugetlb_mempolicy_sysctl_handler(const struct ctl_table *table, int write, 4846 void *buffer, size_t *length, loff_t *ppos) 4847 { 4848 return hugetlb_sysctl_handler_common(true, table, write, 4849 buffer, length, ppos); 4850 } 4851 #endif /* CONFIG_NUMA */ 4852 4853 static int hugetlb_overcommit_handler(const struct ctl_table *table, int write, 4854 void *buffer, size_t *length, loff_t *ppos) 4855 { 4856 struct hstate *h = &default_hstate; 4857 unsigned long tmp; 4858 int ret; 4859 4860 if (!hugepages_supported()) 4861 return -EOPNOTSUPP; 4862 4863 tmp = h->nr_overcommit_huge_pages; 4864 4865 if (write && hstate_is_gigantic(h)) 4866 return -EINVAL; 4867 4868 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 4869 &tmp); 4870 if (ret) 4871 goto out; 4872 4873 if (write) { 4874 spin_lock_irq(&hugetlb_lock); 4875 h->nr_overcommit_huge_pages = tmp; 4876 spin_unlock_irq(&hugetlb_lock); 4877 } 4878 out: 4879 return ret; 4880 } 4881 4882 static const struct ctl_table hugetlb_table[] = { 4883 { 4884 .procname = "nr_hugepages", 4885 .data = NULL, 4886 .maxlen = sizeof(unsigned long), 4887 .mode = 0644, 4888 .proc_handler = hugetlb_sysctl_handler, 4889 }, 4890 #ifdef CONFIG_NUMA 4891 { 4892 .procname = "nr_hugepages_mempolicy", 4893 .data = NULL, 4894 .maxlen = sizeof(unsigned long), 4895 .mode = 0644, 4896 .proc_handler = &hugetlb_mempolicy_sysctl_handler, 4897 }, 4898 #endif 4899 { 4900 .procname = "hugetlb_shm_group", 4901 .data = &sysctl_hugetlb_shm_group, 4902 .maxlen = sizeof(gid_t), 4903 .mode = 0644, 4904 .proc_handler = proc_dointvec, 4905 }, 4906 { 4907 .procname = "nr_overcommit_hugepages", 4908 .data = NULL, 4909 .maxlen = sizeof(unsigned long), 4910 .mode = 0644, 4911 .proc_handler = hugetlb_overcommit_handler, 4912 }, 4913 }; 4914 4915 static void hugetlb_sysctl_init(void) 4916 { 4917 register_sysctl_init("vm", hugetlb_table); 4918 } 4919 #endif /* CONFIG_SYSCTL */ 4920 4921 void hugetlb_report_meminfo(struct seq_file *m) 4922 { 4923 struct hstate *h; 4924 unsigned long total = 0; 4925 4926 if (!hugepages_supported()) 4927 return; 4928 4929 for_each_hstate(h) { 4930 unsigned long count = h->nr_huge_pages; 4931 4932 total += huge_page_size(h) * count; 4933 4934 if (h == &default_hstate) 4935 seq_printf(m, 4936 "HugePages_Total: %5lu\n" 4937 "HugePages_Free: %5lu\n" 4938 "HugePages_Rsvd: %5lu\n" 4939 "HugePages_Surp: %5lu\n" 4940 "Hugepagesize: %8lu kB\n", 4941 count, 4942 h->free_huge_pages, 4943 h->resv_huge_pages, 4944 
h->surplus_huge_pages,
4945 huge_page_size(h) / SZ_1K);
4946 }
4947
4948 seq_printf(m, "Hugetlb: %8lu kB\n", total / SZ_1K);
4949 }
4950
4951 int hugetlb_report_node_meminfo(char *buf, int len, int nid)
4952 {
4953 struct hstate *h = &default_hstate;
4954
4955 if (!hugepages_supported())
4956 return 0;
4957
4958 return sysfs_emit_at(buf, len,
4959 "Node %d HugePages_Total: %5u\n"
4960 "Node %d HugePages_Free: %5u\n"
4961 "Node %d HugePages_Surp: %5u\n",
4962 nid, h->nr_huge_pages_node[nid],
4963 nid, h->free_huge_pages_node[nid],
4964 nid, h->surplus_huge_pages_node[nid]);
4965 }
4966
4967 void hugetlb_show_meminfo_node(int nid)
4968 {
4969 struct hstate *h;
4970
4971 if (!hugepages_supported())
4972 return;
4973
4974 for_each_hstate(h)
4975 printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
4976 nid,
4977 h->nr_huge_pages_node[nid],
4978 h->free_huge_pages_node[nid],
4979 h->surplus_huge_pages_node[nid],
4980 huge_page_size(h) / SZ_1K);
4981 }
4982
4983 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
4984 {
4985 seq_printf(m, "HugetlbPages:\t%8lu kB\n",
4986 K(atomic_long_read(&mm->hugetlb_usage)));
4987 }
4988
4989 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
4990 unsigned long hugetlb_total_pages(void)
4991 {
4992 struct hstate *h;
4993 unsigned long nr_total_pages = 0;
4994
4995 for_each_hstate(h)
4996 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
4997 return nr_total_pages;
4998 }
4999
5000 static int hugetlb_acct_memory(struct hstate *h, long delta)
5001 {
5002 int ret = -ENOMEM;
5003
5004 if (!delta)
5005 return 0;
5006
5007 spin_lock_irq(&hugetlb_lock);
5008 /*
5009 * When cpuset is configured, it breaks the strict hugetlb page
5010 * reservation as the accounting is done on a global variable. Such
5011 * reservation is completely rubbish in the presence of cpuset because
5012 * the reservation is not checked against page availability for the
5013 * current cpuset. An application can still potentially be OOM'ed by the
5014 * kernel due to lack of free hugetlb pages in the cpuset that the task is in.
5015 * Attempting to enforce strict accounting with cpuset is almost
5016 * impossible (or too ugly) because cpusets are so fluid that
5017 * tasks or memory nodes can be dynamically moved between them.
5018 *
5019 * The change of semantics for shared hugetlb mapping with cpuset is
5020 * undesirable. However, in order to preserve some of the semantics,
5021 * we fall back to checking against current free page availability as
5022 * a best-effort attempt to minimize the impact of the semantics change
5023 * that cpuset introduces.
5024 *
5025 * Apart from cpusets, the memory policy mechanism also
5026 * determines from which node the kernel will allocate memory
5027 * in a NUMA system. So, similar to cpusets, we should also consider
5028 * the memory policy of the current task, for the same reasons
5029 * described above.
5030 */
5031 if (delta > 0) {
5032 if (gather_surplus_pages(h, delta) < 0)
5033 goto out;
5034
5035 if (delta > allowed_mems_nr(h)) {
5036 return_unused_surplus_pages(h, delta);
5037 goto out;
5038 }
5039 }
5040
5041 ret = 0;
5042 if (delta < 0)
5043 return_unused_surplus_pages(h, (unsigned long) -delta);
5044
5045 out:
5046 spin_unlock_irq(&hugetlb_lock);
5047 return ret;
5048 }
5049
5050 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
5051 {
5052 struct resv_map *resv = vma_resv_map(vma);
5053
5054 /*
5055 * HPAGE_RESV_OWNER indicates a private mapping.
5056 * This new VMA should share its sibling's reservation map if present.
5057 * The VMA will only ever have a valid reservation map pointer where
5058 * it is being copied for another still existing VMA. As that VMA
5059 * has a reference to the reservation map it cannot disappear until
5060 * after this open call completes. It is therefore safe to take a
5061 * new reference here without additional locking.
5062 */
5063 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
5064 resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
5065 kref_get(&resv->refs);
5066 }
5067
5068 /*
5069 * vma_lock structure for sharable mappings is vma specific.
5070 * Clear old pointer (if copied via vm_area_dup) and allocate
5071 * new structure. Before clearing, make sure vma_lock is not
5072 * for this vma.
5073 */
5074 if (vma->vm_flags & VM_MAYSHARE) {
5075 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
5076
5077 if (vma_lock) {
5078 if (vma_lock->vma != vma) {
5079 vma->vm_private_data = NULL;
5080 hugetlb_vma_lock_alloc(vma);
5081 } else
5082 pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
5083 } else
5084 hugetlb_vma_lock_alloc(vma);
5085 }
5086 }
5087
5088 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
5089 {
5090 struct hstate *h = hstate_vma(vma);
5091 struct resv_map *resv;
5092 struct hugepage_subpool *spool = subpool_vma(vma);
5093 unsigned long reserve, start, end;
5094 long gbl_reserve;
5095
5096 hugetlb_vma_lock_free(vma);
5097
5098 resv = vma_resv_map(vma);
5099 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
5100 return;
5101
5102 start = vma_hugecache_offset(h, vma, vma->vm_start);
5103 end = vma_hugecache_offset(h, vma, vma->vm_end);
5104
5105 reserve = (end - start) - region_count(resv, start, end);
5106 hugetlb_cgroup_uncharge_counter(resv, start, end);
5107 if (reserve) {
5108 /*
5109 * Decrement reserve counts. The global reserve count may be
5110 * adjusted if the subpool has a minimum size.
5111 */
5112 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
5113 hugetlb_acct_memory(h, -gbl_reserve);
5114 }
5115
5116 kref_put(&resv->refs, resv_map_release);
5117 }
5118
5119 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
5120 {
5121 if (addr & ~(huge_page_mask(hstate_vma(vma))))
5122 return -EINVAL;
5123
5124 /*
5125 * PMD sharing is only possible for PUD_SIZE-aligned address ranges
5126 * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
5127 * split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
5128 */
5129 if (addr & ~PUD_MASK) {
5130 /*
5131 * hugetlb_vm_op_split is called right before we attempt to
5132 * split the VMA. We will need to unshare PMDs in the old and
5133 * new VMAs, so let's unshare before we split.
5134 */
5135 unsigned long floor = addr & PUD_MASK;
5136 unsigned long ceil = floor + PUD_SIZE;
5137
5138 if (floor >= vma->vm_start && ceil <= vma->vm_end)
5139 hugetlb_unshare_pmds(vma, floor, ceil);
5140 }
5141
5142 return 0;
5143 }
5144
5145 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
5146 {
5147 return huge_page_size(hstate_vma(vma));
5148 }
5149
5150 /*
5151 * We cannot handle pagefaults against hugetlb pages at all. They cause
5152 * handle_mm_fault() to try to instantiate regular-sized pages in the
5153 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
5154 * this far.
5155 */ 5156 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf) 5157 { 5158 BUG(); 5159 return 0; 5160 } 5161 5162 /* 5163 * When a new function is introduced to vm_operations_struct and added 5164 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops. 5165 * This is because under System V memory model, mappings created via 5166 * shmget/shmat with "huge page" specified are backed by hugetlbfs files, 5167 * their original vm_ops are overwritten with shm_vm_ops. 5168 */ 5169 const struct vm_operations_struct hugetlb_vm_ops = { 5170 .fault = hugetlb_vm_op_fault, 5171 .open = hugetlb_vm_op_open, 5172 .close = hugetlb_vm_op_close, 5173 .may_split = hugetlb_vm_op_split, 5174 .pagesize = hugetlb_vm_op_pagesize, 5175 }; 5176 5177 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 5178 bool try_mkwrite) 5179 { 5180 pte_t entry; 5181 unsigned int shift = huge_page_shift(hstate_vma(vma)); 5182 5183 if (try_mkwrite && (vma->vm_flags & VM_WRITE)) { 5184 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, 5185 vma->vm_page_prot))); 5186 } else { 5187 entry = huge_pte_wrprotect(mk_huge_pte(page, 5188 vma->vm_page_prot)); 5189 } 5190 entry = pte_mkyoung(entry); 5191 entry = arch_make_huge_pte(entry, shift, vma->vm_flags); 5192 5193 return entry; 5194 } 5195 5196 static void set_huge_ptep_writable(struct vm_area_struct *vma, 5197 unsigned long address, pte_t *ptep) 5198 { 5199 pte_t entry; 5200 5201 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(vma->vm_mm, address, ptep))); 5202 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) 5203 update_mmu_cache(vma, address, ptep); 5204 } 5205 5206 static void set_huge_ptep_maybe_writable(struct vm_area_struct *vma, 5207 unsigned long address, pte_t *ptep) 5208 { 5209 if (vma->vm_flags & VM_WRITE) 5210 set_huge_ptep_writable(vma, address, ptep); 5211 } 5212 5213 bool is_hugetlb_entry_migration(pte_t pte) 5214 { 5215 swp_entry_t swp; 5216 5217 if (huge_pte_none(pte) || pte_present(pte)) 5218 return false; 5219 swp = pte_to_swp_entry(pte); 5220 if (is_migration_entry(swp)) 5221 return true; 5222 else 5223 return false; 5224 } 5225 5226 bool is_hugetlb_entry_hwpoisoned(pte_t pte) 5227 { 5228 swp_entry_t swp; 5229 5230 if (huge_pte_none(pte) || pte_present(pte)) 5231 return false; 5232 swp = pte_to_swp_entry(pte); 5233 if (is_hwpoison_entry(swp)) 5234 return true; 5235 else 5236 return false; 5237 } 5238 5239 static void 5240 hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr, 5241 struct folio *new_folio, pte_t old, unsigned long sz) 5242 { 5243 pte_t newpte = make_huge_pte(vma, &new_folio->page, true); 5244 5245 __folio_mark_uptodate(new_folio); 5246 hugetlb_add_new_anon_rmap(new_folio, vma, addr); 5247 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old)) 5248 newpte = huge_pte_mkuffd_wp(newpte); 5249 set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz); 5250 hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm); 5251 folio_set_hugetlb_migratable(new_folio); 5252 } 5253 5254 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 5255 struct vm_area_struct *dst_vma, 5256 struct vm_area_struct *src_vma) 5257 { 5258 pte_t *src_pte, *dst_pte, entry; 5259 struct folio *pte_folio; 5260 unsigned long addr; 5261 bool cow = is_cow_mapping(src_vma->vm_flags); 5262 struct hstate *h = hstate_vma(src_vma); 5263 unsigned long sz = huge_page_size(h); 5264 unsigned long npages = pages_per_huge_page(h); 5265 struct mmu_notifier_range range; 5266 
unsigned long last_addr_mask; 5267 int ret = 0; 5268 5269 if (cow) { 5270 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src, 5271 src_vma->vm_start, 5272 src_vma->vm_end); 5273 mmu_notifier_invalidate_range_start(&range); 5274 vma_assert_write_locked(src_vma); 5275 raw_write_seqcount_begin(&src->write_protect_seq); 5276 } else { 5277 /* 5278 * For shared mappings the vma lock must be held before 5279 * calling hugetlb_walk() in the src vma. Otherwise, the 5280 * returned ptep could go away if part of a shared pmd and 5281 * another thread calls huge_pmd_unshare. 5282 */ 5283 hugetlb_vma_lock_read(src_vma); 5284 } 5285 5286 last_addr_mask = hugetlb_mask_last_page(h); 5287 for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) { 5288 spinlock_t *src_ptl, *dst_ptl; 5289 src_pte = hugetlb_walk(src_vma, addr, sz); 5290 if (!src_pte) { 5291 addr |= last_addr_mask; 5292 continue; 5293 } 5294 dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz); 5295 if (!dst_pte) { 5296 ret = -ENOMEM; 5297 break; 5298 } 5299 5300 /* 5301 * If the pagetables are shared don't copy or take references. 5302 * 5303 * dst_pte == src_pte is the common case of src/dest sharing. 5304 * However, src could have 'unshared' and dst shares with 5305 * another vma. So page_count of ptep page is checked instead 5306 * to reliably determine whether pte is shared. 5307 */ 5308 if (page_count(virt_to_page(dst_pte)) > 1) { 5309 addr |= last_addr_mask; 5310 continue; 5311 } 5312 5313 dst_ptl = huge_pte_lock(h, dst, dst_pte); 5314 src_ptl = huge_pte_lockptr(h, src, src_pte); 5315 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5316 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); 5317 again: 5318 if (huge_pte_none(entry)) { 5319 /* 5320 * Skip if src entry none. 5321 */ 5322 ; 5323 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) { 5324 if (!userfaultfd_wp(dst_vma)) 5325 entry = huge_pte_clear_uffd_wp(entry); 5326 set_huge_pte_at(dst, addr, dst_pte, entry, sz); 5327 } else if (unlikely(is_hugetlb_entry_migration(entry))) { 5328 swp_entry_t swp_entry = pte_to_swp_entry(entry); 5329 bool uffd_wp = pte_swp_uffd_wp(entry); 5330 5331 if (!is_readable_migration_entry(swp_entry) && cow) { 5332 /* 5333 * COW mappings require pages in both 5334 * parent and child to be set to read. 5335 */ 5336 swp_entry = make_readable_migration_entry( 5337 swp_offset(swp_entry)); 5338 entry = swp_entry_to_pte(swp_entry); 5339 if (userfaultfd_wp(src_vma) && uffd_wp) 5340 entry = pte_swp_mkuffd_wp(entry); 5341 set_huge_pte_at(src, addr, src_pte, entry, sz); 5342 } 5343 if (!userfaultfd_wp(dst_vma)) 5344 entry = huge_pte_clear_uffd_wp(entry); 5345 set_huge_pte_at(dst, addr, dst_pte, entry, sz); 5346 } else if (unlikely(is_pte_marker(entry))) { 5347 pte_marker marker = copy_pte_marker( 5348 pte_to_swp_entry(entry), dst_vma); 5349 5350 if (marker) 5351 set_huge_pte_at(dst, addr, dst_pte, 5352 make_pte_marker(marker), sz); 5353 } else { 5354 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); 5355 pte_folio = page_folio(pte_page(entry)); 5356 folio_get(pte_folio); 5357 5358 /* 5359 * Failing to duplicate the anon rmap is a rare case 5360 * where we see pinned hugetlb pages while they're 5361 * prone to COW. We need to do the COW earlier during 5362 * fork. 5363 * 5364 * When pre-allocating the page or copying data, we 5365 * need to be without the pgtable locks since we could 5366 * sleep during the process. 
5367 */ 5368 if (!folio_test_anon(pte_folio)) { 5369 hugetlb_add_file_rmap(pte_folio); 5370 } else if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) { 5371 pte_t src_pte_old = entry; 5372 struct folio *new_folio; 5373 5374 spin_unlock(src_ptl); 5375 spin_unlock(dst_ptl); 5376 /* Do not use reserve as it's private owned */ 5377 new_folio = alloc_hugetlb_folio(dst_vma, addr, false); 5378 if (IS_ERR(new_folio)) { 5379 folio_put(pte_folio); 5380 ret = PTR_ERR(new_folio); 5381 break; 5382 } 5383 ret = copy_user_large_folio(new_folio, pte_folio, 5384 addr, dst_vma); 5385 folio_put(pte_folio); 5386 if (ret) { 5387 folio_put(new_folio); 5388 break; 5389 } 5390 5391 /* Install the new hugetlb folio if src pte stable */ 5392 dst_ptl = huge_pte_lock(h, dst, dst_pte); 5393 src_ptl = huge_pte_lockptr(h, src, src_pte); 5394 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5395 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); 5396 if (!pte_same(src_pte_old, entry)) { 5397 restore_reserve_on_error(h, dst_vma, addr, 5398 new_folio); 5399 folio_put(new_folio); 5400 /* huge_ptep of dst_pte won't change as in child */ 5401 goto again; 5402 } 5403 hugetlb_install_folio(dst_vma, dst_pte, addr, 5404 new_folio, src_pte_old, sz); 5405 spin_unlock(src_ptl); 5406 spin_unlock(dst_ptl); 5407 continue; 5408 } 5409 5410 if (cow) { 5411 /* 5412 * No need to notify as we are downgrading page 5413 * table protection not changing it to point 5414 * to a new page. 5415 * 5416 * See Documentation/mm/mmu_notifier.rst 5417 */ 5418 huge_ptep_set_wrprotect(src, addr, src_pte); 5419 entry = huge_pte_wrprotect(entry); 5420 } 5421 5422 if (!userfaultfd_wp(dst_vma)) 5423 entry = huge_pte_clear_uffd_wp(entry); 5424 5425 set_huge_pte_at(dst, addr, dst_pte, entry, sz); 5426 hugetlb_count_add(npages, dst); 5427 } 5428 spin_unlock(src_ptl); 5429 spin_unlock(dst_ptl); 5430 } 5431 5432 if (cow) { 5433 raw_write_seqcount_end(&src->write_protect_seq); 5434 mmu_notifier_invalidate_range_end(&range); 5435 } else { 5436 hugetlb_vma_unlock_read(src_vma); 5437 } 5438 5439 return ret; 5440 } 5441 5442 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr, 5443 unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte, 5444 unsigned long sz) 5445 { 5446 bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma); 5447 struct hstate *h = hstate_vma(vma); 5448 struct mm_struct *mm = vma->vm_mm; 5449 spinlock_t *src_ptl, *dst_ptl; 5450 pte_t pte; 5451 5452 dst_ptl = huge_pte_lock(h, mm, dst_pte); 5453 src_ptl = huge_pte_lockptr(h, mm, src_pte); 5454 5455 /* 5456 * We don't have to worry about the ordering of src and dst ptlocks 5457 * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock. 
5458 */ 5459 if (src_ptl != dst_ptl) 5460 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5461 5462 pte = huge_ptep_get_and_clear(mm, old_addr, src_pte, sz); 5463 5464 if (need_clear_uffd_wp && pte_marker_uffd_wp(pte)) 5465 huge_pte_clear(mm, new_addr, dst_pte, sz); 5466 else { 5467 if (need_clear_uffd_wp) { 5468 if (pte_present(pte)) 5469 pte = huge_pte_clear_uffd_wp(pte); 5470 else if (is_swap_pte(pte)) 5471 pte = pte_swp_clear_uffd_wp(pte); 5472 } 5473 set_huge_pte_at(mm, new_addr, dst_pte, pte, sz); 5474 } 5475 5476 if (src_ptl != dst_ptl) 5477 spin_unlock(src_ptl); 5478 spin_unlock(dst_ptl); 5479 } 5480 5481 int move_hugetlb_page_tables(struct vm_area_struct *vma, 5482 struct vm_area_struct *new_vma, 5483 unsigned long old_addr, unsigned long new_addr, 5484 unsigned long len) 5485 { 5486 struct hstate *h = hstate_vma(vma); 5487 struct address_space *mapping = vma->vm_file->f_mapping; 5488 unsigned long sz = huge_page_size(h); 5489 struct mm_struct *mm = vma->vm_mm; 5490 unsigned long old_end = old_addr + len; 5491 unsigned long last_addr_mask; 5492 pte_t *src_pte, *dst_pte; 5493 struct mmu_notifier_range range; 5494 bool shared_pmd = false; 5495 5496 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr, 5497 old_end); 5498 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 5499 /* 5500 * In case of shared PMDs, we should cover the maximum possible 5501 * range. 5502 */ 5503 flush_cache_range(vma, range.start, range.end); 5504 5505 mmu_notifier_invalidate_range_start(&range); 5506 last_addr_mask = hugetlb_mask_last_page(h); 5507 /* Prevent race with file truncation */ 5508 hugetlb_vma_lock_write(vma); 5509 i_mmap_lock_write(mapping); 5510 for (; old_addr < old_end; old_addr += sz, new_addr += sz) { 5511 src_pte = hugetlb_walk(vma, old_addr, sz); 5512 if (!src_pte) { 5513 old_addr |= last_addr_mask; 5514 new_addr |= last_addr_mask; 5515 continue; 5516 } 5517 if (huge_pte_none(huge_ptep_get(mm, old_addr, src_pte))) 5518 continue; 5519 5520 if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) { 5521 shared_pmd = true; 5522 old_addr |= last_addr_mask; 5523 new_addr |= last_addr_mask; 5524 continue; 5525 } 5526 5527 dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz); 5528 if (!dst_pte) 5529 break; 5530 5531 move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz); 5532 } 5533 5534 if (shared_pmd) 5535 flush_hugetlb_tlb_range(vma, range.start, range.end); 5536 else 5537 flush_hugetlb_tlb_range(vma, old_end - len, old_end); 5538 mmu_notifier_invalidate_range_end(&range); 5539 i_mmap_unlock_write(mapping); 5540 hugetlb_vma_unlock_write(vma); 5541 5542 return len + old_addr - old_end; 5543 } 5544 5545 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 5546 unsigned long start, unsigned long end, 5547 struct page *ref_page, zap_flags_t zap_flags) 5548 { 5549 struct mm_struct *mm = vma->vm_mm; 5550 unsigned long address; 5551 pte_t *ptep; 5552 pte_t pte; 5553 spinlock_t *ptl; 5554 struct page *page; 5555 struct hstate *h = hstate_vma(vma); 5556 unsigned long sz = huge_page_size(h); 5557 bool adjust_reservation = false; 5558 unsigned long last_addr_mask; 5559 bool force_flush = false; 5560 5561 WARN_ON(!is_vm_hugetlb_page(vma)); 5562 BUG_ON(start & ~huge_page_mask(h)); 5563 BUG_ON(end & ~huge_page_mask(h)); 5564 5565 /* 5566 * This is a hugetlb vma, all the pte entries should point 5567 * to huge page. 
         */
        tlb_change_page_size(tlb, sz);
        tlb_start_vma(tlb, vma);

        last_addr_mask = hugetlb_mask_last_page(h);
        address = start;
        for (; address < end; address += sz) {
                ptep = hugetlb_walk(vma, address, sz);
                if (!ptep) {
                        address |= last_addr_mask;
                        continue;
                }

                ptl = huge_pte_lock(h, mm, ptep);
                if (huge_pmd_unshare(mm, vma, address, ptep)) {
                        spin_unlock(ptl);
                        tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
                        force_flush = true;
                        address |= last_addr_mask;
                        continue;
                }

                pte = huge_ptep_get(mm, address, ptep);
                if (huge_pte_none(pte)) {
                        spin_unlock(ptl);
                        continue;
                }

                /*
                 * Migrating hugepage or HWPoisoned hugepage is already
                 * unmapped and its refcount is dropped, so just clear pte here.
                 */
                if (unlikely(!pte_present(pte))) {
                        /*
                         * If the pte was wr-protected by uffd-wp in any of the
                         * swap forms, meanwhile the caller does not want to
                         * drop the uffd-wp bit in this zap, then replace the
                         * pte with a marker.
                         */
                        if (pte_swp_uffd_wp_any(pte) &&
                            !(zap_flags & ZAP_FLAG_DROP_MARKER))
                                set_huge_pte_at(mm, address, ptep,
                                                make_pte_marker(PTE_MARKER_UFFD_WP),
                                                sz);
                        else
                                huge_pte_clear(mm, address, ptep, sz);
                        spin_unlock(ptl);
                        continue;
                }

                page = pte_page(pte);
                /*
                 * If a reference page is supplied, it is because a specific
                 * page is being unmapped, not a range. Ensure the page we
                 * are about to unmap is the actual page of interest.
                 */
                if (ref_page) {
                        if (page != ref_page) {
                                spin_unlock(ptl);
                                continue;
                        }
                        /*
                         * Mark the VMA as having unmapped its page so that
                         * future faults in this VMA will fail rather than
                         * looking like data was lost
                         */
                        set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
                }

                pte = huge_ptep_get_and_clear(mm, address, ptep, sz);
                tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
                if (huge_pte_dirty(pte))
                        set_page_dirty(page);
                /* Leave a uffd-wp pte marker if needed */
                if (huge_pte_uffd_wp(pte) &&
                    !(zap_flags & ZAP_FLAG_DROP_MARKER))
                        set_huge_pte_at(mm, address, ptep,
                                        make_pte_marker(PTE_MARKER_UFFD_WP),
                                        sz);
                hugetlb_count_sub(pages_per_huge_page(h), mm);
                hugetlb_remove_rmap(page_folio(page));

                /*
                 * Restore the reservation for an anonymous page, otherwise the
                 * backing page could be stolen by someone.
                 * If we are freeing a surplus, do not set the restore
                 * reservation bit.
                 */
                if (!h->surplus_huge_pages && __vma_private_lock(vma) &&
                    folio_test_anon(page_folio(page))) {
                        folio_set_hugetlb_restore_reserve(page_folio(page));
                        /* Reservation to be adjusted after the spin lock */
                        adjust_reservation = true;
                }

                spin_unlock(ptl);

                /*
                 * Adjust the reservation for the region that will have the
                 * reserve restored. Keep in mind that vma_needs_reservation() changes
                 * resv->adds_in_progress if it succeeds. If this is not done,
                 * do_exit() will not see it, and will keep the reservation
                 * forever.
                 */
                if (adjust_reservation) {
                        int rc = vma_needs_reservation(h, vma, address);

                        if (rc < 0)
                                /* Presumably allocate_file_region_entries failed
                                 * to allocate a file_region struct. Clear
                                 * hugetlb_restore_reserve so that global reserve
                                 * count will not be incremented by free_huge_folio.
                                 * Act as if we consumed the reservation.
                                 */
                                folio_clear_hugetlb_restore_reserve(page_folio(page));
                        else if (rc)
                                vma_add_reservation(h, vma, address);
                }

                tlb_remove_page_size(tlb, page, huge_page_size(h));
                /*
                 * Bail out after unmapping the reference page if supplied
                 */
                if (ref_page)
                        break;
        }
        tlb_end_vma(tlb, vma);

        /*
         * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
         * could defer the flush until now, since by holding i_mmap_rwsem we
         * guaranteed that the last reference would not be dropped. But we must
         * do the flushing before we return, as otherwise i_mmap_rwsem will be
         * dropped and the last reference to the shared PMDs page might be
         * dropped as well.
         *
         * In theory we could defer the freeing of the PMD pages as well, but
         * huge_pmd_unshare() relies on the exact page_count for the PMD page to
         * detect sharing, so we cannot defer the release of the page either.
         * Instead, do flush now.
         */
        if (force_flush)
                tlb_flush_mmu_tlbonly(tlb);
}

void __hugetlb_zap_begin(struct vm_area_struct *vma,
                         unsigned long *start, unsigned long *end)
{
        if (!vma->vm_file)      /* hugetlbfs_file_mmap error */
                return;

        adjust_range_if_pmd_sharing_possible(vma, start, end);
        hugetlb_vma_lock_write(vma);
        if (vma->vm_file)
                i_mmap_lock_write(vma->vm_file->f_mapping);
}

void __hugetlb_zap_end(struct vm_area_struct *vma,
                       struct zap_details *details)
{
        zap_flags_t zap_flags = details ? details->zap_flags : 0;

        if (!vma->vm_file)      /* hugetlbfs_file_mmap error */
                return;

        if (zap_flags & ZAP_FLAG_UNMAP) {       /* final unmap */
                /*
                 * Unlock and free the vma lock before releasing i_mmap_rwsem.
                 * When the vma_lock is freed, this makes the vma ineligible
                 * for pmd sharing. And, i_mmap_rwsem is required to set up
                 * pmd sharing. This is important as page tables for this
                 * unmapped range will be asynchronously deleted. If the page
                 * tables are shared, there will be issues when accessed by
                 * someone else.
                 */
                __hugetlb_vma_unlock_write_free(vma);
        } else {
                hugetlb_vma_unlock_write(vma);
        }

        if (vma->vm_file)
                i_mmap_unlock_write(vma->vm_file->f_mapping);
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end, struct page *ref_page,
                          zap_flags_t zap_flags)
{
        struct mmu_notifier_range range;
        struct mmu_gather tlb;

        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
                                start, end);
        adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
        mmu_notifier_invalidate_range_start(&range);
        tlb_gather_mmu(&tlb, vma->vm_mm);

        __unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);

        mmu_notifier_invalidate_range_end(&range);
        tlb_finish_mmu(&tlb);
}

/*
 * This is called when the original mapper is failing to COW a MAP_PRIVATE
 * mapping it owns the reserve page for. The intention is to unmap the page
 * from other VMAs and let the children be SIGKILLed if they are faulting the
 * same region.
5776 */ 5777 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, 5778 struct page *page, unsigned long address) 5779 { 5780 struct hstate *h = hstate_vma(vma); 5781 struct vm_area_struct *iter_vma; 5782 struct address_space *mapping; 5783 pgoff_t pgoff; 5784 5785 /* 5786 * vm_pgoff is in PAGE_SIZE units, hence the different calculation 5787 * from page cache lookup which is in HPAGE_SIZE units. 5788 */ 5789 address = address & huge_page_mask(h); 5790 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + 5791 vma->vm_pgoff; 5792 mapping = vma->vm_file->f_mapping; 5793 5794 /* 5795 * Take the mapping lock for the duration of the table walk. As 5796 * this mapping should be shared between all the VMAs, 5797 * __unmap_hugepage_range() is called as the lock is already held 5798 */ 5799 i_mmap_lock_write(mapping); 5800 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { 5801 /* Do not unmap the current VMA */ 5802 if (iter_vma == vma) 5803 continue; 5804 5805 /* 5806 * Shared VMAs have their own reserves and do not affect 5807 * MAP_PRIVATE accounting but it is possible that a shared 5808 * VMA is using the same page so check and skip such VMAs. 5809 */ 5810 if (iter_vma->vm_flags & VM_MAYSHARE) 5811 continue; 5812 5813 /* 5814 * Unmap the page from other VMAs without their own reserves. 5815 * They get marked to be SIGKILLed if they fault in these 5816 * areas. This is because a future no-page fault on this VMA 5817 * could insert a zeroed page instead of the data existing 5818 * from the time of fork. This would look like data corruption 5819 */ 5820 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 5821 unmap_hugepage_range(iter_vma, address, 5822 address + huge_page_size(h), page, 0); 5823 } 5824 i_mmap_unlock_write(mapping); 5825 } 5826 5827 /* 5828 * hugetlb_wp() should be called with page lock of the original hugepage held. 5829 * Called with hugetlb_fault_mutex_table held and pte_page locked so we 5830 * cannot race with other handlers or page migration. 5831 * Keep the pte_same checks anyway to make transition from the mutex easier. 5832 */ 5833 static vm_fault_t hugetlb_wp(struct folio *pagecache_folio, 5834 struct vm_fault *vmf) 5835 { 5836 struct vm_area_struct *vma = vmf->vma; 5837 struct mm_struct *mm = vma->vm_mm; 5838 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 5839 pte_t pte = huge_ptep_get(mm, vmf->address, vmf->pte); 5840 struct hstate *h = hstate_vma(vma); 5841 struct folio *old_folio; 5842 struct folio *new_folio; 5843 bool cow_from_owner = 0; 5844 vm_fault_t ret = 0; 5845 struct mmu_notifier_range range; 5846 5847 /* 5848 * Never handle CoW for uffd-wp protected pages. It should be only 5849 * handled when the uffd-wp protection is removed. 5850 * 5851 * Note that only the CoW optimization path (in hugetlb_no_page()) 5852 * can trigger this, because hugetlb_fault() will always resolve 5853 * uffd-wp bit first. 5854 */ 5855 if (!unshare && huge_pte_uffd_wp(pte)) 5856 return 0; 5857 5858 /* Let's take out MAP_SHARED mappings first. */ 5859 if (vma->vm_flags & VM_MAYSHARE) { 5860 set_huge_ptep_writable(vma, vmf->address, vmf->pte); 5861 return 0; 5862 } 5863 5864 old_folio = page_folio(pte_page(pte)); 5865 5866 delayacct_wpcopy_start(); 5867 5868 retry_avoidcopy: 5869 /* 5870 * If no-one else is actually using this page, we're the exclusive 5871 * owner and can reuse this page. 
5872 * 5873 * Note that we don't rely on the (safer) folio refcount here, because 5874 * copying the hugetlb folio when there are unexpected (temporary) 5875 * folio references could harm simple fork()+exit() users when 5876 * we run out of free hugetlb folios: we would have to kill processes 5877 * in scenarios that used to work. As a side effect, there can still 5878 * be leaks between processes, for example, with FOLL_GET users. 5879 */ 5880 if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) { 5881 if (!PageAnonExclusive(&old_folio->page)) { 5882 folio_move_anon_rmap(old_folio, vma); 5883 SetPageAnonExclusive(&old_folio->page); 5884 } 5885 if (likely(!unshare)) 5886 set_huge_ptep_maybe_writable(vma, vmf->address, 5887 vmf->pte); 5888 5889 delayacct_wpcopy_end(); 5890 return 0; 5891 } 5892 VM_BUG_ON_PAGE(folio_test_anon(old_folio) && 5893 PageAnonExclusive(&old_folio->page), &old_folio->page); 5894 5895 /* 5896 * If the process that created a MAP_PRIVATE mapping is about to 5897 * perform a COW due to a shared page count, attempt to satisfy 5898 * the allocation without using the existing reserves. The pagecache 5899 * page is used to determine if the reserve at this address was 5900 * consumed or not. If reserves were used, a partial faulted mapping 5901 * at the time of fork() could consume its reserves on COW instead 5902 * of the full address range. 5903 */ 5904 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 5905 old_folio != pagecache_folio) 5906 cow_from_owner = true; 5907 5908 folio_get(old_folio); 5909 5910 /* 5911 * Drop page table lock as buddy allocator may be called. It will 5912 * be acquired again before returning to the caller, as expected. 5913 */ 5914 spin_unlock(vmf->ptl); 5915 new_folio = alloc_hugetlb_folio(vma, vmf->address, cow_from_owner); 5916 5917 if (IS_ERR(new_folio)) { 5918 /* 5919 * If a process owning a MAP_PRIVATE mapping fails to COW, 5920 * it is due to references held by a child and an insufficient 5921 * huge page pool. To guarantee the original mappers 5922 * reliability, unmap the page from child processes. The child 5923 * may get SIGKILLed if it later faults. 5924 */ 5925 if (cow_from_owner) { 5926 struct address_space *mapping = vma->vm_file->f_mapping; 5927 pgoff_t idx; 5928 u32 hash; 5929 5930 folio_put(old_folio); 5931 /* 5932 * Drop hugetlb_fault_mutex and vma_lock before 5933 * unmapping. unmapping needs to hold vma_lock 5934 * in write mode. Dropping vma_lock in read mode 5935 * here is OK as COW mappings do not interact with 5936 * PMD sharing. 5937 * 5938 * Reacquire both after unmap operation. 5939 */ 5940 idx = vma_hugecache_offset(h, vma, vmf->address); 5941 hash = hugetlb_fault_mutex_hash(mapping, idx); 5942 hugetlb_vma_unlock_read(vma); 5943 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 5944 5945 unmap_ref_private(mm, vma, &old_folio->page, 5946 vmf->address); 5947 5948 mutex_lock(&hugetlb_fault_mutex_table[hash]); 5949 hugetlb_vma_lock_read(vma); 5950 spin_lock(vmf->ptl); 5951 vmf->pte = hugetlb_walk(vma, vmf->address, 5952 huge_page_size(h)); 5953 if (likely(vmf->pte && 5954 pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) 5955 goto retry_avoidcopy; 5956 /* 5957 * race occurs while re-acquiring page table 5958 * lock, and our job is done. 5959 */ 5960 delayacct_wpcopy_end(); 5961 return 0; 5962 } 5963 5964 ret = vmf_error(PTR_ERR(new_folio)); 5965 goto out_release_old; 5966 } 5967 5968 /* 5969 * When the original hugepage is shared one, it does not have 5970 * anon_vma prepared. 
5971 */ 5972 ret = __vmf_anon_prepare(vmf); 5973 if (unlikely(ret)) 5974 goto out_release_all; 5975 5976 if (copy_user_large_folio(new_folio, old_folio, vmf->real_address, vma)) { 5977 ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h)); 5978 goto out_release_all; 5979 } 5980 __folio_mark_uptodate(new_folio); 5981 5982 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, vmf->address, 5983 vmf->address + huge_page_size(h)); 5984 mmu_notifier_invalidate_range_start(&range); 5985 5986 /* 5987 * Retake the page table lock to check for racing updates 5988 * before the page tables are altered 5989 */ 5990 spin_lock(vmf->ptl); 5991 vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h)); 5992 if (likely(vmf->pte && pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) { 5993 pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare); 5994 5995 /* Break COW or unshare */ 5996 huge_ptep_clear_flush(vma, vmf->address, vmf->pte); 5997 hugetlb_remove_rmap(old_folio); 5998 hugetlb_add_new_anon_rmap(new_folio, vma, vmf->address); 5999 if (huge_pte_uffd_wp(pte)) 6000 newpte = huge_pte_mkuffd_wp(newpte); 6001 set_huge_pte_at(mm, vmf->address, vmf->pte, newpte, 6002 huge_page_size(h)); 6003 folio_set_hugetlb_migratable(new_folio); 6004 /* Make the old page be freed below */ 6005 new_folio = old_folio; 6006 } 6007 spin_unlock(vmf->ptl); 6008 mmu_notifier_invalidate_range_end(&range); 6009 out_release_all: 6010 /* 6011 * No restore in case of successful pagetable update (Break COW or 6012 * unshare) 6013 */ 6014 if (new_folio != old_folio) 6015 restore_reserve_on_error(h, vma, vmf->address, new_folio); 6016 folio_put(new_folio); 6017 out_release_old: 6018 folio_put(old_folio); 6019 6020 spin_lock(vmf->ptl); /* Caller expects lock to be held */ 6021 6022 delayacct_wpcopy_end(); 6023 return ret; 6024 } 6025 6026 /* 6027 * Return whether there is a pagecache page to back given address within VMA. 6028 */ 6029 bool hugetlbfs_pagecache_present(struct hstate *h, 6030 struct vm_area_struct *vma, unsigned long address) 6031 { 6032 struct address_space *mapping = vma->vm_file->f_mapping; 6033 pgoff_t idx = linear_page_index(vma, address); 6034 struct folio *folio; 6035 6036 folio = filemap_get_folio(mapping, idx); 6037 if (IS_ERR(folio)) 6038 return false; 6039 folio_put(folio); 6040 return true; 6041 } 6042 6043 int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping, 6044 pgoff_t idx) 6045 { 6046 struct inode *inode = mapping->host; 6047 struct hstate *h = hstate_inode(inode); 6048 int err; 6049 6050 idx <<= huge_page_order(h); 6051 __folio_set_locked(folio); 6052 err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL); 6053 6054 if (unlikely(err)) { 6055 __folio_clear_locked(folio); 6056 return err; 6057 } 6058 folio_clear_hugetlb_restore_reserve(folio); 6059 6060 /* 6061 * mark folio dirty so that it will not be removed from cache/file 6062 * by non-hugetlbfs specific code paths. 6063 */ 6064 folio_mark_dirty(folio); 6065 6066 spin_lock(&inode->i_lock); 6067 inode->i_blocks += blocks_per_huge_page(h); 6068 spin_unlock(&inode->i_lock); 6069 return 0; 6070 } 6071 6072 static inline vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf, 6073 struct address_space *mapping, 6074 unsigned long reason) 6075 { 6076 u32 hash; 6077 6078 /* 6079 * vma_lock and hugetlb_fault_mutex must be dropped before handling 6080 * userfault. Also mmap_lock could be dropped due to handling 6081 * userfault, any vma operation should be careful from here. 
6082 */ 6083 hugetlb_vma_unlock_read(vmf->vma); 6084 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff); 6085 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6086 return handle_userfault(vmf, reason); 6087 } 6088 6089 /* 6090 * Recheck pte with pgtable lock. Returns true if pte didn't change, or 6091 * false if pte changed or is changing. 6092 */ 6093 static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, unsigned long addr, 6094 pte_t *ptep, pte_t old_pte) 6095 { 6096 spinlock_t *ptl; 6097 bool same; 6098 6099 ptl = huge_pte_lock(h, mm, ptep); 6100 same = pte_same(huge_ptep_get(mm, addr, ptep), old_pte); 6101 spin_unlock(ptl); 6102 6103 return same; 6104 } 6105 6106 static vm_fault_t hugetlb_no_page(struct address_space *mapping, 6107 struct vm_fault *vmf) 6108 { 6109 struct vm_area_struct *vma = vmf->vma; 6110 struct mm_struct *mm = vma->vm_mm; 6111 struct hstate *h = hstate_vma(vma); 6112 vm_fault_t ret = VM_FAULT_SIGBUS; 6113 int anon_rmap = 0; 6114 unsigned long size; 6115 struct folio *folio; 6116 pte_t new_pte; 6117 bool new_folio, new_pagecache_folio = false; 6118 u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff); 6119 6120 /* 6121 * Currently, we are forced to kill the process in the event the 6122 * original mapper has unmapped pages from the child due to a failed 6123 * COW/unsharing. Warn that such a situation has occurred as it may not 6124 * be obvious. 6125 */ 6126 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 6127 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n", 6128 current->pid); 6129 goto out; 6130 } 6131 6132 /* 6133 * Use page lock to guard against racing truncation 6134 * before we get page_table_lock. 6135 */ 6136 new_folio = false; 6137 folio = filemap_lock_hugetlb_folio(h, mapping, vmf->pgoff); 6138 if (IS_ERR(folio)) { 6139 size = i_size_read(mapping->host) >> huge_page_shift(h); 6140 if (vmf->pgoff >= size) 6141 goto out; 6142 /* Check for page in userfault range */ 6143 if (userfaultfd_missing(vma)) { 6144 /* 6145 * Since hugetlb_no_page() was examining pte 6146 * without pgtable lock, we need to re-test under 6147 * lock because the pte may not be stable and could 6148 * have changed from under us. Try to detect 6149 * either changed or during-changing ptes and retry 6150 * properly when needed. 6151 * 6152 * Note that userfaultfd is actually fine with 6153 * false positives (e.g. caused by pte changed), 6154 * but not wrong logical events (e.g. caused by 6155 * reading a pte during changing). The latter can 6156 * confuse the userspace, so the strictness is very 6157 * much preferred. E.g., MISSING event should 6158 * never happen on the page after UFFDIO_COPY has 6159 * correctly installed the page and returned. 6160 */ 6161 if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) { 6162 ret = 0; 6163 goto out; 6164 } 6165 6166 return hugetlb_handle_userfault(vmf, mapping, 6167 VM_UFFD_MISSING); 6168 } 6169 6170 if (!(vma->vm_flags & VM_MAYSHARE)) { 6171 ret = __vmf_anon_prepare(vmf); 6172 if (unlikely(ret)) 6173 goto out; 6174 } 6175 6176 folio = alloc_hugetlb_folio(vma, vmf->address, false); 6177 if (IS_ERR(folio)) { 6178 /* 6179 * Returning error will result in faulting task being 6180 * sent SIGBUS. The hugetlb fault mutex prevents two 6181 * tasks from racing to fault in the same page which 6182 * could result in false unable to allocate errors. 6183 * Page migration does not take the fault mutex, but 6184 * does a clear then write of pte's under page table 6185 * lock. 
Page fault code could race with migration, 6186 * notice the clear pte and try to allocate a page 6187 * here. Before returning error, get ptl and make 6188 * sure there really is no pte entry. 6189 */ 6190 if (hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) 6191 ret = vmf_error(PTR_ERR(folio)); 6192 else 6193 ret = 0; 6194 goto out; 6195 } 6196 folio_zero_user(folio, vmf->real_address); 6197 __folio_mark_uptodate(folio); 6198 new_folio = true; 6199 6200 if (vma->vm_flags & VM_MAYSHARE) { 6201 int err = hugetlb_add_to_page_cache(folio, mapping, 6202 vmf->pgoff); 6203 if (err) { 6204 /* 6205 * err can't be -EEXIST which implies someone 6206 * else consumed the reservation since hugetlb 6207 * fault mutex is held when add a hugetlb page 6208 * to the page cache. So it's safe to call 6209 * restore_reserve_on_error() here. 6210 */ 6211 restore_reserve_on_error(h, vma, vmf->address, 6212 folio); 6213 folio_put(folio); 6214 ret = VM_FAULT_SIGBUS; 6215 goto out; 6216 } 6217 new_pagecache_folio = true; 6218 } else { 6219 folio_lock(folio); 6220 anon_rmap = 1; 6221 } 6222 } else { 6223 /* 6224 * If memory error occurs between mmap() and fault, some process 6225 * don't have hwpoisoned swap entry for errored virtual address. 6226 * So we need to block hugepage fault by PG_hwpoison bit check. 6227 */ 6228 if (unlikely(folio_test_hwpoison(folio))) { 6229 ret = VM_FAULT_HWPOISON_LARGE | 6230 VM_FAULT_SET_HINDEX(hstate_index(h)); 6231 goto backout_unlocked; 6232 } 6233 6234 /* Check for page in userfault range. */ 6235 if (userfaultfd_minor(vma)) { 6236 folio_unlock(folio); 6237 folio_put(folio); 6238 /* See comment in userfaultfd_missing() block above */ 6239 if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) { 6240 ret = 0; 6241 goto out; 6242 } 6243 return hugetlb_handle_userfault(vmf, mapping, 6244 VM_UFFD_MINOR); 6245 } 6246 } 6247 6248 /* 6249 * If we are going to COW a private mapping later, we examine the 6250 * pending reservations for this page now. This will ensure that 6251 * any allocations necessary to record that reservation occur outside 6252 * the spinlock. 6253 */ 6254 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 6255 if (vma_needs_reservation(h, vma, vmf->address) < 0) { 6256 ret = VM_FAULT_OOM; 6257 goto backout_unlocked; 6258 } 6259 /* Just decrements count, does not deallocate */ 6260 vma_end_reservation(h, vma, vmf->address); 6261 } 6262 6263 vmf->ptl = huge_pte_lock(h, mm, vmf->pte); 6264 ret = 0; 6265 /* If pte changed from under us, retry */ 6266 if (!pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), vmf->orig_pte)) 6267 goto backout; 6268 6269 if (anon_rmap) 6270 hugetlb_add_new_anon_rmap(folio, vma, vmf->address); 6271 else 6272 hugetlb_add_file_rmap(folio); 6273 new_pte = make_huge_pte(vma, &folio->page, vma->vm_flags & VM_SHARED); 6274 /* 6275 * If this pte was previously wr-protected, keep it wr-protected even 6276 * if populated. 6277 */ 6278 if (unlikely(pte_marker_uffd_wp(vmf->orig_pte))) 6279 new_pte = huge_pte_mkuffd_wp(new_pte); 6280 set_huge_pte_at(mm, vmf->address, vmf->pte, new_pte, huge_page_size(h)); 6281 6282 hugetlb_count_add(pages_per_huge_page(h), mm); 6283 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 6284 /* Optimization, do the COW without a second fault */ 6285 ret = hugetlb_wp(folio, vmf); 6286 } 6287 6288 spin_unlock(vmf->ptl); 6289 6290 /* 6291 * Only set hugetlb_migratable in newly allocated pages. 
Existing pages 6292 * found in the pagecache may not have hugetlb_migratable if they have 6293 * been isolated for migration. 6294 */ 6295 if (new_folio) 6296 folio_set_hugetlb_migratable(folio); 6297 6298 folio_unlock(folio); 6299 out: 6300 hugetlb_vma_unlock_read(vma); 6301 6302 /* 6303 * We must check to release the per-VMA lock. __vmf_anon_prepare() is 6304 * the only way ret can be set to VM_FAULT_RETRY. 6305 */ 6306 if (unlikely(ret & VM_FAULT_RETRY)) 6307 vma_end_read(vma); 6308 6309 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6310 return ret; 6311 6312 backout: 6313 spin_unlock(vmf->ptl); 6314 backout_unlocked: 6315 if (new_folio && !new_pagecache_folio) 6316 restore_reserve_on_error(h, vma, vmf->address, folio); 6317 6318 folio_unlock(folio); 6319 folio_put(folio); 6320 goto out; 6321 } 6322 6323 #ifdef CONFIG_SMP 6324 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) 6325 { 6326 unsigned long key[2]; 6327 u32 hash; 6328 6329 key[0] = (unsigned long) mapping; 6330 key[1] = idx; 6331 6332 hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0); 6333 6334 return hash & (num_fault_mutexes - 1); 6335 } 6336 #else 6337 /* 6338 * For uniprocessor systems we always use a single mutex, so just 6339 * return 0 and avoid the hashing overhead. 6340 */ 6341 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) 6342 { 6343 return 0; 6344 } 6345 #endif 6346 6347 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 6348 unsigned long address, unsigned int flags) 6349 { 6350 vm_fault_t ret; 6351 u32 hash; 6352 struct folio *folio = NULL; 6353 struct folio *pagecache_folio = NULL; 6354 struct hstate *h = hstate_vma(vma); 6355 struct address_space *mapping; 6356 int need_wait_lock = 0; 6357 struct vm_fault vmf = { 6358 .vma = vma, 6359 .address = address & huge_page_mask(h), 6360 .real_address = address, 6361 .flags = flags, 6362 .pgoff = vma_hugecache_offset(h, vma, 6363 address & huge_page_mask(h)), 6364 /* TODO: Track hugetlb faults using vm_fault */ 6365 6366 /* 6367 * Some fields may not be initialized, be careful as it may 6368 * be hard to debug if called functions make assumptions 6369 */ 6370 }; 6371 6372 /* 6373 * Serialize hugepage allocation and instantiation, so that we don't 6374 * get spurious allocation failures if two CPUs race to instantiate 6375 * the same page in the page cache. 6376 */ 6377 mapping = vma->vm_file->f_mapping; 6378 hash = hugetlb_fault_mutex_hash(mapping, vmf.pgoff); 6379 mutex_lock(&hugetlb_fault_mutex_table[hash]); 6380 6381 /* 6382 * Acquire vma lock before calling huge_pte_alloc and hold 6383 * until finished with vmf.pte. This prevents huge_pmd_unshare from 6384 * being called elsewhere and making the vmf.pte no longer valid. 6385 */ 6386 hugetlb_vma_lock_read(vma); 6387 vmf.pte = huge_pte_alloc(mm, vma, vmf.address, huge_page_size(h)); 6388 if (!vmf.pte) { 6389 hugetlb_vma_unlock_read(vma); 6390 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6391 return VM_FAULT_OOM; 6392 } 6393 6394 vmf.orig_pte = huge_ptep_get(mm, vmf.address, vmf.pte); 6395 if (huge_pte_none_mostly(vmf.orig_pte)) { 6396 if (is_pte_marker(vmf.orig_pte)) { 6397 pte_marker marker = 6398 pte_marker_get(pte_to_swp_entry(vmf.orig_pte)); 6399 6400 if (marker & PTE_MARKER_POISONED) { 6401 ret = VM_FAULT_HWPOISON_LARGE | 6402 VM_FAULT_SET_HINDEX(hstate_index(h)); 6403 goto out_mutex; 6404 } else if (WARN_ON_ONCE(marker & PTE_MARKER_GUARD)) { 6405 /* This isn't supported in hugetlb. 
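                                 * Guard PTE markers (as installed by
                                 * MADV_GUARD_INSTALL on regular mappings) are
                                 * never expected on hugetlb ptes, so warn once
                                 * and fail the fault with SIGSEGV.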
                                 */
                                ret = VM_FAULT_SIGSEGV;
                                goto out_mutex;
                        }
                }

                /*
                 * Other PTE markers should be handled the same way as none PTE.
                 *
                 * hugetlb_no_page will drop vma lock and hugetlb fault
                 * mutex internally, which makes us return immediately.
                 */
                return hugetlb_no_page(mapping, &vmf);
        }

        ret = 0;

        /*
         * vmf.orig_pte could be a migration or hwpoison entry at this
         * point, so this check prevents the kernel from going below assuming
         * that we have an active hugepage in pagecache. This goto expects
         * the 2nd page fault, and is_hugetlb_entry_(migration|hwpoisoned)
         * check will properly handle it.
         */
        if (!pte_present(vmf.orig_pte)) {
                if (unlikely(is_hugetlb_entry_migration(vmf.orig_pte))) {
                        /*
                         * Release the hugetlb fault lock now, but retain
                         * the vma lock, because it is needed to guard the
                         * huge_pte_lockptr() later in
                         * migration_entry_wait_huge(). The vma lock will
                         * be released there.
                         */
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        migration_entry_wait_huge(vma, vmf.address, vmf.pte);
                        return 0;
                } else if (unlikely(is_hugetlb_entry_hwpoisoned(vmf.orig_pte)))
                        ret = VM_FAULT_HWPOISON_LARGE |
                              VM_FAULT_SET_HINDEX(hstate_index(h));
                goto out_mutex;
        }

        /*
         * If we are going to COW/unshare the mapping later, we examine the
         * pending reservations for this page now. This will ensure that any
         * allocations necessary to record that reservation occur outside the
         * spinlock. Also lookup the pagecache page now as it is used to
         * determine if a reservation has been consumed.
         */
        if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
            !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(vmf.orig_pte)) {
                if (vma_needs_reservation(h, vma, vmf.address) < 0) {
                        ret = VM_FAULT_OOM;
                        goto out_mutex;
                }
                /* Just decrements count, does not deallocate */
                vma_end_reservation(h, vma, vmf.address);

                pagecache_folio = filemap_lock_hugetlb_folio(h, mapping,
                                                             vmf.pgoff);
                if (IS_ERR(pagecache_folio))
                        pagecache_folio = NULL;
        }

        vmf.ptl = huge_pte_lock(h, mm, vmf.pte);

        /* Check for a racing update before calling hugetlb_wp() */
        if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(mm, vmf.address, vmf.pte))))
                goto out_ptl;

        /* Handle userfault-wp first, before trying to lock more pages */
        if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(mm, vmf.address, vmf.pte)) &&
            (flags & FAULT_FLAG_WRITE) && !huge_pte_write(vmf.orig_pte)) {
                if (!userfaultfd_wp_async(vma)) {
                        spin_unlock(vmf.ptl);
                        if (pagecache_folio) {
                                folio_unlock(pagecache_folio);
                                folio_put(pagecache_folio);
                        }
                        hugetlb_vma_unlock_read(vma);
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        return handle_userfault(&vmf, VM_UFFD_WP);
                }

                vmf.orig_pte = huge_pte_clear_uffd_wp(vmf.orig_pte);
                set_huge_pte_at(mm, vmf.address, vmf.pte, vmf.orig_pte,
                                huge_page_size(hstate_vma(vma)));
                /* Fallthrough to CoW */
        }

        /*
         * hugetlb_wp() requires page locks of pte_page(vmf.orig_pte) and
         * pagecache_folio, so here we need to take the former one
         * when folio != pagecache_folio or !pagecache_folio.
6499 */ 6500 folio = page_folio(pte_page(vmf.orig_pte)); 6501 if (folio != pagecache_folio) 6502 if (!folio_trylock(folio)) { 6503 need_wait_lock = 1; 6504 goto out_ptl; 6505 } 6506 6507 folio_get(folio); 6508 6509 if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { 6510 if (!huge_pte_write(vmf.orig_pte)) { 6511 ret = hugetlb_wp(pagecache_folio, &vmf); 6512 goto out_put_page; 6513 } else if (likely(flags & FAULT_FLAG_WRITE)) { 6514 vmf.orig_pte = huge_pte_mkdirty(vmf.orig_pte); 6515 } 6516 } 6517 vmf.orig_pte = pte_mkyoung(vmf.orig_pte); 6518 if (huge_ptep_set_access_flags(vma, vmf.address, vmf.pte, vmf.orig_pte, 6519 flags & FAULT_FLAG_WRITE)) 6520 update_mmu_cache(vma, vmf.address, vmf.pte); 6521 out_put_page: 6522 if (folio != pagecache_folio) 6523 folio_unlock(folio); 6524 folio_put(folio); 6525 out_ptl: 6526 spin_unlock(vmf.ptl); 6527 6528 if (pagecache_folio) { 6529 folio_unlock(pagecache_folio); 6530 folio_put(pagecache_folio); 6531 } 6532 out_mutex: 6533 hugetlb_vma_unlock_read(vma); 6534 6535 /* 6536 * We must check to release the per-VMA lock. __vmf_anon_prepare() in 6537 * hugetlb_wp() is the only way ret can be set to VM_FAULT_RETRY. 6538 */ 6539 if (unlikely(ret & VM_FAULT_RETRY)) 6540 vma_end_read(vma); 6541 6542 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6543 /* 6544 * Generally it's safe to hold refcount during waiting page lock. But 6545 * here we just wait to defer the next page fault to avoid busy loop and 6546 * the page is not used after unlocked before returning from the current 6547 * page fault. So we are safe from accessing freed page, even if we wait 6548 * here without taking refcount. 6549 */ 6550 if (need_wait_lock) 6551 folio_wait_locked(folio); 6552 return ret; 6553 } 6554 6555 #ifdef CONFIG_USERFAULTFD 6556 /* 6557 * Can probably be eliminated, but still used by hugetlb_mfill_atomic_pte(). 6558 */ 6559 static struct folio *alloc_hugetlb_folio_vma(struct hstate *h, 6560 struct vm_area_struct *vma, unsigned long address) 6561 { 6562 struct mempolicy *mpol; 6563 nodemask_t *nodemask; 6564 struct folio *folio; 6565 gfp_t gfp_mask; 6566 int node; 6567 6568 gfp_mask = htlb_alloc_mask(h); 6569 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask); 6570 /* 6571 * This is used to allocate a temporary hugetlb to hold the copied 6572 * content, which will then be copied again to the final hugetlb 6573 * consuming a reservation. Set the alloc_fallback to false to indicate 6574 * that breaking the per-node hugetlb pool is not allowed in this case. 6575 */ 6576 folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask, false); 6577 mpol_cond_put(mpol); 6578 6579 return folio; 6580 } 6581 6582 /* 6583 * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte 6584 * with modifications for hugetlb pages. 
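 *
 * Returns 0 on success.  -ENOENT means the source data could not be copied
 * with the locks held: in that case a temporary folio has been allocated and
 * returned in *foliop, and the caller is expected to copy the contents
 * outside the lock and retry.  Any other negative value is a plain failure.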
6585 */ 6586 int hugetlb_mfill_atomic_pte(pte_t *dst_pte, 6587 struct vm_area_struct *dst_vma, 6588 unsigned long dst_addr, 6589 unsigned long src_addr, 6590 uffd_flags_t flags, 6591 struct folio **foliop) 6592 { 6593 struct mm_struct *dst_mm = dst_vma->vm_mm; 6594 bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE); 6595 bool wp_enabled = (flags & MFILL_ATOMIC_WP); 6596 struct hstate *h = hstate_vma(dst_vma); 6597 struct address_space *mapping = dst_vma->vm_file->f_mapping; 6598 pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr); 6599 unsigned long size = huge_page_size(h); 6600 int vm_shared = dst_vma->vm_flags & VM_SHARED; 6601 pte_t _dst_pte; 6602 spinlock_t *ptl; 6603 int ret = -ENOMEM; 6604 struct folio *folio; 6605 bool folio_in_pagecache = false; 6606 6607 if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) { 6608 ptl = huge_pte_lock(h, dst_mm, dst_pte); 6609 6610 /* Don't overwrite any existing PTEs (even markers) */ 6611 if (!huge_pte_none(huge_ptep_get(dst_mm, dst_addr, dst_pte))) { 6612 spin_unlock(ptl); 6613 return -EEXIST; 6614 } 6615 6616 _dst_pte = make_pte_marker(PTE_MARKER_POISONED); 6617 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size); 6618 6619 /* No need to invalidate - it was non-present before */ 6620 update_mmu_cache(dst_vma, dst_addr, dst_pte); 6621 6622 spin_unlock(ptl); 6623 return 0; 6624 } 6625 6626 if (is_continue) { 6627 ret = -EFAULT; 6628 folio = filemap_lock_hugetlb_folio(h, mapping, idx); 6629 if (IS_ERR(folio)) 6630 goto out; 6631 folio_in_pagecache = true; 6632 } else if (!*foliop) { 6633 /* If a folio already exists, then it's UFFDIO_COPY for 6634 * a non-missing case. Return -EEXIST. 6635 */ 6636 if (vm_shared && 6637 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { 6638 ret = -EEXIST; 6639 goto out; 6640 } 6641 6642 folio = alloc_hugetlb_folio(dst_vma, dst_addr, false); 6643 if (IS_ERR(folio)) { 6644 ret = -ENOMEM; 6645 goto out; 6646 } 6647 6648 ret = copy_folio_from_user(folio, (const void __user *) src_addr, 6649 false); 6650 6651 /* fallback to copy_from_user outside mmap_lock */ 6652 if (unlikely(ret)) { 6653 ret = -ENOENT; 6654 /* Free the allocated folio which may have 6655 * consumed a reservation. 6656 */ 6657 restore_reserve_on_error(h, dst_vma, dst_addr, folio); 6658 folio_put(folio); 6659 6660 /* Allocate a temporary folio to hold the copied 6661 * contents. 6662 */ 6663 folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr); 6664 if (!folio) { 6665 ret = -ENOMEM; 6666 goto out; 6667 } 6668 *foliop = folio; 6669 /* Set the outparam foliop and return to the caller to 6670 * copy the contents outside the lock. Don't free the 6671 * folio. 6672 */ 6673 goto out; 6674 } 6675 } else { 6676 if (vm_shared && 6677 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { 6678 folio_put(*foliop); 6679 ret = -EEXIST; 6680 *foliop = NULL; 6681 goto out; 6682 } 6683 6684 folio = alloc_hugetlb_folio(dst_vma, dst_addr, false); 6685 if (IS_ERR(folio)) { 6686 folio_put(*foliop); 6687 ret = -ENOMEM; 6688 *foliop = NULL; 6689 goto out; 6690 } 6691 ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma); 6692 folio_put(*foliop); 6693 *foliop = NULL; 6694 if (ret) { 6695 folio_put(folio); 6696 goto out; 6697 } 6698 } 6699 6700 /* 6701 * If we just allocated a new page, we need a memory barrier to ensure 6702 * that preceding stores to the page become visible before the 6703 * set_pte_at() write. The memory barrier inside __folio_mark_uptodate 6704 * is what we need. 
         *
         * In the case where we have not allocated a new page (is_continue),
         * the page must already be uptodate. UFFDIO_CONTINUE already includes
         * an earlier smp_wmb() to ensure that prior stores will be visible
         * before the set_pte_at() write.
         */
        if (!is_continue)
                __folio_mark_uptodate(folio);
        else
                WARN_ON_ONCE(!folio_test_uptodate(folio));

        /* Add shared, newly allocated pages to the page cache. */
        if (vm_shared && !is_continue) {
                ret = -EFAULT;
                if (idx >= (i_size_read(mapping->host) >> huge_page_shift(h)))
                        goto out_release_nounlock;

                /*
                 * Serialization between remove_inode_hugepages() and
                 * hugetlb_add_to_page_cache() below happens through the
                 * hugetlb_fault_mutex_table that must be held by
                 * the caller here.
                 */
                ret = hugetlb_add_to_page_cache(folio, mapping, idx);
                if (ret)
                        goto out_release_nounlock;
                folio_in_pagecache = true;
        }

        ptl = huge_pte_lock(h, dst_mm, dst_pte);

        ret = -EIO;
        if (folio_test_hwpoison(folio))
                goto out_release_unlock;

        /*
         * We allow overwriting a pte marker: consider when both MISSING|WP
         * are registered, we first wr-protect a none pte which has no page
         * cache page backing it, then access the page.
         */
        ret = -EEXIST;
        if (!huge_pte_none_mostly(huge_ptep_get(dst_mm, dst_addr, dst_pte)))
                goto out_release_unlock;

        if (folio_in_pagecache)
                hugetlb_add_file_rmap(folio);
        else
                hugetlb_add_new_anon_rmap(folio, dst_vma, dst_addr);

        /*
         * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
         * with wp flag set, don't set pte write bit.
         */
        _dst_pte = make_huge_pte(dst_vma, &folio->page,
                                 !wp_enabled && !(is_continue && !vm_shared));
        /*
         * Always mark UFFDIO_COPY page dirty; note that this may not be
         * extremely important for hugetlbfs for now since swapping is not
         * supported, but we should still be clear in that this page cannot be
         * thrown away at will, even if write bit not set.
6765 */ 6766 _dst_pte = huge_pte_mkdirty(_dst_pte); 6767 _dst_pte = pte_mkyoung(_dst_pte); 6768 6769 if (wp_enabled) 6770 _dst_pte = huge_pte_mkuffd_wp(_dst_pte); 6771 6772 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size); 6773 6774 hugetlb_count_add(pages_per_huge_page(h), dst_mm); 6775 6776 /* No need to invalidate - it was non-present before */ 6777 update_mmu_cache(dst_vma, dst_addr, dst_pte); 6778 6779 spin_unlock(ptl); 6780 if (!is_continue) 6781 folio_set_hugetlb_migratable(folio); 6782 if (vm_shared || is_continue) 6783 folio_unlock(folio); 6784 ret = 0; 6785 out: 6786 return ret; 6787 out_release_unlock: 6788 spin_unlock(ptl); 6789 if (vm_shared || is_continue) 6790 folio_unlock(folio); 6791 out_release_nounlock: 6792 if (!folio_in_pagecache) 6793 restore_reserve_on_error(h, dst_vma, dst_addr, folio); 6794 folio_put(folio); 6795 goto out; 6796 } 6797 #endif /* CONFIG_USERFAULTFD */ 6798 6799 long hugetlb_change_protection(struct vm_area_struct *vma, 6800 unsigned long address, unsigned long end, 6801 pgprot_t newprot, unsigned long cp_flags) 6802 { 6803 struct mm_struct *mm = vma->vm_mm; 6804 unsigned long start = address; 6805 pte_t *ptep; 6806 pte_t pte; 6807 struct hstate *h = hstate_vma(vma); 6808 long pages = 0, psize = huge_page_size(h); 6809 bool shared_pmd = false; 6810 struct mmu_notifier_range range; 6811 unsigned long last_addr_mask; 6812 bool uffd_wp = cp_flags & MM_CP_UFFD_WP; 6813 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; 6814 6815 /* 6816 * In the case of shared PMDs, the area to flush could be beyond 6817 * start/end. Set range.start/range.end to cover the maximum possible 6818 * range if PMD sharing is possible. 6819 */ 6820 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 6821 0, mm, start, end); 6822 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 6823 6824 BUG_ON(address >= end); 6825 flush_cache_range(vma, range.start, range.end); 6826 6827 mmu_notifier_invalidate_range_start(&range); 6828 hugetlb_vma_lock_write(vma); 6829 i_mmap_lock_write(vma->vm_file->f_mapping); 6830 last_addr_mask = hugetlb_mask_last_page(h); 6831 for (; address < end; address += psize) { 6832 spinlock_t *ptl; 6833 ptep = hugetlb_walk(vma, address, psize); 6834 if (!ptep) { 6835 if (!uffd_wp) { 6836 address |= last_addr_mask; 6837 continue; 6838 } 6839 /* 6840 * Userfaultfd wr-protect requires pgtable 6841 * pre-allocations to install pte markers. 6842 */ 6843 ptep = huge_pte_alloc(mm, vma, address, psize); 6844 if (!ptep) { 6845 pages = -ENOMEM; 6846 break; 6847 } 6848 } 6849 ptl = huge_pte_lock(h, mm, ptep); 6850 if (huge_pmd_unshare(mm, vma, address, ptep)) { 6851 /* 6852 * When uffd-wp is enabled on the vma, unshare 6853 * shouldn't happen at all. Warn about it if it 6854 * happened due to some reason. 6855 */ 6856 WARN_ON_ONCE(uffd_wp || uffd_wp_resolve); 6857 pages++; 6858 spin_unlock(ptl); 6859 shared_pmd = true; 6860 address |= last_addr_mask; 6861 continue; 6862 } 6863 pte = huge_ptep_get(mm, address, ptep); 6864 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { 6865 /* Nothing to do. 
 */
                } else if (unlikely(is_hugetlb_entry_migration(pte))) {
                        swp_entry_t entry = pte_to_swp_entry(pte);
                        struct page *page = pfn_swap_entry_to_page(entry);
                        pte_t newpte = pte;

                        if (is_writable_migration_entry(entry)) {
                                if (PageAnon(page))
                                        entry = make_readable_exclusive_migration_entry(
                                                                swp_offset(entry));
                                else
                                        entry = make_readable_migration_entry(
                                                                swp_offset(entry));
                                newpte = swp_entry_to_pte(entry);
                                pages++;
                        }

                        if (uffd_wp)
                                newpte = pte_swp_mkuffd_wp(newpte);
                        else if (uffd_wp_resolve)
                                newpte = pte_swp_clear_uffd_wp(newpte);
                        if (!pte_same(pte, newpte))
                                set_huge_pte_at(mm, address, ptep, newpte, psize);
                } else if (unlikely(is_pte_marker(pte))) {
                        /*
                         * Do nothing on a poison marker; page is
                         * corrupted, permissions do not apply. Here
                         * pte_marker_uffd_wp()==true implies !poison
                         * because they're mutually exclusive.
                         */
                        if (pte_marker_uffd_wp(pte) && uffd_wp_resolve)
                                /* Safe to modify directly (non-present->none). */
                                huge_pte_clear(mm, address, ptep, psize);
                } else if (!huge_pte_none(pte)) {
                        pte_t old_pte;
                        unsigned int shift = huge_page_shift(hstate_vma(vma));

                        old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
                        pte = huge_pte_modify(old_pte, newprot);
                        pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
                        if (uffd_wp)
                                pte = huge_pte_mkuffd_wp(pte);
                        else if (uffd_wp_resolve)
                                pte = huge_pte_clear_uffd_wp(pte);
                        huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
                        pages++;
                } else {
                        /* None pte */
                        if (unlikely(uffd_wp))
                                /* Safe to modify directly (none->non-present). */
                                set_huge_pte_at(mm, address, ptep,
                                                make_pte_marker(PTE_MARKER_UFFD_WP),
                                                psize);
                }
                spin_unlock(ptl);
        }
        /*
         * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
         * may have cleared our pud entry and done put_page on the page table:
         * once we release i_mmap_rwsem, another task can do the final put_page
         * and that page table be reused and filled with junk. If we actually
         * did unshare a page of pmds, flush the range corresponding to the pud.
         */
        if (shared_pmd)
                flush_hugetlb_tlb_range(vma, range.start, range.end);
        else
                flush_hugetlb_tlb_range(vma, start, end);
        /*
         * No need to call mmu_notifier_arch_invalidate_secondary_tlbs() as we
         * are downgrading page table protection, not changing it to point to
         * a new page.
         *
         * See Documentation/mm/mmu_notifier.rst
         */
        i_mmap_unlock_write(vma->vm_file->f_mapping);
        hugetlb_vma_unlock_write(vma);
        mmu_notifier_invalidate_range_end(&range);

        return pages > 0 ? (pages << h->order) : pages;
}

/* Return true if reservation was successful, false otherwise. */
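/*
 * Note: "from" and "to" are hugepage indices into the underlying file
 * (byte offset right-shifted by huge_page_shift()), matching the units
 * used by the resv_map region_chg()/region_add() helpers below.
 */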
*/ 6947 bool hugetlb_reserve_pages(struct inode *inode, 6948 long from, long to, 6949 struct vm_area_struct *vma, 6950 vm_flags_t vm_flags) 6951 { 6952 long chg = -1, add = -1; 6953 struct hstate *h = hstate_inode(inode); 6954 struct hugepage_subpool *spool = subpool_inode(inode); 6955 struct resv_map *resv_map; 6956 struct hugetlb_cgroup *h_cg = NULL; 6957 long gbl_reserve, regions_needed = 0; 6958 6959 /* This should never happen */ 6960 if (from > to) { 6961 VM_WARN(1, "%s called with a negative range\n", __func__); 6962 return false; 6963 } 6964 6965 /* 6966 * vma specific semaphore used for pmd sharing and fault/truncation 6967 * synchronization 6968 */ 6969 hugetlb_vma_lock_alloc(vma); 6970 6971 /* 6972 * Only apply hugepage reservation if asked. At fault time, an 6973 * attempt will be made for VM_NORESERVE to allocate a page 6974 * without using reserves 6975 */ 6976 if (vm_flags & VM_NORESERVE) 6977 return true; 6978 6979 /* 6980 * Shared mappings base their reservation on the number of pages that 6981 * are already allocated on behalf of the file. Private mappings need 6982 * to reserve the full area even if read-only as mprotect() may be 6983 * called to make the mapping read-write. Assume !vma is a shm mapping 6984 */ 6985 if (!vma || vma->vm_flags & VM_MAYSHARE) { 6986 /* 6987 * resv_map can not be NULL as hugetlb_reserve_pages is only 6988 * called for inodes for which resv_maps were created (see 6989 * hugetlbfs_get_inode). 6990 */ 6991 resv_map = inode_resv_map(inode); 6992 6993 chg = region_chg(resv_map, from, to, ®ions_needed); 6994 } else { 6995 /* Private mapping. */ 6996 resv_map = resv_map_alloc(); 6997 if (!resv_map) 6998 goto out_err; 6999 7000 chg = to - from; 7001 7002 set_vma_resv_map(vma, resv_map); 7003 set_vma_resv_flags(vma, HPAGE_RESV_OWNER); 7004 } 7005 7006 if (chg < 0) 7007 goto out_err; 7008 7009 if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h), 7010 chg * pages_per_huge_page(h), &h_cg) < 0) 7011 goto out_err; 7012 7013 if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) { 7014 /* For private mappings, the hugetlb_cgroup uncharge info hangs 7015 * of the resv_map. 7016 */ 7017 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h); 7018 } 7019 7020 /* 7021 * There must be enough pages in the subpool for the mapping. If 7022 * the subpool has a minimum size, there may be some global 7023 * reservations already in place (gbl_reserve). 7024 */ 7025 gbl_reserve = hugepage_subpool_get_pages(spool, chg); 7026 if (gbl_reserve < 0) 7027 goto out_uncharge_cgroup; 7028 7029 /* 7030 * Check enough hugepages are available for the reservation. 7031 * Hand the pages back to the subpool if there are not 7032 */ 7033 if (hugetlb_acct_memory(h, gbl_reserve) < 0) 7034 goto out_put_pages; 7035 7036 /* 7037 * Account for the reservations made. Shared mappings record regions 7038 * that have reservations as they are shared by multiple VMAs. 7039 * When the last VMA disappears, the region map says how much 7040 * the reservation was and the page cache tells how much of 7041 * the reservation was consumed. Private mappings are per-VMA and 7042 * only the consumed reservations are tracked. When the VMA 7043 * disappears, the original reservation is the VMA size and the 7044 * consumed reservations are stored in the map. 
Hence, nothing 7045 * else has to be done for private mappings here 7046 */ 7047 if (!vma || vma->vm_flags & VM_MAYSHARE) { 7048 add = region_add(resv_map, from, to, regions_needed, h, h_cg); 7049 7050 if (unlikely(add < 0)) { 7051 hugetlb_acct_memory(h, -gbl_reserve); 7052 goto out_put_pages; 7053 } else if (unlikely(chg > add)) { 7054 /* 7055 * pages in this range were added to the reserve 7056 * map between region_chg and region_add. This 7057 * indicates a race with alloc_hugetlb_folio. Adjust 7058 * the subpool and reserve counts modified above 7059 * based on the difference. 7060 */ 7061 long rsv_adjust; 7062 7063 /* 7064 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the 7065 * reference to h_cg->css. See comment below for detail. 7066 */ 7067 hugetlb_cgroup_uncharge_cgroup_rsvd( 7068 hstate_index(h), 7069 (chg - add) * pages_per_huge_page(h), h_cg); 7070 7071 rsv_adjust = hugepage_subpool_put_pages(spool, 7072 chg - add); 7073 hugetlb_acct_memory(h, -rsv_adjust); 7074 } else if (h_cg) { 7075 /* 7076 * The file_regions will hold their own reference to 7077 * h_cg->css. So we should release the reference held 7078 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are 7079 * done. 7080 */ 7081 hugetlb_cgroup_put_rsvd_cgroup(h_cg); 7082 } 7083 } 7084 return true; 7085 7086 out_put_pages: 7087 /* put back original number of pages, chg */ 7088 (void)hugepage_subpool_put_pages(spool, chg); 7089 out_uncharge_cgroup: 7090 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h), 7091 chg * pages_per_huge_page(h), h_cg); 7092 out_err: 7093 hugetlb_vma_lock_free(vma); 7094 if (!vma || vma->vm_flags & VM_MAYSHARE) 7095 /* Only call region_abort if the region_chg succeeded but the 7096 * region_add failed or didn't run. 7097 */ 7098 if (chg >= 0 && add < 0) 7099 region_abort(resv_map, from, to, regions_needed); 7100 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 7101 kref_put(&resv_map->refs, resv_map_release); 7102 set_vma_resv_map(vma, NULL); 7103 } 7104 return false; 7105 } 7106 7107 long hugetlb_unreserve_pages(struct inode *inode, long start, long end, 7108 long freed) 7109 { 7110 struct hstate *h = hstate_inode(inode); 7111 struct resv_map *resv_map = inode_resv_map(inode); 7112 long chg = 0; 7113 struct hugepage_subpool *spool = subpool_inode(inode); 7114 long gbl_reserve; 7115 7116 /* 7117 * Since this routine can be called in the evict inode path for all 7118 * hugetlbfs inodes, resv_map could be NULL. 7119 */ 7120 if (resv_map) { 7121 chg = region_del(resv_map, start, end); 7122 /* 7123 * region_del() can fail in the rare case where a region 7124 * must be split and another region descriptor can not be 7125 * allocated. If end == LONG_MAX, it will not fail. 7126 */ 7127 if (chg < 0) 7128 return chg; 7129 } 7130 7131 spin_lock(&inode->i_lock); 7132 inode->i_blocks -= (blocks_per_huge_page(h) * freed); 7133 spin_unlock(&inode->i_lock); 7134 7135 /* 7136 * If the subpool has a minimum size, the number of global 7137 * reservations to be released may be adjusted. 7138 * 7139 * Note that !resv_map implies freed == 0. So (chg - freed) 7140 * won't go negative. 
7141 */ 7142 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed)); 7143 hugetlb_acct_memory(h, -gbl_reserve); 7144 7145 return 0; 7146 } 7147 7148 #ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING 7149 static unsigned long page_table_shareable(struct vm_area_struct *svma, 7150 struct vm_area_struct *vma, 7151 unsigned long addr, pgoff_t idx) 7152 { 7153 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) + 7154 svma->vm_start; 7155 unsigned long sbase = saddr & PUD_MASK; 7156 unsigned long s_end = sbase + PUD_SIZE; 7157 7158 /* Allow segments to share if only one is marked locked */ 7159 unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED_MASK; 7160 unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED_MASK; 7161 7162 /* 7163 * match the virtual addresses, permission and the alignment of the 7164 * page table page. 7165 * 7166 * Also, vma_lock (vm_private_data) is required for sharing. 7167 */ 7168 if (pmd_index(addr) != pmd_index(saddr) || 7169 vm_flags != svm_flags || 7170 !range_in_vma(svma, sbase, s_end) || 7171 !svma->vm_private_data) 7172 return 0; 7173 7174 return saddr; 7175 } 7176 7177 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) 7178 { 7179 unsigned long start = addr & PUD_MASK; 7180 unsigned long end = start + PUD_SIZE; 7181 7182 #ifdef CONFIG_USERFAULTFD 7183 if (uffd_disable_huge_pmd_share(vma)) 7184 return false; 7185 #endif 7186 /* 7187 * check on proper vm_flags and page table alignment 7188 */ 7189 if (!(vma->vm_flags & VM_MAYSHARE)) 7190 return false; 7191 if (!vma->vm_private_data) /* vma lock required for sharing */ 7192 return false; 7193 if (!range_in_vma(vma, start, end)) 7194 return false; 7195 return true; 7196 } 7197 7198 /* 7199 * Determine if start,end range within vma could be mapped by shared pmd. 7200 * If yes, adjust start and end to cover range associated with possible 7201 * shared pmd mappings. 7202 */ 7203 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, 7204 unsigned long *start, unsigned long *end) 7205 { 7206 unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE), 7207 v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE); 7208 7209 /* 7210 * vma needs to span at least one aligned PUD size, and the range 7211 * must be at least partially within in. 7212 */ 7213 if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) || 7214 (*end <= v_start) || (*start >= v_end)) 7215 return; 7216 7217 /* Extend the range to be PUD aligned for a worst case scenario */ 7218 if (*start > v_start) 7219 *start = ALIGN_DOWN(*start, PUD_SIZE); 7220 7221 if (*end < v_end) 7222 *end = ALIGN(*end, PUD_SIZE); 7223 } 7224 7225 /* 7226 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() 7227 * and returns the corresponding pte. While this is not necessary for the 7228 * !shared pmd case because we can allocate the pmd later as well, it makes the 7229 * code much cleaner. pmd allocation is essential for the shared case because 7230 * pud has to be populated inside the same i_mmap_rwsem section - otherwise 7231 * racing tasks could either miss the sharing (see huge_pte_offset) or select a 7232 * bad pmd for sharing. 
7233 */ 7234 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, 7235 unsigned long addr, pud_t *pud) 7236 { 7237 struct address_space *mapping = vma->vm_file->f_mapping; 7238 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + 7239 vma->vm_pgoff; 7240 struct vm_area_struct *svma; 7241 unsigned long saddr; 7242 pte_t *spte = NULL; 7243 pte_t *pte; 7244 7245 i_mmap_lock_read(mapping); 7246 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { 7247 if (svma == vma) 7248 continue; 7249 7250 saddr = page_table_shareable(svma, vma, addr, idx); 7251 if (saddr) { 7252 spte = hugetlb_walk(svma, saddr, 7253 vma_mmu_pagesize(svma)); 7254 if (spte) { 7255 ptdesc_pmd_pts_inc(virt_to_ptdesc(spte)); 7256 break; 7257 } 7258 } 7259 } 7260 7261 if (!spte) 7262 goto out; 7263 7264 spin_lock(&mm->page_table_lock); 7265 if (pud_none(*pud)) { 7266 pud_populate(mm, pud, 7267 (pmd_t *)((unsigned long)spte & PAGE_MASK)); 7268 mm_inc_nr_pmds(mm); 7269 } else { 7270 ptdesc_pmd_pts_dec(virt_to_ptdesc(spte)); 7271 } 7272 spin_unlock(&mm->page_table_lock); 7273 out: 7274 pte = (pte_t *)pmd_alloc(mm, pud, addr); 7275 i_mmap_unlock_read(mapping); 7276 return pte; 7277 } 7278 7279 /* 7280 * unmap huge page backed by shared pte. 7281 * 7282 * Called with page table lock held. 7283 * 7284 * returns: 1 successfully unmapped a shared pte page 7285 * 0 the underlying pte page is not shared, or it is the last user 7286 */ 7287 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, 7288 unsigned long addr, pte_t *ptep) 7289 { 7290 unsigned long sz = huge_page_size(hstate_vma(vma)); 7291 pgd_t *pgd = pgd_offset(mm, addr); 7292 p4d_t *p4d = p4d_offset(pgd, addr); 7293 pud_t *pud = pud_offset(p4d, addr); 7294 7295 i_mmap_assert_write_locked(vma->vm_file->f_mapping); 7296 hugetlb_vma_assert_locked(vma); 7297 if (sz != PMD_SIZE) 7298 return 0; 7299 if (!ptdesc_pmd_pts_count(virt_to_ptdesc(ptep))) 7300 return 0; 7301 7302 pud_clear(pud); 7303 ptdesc_pmd_pts_dec(virt_to_ptdesc(ptep)); 7304 mm_dec_nr_pmds(mm); 7305 return 1; 7306 } 7307 7308 #else /* !CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */ 7309 7310 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, 7311 unsigned long addr, pud_t *pud) 7312 { 7313 return NULL; 7314 } 7315 7316 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, 7317 unsigned long addr, pte_t *ptep) 7318 { 7319 return 0; 7320 } 7321 7322 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, 7323 unsigned long *start, unsigned long *end) 7324 { 7325 } 7326 7327 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) 7328 { 7329 return false; 7330 } 7331 #endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */ 7332 7333 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB 7334 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, 7335 unsigned long addr, unsigned long sz) 7336 { 7337 pgd_t *pgd; 7338 p4d_t *p4d; 7339 pud_t *pud; 7340 pte_t *pte = NULL; 7341 7342 pgd = pgd_offset(mm, addr); 7343 p4d = p4d_alloc(mm, pgd, addr); 7344 if (!p4d) 7345 return NULL; 7346 pud = pud_alloc(mm, p4d, addr); 7347 if (pud) { 7348 if (sz == PUD_SIZE) { 7349 pte = (pte_t *)pud; 7350 } else { 7351 BUG_ON(sz != PMD_SIZE); 7352 if (want_pmd_share(vma, addr) && pud_none(*pud)) 7353 pte = huge_pmd_share(mm, vma, addr, pud); 7354 else 7355 pte = (pte_t *)pmd_alloc(mm, pud, addr); 7356 } 7357 } 7358 7359 if (pte) { 7360 pte_t pteval = ptep_get_lockless(pte); 7361 7362 BUG_ON(pte_present(pteval) && !pte_huge(pteval)); 7363 } 7364 
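        /*
         * At this point pte points at the PUD entry (sz == PUD_SIZE) or at a
         * possibly shared or newly allocated PMD entry, or is NULL if an
         * intermediate page table level could not be allocated.
         */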

/*
 * huge_pte_offset() - Walk the page table to resolve the hugepage
 * entry at address @addr
 *
 * Return: Pointer to page table entry (PUD or PMD) for
 * address @addr, or NULL if a !p*d_present() entry is encountered and the
 * size @sz doesn't match the hugepage size at this level of the page
 * table.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (sz == PUD_SIZE)
		/* must be pud huge, non-present or none */
		return (pte_t *)pud;
	if (!pud_present(*pud))
		return NULL;
	/* must have a valid entry and size to go further */

	pmd = pmd_offset(pud, addr);
	/* must be pmd huge, non-present or none */
	return (pte_t *)pmd;
}

/*
 * Return a mask that can be used to update an address to the last huge
 * page in a page table page mapping size.  Used to skip non-present
 * page table entries when linearly scanning address ranges.  Architectures
 * with unique huge page to page table relationships can define their own
 * version of this routine.
 */
unsigned long hugetlb_mask_last_page(struct hstate *h)
{
	unsigned long hp_size = huge_page_size(h);

	if (hp_size == PUD_SIZE)
		return P4D_SIZE - PUD_SIZE;
	else if (hp_size == PMD_SIZE)
		return PUD_SIZE - PMD_SIZE;
	else
		return 0UL;
}

#else

/* See description above.  Architectures can provide their own version. */
__weak unsigned long hugetlb_mask_last_page(struct hstate *h)
{
#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
	if (huge_page_size(h) == PMD_SIZE)
		return PUD_SIZE - PMD_SIZE;
#endif
	return 0UL;
}

#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
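
/*
 * Illustrative sketch (not kernel code) of how the mask above is meant to
 * be used, assuming a PMD-sized hstate where the mask is
 * PUD_SIZE - PMD_SIZE: when the walk finds no entry (meaning the whole
 * page table page is absent), the caller jumps to the last huge page slot
 * covered by that page table page, and the loop increment then moves on
 * to the next one:
 *
 *	unsigned long sz = huge_page_size(h);
 *	unsigned long last_addr_mask = hugetlb_mask_last_page(h);
 *	unsigned long addr;
 *
 *	for (addr = start; addr < end; addr += sz) {
 *		pte_t *ptep = hugetlb_walk(vma, addr, sz);
 *
 *		if (!ptep) {
 *			addr |= last_addr_mask;
 *			continue;
 *		}
 *		...
 *	}
 */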

/**
 * folio_isolate_hugetlb - try to isolate an allocated hugetlb folio
 * @folio: the folio to isolate
 * @list: the list to add the folio to on success
 *
 * Isolate an allocated (refcount > 0) hugetlb folio, marking it as
 * isolated/non-migratable, and moving it from the active list to the
 * given list.
 *
 * Isolation will fail if @folio is not an allocated hugetlb folio, or if
 * it is already isolated/non-migratable.
 *
 * On success, an additional folio reference is taken that must be dropped
 * using folio_putback_hugetlb() to undo the isolation.
 *
 * Return: True if isolation worked, otherwise False.
 */
bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list)
{
	bool ret = true;

	spin_lock_irq(&hugetlb_lock);
	if (!folio_test_hugetlb(folio) ||
	    !folio_test_hugetlb_migratable(folio) ||
	    !folio_try_get(folio)) {
		ret = false;
		goto unlock;
	}
	folio_clear_hugetlb_migratable(folio);
	list_move_tail(&folio->lru, list);
unlock:
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}

int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{
	int ret = 0;

	*hugetlb = false;
	spin_lock_irq(&hugetlb_lock);
	if (folio_test_hugetlb(folio)) {
		*hugetlb = true;
		if (folio_test_hugetlb_freed(folio))
			ret = 0;
		else if (folio_test_hugetlb_migratable(folio) || unpoison)
			ret = folio_try_get(folio);
		else
			ret = -EBUSY;
	}
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}

int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
				bool *migratable_cleared)
{
	int ret;

	spin_lock_irq(&hugetlb_lock);
	ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared);
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}

/**
 * folio_putback_hugetlb - unisolate a hugetlb folio
 * @folio: the isolated hugetlb folio
 *
 * Putback/un-isolate the hugetlb folio that was previously isolated using
 * folio_isolate_hugetlb(): marking it non-isolated/migratable and putting it
 * back onto the active list.
 *
 * Will drop the additional folio reference obtained through
 * folio_isolate_hugetlb().
 */
void folio_putback_hugetlb(struct folio *folio)
{
	spin_lock_irq(&hugetlb_lock);
	folio_set_hugetlb_migratable(folio);
	list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist);
	spin_unlock_irq(&hugetlb_lock);
	folio_put(folio);
}
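
/*
 * Illustrative pairing (not kernel code): a caller that temporarily takes
 * a hugetlb folio off the active list collects it on its own list and, if
 * it does not hand the folio over to migration, undoes the isolation:
 *
 *	LIST_HEAD(isolated);
 *
 *	if (folio_isolate_hugetlb(folio, &isolated)) {
 *		// the folio is now non-migratable and we hold an extra
 *		// reference
 *		...
 *		folio_putback_hugetlb(folio);	// re-activate and drop the
 *						// extra reference
 *	}
 */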

void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
{
	struct hstate *h = folio_hstate(old_folio);

	hugetlb_cgroup_migrate(old_folio, new_folio);
	set_page_owner_migrate_reason(&new_folio->page, reason);

	/*
	 * Transfer the temporary state of the new hugetlb folio. This is the
	 * reverse of other transitions because the new folio is going to
	 * be final while the old one will be freed, so it takes over
	 * the temporary status.
	 *
	 * Also note that we have to transfer the per-node surplus state
	 * here as well, otherwise the global surplus count will not match
	 * the per-node counts.
	 */
	if (folio_test_hugetlb_temporary(new_folio)) {
		int old_nid = folio_nid(old_folio);
		int new_nid = folio_nid(new_folio);

		folio_set_hugetlb_temporary(old_folio);
		folio_clear_hugetlb_temporary(new_folio);

		/*
		 * There is no need to transfer the per-node surplus state
		 * when we do not cross the node.
		 */
		if (new_nid == old_nid)
			return;
		spin_lock_irq(&hugetlb_lock);
		if (h->surplus_huge_pages_node[old_nid]) {
			h->surplus_huge_pages_node[old_nid]--;
			h->surplus_huge_pages_node[new_nid]++;
		}
		spin_unlock_irq(&hugetlb_lock);
	}

	/*
	 * Our old folio is isolated and has "migratable" cleared until it
	 * is putback. As migration succeeded, set the new folio "migratable"
	 * and add it to the active list.
	 */
	spin_lock_irq(&hugetlb_lock);
	folio_set_hugetlb_migratable(new_folio);
	list_move_tail(&new_folio->lru, &(folio_hstate(new_folio))->hugepage_activelist);
	spin_unlock_irq(&hugetlb_lock);
}

static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
				   unsigned long start,
				   unsigned long end)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	unsigned long address;
	spinlock_t *ptl;
	pte_t *ptep;

	if (!(vma->vm_flags & VM_MAYSHARE))
		return;

	if (start >= end)
		return;

	flush_cache_range(vma, start, end);
	/*
	 * No need to call adjust_range_if_pmd_sharing_possible(), because
	 * we have already done the PUD_SIZE alignment.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
				start, end);
	mmu_notifier_invalidate_range_start(&range);
	hugetlb_vma_lock_write(vma);
	i_mmap_lock_write(vma->vm_file->f_mapping);
	for (address = start; address < end; address += PUD_SIZE) {
		ptep = hugetlb_walk(vma, address, sz);
		if (!ptep)
			continue;
		ptl = huge_pte_lock(h, mm, ptep);
		huge_pmd_unshare(mm, vma, address, ptep);
		spin_unlock(ptl);
	}
	flush_hugetlb_tlb_range(vma, start, end);
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	hugetlb_vma_unlock_write(vma);
	/*
	 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see
	 * Documentation/mm/mmu_notifier.rst.
	 */
	mmu_notifier_invalidate_range_end(&range);
}

/*
 * This function will unconditionally remove all the shared pmd pgtable entries
 * within the specific vma for a hugetlbfs memory range.
 */
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
{
	hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
			ALIGN_DOWN(vma->vm_end, PUD_SIZE));
}

#ifdef CONFIG_CMA
static bool cma_reserve_called __initdata;

static int __init cmdline_parse_hugetlb_cma(char *p)
{
	int nid, count = 0;
	unsigned long tmp;
	char *s = p;

	while (*s) {
		if (sscanf(s, "%lu%n", &tmp, &count) != 1)
			break;

		if (s[count] == ':') {
			if (tmp >= MAX_NUMNODES)
				break;
			nid = array_index_nospec(tmp, MAX_NUMNODES);

			s += count + 1;
			tmp = memparse(s, &s);
			hugetlb_cma_size_in_node[nid] = tmp;
			hugetlb_cma_size += tmp;

			/*
			 * Skip the separator if there is one, otherwise
			 * stop parsing.
			 */
			if (*s == ',')
				s++;
			else
				break;
		} else {
			hugetlb_cma_size = memparse(p, &p);
			break;
		}
	}

	return 0;
}

early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
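
/*
 * Example boot command lines accepted by the parser above (the sizes are
 * arbitrary, illustrative values; memparse() handles the K/M/G suffixes):
 *
 *	hugetlb_cma=4G		reserve a 4 GiB total, spread across the
 *				online nodes by hugetlb_cma_reserve()
 *	hugetlb_cma=0:2G,1:2G	reserve 2 GiB of CMA on node 0 and another
 *				2 GiB on node 1
 */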

void __init hugetlb_cma_reserve(int order)
{
	unsigned long size, reserved, per_node;
	bool node_specific_cma_alloc = false;
	int nid;

	/*
	 * HugeTLB CMA reservation is required for gigantic
	 * huge pages which could not be allocated via the
	 * page allocator. Just warn if there is any change
	 * breaking this assumption.
	 */
	VM_WARN_ON(order <= MAX_PAGE_ORDER);
	cma_reserve_called = true;

	if (!hugetlb_cma_size)
		return;

	for (nid = 0; nid < MAX_NUMNODES; nid++) {
		if (hugetlb_cma_size_in_node[nid] == 0)
			continue;

		if (!node_online(nid)) {
			pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
			hugetlb_cma_size_in_node[nid] = 0;
			continue;
		}

		if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
			pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
				nid, (PAGE_SIZE << order) / SZ_1M);
			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
			hugetlb_cma_size_in_node[nid] = 0;
		} else {
			node_specific_cma_alloc = true;
		}
	}

	/* Validate the CMA size again in case some invalid nodes were specified. */
	if (!hugetlb_cma_size)
		return;

	if (hugetlb_cma_size < (PAGE_SIZE << order)) {
		pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
			(PAGE_SIZE << order) / SZ_1M);
		hugetlb_cma_size = 0;
		return;
	}

	if (!node_specific_cma_alloc) {
		/*
		 * If 3 GB area is requested on a machine with 4 numa nodes,
		 * let's allocate 1 GB on first three nodes and ignore the last one.
		 */
		per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
		pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
			hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
	}

	reserved = 0;
	for_each_online_node(nid) {
		int res;
		char name[CMA_MAX_NAME];

		if (node_specific_cma_alloc) {
			if (hugetlb_cma_size_in_node[nid] == 0)
				continue;

			size = hugetlb_cma_size_in_node[nid];
		} else {
			size = min(per_node, hugetlb_cma_size - reserved);
		}

		size = round_up(size, PAGE_SIZE << order);

		snprintf(name, sizeof(name), "hugetlb%d", nid);
		/*
		 * Note that 'order per bit' is based on smallest size that
		 * may be returned to CMA allocator in the case of
		 * huge page demotion.
		 */
		res = cma_declare_contiguous_nid(0, size, 0,
						PAGE_SIZE << order,
						HUGETLB_PAGE_ORDER, false, name,
						&hugetlb_cma[nid], nid);
		if (res) {
			pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
				res, nid);
			continue;
		}

		reserved += size;
		pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
			size / SZ_1M, nid);

		if (reserved >= hugetlb_cma_size)
			break;
	}

	if (!reserved)
		/*
		 * hugetlb_cma_size is used to determine if allocations from
		 * cma are possible.  Set to zero if no cma regions are set up.
		 */
		hugetlb_cma_size = 0;
}

static void __init hugetlb_cma_check(void)
{
	if (!hugetlb_cma_size || cma_reserve_called)
		return;

	pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
}

#endif /* CONFIG_CMA */