// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>
#include <linux/migrate.h>
#include <linux/nospec.h>
#include <linux/delayacct.h>
#include <linux/memory.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/page_owner.h>
#include "internal.h"
#include "hugetlb_vmemmap.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
{
	return cma_pages_valid(hugetlb_cma[folio_nid(folio)], &folio->page,
				1 << order);
}
#else
static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
{
	return false;
}
#endif
static unsigned long hugetlb_cma_size __initdata;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static bool __initdata parsed_valid_hugepagesz = true;
static bool __initdata parsed_default_hugepagesz;
static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);

static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
	if (spool->count)
		return false;
	if (spool->max_hpages != -1)
		return spool->used_hpages == 0;
	if (spool->min_hpages != -1)
		return spool->rsv_hpages == spool->min_hpages;

	return true;
}

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
						unsigned long irq_flags)
{
	spin_unlock_irqrestore(&spool->lock, irq_flags);

	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
	if (subpool_is_free(spool)) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	unsigned long flags;

	spin_lock_irqsave(&spool->lock, flags);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool, flags);
}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock_irq(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock_irq(&spool->lock);
	return ret;
}
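
/*
 * Illustrative example (not from the original source): how the min/max
 * subpool accounting plays out for a hypothetical subpool created with
 * max_hpages = 8 and min_hpages = 4 (so rsv_hpages starts at 4), assuming
 * spool->count stays positive so the subpool is not freed in between:
 *
 *	hugepage_subpool_get_pages(spool, 2)
 *		used_hpages: 0 -> 2 (within max), rsv_hpages: 4 -> 2,
 *		returns 0  -> both pages are covered by the pre-reserved
 *			      minimum, so the global pools need no adjustment.
 *
 *	hugepage_subpool_get_pages(spool, 3)
 *		used_hpages: 2 -> 5, rsv_hpages: 2 -> 0,
 *		returns 1  -> only 1 of the 3 pages must come from the
 *			      global pool.
 *
 *	hugepage_subpool_put_pages(spool, 5)
 *		used_hpages: 5 -> 0, rsv_hpages: 0 -> 4 (capped at min_hpages),
 *		returns 1  -> 1 global reservation is dropped; the other 4 are
 *			      kept to maintain the subpool minimum.
 */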

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;
	unsigned long flags;

	if (!spool)
		return delta;

	spin_lock_irqsave(&spool->lock, flags);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool, flags);

	return ret;
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/*
 * hugetlb vma_lock helper routines
 */
void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_read(&vma_lock->rw_sema);
	}
}

void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		up_read(&vma_lock->rw_sema);
	}
}

void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_write(&vma_lock->rw_sema);
	}
}

void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		up_write(&vma_lock->rw_sema);
	}
}

int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

	if (!__vma_shareable_lock(vma))
		return 1;

	return down_write_trylock(&vma_lock->rw_sema);
}

void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		lockdep_assert_held(&vma_lock->rw_sema);
	}
}

void hugetlb_vma_lock_release(struct kref *kref)
{
	struct hugetlb_vma_lock *vma_lock = container_of(kref,
			struct hugetlb_vma_lock, refs);

	kfree(vma_lock);
}

static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
{
	struct vm_area_struct *vma = vma_lock->vma;

	/*
	 * vma_lock structure may or may not be released as a result of put,
	 * it certainly will no longer be attached to vma so clear pointer.
	 * Semaphore synchronizes access to vma_lock->vma field.
	 */
	vma_lock->vma = NULL;
	vma->vm_private_data = NULL;
	up_write(&vma_lock->rw_sema);
	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
}

static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		__hugetlb_vma_unlock_write_put(vma_lock);
	}
}

static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
{
	/*
	 * Only present in sharable vmas.
	 */
	if (!vma || !__vma_shareable_lock(vma))
		return;

	if (vma->vm_private_data) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_write(&vma_lock->rw_sema);
		__hugetlb_vma_unlock_write_put(vma_lock);
	}
}

static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
{
	struct hugetlb_vma_lock *vma_lock;

	/* Only establish in (flags) sharable vmas */
	if (!vma || !(vma->vm_flags & VM_MAYSHARE))
		return;

	/* Should never get here with non-NULL vm_private_data */
	if (vma->vm_private_data)
		return;

	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
	if (!vma_lock) {
		/*
		 * If we can not allocate structure, then vma can not
		 * participate in pmd sharing.  This is only a possible
		 * performance enhancement and memory saving issue.
		 * However, the lock is also used to synchronize page
		 * faults with truncation.  If the lock is not present,
		 * unlikely races could leave pages in a file past i_size
		 * until the file is removed.  Warn in the unlikely case of
		 * allocation failure.
		 */
		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
		return;
	}

	kref_init(&vma_lock->refs);
	init_rwsem(&vma_lock->rw_sema);
	vma_lock->vma = vma;
	vma->vm_private_data = vma_lock;
}

/* Helper that removes a struct file_region from the resv_map cache and returns
 * it for use.
 */
static struct file_region *
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
{
	struct file_region *nrg;

	VM_BUG_ON(resv->region_cache_count <= 0);

	resv->region_cache_count--;
	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
	list_del(&nrg->link);

	nrg->from = from;
	nrg->to = to;

	return nrg;
}

static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
					      struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	nrg->reservation_counter = rg->reservation_counter;
	nrg->css = rg->css;
	if (rg->css)
		css_get(rg->css);
#endif
}

/* Helper that records hugetlb_cgroup uncharge info. */
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
						struct hstate *h,
						struct resv_map *resv,
						struct file_region *nrg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (h_cg) {
		nrg->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		nrg->css = &h_cg->css;
		/*
		 * The caller will hold exactly one h_cg->css reference for the
		 * whole contiguous reservation region.  But this area might be
		 * scattered when there are already some file_regions residing
		 * in it.  As a result, many file_regions may share only one
		 * css reference.  In order to ensure that one file_region must
		 * hold exactly one h_cg->css reference, we should do css_get
		 * for each file_region and leave the reference held by caller
		 * untouched.
		 */
		css_get(&h_cg->css);
		if (!resv->pages_per_hpage)
			resv->pages_per_hpage = pages_per_huge_page(h);
		/* pages_per_hpage should be the same for all entries in
		 * a resv_map.
		 */
		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
	} else {
		nrg->reservation_counter = NULL;
		nrg->css = NULL;
	}
#endif
}

static void put_uncharge_info(struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (rg->css)
		css_put(rg->css);
#endif
}

static bool has_same_uncharge_info(struct file_region *rg,
				   struct file_region *org)
{
#ifdef CONFIG_CGROUP_HUGETLB
	return rg->reservation_counter == org->reservation_counter &&
	       rg->css == org->css;

#else
	return true;
#endif
}

static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{
	struct file_region *nrg, *prg;

	prg = list_prev_entry(rg, link);
	if (&prg->link != &resv->regions && prg->to == rg->from &&
	    has_same_uncharge_info(prg, rg)) {
		prg->to = rg->to;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);

		rg = prg;
	}

	nrg = list_next_entry(rg, link);
	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
	    has_same_uncharge_info(nrg, rg)) {
		nrg->from = rg->from;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);
	}
}

static inline long
hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
		     long *regions_needed)
{
	struct file_region *nrg;

	if (!regions_needed) {
		nrg = get_file_region_entry_from_cache(map, from, to);
		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
		list_add(&nrg->link, rg);
		coalesce_file_region(map, nrg);
	} else
		*regions_needed += 1;

	return to - from;
}

/*
 * Must be called with resv->lock held.
 *
 * Calling this with regions_needed != NULL will count the number of pages
 * to be added but will not modify the linked list.  regions_needed will
 * indicate the number of file_regions needed in the cache to carry out the
 * addition of regions for this range.
 */
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
				     struct hugetlb_cgroup *h_cg,
				     struct hstate *h, long *regions_needed)
{
	long add = 0;
	struct list_head *head = &resv->regions;
	long last_accounted_offset = f;
	struct file_region *iter, *trg = NULL;
	struct list_head *rg = NULL;

	if (regions_needed)
		*regions_needed = 0;

	/* In this loop, we essentially handle an entry for the range
	 * [last_accounted_offset, iter->from), at every iteration, with some
	 * bounds checking.
	 */
	list_for_each_entry_safe(iter, trg, head, link) {
		/* Skip irrelevant regions that start before our range. */
		if (iter->from < f) {
			/* If this region ends after the last accounted offset,
			 * then we need to update last_accounted_offset.
			 */
			if (iter->to > last_accounted_offset)
				last_accounted_offset = iter->to;
			continue;
		}

		/* When we find a region that starts beyond our range, we've
		 * finished.
		 */
		if (iter->from >= t) {
			rg = iter->link.prev;
			break;
		}

		/* Add an entry for last_accounted_offset -> iter->from, and
		 * update last_accounted_offset.
		 */
		if (iter->from > last_accounted_offset)
			add += hugetlb_resv_map_add(resv, iter->link.prev,
						    last_accounted_offset,
						    iter->from, h, h_cg,
						    regions_needed);

		last_accounted_offset = iter->to;
	}

	/* Handle the case where our range extends beyond
	 * last_accounted_offset.
	 */
	if (!rg)
		rg = head->prev;
	if (last_accounted_offset < t)
		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
					    t, h, h_cg, regions_needed);

	return add;
}

/* Must be called with resv->lock acquired. Will drop lock to allocate entries.
 */
static int allocate_file_region_entries(struct resv_map *resv,
					int regions_needed)
	__must_hold(&resv->lock)
{
	LIST_HEAD(allocated_regions);
	int to_allocate = 0, i = 0;
	struct file_region *trg = NULL, *rg = NULL;

	VM_BUG_ON(regions_needed < 0);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations plus regions_needed.
	 *
	 * This is a while loop because when we drop the lock, some other call
	 * to region_add or region_del may have consumed some region_entries,
	 * so we keep looping here until we finally have enough entries for
	 * (adds_in_progress + regions_needed).
	 */
	while (resv->region_cache_count <
	       (resv->adds_in_progress + regions_needed)) {
		to_allocate = resv->adds_in_progress + regions_needed -
			      resv->region_cache_count;

		/* At this point, we should have enough entries in the cache
		 * for all the existing adds_in_progress. We should only be
		 * needing to allocate for regions_needed.
		 */
		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);

		spin_unlock(&resv->lock);
		for (i = 0; i < to_allocate; i++) {
			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
			if (!trg)
				goto out_of_memory;
			list_add(&trg->link, &allocated_regions);
		}

		spin_lock(&resv->lock);

		list_splice(&allocated_regions, &resv->region_cache);
		resv->region_cache_count += to_allocate;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
		list_del(&rg->link);
		kfree(rg);
	}
	return -ENOMEM;
}

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  Regions will be taken from the cache to fill in this range.
 * Sufficient regions should exist in the cache due to the previous
 * call to region_chg with the same range, but in some cases the cache will not
 * have sufficient entries due to races with other code doing region_add or
 * region_del.  The extra needed entries will be allocated.
 *
 * regions_needed is the out value provided by a previous call to region_chg.
 *
 * Return the number of new huge pages added to the map.  This number is greater
 * than or equal to zero.  If file_region entries needed to be allocated for
 * this operation and we were not able to allocate, it returns -ENOMEM.
 * region_add of regions of length 1 never allocate file_regions and cannot
 * fail; region_chg will always allocate at least 1 entry and a region_add for
 * 1 page will only require at most 1 entry.
 */
static long region_add(struct resv_map *resv, long f, long t,
		       long in_regions_needed, struct hstate *h,
		       struct hugetlb_cgroup *h_cg)
{
	long add = 0, actual_regions_needed = 0;

	spin_lock(&resv->lock);
retry:

	/* Count how many regions are actually needed to execute this add. */
	add_reservation_in_range(resv, f, t, NULL, NULL,
				 &actual_regions_needed);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * this add operation. Note that actual_regions_needed may be greater
	 * than in_regions_needed, as the resv_map may have been modified since
	 * the region_chg call. In this case, we need to make sure that we
	 * allocate extra entries, such that we have enough for all the
	 * existing adds_in_progress, plus the excess needed for this
	 * operation.
	 */
	if (actual_regions_needed > in_regions_needed &&
	    resv->region_cache_count <
		    resv->adds_in_progress +
			    (actual_regions_needed - in_regions_needed)) {
		/* region_add operation of range 1 should never need to
		 * allocate file_region entries.
		 */
		VM_BUG_ON(t - f <= 1);

		if (allocate_file_region_entries(
			    resv, actual_regions_needed - in_regions_needed)) {
			return -ENOMEM;
		}

		goto retry;
	}

	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);

	resv->adds_in_progress -= in_regions_needed;

	spin_unlock(&resv->lock);
	return add;
}

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  A number of new file_region structures is added to the cache as a
 * placeholder, for the subsequent region_add call to use. At least 1
 * file_region structure is added.
 *
 * out_regions_needed is the number of regions added to the
 * resv->adds_in_progress.  This value needs to be provided to a follow up call
 * to region_add or region_abort for proper accounting.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater or equal to
 * zero.  -ENOMEM is returned if a new file_region structure or cache entry
 * is needed and can not be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t,
		       long *out_regions_needed)
{
	long chg = 0;

	spin_lock(&resv->lock);

	/* Count how many hugepages in this range are NOT represented. */
	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
				       out_regions_needed);

	if (*out_regions_needed == 0)
		*out_regions_needed = 1;

	if (allocate_file_region_entries(resv, *out_regions_needed))
		return -ENOMEM;

	resv->adds_in_progress += *out_regions_needed;

	spin_unlock(&resv->lock);
	return chg;
}
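
/*
 * Illustrative usage sketch (not from the original source): a typical
 * reservation sequence pairs region_chg() with either region_add() or
 * region_abort(), using the same [f, t) range and regions_needed value:
 *
 *	long regions_needed;
 *	long chg = region_chg(resv, f, t, &regions_needed);
 *
 *	if (chg < 0)
 *		return chg;			// cache allocation failed
 *	if (charge_reservation(chg))		// hypothetical accounting step
 *		region_abort(resv, f, t, regions_needed);
 *	else
 *		region_add(resv, f, t, regions_needed, h, h_cg);
 *
 * region_chg() only counts pages and pre-allocates cache entries; the map is
 * not modified until region_add() runs, and region_abort() simply drops the
 * adds_in_progress bump if the operation is abandoned.
 */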

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter.  regions_needed
 * is the value returned by the region_chg call.  It is used to decrement
 * the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t,
			 long regions_needed)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress -= regions_needed;
	spin_unlock(&resv->lock);
}

/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to).  However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.  Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
						       struct file_region,
						       link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;
			hugetlb_cgroup_uncharge_file_region(
				resv, rg, t - f, false);

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;

			copy_hugetlb_cgroup_uncharge_info(nrg, rg);

			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - rg->from, true);
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {		/* Trim beginning of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    t - rg->from, false);

			del += t - rg->from;
			rg->from = t;
		} else {			/* Trim end of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - f, false);

			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}

/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was freed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;
	bool reserved = false;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust > 0) {
		struct hstate *h = hstate_inode(inode);

		if (!hugetlb_acct_memory(h, 1))
			reserved = true;
	} else if (!rsv_adjust) {
		reserved = true;
	}

	if (!reserved)
		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}
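
/*
 * Illustrative example (not from the original source): given a reserve map
 * holding the regions [0, 4) and [6, 10),
 *
 *	region_count(resv, 2, 8)	returns 4 (2 pages from [2, 4) plus
 *					2 pages from [6, 8))
 *	region_del(resv, 7, 9)		returns 2 and must split [6, 10)
 *					into [6, 7) and [9, 10)
 *	region_del(resv, 0, LONG_MAX)	removes everything; with t == LONG_MAX
 *					no split is ever needed, so it cannot
 *					fail with -ENOMEM
 */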

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of hugetlb_dup_vma_private() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and this persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, ie. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

static void
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
					  struct hugetlb_cgroup *h_cg,
					  struct hstate *h)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (!h_cg || !h) {
		resv_map->reservation_counter = NULL;
		resv_map->pages_per_hpage = 0;
		resv_map->css = NULL;
	} else {
		resv_map->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		resv_map->pages_per_hpage = pages_per_huge_page(h);
		resv_map->css = &h_cg->css;
	}
#endif
}

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);

	resv_map->adds_in_progress = 0;
	/*
	 * Initialize these to 0. On shared mappings, 0's here indicate these
	 * fields don't do cgroup accounting. On private mappings, these will be
	 * re-initialized to the proper values, to indicate that hugetlb cgroup
	 * reservations are to be un-charged from here.
	 */
	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}
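
/*
 * Illustrative lifecycle sketch (not from the original source): a resv_map
 * is reference counted via its embedded kref, so creation and teardown
 * typically look like:
 *
 *	struct resv_map *resv = resv_map_alloc();	// refcount == 1
 *
 *	if (!resv)
 *		return -ENOMEM;
 *	...				// region_chg()/region_add()/region_del()
 *	kref_put(&resv->refs, resv_map_release);	// last put frees the map
 *
 * resv_map_release() deletes any remaining regions and cache entries before
 * freeing the structure itself.
 */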

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	/*
	 * At inode evict time, i_mapping may not point to the original
	 * address space within the inode.  This original address space
	 * contains the pointer to the resv_map.  So, always use the
	 * address space embedded within the inode.
	 * The VERY common case is inode->mapping == &inode->i_data but,
	 * this may not be true for device special inodes.
	 */
	return (struct resv_map *)(&inode->i_data)->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}

void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	/*
	 * Clear vm_private_data
	 * - For shared mappings this is a per-vma semaphore that may be
	 *   allocated in a subsequent call to hugetlb_vm_op_open.
	 *   Before clearing, make sure pointer is not associated with vma
	 *   as this will leak the structure.  This is the case when called
	 *   via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
	 *   been called to allocate a new structure.
	 * - For MAP_PRIVATE mappings, this is the reserve map which does
	 *   not apply to children.  Faults generated by the children are
	 *   not guaranteed to succeed, even if read-only.
	 */
	if (vma->vm_flags & VM_MAYSHARE) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		if (vma_lock && vma_lock->vma != vma)
			vma->vm_private_data = NULL;
	} else
		vma->vm_private_data = NULL;
}

/*
 * Reset and decrement one ref on hugepage private reservation.
 * Called with mm->mmap_lock writer semaphore held.
 * This function should only be used by move_vma() and operates on VMAs of
 * the same size.  It should never come here with the last ref on the
 * reservation.
 */
void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	/*
	 * Clear the old hugetlb private page reservation.
	 * It has already been transferred to new_vma.
	 *
	 * During a mremap() operation of a hugetlb vma we call move_vma()
	 * which copies vma into new_vma and unmaps vma. After the copy
	 * operation both new_vma and vma share a reference to the resv_map
	 * struct, and at that point vma is about to be unmapped. We don't
	 * want to return the reservation to the pool at unmap of vma because
	 * the reservation still lives on in new_vma, so simply decrement the
	 * ref here and remove the resv_map reference from this vma.
	 */
	struct resv_map *reservations = vma_resv_map(vma);

	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
		kref_put(&reservations->refs, resv_map_release);
	}

	hugetlb_dup_vma_private(vma);
}

/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by another process
		 * (chg == 0), so we should decrement the reserved count.
		 * Without decrementing, the reserve count remains after
		 * releasing the inode, because this allocated page will go
		 * into the page cache and is regarded as coming from the
		 * reserved pool in the releasing step.  Currently, we don't
		 * have any other solution to deal with this situation
		 * properly, so add a work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return true;
		else
			return false;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE) {
		/*
		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
		 * be a region map for all pages.  The only situation where
		 * there is no region map is if a hole was punched via
		 * fallocate.  In this case, there really are no reserves to
		 * use.  This situation is indicated if chg != 0.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Like the shared case above, a hole punch or truncate
		 * could have been performed on the private mapping.
		 * Examine the value of chg to determine if reserves
		 * actually exist or were previously consumed.
		 * Very Subtle - The value of chg comes from a previous
		 * call to vma_needs_reserves().  The reserve map for
		 * private mappings has different (opposite) semantics
		 * than that of shared mappings.  vma_needs_reserves()
		 * has already taken this difference in semantics into
		 * account.  Therefore, the meaning of chg is the same
		 * as in the shared case above.  Code could easily be
		 * combined, but keeping it separate draws attention to
		 * subtle differences.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	return false;
}

static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
{
	int nid = folio_nid(folio);

	lockdep_assert_held(&hugetlb_lock);
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);

	list_move(&folio->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
	folio_set_hugetlb_freed(folio);
}

static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
								int nid)
{
	struct folio *folio;
	bool pin = !!(current->flags & PF_MEMALLOC_PIN);

	lockdep_assert_held(&hugetlb_lock);
	list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
		if (pin && !folio_is_longterm_pinnable(folio))
			continue;

		if (folio_test_hwpoison(folio))
			continue;

		list_move(&folio->lru, &h->hugepage_activelist);
		folio_ref_unfreeze(folio, 1);
		folio_clear_hugetlb_freed(folio);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		return folio;
	}

	return NULL;
}

static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
							int nid, nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = NUMA_NO_NODE;

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct folio *folio;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * no need to ask again on the same node. Pool is node rather than
		 * zone aware
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		folio = dequeue_hugetlb_folio_node_exact(h, node);
		if (folio)
			return folio;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}

static unsigned long available_huge_pages(struct hstate *h)
{
	return h->free_huge_pages - h->resv_huge_pages;
}

static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct folio *folio = NULL;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves.  This check ensures that reservations are
	 * not "stolen".  The child may still get SIGKILLed
	 */
	if (!vma_has_reserves(vma, chg) && !available_huge_pages(h))
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && !available_huge_pages(h))
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);

	if (mpol_is_preferred_many(mpol)) {
		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
							nid, nodemask);

		/* Fallback to all nodes if page==NULL */
		nodemask = NULL;
	}

	if (!folio)
		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
							nid, nodemask);

	if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) {
		folio_set_hugetlb_restore_reserve(folio);
		h->resv_huge_pages--;
	}

	mpol_cond_put(mpol);
	return folio;

err:
	return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}
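
/*
 * Illustrative example (not from the original source): with a 3-node
 * nodes_allowed mask of {0, 1, 2} and h->next_nid_to_alloc == 1, successive
 * calls to hstate_next_node_to_alloc() return 1, 2, 0, 1, ... advancing
 * next_nid_to_alloc each time and wrapping at the end of the mask.  If
 * next_nid_to_alloc points at a node that is not in the mask (e.g. the mask
 * was changed to {0, 2} while next_nid_to_alloc == 1), get_valid_node_allowed()
 * first skips ahead to the next allowed node.
 */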

/*
 * helper for remove_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)

/* used to demote non-gigantic huge pages as well */
static void __destroy_compound_gigantic_folio(struct folio *folio,
					unsigned int order, bool demote)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p;

	atomic_set(&folio->_entire_mapcount, 0);
	atomic_set(&folio->_nr_pages_mapped, 0);
	atomic_set(&folio->_pincount, 0);

	for (i = 1; i < nr_pages; i++) {
		p = folio_page(folio, i);
		p->mapping = NULL;
		clear_compound_head(p);
		if (!demote)
			set_page_refcounted(p);
	}

	folio_set_order(folio, 0);
	__folio_clear_head(folio);
}

static void destroy_compound_hugetlb_folio_for_demote(struct folio *folio,
					unsigned int order)
{
	__destroy_compound_gigantic_folio(folio, order, true);
}

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_folio(struct folio *folio,
					unsigned int order)
{
	__destroy_compound_gigantic_folio(folio, order, false);
}

static void free_gigantic_folio(struct folio *folio, unsigned int order)
{
	/*
	 * If the page isn't allocated using the cma allocator,
	 * cma_release() returns false.
	 */
#ifdef CONFIG_CMA
	int nid = folio_nid(folio);

	if (cma_release(hugetlb_cma[nid], &folio->page, 1 << order))
		return;
#endif

	free_contig_range(folio_pfn(folio), 1 << order);
}

#ifdef CONFIG_CONTIG_ALLOC
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	struct page *page;
	unsigned long nr_pages = pages_per_huge_page(h);
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

#ifdef CONFIG_CMA
	{
		int node;

		if (hugetlb_cma[nid]) {
			page = cma_alloc(hugetlb_cma[nid], nr_pages,
					huge_page_order(h), true);
			if (page)
				return page_folio(page);
		}

		if (!(gfp_mask & __GFP_THISNODE)) {
			for_each_node_mask(node, *nodemask) {
				if (node == nid || !hugetlb_cma[node])
					continue;

				page = cma_alloc(hugetlb_cma[node], nr_pages,
						huge_page_order(h), true);
				if (page)
					return page_folio(page);
			}
		}
	}
#endif

	page = alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
	return page ? page_folio(page) : NULL;
}

#else /* !CONFIG_CONTIG_ALLOC */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
static inline void free_gigantic_folio(struct folio *folio,
						unsigned int order) { }
static inline void destroy_compound_gigantic_folio(struct folio *folio,
						unsigned int order) { }
#endif

/*
 * Remove hugetlb folio from lists, and update dtor so that the folio appears
 * as just a compound page.
 *
 * A reference is held on the folio, except in the case of demote.
 *
 * Must be called with hugetlb lock held.
 */
static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio,
							bool adjust_surplus,
							bool demote)
{
	int nid = folio_nid(folio);

	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);

	lockdep_assert_held(&hugetlb_lock);
	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	list_del(&folio->lru);

	if (folio_test_hugetlb_freed(folio)) {
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
	}
	if (adjust_surplus) {
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	}

	/*
	 * Very subtle
	 *
	 * For non-gigantic pages set the destructor to the normal compound
	 * page dtor.  This is needed in case someone takes an additional
	 * temporary ref to the page, and freeing is delayed until they drop
	 * their reference.
	 *
	 * For gigantic pages set the destructor to the null dtor.  This
	 * destructor will never be called.  Before freeing the gigantic
	 * page destroy_compound_gigantic_folio will turn the folio into a
	 * simple group of pages.  After this the destructor does not
	 * apply.
	 *
	 * This handles the case where more than one ref is held when and
	 * after update_and_free_hugetlb_folio is called.
	 *
	 * In the case of demote we do not ref count the page as it will soon
	 * be turned into a page of smaller size.
	 */
	if (!demote)
		folio_ref_unfreeze(folio, 1);
	if (hstate_is_gigantic(h))
		folio_set_compound_dtor(folio, NULL_COMPOUND_DTOR);
	else
		folio_set_compound_dtor(folio, COMPOUND_PAGE_DTOR);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[nid]--;
}

static void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
							bool adjust_surplus)
{
	__remove_hugetlb_folio(h, folio, adjust_surplus, false);
}

static void remove_hugetlb_folio_for_demote(struct hstate *h, struct folio *folio,
							bool adjust_surplus)
{
	__remove_hugetlb_folio(h, folio, adjust_surplus, true);
}

static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
			     bool adjust_surplus)
{
	int zeroed;
	int nid = folio_nid(folio);

	VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio);

	lockdep_assert_held(&hugetlb_lock);

	INIT_LIST_HEAD(&folio->lru);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;

	if (adjust_surplus) {
		h->surplus_huge_pages++;
		h->surplus_huge_pages_node[nid]++;
	}

	folio_set_compound_dtor(folio, HUGETLB_PAGE_DTOR);
	folio_change_private(folio, NULL);
	/*
	 * We have to set hugetlb_vmemmap_optimized again as above
	 * folio_change_private(folio, NULL) cleared it.
	 */
	folio_set_hugetlb_vmemmap_optimized(folio);

	/*
	 * This folio is about to be managed by the hugetlb allocator and
	 * should have no users.  Drop our reference, and check for others
	 * just in case.
	 */
	zeroed = folio_put_testzero(folio);
	if (unlikely(!zeroed))
		/*
		 * It is VERY unlikely someone else has taken a ref on
		 * the page.  In this case, we simply return as the
		 * hugetlb destructor (free_huge_page) will be called
		 * when this other ref is dropped.
		 */
		return;

	arch_clear_hugepage_flags(&folio->page);
	enqueue_hugetlb_folio(h, folio);
}

static void __update_and_free_hugetlb_folio(struct hstate *h,
						struct folio *folio)
{
	int i;
	struct page *subpage;

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	/*
	 * If we don't know which subpages are hwpoisoned, we can't free
	 * the hugepage, so it's leaked intentionally.
	 */
	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return;

	if (hugetlb_vmemmap_restore(h, &folio->page)) {
		spin_lock_irq(&hugetlb_lock);
		/*
		 * If we cannot allocate vmemmap pages, just refuse to free the
		 * page and put the page back on the hugetlb free list and treat
		 * as a surplus page.
		 */
		add_hugetlb_folio(h, folio, true);
		spin_unlock_irq(&hugetlb_lock);
		return;
	}

	/*
	 * Move PageHWPoison flag from head page to the raw error pages,
	 * which makes any healthy subpages reusable.
	 */
	if (unlikely(folio_test_hwpoison(folio)))
		folio_clear_hugetlb_hwpoison(folio);

	for (i = 0; i < pages_per_huge_page(h); i++) {
		subpage = folio_page(folio, i);
		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_private |
				1 << PG_writeback);
	}

	/*
	 * Non-gigantic pages demoted from CMA allocated gigantic pages
	 * need to be given back to CMA in free_gigantic_folio.
	 */
	if (hstate_is_gigantic(h) ||
	    hugetlb_cma_folio(folio, huge_page_order(h))) {
		destroy_compound_gigantic_folio(folio, huge_page_order(h));
		free_gigantic_folio(folio, huge_page_order(h));
	} else {
		__free_pages(&folio->page, huge_page_order(h));
	}
}

/*
 * As update_and_free_hugetlb_folio() can be called under any context, we
 * cannot use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
 * actual freeing in a workqueue to prevent from using GFP_ATOMIC to allocate
 * the vmemmap pages.
 *
 * free_hpage_workfn() locklessly retrieves the linked list of pages to be
 * freed and frees them one-by-one. As the page->mapping pointer is going
 * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
 * structure of a lockless linked list of huge pages to be freed.
 */
static LLIST_HEAD(hpage_freelist);

static void free_hpage_workfn(struct work_struct *work)
{
	struct llist_node *node;

	node = llist_del_all(&hpage_freelist);

	while (node) {
		struct page *page;
		struct hstate *h;

		page = container_of((struct address_space **)node,
				     struct page, mapping);
		node = node->next;
		page->mapping = NULL;
		/*
		 * The VM_BUG_ON_PAGE(!PageHuge(page), page) in page_hstate()
		 * is going to trigger because a previous call to
		 * remove_hugetlb_folio() will call folio_set_compound_dtor
		 * (folio, NULL_COMPOUND_DTOR), so do not use page_hstate()
		 * directly.
		 */
		h = size_to_hstate(page_size(page));

		__update_and_free_hugetlb_folio(h, page_folio(page));

		cond_resched();
	}
}
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);

static inline void flush_free_hpage_work(struct hstate *h)
{
	if (hugetlb_vmemmap_optimizable(h))
		flush_work(&free_hpage_work);
}

static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
				 bool atomic)
{
	if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
		__update_and_free_hugetlb_folio(h, folio);
		return;
	}

	/*
	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
	 *
	 * Only call schedule_work() if hpage_freelist is previously
	 * empty. Otherwise, schedule_work() had been called but the workfn
	 * hasn't retrieved the list yet.
	 */
	if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
		schedule_work(&free_hpage_work);
}

static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
{
	struct page *page, *t_page;
	struct folio *folio;

	list_for_each_entry_safe(page, t_page, list, lru) {
		folio = page_folio(page);
		update_and_free_hugetlb_folio(h, folio, false);
		cond_resched();
	}
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}
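
/*
 * Illustrative sketch (not from the original source): the deferred freeing
 * path above is a "lockless producer, workqueue consumer" pattern:
 *
 *	producer (any context, e.g. under hugetlb_lock with IRQs off):
 *		if (llist_add((struct llist_node *)&folio->mapping,
 *			      &hpage_freelist))
 *			schedule_work(&free_hpage_work);
 *
 *	consumer (free_hpage_workfn, process context):
 *		node = llist_del_all(&hpage_freelist);
 *		// walk the list, recover each page with container_of(),
 *		// then free it with __update_and_free_hugetlb_folio()
 *
 * llist_add() returns true only when the list was previously empty, so the
 * work item is queued at most once per batch.
 */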
1856 */ 1857 struct folio *folio = page_folio(page); 1858 struct hstate *h = folio_hstate(folio); 1859 int nid = folio_nid(folio); 1860 struct hugepage_subpool *spool = hugetlb_folio_subpool(folio); 1861 bool restore_reserve; 1862 unsigned long flags; 1863 1864 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio); 1865 VM_BUG_ON_FOLIO(folio_mapcount(folio), folio); 1866 1867 hugetlb_set_folio_subpool(folio, NULL); 1868 if (folio_test_anon(folio)) 1869 __ClearPageAnonExclusive(&folio->page); 1870 folio->mapping = NULL; 1871 restore_reserve = folio_test_hugetlb_restore_reserve(folio); 1872 folio_clear_hugetlb_restore_reserve(folio); 1873 1874 /* 1875 * If HPageRestoreReserve was set on page, page allocation consumed a 1876 * reservation. If the page was associated with a subpool, there 1877 * would have been a page reserved in the subpool before allocation 1878 * via hugepage_subpool_get_pages(). Since we are 'restoring' the 1879 * reservation, do not call hugepage_subpool_put_pages() as this will 1880 * remove the reserved page from the subpool. 1881 */ 1882 if (!restore_reserve) { 1883 /* 1884 * A return code of zero implies that the subpool will be 1885 * under its minimum size if the reservation is not restored 1886 * after page is free. Therefore, force restore_reserve 1887 * operation. 1888 */ 1889 if (hugepage_subpool_put_pages(spool, 1) == 0) 1890 restore_reserve = true; 1891 } 1892 1893 spin_lock_irqsave(&hugetlb_lock, flags); 1894 folio_clear_hugetlb_migratable(folio); 1895 hugetlb_cgroup_uncharge_folio(hstate_index(h), 1896 pages_per_huge_page(h), folio); 1897 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), 1898 pages_per_huge_page(h), folio); 1899 if (restore_reserve) 1900 h->resv_huge_pages++; 1901 1902 if (folio_test_hugetlb_temporary(folio)) { 1903 remove_hugetlb_folio(h, folio, false); 1904 spin_unlock_irqrestore(&hugetlb_lock, flags); 1905 update_and_free_hugetlb_folio(h, folio, true); 1906 } else if (h->surplus_huge_pages_node[nid]) { 1907 /* remove the page from active list */ 1908 remove_hugetlb_folio(h, folio, true); 1909 spin_unlock_irqrestore(&hugetlb_lock, flags); 1910 update_and_free_hugetlb_folio(h, folio, true); 1911 } else { 1912 arch_clear_hugepage_flags(page); 1913 enqueue_hugetlb_folio(h, folio); 1914 spin_unlock_irqrestore(&hugetlb_lock, flags); 1915 } 1916 } 1917 1918 /* 1919 * Must be called with the hugetlb lock held 1920 */ 1921 static void __prep_account_new_huge_page(struct hstate *h, int nid) 1922 { 1923 lockdep_assert_held(&hugetlb_lock); 1924 h->nr_huge_pages++; 1925 h->nr_huge_pages_node[nid]++; 1926 } 1927 1928 static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio) 1929 { 1930 hugetlb_vmemmap_optimize(h, &folio->page); 1931 INIT_LIST_HEAD(&folio->lru); 1932 folio_set_compound_dtor(folio, HUGETLB_PAGE_DTOR); 1933 hugetlb_set_folio_subpool(folio, NULL); 1934 set_hugetlb_cgroup(folio, NULL); 1935 set_hugetlb_cgroup_rsvd(folio, NULL); 1936 } 1937 1938 static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid) 1939 { 1940 __prep_new_hugetlb_folio(h, folio); 1941 spin_lock_irq(&hugetlb_lock); 1942 __prep_account_new_huge_page(h, nid); 1943 spin_unlock_irq(&hugetlb_lock); 1944 } 1945 1946 static bool __prep_compound_gigantic_folio(struct folio *folio, 1947 unsigned int order, bool demote) 1948 { 1949 int i, j; 1950 int nr_pages = 1 << order; 1951 struct page *p; 1952 1953 __folio_clear_reserved(folio); 1954 __folio_set_head(folio); 1955 /* we rely on prep_new_hugetlb_folio to set the destructor */ 1956 
folio_set_order(folio, order); 1957 for (i = 0; i < nr_pages; i++) { 1958 p = folio_page(folio, i); 1959 1960 /* 1961 * For gigantic hugepages allocated through bootmem at 1962 * boot, it's safer to be consistent with the not-gigantic 1963 * hugepages and clear the PG_reserved bit from all tail pages 1964 * too. Otherwise drivers using get_user_pages() to access tail 1965 * pages may get the reference counting wrong if they see 1966 * PG_reserved set on a tail page (despite the head page not 1967 * having PG_reserved set). Enforcing this consistency between 1968 * head and tail pages allows drivers to optimize away a check 1969 * on the head page when they need know if put_page() is needed 1970 * after get_user_pages(). 1971 */ 1972 if (i != 0) /* head page cleared above */ 1973 __ClearPageReserved(p); 1974 /* 1975 * Subtle and very unlikely 1976 * 1977 * Gigantic 'page allocators' such as memblock or cma will 1978 * return a set of pages with each page ref counted. We need 1979 * to turn this set of pages into a compound page with tail 1980 * page ref counts set to zero. Code such as speculative page 1981 * cache adding could take a ref on a 'to be' tail page. 1982 * We need to respect any increased ref count, and only set 1983 * the ref count to zero if count is currently 1. If count 1984 * is not 1, we return an error. An error return indicates 1985 * the set of pages can not be converted to a gigantic page. 1986 * The caller who allocated the pages should then discard the 1987 * pages using the appropriate free interface. 1988 * 1989 * In the case of demote, the ref count will be zero. 1990 */ 1991 if (!demote) { 1992 if (!page_ref_freeze(p, 1)) { 1993 pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n"); 1994 goto out_error; 1995 } 1996 } else { 1997 VM_BUG_ON_PAGE(page_count(p), p); 1998 } 1999 if (i != 0) 2000 set_compound_head(p, &folio->page); 2001 } 2002 atomic_set(&folio->_entire_mapcount, -1); 2003 atomic_set(&folio->_nr_pages_mapped, 0); 2004 atomic_set(&folio->_pincount, 0); 2005 return true; 2006 2007 out_error: 2008 /* undo page modifications made above */ 2009 for (j = 0; j < i; j++) { 2010 p = folio_page(folio, j); 2011 if (j != 0) 2012 clear_compound_head(p); 2013 set_page_refcounted(p); 2014 } 2015 /* need to clear PG_reserved on remaining tail pages */ 2016 for (; j < nr_pages; j++) { 2017 p = folio_page(folio, j); 2018 __ClearPageReserved(p); 2019 } 2020 folio_set_order(folio, 0); 2021 __folio_clear_head(folio); 2022 return false; 2023 } 2024 2025 static bool prep_compound_gigantic_folio(struct folio *folio, 2026 unsigned int order) 2027 { 2028 return __prep_compound_gigantic_folio(folio, order, false); 2029 } 2030 2031 static bool prep_compound_gigantic_folio_for_demote(struct folio *folio, 2032 unsigned int order) 2033 { 2034 return __prep_compound_gigantic_folio(folio, order, true); 2035 } 2036 2037 /* 2038 * PageHuge() only returns true for hugetlbfs pages, but not for normal or 2039 * transparent huge pages. See the PageTransHuge() documentation for more 2040 * details. 2041 */ 2042 int PageHuge(struct page *page) 2043 { 2044 struct folio *folio; 2045 2046 if (!PageCompound(page)) 2047 return 0; 2048 folio = page_folio(page); 2049 return folio->_folio_dtor == HUGETLB_PAGE_DTOR; 2050 } 2051 EXPORT_SYMBOL_GPL(PageHuge); 2052 2053 /** 2054 * folio_test_hugetlb - Determine if the folio belongs to hugetlbfs 2055 * @folio: The folio to test. 2056 * 2057 * Context: Any context. 
Caller should have a reference on the folio to 2058 * prevent it from being turned into a tail page. 2059 * Return: True for hugetlbfs folios, false for anon folios or folios 2060 * belonging to other filesystems. 2061 */ 2062 bool folio_test_hugetlb(struct folio *folio) 2063 { 2064 if (!folio_test_large(folio)) 2065 return false; 2066 2067 return folio->_folio_dtor == HUGETLB_PAGE_DTOR; 2068 } 2069 EXPORT_SYMBOL_GPL(folio_test_hugetlb); 2070 2071 /* 2072 * Find and lock address space (mapping) in write mode. 2073 * 2074 * Upon entry, the page is locked which means that page_mapping() is 2075 * stable. Due to locking order, we can only trylock_write. If we can 2076 * not get the lock, simply return NULL to caller. 2077 */ 2078 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage) 2079 { 2080 struct address_space *mapping = page_mapping(hpage); 2081 2082 if (!mapping) 2083 return mapping; 2084 2085 if (i_mmap_trylock_write(mapping)) 2086 return mapping; 2087 2088 return NULL; 2089 } 2090 2091 pgoff_t hugetlb_basepage_index(struct page *page) 2092 { 2093 struct page *page_head = compound_head(page); 2094 pgoff_t index = page_index(page_head); 2095 unsigned long compound_idx; 2096 2097 if (compound_order(page_head) > MAX_ORDER) 2098 compound_idx = page_to_pfn(page) - page_to_pfn(page_head); 2099 else 2100 compound_idx = page - page_head; 2101 2102 return (index << compound_order(page_head)) + compound_idx; 2103 } 2104 2105 static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h, 2106 gfp_t gfp_mask, int nid, nodemask_t *nmask, 2107 nodemask_t *node_alloc_noretry) 2108 { 2109 int order = huge_page_order(h); 2110 struct page *page; 2111 bool alloc_try_hard = true; 2112 bool retry = true; 2113 2114 /* 2115 * By default we always try hard to allocate the page with 2116 * __GFP_RETRY_MAYFAIL flag. However, if we are allocating pages in 2117 * a loop (to adjust global huge page counts) and previous allocation 2118 * failed, do not continue to try hard on the same node. Use the 2119 * node_alloc_noretry bitmap to manage this state information. 2120 */ 2121 if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry)) 2122 alloc_try_hard = false; 2123 gfp_mask |= __GFP_COMP|__GFP_NOWARN; 2124 if (alloc_try_hard) 2125 gfp_mask |= __GFP_RETRY_MAYFAIL; 2126 if (nid == NUMA_NO_NODE) 2127 nid = numa_mem_id(); 2128 retry: 2129 page = __alloc_pages(gfp_mask, order, nid, nmask); 2130 2131 /* Freeze head page */ 2132 if (page && !page_ref_freeze(page, 1)) { 2133 __free_pages(page, order); 2134 if (retry) { /* retry once */ 2135 retry = false; 2136 goto retry; 2137 } 2138 /* WOW! twice in a row. */ 2139 pr_warn("HugeTLB head page unexpected inflated ref count\n"); 2140 page = NULL; 2141 } 2142 2143 /* 2144 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page this 2145 * indicates an overall state change. Clear bit so that we resume 2146 * normal 'try hard' allocations. 2147 */ 2148 if (node_alloc_noretry && page && !alloc_try_hard) 2149 node_clear(nid, *node_alloc_noretry); 2150 2151 /* 2152 * If we tried hard to get a page but failed, set bit so that 2153 * subsequent attempts will not try as hard until there is an 2154 * overall state change. 
2155 */ 2156 if (node_alloc_noretry && !page && alloc_try_hard) 2157 node_set(nid, *node_alloc_noretry); 2158 2159 if (!page) { 2160 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); 2161 return NULL; 2162 } 2163 2164 __count_vm_event(HTLB_BUDDY_PGALLOC); 2165 return page_folio(page); 2166 } 2167 2168 /* 2169 * Common helper to allocate a fresh hugetlb page. All specific allocators 2170 * should use this function to get new hugetlb pages 2171 * 2172 * Note that returned page is 'frozen': ref count of head page and all tail 2173 * pages is zero. 2174 */ 2175 static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h, 2176 gfp_t gfp_mask, int nid, nodemask_t *nmask, 2177 nodemask_t *node_alloc_noretry) 2178 { 2179 struct folio *folio; 2180 bool retry = false; 2181 2182 retry: 2183 if (hstate_is_gigantic(h)) 2184 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask); 2185 else 2186 folio = alloc_buddy_hugetlb_folio(h, gfp_mask, 2187 nid, nmask, node_alloc_noretry); 2188 if (!folio) 2189 return NULL; 2190 if (hstate_is_gigantic(h)) { 2191 if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) { 2192 /* 2193 * Rare failure to convert pages to compound page. 2194 * Free pages and try again - ONCE! 2195 */ 2196 free_gigantic_folio(folio, huge_page_order(h)); 2197 if (!retry) { 2198 retry = true; 2199 goto retry; 2200 } 2201 return NULL; 2202 } 2203 } 2204 prep_new_hugetlb_folio(h, folio, folio_nid(folio)); 2205 2206 return folio; 2207 } 2208 2209 /* 2210 * Allocates a fresh page to the hugetlb allocator pool in the node interleaved 2211 * manner. 2212 */ 2213 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, 2214 nodemask_t *node_alloc_noretry) 2215 { 2216 struct folio *folio; 2217 int nr_nodes, node; 2218 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 2219 2220 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { 2221 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, node, 2222 nodes_allowed, node_alloc_noretry); 2223 if (folio) { 2224 free_huge_page(&folio->page); /* free it into the hugepage allocator */ 2225 return 1; 2226 } 2227 } 2228 2229 return 0; 2230 } 2231 2232 /* 2233 * Remove huge page from pool from next node to free. Attempt to keep 2234 * persistent huge pages more or less balanced over allowed nodes. 2235 * This routine only 'removes' the hugetlb page. The caller must make 2236 * an additional call to free the page to low level allocators. 2237 * Called with hugetlb_lock locked. 2238 */ 2239 static struct page *remove_pool_huge_page(struct hstate *h, 2240 nodemask_t *nodes_allowed, 2241 bool acct_surplus) 2242 { 2243 int nr_nodes, node; 2244 struct page *page = NULL; 2245 struct folio *folio; 2246 2247 lockdep_assert_held(&hugetlb_lock); 2248 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 2249 /* 2250 * If we're returning unused surplus pages, only examine 2251 * nodes with surplus pages. 2252 */ 2253 if ((!acct_surplus || h->surplus_huge_pages_node[node]) && 2254 !list_empty(&h->hugepage_freelists[node])) { 2255 page = list_entry(h->hugepage_freelists[node].next, 2256 struct page, lru); 2257 folio = page_folio(page); 2258 remove_hugetlb_folio(h, folio, acct_surplus); 2259 break; 2260 } 2261 } 2262 2263 return page; 2264 } 2265 2266 /* 2267 * Dissolve a given free hugepage into free buddy pages. This function does 2268 * nothing for in-use hugepages and non-hugepages. 
2269 * This function returns values like below: 2270 * 2271 * -ENOMEM: failed to allocate vmemmap pages needed to free the hugepage 2272 * when the system is under memory pressure and the feature of 2273 * freeing unused vmemmap pages associated with each hugetlb page 2274 * is enabled. 2275 * -EBUSY: failed to dissolve free hugepages or the hugepage is in-use 2276 * (allocated or reserved). 2277 * 0: successfully dissolved free hugepages or the page is not a 2278 * hugepage (considered as already dissolved) 2279 */ 2280 int dissolve_free_huge_page(struct page *page) 2281 { 2282 int rc = -EBUSY; 2283 struct folio *folio = page_folio(page); 2284 2285 retry: 2286 /* Not to disrupt normal path by vainly holding hugetlb_lock */ 2287 if (!folio_test_hugetlb(folio)) 2288 return 0; 2289 2290 spin_lock_irq(&hugetlb_lock); 2291 if (!folio_test_hugetlb(folio)) { 2292 rc = 0; 2293 goto out; 2294 } 2295 2296 if (!folio_ref_count(folio)) { 2297 struct hstate *h = folio_hstate(folio); 2298 if (!available_huge_pages(h)) 2299 goto out; 2300 2301 /* 2302 * We should make sure that the page is already on the free list 2303 * when it is dissolved. 2304 */ 2305 if (unlikely(!folio_test_hugetlb_freed(folio))) { 2306 spin_unlock_irq(&hugetlb_lock); 2307 cond_resched(); 2308 2309 /* 2310 * Theoretically, we should return -EBUSY when we 2311 * encounter this race. In fact, we have a chance 2312 * to successfully dissolve the page if we do a 2313 * retry, because the race window is quite small. 2314 * Seizing this opportunity is an optimization 2315 * that increases the success rate of dissolving the page. 2316 */ 2317 goto retry; 2318 } 2319 2320 remove_hugetlb_folio(h, folio, false); 2321 h->max_huge_pages--; 2322 spin_unlock_irq(&hugetlb_lock); 2323 2324 /* 2325 * Normally update_and_free_hugetlb_folio will allocate the required vmemmap 2326 * before freeing the page. update_and_free_hugetlb_folio will fail to 2327 * free the page if it can not allocate the required vmemmap. We 2328 * need to adjust max_huge_pages if the page is not freed. 2329 * Attempt to allocate the vmemmap here so that we can take 2330 * appropriate action on failure. 2331 */ 2332 rc = hugetlb_vmemmap_restore(h, &folio->page); 2333 if (!rc) { 2334 update_and_free_hugetlb_folio(h, folio, false); 2335 } else { 2336 spin_lock_irq(&hugetlb_lock); 2337 add_hugetlb_folio(h, folio, false); 2338 h->max_huge_pages++; 2339 spin_unlock_irq(&hugetlb_lock); 2340 } 2341 2342 return rc; 2343 } 2344 out: 2345 spin_unlock_irq(&hugetlb_lock); 2346 return rc; 2347 } 2348 2349 /* 2350 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to 2351 * make specified memory blocks removable from the system. 2352 * Note that this will dissolve a free gigantic hugepage completely, if any 2353 * part of it lies within the given range. 2354 * Also note that if dissolve_free_huge_page() returns with an error, all 2355 * free hugepages that were dissolved before that error are lost.
2356 */ 2357 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn) 2358 { 2359 unsigned long pfn; 2360 struct page *page; 2361 int rc = 0; 2362 unsigned int order; 2363 struct hstate *h; 2364 2365 if (!hugepages_supported()) 2366 return rc; 2367 2368 order = huge_page_order(&default_hstate); 2369 for_each_hstate(h) 2370 order = min(order, huge_page_order(h)); 2371 2372 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) { 2373 page = pfn_to_page(pfn); 2374 rc = dissolve_free_huge_page(page); 2375 if (rc) 2376 break; 2377 } 2378 2379 return rc; 2380 } 2381 2382 /* 2383 * Allocates a fresh surplus page from the page allocator. 2384 */ 2385 static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h, 2386 gfp_t gfp_mask, int nid, nodemask_t *nmask) 2387 { 2388 struct folio *folio = NULL; 2389 2390 if (hstate_is_gigantic(h)) 2391 return NULL; 2392 2393 spin_lock_irq(&hugetlb_lock); 2394 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) 2395 goto out_unlock; 2396 spin_unlock_irq(&hugetlb_lock); 2397 2398 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL); 2399 if (!folio) 2400 return NULL; 2401 2402 spin_lock_irq(&hugetlb_lock); 2403 /* 2404 * We could have raced with a pool size change. 2405 * Double check that and simply deallocate the new page 2406 * if we would end up overcommitting the surpluses. Abuse 2407 * the temporary page flag to work around the nasty free_huge_page 2408 * codeflow. 2409 */ 2410 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { 2411 folio_set_hugetlb_temporary(folio); 2412 spin_unlock_irq(&hugetlb_lock); 2413 free_huge_page(&folio->page); 2414 return NULL; 2415 } 2416 2417 h->surplus_huge_pages++; 2418 h->surplus_huge_pages_node[folio_nid(folio)]++; 2419 2420 out_unlock: 2421 spin_unlock_irq(&hugetlb_lock); 2422 2423 return folio; 2424 } 2425 2426 static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, 2427 int nid, nodemask_t *nmask) 2428 { 2429 struct folio *folio; 2430 2431 if (hstate_is_gigantic(h)) 2432 return NULL; 2433 2434 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL); 2435 if (!folio) 2436 return NULL; 2437 2438 /* fresh huge pages are frozen */ 2439 folio_ref_unfreeze(folio, 1); 2440 /* 2441 * We do not account these pages as surplus because they are only 2442 * temporary and will be released properly on the last reference. 2443 */ 2444 folio_set_hugetlb_temporary(folio); 2445 2446 return folio; 2447 } 2448 2449 /* 2450 * Use the VMA's mpolicy to allocate a huge page from the buddy.
2451 */ 2452 static 2453 struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h, 2454 struct vm_area_struct *vma, unsigned long addr) 2455 { 2456 struct folio *folio = NULL; 2457 struct mempolicy *mpol; 2458 gfp_t gfp_mask = htlb_alloc_mask(h); 2459 int nid; 2460 nodemask_t *nodemask; 2461 2462 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask); 2463 if (mpol_is_preferred_many(mpol)) { 2464 gfp_t gfp = gfp_mask | __GFP_NOWARN; 2465 2466 gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 2467 folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask); 2468 2469 /* Fallback to all nodes if page==NULL */ 2470 nodemask = NULL; 2471 } 2472 2473 if (!folio) 2474 folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask); 2475 mpol_cond_put(mpol); 2476 return folio; 2477 } 2478 2479 /* folio migration callback function */ 2480 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, 2481 nodemask_t *nmask, gfp_t gfp_mask) 2482 { 2483 spin_lock_irq(&hugetlb_lock); 2484 if (available_huge_pages(h)) { 2485 struct folio *folio; 2486 2487 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, 2488 preferred_nid, nmask); 2489 if (folio) { 2490 spin_unlock_irq(&hugetlb_lock); 2491 return folio; 2492 } 2493 } 2494 spin_unlock_irq(&hugetlb_lock); 2495 2496 return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask); 2497 } 2498 2499 /* mempolicy aware migration callback */ 2500 struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma, 2501 unsigned long address) 2502 { 2503 struct mempolicy *mpol; 2504 nodemask_t *nodemask; 2505 struct folio *folio; 2506 gfp_t gfp_mask; 2507 int node; 2508 2509 gfp_mask = htlb_alloc_mask(h); 2510 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask); 2511 folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask); 2512 mpol_cond_put(mpol); 2513 2514 return folio; 2515 } 2516 2517 /* 2518 * Increase the hugetlb pool such that it can accommodate a reservation 2519 * of size 'delta'. 2520 */ 2521 static int gather_surplus_pages(struct hstate *h, long delta) 2522 __must_hold(&hugetlb_lock) 2523 { 2524 LIST_HEAD(surplus_list); 2525 struct folio *folio; 2526 struct page *page, *tmp; 2527 int ret; 2528 long i; 2529 long needed, allocated; 2530 bool alloc_ok = true; 2531 2532 lockdep_assert_held(&hugetlb_lock); 2533 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; 2534 if (needed <= 0) { 2535 h->resv_huge_pages += delta; 2536 return 0; 2537 } 2538 2539 allocated = 0; 2540 2541 ret = -ENOMEM; 2542 retry: 2543 spin_unlock_irq(&hugetlb_lock); 2544 for (i = 0; i < needed; i++) { 2545 folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), 2546 NUMA_NO_NODE, NULL); 2547 if (!folio) { 2548 alloc_ok = false; 2549 break; 2550 } 2551 list_add(&folio->lru, &surplus_list); 2552 cond_resched(); 2553 } 2554 allocated += i; 2555 2556 /* 2557 * After retaking hugetlb_lock, we need to recalculate 'needed' 2558 * because either resv_huge_pages or free_huge_pages may have changed. 2559 */ 2560 spin_lock_irq(&hugetlb_lock); 2561 needed = (h->resv_huge_pages + delta) - 2562 (h->free_huge_pages + allocated); 2563 if (needed > 0) { 2564 if (alloc_ok) 2565 goto retry; 2566 /* 2567 * We were not able to allocate enough pages to 2568 * satisfy the entire reservation so we free what 2569 * we've allocated so far. 2570 */ 2571 goto free; 2572 } 2573 /* 2574 * The surplus_list now contains _at_least_ the number of extra pages 2575 * needed to accommodate the reservation. 
Add the appropriate number 2576 * of pages to the hugetlb pool and free the extras back to the buddy 2577 * allocator. Commit the entire reservation here to prevent another 2578 * process from stealing the pages as they are added to the pool but 2579 * before they are reserved. 2580 */ 2581 needed += allocated; 2582 h->resv_huge_pages += delta; 2583 ret = 0; 2584 2585 /* Free the needed pages to the hugetlb pool */ 2586 list_for_each_entry_safe(page, tmp, &surplus_list, lru) { 2587 if ((--needed) < 0) 2588 break; 2589 /* Add the page to the hugetlb allocator */ 2590 enqueue_hugetlb_folio(h, page_folio(page)); 2591 } 2592 free: 2593 spin_unlock_irq(&hugetlb_lock); 2594 2595 /* 2596 * Free unnecessary surplus pages to the buddy allocator. 2597 * Pages have no ref count, call free_huge_page directly. 2598 */ 2599 list_for_each_entry_safe(page, tmp, &surplus_list, lru) 2600 free_huge_page(page); 2601 spin_lock_irq(&hugetlb_lock); 2602 2603 return ret; 2604 } 2605 2606 /* 2607 * This routine has two main purposes: 2608 * 1) Decrement the reservation count (resv_huge_pages) by the value passed 2609 * in unused_resv_pages. This corresponds to the prior adjustments made 2610 * to the associated reservation map. 2611 * 2) Free any unused surplus pages that may have been allocated to satisfy 2612 * the reservation. As many as unused_resv_pages may be freed. 2613 */ 2614 static void return_unused_surplus_pages(struct hstate *h, 2615 unsigned long unused_resv_pages) 2616 { 2617 unsigned long nr_pages; 2618 struct page *page; 2619 LIST_HEAD(page_list); 2620 2621 lockdep_assert_held(&hugetlb_lock); 2622 /* Uncommit the reservation */ 2623 h->resv_huge_pages -= unused_resv_pages; 2624 2625 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 2626 goto out; 2627 2628 /* 2629 * Part (or even all) of the reservation could have been backed 2630 * by pre-allocated pages. Only free surplus pages. 2631 */ 2632 nr_pages = min(unused_resv_pages, h->surplus_huge_pages); 2633 2634 /* 2635 * We want to release as many surplus pages as possible, spread 2636 * evenly across all nodes with memory. Iterate across these nodes 2637 * until we can no longer free unreserved surplus pages. This occurs 2638 * when the nodes with surplus pages have no free pages. 2639 * remove_pool_huge_page() will balance the freed pages across the 2640 * on-line nodes with memory and will handle the hstate accounting. 2641 */ 2642 while (nr_pages--) { 2643 page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1); 2644 if (!page) 2645 goto out; 2646 2647 list_add(&page->lru, &page_list); 2648 } 2649 2650 out: 2651 spin_unlock_irq(&hugetlb_lock); 2652 update_and_free_pages_bulk(h, &page_list); 2653 spin_lock_irq(&hugetlb_lock); 2654 } 2655 2656 2657 /* 2658 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation 2659 * are used by the huge page allocation routines to manage reservations. 2660 * 2661 * vma_needs_reservation is called to determine if the huge page at addr 2662 * within the vma has an associated reservation. If a reservation is 2663 * needed, the value 1 is returned. The caller is then responsible for 2664 * managing the global reservation and subpool usage counts. After 2665 * the huge page has been allocated, vma_commit_reservation is called 2666 * to add the page to the reservation map. If the page allocation fails, 2667 * the reservation must be ended instead of committed. vma_end_reservation 2668 * is called in such cases. 
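 *
 * A simplified usage sketch (for illustration only; alloc_hugetlb_folio()
 * is the authoritative caller and also layers subpool and cgroup
 * accounting around these calls):
 *
 *	chg = vma_needs_reservation(h, vma, addr);
 *	if (chg < 0)
 *		return ERR_PTR(-ENOMEM);
 *	folio = <allocate a huge page>;
 *	if (!folio) {
 *		vma_end_reservation(h, vma, addr);
 *		return ERR_PTR(-ENOSPC);
 *	}
 *	map_commit = vma_commit_reservation(h, vma, addr);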
2669 * 2670 * In the normal case, vma_commit_reservation returns the same value 2671 * as the preceding vma_needs_reservation call. The only time this 2672 * is not the case is if a reserve map was changed between calls. It 2673 * is the responsibility of the caller to notice the difference and 2674 * take appropriate action. 2675 * 2676 * vma_add_reservation is used in error paths where a reservation must 2677 * be restored when a newly allocated huge page must be freed. It is 2678 * to be called after calling vma_needs_reservation to determine if a 2679 * reservation exists. 2680 * 2681 * vma_del_reservation is used in error paths where an entry in the reserve 2682 * map was created during huge page allocation and must be removed. It is to 2683 * be called after calling vma_needs_reservation to determine if a reservation 2684 * exists. 2685 */ 2686 enum vma_resv_mode { 2687 VMA_NEEDS_RESV, 2688 VMA_COMMIT_RESV, 2689 VMA_END_RESV, 2690 VMA_ADD_RESV, 2691 VMA_DEL_RESV, 2692 }; 2693 static long __vma_reservation_common(struct hstate *h, 2694 struct vm_area_struct *vma, unsigned long addr, 2695 enum vma_resv_mode mode) 2696 { 2697 struct resv_map *resv; 2698 pgoff_t idx; 2699 long ret; 2700 long dummy_out_regions_needed; 2701 2702 resv = vma_resv_map(vma); 2703 if (!resv) 2704 return 1; 2705 2706 idx = vma_hugecache_offset(h, vma, addr); 2707 switch (mode) { 2708 case VMA_NEEDS_RESV: 2709 ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed); 2710 /* We assume that vma_reservation_* routines always operate on 2711 * 1 page, and that adding to resv map a 1 page entry can only 2712 * ever require 1 region. 2713 */ 2714 VM_BUG_ON(dummy_out_regions_needed != 1); 2715 break; 2716 case VMA_COMMIT_RESV: 2717 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2718 /* region_add calls of range 1 should never fail. */ 2719 VM_BUG_ON(ret < 0); 2720 break; 2721 case VMA_END_RESV: 2722 region_abort(resv, idx, idx + 1, 1); 2723 ret = 0; 2724 break; 2725 case VMA_ADD_RESV: 2726 if (vma->vm_flags & VM_MAYSHARE) { 2727 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2728 /* region_add calls of range 1 should never fail. */ 2729 VM_BUG_ON(ret < 0); 2730 } else { 2731 region_abort(resv, idx, idx + 1, 1); 2732 ret = region_del(resv, idx, idx + 1); 2733 } 2734 break; 2735 case VMA_DEL_RESV: 2736 if (vma->vm_flags & VM_MAYSHARE) { 2737 region_abort(resv, idx, idx + 1, 1); 2738 ret = region_del(resv, idx, idx + 1); 2739 } else { 2740 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2741 /* region_add calls of range 1 should never fail. */ 2742 VM_BUG_ON(ret < 0); 2743 } 2744 break; 2745 default: 2746 BUG(); 2747 } 2748 2749 if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV) 2750 return ret; 2751 /* 2752 * We know a private mapping must have HPAGE_RESV_OWNER set. 2753 * 2754 * In most cases, reserves always exist for private mappings. 2755 * However, the file associated with the mapping could have been 2756 * hole punched or truncated after the reserves were consumed; 2757 * a subsequent fault on such a range will then not use reserves. 2758 * Subtle - The reserve map for private mappings has the 2759 * opposite meaning from that of shared mappings. If NO 2760 * entry is in the reserve map, it means a reservation exists. 2761 * If an entry exists in the reserve map, it means the 2762 * reservation has already been consumed. As a result, the 2763 * return value of this routine is the opposite of the 2764 * value returned from reserve map manipulation routines above.
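 *
 * Concretely, for private mappings this routine inverts the result of the
 * reserve map operation (summarizing the code below): ret > 0 (an entry
 * exists) becomes 0 (no reservation available), ret == 0 (no entry) becomes
 * 1 (a reservation exists), and errors (ret < 0) are passed through.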
2765 */ 2766 if (ret > 0) 2767 return 0; 2768 if (ret == 0) 2769 return 1; 2770 return ret; 2771 } 2772 2773 static long vma_needs_reservation(struct hstate *h, 2774 struct vm_area_struct *vma, unsigned long addr) 2775 { 2776 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); 2777 } 2778 2779 static long vma_commit_reservation(struct hstate *h, 2780 struct vm_area_struct *vma, unsigned long addr) 2781 { 2782 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); 2783 } 2784 2785 static void vma_end_reservation(struct hstate *h, 2786 struct vm_area_struct *vma, unsigned long addr) 2787 { 2788 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); 2789 } 2790 2791 static long vma_add_reservation(struct hstate *h, 2792 struct vm_area_struct *vma, unsigned long addr) 2793 { 2794 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); 2795 } 2796 2797 static long vma_del_reservation(struct hstate *h, 2798 struct vm_area_struct *vma, unsigned long addr) 2799 { 2800 return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV); 2801 } 2802 2803 /* 2804 * This routine is called to restore reservation information on error paths. 2805 * It should ONLY be called for folios allocated via alloc_hugetlb_folio(), 2806 * and the hugetlb mutex should remain held when calling this routine. 2807 * 2808 * It handles two specific cases: 2809 * 1) A reservation was in place and the folio consumed the reservation. 2810 * hugetlb_restore_reserve is set in the folio. 2811 * 2) No reservation was in place for the page, so hugetlb_restore_reserve is 2812 * not set. However, alloc_hugetlb_folio always updates the reserve map. 2813 * 2814 * In case 1, free_huge_page later in the error path will increment the 2815 * global reserve count. But, free_huge_page does not have enough context 2816 * to adjust the reservation map. This case deals primarily with private 2817 * mappings. Adjust the reserve map here to be consistent with global 2818 * reserve count adjustments to be made by free_huge_page. Make sure the 2819 * reserve map indicates there is a reservation present. 2820 * 2821 * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio. 2822 */ 2823 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, 2824 unsigned long address, struct folio *folio) 2825 { 2826 long rc = vma_needs_reservation(h, vma, address); 2827 2828 if (folio_test_hugetlb_restore_reserve(folio)) { 2829 if (unlikely(rc < 0)) 2830 /* 2831 * Rare out of memory condition in reserve map 2832 * manipulation. Clear hugetlb_restore_reserve so 2833 * that global reserve count will not be incremented 2834 * by free_huge_page. This will make it appear 2835 * as though the reservation for this folio was 2836 * consumed. This may prevent the task from 2837 * faulting in the folio at a later time. This 2838 * is better than inconsistent global huge page 2839 * accounting of reserve counts. 2840 */ 2841 folio_clear_hugetlb_restore_reserve(folio); 2842 else if (rc) 2843 (void)vma_add_reservation(h, vma, address); 2844 else 2845 vma_end_reservation(h, vma, address); 2846 } else { 2847 if (!rc) { 2848 /* 2849 * This indicates there is an entry in the reserve map 2850 * not added by alloc_hugetlb_folio. We know it was added 2851 * before the alloc_hugetlb_folio call, otherwise 2852 * hugetlb_restore_reserve would be set on the folio. 2853 * Remove the entry so that a subsequent allocation 2854 * does not consume a reservation. 
2855 */ 2856 rc = vma_del_reservation(h, vma, address); 2857 if (rc < 0) 2858 /* 2859 * VERY rare out of memory condition. Since 2860 * we can not delete the entry, set 2861 * hugetlb_restore_reserve so that the reserve 2862 * count will be incremented when the folio 2863 * is freed. This reserve will be consumed 2864 * on a subsequent allocation. 2865 */ 2866 folio_set_hugetlb_restore_reserve(folio); 2867 } else if (rc < 0) { 2868 /* 2869 * Rare out of memory condition from 2870 * vma_needs_reservation call. Memory allocation is 2871 * only attempted if a new entry is needed. Therefore, 2872 * this implies there is not an entry in the 2873 * reserve map. 2874 * 2875 * For shared mappings, no entry in the map indicates 2876 * no reservation. We are done. 2877 */ 2878 if (!(vma->vm_flags & VM_MAYSHARE)) 2879 /* 2880 * For private mappings, no entry indicates 2881 * a reservation is present. Since we can 2882 * not add an entry, set hugetlb_restore_reserve 2883 * on the folio so reserve count will be 2884 * incremented when freed. This reserve will 2885 * be consumed on a subsequent allocation. 2886 */ 2887 folio_set_hugetlb_restore_reserve(folio); 2888 } else 2889 /* 2890 * No reservation present, do nothing 2891 */ 2892 vma_end_reservation(h, vma, address); 2893 } 2894 } 2895 2896 /* 2897 * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve 2898 * the old one 2899 * @h: struct hstate old page belongs to 2900 * @old_folio: Old folio to dissolve 2901 * @list: List to isolate the page in case we need to 2902 * Returns 0 on success, otherwise negated error. 2903 */ 2904 static int alloc_and_dissolve_hugetlb_folio(struct hstate *h, 2905 struct folio *old_folio, struct list_head *list) 2906 { 2907 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 2908 int nid = folio_nid(old_folio); 2909 struct folio *new_folio; 2910 int ret = 0; 2911 2912 /* 2913 * Before dissolving the folio, we need to allocate a new one for the 2914 * pool to remain stable. Here, we allocate the folio and 'prep' it 2915 * by doing everything but actually updating counters and adding to 2916 * the pool. This simplifies and let us do most of the processing 2917 * under the lock. 2918 */ 2919 new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, NULL, NULL); 2920 if (!new_folio) 2921 return -ENOMEM; 2922 __prep_new_hugetlb_folio(h, new_folio); 2923 2924 retry: 2925 spin_lock_irq(&hugetlb_lock); 2926 if (!folio_test_hugetlb(old_folio)) { 2927 /* 2928 * Freed from under us. Drop new_folio too. 2929 */ 2930 goto free_new; 2931 } else if (folio_ref_count(old_folio)) { 2932 bool isolated; 2933 2934 /* 2935 * Someone has grabbed the folio, try to isolate it here. 2936 * Fail with -EBUSY if not possible. 2937 */ 2938 spin_unlock_irq(&hugetlb_lock); 2939 isolated = isolate_hugetlb(old_folio, list); 2940 ret = isolated ? 0 : -EBUSY; 2941 spin_lock_irq(&hugetlb_lock); 2942 goto free_new; 2943 } else if (!folio_test_hugetlb_freed(old_folio)) { 2944 /* 2945 * Folio's refcount is 0 but it has not been enqueued in the 2946 * freelist yet. Race window is small, so we can succeed here if 2947 * we retry. 2948 */ 2949 spin_unlock_irq(&hugetlb_lock); 2950 cond_resched(); 2951 goto retry; 2952 } else { 2953 /* 2954 * Ok, old_folio is still a genuine free hugepage. Remove it from 2955 * the freelist and decrease the counters. These will be 2956 * incremented again when calling __prep_account_new_huge_page() 2957 * and enqueue_hugetlb_folio() for new_folio. 
The counters will 2958 * remain stable since this happens under the lock. 2959 */ 2960 remove_hugetlb_folio(h, old_folio, false); 2961 2962 /* 2963 * Ref count on new_folio is already zero as it was dropped 2964 * earlier. It can be directly added to the pool free list. 2965 */ 2966 __prep_account_new_huge_page(h, nid); 2967 enqueue_hugetlb_folio(h, new_folio); 2968 2969 /* 2970 * Folio has been replaced, we can safely free the old one. 2971 */ 2972 spin_unlock_irq(&hugetlb_lock); 2973 update_and_free_hugetlb_folio(h, old_folio, false); 2974 } 2975 2976 return ret; 2977 2978 free_new: 2979 spin_unlock_irq(&hugetlb_lock); 2980 /* Folio has a zero ref count, but needs a ref to be freed */ 2981 folio_ref_unfreeze(new_folio, 1); 2982 update_and_free_hugetlb_folio(h, new_folio, false); 2983 2984 return ret; 2985 } 2986 2987 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) 2988 { 2989 struct hstate *h; 2990 struct folio *folio = page_folio(page); 2991 int ret = -EBUSY; 2992 2993 /* 2994 * The page might have been dissolved from under our feet, so make sure 2995 * to carefully check the state under the lock. 2996 * Return success when racing as if we dissolved the page ourselves. 2997 */ 2998 spin_lock_irq(&hugetlb_lock); 2999 if (folio_test_hugetlb(folio)) { 3000 h = folio_hstate(folio); 3001 } else { 3002 spin_unlock_irq(&hugetlb_lock); 3003 return 0; 3004 } 3005 spin_unlock_irq(&hugetlb_lock); 3006 3007 /* 3008 * Fence off gigantic pages as there is a cyclic dependency between 3009 * alloc_contig_range and them. Return -ENOMEM as this has the effect 3010 * of bailing out right away without further retrying. 3011 */ 3012 if (hstate_is_gigantic(h)) 3013 return -ENOMEM; 3014 3015 if (folio_ref_count(folio) && isolate_hugetlb(folio, list)) 3016 ret = 0; 3017 else if (!folio_ref_count(folio)) 3018 ret = alloc_and_dissolve_hugetlb_folio(h, folio, list); 3019 3020 return ret; 3021 } 3022 3023 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, 3024 unsigned long addr, int avoid_reserve) 3025 { 3026 struct hugepage_subpool *spool = subpool_vma(vma); 3027 struct hstate *h = hstate_vma(vma); 3028 struct folio *folio; 3029 long map_chg, map_commit; 3030 long gbl_chg; 3031 int ret, idx; 3032 struct hugetlb_cgroup *h_cg = NULL; 3033 bool deferred_reserve; 3034 3035 idx = hstate_index(h); 3036 /* 3037 * Examine the region/reserve map to determine if the process 3038 * has a reservation for the page to be allocated. A return 3039 * code of zero indicates a reservation exists (no change). 3040 */ 3041 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); 3042 if (map_chg < 0) 3043 return ERR_PTR(-ENOMEM); 3044 3045 /* 3046 * Processes that did not create the mapping will have no 3047 * reserves as indicated by the region/reserve map. Check 3048 * that the allocation will not exceed the subpool limit. 3049 * Allocations for MAP_NORESERVE mappings also need to be 3050 * checked against any subpool limit. 3051 */ 3052 if (map_chg || avoid_reserve) { 3053 gbl_chg = hugepage_subpool_get_pages(spool, 1); 3054 if (gbl_chg < 0) { 3055 vma_end_reservation(h, vma, addr); 3056 return ERR_PTR(-ENOSPC); 3057 } 3058 3059 /* 3060 * Even though there was no reservation in the region/reserve 3061 * map, there could be reservations associated with the 3062 * subpool that can be used. This would be indicated if the 3063 * return value of hugepage_subpool_get_pages() is zero. 3064 * However, if avoid_reserve is specified we still avoid even 3065 * the subpool reservations. 
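 *
 * In short (a summary of the surrounding logic): gbl_chg == 0 means the
 * request is already covered by a VMA or subpool reservation, while
 * gbl_chg > 0 means a page must come out of the global free pool;
 * avoid_reserve forces the latter below.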
3066 */ 3067 if (avoid_reserve) 3068 gbl_chg = 1; 3069 } 3070 3071 /* If this allocation is not consuming a reservation, charge it now. 3072 */ 3073 deferred_reserve = map_chg || avoid_reserve; 3074 if (deferred_reserve) { 3075 ret = hugetlb_cgroup_charge_cgroup_rsvd( 3076 idx, pages_per_huge_page(h), &h_cg); 3077 if (ret) 3078 goto out_subpool_put; 3079 } 3080 3081 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); 3082 if (ret) 3083 goto out_uncharge_cgroup_reservation; 3084 3085 spin_lock_irq(&hugetlb_lock); 3086 /* 3087 * gbl_chg is passed to indicate whether or not a page must be taken 3088 * from the global free pool (global change). gbl_chg == 0 indicates 3089 * a reservation exists for the allocation. 3090 */ 3091 folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg); 3092 if (!folio) { 3093 spin_unlock_irq(&hugetlb_lock); 3094 folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr); 3095 if (!folio) 3096 goto out_uncharge_cgroup; 3097 spin_lock_irq(&hugetlb_lock); 3098 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) { 3099 folio_set_hugetlb_restore_reserve(folio); 3100 h->resv_huge_pages--; 3101 } 3102 list_add(&folio->lru, &h->hugepage_activelist); 3103 folio_ref_unfreeze(folio, 1); 3104 /* Fall through */ 3105 } 3106 3107 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio); 3108 /* If the allocation is not consuming a reservation, also store the 3109 * hugetlb_cgroup pointer on the page. 3110 */ 3111 if (deferred_reserve) { 3112 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h), 3113 h_cg, folio); 3114 } 3115 3116 spin_unlock_irq(&hugetlb_lock); 3117 3118 hugetlb_set_folio_subpool(folio, spool); 3119 3120 map_commit = vma_commit_reservation(h, vma, addr); 3121 if (unlikely(map_chg > map_commit)) { 3122 /* 3123 * The page was added to the reservation map between 3124 * vma_needs_reservation and vma_commit_reservation. 3125 * This indicates a race with hugetlb_reserve_pages. 3126 * Adjust for the subpool count incremented above AND 3127 * in hugetlb_reserve_pages for the same page. Also, 3128 * the reservation count added in hugetlb_reserve_pages 3129 * no longer applies.
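 *
 * The cleanup below therefore returns one page to the subpool; whatever
 * portion of that page the subpool does not keep for its own minimum
 * reserve is reported back by hugepage_subpool_put_pages() and released
 * from the global reservation via hugetlb_acct_memory().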
3130 */ 3131 long rsv_adjust; 3132 3133 rsv_adjust = hugepage_subpool_put_pages(spool, 1); 3134 hugetlb_acct_memory(h, -rsv_adjust); 3135 if (deferred_reserve) 3136 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), 3137 pages_per_huge_page(h), folio); 3138 } 3139 return folio; 3140 3141 out_uncharge_cgroup: 3142 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); 3143 out_uncharge_cgroup_reservation: 3144 if (deferred_reserve) 3145 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h), 3146 h_cg); 3147 out_subpool_put: 3148 if (map_chg || avoid_reserve) 3149 hugepage_subpool_put_pages(spool, 1); 3150 vma_end_reservation(h, vma, addr); 3151 return ERR_PTR(-ENOSPC); 3152 } 3153 3154 int alloc_bootmem_huge_page(struct hstate *h, int nid) 3155 __attribute__ ((weak, alias("__alloc_bootmem_huge_page"))); 3156 int __alloc_bootmem_huge_page(struct hstate *h, int nid) 3157 { 3158 struct huge_bootmem_page *m = NULL; /* initialize for clang */ 3159 int nr_nodes, node; 3160 3161 /* do node specific alloc */ 3162 if (nid != NUMA_NO_NODE) { 3163 m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h), 3164 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid); 3165 if (!m) 3166 return 0; 3167 goto found; 3168 } 3169 /* allocate from next node when distributing huge pages */ 3170 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { 3171 m = memblock_alloc_try_nid_raw( 3172 huge_page_size(h), huge_page_size(h), 3173 0, MEMBLOCK_ALLOC_ACCESSIBLE, node); 3174 /* 3175 * Use the beginning of the huge page to store the 3176 * huge_bootmem_page struct (until gather_bootmem 3177 * puts them into the mem_map). 3178 */ 3179 if (!m) 3180 return 0; 3181 goto found; 3182 } 3183 3184 found: 3185 /* Put them into a private list first because mem_map is not up yet */ 3186 INIT_LIST_HEAD(&m->list); 3187 list_add(&m->list, &huge_boot_pages); 3188 m->hstate = h; 3189 return 1; 3190 } 3191 3192 /* 3193 * Put bootmem huge pages into the standard lists after mem_map is up. 3194 * Note: This only applies to gigantic (order > MAX_ORDER) pages. 3195 */ 3196 static void __init gather_bootmem_prealloc(void) 3197 { 3198 struct huge_bootmem_page *m; 3199 3200 list_for_each_entry(m, &huge_boot_pages, list) { 3201 struct page *page = virt_to_page(m); 3202 struct folio *folio = page_folio(page); 3203 struct hstate *h = m->hstate; 3204 3205 VM_BUG_ON(!hstate_is_gigantic(h)); 3206 WARN_ON(folio_ref_count(folio) != 1); 3207 if (prep_compound_gigantic_folio(folio, huge_page_order(h))) { 3208 WARN_ON(folio_test_reserved(folio)); 3209 prep_new_hugetlb_folio(h, folio, folio_nid(folio)); 3210 free_huge_page(page); /* add to the hugepage allocator */ 3211 } else { 3212 /* VERY unlikely inflated ref count on a tail page */ 3213 free_gigantic_folio(folio, huge_page_order(h)); 3214 } 3215 3216 /* 3217 * We need to restore the 'stolen' pages to totalram_pages 3218 * in order to fix confusing memory reports from free(1) and 3219 * other side-effects, like CommitLimit going negative. 
3220 */ 3221 adjust_managed_page_count(page, pages_per_huge_page(h)); 3222 cond_resched(); 3223 } 3224 } 3225 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid) 3226 { 3227 unsigned long i; 3228 char buf[32]; 3229 3230 for (i = 0; i < h->max_huge_pages_node[nid]; ++i) { 3231 if (hstate_is_gigantic(h)) { 3232 if (!alloc_bootmem_huge_page(h, nid)) 3233 break; 3234 } else { 3235 struct folio *folio; 3236 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 3237 3238 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, 3239 &node_states[N_MEMORY], NULL); 3240 if (!folio) 3241 break; 3242 free_huge_page(&folio->page); /* free it into the hugepage allocator */ 3243 } 3244 cond_resched(); 3245 } 3246 if (i == h->max_huge_pages_node[nid]) 3247 return; 3248 3249 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3250 pr_warn("HugeTLB: allocating %u of page size %s failed node%d. Only allocated %lu hugepages.\n", 3251 h->max_huge_pages_node[nid], buf, nid, i); 3252 h->max_huge_pages -= (h->max_huge_pages_node[nid] - i); 3253 h->max_huge_pages_node[nid] = i; 3254 } 3255 3256 static void __init hugetlb_hstate_alloc_pages(struct hstate *h) 3257 { 3258 unsigned long i; 3259 nodemask_t *node_alloc_noretry; 3260 bool node_specific_alloc = false; 3261 3262 /* skip gigantic hugepages allocation if hugetlb_cma enabled */ 3263 if (hstate_is_gigantic(h) && hugetlb_cma_size) { 3264 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n"); 3265 return; 3266 } 3267 3268 /* do node specific alloc */ 3269 for_each_online_node(i) { 3270 if (h->max_huge_pages_node[i] > 0) { 3271 hugetlb_hstate_alloc_pages_onenode(h, i); 3272 node_specific_alloc = true; 3273 } 3274 } 3275 3276 if (node_specific_alloc) 3277 return; 3278 3279 /* below will do all node balanced alloc */ 3280 if (!hstate_is_gigantic(h)) { 3281 /* 3282 * Bit mask controlling how hard we retry per-node allocations. 3283 * Ignore errors as lower level routines can deal with 3284 * node_alloc_noretry == NULL. If this kmalloc fails at boot 3285 * time, we are likely in bigger trouble. 3286 */ 3287 node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry), 3288 GFP_KERNEL); 3289 } else { 3290 /* allocations done at boot time */ 3291 node_alloc_noretry = NULL; 3292 } 3293 3294 /* bit mask controlling how hard we retry per-node allocations */ 3295 if (node_alloc_noretry) 3296 nodes_clear(*node_alloc_noretry); 3297 3298 for (i = 0; i < h->max_huge_pages; ++i) { 3299 if (hstate_is_gigantic(h)) { 3300 if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE)) 3301 break; 3302 } else if (!alloc_pool_huge_page(h, 3303 &node_states[N_MEMORY], 3304 node_alloc_noretry)) 3305 break; 3306 cond_resched(); 3307 } 3308 if (i < h->max_huge_pages) { 3309 char buf[32]; 3310 3311 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3312 pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n", 3313 h->max_huge_pages, buf, i); 3314 h->max_huge_pages = i; 3315 } 3316 kfree(node_alloc_noretry); 3317 } 3318 3319 static void __init hugetlb_init_hstates(void) 3320 { 3321 struct hstate *h, *h2; 3322 3323 for_each_hstate(h) { 3324 /* oversize hugepages were init'ed in early boot */ 3325 if (!hstate_is_gigantic(h)) 3326 hugetlb_hstate_alloc_pages(h); 3327 3328 /* 3329 * Set demote order for each hstate. Note that 3330 * h->demote_order is initially 0. 3331 * - We can not demote gigantic pages if runtime freeing 3332 * is not supported, so skip this. 
3333 * - If CMA allocation is possible, we can not demote 3334 * HUGETLB_PAGE_ORDER or smaller size pages. 3335 */ 3336 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 3337 continue; 3338 if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER) 3339 continue; 3340 for_each_hstate(h2) { 3341 if (h2 == h) 3342 continue; 3343 if (h2->order < h->order && 3344 h2->order > h->demote_order) 3345 h->demote_order = h2->order; 3346 } 3347 } 3348 } 3349 3350 static void __init report_hugepages(void) 3351 { 3352 struct hstate *h; 3353 3354 for_each_hstate(h) { 3355 char buf[32]; 3356 3357 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3358 pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n", 3359 buf, h->free_huge_pages); 3360 pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n", 3361 hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf); 3362 } 3363 } 3364 3365 #ifdef CONFIG_HIGHMEM 3366 static void try_to_free_low(struct hstate *h, unsigned long count, 3367 nodemask_t *nodes_allowed) 3368 { 3369 int i; 3370 LIST_HEAD(page_list); 3371 3372 lockdep_assert_held(&hugetlb_lock); 3373 if (hstate_is_gigantic(h)) 3374 return; 3375 3376 /* 3377 * Collect pages to be freed on a list, and free after dropping lock 3378 */ 3379 for_each_node_mask(i, *nodes_allowed) { 3380 struct page *page, *next; 3381 struct list_head *freel = &h->hugepage_freelists[i]; 3382 list_for_each_entry_safe(page, next, freel, lru) { 3383 if (count >= h->nr_huge_pages) 3384 goto out; 3385 if (PageHighMem(page)) 3386 continue; 3387 remove_hugetlb_folio(h, page_folio(page), false); 3388 list_add(&page->lru, &page_list); 3389 } 3390 } 3391 3392 out: 3393 spin_unlock_irq(&hugetlb_lock); 3394 update_and_free_pages_bulk(h, &page_list); 3395 spin_lock_irq(&hugetlb_lock); 3396 } 3397 #else 3398 static inline void try_to_free_low(struct hstate *h, unsigned long count, 3399 nodemask_t *nodes_allowed) 3400 { 3401 } 3402 #endif 3403 3404 /* 3405 * Increment or decrement surplus_huge_pages. Keep node-specific counters 3406 * balanced by operating on them in a round-robin fashion. 3407 * Returns 1 if an adjustment was made. 3408 */ 3409 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, 3410 int delta) 3411 { 3412 int nr_nodes, node; 3413 3414 lockdep_assert_held(&hugetlb_lock); 3415 VM_BUG_ON(delta != -1 && delta != 1); 3416 3417 if (delta < 0) { 3418 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { 3419 if (h->surplus_huge_pages_node[node]) 3420 goto found; 3421 } 3422 } else { 3423 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 3424 if (h->surplus_huge_pages_node[node] < 3425 h->nr_huge_pages_node[node]) 3426 goto found; 3427 } 3428 } 3429 return 0; 3430 3431 found: 3432 h->surplus_huge_pages += delta; 3433 h->surplus_huge_pages_node[node] += delta; 3434 return 1; 3435 } 3436 3437 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) 3438 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, 3439 nodemask_t *nodes_allowed) 3440 { 3441 unsigned long min_count, ret; 3442 struct page *page; 3443 LIST_HEAD(page_list); 3444 NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL); 3445 3446 /* 3447 * Bit mask controlling how hard we retry per-node allocations. 3448 * If we can not allocate the bit mask, do not attempt to allocate 3449 * the requested huge pages. 
3450 */ 3451 if (node_alloc_noretry) 3452 nodes_clear(*node_alloc_noretry); 3453 else 3454 return -ENOMEM; 3455 3456 /* 3457 * The resize_lock mutex prevents concurrent adjustments to the number of 3458 * pages in the hstate via the proc/sysfs interfaces. 3459 */ 3460 mutex_lock(&h->resize_lock); 3461 flush_free_hpage_work(h); 3462 spin_lock_irq(&hugetlb_lock); 3463 3464 /* 3465 * Check for a node specific request. 3466 * Changing node specific huge page count may require a corresponding 3467 * change to the global count. In any case, the passed node mask 3468 * (nodes_allowed) will restrict alloc/free to the specified node. 3469 */ 3470 if (nid != NUMA_NO_NODE) { 3471 unsigned long old_count = count; 3472 3473 count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; 3474 /* 3475 * The user may have specified a large count value which caused the 3476 * above calculation to overflow. In this case, they wanted 3477 * to allocate as many huge pages as possible. Set count to 3478 * the largest possible value to align with their intention. 3479 */ 3480 if (count < old_count) 3481 count = ULONG_MAX; 3482 } 3483 3484 /* 3485 * Runtime allocation of gigantic pages depends on the capability for large 3486 * page range allocation. 3487 * If the system does not provide this feature, return an error when 3488 * the user tries to allocate gigantic pages but let the user free the 3489 * boot-time allocated gigantic pages. 3490 */ 3491 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) { 3492 if (count > persistent_huge_pages(h)) { 3493 spin_unlock_irq(&hugetlb_lock); 3494 mutex_unlock(&h->resize_lock); 3495 NODEMASK_FREE(node_alloc_noretry); 3496 return -EINVAL; 3497 } 3498 /* Fall through to decrease pool */ 3499 } 3500 3501 /* 3502 * Increase the pool size 3503 * First take pages out of surplus state. Then make up the 3504 * remaining difference by allocating fresh huge pages. 3505 * 3506 * We might race with alloc_surplus_hugetlb_folio() here and be unable 3507 * to convert a surplus huge page to a normal huge page. That is 3508 * not critical, though, it just means the overall size of the 3509 * pool might be one hugepage larger than it needs to be, but 3510 * within all the constraints specified by the sysctls. 3511 */ 3512 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { 3513 if (!adjust_pool_surplus(h, nodes_allowed, -1)) 3514 break; 3515 } 3516 3517 while (count > persistent_huge_pages(h)) { 3518 /* 3519 * If this allocation races such that we no longer need the 3520 * page, free_huge_page will handle it by freeing the page 3521 * and reducing the surplus. 3522 */ 3523 spin_unlock_irq(&hugetlb_lock); 3524 3525 /* yield cpu to avoid soft lockup */ 3526 cond_resched(); 3527 3528 ret = alloc_pool_huge_page(h, nodes_allowed, 3529 node_alloc_noretry); 3530 spin_lock_irq(&hugetlb_lock); 3531 if (!ret) 3532 goto out; 3533 3534 /* Bail for signals. Probably ctrl-c from user */ 3535 if (signal_pending(current)) 3536 goto out; 3537 } 3538 3539 /* 3540 * Decrease the pool size 3541 * First return free pages to the buddy allocator (being careful 3542 * to keep enough around to satisfy reservations). Then place 3543 * pages into surplus state as needed so the pool will shrink 3544 * to the desired size as pages become free. 3545 * 3546 * By placing pages into the surplus state independent of the 3547 * overcommit value, we are allowing the surplus pool size to 3548 * exceed overcommit. There are few sane options here.
Since 3549 * alloc_surplus_hugetlb_folio() is checking the global counter, 3550 * though, we'll note that we're not allowed to exceed surplus 3551 * and won't grow the pool anywhere else, not until one of the 3552 * sysctls is changed or the surplus pages go out of use. 3553 */ 3554 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; 3555 min_count = max(count, min_count); 3556 try_to_free_low(h, min_count, nodes_allowed); 3557 3558 /* 3559 * Collect pages to be removed on a list without dropping the lock 3560 */ 3561 while (min_count < persistent_huge_pages(h)) { 3562 page = remove_pool_huge_page(h, nodes_allowed, 0); 3563 if (!page) 3564 break; 3565 3566 list_add(&page->lru, &page_list); 3567 } 3568 /* free the pages after dropping lock */ 3569 spin_unlock_irq(&hugetlb_lock); 3570 update_and_free_pages_bulk(h, &page_list); 3571 flush_free_hpage_work(h); 3572 spin_lock_irq(&hugetlb_lock); 3573 3574 while (count < persistent_huge_pages(h)) { 3575 if (!adjust_pool_surplus(h, nodes_allowed, 1)) 3576 break; 3577 } 3578 out: 3579 h->max_huge_pages = persistent_huge_pages(h); 3580 spin_unlock_irq(&hugetlb_lock); 3581 mutex_unlock(&h->resize_lock); 3582 3583 NODEMASK_FREE(node_alloc_noretry); 3584 3585 return 0; 3586 } 3587 3588 static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio) 3589 { 3590 int i, nid = folio_nid(folio); 3591 struct hstate *target_hstate; 3592 struct page *subpage; 3593 struct folio *inner_folio; 3594 int rc = 0; 3595 3596 target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order); 3597 3598 remove_hugetlb_folio_for_demote(h, folio, false); 3599 spin_unlock_irq(&hugetlb_lock); 3600 3601 rc = hugetlb_vmemmap_restore(h, &folio->page); 3602 if (rc) { 3603 /* Allocation of vmemmap failed, we can not demote the folio */ 3604 spin_lock_irq(&hugetlb_lock); 3605 folio_ref_unfreeze(folio, 1); 3606 add_hugetlb_folio(h, folio, false); 3607 return rc; 3608 } 3609 3610 /* 3611 * Use destroy_compound_hugetlb_folio_for_demote for all huge page 3612 * sizes as it will not ref count folios. 3613 */ 3614 destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h)); 3615 3616 /* 3617 * Taking the target hstate mutex synchronizes with set_max_huge_pages. 3618 * Without the mutex, pages added to the target hstate could be marked 3619 * as surplus. 3620 * 3621 * Note that we already hold h->resize_lock. To prevent deadlock, 3622 * use the convention of always taking the larger size hstate mutex first. 3623 */ 3624 mutex_lock(&target_hstate->resize_lock); 3625 for (i = 0; i < pages_per_huge_page(h); 3626 i += pages_per_huge_page(target_hstate)) { 3627 subpage = folio_page(folio, i); 3628 inner_folio = page_folio(subpage); 3629 if (hstate_is_gigantic(target_hstate)) 3630 prep_compound_gigantic_folio_for_demote(inner_folio, 3631 target_hstate->order); 3632 else 3633 prep_compound_page(subpage, target_hstate->order); 3634 folio_change_private(inner_folio, NULL); 3635 prep_new_hugetlb_folio(target_hstate, inner_folio, nid); 3636 free_huge_page(subpage); 3637 } 3638 mutex_unlock(&target_hstate->resize_lock); 3639 3640 spin_lock_irq(&hugetlb_lock); 3641 3642 /* 3643 * Not absolutely necessary, but for consistency update max_huge_pages 3644 * based on pool changes for the demoted page.
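 *
 * For example (assuming x86-64 with a 1G page demoted to 2M pages):
 * max_huge_pages of the 1G hstate drops by one while the 2M hstate
 * gains 262144 / 512 = 512 pages, matching the arithmetic below.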
3645 */ 3646 h->max_huge_pages--; 3647 target_hstate->max_huge_pages += 3648 pages_per_huge_page(h) / pages_per_huge_page(target_hstate); 3649 3650 return rc; 3651 } 3652 3653 static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed) 3654 __must_hold(&hugetlb_lock) 3655 { 3656 int nr_nodes, node; 3657 struct folio *folio; 3658 3659 lockdep_assert_held(&hugetlb_lock); 3660 3661 /* We should never get here if no demote order */ 3662 if (!h->demote_order) { 3663 pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n"); 3664 return -EINVAL; /* internal error */ 3665 } 3666 3667 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 3668 list_for_each_entry(folio, &h->hugepage_freelists[node], lru) { 3669 if (folio_test_hwpoison(folio)) 3670 continue; 3671 return demote_free_hugetlb_folio(h, folio); 3672 } 3673 } 3674 3675 /* 3676 * Only way to get here is if all pages on free lists are poisoned. 3677 * Return -EBUSY so that caller will not retry. 3678 */ 3679 return -EBUSY; 3680 } 3681 3682 #define HSTATE_ATTR_RO(_name) \ 3683 static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 3684 3685 #define HSTATE_ATTR_WO(_name) \ 3686 static struct kobj_attribute _name##_attr = __ATTR_WO(_name) 3687 3688 #define HSTATE_ATTR(_name) \ 3689 static struct kobj_attribute _name##_attr = __ATTR_RW(_name) 3690 3691 static struct kobject *hugepages_kobj; 3692 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 3693 3694 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); 3695 3696 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) 3697 { 3698 int i; 3699 3700 for (i = 0; i < HUGE_MAX_HSTATE; i++) 3701 if (hstate_kobjs[i] == kobj) { 3702 if (nidp) 3703 *nidp = NUMA_NO_NODE; 3704 return &hstates[i]; 3705 } 3706 3707 return kobj_to_node_hstate(kobj, nidp); 3708 } 3709 3710 static ssize_t nr_hugepages_show_common(struct kobject *kobj, 3711 struct kobj_attribute *attr, char *buf) 3712 { 3713 struct hstate *h; 3714 unsigned long nr_huge_pages; 3715 int nid; 3716 3717 h = kobj_to_hstate(kobj, &nid); 3718 if (nid == NUMA_NO_NODE) 3719 nr_huge_pages = h->nr_huge_pages; 3720 else 3721 nr_huge_pages = h->nr_huge_pages_node[nid]; 3722 3723 return sysfs_emit(buf, "%lu\n", nr_huge_pages); 3724 } 3725 3726 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy, 3727 struct hstate *h, int nid, 3728 unsigned long count, size_t len) 3729 { 3730 int err; 3731 nodemask_t nodes_allowed, *n_mask; 3732 3733 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 3734 return -EINVAL; 3735 3736 if (nid == NUMA_NO_NODE) { 3737 /* 3738 * global hstate attribute 3739 */ 3740 if (!(obey_mempolicy && 3741 init_nodemask_of_mempolicy(&nodes_allowed))) 3742 n_mask = &node_states[N_MEMORY]; 3743 else 3744 n_mask = &nodes_allowed; 3745 } else { 3746 /* 3747 * Node specific request. count adjustment happens in 3748 * set_max_huge_pages() after acquiring hugetlb_lock. 3749 */ 3750 init_nodemask_of_node(&nodes_allowed, nid); 3751 n_mask = &nodes_allowed; 3752 } 3753 3754 err = set_max_huge_pages(h, count, nid, n_mask); 3755 3756 return err ? 
err : len; 3757 } 3758 3759 static ssize_t nr_hugepages_store_common(bool obey_mempolicy, 3760 struct kobject *kobj, const char *buf, 3761 size_t len) 3762 { 3763 struct hstate *h; 3764 unsigned long count; 3765 int nid; 3766 int err; 3767 3768 err = kstrtoul(buf, 10, &count); 3769 if (err) 3770 return err; 3771 3772 h = kobj_to_hstate(kobj, &nid); 3773 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); 3774 } 3775 3776 static ssize_t nr_hugepages_show(struct kobject *kobj, 3777 struct kobj_attribute *attr, char *buf) 3778 { 3779 return nr_hugepages_show_common(kobj, attr, buf); 3780 } 3781 3782 static ssize_t nr_hugepages_store(struct kobject *kobj, 3783 struct kobj_attribute *attr, const char *buf, size_t len) 3784 { 3785 return nr_hugepages_store_common(false, kobj, buf, len); 3786 } 3787 HSTATE_ATTR(nr_hugepages); 3788 3789 #ifdef CONFIG_NUMA 3790 3791 /* 3792 * hstate attribute for optionally mempolicy-based constraint on persistent 3793 * huge page alloc/free. 3794 */ 3795 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, 3796 struct kobj_attribute *attr, 3797 char *buf) 3798 { 3799 return nr_hugepages_show_common(kobj, attr, buf); 3800 } 3801 3802 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, 3803 struct kobj_attribute *attr, const char *buf, size_t len) 3804 { 3805 return nr_hugepages_store_common(true, kobj, buf, len); 3806 } 3807 HSTATE_ATTR(nr_hugepages_mempolicy); 3808 #endif 3809 3810 3811 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 3812 struct kobj_attribute *attr, char *buf) 3813 { 3814 struct hstate *h = kobj_to_hstate(kobj, NULL); 3815 return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages); 3816 } 3817 3818 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 3819 struct kobj_attribute *attr, const char *buf, size_t count) 3820 { 3821 int err; 3822 unsigned long input; 3823 struct hstate *h = kobj_to_hstate(kobj, NULL); 3824 3825 if (hstate_is_gigantic(h)) 3826 return -EINVAL; 3827 3828 err = kstrtoul(buf, 10, &input); 3829 if (err) 3830 return err; 3831 3832 spin_lock_irq(&hugetlb_lock); 3833 h->nr_overcommit_huge_pages = input; 3834 spin_unlock_irq(&hugetlb_lock); 3835 3836 return count; 3837 } 3838 HSTATE_ATTR(nr_overcommit_hugepages); 3839 3840 static ssize_t free_hugepages_show(struct kobject *kobj, 3841 struct kobj_attribute *attr, char *buf) 3842 { 3843 struct hstate *h; 3844 unsigned long free_huge_pages; 3845 int nid; 3846 3847 h = kobj_to_hstate(kobj, &nid); 3848 if (nid == NUMA_NO_NODE) 3849 free_huge_pages = h->free_huge_pages; 3850 else 3851 free_huge_pages = h->free_huge_pages_node[nid]; 3852 3853 return sysfs_emit(buf, "%lu\n", free_huge_pages); 3854 } 3855 HSTATE_ATTR_RO(free_hugepages); 3856 3857 static ssize_t resv_hugepages_show(struct kobject *kobj, 3858 struct kobj_attribute *attr, char *buf) 3859 { 3860 struct hstate *h = kobj_to_hstate(kobj, NULL); 3861 return sysfs_emit(buf, "%lu\n", h->resv_huge_pages); 3862 } 3863 HSTATE_ATTR_RO(resv_hugepages); 3864 3865 static ssize_t surplus_hugepages_show(struct kobject *kobj, 3866 struct kobj_attribute *attr, char *buf) 3867 { 3868 struct hstate *h; 3869 unsigned long surplus_huge_pages; 3870 int nid; 3871 3872 h = kobj_to_hstate(kobj, &nid); 3873 if (nid == NUMA_NO_NODE) 3874 surplus_huge_pages = h->surplus_huge_pages; 3875 else 3876 surplus_huge_pages = h->surplus_huge_pages_node[nid]; 3877 3878 return sysfs_emit(buf, "%lu\n", surplus_huge_pages); 3879 } 3880 HSTATE_ATTR_RO(surplus_hugepages); 3881 3882 static 
ssize_t demote_store(struct kobject *kobj, 3883 struct kobj_attribute *attr, const char *buf, size_t len) 3884 { 3885 unsigned long nr_demote; 3886 unsigned long nr_available; 3887 nodemask_t nodes_allowed, *n_mask; 3888 struct hstate *h; 3889 int err; 3890 int nid; 3891 3892 err = kstrtoul(buf, 10, &nr_demote); 3893 if (err) 3894 return err; 3895 h = kobj_to_hstate(kobj, &nid); 3896 3897 if (nid != NUMA_NO_NODE) { 3898 init_nodemask_of_node(&nodes_allowed, nid); 3899 n_mask = &nodes_allowed; 3900 } else { 3901 n_mask = &node_states[N_MEMORY]; 3902 } 3903 3904 /* Synchronize with other sysfs operations modifying huge pages */ 3905 mutex_lock(&h->resize_lock); 3906 spin_lock_irq(&hugetlb_lock); 3907 3908 while (nr_demote) { 3909 /* 3910 * Check for available pages to demote each time through the 3911 * loop as demote_pool_huge_page will drop hugetlb_lock. 3912 */ 3913 if (nid != NUMA_NO_NODE) 3914 nr_available = h->free_huge_pages_node[nid]; 3915 else 3916 nr_available = h->free_huge_pages; 3917 nr_available -= h->resv_huge_pages; 3918 if (!nr_available) 3919 break; 3920 3921 err = demote_pool_huge_page(h, n_mask); 3922 if (err) 3923 break; 3924 3925 nr_demote--; 3926 } 3927 3928 spin_unlock_irq(&hugetlb_lock); 3929 mutex_unlock(&h->resize_lock); 3930 3931 if (err) 3932 return err; 3933 return len; 3934 } 3935 HSTATE_ATTR_WO(demote); 3936 3937 static ssize_t demote_size_show(struct kobject *kobj, 3938 struct kobj_attribute *attr, char *buf) 3939 { 3940 struct hstate *h = kobj_to_hstate(kobj, NULL); 3941 unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K; 3942 3943 return sysfs_emit(buf, "%lukB\n", demote_size); 3944 } 3945 3946 static ssize_t demote_size_store(struct kobject *kobj, 3947 struct kobj_attribute *attr, 3948 const char *buf, size_t count) 3949 { 3950 struct hstate *h, *demote_hstate; 3951 unsigned long demote_size; 3952 unsigned int demote_order; 3953 3954 demote_size = (unsigned long)memparse(buf, NULL); 3955 3956 demote_hstate = size_to_hstate(demote_size); 3957 if (!demote_hstate) 3958 return -EINVAL; 3959 demote_order = demote_hstate->order; 3960 if (demote_order < HUGETLB_PAGE_ORDER) 3961 return -EINVAL; 3962 3963 /* demote order must be smaller than hstate order */ 3964 h = kobj_to_hstate(kobj, NULL); 3965 if (demote_order >= h->order) 3966 return -EINVAL; 3967 3968 /* resize_lock synchronizes access to demote size and writes */ 3969 mutex_lock(&h->resize_lock); 3970 h->demote_order = demote_order; 3971 mutex_unlock(&h->resize_lock); 3972 3973 return count; 3974 } 3975 HSTATE_ATTR(demote_size); 3976 3977 static struct attribute *hstate_attrs[] = { 3978 &nr_hugepages_attr.attr, 3979 &nr_overcommit_hugepages_attr.attr, 3980 &free_hugepages_attr.attr, 3981 &resv_hugepages_attr.attr, 3982 &surplus_hugepages_attr.attr, 3983 #ifdef CONFIG_NUMA 3984 &nr_hugepages_mempolicy_attr.attr, 3985 #endif 3986 NULL, 3987 }; 3988 3989 static const struct attribute_group hstate_attr_group = { 3990 .attrs = hstate_attrs, 3991 }; 3992 3993 static struct attribute *hstate_demote_attrs[] = { 3994 &demote_size_attr.attr, 3995 &demote_attr.attr, 3996 NULL, 3997 }; 3998 3999 static const struct attribute_group hstate_demote_attr_group = { 4000 .attrs = hstate_demote_attrs, 4001 }; 4002 4003 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, 4004 struct kobject **hstate_kobjs, 4005 const struct attribute_group *hstate_attr_group) 4006 { 4007 int retval; 4008 int hi = hstate_index(h); 4009 4010 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
4011 if (!hstate_kobjs[hi]) 4012 return -ENOMEM; 4013 4014 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); 4015 if (retval) { 4016 kobject_put(hstate_kobjs[hi]); 4017 hstate_kobjs[hi] = NULL; 4018 return retval; 4019 } 4020 4021 if (h->demote_order) { 4022 retval = sysfs_create_group(hstate_kobjs[hi], 4023 &hstate_demote_attr_group); 4024 if (retval) { 4025 pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name); 4026 sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group); 4027 kobject_put(hstate_kobjs[hi]); 4028 hstate_kobjs[hi] = NULL; 4029 return retval; 4030 } 4031 } 4032 4033 return 0; 4034 } 4035 4036 #ifdef CONFIG_NUMA 4037 static bool hugetlb_sysfs_initialized __ro_after_init; 4038 4039 /* 4040 * node_hstate/s - associate per node hstate attributes, via their kobjects, 4041 * with node devices in node_devices[] using a parallel array. The array 4042 * index of a node device or _hstate == node id. 4043 * This is here to avoid any static dependency of the node device driver, in 4044 * the base kernel, on the hugetlb module. 4045 */ 4046 struct node_hstate { 4047 struct kobject *hugepages_kobj; 4048 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 4049 }; 4050 static struct node_hstate node_hstates[MAX_NUMNODES]; 4051 4052 /* 4053 * A subset of global hstate attributes for node devices 4054 */ 4055 static struct attribute *per_node_hstate_attrs[] = { 4056 &nr_hugepages_attr.attr, 4057 &free_hugepages_attr.attr, 4058 &surplus_hugepages_attr.attr, 4059 NULL, 4060 }; 4061 4062 static const struct attribute_group per_node_hstate_attr_group = { 4063 .attrs = per_node_hstate_attrs, 4064 }; 4065 4066 /* 4067 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. 4068 * Returns node id via non-NULL nidp. 4069 */ 4070 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 4071 { 4072 int nid; 4073 4074 for (nid = 0; nid < nr_node_ids; nid++) { 4075 struct node_hstate *nhs = &node_hstates[nid]; 4076 int i; 4077 for (i = 0; i < HUGE_MAX_HSTATE; i++) 4078 if (nhs->hstate_kobjs[i] == kobj) { 4079 if (nidp) 4080 *nidp = nid; 4081 return &hstates[i]; 4082 } 4083 } 4084 4085 BUG(); 4086 return NULL; 4087 } 4088 4089 /* 4090 * Unregister hstate attributes from a single node device. 4091 * No-op if no hstate attributes attached. 4092 */ 4093 void hugetlb_unregister_node(struct node *node) 4094 { 4095 struct hstate *h; 4096 struct node_hstate *nhs = &node_hstates[node->dev.id]; 4097 4098 if (!nhs->hugepages_kobj) 4099 return; /* no hstate attributes */ 4100 4101 for_each_hstate(h) { 4102 int idx = hstate_index(h); 4103 struct kobject *hstate_kobj = nhs->hstate_kobjs[idx]; 4104 4105 if (!hstate_kobj) 4106 continue; 4107 if (h->demote_order) 4108 sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group); 4109 sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group); 4110 kobject_put(hstate_kobj); 4111 nhs->hstate_kobjs[idx] = NULL; 4112 } 4113 4114 kobject_put(nhs->hugepages_kobj); 4115 nhs->hugepages_kobj = NULL; 4116 } 4117 4118 4119 /* 4120 * Register hstate attributes for a single node device. 4121 * No-op if attributes already registered. 
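* Besides hugetlb_register_all_nodes() below, this is also reachable from the base node device driver when a node device is registered (which is why it and hugetlb_unregister_node() are not static); the hugetlb_sysfs_initialized check below simply turns any call that arrives before hugetlb_init() has created the sysfs hierarchy into a no-op.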
4122 */ 4123 void hugetlb_register_node(struct node *node) 4124 { 4125 struct hstate *h; 4126 struct node_hstate *nhs = &node_hstates[node->dev.id]; 4127 int err; 4128 4129 if (!hugetlb_sysfs_initialized) 4130 return; 4131 4132 if (nhs->hugepages_kobj) 4133 return; /* already allocated */ 4134 4135 nhs->hugepages_kobj = kobject_create_and_add("hugepages", 4136 &node->dev.kobj); 4137 if (!nhs->hugepages_kobj) 4138 return; 4139 4140 for_each_hstate(h) { 4141 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, 4142 nhs->hstate_kobjs, 4143 &per_node_hstate_attr_group); 4144 if (err) { 4145 pr_err("HugeTLB: Unable to add hstate %s for node %d\n", 4146 h->name, node->dev.id); 4147 hugetlb_unregister_node(node); 4148 break; 4149 } 4150 } 4151 } 4152 4153 /* 4154 * hugetlb init time: register hstate attributes for all registered node 4155 * devices of nodes that have memory. All on-line nodes should have 4156 * registered their associated device by this time. 4157 */ 4158 static void __init hugetlb_register_all_nodes(void) 4159 { 4160 int nid; 4161 4162 for_each_online_node(nid) 4163 hugetlb_register_node(node_devices[nid]); 4164 } 4165 #else /* !CONFIG_NUMA */ 4166 4167 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 4168 { 4169 BUG(); 4170 if (nidp) 4171 *nidp = -1; 4172 return NULL; 4173 } 4174 4175 static void hugetlb_register_all_nodes(void) { } 4176 4177 #endif 4178 4179 #ifdef CONFIG_CMA 4180 static void __init hugetlb_cma_check(void); 4181 #else 4182 static inline __init void hugetlb_cma_check(void) 4183 { 4184 } 4185 #endif 4186 4187 static void __init hugetlb_sysfs_init(void) 4188 { 4189 struct hstate *h; 4190 int err; 4191 4192 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 4193 if (!hugepages_kobj) 4194 return; 4195 4196 for_each_hstate(h) { 4197 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, 4198 hstate_kobjs, &hstate_attr_group); 4199 if (err) 4200 pr_err("HugeTLB: Unable to add hstate %s", h->name); 4201 } 4202 4203 #ifdef CONFIG_NUMA 4204 hugetlb_sysfs_initialized = true; 4205 #endif 4206 hugetlb_register_all_nodes(); 4207 } 4208 4209 static int __init hugetlb_init(void) 4210 { 4211 int i; 4212 4213 BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE < 4214 __NR_HPAGEFLAGS); 4215 4216 if (!hugepages_supported()) { 4217 if (hugetlb_max_hstate || default_hstate_max_huge_pages) 4218 pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n"); 4219 return 0; 4220 } 4221 4222 /* 4223 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some 4224 * architectures depend on setup being done here. 4225 */ 4226 hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 4227 if (!parsed_default_hugepagesz) { 4228 /* 4229 * If we did not parse a default huge page size, set 4230 * default_hstate_idx to HPAGE_SIZE hstate. And, if the 4231 * number of huge pages for this default size was implicitly 4232 * specified, set that here as well. 4233 * Note that the implicit setting will overwrite an explicit 4234 * setting. A warning will be printed in this case. 
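* For example, booting with just "hugepages=512" and no hugepagesz= takes this path: the implicit 512 recorded in default_hstate_max_huge_pages is applied to the HPAGE_SIZE hstate chosen just below.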
4235 */ 4236 default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE)); 4237 if (default_hstate_max_huge_pages) { 4238 if (default_hstate.max_huge_pages) { 4239 char buf[32]; 4240 4241 string_get_size(huge_page_size(&default_hstate), 4242 1, STRING_UNITS_2, buf, 32); 4243 pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n", 4244 default_hstate.max_huge_pages, buf); 4245 pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n", 4246 default_hstate_max_huge_pages); 4247 } 4248 default_hstate.max_huge_pages = 4249 default_hstate_max_huge_pages; 4250 4251 for_each_online_node(i) 4252 default_hstate.max_huge_pages_node[i] = 4253 default_hugepages_in_node[i]; 4254 } 4255 } 4256 4257 hugetlb_cma_check(); 4258 hugetlb_init_hstates(); 4259 gather_bootmem_prealloc(); 4260 report_hugepages(); 4261 4262 hugetlb_sysfs_init(); 4263 hugetlb_cgroup_file_init(); 4264 4265 #ifdef CONFIG_SMP 4266 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus()); 4267 #else 4268 num_fault_mutexes = 1; 4269 #endif 4270 hugetlb_fault_mutex_table = 4271 kmalloc_array(num_fault_mutexes, sizeof(struct mutex), 4272 GFP_KERNEL); 4273 BUG_ON(!hugetlb_fault_mutex_table); 4274 4275 for (i = 0; i < num_fault_mutexes; i++) 4276 mutex_init(&hugetlb_fault_mutex_table[i]); 4277 return 0; 4278 } 4279 subsys_initcall(hugetlb_init); 4280 4281 /* Overwritten by architectures with more huge page sizes */ 4282 bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size) 4283 { 4284 return size == HPAGE_SIZE; 4285 } 4286 4287 void __init hugetlb_add_hstate(unsigned int order) 4288 { 4289 struct hstate *h; 4290 unsigned long i; 4291 4292 if (size_to_hstate(PAGE_SIZE << order)) { 4293 return; 4294 } 4295 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE); 4296 BUG_ON(order == 0); 4297 h = &hstates[hugetlb_max_hstate++]; 4298 mutex_init(&h->resize_lock); 4299 h->order = order; 4300 h->mask = ~(huge_page_size(h) - 1); 4301 for (i = 0; i < MAX_NUMNODES; ++i) 4302 INIT_LIST_HEAD(&h->hugepage_freelists[i]); 4303 INIT_LIST_HEAD(&h->hugepage_activelist); 4304 h->next_nid_to_alloc = first_memory_node; 4305 h->next_nid_to_free = first_memory_node; 4306 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", 4307 huge_page_size(h)/SZ_1K); 4308 4309 parsed_hstate = h; 4310 } 4311 4312 bool __init __weak hugetlb_node_alloc_supported(void) 4313 { 4314 return true; 4315 } 4316 4317 static void __init hugepages_clear_pages_in_node(void) 4318 { 4319 if (!hugetlb_max_hstate) { 4320 default_hstate_max_huge_pages = 0; 4321 memset(default_hugepages_in_node, 0, 4322 sizeof(default_hugepages_in_node)); 4323 } else { 4324 parsed_hstate->max_huge_pages = 0; 4325 memset(parsed_hstate->max_huge_pages_node, 0, 4326 sizeof(parsed_hstate->max_huge_pages_node)); 4327 } 4328 } 4329 4330 /* 4331 * hugepages command line processing 4332 * hugepages normally follows a valid hugepagesz or default_hugepagesz 4333 * specification. If not, ignore the hugepages value. hugepages can also 4334 * be the first huge page command line option in which case it implicitly 4335 * specifies the number of huge pages for the default size.
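* e.g. "hugepages=1024" requests 1024 pages of the preceding (or default) size, while the node-specific form "hugepages=0:512,1:512" spreads the request across nodes 0 and 1, as parsed below.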
4336 */ 4337 static int __init hugepages_setup(char *s) 4338 { 4339 unsigned long *mhp; 4340 static unsigned long *last_mhp; 4341 int node = NUMA_NO_NODE; 4342 int count; 4343 unsigned long tmp; 4344 char *p = s; 4345 4346 if (!parsed_valid_hugepagesz) { 4347 pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s); 4348 parsed_valid_hugepagesz = true; 4349 return 1; 4350 } 4351 4352 /* 4353 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter 4354 * yet, so this hugepages= parameter goes to the "default hstate". 4355 * Otherwise, it goes with the previously parsed hugepagesz or 4356 * default_hugepagesz. 4357 */ 4358 else if (!hugetlb_max_hstate) 4359 mhp = &default_hstate_max_huge_pages; 4360 else 4361 mhp = &parsed_hstate->max_huge_pages; 4362 4363 if (mhp == last_mhp) { 4364 pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s); 4365 return 1; 4366 } 4367 4368 while (*p) { 4369 count = 0; 4370 if (sscanf(p, "%lu%n", &tmp, &count) != 1) 4371 goto invalid; 4372 /* Parameter is node format */ 4373 if (p[count] == ':') { 4374 if (!hugetlb_node_alloc_supported()) { 4375 pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n"); 4376 return 1; 4377 } 4378 if (tmp >= MAX_NUMNODES || !node_online(tmp)) 4379 goto invalid; 4380 node = array_index_nospec(tmp, MAX_NUMNODES); 4381 p += count + 1; 4382 /* Parse hugepages */ 4383 if (sscanf(p, "%lu%n", &tmp, &count) != 1) 4384 goto invalid; 4385 if (!hugetlb_max_hstate) 4386 default_hugepages_in_node[node] = tmp; 4387 else 4388 parsed_hstate->max_huge_pages_node[node] = tmp; 4389 *mhp += tmp; 4390 /* Go to parse next node*/ 4391 if (p[count] == ',') 4392 p += count + 1; 4393 else 4394 break; 4395 } else { 4396 if (p != s) 4397 goto invalid; 4398 *mhp = tmp; 4399 break; 4400 } 4401 } 4402 4403 /* 4404 * Global state is always initialized later in hugetlb_init. 4405 * But we need to allocate gigantic hstates here early to still 4406 * use the bootmem allocator. 4407 */ 4408 if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate)) 4409 hugetlb_hstate_alloc_pages(parsed_hstate); 4410 4411 last_mhp = mhp; 4412 4413 return 1; 4414 4415 invalid: 4416 pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p); 4417 hugepages_clear_pages_in_node(); 4418 return 1; 4419 } 4420 __setup("hugepages=", hugepages_setup); 4421 4422 /* 4423 * hugepagesz command line processing 4424 * A specific huge page size can only be specified once with hugepagesz. 4425 * hugepagesz is followed by hugepages on the command line. The global 4426 * variable 'parsed_valid_hugepagesz' is used to determine if prior 4427 * hugepagesz argument was valid. 4428 */ 4429 static int __init hugepagesz_setup(char *s) 4430 { 4431 unsigned long size; 4432 struct hstate *h; 4433 4434 parsed_valid_hugepagesz = false; 4435 size = (unsigned long)memparse(s, NULL); 4436 4437 if (!arch_hugetlb_valid_size(size)) { 4438 pr_err("HugeTLB: unsupported hugepagesz=%s\n", s); 4439 return 1; 4440 } 4441 4442 h = size_to_hstate(size); 4443 if (h) { 4444 /* 4445 * hstate for this size already exists. This is normally 4446 * an error, but is allowed if the existing hstate is the 4447 * default hstate. More specifically, it is only allowed if 4448 * the number of huge pages for the default hstate was not 4449 * previously specified. 
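* For instance, "default_hugepagesz=1G hugepagesz=1G hugepages=4" is accepted here because the repeated 1G size resolves to the default hstate and no page count has been assigned to it yet.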
4450 */ 4451 if (!parsed_default_hugepagesz || h != &default_hstate || 4452 default_hstate.max_huge_pages) { 4453 pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s); 4454 return 1; 4455 } 4456 4457 /* 4458 * No need to call hugetlb_add_hstate() as hstate already 4459 * exists. But, do set parsed_hstate so that a following 4460 * hugepages= parameter will be applied to this hstate. 4461 */ 4462 parsed_hstate = h; 4463 parsed_valid_hugepagesz = true; 4464 return 1; 4465 } 4466 4467 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); 4468 parsed_valid_hugepagesz = true; 4469 return 1; 4470 } 4471 __setup("hugepagesz=", hugepagesz_setup); 4472 4473 /* 4474 * default_hugepagesz command line input 4475 * Only one instance of default_hugepagesz allowed on command line. 4476 */ 4477 static int __init default_hugepagesz_setup(char *s) 4478 { 4479 unsigned long size; 4480 int i; 4481 4482 parsed_valid_hugepagesz = false; 4483 if (parsed_default_hugepagesz) { 4484 pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s); 4485 return 1; 4486 } 4487 4488 size = (unsigned long)memparse(s, NULL); 4489 4490 if (!arch_hugetlb_valid_size(size)) { 4491 pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s); 4492 return 1; 4493 } 4494 4495 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); 4496 parsed_valid_hugepagesz = true; 4497 parsed_default_hugepagesz = true; 4498 default_hstate_idx = hstate_index(size_to_hstate(size)); 4499 4500 /* 4501 * The number of default huge pages (for this size) could have been 4502 * specified as the first hugetlb parameter: hugepages=X. If so, 4503 * then default_hstate_max_huge_pages is set. If the default huge 4504 * page size is gigantic (> MAX_ORDER), then the pages must be 4505 * allocated here from bootmem allocator. 
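* e.g. with "hugepages=2 default_hugepagesz=1G" on the command line, this point is reached with default_hstate_max_huge_pages == 2 and the two gigantic pages are reserved from bootmem immediately below.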
4506 */ 4507 if (default_hstate_max_huge_pages) { 4508 default_hstate.max_huge_pages = default_hstate_max_huge_pages; 4509 for_each_online_node(i) 4510 default_hstate.max_huge_pages_node[i] = 4511 default_hugepages_in_node[i]; 4512 if (hstate_is_gigantic(&default_hstate)) 4513 hugetlb_hstate_alloc_pages(&default_hstate); 4514 default_hstate_max_huge_pages = 0; 4515 } 4516 4517 return 1; 4518 } 4519 __setup("default_hugepagesz=", default_hugepagesz_setup); 4520 4521 static nodemask_t *policy_mbind_nodemask(gfp_t gfp) 4522 { 4523 #ifdef CONFIG_NUMA 4524 struct mempolicy *mpol = get_task_policy(current); 4525 4526 /* 4527 * Only enforce MPOL_BIND policy which overlaps with cpuset policy 4528 * (from policy_nodemask) specifically for hugetlb case 4529 */ 4530 if (mpol->mode == MPOL_BIND && 4531 (apply_policy_zone(mpol, gfp_zone(gfp)) && 4532 cpuset_nodemask_valid_mems_allowed(&mpol->nodes))) 4533 return &mpol->nodes; 4534 #endif 4535 return NULL; 4536 } 4537 4538 static unsigned int allowed_mems_nr(struct hstate *h) 4539 { 4540 int node; 4541 unsigned int nr = 0; 4542 nodemask_t *mbind_nodemask; 4543 unsigned int *array = h->free_huge_pages_node; 4544 gfp_t gfp_mask = htlb_alloc_mask(h); 4545 4546 mbind_nodemask = policy_mbind_nodemask(gfp_mask); 4547 for_each_node_mask(node, cpuset_current_mems_allowed) { 4548 if (!mbind_nodemask || node_isset(node, *mbind_nodemask)) 4549 nr += array[node]; 4550 } 4551 4552 return nr; 4553 } 4554 4555 #ifdef CONFIG_SYSCTL 4556 static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write, 4557 void *buffer, size_t *length, 4558 loff_t *ppos, unsigned long *out) 4559 { 4560 struct ctl_table dup_table; 4561 4562 /* 4563 * In order to avoid races with __do_proc_doulongvec_minmax(), we 4564 * can duplicate the @table and alter the duplicate of it. 
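* The duplicate's ->data is pointed at the caller's local variable, so the shared ctl_table itself is never written and concurrent handlers cannot observe a transient data pointer.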
4565 */ 4566 dup_table = *table; 4567 dup_table.data = out; 4568 4569 return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos); 4570 } 4571 4572 static int hugetlb_sysctl_handler_common(bool obey_mempolicy, 4573 struct ctl_table *table, int write, 4574 void *buffer, size_t *length, loff_t *ppos) 4575 { 4576 struct hstate *h = &default_hstate; 4577 unsigned long tmp = h->max_huge_pages; 4578 int ret; 4579 4580 if (!hugepages_supported()) 4581 return -EOPNOTSUPP; 4582 4583 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 4584 &tmp); 4585 if (ret) 4586 goto out; 4587 4588 if (write) 4589 ret = __nr_hugepages_store_common(obey_mempolicy, h, 4590 NUMA_NO_NODE, tmp, *length); 4591 out: 4592 return ret; 4593 } 4594 4595 int hugetlb_sysctl_handler(struct ctl_table *table, int write, 4596 void *buffer, size_t *length, loff_t *ppos) 4597 { 4598 4599 return hugetlb_sysctl_handler_common(false, table, write, 4600 buffer, length, ppos); 4601 } 4602 4603 #ifdef CONFIG_NUMA 4604 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, 4605 void *buffer, size_t *length, loff_t *ppos) 4606 { 4607 return hugetlb_sysctl_handler_common(true, table, write, 4608 buffer, length, ppos); 4609 } 4610 #endif /* CONFIG_NUMA */ 4611 4612 int hugetlb_overcommit_handler(struct ctl_table *table, int write, 4613 void *buffer, size_t *length, loff_t *ppos) 4614 { 4615 struct hstate *h = &default_hstate; 4616 unsigned long tmp; 4617 int ret; 4618 4619 if (!hugepages_supported()) 4620 return -EOPNOTSUPP; 4621 4622 tmp = h->nr_overcommit_huge_pages; 4623 4624 if (write && hstate_is_gigantic(h)) 4625 return -EINVAL; 4626 4627 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 4628 &tmp); 4629 if (ret) 4630 goto out; 4631 4632 if (write) { 4633 spin_lock_irq(&hugetlb_lock); 4634 h->nr_overcommit_huge_pages = tmp; 4635 spin_unlock_irq(&hugetlb_lock); 4636 } 4637 out: 4638 return ret; 4639 } 4640 4641 #endif /* CONFIG_SYSCTL */ 4642 4643 void hugetlb_report_meminfo(struct seq_file *m) 4644 { 4645 struct hstate *h; 4646 unsigned long total = 0; 4647 4648 if (!hugepages_supported()) 4649 return; 4650 4651 for_each_hstate(h) { 4652 unsigned long count = h->nr_huge_pages; 4653 4654 total += huge_page_size(h) * count; 4655 4656 if (h == &default_hstate) 4657 seq_printf(m, 4658 "HugePages_Total: %5lu\n" 4659 "HugePages_Free: %5lu\n" 4660 "HugePages_Rsvd: %5lu\n" 4661 "HugePages_Surp: %5lu\n" 4662 "Hugepagesize: %8lu kB\n", 4663 count, 4664 h->free_huge_pages, 4665 h->resv_huge_pages, 4666 h->surplus_huge_pages, 4667 huge_page_size(h) / SZ_1K); 4668 } 4669 4670 seq_printf(m, "Hugetlb: %8lu kB\n", total / SZ_1K); 4671 } 4672 4673 int hugetlb_report_node_meminfo(char *buf, int len, int nid) 4674 { 4675 struct hstate *h = &default_hstate; 4676 4677 if (!hugepages_supported()) 4678 return 0; 4679 4680 return sysfs_emit_at(buf, len, 4681 "Node %d HugePages_Total: %5u\n" 4682 "Node %d HugePages_Free: %5u\n" 4683 "Node %d HugePages_Surp: %5u\n", 4684 nid, h->nr_huge_pages_node[nid], 4685 nid, h->free_huge_pages_node[nid], 4686 nid, h->surplus_huge_pages_node[nid]); 4687 } 4688 4689 void hugetlb_show_meminfo_node(int nid) 4690 { 4691 struct hstate *h; 4692 4693 if (!hugepages_supported()) 4694 return; 4695 4696 for_each_hstate(h) 4697 printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n", 4698 nid, 4699 h->nr_huge_pages_node[nid], 4700 h->free_huge_pages_node[nid], 4701 h->surplus_huge_pages_node[nid], 4702 huge_page_size(h) / 
SZ_1K); 4703 } 4704 4705 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm) 4706 { 4707 seq_printf(m, "HugetlbPages:\t%8lu kB\n", 4708 atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10)); 4709 } 4710 4711 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */ 4712 unsigned long hugetlb_total_pages(void) 4713 { 4714 struct hstate *h; 4715 unsigned long nr_total_pages = 0; 4716 4717 for_each_hstate(h) 4718 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); 4719 return nr_total_pages; 4720 } 4721 4722 static int hugetlb_acct_memory(struct hstate *h, long delta) 4723 { 4724 int ret = -ENOMEM; 4725 4726 if (!delta) 4727 return 0; 4728 4729 spin_lock_irq(&hugetlb_lock); 4730 /* 4731 * When cpuset is configured, it breaks the strict hugetlb page 4732 * reservation as the accounting is done on a global variable. Such 4733 * reservation is completely rubbish in the presence of cpuset because 4734 * the reservation is not checked against page availability for the 4735 * current cpuset. Application can still potentially OOM'ed by kernel 4736 * with lack of free htlb page in cpuset that the task is in. 4737 * Attempt to enforce strict accounting with cpuset is almost 4738 * impossible (or too ugly) because cpuset is too fluid that 4739 * task or memory node can be dynamically moved between cpusets. 4740 * 4741 * The change of semantics for shared hugetlb mapping with cpuset is 4742 * undesirable. However, in order to preserve some of the semantics, 4743 * we fall back to check against current free page availability as 4744 * a best attempt and hopefully to minimize the impact of changing 4745 * semantics that cpuset has. 4746 * 4747 * Apart from cpuset, we also have memory policy mechanism that 4748 * also determines from which node the kernel will allocate memory 4749 * in a NUMA system. So similar to cpuset, we also should consider 4750 * the memory policy of the current task. Similar to the description 4751 * above. 4752 */ 4753 if (delta > 0) { 4754 if (gather_surplus_pages(h, delta) < 0) 4755 goto out; 4756 4757 if (delta > allowed_mems_nr(h)) { 4758 return_unused_surplus_pages(h, delta); 4759 goto out; 4760 } 4761 } 4762 4763 ret = 0; 4764 if (delta < 0) 4765 return_unused_surplus_pages(h, (unsigned long) -delta); 4766 4767 out: 4768 spin_unlock_irq(&hugetlb_lock); 4769 return ret; 4770 } 4771 4772 static void hugetlb_vm_op_open(struct vm_area_struct *vma) 4773 { 4774 struct resv_map *resv = vma_resv_map(vma); 4775 4776 /* 4777 * HPAGE_RESV_OWNER indicates a private mapping. 4778 * This new VMA should share its siblings reservation map if present. 4779 * The VMA will only ever have a valid reservation map pointer where 4780 * it is being copied for another still existing VMA. As that VMA 4781 * has a reference to the reservation map it cannot disappear until 4782 * after this open call completes. It is therefore safe to take a 4783 * new reference here without additional locking. 4784 */ 4785 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 4786 resv_map_dup_hugetlb_cgroup_uncharge_info(resv); 4787 kref_get(&resv->refs); 4788 } 4789 4790 /* 4791 * vma_lock structure for sharable mappings is vma specific. 4792 * Clear old pointer (if copied via vm_area_dup) and allocate 4793 * new structure. Before clearing, make sure vma_lock is not 4794 * for this vma. 
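* (A vma_lock belonging to another vma can only show up here because vm_area_dup() copies vm_private_data verbatim when a VMA is split or duplicated.)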
4795 */ 4796 if (vma->vm_flags & VM_MAYSHARE) { 4797 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 4798 4799 if (vma_lock) { 4800 if (vma_lock->vma != vma) { 4801 vma->vm_private_data = NULL; 4802 hugetlb_vma_lock_alloc(vma); 4803 } else 4804 pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__); 4805 } else 4806 hugetlb_vma_lock_alloc(vma); 4807 } 4808 } 4809 4810 static void hugetlb_vm_op_close(struct vm_area_struct *vma) 4811 { 4812 struct hstate *h = hstate_vma(vma); 4813 struct resv_map *resv; 4814 struct hugepage_subpool *spool = subpool_vma(vma); 4815 unsigned long reserve, start, end; 4816 long gbl_reserve; 4817 4818 hugetlb_vma_lock_free(vma); 4819 4820 resv = vma_resv_map(vma); 4821 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 4822 return; 4823 4824 start = vma_hugecache_offset(h, vma, vma->vm_start); 4825 end = vma_hugecache_offset(h, vma, vma->vm_end); 4826 4827 reserve = (end - start) - region_count(resv, start, end); 4828 hugetlb_cgroup_uncharge_counter(resv, start, end); 4829 if (reserve) { 4830 /* 4831 * Decrement reserve counts. The global reserve count may be 4832 * adjusted if the subpool has a minimum size. 4833 */ 4834 gbl_reserve = hugepage_subpool_put_pages(spool, reserve); 4835 hugetlb_acct_memory(h, -gbl_reserve); 4836 } 4837 4838 kref_put(&resv->refs, resv_map_release); 4839 } 4840 4841 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr) 4842 { 4843 if (addr & ~(huge_page_mask(hstate_vma(vma)))) 4844 return -EINVAL; 4845 4846 /* 4847 * PMD sharing is only possible for PUD_SIZE-aligned address ranges 4848 * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this 4849 * split, unshare PMDs in the PUD_SIZE interval surrounding addr now. 4850 */ 4851 if (addr & ~PUD_MASK) { 4852 /* 4853 * hugetlb_vm_op_split is called right before we attempt to 4854 * split the VMA. We will need to unshare PMDs in the old and 4855 * new VMAs, so let's unshare before we split. 4856 */ 4857 unsigned long floor = addr & PUD_MASK; 4858 unsigned long ceil = floor + PUD_SIZE; 4859 4860 if (floor >= vma->vm_start && ceil <= vma->vm_end) 4861 hugetlb_unshare_pmds(vma, floor, ceil); 4862 } 4863 4864 return 0; 4865 } 4866 4867 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma) 4868 { 4869 return huge_page_size(hstate_vma(vma)); 4870 } 4871 4872 /* 4873 * We cannot handle pagefaults against hugetlb pages at all. They cause 4874 * handle_mm_fault() to try to instantiate regular-sized pages in the 4875 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get 4876 * this far. 4877 */ 4878 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf) 4879 { 4880 BUG(); 4881 return 0; 4882 } 4883 4884 /* 4885 * When a new function is introduced to vm_operations_struct and added 4886 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops. 4887 * This is because under System V memory model, mappings created via 4888 * shmget/shmat with "huge page" specified are backed by hugetlbfs files, 4889 * and their original vm_ops are overwritten with shm_vm_ops.
4890 */ 4891 const struct vm_operations_struct hugetlb_vm_ops = { 4892 .fault = hugetlb_vm_op_fault, 4893 .open = hugetlb_vm_op_open, 4894 .close = hugetlb_vm_op_close, 4895 .may_split = hugetlb_vm_op_split, 4896 .pagesize = hugetlb_vm_op_pagesize, 4897 }; 4898 4899 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 4900 int writable) 4901 { 4902 pte_t entry; 4903 unsigned int shift = huge_page_shift(hstate_vma(vma)); 4904 4905 if (writable) { 4906 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, 4907 vma->vm_page_prot))); 4908 } else { 4909 entry = huge_pte_wrprotect(mk_huge_pte(page, 4910 vma->vm_page_prot)); 4911 } 4912 entry = pte_mkyoung(entry); 4913 entry = arch_make_huge_pte(entry, shift, vma->vm_flags); 4914 4915 return entry; 4916 } 4917 4918 static void set_huge_ptep_writable(struct vm_area_struct *vma, 4919 unsigned long address, pte_t *ptep) 4920 { 4921 pte_t entry; 4922 4923 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep))); 4924 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) 4925 update_mmu_cache(vma, address, ptep); 4926 } 4927 4928 bool is_hugetlb_entry_migration(pte_t pte) 4929 { 4930 swp_entry_t swp; 4931 4932 if (huge_pte_none(pte) || pte_present(pte)) 4933 return false; 4934 swp = pte_to_swp_entry(pte); 4935 if (is_migration_entry(swp)) 4936 return true; 4937 else 4938 return false; 4939 } 4940 4941 static bool is_hugetlb_entry_hwpoisoned(pte_t pte) 4942 { 4943 swp_entry_t swp; 4944 4945 if (huge_pte_none(pte) || pte_present(pte)) 4946 return false; 4947 swp = pte_to_swp_entry(pte); 4948 if (is_hwpoison_entry(swp)) 4949 return true; 4950 else 4951 return false; 4952 } 4953 4954 static void 4955 hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr, 4956 struct folio *new_folio) 4957 { 4958 __folio_mark_uptodate(new_folio); 4959 hugepage_add_new_anon_rmap(new_folio, vma, addr); 4960 set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, &new_folio->page, 1)); 4961 hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm); 4962 folio_set_hugetlb_migratable(new_folio); 4963 } 4964 4965 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 4966 struct vm_area_struct *dst_vma, 4967 struct vm_area_struct *src_vma) 4968 { 4969 pte_t *src_pte, *dst_pte, entry; 4970 struct page *ptepage; 4971 unsigned long addr; 4972 bool cow = is_cow_mapping(src_vma->vm_flags); 4973 struct hstate *h = hstate_vma(src_vma); 4974 unsigned long sz = huge_page_size(h); 4975 unsigned long npages = pages_per_huge_page(h); 4976 struct mmu_notifier_range range; 4977 unsigned long last_addr_mask; 4978 int ret = 0; 4979 4980 if (cow) { 4981 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src, 4982 src_vma->vm_start, 4983 src_vma->vm_end); 4984 mmu_notifier_invalidate_range_start(&range); 4985 mmap_assert_write_locked(src); 4986 raw_write_seqcount_begin(&src->write_protect_seq); 4987 } else { 4988 /* 4989 * For shared mappings the vma lock must be held before 4990 * calling hugetlb_walk() in the src vma. Otherwise, the 4991 * returned ptep could go away if part of a shared pmd and 4992 * another thread calls huge_pmd_unshare. 
4993 */ 4994 hugetlb_vma_lock_read(src_vma); 4995 } 4996 4997 last_addr_mask = hugetlb_mask_last_page(h); 4998 for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) { 4999 spinlock_t *src_ptl, *dst_ptl; 5000 src_pte = hugetlb_walk(src_vma, addr, sz); 5001 if (!src_pte) { 5002 addr |= last_addr_mask; 5003 continue; 5004 } 5005 dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz); 5006 if (!dst_pte) { 5007 ret = -ENOMEM; 5008 break; 5009 } 5010 5011 /* 5012 * If the pagetables are shared don't copy or take references. 5013 * 5014 * dst_pte == src_pte is the common case of src/dest sharing. 5015 * However, src could have 'unshared' and dst shares with 5016 * another vma. So page_count of ptep page is checked instead 5017 * to reliably determine whether pte is shared. 5018 */ 5019 if (page_count(virt_to_page(dst_pte)) > 1) { 5020 addr |= last_addr_mask; 5021 continue; 5022 } 5023 5024 dst_ptl = huge_pte_lock(h, dst, dst_pte); 5025 src_ptl = huge_pte_lockptr(h, src, src_pte); 5026 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5027 entry = huge_ptep_get(src_pte); 5028 again: 5029 if (huge_pte_none(entry)) { 5030 /* 5031 * Skip if src entry none. 5032 */ 5033 ; 5034 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) { 5035 bool uffd_wp = huge_pte_uffd_wp(entry); 5036 5037 if (!userfaultfd_wp(dst_vma) && uffd_wp) 5038 entry = huge_pte_clear_uffd_wp(entry); 5039 set_huge_pte_at(dst, addr, dst_pte, entry); 5040 } else if (unlikely(is_hugetlb_entry_migration(entry))) { 5041 swp_entry_t swp_entry = pte_to_swp_entry(entry); 5042 bool uffd_wp = huge_pte_uffd_wp(entry); 5043 5044 if (!is_readable_migration_entry(swp_entry) && cow) { 5045 /* 5046 * COW mappings require pages in both 5047 * parent and child to be set to read. 5048 */ 5049 swp_entry = make_readable_migration_entry( 5050 swp_offset(swp_entry)); 5051 entry = swp_entry_to_pte(swp_entry); 5052 if (userfaultfd_wp(src_vma) && uffd_wp) 5053 entry = huge_pte_mkuffd_wp(entry); 5054 set_huge_pte_at(src, addr, src_pte, entry); 5055 } 5056 if (!userfaultfd_wp(dst_vma) && uffd_wp) 5057 entry = huge_pte_clear_uffd_wp(entry); 5058 set_huge_pte_at(dst, addr, dst_pte, entry); 5059 } else if (unlikely(is_pte_marker(entry))) { 5060 /* No swap on hugetlb */ 5061 WARN_ON_ONCE( 5062 is_swapin_error_entry(pte_to_swp_entry(entry))); 5063 /* 5064 * We copy the pte marker only if the dst vma has 5065 * uffd-wp enabled. 5066 */ 5067 if (userfaultfd_wp(dst_vma)) 5068 set_huge_pte_at(dst, addr, dst_pte, entry); 5069 } else { 5070 entry = huge_ptep_get(src_pte); 5071 ptepage = pte_page(entry); 5072 get_page(ptepage); 5073 5074 /* 5075 * Failing to duplicate the anon rmap is a rare case 5076 * where we see pinned hugetlb pages while they're 5077 * prone to COW. We need to do the COW earlier during 5078 * fork. 5079 * 5080 * When pre-allocating the page or copying data, we 5081 * need to be without the pgtable locks since we could 5082 * sleep during the process. 
5083 */ 5084 if (!PageAnon(ptepage)) { 5085 page_dup_file_rmap(ptepage, true); 5086 } else if (page_try_dup_anon_rmap(ptepage, true, 5087 src_vma)) { 5088 pte_t src_pte_old = entry; 5089 struct folio *new_folio; 5090 5091 spin_unlock(src_ptl); 5092 spin_unlock(dst_ptl); 5093 /* Do not use reserve as it's private owned */ 5094 new_folio = alloc_hugetlb_folio(dst_vma, addr, 1); 5095 if (IS_ERR(new_folio)) { 5096 put_page(ptepage); 5097 ret = PTR_ERR(new_folio); 5098 break; 5099 } 5100 ret = copy_user_large_folio(new_folio, 5101 page_folio(ptepage), 5102 addr, dst_vma); 5103 put_page(ptepage); 5104 if (ret) { 5105 folio_put(new_folio); 5106 break; 5107 } 5108 5109 /* Install the new hugetlb folio if src pte stable */ 5110 dst_ptl = huge_pte_lock(h, dst, dst_pte); 5111 src_ptl = huge_pte_lockptr(h, src, src_pte); 5112 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5113 entry = huge_ptep_get(src_pte); 5114 if (!pte_same(src_pte_old, entry)) { 5115 restore_reserve_on_error(h, dst_vma, addr, 5116 new_folio); 5117 folio_put(new_folio); 5118 /* huge_ptep of dst_pte won't change as in child */ 5119 goto again; 5120 } 5121 hugetlb_install_folio(dst_vma, dst_pte, addr, new_folio); 5122 spin_unlock(src_ptl); 5123 spin_unlock(dst_ptl); 5124 continue; 5125 } 5126 5127 if (cow) { 5128 /* 5129 * No need to notify as we are downgrading page 5130 * table protection not changing it to point 5131 * to a new page. 5132 * 5133 * See Documentation/mm/mmu_notifier.rst 5134 */ 5135 huge_ptep_set_wrprotect(src, addr, src_pte); 5136 entry = huge_pte_wrprotect(entry); 5137 } 5138 5139 set_huge_pte_at(dst, addr, dst_pte, entry); 5140 hugetlb_count_add(npages, dst); 5141 } 5142 spin_unlock(src_ptl); 5143 spin_unlock(dst_ptl); 5144 } 5145 5146 if (cow) { 5147 raw_write_seqcount_end(&src->write_protect_seq); 5148 mmu_notifier_invalidate_range_end(&range); 5149 } else { 5150 hugetlb_vma_unlock_read(src_vma); 5151 } 5152 5153 return ret; 5154 } 5155 5156 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr, 5157 unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte) 5158 { 5159 struct hstate *h = hstate_vma(vma); 5160 struct mm_struct *mm = vma->vm_mm; 5161 spinlock_t *src_ptl, *dst_ptl; 5162 pte_t pte; 5163 5164 dst_ptl = huge_pte_lock(h, mm, dst_pte); 5165 src_ptl = huge_pte_lockptr(h, mm, src_pte); 5166 5167 /* 5168 * We don't have to worry about the ordering of src and dst ptlocks 5169 * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock. 
5170 */ 5171 if (src_ptl != dst_ptl) 5172 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5173 5174 pte = huge_ptep_get_and_clear(mm, old_addr, src_pte); 5175 set_huge_pte_at(mm, new_addr, dst_pte, pte); 5176 5177 if (src_ptl != dst_ptl) 5178 spin_unlock(src_ptl); 5179 spin_unlock(dst_ptl); 5180 } 5181 5182 int move_hugetlb_page_tables(struct vm_area_struct *vma, 5183 struct vm_area_struct *new_vma, 5184 unsigned long old_addr, unsigned long new_addr, 5185 unsigned long len) 5186 { 5187 struct hstate *h = hstate_vma(vma); 5188 struct address_space *mapping = vma->vm_file->f_mapping; 5189 unsigned long sz = huge_page_size(h); 5190 struct mm_struct *mm = vma->vm_mm; 5191 unsigned long old_end = old_addr + len; 5192 unsigned long last_addr_mask; 5193 pte_t *src_pte, *dst_pte; 5194 struct mmu_notifier_range range; 5195 bool shared_pmd = false; 5196 5197 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr, 5198 old_end); 5199 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 5200 /* 5201 * In case of shared PMDs, we should cover the maximum possible 5202 * range. 5203 */ 5204 flush_cache_range(vma, range.start, range.end); 5205 5206 mmu_notifier_invalidate_range_start(&range); 5207 last_addr_mask = hugetlb_mask_last_page(h); 5208 /* Prevent race with file truncation */ 5209 hugetlb_vma_lock_write(vma); 5210 i_mmap_lock_write(mapping); 5211 for (; old_addr < old_end; old_addr += sz, new_addr += sz) { 5212 src_pte = hugetlb_walk(vma, old_addr, sz); 5213 if (!src_pte) { 5214 old_addr |= last_addr_mask; 5215 new_addr |= last_addr_mask; 5216 continue; 5217 } 5218 if (huge_pte_none(huge_ptep_get(src_pte))) 5219 continue; 5220 5221 if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) { 5222 shared_pmd = true; 5223 old_addr |= last_addr_mask; 5224 new_addr |= last_addr_mask; 5225 continue; 5226 } 5227 5228 dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz); 5229 if (!dst_pte) 5230 break; 5231 5232 move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte); 5233 } 5234 5235 if (shared_pmd) 5236 flush_tlb_range(vma, range.start, range.end); 5237 else 5238 flush_tlb_range(vma, old_end - len, old_end); 5239 mmu_notifier_invalidate_range_end(&range); 5240 i_mmap_unlock_write(mapping); 5241 hugetlb_vma_unlock_write(vma); 5242 5243 return len + old_addr - old_end; 5244 } 5245 5246 static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 5247 unsigned long start, unsigned long end, 5248 struct page *ref_page, zap_flags_t zap_flags) 5249 { 5250 struct mm_struct *mm = vma->vm_mm; 5251 unsigned long address; 5252 pte_t *ptep; 5253 pte_t pte; 5254 spinlock_t *ptl; 5255 struct page *page; 5256 struct hstate *h = hstate_vma(vma); 5257 unsigned long sz = huge_page_size(h); 5258 unsigned long last_addr_mask; 5259 bool force_flush = false; 5260 5261 WARN_ON(!is_vm_hugetlb_page(vma)); 5262 BUG_ON(start & ~huge_page_mask(h)); 5263 BUG_ON(end & ~huge_page_mask(h)); 5264 5265 /* 5266 * This is a hugetlb vma, all the pte entries should point 5267 * to huge page. 
5268 */ 5269 tlb_change_page_size(tlb, sz); 5270 tlb_start_vma(tlb, vma); 5271 5272 last_addr_mask = hugetlb_mask_last_page(h); 5273 address = start; 5274 for (; address < end; address += sz) { 5275 ptep = hugetlb_walk(vma, address, sz); 5276 if (!ptep) { 5277 address |= last_addr_mask; 5278 continue; 5279 } 5280 5281 ptl = huge_pte_lock(h, mm, ptep); 5282 if (huge_pmd_unshare(mm, vma, address, ptep)) { 5283 spin_unlock(ptl); 5284 tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE); 5285 force_flush = true; 5286 address |= last_addr_mask; 5287 continue; 5288 } 5289 5290 pte = huge_ptep_get(ptep); 5291 if (huge_pte_none(pte)) { 5292 spin_unlock(ptl); 5293 continue; 5294 } 5295 5296 /* 5297 * Migrating hugepage or HWPoisoned hugepage is already 5298 * unmapped and its refcount is dropped, so just clear pte here. 5299 */ 5300 if (unlikely(!pte_present(pte))) { 5301 /* 5302 * If the pte was wr-protected by uffd-wp in any of the 5303 * swap forms, meanwhile the caller does not want to 5304 * drop the uffd-wp bit in this zap, then replace the 5305 * pte with a marker. 5306 */ 5307 if (pte_swp_uffd_wp_any(pte) && 5308 !(zap_flags & ZAP_FLAG_DROP_MARKER)) 5309 set_huge_pte_at(mm, address, ptep, 5310 make_pte_marker(PTE_MARKER_UFFD_WP)); 5311 else 5312 huge_pte_clear(mm, address, ptep, sz); 5313 spin_unlock(ptl); 5314 continue; 5315 } 5316 5317 page = pte_page(pte); 5318 /* 5319 * If a reference page is supplied, it is because a specific 5320 * page is being unmapped, not a range. Ensure the page we 5321 * are about to unmap is the actual page of interest. 5322 */ 5323 if (ref_page) { 5324 if (page != ref_page) { 5325 spin_unlock(ptl); 5326 continue; 5327 } 5328 /* 5329 * Mark the VMA as having unmapped its page so that 5330 * future faults in this VMA will fail rather than 5331 * looking like data was lost. 5332 */ 5333 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 5334 } 5335 5336 pte = huge_ptep_get_and_clear(mm, address, ptep); 5337 tlb_remove_huge_tlb_entry(h, tlb, ptep, address); 5338 if (huge_pte_dirty(pte)) 5339 set_page_dirty(page); 5340 /* Leave a uffd-wp pte marker if needed */ 5341 if (huge_pte_uffd_wp(pte) && 5342 !(zap_flags & ZAP_FLAG_DROP_MARKER)) 5343 set_huge_pte_at(mm, address, ptep, 5344 make_pte_marker(PTE_MARKER_UFFD_WP)); 5345 hugetlb_count_sub(pages_per_huge_page(h), mm); 5346 page_remove_rmap(page, vma, true); 5347 5348 spin_unlock(ptl); 5349 tlb_remove_page_size(tlb, page, huge_page_size(h)); 5350 /* 5351 * Bail out after unmapping reference page if supplied 5352 */ 5353 if (ref_page) 5354 break; 5355 } 5356 tlb_end_vma(tlb, vma); 5357 5358 /* 5359 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We 5360 * could defer the flush until now, since by holding i_mmap_rwsem we 5361 * guaranteed that the last reference would not be dropped. But we must 5362 * do the flushing before we return, as otherwise i_mmap_rwsem will be 5363 * dropped and the last reference to the shared PMDs page might be 5364 * dropped as well. 5365 * 5366 * In theory we could defer the freeing of the PMD pages as well, but 5367 * huge_pmd_unshare() relies on the exact page_count for the PMD page to 5368 * detect sharing, so we cannot defer the release of the page either. 5369 * Instead, do flush now.
5370 */ 5371 if (force_flush) 5372 tlb_flush_mmu_tlbonly(tlb); 5373 } 5374 5375 void __unmap_hugepage_range_final(struct mmu_gather *tlb, 5376 struct vm_area_struct *vma, unsigned long start, 5377 unsigned long end, struct page *ref_page, 5378 zap_flags_t zap_flags) 5379 { 5380 hugetlb_vma_lock_write(vma); 5381 i_mmap_lock_write(vma->vm_file->f_mapping); 5382 5383 /* mmu notification performed in caller */ 5384 __unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags); 5385 5386 if (zap_flags & ZAP_FLAG_UNMAP) { /* final unmap */ 5387 /* 5388 * Unlock and free the vma lock before releasing i_mmap_rwsem. 5389 * When the vma_lock is freed, this makes the vma ineligible 5390 * for pmd sharing. And, i_mmap_rwsem is required to set up 5391 * pmd sharing. This is important as page tables for this 5392 * unmapped range will be asynchronously deleted. If the page 5393 * tables are shared, there will be issues when accessed by 5394 * someone else. 5395 */ 5396 __hugetlb_vma_unlock_write_free(vma); 5397 i_mmap_unlock_write(vma->vm_file->f_mapping); 5398 } else { 5399 i_mmap_unlock_write(vma->vm_file->f_mapping); 5400 hugetlb_vma_unlock_write(vma); 5401 } 5402 } 5403 5404 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 5405 unsigned long end, struct page *ref_page, 5406 zap_flags_t zap_flags) 5407 { 5408 struct mmu_notifier_range range; 5409 struct mmu_gather tlb; 5410 5411 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 5412 start, end); 5413 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 5414 mmu_notifier_invalidate_range_start(&range); 5415 tlb_gather_mmu(&tlb, vma->vm_mm); 5416 5417 __unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags); 5418 5419 mmu_notifier_invalidate_range_end(&range); 5420 tlb_finish_mmu(&tlb); 5421 } 5422 5423 /* 5424 * This is called when the original mapper is failing to COW a MAP_PRIVATE 5425 * mapping it owns the reserve page for. The intention is to unmap the page 5426 * from other VMAs and let the children be SIGKILLed if they are faulting the 5427 * same region. 5428 */ 5429 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, 5430 struct page *page, unsigned long address) 5431 { 5432 struct hstate *h = hstate_vma(vma); 5433 struct vm_area_struct *iter_vma; 5434 struct address_space *mapping; 5435 pgoff_t pgoff; 5436 5437 /* 5438 * vm_pgoff is in PAGE_SIZE units, hence the different calculation 5439 * from page cache lookup which is in HPAGE_SIZE units. 5440 */ 5441 address = address & huge_page_mask(h); 5442 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + 5443 vma->vm_pgoff; 5444 mapping = vma->vm_file->f_mapping; 5445 5446 /* 5447 * Take the mapping lock for the duration of the table walk. As 5448 * this mapping should be shared between all the VMAs, 5449 * __unmap_hugepage_range() is called as the lock is already held. 5450 */ 5451 i_mmap_lock_write(mapping); 5452 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { 5453 /* Do not unmap the current VMA */ 5454 if (iter_vma == vma) 5455 continue; 5456 5457 /* 5458 * Shared VMAs have their own reserves and do not affect 5459 * MAP_PRIVATE accounting but it is possible that a shared 5460 * VMA is using the same page so check and skip such VMAs. 5461 */ 5462 if (iter_vma->vm_flags & VM_MAYSHARE) 5463 continue; 5464 5465 /* 5466 * Unmap the page from other VMAs without their own reserves. 5467 * They get marked to be SIGKILLed if they fault in these 5468 * areas.
This is because a future no-page fault on this VMA 5469 * could insert a zeroed page instead of the data existing 5470 * from the time of fork. This would look like data corruption 5471 */ 5472 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 5473 unmap_hugepage_range(iter_vma, address, 5474 address + huge_page_size(h), page, 0); 5475 } 5476 i_mmap_unlock_write(mapping); 5477 } 5478 5479 /* 5480 * hugetlb_wp() should be called with page lock of the original hugepage held. 5481 * Called with hugetlb_fault_mutex_table held and pte_page locked so we 5482 * cannot race with other handlers or page migration. 5483 * Keep the pte_same checks anyway to make transition from the mutex easier. 5484 */ 5485 static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma, 5486 unsigned long address, pte_t *ptep, unsigned int flags, 5487 struct folio *pagecache_folio, spinlock_t *ptl) 5488 { 5489 const bool unshare = flags & FAULT_FLAG_UNSHARE; 5490 pte_t pte = huge_ptep_get(ptep); 5491 struct hstate *h = hstate_vma(vma); 5492 struct page *old_page; 5493 struct folio *new_folio; 5494 int outside_reserve = 0; 5495 vm_fault_t ret = 0; 5496 unsigned long haddr = address & huge_page_mask(h); 5497 struct mmu_notifier_range range; 5498 5499 /* 5500 * Never handle CoW for uffd-wp protected pages. It should be only 5501 * handled when the uffd-wp protection is removed. 5502 * 5503 * Note that only the CoW optimization path (in hugetlb_no_page()) 5504 * can trigger this, because hugetlb_fault() will always resolve 5505 * uffd-wp bit first. 5506 */ 5507 if (!unshare && huge_pte_uffd_wp(pte)) 5508 return 0; 5509 5510 /* 5511 * hugetlb does not support FOLL_FORCE-style write faults that keep the 5512 * PTE mapped R/O such as maybe_mkwrite() would do. 5513 */ 5514 if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE))) 5515 return VM_FAULT_SIGSEGV; 5516 5517 /* Let's take out MAP_SHARED mappings first. */ 5518 if (vma->vm_flags & VM_MAYSHARE) { 5519 set_huge_ptep_writable(vma, haddr, ptep); 5520 return 0; 5521 } 5522 5523 old_page = pte_page(pte); 5524 5525 delayacct_wpcopy_start(); 5526 5527 retry_avoidcopy: 5528 /* 5529 * If no-one else is actually using this page, we're the exclusive 5530 * owner and can reuse this page. 5531 */ 5532 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) { 5533 if (!PageAnonExclusive(old_page)) 5534 page_move_anon_rmap(old_page, vma); 5535 if (likely(!unshare)) 5536 set_huge_ptep_writable(vma, haddr, ptep); 5537 5538 delayacct_wpcopy_end(); 5539 return 0; 5540 } 5541 VM_BUG_ON_PAGE(PageAnon(old_page) && PageAnonExclusive(old_page), 5542 old_page); 5543 5544 /* 5545 * If the process that created a MAP_PRIVATE mapping is about to 5546 * perform a COW due to a shared page count, attempt to satisfy 5547 * the allocation without using the existing reserves. The pagecache 5548 * page is used to determine if the reserve at this address was 5549 * consumed or not. If reserves were used, a partial faulted mapping 5550 * at the time of fork() could consume its reserves on COW instead 5551 * of the full address range. 5552 */ 5553 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 5554 page_folio(old_page) != pagecache_folio) 5555 outside_reserve = 1; 5556 5557 get_page(old_page); 5558 5559 /* 5560 * Drop page table lock as buddy allocator may be called. It will 5561 * be acquired again before returning to the caller, as expected. 
5562 */ 5563 spin_unlock(ptl); 5564 new_folio = alloc_hugetlb_folio(vma, haddr, outside_reserve); 5565 5566 if (IS_ERR(new_folio)) { 5567 /* 5568 * If a process owning a MAP_PRIVATE mapping fails to COW, 5569 * it is due to references held by a child and an insufficient 5570 * huge page pool. To guarantee the original mappers 5571 * reliability, unmap the page from child processes. The child 5572 * may get SIGKILLed if it later faults. 5573 */ 5574 if (outside_reserve) { 5575 struct address_space *mapping = vma->vm_file->f_mapping; 5576 pgoff_t idx; 5577 u32 hash; 5578 5579 put_page(old_page); 5580 /* 5581 * Drop hugetlb_fault_mutex and vma_lock before 5582 * unmapping. unmapping needs to hold vma_lock 5583 * in write mode. Dropping vma_lock in read mode 5584 * here is OK as COW mappings do not interact with 5585 * PMD sharing. 5586 * 5587 * Reacquire both after unmap operation. 5588 */ 5589 idx = vma_hugecache_offset(h, vma, haddr); 5590 hash = hugetlb_fault_mutex_hash(mapping, idx); 5591 hugetlb_vma_unlock_read(vma); 5592 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 5593 5594 unmap_ref_private(mm, vma, old_page, haddr); 5595 5596 mutex_lock(&hugetlb_fault_mutex_table[hash]); 5597 hugetlb_vma_lock_read(vma); 5598 spin_lock(ptl); 5599 ptep = hugetlb_walk(vma, haddr, huge_page_size(h)); 5600 if (likely(ptep && 5601 pte_same(huge_ptep_get(ptep), pte))) 5602 goto retry_avoidcopy; 5603 /* 5604 * race occurs while re-acquiring page table 5605 * lock, and our job is done. 5606 */ 5607 delayacct_wpcopy_end(); 5608 return 0; 5609 } 5610 5611 ret = vmf_error(PTR_ERR(new_folio)); 5612 goto out_release_old; 5613 } 5614 5615 /* 5616 * When the original hugepage is shared one, it does not have 5617 * anon_vma prepared. 5618 */ 5619 if (unlikely(anon_vma_prepare(vma))) { 5620 ret = VM_FAULT_OOM; 5621 goto out_release_all; 5622 } 5623 5624 if (copy_user_large_folio(new_folio, page_folio(old_page), address, vma)) { 5625 ret = VM_FAULT_HWPOISON_LARGE; 5626 goto out_release_all; 5627 } 5628 __folio_mark_uptodate(new_folio); 5629 5630 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddr, 5631 haddr + huge_page_size(h)); 5632 mmu_notifier_invalidate_range_start(&range); 5633 5634 /* 5635 * Retake the page table lock to check for racing updates 5636 * before the page tables are altered 5637 */ 5638 spin_lock(ptl); 5639 ptep = hugetlb_walk(vma, haddr, huge_page_size(h)); 5640 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) { 5641 /* Break COW or unshare */ 5642 huge_ptep_clear_flush(vma, haddr, ptep); 5643 mmu_notifier_invalidate_range(mm, range.start, range.end); 5644 page_remove_rmap(old_page, vma, true); 5645 hugepage_add_new_anon_rmap(new_folio, vma, haddr); 5646 set_huge_pte_at(mm, haddr, ptep, 5647 make_huge_pte(vma, &new_folio->page, !unshare)); 5648 folio_set_hugetlb_migratable(new_folio); 5649 /* Make the old page be freed below */ 5650 new_folio = page_folio(old_page); 5651 } 5652 spin_unlock(ptl); 5653 mmu_notifier_invalidate_range_end(&range); 5654 out_release_all: 5655 /* 5656 * No restore in case of successful pagetable update (Break COW or 5657 * unshare) 5658 */ 5659 if (new_folio != page_folio(old_page)) 5660 restore_reserve_on_error(h, vma, haddr, new_folio); 5661 folio_put(new_folio); 5662 out_release_old: 5663 put_page(old_page); 5664 5665 spin_lock(ptl); /* Caller expects lock to be held */ 5666 5667 delayacct_wpcopy_end(); 5668 return ret; 5669 } 5670 5671 /* 5672 * Return whether there is a pagecache page to back given address within VMA. 
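 * A folio is treated as present when page_cache_next_miss() reports that
 * the first hole at or after idx is not idx itself.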
5673 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page. 5674 */ 5675 static bool hugetlbfs_pagecache_present(struct hstate *h, 5676 struct vm_area_struct *vma, unsigned long address) 5677 { 5678 struct address_space *mapping = vma->vm_file->f_mapping; 5679 pgoff_t idx = vma_hugecache_offset(h, vma, address); 5680 bool present; 5681 5682 rcu_read_lock(); 5683 present = page_cache_next_miss(mapping, idx, 1) != idx; 5684 rcu_read_unlock(); 5685 5686 return present; 5687 } 5688 5689 int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping, 5690 pgoff_t idx) 5691 { 5692 struct inode *inode = mapping->host; 5693 struct hstate *h = hstate_inode(inode); 5694 int err; 5695 5696 __folio_set_locked(folio); 5697 err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL); 5698 5699 if (unlikely(err)) { 5700 __folio_clear_locked(folio); 5701 return err; 5702 } 5703 folio_clear_hugetlb_restore_reserve(folio); 5704 5705 /* 5706 * mark folio dirty so that it will not be removed from cache/file 5707 * by non-hugetlbfs specific code paths. 5708 */ 5709 folio_mark_dirty(folio); 5710 5711 spin_lock(&inode->i_lock); 5712 inode->i_blocks += blocks_per_huge_page(h); 5713 spin_unlock(&inode->i_lock); 5714 return 0; 5715 } 5716 5717 static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma, 5718 struct address_space *mapping, 5719 pgoff_t idx, 5720 unsigned int flags, 5721 unsigned long haddr, 5722 unsigned long addr, 5723 unsigned long reason) 5724 { 5725 u32 hash; 5726 struct vm_fault vmf = { 5727 .vma = vma, 5728 .address = haddr, 5729 .real_address = addr, 5730 .flags = flags, 5731 5732 /* 5733 * Hard to debug if it ends up being 5734 * used by a callee that assumes 5735 * something about the other 5736 * uninitialized fields... same as in 5737 * memory.c 5738 */ 5739 }; 5740 5741 /* 5742 * vma_lock and hugetlb_fault_mutex must be dropped before handling 5743 * userfault. Also mmap_lock could be dropped due to handling 5744 * userfault, any vma operation should be careful from here. 5745 */ 5746 hugetlb_vma_unlock_read(vma); 5747 hash = hugetlb_fault_mutex_hash(mapping, idx); 5748 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 5749 return handle_userfault(&vmf, reason); 5750 } 5751 5752 /* 5753 * Recheck pte with pgtable lock. Returns true if pte didn't change, or 5754 * false if pte changed or is changing. 5755 */ 5756 static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, 5757 pte_t *ptep, pte_t old_pte) 5758 { 5759 spinlock_t *ptl; 5760 bool same; 5761 5762 ptl = huge_pte_lock(h, mm, ptep); 5763 same = pte_same(huge_ptep_get(ptep), old_pte); 5764 spin_unlock(ptl); 5765 5766 return same; 5767 } 5768 5769 static vm_fault_t hugetlb_no_page(struct mm_struct *mm, 5770 struct vm_area_struct *vma, 5771 struct address_space *mapping, pgoff_t idx, 5772 unsigned long address, pte_t *ptep, 5773 pte_t old_pte, unsigned int flags) 5774 { 5775 struct hstate *h = hstate_vma(vma); 5776 vm_fault_t ret = VM_FAULT_SIGBUS; 5777 int anon_rmap = 0; 5778 unsigned long size; 5779 struct folio *folio; 5780 pte_t new_pte; 5781 spinlock_t *ptl; 5782 unsigned long haddr = address & huge_page_mask(h); 5783 bool new_folio, new_pagecache_folio = false; 5784 u32 hash = hugetlb_fault_mutex_hash(mapping, idx); 5785 5786 /* 5787 * Currently, we are forced to kill the process in the event the 5788 * original mapper has unmapped pages from the child due to a failed 5789 * COW/unsharing. 
Warn that such a situation has occurred as it may not 5790 * be obvious. 5791 */ 5792 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 5793 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n", 5794 current->pid); 5795 goto out; 5796 } 5797 5798 /* 5799 * Use page lock to guard against racing truncation 5800 * before we get page_table_lock. 5801 */ 5802 new_folio = false; 5803 folio = filemap_lock_folio(mapping, idx); 5804 if (IS_ERR(folio)) { 5805 size = i_size_read(mapping->host) >> huge_page_shift(h); 5806 if (idx >= size) 5807 goto out; 5808 /* Check for page in userfault range */ 5809 if (userfaultfd_missing(vma)) { 5810 /* 5811 * Since hugetlb_no_page() was examining pte 5812 * without pgtable lock, we need to re-test under 5813 * lock because the pte may not be stable and could 5814 * have changed from under us. Try to detect 5815 * either changed or during-changing ptes and retry 5816 * properly when needed. 5817 * 5818 * Note that userfaultfd is actually fine with 5819 * false positives (e.g. caused by pte changed), 5820 * but not wrong logical events (e.g. caused by 5821 * reading a pte during changing). The latter can 5822 * confuse the userspace, so the strictness is very 5823 * much preferred. E.g., MISSING event should 5824 * never happen on the page after UFFDIO_COPY has 5825 * correctly installed the page and returned. 5826 */ 5827 if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) { 5828 ret = 0; 5829 goto out; 5830 } 5831 5832 return hugetlb_handle_userfault(vma, mapping, idx, flags, 5833 haddr, address, 5834 VM_UFFD_MISSING); 5835 } 5836 5837 folio = alloc_hugetlb_folio(vma, haddr, 0); 5838 if (IS_ERR(folio)) { 5839 /* 5840 * Returning error will result in faulting task being 5841 * sent SIGBUS. The hugetlb fault mutex prevents two 5842 * tasks from racing to fault in the same page which 5843 * could result in false unable to allocate errors. 5844 * Page migration does not take the fault mutex, but 5845 * does a clear then write of pte's under page table 5846 * lock. Page fault code could race with migration, 5847 * notice the clear pte and try to allocate a page 5848 * here. Before returning error, get ptl and make 5849 * sure there really is no pte entry. 5850 */ 5851 if (hugetlb_pte_stable(h, mm, ptep, old_pte)) 5852 ret = vmf_error(PTR_ERR(folio)); 5853 else 5854 ret = 0; 5855 goto out; 5856 } 5857 clear_huge_page(&folio->page, address, pages_per_huge_page(h)); 5858 __folio_mark_uptodate(folio); 5859 new_folio = true; 5860 5861 if (vma->vm_flags & VM_MAYSHARE) { 5862 int err = hugetlb_add_to_page_cache(folio, mapping, idx); 5863 if (err) { 5864 /* 5865 * err can't be -EEXIST which implies someone 5866 * else consumed the reservation since hugetlb 5867 * fault mutex is held when add a hugetlb page 5868 * to the page cache. So it's safe to call 5869 * restore_reserve_on_error() here. 5870 */ 5871 restore_reserve_on_error(h, vma, haddr, folio); 5872 folio_put(folio); 5873 goto out; 5874 } 5875 new_pagecache_folio = true; 5876 } else { 5877 folio_lock(folio); 5878 if (unlikely(anon_vma_prepare(vma))) { 5879 ret = VM_FAULT_OOM; 5880 goto backout_unlocked; 5881 } 5882 anon_rmap = 1; 5883 } 5884 } else { 5885 /* 5886 * If memory error occurs between mmap() and fault, some process 5887 * don't have hwpoisoned swap entry for errored virtual address. 5888 * So we need to block hugepage fault by PG_hwpoison bit check. 
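 * A folio found in the page cache may have been poisoned after it was
 * instantiated, so fail the fault with VM_FAULT_HWPOISON_LARGE in that case.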
5889 */ 5890 if (unlikely(folio_test_hwpoison(folio))) { 5891 ret = VM_FAULT_HWPOISON_LARGE | 5892 VM_FAULT_SET_HINDEX(hstate_index(h)); 5893 goto backout_unlocked; 5894 } 5895 5896 /* Check for page in userfault range. */ 5897 if (userfaultfd_minor(vma)) { 5898 folio_unlock(folio); 5899 folio_put(folio); 5900 /* See comment in userfaultfd_missing() block above */ 5901 if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) { 5902 ret = 0; 5903 goto out; 5904 } 5905 return hugetlb_handle_userfault(vma, mapping, idx, flags, 5906 haddr, address, 5907 VM_UFFD_MINOR); 5908 } 5909 } 5910 5911 /* 5912 * If we are going to COW a private mapping later, we examine the 5913 * pending reservations for this page now. This will ensure that 5914 * any allocations necessary to record that reservation occur outside 5915 * the spinlock. 5916 */ 5917 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 5918 if (vma_needs_reservation(h, vma, haddr) < 0) { 5919 ret = VM_FAULT_OOM; 5920 goto backout_unlocked; 5921 } 5922 /* Just decrements count, does not deallocate */ 5923 vma_end_reservation(h, vma, haddr); 5924 } 5925 5926 ptl = huge_pte_lock(h, mm, ptep); 5927 ret = 0; 5928 /* If pte changed from under us, retry */ 5929 if (!pte_same(huge_ptep_get(ptep), old_pte)) 5930 goto backout; 5931 5932 if (anon_rmap) 5933 hugepage_add_new_anon_rmap(folio, vma, haddr); 5934 else 5935 page_dup_file_rmap(&folio->page, true); 5936 new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE) 5937 && (vma->vm_flags & VM_SHARED))); 5938 /* 5939 * If this pte was previously wr-protected, keep it wr-protected even 5940 * if populated. 5941 */ 5942 if (unlikely(pte_marker_uffd_wp(old_pte))) 5943 new_pte = huge_pte_mkuffd_wp(new_pte); 5944 set_huge_pte_at(mm, haddr, ptep, new_pte); 5945 5946 hugetlb_count_add(pages_per_huge_page(h), mm); 5947 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 5948 /* Optimization, do the COW without a second fault */ 5949 ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl); 5950 } 5951 5952 spin_unlock(ptl); 5953 5954 /* 5955 * Only set hugetlb_migratable in newly allocated pages. Existing pages 5956 * found in the pagecache may not have hugetlb_migratable if they have 5957 * been isolated for migration. 5958 */ 5959 if (new_folio) 5960 folio_set_hugetlb_migratable(folio); 5961 5962 folio_unlock(folio); 5963 out: 5964 hugetlb_vma_unlock_read(vma); 5965 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 5966 return ret; 5967 5968 backout: 5969 spin_unlock(ptl); 5970 backout_unlocked: 5971 if (new_folio && !new_pagecache_folio) 5972 restore_reserve_on_error(h, vma, haddr, folio); 5973 5974 folio_unlock(folio); 5975 folio_put(folio); 5976 goto out; 5977 } 5978 5979 #ifdef CONFIG_SMP 5980 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) 5981 { 5982 unsigned long key[2]; 5983 u32 hash; 5984 5985 key[0] = (unsigned long) mapping; 5986 key[1] = idx; 5987 5988 hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0); 5989 5990 return hash & (num_fault_mutexes - 1); 5991 } 5992 #else 5993 /* 5994 * For uniprocessor systems we always use a single mutex, so just 5995 * return 0 and avoid the hashing overhead. 
5996 */ 5997 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) 5998 { 5999 return 0; 6000 } 6001 #endif 6002 6003 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 6004 unsigned long address, unsigned int flags) 6005 { 6006 pte_t *ptep, entry; 6007 spinlock_t *ptl; 6008 vm_fault_t ret; 6009 u32 hash; 6010 pgoff_t idx; 6011 struct page *page = NULL; 6012 struct folio *pagecache_folio = NULL; 6013 struct hstate *h = hstate_vma(vma); 6014 struct address_space *mapping; 6015 int need_wait_lock = 0; 6016 unsigned long haddr = address & huge_page_mask(h); 6017 6018 /* 6019 * Serialize hugepage allocation and instantiation, so that we don't 6020 * get spurious allocation failures if two CPUs race to instantiate 6021 * the same page in the page cache. 6022 */ 6023 mapping = vma->vm_file->f_mapping; 6024 idx = vma_hugecache_offset(h, vma, haddr); 6025 hash = hugetlb_fault_mutex_hash(mapping, idx); 6026 mutex_lock(&hugetlb_fault_mutex_table[hash]); 6027 6028 /* 6029 * Acquire vma lock before calling huge_pte_alloc and hold 6030 * until finished with ptep. This prevents huge_pmd_unshare from 6031 * being called elsewhere and making the ptep no longer valid. 6032 */ 6033 hugetlb_vma_lock_read(vma); 6034 ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h)); 6035 if (!ptep) { 6036 hugetlb_vma_unlock_read(vma); 6037 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6038 return VM_FAULT_OOM; 6039 } 6040 6041 entry = huge_ptep_get(ptep); 6042 /* PTE markers should be handled the same way as none pte */ 6043 if (huge_pte_none_mostly(entry)) 6044 /* 6045 * hugetlb_no_page will drop vma lock and hugetlb fault 6046 * mutex internally, which make us return immediately. 6047 */ 6048 return hugetlb_no_page(mm, vma, mapping, idx, address, ptep, 6049 entry, flags); 6050 6051 ret = 0; 6052 6053 /* 6054 * entry could be a migration/hwpoison entry at this point, so this 6055 * check prevents the kernel from going below assuming that we have 6056 * an active hugepage in pagecache. This goto expects the 2nd page 6057 * fault, and is_hugetlb_entry_(migration|hwpoisoned) check will 6058 * properly handle it. 6059 */ 6060 if (!pte_present(entry)) { 6061 if (unlikely(is_hugetlb_entry_migration(entry))) { 6062 /* 6063 * Release the hugetlb fault lock now, but retain 6064 * the vma lock, because it is needed to guard the 6065 * huge_pte_lockptr() later in 6066 * migration_entry_wait_huge(). The vma lock will 6067 * be released there. 6068 */ 6069 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6070 migration_entry_wait_huge(vma, ptep); 6071 return 0; 6072 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) 6073 ret = VM_FAULT_HWPOISON_LARGE | 6074 VM_FAULT_SET_HINDEX(hstate_index(h)); 6075 goto out_mutex; 6076 } 6077 6078 /* 6079 * If we are going to COW/unshare the mapping later, we examine the 6080 * pending reservations for this page now. This will ensure that any 6081 * allocations necessary to record that reservation occur outside the 6082 * spinlock. Also lookup the pagecache page now as it is used to 6083 * determine if a reservation has been consumed. 
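 * The pagecache folio looked up here is later passed to hugetlb_wp(),
 * which compares it against the faulting page to decide whether this
 * VMA's reserve has already been consumed.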
6084 */ 6085 if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) && 6086 !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(entry)) { 6087 if (vma_needs_reservation(h, vma, haddr) < 0) { 6088 ret = VM_FAULT_OOM; 6089 goto out_mutex; 6090 } 6091 /* Just decrements count, does not deallocate */ 6092 vma_end_reservation(h, vma, haddr); 6093 6094 pagecache_folio = filemap_lock_folio(mapping, idx); 6095 if (IS_ERR(pagecache_folio)) 6096 pagecache_folio = NULL; 6097 } 6098 6099 ptl = huge_pte_lock(h, mm, ptep); 6100 6101 /* Check for a racing update before calling hugetlb_wp() */ 6102 if (unlikely(!pte_same(entry, huge_ptep_get(ptep)))) 6103 goto out_ptl; 6104 6105 /* Handle userfault-wp first, before trying to lock more pages */ 6106 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(ptep)) && 6107 (flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) { 6108 struct vm_fault vmf = { 6109 .vma = vma, 6110 .address = haddr, 6111 .real_address = address, 6112 .flags = flags, 6113 }; 6114 6115 spin_unlock(ptl); 6116 if (pagecache_folio) { 6117 folio_unlock(pagecache_folio); 6118 folio_put(pagecache_folio); 6119 } 6120 hugetlb_vma_unlock_read(vma); 6121 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6122 return handle_userfault(&vmf, VM_UFFD_WP); 6123 } 6124 6125 /* 6126 * hugetlb_wp() requires page locks of pte_page(entry) and 6127 * pagecache_folio, so here we need take the former one 6128 * when page != pagecache_folio or !pagecache_folio. 6129 */ 6130 page = pte_page(entry); 6131 if (page_folio(page) != pagecache_folio) 6132 if (!trylock_page(page)) { 6133 need_wait_lock = 1; 6134 goto out_ptl; 6135 } 6136 6137 get_page(page); 6138 6139 if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { 6140 if (!huge_pte_write(entry)) { 6141 ret = hugetlb_wp(mm, vma, address, ptep, flags, 6142 pagecache_folio, ptl); 6143 goto out_put_page; 6144 } else if (likely(flags & FAULT_FLAG_WRITE)) { 6145 entry = huge_pte_mkdirty(entry); 6146 } 6147 } 6148 entry = pte_mkyoung(entry); 6149 if (huge_ptep_set_access_flags(vma, haddr, ptep, entry, 6150 flags & FAULT_FLAG_WRITE)) 6151 update_mmu_cache(vma, haddr, ptep); 6152 out_put_page: 6153 if (page_folio(page) != pagecache_folio) 6154 unlock_page(page); 6155 put_page(page); 6156 out_ptl: 6157 spin_unlock(ptl); 6158 6159 if (pagecache_folio) { 6160 folio_unlock(pagecache_folio); 6161 folio_put(pagecache_folio); 6162 } 6163 out_mutex: 6164 hugetlb_vma_unlock_read(vma); 6165 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6166 /* 6167 * Generally it's safe to hold refcount during waiting page lock. But 6168 * here we just wait to defer the next page fault to avoid busy loop and 6169 * the page is not used after unlocked before returning from the current 6170 * page fault. So we are safe from accessing freed page, even if we wait 6171 * here without taking refcount. 6172 */ 6173 if (need_wait_lock) 6174 wait_on_page_locked(page); 6175 return ret; 6176 } 6177 6178 #ifdef CONFIG_USERFAULTFD 6179 /* 6180 * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte 6181 * with modifications for hugetlb pages. 
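 * Three cases are handled below: MFILL_ATOMIC_CONTINUE maps an existing
 * pagecache folio; a first attempt allocates a folio and copies directly
 * from src_addr; a retry (non-NULL *foliop) copies the temporary folio the
 * caller filled outside mmap_lock into a newly allocated hugetlb folio.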
6182 */ 6183 int hugetlb_mfill_atomic_pte(pte_t *dst_pte, 6184 struct vm_area_struct *dst_vma, 6185 unsigned long dst_addr, 6186 unsigned long src_addr, 6187 uffd_flags_t flags, 6188 struct folio **foliop) 6189 { 6190 struct mm_struct *dst_mm = dst_vma->vm_mm; 6191 bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE); 6192 bool wp_enabled = (flags & MFILL_ATOMIC_WP); 6193 struct hstate *h = hstate_vma(dst_vma); 6194 struct address_space *mapping = dst_vma->vm_file->f_mapping; 6195 pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr); 6196 unsigned long size; 6197 int vm_shared = dst_vma->vm_flags & VM_SHARED; 6198 pte_t _dst_pte; 6199 spinlock_t *ptl; 6200 int ret = -ENOMEM; 6201 struct folio *folio; 6202 int writable; 6203 bool folio_in_pagecache = false; 6204 6205 if (is_continue) { 6206 ret = -EFAULT; 6207 folio = filemap_lock_folio(mapping, idx); 6208 if (IS_ERR(folio)) 6209 goto out; 6210 folio_in_pagecache = true; 6211 } else if (!*foliop) { 6212 /* If a folio already exists, then it's UFFDIO_COPY for 6213 * a non-missing case. Return -EEXIST. 6214 */ 6215 if (vm_shared && 6216 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { 6217 ret = -EEXIST; 6218 goto out; 6219 } 6220 6221 folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0); 6222 if (IS_ERR(folio)) { 6223 ret = -ENOMEM; 6224 goto out; 6225 } 6226 6227 ret = copy_folio_from_user(folio, (const void __user *) src_addr, 6228 false); 6229 6230 /* fallback to copy_from_user outside mmap_lock */ 6231 if (unlikely(ret)) { 6232 ret = -ENOENT; 6233 /* Free the allocated folio which may have 6234 * consumed a reservation. 6235 */ 6236 restore_reserve_on_error(h, dst_vma, dst_addr, folio); 6237 folio_put(folio); 6238 6239 /* Allocate a temporary folio to hold the copied 6240 * contents. 6241 */ 6242 folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr); 6243 if (!folio) { 6244 ret = -ENOMEM; 6245 goto out; 6246 } 6247 *foliop = folio; 6248 /* Set the outparam foliop and return to the caller to 6249 * copy the contents outside the lock. Don't free the 6250 * folio. 6251 */ 6252 goto out; 6253 } 6254 } else { 6255 if (vm_shared && 6256 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { 6257 folio_put(*foliop); 6258 ret = -EEXIST; 6259 *foliop = NULL; 6260 goto out; 6261 } 6262 6263 folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0); 6264 if (IS_ERR(folio)) { 6265 folio_put(*foliop); 6266 ret = -ENOMEM; 6267 *foliop = NULL; 6268 goto out; 6269 } 6270 ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma); 6271 folio_put(*foliop); 6272 *foliop = NULL; 6273 if (ret) { 6274 folio_put(folio); 6275 goto out; 6276 } 6277 } 6278 6279 /* 6280 * The memory barrier inside __folio_mark_uptodate makes sure that 6281 * preceding stores to the page contents become visible before 6282 * the set_pte_at() write. 6283 */ 6284 __folio_mark_uptodate(folio); 6285 6286 /* Add shared, newly allocated pages to the page cache. */ 6287 if (vm_shared && !is_continue) { 6288 size = i_size_read(mapping->host) >> huge_page_shift(h); 6289 ret = -EFAULT; 6290 if (idx >= size) 6291 goto out_release_nounlock; 6292 6293 /* 6294 * Serialization between remove_inode_hugepages() and 6295 * hugetlb_add_to_page_cache() below happens through the 6296 * hugetlb_fault_mutex_table that here must be hold by 6297 * the caller. 
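 * If the insert fails, folio_in_pagecache stays false and the error path
 * below restores the reservation before dropping the folio.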
6298 */ 6299 ret = hugetlb_add_to_page_cache(folio, mapping, idx); 6300 if (ret) 6301 goto out_release_nounlock; 6302 folio_in_pagecache = true; 6303 } 6304 6305 ptl = huge_pte_lock(h, dst_mm, dst_pte); 6306 6307 ret = -EIO; 6308 if (folio_test_hwpoison(folio)) 6309 goto out_release_unlock; 6310 6311 /* 6312 * We allow to overwrite a pte marker: consider when both MISSING|WP 6313 * registered, we firstly wr-protect a none pte which has no page cache 6314 * page backing it, then access the page. 6315 */ 6316 ret = -EEXIST; 6317 if (!huge_pte_none_mostly(huge_ptep_get(dst_pte))) 6318 goto out_release_unlock; 6319 6320 if (folio_in_pagecache) 6321 page_dup_file_rmap(&folio->page, true); 6322 else 6323 hugepage_add_new_anon_rmap(folio, dst_vma, dst_addr); 6324 6325 /* 6326 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY 6327 * with wp flag set, don't set pte write bit. 6328 */ 6329 if (wp_enabled || (is_continue && !vm_shared)) 6330 writable = 0; 6331 else 6332 writable = dst_vma->vm_flags & VM_WRITE; 6333 6334 _dst_pte = make_huge_pte(dst_vma, &folio->page, writable); 6335 /* 6336 * Always mark UFFDIO_COPY page dirty; note that this may not be 6337 * extremely important for hugetlbfs for now since swapping is not 6338 * supported, but we should still be clear in that this page cannot be 6339 * thrown away at will, even if write bit not set. 6340 */ 6341 _dst_pte = huge_pte_mkdirty(_dst_pte); 6342 _dst_pte = pte_mkyoung(_dst_pte); 6343 6344 if (wp_enabled) 6345 _dst_pte = huge_pte_mkuffd_wp(_dst_pte); 6346 6347 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); 6348 6349 hugetlb_count_add(pages_per_huge_page(h), dst_mm); 6350 6351 /* No need to invalidate - it was non-present before */ 6352 update_mmu_cache(dst_vma, dst_addr, dst_pte); 6353 6354 spin_unlock(ptl); 6355 if (!is_continue) 6356 folio_set_hugetlb_migratable(folio); 6357 if (vm_shared || is_continue) 6358 folio_unlock(folio); 6359 ret = 0; 6360 out: 6361 return ret; 6362 out_release_unlock: 6363 spin_unlock(ptl); 6364 if (vm_shared || is_continue) 6365 folio_unlock(folio); 6366 out_release_nounlock: 6367 if (!folio_in_pagecache) 6368 restore_reserve_on_error(h, dst_vma, dst_addr, folio); 6369 folio_put(folio); 6370 goto out; 6371 } 6372 #endif /* CONFIG_USERFAULTFD */ 6373 6374 static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma, 6375 int refs, struct page **pages, 6376 struct vm_area_struct **vmas) 6377 { 6378 int nr; 6379 6380 for (nr = 0; nr < refs; nr++) { 6381 if (likely(pages)) 6382 pages[nr] = nth_page(page, nr); 6383 if (vmas) 6384 vmas[nr] = vma; 6385 } 6386 } 6387 6388 static inline bool __follow_hugetlb_must_fault(struct vm_area_struct *vma, 6389 unsigned int flags, pte_t *pte, 6390 bool *unshare) 6391 { 6392 pte_t pteval = huge_ptep_get(pte); 6393 6394 *unshare = false; 6395 if (is_swap_pte(pteval)) 6396 return true; 6397 if (huge_pte_write(pteval)) 6398 return false; 6399 if (flags & FOLL_WRITE) 6400 return true; 6401 if (gup_must_unshare(vma, flags, pte_page(pteval))) { 6402 *unshare = true; 6403 return true; 6404 } 6405 return false; 6406 } 6407 6408 struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma, 6409 unsigned long address, unsigned int flags) 6410 { 6411 struct hstate *h = hstate_vma(vma); 6412 struct mm_struct *mm = vma->vm_mm; 6413 unsigned long haddr = address & huge_page_mask(h); 6414 struct page *page = NULL; 6415 spinlock_t *ptl; 6416 pte_t *pte, entry; 6417 6418 /* 6419 * FOLL_PIN is not supported for follow_page(). 
Ordinary GUP goes via 6420 * follow_hugetlb_page(). 6421 */ 6422 if (WARN_ON_ONCE(flags & FOLL_PIN)) 6423 return NULL; 6424 6425 hugetlb_vma_lock_read(vma); 6426 pte = hugetlb_walk(vma, haddr, huge_page_size(h)); 6427 if (!pte) 6428 goto out_unlock; 6429 6430 ptl = huge_pte_lock(h, mm, pte); 6431 entry = huge_ptep_get(pte); 6432 if (pte_present(entry)) { 6433 page = pte_page(entry) + 6434 ((address & ~huge_page_mask(h)) >> PAGE_SHIFT); 6435 /* 6436 * Note that page may be a sub-page, and with vmemmap 6437 * optimizations the page struct may be read only. 6438 * try_grab_page() will increase the ref count on the 6439 * head page, so this will be OK. 6440 * 6441 * try_grab_page() should always be able to get the page here, 6442 * because we hold the ptl lock and have verified pte_present(). 6443 */ 6444 if (try_grab_page(page, flags)) { 6445 page = NULL; 6446 goto out; 6447 } 6448 } 6449 out: 6450 spin_unlock(ptl); 6451 out_unlock: 6452 hugetlb_vma_unlock_read(vma); 6453 return page; 6454 } 6455 6456 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, 6457 struct page **pages, struct vm_area_struct **vmas, 6458 unsigned long *position, unsigned long *nr_pages, 6459 long i, unsigned int flags, int *locked) 6460 { 6461 unsigned long pfn_offset; 6462 unsigned long vaddr = *position; 6463 unsigned long remainder = *nr_pages; 6464 struct hstate *h = hstate_vma(vma); 6465 int err = -EFAULT, refs; 6466 6467 while (vaddr < vma->vm_end && remainder) { 6468 pte_t *pte; 6469 spinlock_t *ptl = NULL; 6470 bool unshare = false; 6471 int absent; 6472 struct page *page; 6473 6474 /* 6475 * If we have a pending SIGKILL, don't keep faulting pages and 6476 * potentially allocating memory. 6477 */ 6478 if (fatal_signal_pending(current)) { 6479 remainder = 0; 6480 break; 6481 } 6482 6483 hugetlb_vma_lock_read(vma); 6484 /* 6485 * Some archs (sparc64, sh*) have multiple pte_ts to 6486 * each hugepage. We have to make sure we get the 6487 * first, for the page indexing below to work. 6488 * 6489 * Note that page table lock is not held when pte is null. 6490 */ 6491 pte = hugetlb_walk(vma, vaddr & huge_page_mask(h), 6492 huge_page_size(h)); 6493 if (pte) 6494 ptl = huge_pte_lock(h, mm, pte); 6495 absent = !pte || huge_pte_none(huge_ptep_get(pte)); 6496 6497 /* 6498 * When coredumping, it suits get_dump_page if we just return 6499 * an error where there's an empty slot with no huge pagecache 6500 * to back it. This way, we avoid allocating a hugepage, and 6501 * the sparse dumpfile avoids allocating disk blocks, but its 6502 * huge holes still show up with zeroes where they need to be. 6503 */ 6504 if (absent && (flags & FOLL_DUMP) && 6505 !hugetlbfs_pagecache_present(h, vma, vaddr)) { 6506 if (pte) 6507 spin_unlock(ptl); 6508 hugetlb_vma_unlock_read(vma); 6509 remainder = 0; 6510 break; 6511 } 6512 6513 /* 6514 * We need call hugetlb_fault for both hugepages under migration 6515 * (in which case hugetlb_fault waits for the migration,) and 6516 * hwpoisoned hugepages (in which case we need to prevent the 6517 * caller from accessing to them.) In order to do this, we use 6518 * here is_swap_pte instead of is_hugetlb_entry_migration and 6519 * is_hugetlb_entry_hwpoisoned. This is because it simply covers 6520 * both cases, and because we can't follow correct pages 6521 * directly from any kind of swap entries. 
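 * __follow_hugetlb_must_fault() additionally forces a fault for write
 * access to a read-only pte and for read-only pins that must first
 * unshare the page (FAULT_FLAG_UNSHARE).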
6522 */ 6523 if (absent || 6524 __follow_hugetlb_must_fault(vma, flags, pte, &unshare)) { 6525 vm_fault_t ret; 6526 unsigned int fault_flags = 0; 6527 6528 if (pte) 6529 spin_unlock(ptl); 6530 hugetlb_vma_unlock_read(vma); 6531 6532 if (flags & FOLL_WRITE) 6533 fault_flags |= FAULT_FLAG_WRITE; 6534 else if (unshare) 6535 fault_flags |= FAULT_FLAG_UNSHARE; 6536 if (locked) { 6537 fault_flags |= FAULT_FLAG_ALLOW_RETRY | 6538 FAULT_FLAG_KILLABLE; 6539 if (flags & FOLL_INTERRUPTIBLE) 6540 fault_flags |= FAULT_FLAG_INTERRUPTIBLE; 6541 } 6542 if (flags & FOLL_NOWAIT) 6543 fault_flags |= FAULT_FLAG_ALLOW_RETRY | 6544 FAULT_FLAG_RETRY_NOWAIT; 6545 if (flags & FOLL_TRIED) { 6546 /* 6547 * Note: FAULT_FLAG_ALLOW_RETRY and 6548 * FAULT_FLAG_TRIED can co-exist 6549 */ 6550 fault_flags |= FAULT_FLAG_TRIED; 6551 } 6552 ret = hugetlb_fault(mm, vma, vaddr, fault_flags); 6553 if (ret & VM_FAULT_ERROR) { 6554 err = vm_fault_to_errno(ret, flags); 6555 remainder = 0; 6556 break; 6557 } 6558 if (ret & VM_FAULT_RETRY) { 6559 if (locked && 6560 !(fault_flags & FAULT_FLAG_RETRY_NOWAIT)) 6561 *locked = 0; 6562 *nr_pages = 0; 6563 /* 6564 * VM_FAULT_RETRY must not return an 6565 * error, it will return zero 6566 * instead. 6567 * 6568 * No need to update "position" as the 6569 * caller will not check it after 6570 * *nr_pages is set to 0. 6571 */ 6572 return i; 6573 } 6574 continue; 6575 } 6576 6577 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; 6578 page = pte_page(huge_ptep_get(pte)); 6579 6580 VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) && 6581 !PageAnonExclusive(page), page); 6582 6583 /* 6584 * If subpage information not requested, update counters 6585 * and skip the same_page loop below. 6586 */ 6587 if (!pages && !vmas && !pfn_offset && 6588 (vaddr + huge_page_size(h) < vma->vm_end) && 6589 (remainder >= pages_per_huge_page(h))) { 6590 vaddr += huge_page_size(h); 6591 remainder -= pages_per_huge_page(h); 6592 i += pages_per_huge_page(h); 6593 spin_unlock(ptl); 6594 hugetlb_vma_unlock_read(vma); 6595 continue; 6596 } 6597 6598 /* vaddr may not be aligned to PAGE_SIZE */ 6599 refs = min3(pages_per_huge_page(h) - pfn_offset, remainder, 6600 (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT); 6601 6602 if (pages || vmas) 6603 record_subpages_vmas(nth_page(page, pfn_offset), 6604 vma, refs, 6605 likely(pages) ? pages + i : NULL, 6606 vmas ? vmas + i : NULL); 6607 6608 if (pages) { 6609 /* 6610 * try_grab_folio() should always succeed here, 6611 * because: a) we hold the ptl lock, and b) we've just 6612 * checked that the huge page is present in the page 6613 * tables. If the huge page is present, then the tail 6614 * pages must also be present. The ptl prevents the 6615 * head page and tail pages from being rearranged in 6616 * any way. As this is hugetlb, the pages will never 6617 * be p2pdma or not longterm pinable. So this page 6618 * must be available at this point, unless the page 6619 * refcount overflowed: 6620 */ 6621 if (WARN_ON_ONCE(!try_grab_folio(pages[i], refs, 6622 flags))) { 6623 spin_unlock(ptl); 6624 hugetlb_vma_unlock_read(vma); 6625 remainder = 0; 6626 err = -ENOMEM; 6627 break; 6628 } 6629 } 6630 6631 vaddr += (refs << PAGE_SHIFT); 6632 remainder -= refs; 6633 i += refs; 6634 6635 spin_unlock(ptl); 6636 hugetlb_vma_unlock_read(vma); 6637 } 6638 *nr_pages = remainder; 6639 /* 6640 * setting position is actually required only if remainder is 6641 * not zero but it's faster not to add a "if (remainder)" 6642 * branch. 6643 */ 6644 *position = vaddr; 6645 6646 return i ? 
i : err; 6647 } 6648 6649 long hugetlb_change_protection(struct vm_area_struct *vma, 6650 unsigned long address, unsigned long end, 6651 pgprot_t newprot, unsigned long cp_flags) 6652 { 6653 struct mm_struct *mm = vma->vm_mm; 6654 unsigned long start = address; 6655 pte_t *ptep; 6656 pte_t pte; 6657 struct hstate *h = hstate_vma(vma); 6658 long pages = 0, psize = huge_page_size(h); 6659 bool shared_pmd = false; 6660 struct mmu_notifier_range range; 6661 unsigned long last_addr_mask; 6662 bool uffd_wp = cp_flags & MM_CP_UFFD_WP; 6663 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; 6664 6665 /* 6666 * In the case of shared PMDs, the area to flush could be beyond 6667 * start/end. Set range.start/range.end to cover the maximum possible 6668 * range if PMD sharing is possible. 6669 */ 6670 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 6671 0, mm, start, end); 6672 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 6673 6674 BUG_ON(address >= end); 6675 flush_cache_range(vma, range.start, range.end); 6676 6677 mmu_notifier_invalidate_range_start(&range); 6678 hugetlb_vma_lock_write(vma); 6679 i_mmap_lock_write(vma->vm_file->f_mapping); 6680 last_addr_mask = hugetlb_mask_last_page(h); 6681 for (; address < end; address += psize) { 6682 spinlock_t *ptl; 6683 ptep = hugetlb_walk(vma, address, psize); 6684 if (!ptep) { 6685 if (!uffd_wp) { 6686 address |= last_addr_mask; 6687 continue; 6688 } 6689 /* 6690 * Userfaultfd wr-protect requires pgtable 6691 * pre-allocations to install pte markers. 6692 */ 6693 ptep = huge_pte_alloc(mm, vma, address, psize); 6694 if (!ptep) { 6695 pages = -ENOMEM; 6696 break; 6697 } 6698 } 6699 ptl = huge_pte_lock(h, mm, ptep); 6700 if (huge_pmd_unshare(mm, vma, address, ptep)) { 6701 /* 6702 * When uffd-wp is enabled on the vma, unshare 6703 * shouldn't happen at all. Warn about it if it 6704 * happened due to some reason. 6705 */ 6706 WARN_ON_ONCE(uffd_wp || uffd_wp_resolve); 6707 pages++; 6708 spin_unlock(ptl); 6709 shared_pmd = true; 6710 address |= last_addr_mask; 6711 continue; 6712 } 6713 pte = huge_ptep_get(ptep); 6714 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { 6715 /* Nothing to do. */ 6716 } else if (unlikely(is_hugetlb_entry_migration(pte))) { 6717 swp_entry_t entry = pte_to_swp_entry(pte); 6718 struct page *page = pfn_swap_entry_to_page(entry); 6719 pte_t newpte = pte; 6720 6721 if (is_writable_migration_entry(entry)) { 6722 if (PageAnon(page)) 6723 entry = make_readable_exclusive_migration_entry( 6724 swp_offset(entry)); 6725 else 6726 entry = make_readable_migration_entry( 6727 swp_offset(entry)); 6728 newpte = swp_entry_to_pte(entry); 6729 pages++; 6730 } 6731 6732 if (uffd_wp) 6733 newpte = pte_swp_mkuffd_wp(newpte); 6734 else if (uffd_wp_resolve) 6735 newpte = pte_swp_clear_uffd_wp(newpte); 6736 if (!pte_same(pte, newpte)) 6737 set_huge_pte_at(mm, address, ptep, newpte); 6738 } else if (unlikely(is_pte_marker(pte))) { 6739 /* No other markers apply for now. */ 6740 WARN_ON_ONCE(!pte_marker_uffd_wp(pte)); 6741 if (uffd_wp_resolve) 6742 /* Safe to modify directly (non-present->none). 
*/ 6743 huge_pte_clear(mm, address, ptep, psize); 6744 } else if (!huge_pte_none(pte)) { 6745 pte_t old_pte; 6746 unsigned int shift = huge_page_shift(hstate_vma(vma)); 6747 6748 old_pte = huge_ptep_modify_prot_start(vma, address, ptep); 6749 pte = huge_pte_modify(old_pte, newprot); 6750 pte = arch_make_huge_pte(pte, shift, vma->vm_flags); 6751 if (uffd_wp) 6752 pte = huge_pte_mkuffd_wp(pte); 6753 else if (uffd_wp_resolve) 6754 pte = huge_pte_clear_uffd_wp(pte); 6755 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte); 6756 pages++; 6757 } else { 6758 /* None pte */ 6759 if (unlikely(uffd_wp)) 6760 /* Safe to modify directly (none->non-present). */ 6761 set_huge_pte_at(mm, address, ptep, 6762 make_pte_marker(PTE_MARKER_UFFD_WP)); 6763 } 6764 spin_unlock(ptl); 6765 } 6766 /* 6767 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare 6768 * may have cleared our pud entry and done put_page on the page table: 6769 * once we release i_mmap_rwsem, another task can do the final put_page 6770 * and that page table be reused and filled with junk. If we actually 6771 * did unshare a page of pmds, flush the range corresponding to the pud. 6772 */ 6773 if (shared_pmd) 6774 flush_hugetlb_tlb_range(vma, range.start, range.end); 6775 else 6776 flush_hugetlb_tlb_range(vma, start, end); 6777 /* 6778 * No need to call mmu_notifier_invalidate_range() we are downgrading 6779 * page table protection not changing it to point to a new page. 6780 * 6781 * See Documentation/mm/mmu_notifier.rst 6782 */ 6783 i_mmap_unlock_write(vma->vm_file->f_mapping); 6784 hugetlb_vma_unlock_write(vma); 6785 mmu_notifier_invalidate_range_end(&range); 6786 6787 return pages > 0 ? (pages << h->order) : pages; 6788 } 6789 6790 /* Return true if reservation was successful, false otherwise. */ 6791 bool hugetlb_reserve_pages(struct inode *inode, 6792 long from, long to, 6793 struct vm_area_struct *vma, 6794 vm_flags_t vm_flags) 6795 { 6796 long chg = -1, add = -1; 6797 struct hstate *h = hstate_inode(inode); 6798 struct hugepage_subpool *spool = subpool_inode(inode); 6799 struct resv_map *resv_map; 6800 struct hugetlb_cgroup *h_cg = NULL; 6801 long gbl_reserve, regions_needed = 0; 6802 6803 /* This should never happen */ 6804 if (from > to) { 6805 VM_WARN(1, "%s called with a negative range\n", __func__); 6806 return false; 6807 } 6808 6809 /* 6810 * vma specific semaphore used for pmd sharing and fault/truncation 6811 * synchronization 6812 */ 6813 hugetlb_vma_lock_alloc(vma); 6814 6815 /* 6816 * Only apply hugepage reservation if asked. At fault time, an 6817 * attempt will be made for VM_NORESERVE to allocate a page 6818 * without using reserves 6819 */ 6820 if (vm_flags & VM_NORESERVE) 6821 return true; 6822 6823 /* 6824 * Shared mappings base their reservation on the number of pages that 6825 * are already allocated on behalf of the file. Private mappings need 6826 * to reserve the full area even if read-only as mprotect() may be 6827 * called to make the mapping read-write. Assume !vma is a shm mapping 6828 */ 6829 if (!vma || vma->vm_flags & VM_MAYSHARE) { 6830 /* 6831 * resv_map can not be NULL as hugetlb_reserve_pages is only 6832 * called for inodes for which resv_maps were created (see 6833 * hugetlbfs_get_inode). 6834 */ 6835 resv_map = inode_resv_map(inode); 6836 6837 chg = region_chg(resv_map, from, to, ®ions_needed); 6838 } else { 6839 /* Private mapping. 
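 * The reservation must cover the whole range (to - from) and the
 * resv_map is owned by this VMA (HPAGE_RESV_OWNER).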
*/ 6840 resv_map = resv_map_alloc(); 6841 if (!resv_map) 6842 goto out_err; 6843 6844 chg = to - from; 6845 6846 set_vma_resv_map(vma, resv_map); 6847 set_vma_resv_flags(vma, HPAGE_RESV_OWNER); 6848 } 6849 6850 if (chg < 0) 6851 goto out_err; 6852 6853 if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h), 6854 chg * pages_per_huge_page(h), &h_cg) < 0) 6855 goto out_err; 6856 6857 if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) { 6858 /* For private mappings, the hugetlb_cgroup uncharge info hangs 6859 * of the resv_map. 6860 */ 6861 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h); 6862 } 6863 6864 /* 6865 * There must be enough pages in the subpool for the mapping. If 6866 * the subpool has a minimum size, there may be some global 6867 * reservations already in place (gbl_reserve). 6868 */ 6869 gbl_reserve = hugepage_subpool_get_pages(spool, chg); 6870 if (gbl_reserve < 0) 6871 goto out_uncharge_cgroup; 6872 6873 /* 6874 * Check enough hugepages are available for the reservation. 6875 * Hand the pages back to the subpool if there are not 6876 */ 6877 if (hugetlb_acct_memory(h, gbl_reserve) < 0) 6878 goto out_put_pages; 6879 6880 /* 6881 * Account for the reservations made. Shared mappings record regions 6882 * that have reservations as they are shared by multiple VMAs. 6883 * When the last VMA disappears, the region map says how much 6884 * the reservation was and the page cache tells how much of 6885 * the reservation was consumed. Private mappings are per-VMA and 6886 * only the consumed reservations are tracked. When the VMA 6887 * disappears, the original reservation is the VMA size and the 6888 * consumed reservations are stored in the map. Hence, nothing 6889 * else has to be done for private mappings here 6890 */ 6891 if (!vma || vma->vm_flags & VM_MAYSHARE) { 6892 add = region_add(resv_map, from, to, regions_needed, h, h_cg); 6893 6894 if (unlikely(add < 0)) { 6895 hugetlb_acct_memory(h, -gbl_reserve); 6896 goto out_put_pages; 6897 } else if (unlikely(chg > add)) { 6898 /* 6899 * pages in this range were added to the reserve 6900 * map between region_chg and region_add. This 6901 * indicates a race with alloc_hugetlb_folio. Adjust 6902 * the subpool and reserve counts modified above 6903 * based on the difference. 6904 */ 6905 long rsv_adjust; 6906 6907 /* 6908 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the 6909 * reference to h_cg->css. See comment below for detail. 6910 */ 6911 hugetlb_cgroup_uncharge_cgroup_rsvd( 6912 hstate_index(h), 6913 (chg - add) * pages_per_huge_page(h), h_cg); 6914 6915 rsv_adjust = hugepage_subpool_put_pages(spool, 6916 chg - add); 6917 hugetlb_acct_memory(h, -rsv_adjust); 6918 } else if (h_cg) { 6919 /* 6920 * The file_regions will hold their own reference to 6921 * h_cg->css. So we should release the reference held 6922 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are 6923 * done. 6924 */ 6925 hugetlb_cgroup_put_rsvd_cgroup(h_cg); 6926 } 6927 } 6928 return true; 6929 6930 out_put_pages: 6931 /* put back original number of pages, chg */ 6932 (void)hugepage_subpool_put_pages(spool, chg); 6933 out_uncharge_cgroup: 6934 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h), 6935 chg * pages_per_huge_page(h), h_cg); 6936 out_err: 6937 hugetlb_vma_lock_free(vma); 6938 if (!vma || vma->vm_flags & VM_MAYSHARE) 6939 /* Only call region_abort if the region_chg succeeded but the 6940 * region_add failed or didn't run. 
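 * chg and add are initialized to -1 above, so this is exactly the
 * chg >= 0 && add < 0 check below.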
6941 */ 6942 if (chg >= 0 && add < 0) 6943 region_abort(resv_map, from, to, regions_needed); 6944 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 6945 kref_put(&resv_map->refs, resv_map_release); 6946 return false; 6947 } 6948 6949 long hugetlb_unreserve_pages(struct inode *inode, long start, long end, 6950 long freed) 6951 { 6952 struct hstate *h = hstate_inode(inode); 6953 struct resv_map *resv_map = inode_resv_map(inode); 6954 long chg = 0; 6955 struct hugepage_subpool *spool = subpool_inode(inode); 6956 long gbl_reserve; 6957 6958 /* 6959 * Since this routine can be called in the evict inode path for all 6960 * hugetlbfs inodes, resv_map could be NULL. 6961 */ 6962 if (resv_map) { 6963 chg = region_del(resv_map, start, end); 6964 /* 6965 * region_del() can fail in the rare case where a region 6966 * must be split and another region descriptor can not be 6967 * allocated. If end == LONG_MAX, it will not fail. 6968 */ 6969 if (chg < 0) 6970 return chg; 6971 } 6972 6973 spin_lock(&inode->i_lock); 6974 inode->i_blocks -= (blocks_per_huge_page(h) * freed); 6975 spin_unlock(&inode->i_lock); 6976 6977 /* 6978 * If the subpool has a minimum size, the number of global 6979 * reservations to be released may be adjusted. 6980 * 6981 * Note that !resv_map implies freed == 0. So (chg - freed) 6982 * won't go negative. 6983 */ 6984 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed)); 6985 hugetlb_acct_memory(h, -gbl_reserve); 6986 6987 return 0; 6988 } 6989 6990 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE 6991 static unsigned long page_table_shareable(struct vm_area_struct *svma, 6992 struct vm_area_struct *vma, 6993 unsigned long addr, pgoff_t idx) 6994 { 6995 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) + 6996 svma->vm_start; 6997 unsigned long sbase = saddr & PUD_MASK; 6998 unsigned long s_end = sbase + PUD_SIZE; 6999 7000 /* Allow segments to share if only one is marked locked */ 7001 unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED_MASK; 7002 unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED_MASK; 7003 7004 /* 7005 * match the virtual addresses, permission and the alignment of the 7006 * page table page. 7007 * 7008 * Also, vma_lock (vm_private_data) is required for sharing. 7009 */ 7010 if (pmd_index(addr) != pmd_index(saddr) || 7011 vm_flags != svm_flags || 7012 !range_in_vma(svma, sbase, s_end) || 7013 !svma->vm_private_data) 7014 return 0; 7015 7016 return saddr; 7017 } 7018 7019 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) 7020 { 7021 unsigned long start = addr & PUD_MASK; 7022 unsigned long end = start + PUD_SIZE; 7023 7024 #ifdef CONFIG_USERFAULTFD 7025 if (uffd_disable_huge_pmd_share(vma)) 7026 return false; 7027 #endif 7028 /* 7029 * check on proper vm_flags and page table alignment 7030 */ 7031 if (!(vma->vm_flags & VM_MAYSHARE)) 7032 return false; 7033 if (!vma->vm_private_data) /* vma lock required for sharing */ 7034 return false; 7035 if (!range_in_vma(vma, start, end)) 7036 return false; 7037 return true; 7038 } 7039 7040 /* 7041 * Determine if start,end range within vma could be mapped by shared pmd. 7042 * If yes, adjust start and end to cover range associated with possible 7043 * shared pmd mappings. 
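 * For example, with a 1 GiB PUD_SIZE (illustrative), a range of
 * [PUD + 4 MiB, PUD + 8 MiB) inside a sufficiently large sharable VMA is
 * widened to the full [PUD, PUD + 1 GiB) so any shared PMD page is covered.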
7044 */ 7045 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, 7046 unsigned long *start, unsigned long *end) 7047 { 7048 unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE), 7049 v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE); 7050 7051 /* 7052 * vma needs to span at least one aligned PUD size, and the range 7053 * must be at least partially within in. 7054 */ 7055 if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) || 7056 (*end <= v_start) || (*start >= v_end)) 7057 return; 7058 7059 /* Extend the range to be PUD aligned for a worst case scenario */ 7060 if (*start > v_start) 7061 *start = ALIGN_DOWN(*start, PUD_SIZE); 7062 7063 if (*end < v_end) 7064 *end = ALIGN(*end, PUD_SIZE); 7065 } 7066 7067 /* 7068 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() 7069 * and returns the corresponding pte. While this is not necessary for the 7070 * !shared pmd case because we can allocate the pmd later as well, it makes the 7071 * code much cleaner. pmd allocation is essential for the shared case because 7072 * pud has to be populated inside the same i_mmap_rwsem section - otherwise 7073 * racing tasks could either miss the sharing (see huge_pte_offset) or select a 7074 * bad pmd for sharing. 7075 */ 7076 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, 7077 unsigned long addr, pud_t *pud) 7078 { 7079 struct address_space *mapping = vma->vm_file->f_mapping; 7080 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + 7081 vma->vm_pgoff; 7082 struct vm_area_struct *svma; 7083 unsigned long saddr; 7084 pte_t *spte = NULL; 7085 pte_t *pte; 7086 spinlock_t *ptl; 7087 7088 i_mmap_lock_read(mapping); 7089 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { 7090 if (svma == vma) 7091 continue; 7092 7093 saddr = page_table_shareable(svma, vma, addr, idx); 7094 if (saddr) { 7095 spte = hugetlb_walk(svma, saddr, 7096 vma_mmu_pagesize(svma)); 7097 if (spte) { 7098 get_page(virt_to_page(spte)); 7099 break; 7100 } 7101 } 7102 } 7103 7104 if (!spte) 7105 goto out; 7106 7107 ptl = huge_pte_lock(hstate_vma(vma), mm, spte); 7108 if (pud_none(*pud)) { 7109 pud_populate(mm, pud, 7110 (pmd_t *)((unsigned long)spte & PAGE_MASK)); 7111 mm_inc_nr_pmds(mm); 7112 } else { 7113 put_page(virt_to_page(spte)); 7114 } 7115 spin_unlock(ptl); 7116 out: 7117 pte = (pte_t *)pmd_alloc(mm, pud, addr); 7118 i_mmap_unlock_read(mapping); 7119 return pte; 7120 } 7121 7122 /* 7123 * unmap huge page backed by shared pte. 7124 * 7125 * Hugetlb pte page is ref counted at the time of mapping. If pte is shared 7126 * indicated by page_count > 1, unmap is achieved by clearing pud and 7127 * decrementing the ref count. If count == 1, the pte page is not shared. 7128 * 7129 * Called with page table lock held. 
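 * i_mmap_rwsem must be held in write mode and the hugetlb vma lock must be
 * held as well; both are asserted below.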
7130 * 7131 * returns: 1 successfully unmapped a shared pte page 7132 * 0 the underlying pte page is not shared, or it is the last user 7133 */ 7134 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, 7135 unsigned long addr, pte_t *ptep) 7136 { 7137 pgd_t *pgd = pgd_offset(mm, addr); 7138 p4d_t *p4d = p4d_offset(pgd, addr); 7139 pud_t *pud = pud_offset(p4d, addr); 7140 7141 i_mmap_assert_write_locked(vma->vm_file->f_mapping); 7142 hugetlb_vma_assert_locked(vma); 7143 BUG_ON(page_count(virt_to_page(ptep)) == 0); 7144 if (page_count(virt_to_page(ptep)) == 1) 7145 return 0; 7146 7147 pud_clear(pud); 7148 put_page(virt_to_page(ptep)); 7149 mm_dec_nr_pmds(mm); 7150 return 1; 7151 } 7152 7153 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ 7154 7155 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, 7156 unsigned long addr, pud_t *pud) 7157 { 7158 return NULL; 7159 } 7160 7161 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, 7162 unsigned long addr, pte_t *ptep) 7163 { 7164 return 0; 7165 } 7166 7167 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, 7168 unsigned long *start, unsigned long *end) 7169 { 7170 } 7171 7172 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) 7173 { 7174 return false; 7175 } 7176 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ 7177 7178 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB 7179 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, 7180 unsigned long addr, unsigned long sz) 7181 { 7182 pgd_t *pgd; 7183 p4d_t *p4d; 7184 pud_t *pud; 7185 pte_t *pte = NULL; 7186 7187 pgd = pgd_offset(mm, addr); 7188 p4d = p4d_alloc(mm, pgd, addr); 7189 if (!p4d) 7190 return NULL; 7191 pud = pud_alloc(mm, p4d, addr); 7192 if (pud) { 7193 if (sz == PUD_SIZE) { 7194 pte = (pte_t *)pud; 7195 } else { 7196 BUG_ON(sz != PMD_SIZE); 7197 if (want_pmd_share(vma, addr) && pud_none(*pud)) 7198 pte = huge_pmd_share(mm, vma, addr, pud); 7199 else 7200 pte = (pte_t *)pmd_alloc(mm, pud, addr); 7201 } 7202 } 7203 BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte)); 7204 7205 return pte; 7206 } 7207 7208 /* 7209 * huge_pte_offset() - Walk the page table to resolve the hugepage 7210 * entry at address @addr 7211 * 7212 * Return: Pointer to page table entry (PUD or PMD) for 7213 * address @addr, or NULL if a !p*d_present() entry is encountered and the 7214 * size @sz doesn't match the hugepage size at this level of the page 7215 * table. 7216 */ 7217 pte_t *huge_pte_offset(struct mm_struct *mm, 7218 unsigned long addr, unsigned long sz) 7219 { 7220 pgd_t *pgd; 7221 p4d_t *p4d; 7222 pud_t *pud; 7223 pmd_t *pmd; 7224 7225 pgd = pgd_offset(mm, addr); 7226 if (!pgd_present(*pgd)) 7227 return NULL; 7228 p4d = p4d_offset(pgd, addr); 7229 if (!p4d_present(*p4d)) 7230 return NULL; 7231 7232 pud = pud_offset(p4d, addr); 7233 if (sz == PUD_SIZE) 7234 /* must be pud huge, non-present or none */ 7235 return (pte_t *)pud; 7236 if (!pud_present(*pud)) 7237 return NULL; 7238 /* must have a valid entry and size to go further */ 7239 7240 pmd = pmd_offset(pud, addr); 7241 /* must be pmd huge, non-present or none */ 7242 return (pte_t *)pmd; 7243 } 7244 7245 /* 7246 * Return a mask that can be used to update an address to the last huge 7247 * page in a page table page mapping size. Used to skip non-present 7248 * page table entries when linearly scanning address ranges. Architectures 7249 * with unique huge page to page table relationships can define their own 7250 * version of this routine. 
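 * For example, for PMD-sized huge pages the generic version below returns
 * PUD_SIZE - PMD_SIZE: OR-ing that into an address and then stepping by
 * the huge page size moves the scan to the next PUD-sized region.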
7251 */ 7252 unsigned long hugetlb_mask_last_page(struct hstate *h) 7253 { 7254 unsigned long hp_size = huge_page_size(h); 7255 7256 if (hp_size == PUD_SIZE) 7257 return P4D_SIZE - PUD_SIZE; 7258 else if (hp_size == PMD_SIZE) 7259 return PUD_SIZE - PMD_SIZE; 7260 else 7261 return 0UL; 7262 } 7263 7264 #else 7265 7266 /* See description above. Architectures can provide their own version. */ 7267 __weak unsigned long hugetlb_mask_last_page(struct hstate *h) 7268 { 7269 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE 7270 if (huge_page_size(h) == PMD_SIZE) 7271 return PUD_SIZE - PMD_SIZE; 7272 #endif 7273 return 0UL; 7274 } 7275 7276 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ 7277 7278 /* 7279 * These functions are overwritable if your architecture needs its own 7280 * behavior. 7281 */ 7282 bool isolate_hugetlb(struct folio *folio, struct list_head *list) 7283 { 7284 bool ret = true; 7285 7286 spin_lock_irq(&hugetlb_lock); 7287 if (!folio_test_hugetlb(folio) || 7288 !folio_test_hugetlb_migratable(folio) || 7289 !folio_try_get(folio)) { 7290 ret = false; 7291 goto unlock; 7292 } 7293 folio_clear_hugetlb_migratable(folio); 7294 list_move_tail(&folio->lru, list); 7295 unlock: 7296 spin_unlock_irq(&hugetlb_lock); 7297 return ret; 7298 } 7299 7300 int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison) 7301 { 7302 int ret = 0; 7303 7304 *hugetlb = false; 7305 spin_lock_irq(&hugetlb_lock); 7306 if (folio_test_hugetlb(folio)) { 7307 *hugetlb = true; 7308 if (folio_test_hugetlb_freed(folio)) 7309 ret = 0; 7310 else if (folio_test_hugetlb_migratable(folio) || unpoison) 7311 ret = folio_try_get(folio); 7312 else 7313 ret = -EBUSY; 7314 } 7315 spin_unlock_irq(&hugetlb_lock); 7316 return ret; 7317 } 7318 7319 int get_huge_page_for_hwpoison(unsigned long pfn, int flags, 7320 bool *migratable_cleared) 7321 { 7322 int ret; 7323 7324 spin_lock_irq(&hugetlb_lock); 7325 ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared); 7326 spin_unlock_irq(&hugetlb_lock); 7327 return ret; 7328 } 7329 7330 void folio_putback_active_hugetlb(struct folio *folio) 7331 { 7332 spin_lock_irq(&hugetlb_lock); 7333 folio_set_hugetlb_migratable(folio); 7334 list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist); 7335 spin_unlock_irq(&hugetlb_lock); 7336 folio_put(folio); 7337 } 7338 7339 void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason) 7340 { 7341 struct hstate *h = folio_hstate(old_folio); 7342 7343 hugetlb_cgroup_migrate(old_folio, new_folio); 7344 set_page_owner_migrate_reason(&new_folio->page, reason); 7345 7346 /* 7347 * transfer temporary state of the new hugetlb folio. This is 7348 * reverse to other transitions because the newpage is going to 7349 * be final while the old one will be freed so it takes over 7350 * the temporary status. 7351 * 7352 * Also note that we have to transfer the per-node surplus state 7353 * here as well otherwise the global surplus count will not match 7354 * the per-node's. 7355 */ 7356 if (folio_test_hugetlb_temporary(new_folio)) { 7357 int old_nid = folio_nid(old_folio); 7358 int new_nid = folio_nid(new_folio); 7359 7360 folio_set_hugetlb_temporary(old_folio); 7361 folio_clear_hugetlb_temporary(new_folio); 7362 7363 7364 /* 7365 * There is no need to transfer the per-node surplus state 7366 * when we do not cross the node. 
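 * Otherwise the surplus count is shifted from the old node to the new one
 * below, provided the old node actually has surplus pages.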
7367 */ 7368 if (new_nid == old_nid) 7369 return; 7370 spin_lock_irq(&hugetlb_lock); 7371 if (h->surplus_huge_pages_node[old_nid]) { 7372 h->surplus_huge_pages_node[old_nid]--; 7373 h->surplus_huge_pages_node[new_nid]++; 7374 } 7375 spin_unlock_irq(&hugetlb_lock); 7376 } 7377 } 7378 7379 static void hugetlb_unshare_pmds(struct vm_area_struct *vma, 7380 unsigned long start, 7381 unsigned long end) 7382 { 7383 struct hstate *h = hstate_vma(vma); 7384 unsigned long sz = huge_page_size(h); 7385 struct mm_struct *mm = vma->vm_mm; 7386 struct mmu_notifier_range range; 7387 unsigned long address; 7388 spinlock_t *ptl; 7389 pte_t *ptep; 7390 7391 if (!(vma->vm_flags & VM_MAYSHARE)) 7392 return; 7393 7394 if (start >= end) 7395 return; 7396 7397 flush_cache_range(vma, start, end); 7398 /* 7399 * No need to call adjust_range_if_pmd_sharing_possible(), because 7400 * we have already done the PUD_SIZE alignment. 7401 */ 7402 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, 7403 start, end); 7404 mmu_notifier_invalidate_range_start(&range); 7405 hugetlb_vma_lock_write(vma); 7406 i_mmap_lock_write(vma->vm_file->f_mapping); 7407 for (address = start; address < end; address += PUD_SIZE) { 7408 ptep = hugetlb_walk(vma, address, sz); 7409 if (!ptep) 7410 continue; 7411 ptl = huge_pte_lock(h, mm, ptep); 7412 huge_pmd_unshare(mm, vma, address, ptep); 7413 spin_unlock(ptl); 7414 } 7415 flush_hugetlb_tlb_range(vma, start, end); 7416 i_mmap_unlock_write(vma->vm_file->f_mapping); 7417 hugetlb_vma_unlock_write(vma); 7418 /* 7419 * No need to call mmu_notifier_invalidate_range(), see 7420 * Documentation/mm/mmu_notifier.rst. 7421 */ 7422 mmu_notifier_invalidate_range_end(&range); 7423 } 7424 7425 /* 7426 * This function will unconditionally remove all the shared pmd pgtable entries 7427 * within the specific vma for a hugetlbfs memory range. 7428 */ 7429 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) 7430 { 7431 hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE), 7432 ALIGN_DOWN(vma->vm_end, PUD_SIZE)); 7433 } 7434 7435 #ifdef CONFIG_CMA 7436 static bool cma_reserve_called __initdata; 7437 7438 static int __init cmdline_parse_hugetlb_cma(char *p) 7439 { 7440 int nid, count = 0; 7441 unsigned long tmp; 7442 char *s = p; 7443 7444 while (*s) { 7445 if (sscanf(s, "%lu%n", &tmp, &count) != 1) 7446 break; 7447 7448 if (s[count] == ':') { 7449 if (tmp >= MAX_NUMNODES) 7450 break; 7451 nid = array_index_nospec(tmp, MAX_NUMNODES); 7452 7453 s += count + 1; 7454 tmp = memparse(s, &s); 7455 hugetlb_cma_size_in_node[nid] = tmp; 7456 hugetlb_cma_size += tmp; 7457 7458 /* 7459 * Skip the separator if have one, otherwise 7460 * break the parsing. 
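 * e.g. "hugetlb_cma=0:1G,2:2G" (sizes illustrative) keeps looping here over
 * per-node entries, while a plain "hugetlb_cma=4G" takes the else branch
 * below and is later spread across the online nodes.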
7461 */ 7462 if (*s == ',') 7463 s++; 7464 else 7465 break; 7466 } else { 7467 hugetlb_cma_size = memparse(p, &p); 7468 break; 7469 } 7470 } 7471 7472 return 0; 7473 } 7474 7475 early_param("hugetlb_cma", cmdline_parse_hugetlb_cma); 7476 7477 void __init hugetlb_cma_reserve(int order) 7478 { 7479 unsigned long size, reserved, per_node; 7480 bool node_specific_cma_alloc = false; 7481 int nid; 7482 7483 cma_reserve_called = true; 7484 7485 if (!hugetlb_cma_size) 7486 return; 7487 7488 for (nid = 0; nid < MAX_NUMNODES; nid++) { 7489 if (hugetlb_cma_size_in_node[nid] == 0) 7490 continue; 7491 7492 if (!node_online(nid)) { 7493 pr_warn("hugetlb_cma: invalid node %d specified\n", nid); 7494 hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; 7495 hugetlb_cma_size_in_node[nid] = 0; 7496 continue; 7497 } 7498 7499 if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) { 7500 pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n", 7501 nid, (PAGE_SIZE << order) / SZ_1M); 7502 hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; 7503 hugetlb_cma_size_in_node[nid] = 0; 7504 } else { 7505 node_specific_cma_alloc = true; 7506 } 7507 } 7508 7509 /* Validate the CMA size again in case some invalid nodes specified. */ 7510 if (!hugetlb_cma_size) 7511 return; 7512 7513 if (hugetlb_cma_size < (PAGE_SIZE << order)) { 7514 pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n", 7515 (PAGE_SIZE << order) / SZ_1M); 7516 hugetlb_cma_size = 0; 7517 return; 7518 } 7519 7520 if (!node_specific_cma_alloc) { 7521 /* 7522 * If 3 GB area is requested on a machine with 4 numa nodes, 7523 * let's allocate 1 GB on first three nodes and ignore the last one. 7524 */ 7525 per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes); 7526 pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n", 7527 hugetlb_cma_size / SZ_1M, per_node / SZ_1M); 7528 } 7529 7530 reserved = 0; 7531 for_each_online_node(nid) { 7532 int res; 7533 char name[CMA_MAX_NAME]; 7534 7535 if (node_specific_cma_alloc) { 7536 if (hugetlb_cma_size_in_node[nid] == 0) 7537 continue; 7538 7539 size = hugetlb_cma_size_in_node[nid]; 7540 } else { 7541 size = min(per_node, hugetlb_cma_size - reserved); 7542 } 7543 7544 size = round_up(size, PAGE_SIZE << order); 7545 7546 snprintf(name, sizeof(name), "hugetlb%d", nid); 7547 /* 7548 * Note that 'order per bit' is based on smallest size that 7549 * may be returned to CMA allocator in the case of 7550 * huge page demotion. 7551 */ 7552 res = cma_declare_contiguous_nid(0, size, 0, 7553 PAGE_SIZE << HUGETLB_PAGE_ORDER, 7554 0, false, name, 7555 &hugetlb_cma[nid], nid); 7556 if (res) { 7557 pr_warn("hugetlb_cma: reservation failed: err %d, node %d", 7558 res, nid); 7559 continue; 7560 } 7561 7562 reserved += size; 7563 pr_info("hugetlb_cma: reserved %lu MiB on node %d\n", 7564 size / SZ_1M, nid); 7565 7566 if (reserved >= hugetlb_cma_size) 7567 break; 7568 } 7569 7570 if (!reserved) 7571 /* 7572 * hugetlb_cma_size is used to determine if allocations from 7573 * cma are possible. Set to zero if no cma regions are set up. 7574 */ 7575 hugetlb_cma_size = 0; 7576 } 7577 7578 static void __init hugetlb_cma_check(void) 7579 { 7580 if (!hugetlb_cma_size || cma_reserve_called) 7581 return; 7582 7583 pr_warn("hugetlb_cma: the option isn't supported by current arch\n"); 7584 } 7585 7586 #endif /* CONFIG_CMA */ 7587