// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>
#include <linux/migrate.h>
#include <linux/nospec.h>
#include <linux/delayacct.h>
#include <linux/memory.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/page_owner.h>
#include "internal.h"
#include "hugetlb_vmemmap.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
{
	return cma_pages_valid(hugetlb_cma[folio_nid(folio)], &folio->page,
				1 << order);
}
#else
static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
{
	return false;
}
#endif
static unsigned long hugetlb_cma_size __initdata;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static bool __initdata parsed_valid_hugepagesz = true;
static bool __initdata parsed_default_hugepagesz;
static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);
/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);

static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
	if (spool->count)
		return false;
	if (spool->max_hpages != -1)
		return spool->used_hpages == 0;
	if (spool->min_hpages != -1)
		return spool->rsv_hpages == spool->min_hpages;

	return true;
}

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
						unsigned long irq_flags)
{
	spin_unlock_irqrestore(&spool->lock, irq_flags);

	/*
	 * If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool.
	 */
	if (subpool_is_free(spool)) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	unsigned long flags;

	spin_lock_irqsave(&spool->lock, flags);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool, flags);
}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
					long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock_irq(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock_irq(&spool->lock);
	return ret;
}
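/*
 * Worked example (illustrative): for a subpool created with
 * min_hpages = 10, rsv_hpages starts at 10.  A request for 2 pages is
 * satisfied entirely from the reserve (returns 0, rsv_hpages becomes
 * 8).  A later request for 12 pages consumes the remaining 8 reserved
 * pages and asks the caller to grow the global pool by the difference
 * (returns 4, rsv_hpages becomes 0).
 */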
/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
					long delta)
{
	long ret = delta;
	unsigned long flags;

	if (!spool)
		return delta;

	spin_lock_irqsave(&spool->lock, flags);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool, flags);

	return ret;
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/*
 * hugetlb vma_lock helper routines
 */
static bool __vma_shareable_lock(struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_MAYSHARE | VM_SHARED) &&
		vma->vm_private_data;
}

void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_read(&vma_lock->rw_sema);
	}
}

void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		up_read(&vma_lock->rw_sema);
	}
}

void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_write(&vma_lock->rw_sema);
	}
}

void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		up_write(&vma_lock->rw_sema);
	}
}

int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

	if (!__vma_shareable_lock(vma))
		return 1;

	return down_write_trylock(&vma_lock->rw_sema);
}

void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		lockdep_assert_held(&vma_lock->rw_sema);
	}
}

void hugetlb_vma_lock_release(struct kref *kref)
{
	struct hugetlb_vma_lock *vma_lock = container_of(kref,
			struct hugetlb_vma_lock, refs);

	kfree(vma_lock);
}
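/*
 * Sketch of the intended usage (illustrative, not taken from this
 * file): fault handlers hold the lock in read mode, while paths that
 * must exclude faults, such as truncation and pmd unsharing, take it
 * in write mode:
 *
 *	hugetlb_vma_lock_read(vma);
 *	... handle the fault ...
 *	hugetlb_vma_unlock_read(vma);
 */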
static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
{
	struct vm_area_struct *vma = vma_lock->vma;

	/*
	 * The vma_lock structure may or may not be released as a result of
	 * the put, but it will certainly no longer be attached to the vma,
	 * so clear the pointer.  The semaphore synchronizes access to the
	 * vma_lock->vma field.
	 */
	vma_lock->vma = NULL;
	vma->vm_private_data = NULL;
	up_write(&vma_lock->rw_sema);
	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
}

static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		__hugetlb_vma_unlock_write_put(vma_lock);
	}
}

static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
{
	/*
	 * Only present in sharable vmas.
	 */
	if (!vma || !__vma_shareable_lock(vma))
		return;

	if (vma->vm_private_data) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_write(&vma_lock->rw_sema);
		__hugetlb_vma_unlock_write_put(vma_lock);
	}
}

static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
{
	struct hugetlb_vma_lock *vma_lock;

	/* Only establish in (flags) sharable vmas */
	if (!vma || !(vma->vm_flags & VM_MAYSHARE))
		return;

	/* Should never get here with non-NULL vm_private_data */
	if (vma->vm_private_data)
		return;

	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
	if (!vma_lock) {
		/*
		 * If we cannot allocate the structure, then the vma cannot
		 * participate in pmd sharing.  That is only a potential
		 * performance enhancement and memory saving.  However, the
		 * lock is also used to synchronize page faults with
		 * truncation.  If the lock is not present, unlikely races
		 * could leave pages in a file past i_size until the file is
		 * removed.  Warn in the unlikely case of allocation failure.
		 */
		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
		return;
	}

	kref_init(&vma_lock->refs);
	init_rwsem(&vma_lock->rw_sema);
	vma_lock->vma = vma;
	vma->vm_private_data = vma_lock;
}

/*
 * Helper that removes a struct file_region from the resv_map cache and
 * returns it for use.
 */
static struct file_region *
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
{
	struct file_region *nrg;

	VM_BUG_ON(resv->region_cache_count <= 0);

	resv->region_cache_count--;
	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
	list_del(&nrg->link);

	nrg->from = from;
	nrg->to = to;

	return nrg;
}

static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
					      struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	nrg->reservation_counter = rg->reservation_counter;
	nrg->css = rg->css;
	if (rg->css)
		css_get(rg->css);
#endif
}
/* Helper that records hugetlb_cgroup uncharge info. */
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
						struct hstate *h,
						struct resv_map *resv,
						struct file_region *nrg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (h_cg) {
		nrg->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		nrg->css = &h_cg->css;
		/*
		 * The caller will hold exactly one h_cg->css reference for the
		 * whole contiguous reservation region.  But this area might be
		 * scattered when there are already some file_regions residing
		 * in it.  As a result, many file_regions may share only one
		 * css reference.  In order to ensure that one file_region must
		 * hold exactly one h_cg->css reference, we should do css_get
		 * for each file_region and leave the reference held by the
		 * caller untouched.
		 */
		css_get(&h_cg->css);
		if (!resv->pages_per_hpage)
			resv->pages_per_hpage = pages_per_huge_page(h);
		/*
		 * pages_per_hpage should be the same for all entries in
		 * a resv_map.
		 */
		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
	} else {
		nrg->reservation_counter = NULL;
		nrg->css = NULL;
	}
#endif
}

static void put_uncharge_info(struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (rg->css)
		css_put(rg->css);
#endif
}

static bool has_same_uncharge_info(struct file_region *rg,
				   struct file_region *org)
{
#ifdef CONFIG_CGROUP_HUGETLB
	return rg->reservation_counter == org->reservation_counter &&
	       rg->css == org->css;
#else
	return true;
#endif
}

static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{
	struct file_region *nrg, *prg;

	prg = list_prev_entry(rg, link);
	if (&prg->link != &resv->regions && prg->to == rg->from &&
	    has_same_uncharge_info(prg, rg)) {
		prg->to = rg->to;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);

		rg = prg;
	}

	nrg = list_next_entry(rg, link);
	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
	    has_same_uncharge_info(nrg, rg)) {
		nrg->from = rg->from;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);
	}
}
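/*
 * For example, if the map holds [0, 3) and [7, 10) and a new region
 * [3, 7) with the same uncharge info is inserted between them, both
 * merges above fire and the three entries collapse into a single
 * [0, 10) region.
 */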
static inline long
hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
		     long *regions_needed)
{
	struct file_region *nrg;

	if (!regions_needed) {
		nrg = get_file_region_entry_from_cache(map, from, to);
		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
		list_add(&nrg->link, rg);
		coalesce_file_region(map, nrg);
	} else
		*regions_needed += 1;

	return to - from;
}

/*
 * Must be called with resv->lock held.
 *
 * Calling this with regions_needed != NULL will count the number of pages
 * to be added without modifying the linked list.  In that case,
 * *regions_needed will be set to the number of file_regions needed in the
 * cache to add the regions for this range.
 */
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
				     struct hugetlb_cgroup *h_cg,
				     struct hstate *h, long *regions_needed)
{
	long add = 0;
	struct list_head *head = &resv->regions;
	long last_accounted_offset = f;
	struct file_region *iter, *trg = NULL;
	struct list_head *rg = NULL;

	if (regions_needed)
		*regions_needed = 0;

	/*
	 * In this loop, we essentially handle an entry for the range
	 * [last_accounted_offset, iter->from), at every iteration, with some
	 * bounds checking.
	 */
	list_for_each_entry_safe(iter, trg, head, link) {
		/* Skip irrelevant regions that start before our range. */
		if (iter->from < f) {
			/*
			 * If this region ends after the last accounted offset,
			 * then we need to update last_accounted_offset.
			 */
			if (iter->to > last_accounted_offset)
				last_accounted_offset = iter->to;
			continue;
		}

		/*
		 * When we find a region that starts beyond our range, we've
		 * finished.
		 */
		if (iter->from >= t) {
			rg = iter->link.prev;
			break;
		}

		/*
		 * Add an entry for last_accounted_offset -> iter->from, and
		 * update last_accounted_offset.
		 */
		if (iter->from > last_accounted_offset)
			add += hugetlb_resv_map_add(resv, iter->link.prev,
						    last_accounted_offset,
						    iter->from, h, h_cg,
						    regions_needed);

		last_accounted_offset = iter->to;
	}

	/*
	 * Handle the case where our range extends beyond
	 * last_accounted_offset.
	 */
	if (!rg)
		rg = head->prev;
	if (last_accounted_offset < t)
		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
					    t, h, h_cg, regions_needed);

	return add;
}
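/*
 * Worked example: with existing regions [3, 5) and [7, 10), adding the
 * range [0, 12) fills the gaps [0, 3), [5, 7) and [10, 12), so the
 * routine returns 3 + 2 + 2 = 7 pages and, in counting mode, sets
 * *regions_needed to 3.
 */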
/*
 * Must be called with resv->lock acquired.  Will drop lock to allocate
 * entries.
 */
static int allocate_file_region_entries(struct resv_map *resv,
					int regions_needed)
	__must_hold(&resv->lock)
{
	LIST_HEAD(allocated_regions);
	int to_allocate = 0, i = 0;
	struct file_region *trg = NULL, *rg = NULL;

	VM_BUG_ON(regions_needed < 0);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations plus regions_needed.
	 *
	 * This is a while loop because when we drop the lock, some other call
	 * to region_add or region_del may have consumed some region_entries,
	 * so we keep looping here until we finally have enough entries for
	 * (adds_in_progress + regions_needed).
	 */
	while (resv->region_cache_count <
	       (resv->adds_in_progress + regions_needed)) {
		to_allocate = resv->adds_in_progress + regions_needed -
			      resv->region_cache_count;

		/*
		 * At this point, we should have enough entries in the cache
		 * for all the existing adds_in_progress.  We should only be
		 * needing to allocate for regions_needed.
		 */
		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);

		spin_unlock(&resv->lock);
		for (i = 0; i < to_allocate; i++) {
			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
			if (!trg)
				goto out_of_memory;
			list_add(&trg->link, &allocated_regions);
		}

		spin_lock(&resv->lock);

		list_splice(&allocated_regions, &resv->region_cache);
		resv->region_cache_count += to_allocate;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
		list_del(&rg->link);
		kfree(rg);
	}
	return -ENOMEM;
}

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  Regions will be taken from the cache to fill in this range.
 * Sufficient regions should exist in the cache due to the previous
 * call to region_chg with the same range, but in some cases the cache will not
 * have sufficient entries due to races with other code doing region_add or
 * region_del.  The extra needed entries will be allocated.
 *
 * regions_needed is the out value provided by a previous call to region_chg.
 *
 * Return the number of new huge pages added to the map.  This number is
 * greater than or equal to zero.  If file_region entries needed to be
 * allocated for this operation and we were not able to allocate, it returns
 * -ENOMEM.  region_add of regions of length 1 never allocates file_regions
 * and cannot fail; region_chg will always allocate at least 1 entry and a
 * region_add for 1 page will only require at most 1 entry.
 */
static long region_add(struct resv_map *resv, long f, long t,
		       long in_regions_needed, struct hstate *h,
		       struct hugetlb_cgroup *h_cg)
{
	long add = 0, actual_regions_needed = 0;

	spin_lock(&resv->lock);
retry:

	/* Count how many regions are actually needed to execute this add. */
	add_reservation_in_range(resv, f, t, NULL, NULL,
				 &actual_regions_needed);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * this add operation.  Note that actual_regions_needed may be greater
	 * than in_regions_needed, as the resv_map may have been modified since
	 * the region_chg call.  In this case, we need to make sure that we
	 * allocate extra entries, such that we have enough for all the
	 * existing adds_in_progress, plus the excess needed for this
	 * operation.
	 */
	if (actual_regions_needed > in_regions_needed &&
	    resv->region_cache_count <
		resv->adds_in_progress +
			(actual_regions_needed - in_regions_needed)) {
		/*
		 * region_add operation of range 1 should never need to
		 * allocate file_region entries.
		 */
		VM_BUG_ON(t - f <= 1);

		if (allocate_file_region_entries(
			    resv, actual_regions_needed - in_regions_needed)) {
			return -ENOMEM;
		}

		goto retry;
	}

	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);

	resv->adds_in_progress -= in_regions_needed;

	spin_unlock(&resv->lock);
	return add;
}
/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  A number of new file_region structures are added to the cache as
 * placeholders, for the subsequent region_add call to use.  At least 1
 * file_region structure is added.
 *
 * out_regions_needed is the number of regions added to the
 * resv->adds_in_progress.  This value needs to be provided to a follow up call
 * to region_add or region_abort for proper accounting.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater than or
 * equal to zero.  -ENOMEM is returned if a new file_region structure or
 * cache entry is needed and can not be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t,
		       long *out_regions_needed)
{
	long chg = 0;

	spin_lock(&resv->lock);

	/* Count how many hugepages in this range are NOT represented. */
	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
				       out_regions_needed);

	if (*out_regions_needed == 0)
		*out_regions_needed = 1;

	if (allocate_file_region_entries(resv, *out_regions_needed))
		return -ENOMEM;

	resv->adds_in_progress += *out_regions_needed;

	spin_unlock(&resv->lock);
	return chg;
}

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter.  regions_needed
 * is the value returned by the region_chg call; it is used to decrement
 * the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t,
			 long regions_needed)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress -= regions_needed;
	spin_unlock(&resv->lock);
}
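/*
 * The three routines above form a two-phase protocol.  A caller follows
 * roughly this pattern (illustrative sketch, error handling elided):
 *
 *	chg = region_chg(resv, f, t, &regions_needed);
 *	if (the reservation is actually consumed)
 *		region_add(resv, f, t, regions_needed, h, h_cg);
 *	else
 *		region_abort(resv, f, t, regions_needed);
 */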
/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to).  However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.  Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
						       struct file_region,
						       link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;
			hugetlb_cgroup_uncharge_file_region(
				resv, rg, t - f, false);

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;

			copy_hugetlb_cgroup_uncharge_info(nrg, rg);

			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - rg->from, true);
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {	/* Trim beginning of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    t - rg->from, false);

			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - f, false);

			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}
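/*
 * For example, deleting [4, 6) from a map holding the single region
 * [0, 10) trims that entry to [0, 4), inserts the new entry [6, 10),
 * and returns 2.
 */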
/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was freed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;
	bool reserved = false;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust > 0) {
		struct hstate *h = hstate_inode(inode);

		if (!hugetlb_acct_memory(h, 1))
			reserved = true;
	} else if (!rsv_adjust) {
		reserved = true;
	}

	if (!reserved)
		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}
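/*
 * For example, with 2MB huge pages (huge_page_shift == 21), a vma
 * starting at 0x40000000 with vm_pgoff == 0 maps address 0x40400000 to
 * huge page offset (0x400000 >> 21) == 2.
 */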
pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);

/*
 * Return the size of the pages allocated when backing a VMA.  In the
 * majority of cases this will be the same size as used by the page table
 * entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA.  In the majority
 * of cases, the page size used by the kernel matches the MMU size.  On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping.  Only the process that called mmap()
 * is guaranteed to have its future faults succeed.
 *
 * With the exception of hugetlb_dup_vma_private() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held.  It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and it persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
					unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

static void
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
					  struct hugetlb_cgroup *h_cg,
					  struct hstate *h)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (!h_cg || !h) {
		resv_map->reservation_counter = NULL;
		resv_map->pages_per_hpage = 0;
		resv_map->css = NULL;
	} else {
		resv_map->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		resv_map->pages_per_hpage = pages_per_huge_page(h);
		resv_map->css = &h_cg->css;
	}
#endif
}

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);

	resv_map->adds_in_progress = 0;
	/*
	 * Initialize these to 0.  On shared mappings, 0's here indicate these
	 * fields don't do cgroup accounting.  On private mappings, these will
	 * be re-initialized to the proper values, to indicate that hugetlb
	 * cgroup reservations are to be un-charged from here.
	 */
	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}
void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	/*
	 * At inode evict time, i_mapping may not point to the original
	 * address space within the inode.  This original address space
	 * contains the pointer to the resv_map.  So, always use the
	 * address space embedded within the inode.
	 * The VERY common case is inode->mapping == &inode->i_data but,
	 * this may not be true for device special inodes.
	 */
	return (struct resv_map *)(&inode->i_data)->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}
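/*
 * Illustrative sketch of how vm_private_data is packed for a
 * MAP_PRIVATE vma (not taken from this file):
 *
 *	set_vma_resv_map(vma, map);
 *	set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
 *	map == vma_resv_map(vma);			pointer recovered
 *	is_vma_resv_set(vma, HPAGE_RESV_OWNER);		non-zero
 *
 * This works because the kmalloc'ed resv_map is at least word aligned,
 * leaving the bottom HPAGE_RESV_MASK bits of the pointer clear.
 */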
void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	/*
	 * Clear vm_private_data
	 * - For shared mappings this is a per-vma semaphore that may be
	 *   allocated in a subsequent call to hugetlb_vm_op_open.
	 *   Before clearing, make sure the pointer is not associated with
	 *   this vma, as clearing it then would leak the structure.  That
	 *   is the case when called via clear_vma_resv_huge_pages() and
	 *   hugetlb_vm_op_open has already been called to allocate a new
	 *   structure.
	 * - For MAP_PRIVATE mappings, this is the reserve map which does
	 *   not apply to children.  Faults generated by the children are
	 *   not guaranteed to succeed, even if read-only.
	 */
	if (vma->vm_flags & VM_MAYSHARE) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		if (vma_lock && vma_lock->vma != vma)
			vma->vm_private_data = NULL;
	} else
		vma->vm_private_data = NULL;
}

/*
 * Reset and decrement one ref on hugepage private reservation.
 * Called with mm->mmap_lock held for writing.
 * This function should only be used by move_vma() and operates on vmas
 * of the same size.  It should never be called with the last ref on the
 * reservation.
 */
void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	/*
	 * Clear the old hugetlb private page reservation.
	 * It has already been transferred to new_vma.
	 *
	 * During a mremap() operation of a hugetlb vma we call move_vma()
	 * which copies vma into new_vma and unmaps vma.  After the copy
	 * operation both new_vma and vma share a reference to the resv_map
	 * struct, and at that point vma is about to be unmapped.  We don't
	 * want to return the reservation to the pool at unmap of vma because
	 * the reservation still lives on in new_vma, so simply decrement the
	 * ref here and remove the resv_map reference from this vma.
	 */
	struct resv_map *reservations = vma_resv_map(vma);

	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
		kref_put(&reservations->refs, resv_map_release);
	}

	hugetlb_dup_vma_private(vma);
}

/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by another process
		 * (chg == 0), so we should decrement the reserved count.
		 * Without decrementing, the reserve count remains after
		 * releasing the inode, because this allocated page will go
		 * into the page cache and is regarded as coming from the
		 * reserved pool in the releasing step.  Currently, we don't
		 * have any other solution to deal with this situation
		 * properly, so add a work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return true;
		else
			return false;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE) {
		/*
		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
		 * be a region map for all pages.  The only situation where
		 * there is no region map is if a hole was punched via
		 * fallocate.  In this case, there really are no reserves to
		 * use.  This situation is indicated if chg != 0.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Like the shared case above, a hole punch or truncate
		 * could have been performed on the private mapping.
		 * Examine the value of chg to determine if reserves
		 * actually exist or were previously consumed.
		 * Very Subtle - The value of chg comes from a previous
		 * call to vma_needs_reserves().  The reserve map for
		 * private mappings has different (opposite) semantics
		 * than that of shared mappings.  vma_needs_reserves()
		 * has already taken this difference in semantics into
		 * account.  Therefore, the meaning of chg is the same
		 * as in the shared case above.  Code could easily be
		 * combined, but keeping it separate draws attention to
		 * subtle differences.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	return false;
}
static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
{
	int nid = folio_nid(folio);

	lockdep_assert_held(&hugetlb_lock);
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);

	list_move(&folio->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
	folio_set_hugetlb_freed(folio);
}

static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
{
	struct page *page;
	bool pin = !!(current->flags & PF_MEMALLOC_PIN);

	lockdep_assert_held(&hugetlb_lock);
	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
		if (pin && !is_longterm_pinnable_page(page))
			continue;

		if (PageHWPoison(page))
			continue;

		list_move(&page->lru, &h->hugepage_activelist);
		set_page_refcounted(page);
		ClearHPageFreed(page);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		return page;
	}

	return NULL;
}

static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
							nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = NUMA_NO_NODE;

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct page *page;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * no need to ask again on the same node.  Pool is node
		 * rather than zone aware.
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		page = dequeue_huge_page_node_exact(h, node);
		if (page)
			return page;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}

static unsigned long available_huge_pages(struct hstate *h)
{
	return h->free_huge_pages - h->resv_huge_pages;
}
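/*
 * Note: free_huge_pages counts every page on the free lists, but
 * resv_huge_pages of those are already promised to existing
 * reservations, so only the difference is available to requests that
 * cannot consume a reserve.
 */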
static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct page *page = NULL;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves.  This check ensures that reservations are
	 * not "stolen".  The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma, chg) && !available_huge_pages(h))
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && !available_huge_pages(h))
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);

	if (mpol_is_preferred_many(mpol)) {
		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);

		/* Fallback to all nodes if page==NULL */
		nodemask = NULL;
	}

	if (!page)
		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);

	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
		SetHPageRestoreReserve(page);
		h->resv_huge_pages--;
	}

	mpol_cond_put(mpol);
	return page;

err:
	return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}
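/*
 * For example, with nodes_allowed = {0, 2} and next_nid_to_alloc == 1,
 * hstate_next_node_to_alloc() returns 2 and advances next_nid_to_alloc
 * to 0, wrapping around the mask.
 */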
/*
 * helper for remove_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)

/* used to demote non-gigantic_huge pages as well */
static void __destroy_compound_gigantic_folio(struct folio *folio,
					unsigned int order, bool demote)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p;

	atomic_set(folio_mapcount_ptr(folio), 0);
	atomic_set(folio_subpages_mapcount_ptr(folio), 0);
	atomic_set(folio_pincount_ptr(folio), 0);

	for (i = 1; i < nr_pages; i++) {
		p = folio_page(folio, i);
		p->mapping = NULL;
		clear_compound_head(p);
		if (!demote)
			set_page_refcounted(p);
	}

	folio_set_compound_order(folio, 0);
	__folio_clear_head(folio);
}

static void destroy_compound_hugetlb_folio_for_demote(struct folio *folio,
					unsigned int order)
{
	__destroy_compound_gigantic_folio(folio, order, true);
}

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_folio(struct folio *folio,
					unsigned int order)
{
	__destroy_compound_gigantic_folio(folio, order, false);
}

static void free_gigantic_folio(struct folio *folio, unsigned int order)
{
	/*
	 * If the page isn't allocated using the cma allocator,
	 * cma_release() returns false.
	 */
#ifdef CONFIG_CMA
	int nid = folio_nid(folio);

	if (cma_release(hugetlb_cma[nid], &folio->page, 1 << order))
		return;
#endif

	free_contig_range(folio_pfn(folio), 1 << order);
}

#ifdef CONFIG_CONTIG_ALLOC
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	struct page *page;
	unsigned long nr_pages = pages_per_huge_page(h);

	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

#ifdef CONFIG_CMA
	{
		int node;

		if (hugetlb_cma[nid]) {
			page = cma_alloc(hugetlb_cma[nid], nr_pages,
					huge_page_order(h), true);
			if (page)
				return page_folio(page);
		}

		if (!(gfp_mask & __GFP_THISNODE)) {
			for_each_node_mask(node, *nodemask) {
				if (node == nid || !hugetlb_cma[node])
					continue;

				page = cma_alloc(hugetlb_cma[node], nr_pages,
						huge_page_order(h), true);
				if (page)
					return page_folio(page);
			}
		}
	}
#endif

	page = alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
	return page ? page_folio(page) : NULL;
}
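/*
 * Allocation thus falls back in order: the CMA area on the preferred
 * node, CMA areas on the remaining allowed nodes (unless
 * __GFP_THISNODE), and finally the generic contiguous allocator.
 */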
#else /* !CONFIG_CONTIG_ALLOC */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
static inline void free_gigantic_folio(struct folio *folio,
						unsigned int order) { }
static inline void destroy_compound_gigantic_folio(struct folio *folio,
						unsigned int order) { }
#endif

/*
 * Remove hugetlb folio from lists, and update dtor so that the folio appears
 * as just a compound page.
 *
 * A reference is held on the folio, except in the case of demote.
 *
 * Must be called with hugetlb lock held.
 */
static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio,
							bool adjust_surplus,
							bool demote)
{
	int nid = folio_nid(folio);

	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);

	lockdep_assert_held(&hugetlb_lock);
	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	list_del(&folio->lru);

	if (folio_test_hugetlb_freed(folio)) {
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
	}
	if (adjust_surplus) {
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	}

	/*
	 * Very subtle
	 *
	 * For non-gigantic pages set the destructor to the normal compound
	 * page dtor.  This is needed in case someone takes an additional
	 * temporary ref to the page, and freeing is delayed until they drop
	 * their reference.
	 *
	 * For gigantic pages set the destructor to the null dtor.  This
	 * destructor will never be called.  Before freeing the gigantic
	 * page destroy_compound_gigantic_folio will turn the folio into a
	 * simple group of pages.  After this the destructor does not
	 * apply.
	 *
	 * This handles the case where more than one ref is held when, and
	 * after, update_and_free_hugetlb_folio is called.
	 *
	 * In the case of demote we do not ref count the page as it will soon
	 * be turned into a page of smaller size.
	 */
	if (!demote)
		folio_ref_unfreeze(folio, 1);
	if (hstate_is_gigantic(h))
		folio_set_compound_dtor(folio, NULL_COMPOUND_DTOR);
	else
		folio_set_compound_dtor(folio, COMPOUND_PAGE_DTOR);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[nid]--;
}
static void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
							bool adjust_surplus)
{
	__remove_hugetlb_folio(h, folio, adjust_surplus, false);
}

static void remove_hugetlb_folio_for_demote(struct hstate *h, struct folio *folio,
							bool adjust_surplus)
{
	__remove_hugetlb_folio(h, folio, adjust_surplus, true);
}

static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
			     bool adjust_surplus)
{
	int zeroed;
	int nid = folio_nid(folio);

	VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio);

	lockdep_assert_held(&hugetlb_lock);

	INIT_LIST_HEAD(&folio->lru);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;

	if (adjust_surplus) {
		h->surplus_huge_pages++;
		h->surplus_huge_pages_node[nid]++;
	}

	folio_set_compound_dtor(folio, HUGETLB_PAGE_DTOR);
	folio_change_private(folio, NULL);
	/*
	 * We have to set hugetlb_vmemmap_optimized again as above
	 * folio_change_private(folio, NULL) cleared it.
	 */
	folio_set_hugetlb_vmemmap_optimized(folio);

	/*
	 * This folio is about to be managed by the hugetlb allocator and
	 * should have no users.  Drop our reference, and check for others
	 * just in case.
	 */
	zeroed = folio_put_testzero(folio);
	if (unlikely(!zeroed))
		/*
		 * It is VERY unlikely someone else has taken a ref on
		 * the page.  In this case, we simply return as the
		 * hugetlb destructor (free_huge_page) will be called
		 * when this other ref is dropped.
		 */
		return;

	arch_clear_hugepage_flags(&folio->page);
	enqueue_hugetlb_folio(h, folio);
}

static void __update_and_free_page(struct hstate *h, struct page *page)
{
	int i;
	struct folio *folio = page_folio(page);
	struct page *subpage;

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	/*
	 * If we don't know which subpages are hwpoisoned, we can't free
	 * the hugepage, so it's leaked intentionally.
	 */
	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return;

	if (hugetlb_vmemmap_restore(h, page)) {
		spin_lock_irq(&hugetlb_lock);
		/*
		 * If we cannot allocate vmemmap pages, just refuse to free
		 * the page, put the page back on the hugetlb free list and
		 * treat it as a surplus page.
		 */
		add_hugetlb_folio(h, folio, true);
		spin_unlock_irq(&hugetlb_lock);
		return;
	}

	/*
	 * Move PageHWPoison flag from head page to the raw error pages,
	 * which makes any healthy subpages reusable.
	 */
	if (unlikely(folio_test_hwpoison(folio)))
		hugetlb_clear_page_hwpoison(&folio->page);

	for (i = 0; i < pages_per_huge_page(h); i++) {
		subpage = folio_page(folio, i);
		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_private |
				1 << PG_writeback);
	}

	/*
	 * Non-gigantic pages demoted from CMA allocated gigantic pages
	 * need to be given back to CMA in free_gigantic_folio.
	 */
	if (hstate_is_gigantic(h) ||
	    hugetlb_cma_folio(folio, huge_page_order(h))) {
		destroy_compound_gigantic_folio(folio, huge_page_order(h));
		free_gigantic_folio(folio, huge_page_order(h));
	} else {
		__free_pages(page, huge_page_order(h));
	}
}
/*
 * As update_and_free_hugetlb_folio() can be called from any context, we
 * cannot use GFP_KERNEL to allocate vmemmap pages.  However, we can defer
 * the actual freeing to a workqueue to avoid using GFP_ATOMIC to allocate
 * the vmemmap pages.
 *
 * free_hpage_workfn() locklessly retrieves the linked list of pages to be
 * freed and frees them one-by-one.  As the page->mapping pointer is going
 * to be cleared in free_hpage_workfn() anyway, it is reused as the
 * llist_node structure of a lockless linked list of huge pages to be freed.
 */
static LLIST_HEAD(hpage_freelist);

static void free_hpage_workfn(struct work_struct *work)
{
	struct llist_node *node;

	node = llist_del_all(&hpage_freelist);

	while (node) {
		struct page *page;
		struct hstate *h;

		page = container_of((struct address_space **)node,
				     struct page, mapping);
		node = node->next;
		page->mapping = NULL;
		/*
		 * The VM_BUG_ON_PAGE(!PageHuge(page), page) in page_hstate()
		 * is going to trigger because a previous call to
		 * remove_hugetlb_folio() will call folio_set_compound_dtor
		 * (folio, NULL_COMPOUND_DTOR), so do not use page_hstate()
		 * directly.
		 */
		h = size_to_hstate(page_size(page));

		__update_and_free_page(h, page);

		cond_resched();
	}
}
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);

static inline void flush_free_hpage_work(struct hstate *h)
{
	if (hugetlb_vmemmap_optimizable(h))
		flush_work(&free_hpage_work);
}

static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
				 bool atomic)
{
	if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
		__update_and_free_page(h, &folio->page);
		return;
	}

	/*
	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
	 *
	 * Only call schedule_work() if hpage_freelist is previously
	 * empty.  Otherwise, schedule_work() had been called but the workfn
	 * hasn't retrieved the list yet.
	 */
	if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
		schedule_work(&free_hpage_work);
}
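/*
 * In short: only the combination of atomic context and a
 * vmemmap-optimized folio takes the deferred workqueue path; everything
 * else is freed synchronously.  schedule_work() is needed only on the
 * empty-to-non-empty transition of the llist, since an already
 * scheduled worker will drain whatever has been queued since.
 */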
1857 */ 1858 struct folio *folio = page_folio(page); 1859 struct hstate *h = folio_hstate(folio); 1860 int nid = folio_nid(folio); 1861 struct hugepage_subpool *spool = hugetlb_folio_subpool(folio); 1862 bool restore_reserve; 1863 unsigned long flags; 1864 1865 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio); 1866 VM_BUG_ON_FOLIO(folio_mapcount(folio), folio); 1867 1868 hugetlb_set_folio_subpool(folio, NULL); 1869 if (folio_test_anon(folio)) 1870 __ClearPageAnonExclusive(&folio->page); 1871 folio->mapping = NULL; 1872 restore_reserve = folio_test_hugetlb_restore_reserve(folio); 1873 folio_clear_hugetlb_restore_reserve(folio); 1874 1875 /* 1876 * If HPageRestoreReserve was set on page, page allocation consumed a 1877 * reservation. If the page was associated with a subpool, there 1878 * would have been a page reserved in the subpool before allocation 1879 * via hugepage_subpool_get_pages(). Since we are 'restoring' the 1880 * reservation, do not call hugepage_subpool_put_pages() as this will 1881 * remove the reserved page from the subpool. 1882 */ 1883 if (!restore_reserve) { 1884 /* 1885 * A return code of zero implies that the subpool will be 1886 * under its minimum size if the reservation is not restored 1887 * after page is free. Therefore, force restore_reserve 1888 * operation. 1889 */ 1890 if (hugepage_subpool_put_pages(spool, 1) == 0) 1891 restore_reserve = true; 1892 } 1893 1894 spin_lock_irqsave(&hugetlb_lock, flags); 1895 folio_clear_hugetlb_migratable(folio); 1896 hugetlb_cgroup_uncharge_folio(hstate_index(h), 1897 pages_per_huge_page(h), folio); 1898 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), 1899 pages_per_huge_page(h), folio); 1900 if (restore_reserve) 1901 h->resv_huge_pages++; 1902 1903 if (folio_test_hugetlb_temporary(folio)) { 1904 remove_hugetlb_folio(h, folio, false); 1905 spin_unlock_irqrestore(&hugetlb_lock, flags); 1906 update_and_free_hugetlb_folio(h, folio, true); 1907 } else if (h->surplus_huge_pages_node[nid]) { 1908 /* remove the page from active list */ 1909 remove_hugetlb_folio(h, folio, true); 1910 spin_unlock_irqrestore(&hugetlb_lock, flags); 1911 update_and_free_hugetlb_folio(h, folio, true); 1912 } else { 1913 arch_clear_hugepage_flags(page); 1914 enqueue_hugetlb_folio(h, folio); 1915 spin_unlock_irqrestore(&hugetlb_lock, flags); 1916 } 1917 } 1918 1919 /* 1920 * Must be called with the hugetlb lock held 1921 */ 1922 static void __prep_account_new_huge_page(struct hstate *h, int nid) 1923 { 1924 lockdep_assert_held(&hugetlb_lock); 1925 h->nr_huge_pages++; 1926 h->nr_huge_pages_node[nid]++; 1927 } 1928 1929 static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio) 1930 { 1931 hugetlb_vmemmap_optimize(h, &folio->page); 1932 INIT_LIST_HEAD(&folio->lru); 1933 folio_set_compound_dtor(folio, HUGETLB_PAGE_DTOR); 1934 hugetlb_set_folio_subpool(folio, NULL); 1935 set_hugetlb_cgroup(folio, NULL); 1936 set_hugetlb_cgroup_rsvd(folio, NULL); 1937 } 1938 1939 static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid) 1940 { 1941 __prep_new_hugetlb_folio(h, folio); 1942 spin_lock_irq(&hugetlb_lock); 1943 __prep_account_new_huge_page(h, nid); 1944 spin_unlock_irq(&hugetlb_lock); 1945 } 1946 1947 static bool __prep_compound_gigantic_folio(struct folio *folio, 1948 unsigned int order, bool demote) 1949 { 1950 int i, j; 1951 int nr_pages = 1 << order; 1952 struct page *p; 1953 1954 __folio_clear_reserved(folio); 1955 __folio_set_head(folio); 1956 /* we rely on prep_new_hugetlb_folio to set the destructor */ 1957 
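	/*
	 * Sketch of the layout being built by the loop below (a summary,
	 * not additional semantics): page 0 is the head page; every tail
	 * page gets PG_reserved cleared, its ref count frozen to zero
	 * (unless demoting, where it is already zero), and compound_head
	 * pointed back at the head page.
	 */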
folio_set_compound_order(folio, order);
1958	for (i = 0; i < nr_pages; i++) {
1959		p = folio_page(folio, i);
1960
1961		/*
1962		 * For gigantic hugepages allocated through bootmem at
1963		 * boot, it's safer to be consistent with the not-gigantic
1964		 * hugepages and clear the PG_reserved bit from all tail pages
1965		 * too. Otherwise drivers using get_user_pages() to access tail
1966		 * pages may get the reference counting wrong if they see
1967		 * PG_reserved set on a tail page (despite the head page not
1968		 * having PG_reserved set). Enforcing this consistency between
1969		 * head and tail pages allows drivers to optimize away a check
1970		 * on the head page when they need to know if put_page() is
1971		 * needed after get_user_pages().
1972		 */
1973		if (i != 0)	/* head page cleared above */
1974			__ClearPageReserved(p);
1975		/*
1976		 * Subtle and very unlikely:
1977		 *
1978		 * Gigantic 'page allocators' such as memblock or cma will
1979		 * return a set of pages with each page ref counted. We need
1980		 * to turn this set of pages into a compound page with tail
1981		 * page ref counts set to zero. Code such as speculative page
1982		 * cache adding could take a ref on a 'to be' tail page.
1983		 * We need to respect any increased ref count, and only set
1984		 * the ref count to zero if count is currently 1. If count
1985		 * is not 1, we return an error. An error return indicates
1986		 * the set of pages cannot be converted to a gigantic page.
1987		 * The caller who allocated the pages should then discard the
1988		 * pages using the appropriate free interface.
1989		 *
1990		 * In the case of demote, the ref count will be zero.
1991		 */
1992		if (!demote) {
1993			if (!page_ref_freeze(p, 1)) {
1994				pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n");
1995				goto out_error;
1996			}
1997		} else {
1998			VM_BUG_ON_PAGE(page_count(p), p);
1999		}
2000		if (i != 0)
2001			set_compound_head(p, &folio->page);
2002	}
2003	atomic_set(folio_mapcount_ptr(folio), -1);
2004	atomic_set(folio_subpages_mapcount_ptr(folio), 0);
2005	atomic_set(folio_pincount_ptr(folio), 0);
2006	return true;
2007
2008 out_error:
2009	/* undo page modifications made above */
2010	for (j = 0; j < i; j++) {
2011		p = folio_page(folio, j);
2012		if (j != 0)
2013			clear_compound_head(p);
2014		set_page_refcounted(p);
2015	}
2016	/* need to clear PG_reserved on remaining tail pages */
2017	for (; j < nr_pages; j++) {
2018		p = folio_page(folio, j);
2019		__ClearPageReserved(p);
2020	}
2021	folio_set_compound_order(folio, 0);
2022	__folio_clear_head(folio);
2023	return false;
2024 }
2025
2026 static bool prep_compound_gigantic_folio(struct folio *folio,
2027							unsigned int order)
2028 {
2029	return __prep_compound_gigantic_folio(folio, order, false);
2030 }
2031
2032 static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
2033							unsigned int order)
2034 {
2035	return __prep_compound_gigantic_folio(folio, order, true);
2036 }
2037
2038 /*
2039  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
2040  * transparent huge pages. See the PageTransHuge() documentation for more
2041  * details.
2042  */
2043 int PageHuge(struct page *page)
2044 {
2045	if (!PageCompound(page))
2046		return 0;
2047
2048	page = compound_head(page);
2049	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
2050 }
2051 EXPORT_SYMBOL_GPL(PageHuge);
2052
2053 /*
2054  * PageHeadHuge() only returns true for a hugetlbfs head page, but not for
2055  * normal or transparent huge pages.
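 *
 * For example (a sketch), given a hugetlb head page 'head' and one of its
 * tail pages 'tail':
 *
 *	PageHuge(head)     == 1		PageHuge(tail)     == 1
 *	PageHeadHuge(head) == 1		PageHeadHuge(tail) == 0
 *
 * because PageHuge() looks at compound_head() while PageHeadHuge()
 * requires the page itself to be a head page.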
2056 */ 2057 int PageHeadHuge(struct page *page_head) 2058 { 2059 if (!PageHead(page_head)) 2060 return 0; 2061 2062 return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR; 2063 } 2064 EXPORT_SYMBOL_GPL(PageHeadHuge); 2065 2066 /* 2067 * Find and lock address space (mapping) in write mode. 2068 * 2069 * Upon entry, the page is locked which means that page_mapping() is 2070 * stable. Due to locking order, we can only trylock_write. If we can 2071 * not get the lock, simply return NULL to caller. 2072 */ 2073 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage) 2074 { 2075 struct address_space *mapping = page_mapping(hpage); 2076 2077 if (!mapping) 2078 return mapping; 2079 2080 if (i_mmap_trylock_write(mapping)) 2081 return mapping; 2082 2083 return NULL; 2084 } 2085 2086 pgoff_t hugetlb_basepage_index(struct page *page) 2087 { 2088 struct page *page_head = compound_head(page); 2089 pgoff_t index = page_index(page_head); 2090 unsigned long compound_idx; 2091 2092 if (compound_order(page_head) >= MAX_ORDER) 2093 compound_idx = page_to_pfn(page) - page_to_pfn(page_head); 2094 else 2095 compound_idx = page - page_head; 2096 2097 return (index << compound_order(page_head)) + compound_idx; 2098 } 2099 2100 static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h, 2101 gfp_t gfp_mask, int nid, nodemask_t *nmask, 2102 nodemask_t *node_alloc_noretry) 2103 { 2104 int order = huge_page_order(h); 2105 struct page *page; 2106 bool alloc_try_hard = true; 2107 bool retry = true; 2108 2109 /* 2110 * By default we always try hard to allocate the page with 2111 * __GFP_RETRY_MAYFAIL flag. However, if we are allocating pages in 2112 * a loop (to adjust global huge page counts) and previous allocation 2113 * failed, do not continue to try hard on the same node. Use the 2114 * node_alloc_noretry bitmap to manage this state information. 2115 */ 2116 if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry)) 2117 alloc_try_hard = false; 2118 gfp_mask |= __GFP_COMP|__GFP_NOWARN; 2119 if (alloc_try_hard) 2120 gfp_mask |= __GFP_RETRY_MAYFAIL; 2121 if (nid == NUMA_NO_NODE) 2122 nid = numa_mem_id(); 2123 retry: 2124 page = __alloc_pages(gfp_mask, order, nid, nmask); 2125 2126 /* Freeze head page */ 2127 if (page && !page_ref_freeze(page, 1)) { 2128 __free_pages(page, order); 2129 if (retry) { /* retry once */ 2130 retry = false; 2131 goto retry; 2132 } 2133 /* WOW! twice in a row. */ 2134 pr_warn("HugeTLB head page unexpected inflated ref count\n"); 2135 page = NULL; 2136 } 2137 2138 /* 2139 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page this 2140 * indicates an overall state change. Clear bit so that we resume 2141 * normal 'try hard' allocations. 2142 */ 2143 if (node_alloc_noretry && page && !alloc_try_hard) 2144 node_clear(nid, *node_alloc_noretry); 2145 2146 /* 2147 * If we tried hard to get a page but failed, set bit so that 2148 * subsequent attempts will not try as hard until there is an 2149 * overall state change. 2150 */ 2151 if (node_alloc_noretry && !page && alloc_try_hard) 2152 node_set(nid, *node_alloc_noretry); 2153 2154 if (!page) { 2155 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); 2156 return NULL; 2157 } 2158 2159 __count_vm_event(HTLB_BUDDY_PGALLOC); 2160 return page_folio(page); 2161 } 2162 2163 /* 2164 * Common helper to allocate a fresh hugetlb page. All specific allocators 2165 * should use this function to get new hugetlb pages 2166 * 2167 * Note that returned page is 'frozen': ref count of head page and all tail 2168 * pages is zero. 
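 *
 * Caller sketch: the frozen folio is either enqueued into the pool with
 * its ref count still zero, or unfrozen before being handed out, as
 * alloc_migrate_huge_page() below does:
 *
 *	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
 *	if (folio)
 *		folio_ref_unfreeze(folio, 1);	-- now an ordinary reference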
2169	 */
2170 static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
2171		gfp_t gfp_mask, int nid, nodemask_t *nmask,
2172		nodemask_t *node_alloc_noretry)
2173 {
2174	struct folio *folio;
2175	bool retry = false;
2176
2177 retry:
2178	if (hstate_is_gigantic(h))
2179		folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
2180	else
2181		folio = alloc_buddy_hugetlb_folio(h, gfp_mask,
2182				nid, nmask, node_alloc_noretry);
2183	if (!folio)
2184		return NULL;
2185	if (hstate_is_gigantic(h)) {
2186		if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) {
2187			/*
2188			 * Rare failure to convert pages to compound page.
2189			 * Free pages and try again - ONCE!
2190			 */
2191			free_gigantic_folio(folio, huge_page_order(h));
2192			if (!retry) {
2193				retry = true;
2194				goto retry;
2195			}
2196			return NULL;
2197		}
2198	}
2199	prep_new_hugetlb_folio(h, folio, folio_nid(folio));
2200
2201	return folio;
2202 }
2203
2204 /*
2205  * Allocates a fresh page to the hugetlb allocator pool in a node-interleaved
2206  * manner.
2207  */
2208 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
2209				nodemask_t *node_alloc_noretry)
2210 {
2211	struct folio *folio;
2212	int nr_nodes, node;
2213	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2214
2215	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2216		folio = alloc_fresh_hugetlb_folio(h, gfp_mask, node,
2217					nodes_allowed, node_alloc_noretry);
2218		if (folio) {
2219			free_huge_page(&folio->page); /* free it into the hugepage allocator */
2220			return 1;
2221		}
2222	}
2223
2224	return 0;
2225 }
2226
2227 /*
2228  * Remove a huge page from the pool, taken from the next node to free.
2229  * Attempt to keep persistent huge pages more or less balanced over the
2230  * allowed nodes. This routine only 'removes' the hugetlb page. The caller
2231  * must make an additional call to free the page to the low-level allocators.
2232  * Called with hugetlb_lock locked.
2233  */
2234 static struct page *remove_pool_huge_page(struct hstate *h,
2235						nodemask_t *nodes_allowed,
2236						 bool acct_surplus)
2237 {
2238	int nr_nodes, node;
2239	struct page *page = NULL;
2240	struct folio *folio;
2241
2242	lockdep_assert_held(&hugetlb_lock);
2243	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2244		/*
2245		 * If we're returning unused surplus pages, only examine
2246		 * nodes with surplus pages.
2247		 */
2248		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
2249		    !list_empty(&h->hugepage_freelists[node])) {
2250			page = list_entry(h->hugepage_freelists[node].next,
2251					  struct page, lru);
2252			folio = page_folio(page);
2253			remove_hugetlb_folio(h, folio, acct_surplus);
2254			break;
2255		}
2256	}
2257
2258	return page;
2259 }
2260
2261 /*
2262  * Dissolve a given free hugepage into free buddy pages. This function does
2263  * nothing for in-use hugepages and non-hugepages.
2264  * Return values:
2265  *
2266  * -ENOMEM: failed to allocate the vmemmap pages needed to free the hugepage
2267  *          when the system is under memory pressure and the feature of
2268  *          freeing unused vmemmap pages associated with each hugetlb page
2269  *          is enabled.
2270  * -EBUSY:  failed to dissolve the free hugepage, or the hugepage is in-use
2271  *          (allocated or reserved).
2272  * 0:       successfully dissolved the free hugepage, or the page is not a
2273  *          hugepage (considered as already dissolved)
2274  */
2275 int dissolve_free_huge_page(struct page *page)
2276 {
2277	int rc = -EBUSY;
2278	struct folio *folio = page_folio(page);
2279
2280 retry:
2281	/* Do not disrupt the normal path by needlessly holding hugetlb_lock */
2282	if (!folio_test_hugetlb(folio))
2283		return 0;
2284
2285	spin_lock_irq(&hugetlb_lock);
2286	if (!folio_test_hugetlb(folio)) {
2287		rc = 0;
2288		goto out;
2289	}
2290
2291	if (!folio_ref_count(folio)) {
2292		struct hstate *h = folio_hstate(folio);
2293		if (!available_huge_pages(h))
2294			goto out;
2295
2296		/*
2297		 * We should make sure that the page is already on the free list
2298		 * when it is dissolved.
2299		 */
2300		if (unlikely(!folio_test_hugetlb_freed(folio))) {
2301			spin_unlock_irq(&hugetlb_lock);
2302			cond_resched();
2303
2304			/*
2305			 * Theoretically, we should return -EBUSY when we
2306			 * encounter this race. In fact, we have a chance
2307			 * to successfully dissolve the page if we retry,
2308			 * because the race window is quite small. Seizing
2309			 * this opportunity is an optimization that increases
2310			 * the success rate of dissolving the page.
2311			 */
2312			goto retry;
2313		}
2314
2315		remove_hugetlb_folio(h, folio, false);
2316		h->max_huge_pages--;
2317		spin_unlock_irq(&hugetlb_lock);
2318
2319		/*
2320		 * Normally update_and_free_hugetlb_folio will allocate the
2321		 * required vmemmap before freeing the page, and will fail to
2322		 * free the page if it cannot allocate the required vmemmap.
2323		 * We need to adjust max_huge_pages if the page is not freed.
2324		 * Attempt to allocate the vmemmap here so that we can take
2325		 * appropriate action on failure.
2326		 */
2327		rc = hugetlb_vmemmap_restore(h, &folio->page);
2328		if (!rc) {
2329			update_and_free_hugetlb_folio(h, folio, false);
2330		} else {
2331			spin_lock_irq(&hugetlb_lock);
2332			add_hugetlb_folio(h, folio, false);
2333			h->max_huge_pages++;
2334			spin_unlock_irq(&hugetlb_lock);
2335		}
2336
2337		return rc;
2338	}
2339 out:
2340	spin_unlock_irq(&hugetlb_lock);
2341	return rc;
2342 }
2343
2344 /*
2345  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2346  * make specified memory blocks removable from the system.
2347  * Note that this will dissolve a free gigantic hugepage completely, if any
2348  * part of it lies within the given range.
2349  * Also note that if dissolve_free_huge_page() returns with an error, all
2350  * free hugepages that were dissolved before that error are lost.
2351  */
2352 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
2353 {
2354	unsigned long pfn;
2355	struct page *page;
2356	int rc = 0;
2357	unsigned int order;
2358	struct hstate *h;
2359
2360	if (!hugepages_supported())
2361		return rc;
2362
2363	order = huge_page_order(&default_hstate);
2364	for_each_hstate(h)
2365		order = min(order, huge_page_order(h));
2366
2367	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
2368		page = pfn_to_page(pfn);
2369		rc = dissolve_free_huge_page(page);
2370		if (rc)
2371			break;
2372	}
2373
2374	return rc;
2375 }
2376
2377 /*
2378  * Allocates a fresh surplus page from the page allocator.
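 *
 * The new page is counted against nr_overcommit_huge_pages; if the surplus
 * limit has already been reached, or a racing pool resize pushes us past
 * it, the page is freed again via the temporary-page path and NULL is
 * returned.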
2379	 */
2380 static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
2381						int nid, nodemask_t *nmask)
2382 {
2383	struct folio *folio = NULL;
2384
2385	if (hstate_is_gigantic(h))
2386		return NULL;
2387
2388	spin_lock_irq(&hugetlb_lock);
2389	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
2390		goto out_unlock;
2391	spin_unlock_irq(&hugetlb_lock);
2392
2393	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
2394	if (!folio)
2395		return NULL;
2396
2397	spin_lock_irq(&hugetlb_lock);
2398	/*
2399	 * We could have raced with the pool size change.
2400	 * Double check that and simply deallocate the new page
2401	 * if we would end up overcommitting the surplus pages. Abuse
2402	 * the temporary-page mechanism to work around the nasty
2403	 * free_huge_page code flow.
2404	 */
2405	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
2406		folio_set_hugetlb_temporary(folio);
2407		spin_unlock_irq(&hugetlb_lock);
2408		free_huge_page(&folio->page);
2409		return NULL;
2410	}
2411
2412	h->surplus_huge_pages++;
2413	h->surplus_huge_pages_node[folio_nid(folio)]++;
2414
2415 out_unlock:
2416	spin_unlock_irq(&hugetlb_lock);
2417
2418	return &folio->page;
2419 }
2420
2421 static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
2422				     int nid, nodemask_t *nmask)
2423 {
2424	struct folio *folio;
2425
2426	if (hstate_is_gigantic(h))
2427		return NULL;
2428
2429	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
2430	if (!folio)
2431		return NULL;
2432
2433	/* fresh huge pages are frozen */
2434	folio_ref_unfreeze(folio, 1);
2435	/*
2436	 * We do not account these pages as surplus because they are only
2437	 * temporary and will be released properly on the last reference.
2438	 */
2439	folio_set_hugetlb_temporary(folio);
2440
2441	return &folio->page;
2442 }
2443
2444 /*
2445  * Use the VMA's mpolicy to allocate a huge page from the buddy.
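 *
 * For MPOL_PREFERRED_MANY this is a two-pass scheme: first try only the
 * preferred nodes, with __GFP_NOWARN and without direct reclaim, and only
 * then fall back to all nodes (nodemask == NULL) using the original gfp
 * mask.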
2446 */ 2447 static 2448 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h, 2449 struct vm_area_struct *vma, unsigned long addr) 2450 { 2451 struct page *page = NULL; 2452 struct mempolicy *mpol; 2453 gfp_t gfp_mask = htlb_alloc_mask(h); 2454 int nid; 2455 nodemask_t *nodemask; 2456 2457 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask); 2458 if (mpol_is_preferred_many(mpol)) { 2459 gfp_t gfp = gfp_mask | __GFP_NOWARN; 2460 2461 gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 2462 page = alloc_surplus_huge_page(h, gfp, nid, nodemask); 2463 2464 /* Fallback to all nodes if page==NULL */ 2465 nodemask = NULL; 2466 } 2467 2468 if (!page) 2469 page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask); 2470 mpol_cond_put(mpol); 2471 return page; 2472 } 2473 2474 /* page migration callback function */ 2475 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, 2476 nodemask_t *nmask, gfp_t gfp_mask) 2477 { 2478 spin_lock_irq(&hugetlb_lock); 2479 if (available_huge_pages(h)) { 2480 struct page *page; 2481 2482 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask); 2483 if (page) { 2484 spin_unlock_irq(&hugetlb_lock); 2485 return page; 2486 } 2487 } 2488 spin_unlock_irq(&hugetlb_lock); 2489 2490 return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask); 2491 } 2492 2493 /* mempolicy aware migration callback */ 2494 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, 2495 unsigned long address) 2496 { 2497 struct mempolicy *mpol; 2498 nodemask_t *nodemask; 2499 struct page *page; 2500 gfp_t gfp_mask; 2501 int node; 2502 2503 gfp_mask = htlb_alloc_mask(h); 2504 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask); 2505 page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask); 2506 mpol_cond_put(mpol); 2507 2508 return page; 2509 } 2510 2511 /* 2512 * Increase the hugetlb pool such that it can accommodate a reservation 2513 * of size 'delta'. 2514 */ 2515 static int gather_surplus_pages(struct hstate *h, long delta) 2516 __must_hold(&hugetlb_lock) 2517 { 2518 LIST_HEAD(surplus_list); 2519 struct page *page, *tmp; 2520 int ret; 2521 long i; 2522 long needed, allocated; 2523 bool alloc_ok = true; 2524 2525 lockdep_assert_held(&hugetlb_lock); 2526 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; 2527 if (needed <= 0) { 2528 h->resv_huge_pages += delta; 2529 return 0; 2530 } 2531 2532 allocated = 0; 2533 2534 ret = -ENOMEM; 2535 retry: 2536 spin_unlock_irq(&hugetlb_lock); 2537 for (i = 0; i < needed; i++) { 2538 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h), 2539 NUMA_NO_NODE, NULL); 2540 if (!page) { 2541 alloc_ok = false; 2542 break; 2543 } 2544 list_add(&page->lru, &surplus_list); 2545 cond_resched(); 2546 } 2547 allocated += i; 2548 2549 /* 2550 * After retaking hugetlb_lock, we need to recalculate 'needed' 2551 * because either resv_huge_pages or free_huge_pages may have changed. 2552 */ 2553 spin_lock_irq(&hugetlb_lock); 2554 needed = (h->resv_huge_pages + delta) - 2555 (h->free_huge_pages + allocated); 2556 if (needed > 0) { 2557 if (alloc_ok) 2558 goto retry; 2559 /* 2560 * We were not able to allocate enough pages to 2561 * satisfy the entire reservation so we free what 2562 * we've allocated so far. 2563 */ 2564 goto free; 2565 } 2566 /* 2567 * The surplus_list now contains _at_least_ the number of extra pages 2568 * needed to accommodate the reservation. Add the appropriate number 2569 * of pages to the hugetlb pool and free the extras back to the buddy 2570 * allocator. 
Commit the entire reservation here to prevent another 2571 * process from stealing the pages as they are added to the pool but 2572 * before they are reserved. 2573 */ 2574 needed += allocated; 2575 h->resv_huge_pages += delta; 2576 ret = 0; 2577 2578 /* Free the needed pages to the hugetlb pool */ 2579 list_for_each_entry_safe(page, tmp, &surplus_list, lru) { 2580 if ((--needed) < 0) 2581 break; 2582 /* Add the page to the hugetlb allocator */ 2583 enqueue_hugetlb_folio(h, page_folio(page)); 2584 } 2585 free: 2586 spin_unlock_irq(&hugetlb_lock); 2587 2588 /* 2589 * Free unnecessary surplus pages to the buddy allocator. 2590 * Pages have no ref count, call free_huge_page directly. 2591 */ 2592 list_for_each_entry_safe(page, tmp, &surplus_list, lru) 2593 free_huge_page(page); 2594 spin_lock_irq(&hugetlb_lock); 2595 2596 return ret; 2597 } 2598 2599 /* 2600 * This routine has two main purposes: 2601 * 1) Decrement the reservation count (resv_huge_pages) by the value passed 2602 * in unused_resv_pages. This corresponds to the prior adjustments made 2603 * to the associated reservation map. 2604 * 2) Free any unused surplus pages that may have been allocated to satisfy 2605 * the reservation. As many as unused_resv_pages may be freed. 2606 */ 2607 static void return_unused_surplus_pages(struct hstate *h, 2608 unsigned long unused_resv_pages) 2609 { 2610 unsigned long nr_pages; 2611 struct page *page; 2612 LIST_HEAD(page_list); 2613 2614 lockdep_assert_held(&hugetlb_lock); 2615 /* Uncommit the reservation */ 2616 h->resv_huge_pages -= unused_resv_pages; 2617 2618 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 2619 goto out; 2620 2621 /* 2622 * Part (or even all) of the reservation could have been backed 2623 * by pre-allocated pages. Only free surplus pages. 2624 */ 2625 nr_pages = min(unused_resv_pages, h->surplus_huge_pages); 2626 2627 /* 2628 * We want to release as many surplus pages as possible, spread 2629 * evenly across all nodes with memory. Iterate across these nodes 2630 * until we can no longer free unreserved surplus pages. This occurs 2631 * when the nodes with surplus pages have no free pages. 2632 * remove_pool_huge_page() will balance the freed pages across the 2633 * on-line nodes with memory and will handle the hstate accounting. 2634 */ 2635 while (nr_pages--) { 2636 page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1); 2637 if (!page) 2638 goto out; 2639 2640 list_add(&page->lru, &page_list); 2641 } 2642 2643 out: 2644 spin_unlock_irq(&hugetlb_lock); 2645 update_and_free_pages_bulk(h, &page_list); 2646 spin_lock_irq(&hugetlb_lock); 2647 } 2648 2649 2650 /* 2651 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation 2652 * are used by the huge page allocation routines to manage reservations. 2653 * 2654 * vma_needs_reservation is called to determine if the huge page at addr 2655 * within the vma has an associated reservation. If a reservation is 2656 * needed, the value 1 is returned. The caller is then responsible for 2657 * managing the global reservation and subpool usage counts. After 2658 * the huge page has been allocated, vma_commit_reservation is called 2659 * to add the page to the reservation map. If the page allocation fails, 2660 * the reservation must be ended instead of committed. vma_end_reservation 2661 * is called in such cases. 2662 * 2663 * In the normal case, vma_commit_reservation returns the same value 2664 * as the preceding vma_needs_reservation call. 
The only time this
2665  * is not the case is if a reserve map was changed between calls. It
2666  * is the responsibility of the caller to notice the difference and
2667  * take appropriate action.
2668  *
2669  * vma_add_reservation is used in error paths where a reservation must
2670  * be restored when a newly allocated huge page is freed. It is
2671  * to be called after calling vma_needs_reservation to determine if a
2672  * reservation exists.
2673  *
2674  * vma_del_reservation is used in error paths where an entry in the reserve
2675  * map was created during huge page allocation and must be removed. It is to
2676  * be called after calling vma_needs_reservation to determine if a reservation
2677  * exists.
2678  */
2679 enum vma_resv_mode {
2680	VMA_NEEDS_RESV,
2681	VMA_COMMIT_RESV,
2682	VMA_END_RESV,
2683	VMA_ADD_RESV,
2684	VMA_DEL_RESV,
2685 };
2686 static long __vma_reservation_common(struct hstate *h,
2687				struct vm_area_struct *vma, unsigned long addr,
2688				enum vma_resv_mode mode)
2689 {
2690	struct resv_map *resv;
2691	pgoff_t idx;
2692	long ret;
2693	long dummy_out_regions_needed;
2694
2695	resv = vma_resv_map(vma);
2696	if (!resv)
2697		return 1;
2698
2699	idx = vma_hugecache_offset(h, vma, addr);
2700	switch (mode) {
2701	case VMA_NEEDS_RESV:
2702		ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2703		/* We assume that vma_reservation_* routines always operate on
2704		 * 1 page, and that adding a 1 page entry to the resv map can
2705		 * only ever require 1 region.
2706		 */
2707		VM_BUG_ON(dummy_out_regions_needed != 1);
2708		break;
2709	case VMA_COMMIT_RESV:
2710		ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2711		/* region_add calls of range 1 should never fail. */
2712		VM_BUG_ON(ret < 0);
2713		break;
2714	case VMA_END_RESV:
2715		region_abort(resv, idx, idx + 1, 1);
2716		ret = 0;
2717		break;
2718	case VMA_ADD_RESV:
2719		if (vma->vm_flags & VM_MAYSHARE) {
2720			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2721			/* region_add calls of range 1 should never fail. */
2722			VM_BUG_ON(ret < 0);
2723		} else {
2724			region_abort(resv, idx, idx + 1, 1);
2725			ret = region_del(resv, idx, idx + 1);
2726		}
2727		break;
2728	case VMA_DEL_RESV:
2729		if (vma->vm_flags & VM_MAYSHARE) {
2730			region_abort(resv, idx, idx + 1, 1);
2731			ret = region_del(resv, idx, idx + 1);
2732		} else {
2733			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2734			/* region_add calls of range 1 should never fail. */
2735			VM_BUG_ON(ret < 0);
2736		}
2737		break;
2738	default:
2739		BUG();
2740	}
2741
2742	if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
2743		return ret;
2744	/*
2745	 * We know a private mapping must have HPAGE_RESV_OWNER set.
2746	 *
2747	 * In most cases, reserves always exist for private mappings.
2748	 * However, a file associated with the mapping could have been
2749	 * hole punched or truncated after reserves were consumed, in which
2750	 * case a subsequent fault on such a range will not use reserves.
2751	 * Subtle - the reserve map for private mappings has the
2752	 * opposite meaning from that of shared mappings. If NO
2753	 * entry is in the reserve map, it means a reservation exists.
2754	 * If an entry exists in the reserve map, it means the
2755	 * reservation has already been consumed. As a result, the
2756	 * return value of this routine is the opposite of the
2757	 * value returned from reserve map manipulation routines above.
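	 *
	 * For example (VMA_NEEDS_RESV on a private mapping): region_chg()
	 * returning 1 means no entry was present, i.e. a reservation still
	 * exists, so this routine returns 0 (no new reservation needed);
	 * region_chg() returning 0 means an entry was present, i.e. the
	 * reservation was already consumed, so this routine returns 1.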
2758 */ 2759 if (ret > 0) 2760 return 0; 2761 if (ret == 0) 2762 return 1; 2763 return ret; 2764 } 2765 2766 static long vma_needs_reservation(struct hstate *h, 2767 struct vm_area_struct *vma, unsigned long addr) 2768 { 2769 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); 2770 } 2771 2772 static long vma_commit_reservation(struct hstate *h, 2773 struct vm_area_struct *vma, unsigned long addr) 2774 { 2775 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); 2776 } 2777 2778 static void vma_end_reservation(struct hstate *h, 2779 struct vm_area_struct *vma, unsigned long addr) 2780 { 2781 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); 2782 } 2783 2784 static long vma_add_reservation(struct hstate *h, 2785 struct vm_area_struct *vma, unsigned long addr) 2786 { 2787 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); 2788 } 2789 2790 static long vma_del_reservation(struct hstate *h, 2791 struct vm_area_struct *vma, unsigned long addr) 2792 { 2793 return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV); 2794 } 2795 2796 /* 2797 * This routine is called to restore reservation information on error paths. 2798 * It should ONLY be called for pages allocated via alloc_huge_page(), and 2799 * the hugetlb mutex should remain held when calling this routine. 2800 * 2801 * It handles two specific cases: 2802 * 1) A reservation was in place and the page consumed the reservation. 2803 * HPageRestoreReserve is set in the page. 2804 * 2) No reservation was in place for the page, so HPageRestoreReserve is 2805 * not set. However, alloc_huge_page always updates the reserve map. 2806 * 2807 * In case 1, free_huge_page later in the error path will increment the 2808 * global reserve count. But, free_huge_page does not have enough context 2809 * to adjust the reservation map. This case deals primarily with private 2810 * mappings. Adjust the reserve map here to be consistent with global 2811 * reserve count adjustments to be made by free_huge_page. Make sure the 2812 * reserve map indicates there is a reservation present. 2813 * 2814 * In case 2, simply undo reserve map modifications done by alloc_huge_page. 2815 */ 2816 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, 2817 unsigned long address, struct page *page) 2818 { 2819 long rc = vma_needs_reservation(h, vma, address); 2820 2821 if (HPageRestoreReserve(page)) { 2822 if (unlikely(rc < 0)) 2823 /* 2824 * Rare out of memory condition in reserve map 2825 * manipulation. Clear HPageRestoreReserve so that 2826 * global reserve count will not be incremented 2827 * by free_huge_page. This will make it appear 2828 * as though the reservation for this page was 2829 * consumed. This may prevent the task from 2830 * faulting in the page at a later time. This 2831 * is better than inconsistent global huge page 2832 * accounting of reserve counts. 2833 */ 2834 ClearHPageRestoreReserve(page); 2835 else if (rc) 2836 (void)vma_add_reservation(h, vma, address); 2837 else 2838 vma_end_reservation(h, vma, address); 2839 } else { 2840 if (!rc) { 2841 /* 2842 * This indicates there is an entry in the reserve map 2843 * not added by alloc_huge_page. We know it was added 2844 * before the alloc_huge_page call, otherwise 2845 * HPageRestoreReserve would be set on the page. 2846 * Remove the entry so that a subsequent allocation 2847 * does not consume a reservation. 2848 */ 2849 rc = vma_del_reservation(h, vma, address); 2850 if (rc < 0) 2851 /* 2852 * VERY rare out of memory condition. 
Since
2853			 * we cannot delete the entry, set
2854			 * HPageRestoreReserve so that the reserve
2855			 * count will be incremented when the page
2856			 * is freed. This reserve will be consumed
2857			 * on a subsequent allocation.
2858			 */
2859			SetHPageRestoreReserve(page);
2860	} else if (rc < 0) {
2861		/*
2862		 * Rare out of memory condition from
2863		 * vma_needs_reservation call. Memory allocation is
2864		 * only attempted if a new entry is needed. Therefore,
2865		 * this implies there is not an entry in the
2866		 * reserve map.
2867		 *
2868		 * For shared mappings, no entry in the map indicates
2869		 * no reservation. We are done.
2870		 */
2871		if (!(vma->vm_flags & VM_MAYSHARE))
2872			/*
2873			 * For private mappings, no entry indicates
2874			 * a reservation is present. Since we cannot
2875			 * add an entry, call SetHPageRestoreReserve
2876			 * on the page so the reserve count will be
2877			 * incremented when freed. This reserve will
2878			 * be consumed on a subsequent allocation.
2879			 */
2880			SetHPageRestoreReserve(page);
2881	} else
2882		/*
2883		 * No reservation present, do nothing.
2884		 */
2885		vma_end_reservation(h, vma, address);
2886	}
2887 }
2888
2889 /*
2890  * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
2891  *	the old one
2892  * @h: struct hstate old page belongs to
2893  * @old_folio: Old folio to dissolve
2894  * @list: List to isolate the page in case we need to
2895  * Returns 0 on success, otherwise a negated error.
2896  */
2897 static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
2898			struct folio *old_folio, struct list_head *list)
2899 {
2900	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2901	int nid = folio_nid(old_folio);
2902	struct folio *new_folio;
2903	int ret = 0;
2904
2905	/*
2906	 * Before dissolving the folio, we need to allocate a new one for the
2907	 * pool to remain stable. Here, we allocate the folio and 'prep' it
2908	 * by doing everything but actually updating counters and adding to
2909	 * the pool. This simplifies and lets us do most of the processing
2910	 * under the lock.
2911	 */
2912	new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, NULL, NULL);
2913	if (!new_folio)
2914		return -ENOMEM;
2915	__prep_new_hugetlb_folio(h, new_folio);
2916
2917 retry:
2918	spin_lock_irq(&hugetlb_lock);
2919	if (!folio_test_hugetlb(old_folio)) {
2920		/*
2921		 * Freed from under us. Drop new_folio too.
2922		 */
2923		goto free_new;
2924	} else if (folio_ref_count(old_folio)) {
2925		/*
2926		 * Someone has grabbed the folio, try to isolate it here.
2927		 * Fail with -EBUSY if not possible.
2928		 */
2929		spin_unlock_irq(&hugetlb_lock);
2930		ret = isolate_hugetlb(&old_folio->page, list);
2931		spin_lock_irq(&hugetlb_lock);
2932		goto free_new;
2933	} else if (!folio_test_hugetlb_freed(old_folio)) {
2934		/*
2935		 * The folio's refcount is 0 but it has not been enqueued on
2936		 * the freelist yet. The race window is small, so we can
2937		 * succeed here if we retry.
2938		 */
2939		spin_unlock_irq(&hugetlb_lock);
2940		cond_resched();
2941		goto retry;
2942	} else {
2943		/*
2944		 * Ok, old_folio is still a genuine free hugepage. Remove it from
2945		 * the freelist and decrease the counters. These will be
2946		 * incremented again when calling __prep_account_new_huge_page()
2947		 * and enqueue_hugetlb_folio() for new_folio. The counters will
2948		 * remain stable since this happens under the lock.
2949		 */
2950		remove_hugetlb_folio(h, old_folio, false);
2951
2952		/*
2953		 * Ref count on new_folio is already zero as it was dropped
2954		 * earlier. It can be directly added to the pool free list.
2955 */ 2956 __prep_account_new_huge_page(h, nid); 2957 enqueue_hugetlb_folio(h, new_folio); 2958 2959 /* 2960 * Folio has been replaced, we can safely free the old one. 2961 */ 2962 spin_unlock_irq(&hugetlb_lock); 2963 update_and_free_hugetlb_folio(h, old_folio, false); 2964 } 2965 2966 return ret; 2967 2968 free_new: 2969 spin_unlock_irq(&hugetlb_lock); 2970 /* Folio has a zero ref count, but needs a ref to be freed */ 2971 folio_ref_unfreeze(new_folio, 1); 2972 update_and_free_hugetlb_folio(h, new_folio, false); 2973 2974 return ret; 2975 } 2976 2977 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) 2978 { 2979 struct hstate *h; 2980 struct folio *folio = page_folio(page); 2981 int ret = -EBUSY; 2982 2983 /* 2984 * The page might have been dissolved from under our feet, so make sure 2985 * to carefully check the state under the lock. 2986 * Return success when racing as if we dissolved the page ourselves. 2987 */ 2988 spin_lock_irq(&hugetlb_lock); 2989 if (folio_test_hugetlb(folio)) { 2990 h = folio_hstate(folio); 2991 } else { 2992 spin_unlock_irq(&hugetlb_lock); 2993 return 0; 2994 } 2995 spin_unlock_irq(&hugetlb_lock); 2996 2997 /* 2998 * Fence off gigantic pages as there is a cyclic dependency between 2999 * alloc_contig_range and them. Return -ENOMEM as this has the effect 3000 * of bailing out right away without further retrying. 3001 */ 3002 if (hstate_is_gigantic(h)) 3003 return -ENOMEM; 3004 3005 if (folio_ref_count(folio) && !isolate_hugetlb(&folio->page, list)) 3006 ret = 0; 3007 else if (!folio_ref_count(folio)) 3008 ret = alloc_and_dissolve_hugetlb_folio(h, folio, list); 3009 3010 return ret; 3011 } 3012 3013 struct page *alloc_huge_page(struct vm_area_struct *vma, 3014 unsigned long addr, int avoid_reserve) 3015 { 3016 struct hugepage_subpool *spool = subpool_vma(vma); 3017 struct hstate *h = hstate_vma(vma); 3018 struct page *page; 3019 struct folio *folio; 3020 long map_chg, map_commit; 3021 long gbl_chg; 3022 int ret, idx; 3023 struct hugetlb_cgroup *h_cg; 3024 bool deferred_reserve; 3025 3026 idx = hstate_index(h); 3027 /* 3028 * Examine the region/reserve map to determine if the process 3029 * has a reservation for the page to be allocated. A return 3030 * code of zero indicates a reservation exists (no change). 3031 */ 3032 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); 3033 if (map_chg < 0) 3034 return ERR_PTR(-ENOMEM); 3035 3036 /* 3037 * Processes that did not create the mapping will have no 3038 * reserves as indicated by the region/reserve map. Check 3039 * that the allocation will not exceed the subpool limit. 3040 * Allocations for MAP_NORESERVE mappings also need to be 3041 * checked against any subpool limit. 3042 */ 3043 if (map_chg || avoid_reserve) { 3044 gbl_chg = hugepage_subpool_get_pages(spool, 1); 3045 if (gbl_chg < 0) { 3046 vma_end_reservation(h, vma, addr); 3047 return ERR_PTR(-ENOSPC); 3048 } 3049 3050 /* 3051 * Even though there was no reservation in the region/reserve 3052 * map, there could be reservations associated with the 3053 * subpool that can be used. This would be indicated if the 3054 * return value of hugepage_subpool_get_pages() is zero. 3055 * However, if avoid_reserve is specified we still avoid even 3056 * the subpool reservations. 3057 */ 3058 if (avoid_reserve) 3059 gbl_chg = 1; 3060 } 3061 3062 /* If this allocation is not consuming a reservation, charge it now. 
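	 * The reservation charge is tracked separately in the cgroup 'rsvd'
	 * counter; it is committed further below once the page is in hand,
	 * and unwound on the error paths.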
3063	 */
3064	deferred_reserve = map_chg || avoid_reserve;
3065	if (deferred_reserve) {
3066		ret = hugetlb_cgroup_charge_cgroup_rsvd(
3067				idx, pages_per_huge_page(h), &h_cg);
3068		if (ret)
3069			goto out_subpool_put;
3070	}
3071
3072	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
3073	if (ret)
3074		goto out_uncharge_cgroup_reservation;
3075
3076	spin_lock_irq(&hugetlb_lock);
3077	/*
3078	 * gbl_chg is passed to indicate whether or not a page must be taken
3079	 * from the global free pool (global change). gbl_chg == 0 indicates
3080	 * a reservation exists for the allocation.
3081	 */
3082	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
3083	if (!page) {
3084		spin_unlock_irq(&hugetlb_lock);
3085		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
3086		if (!page)
3087			goto out_uncharge_cgroup;
3088		spin_lock_irq(&hugetlb_lock);
3089		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
3090			SetHPageRestoreReserve(page);
3091			h->resv_huge_pages--;
3092		}
3093		list_add(&page->lru, &h->hugepage_activelist);
3094		set_page_refcounted(page);
3095		/* Fall through */
3096	}
3097	folio = page_folio(page);
3098	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
3099	/* If the allocation is not consuming a reservation, also store the
3100	 * hugetlb_cgroup pointer on the page.
3101	 */
3102	if (deferred_reserve) {
3103		hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
3104						  h_cg, page);
3105	}
3106
3107	spin_unlock_irq(&hugetlb_lock);
3108
3109	hugetlb_set_page_subpool(page, spool);
3110
3111	map_commit = vma_commit_reservation(h, vma, addr);
3112	if (unlikely(map_chg > map_commit)) {
3113		/*
3114		 * The page was added to the reservation map between
3115		 * vma_needs_reservation and vma_commit_reservation.
3116		 * This indicates a race with hugetlb_reserve_pages.
3117		 * Adjust for the subpool count incremented above AND
3118		 * in hugetlb_reserve_pages for the same page. Also,
3119		 * the reservation count added in hugetlb_reserve_pages
3120		 * no longer applies.
3121 */ 3122 long rsv_adjust; 3123 3124 rsv_adjust = hugepage_subpool_put_pages(spool, 1); 3125 hugetlb_acct_memory(h, -rsv_adjust); 3126 if (deferred_reserve) 3127 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), 3128 pages_per_huge_page(h), folio); 3129 } 3130 return page; 3131 3132 out_uncharge_cgroup: 3133 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); 3134 out_uncharge_cgroup_reservation: 3135 if (deferred_reserve) 3136 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h), 3137 h_cg); 3138 out_subpool_put: 3139 if (map_chg || avoid_reserve) 3140 hugepage_subpool_put_pages(spool, 1); 3141 vma_end_reservation(h, vma, addr); 3142 return ERR_PTR(-ENOSPC); 3143 } 3144 3145 int alloc_bootmem_huge_page(struct hstate *h, int nid) 3146 __attribute__ ((weak, alias("__alloc_bootmem_huge_page"))); 3147 int __alloc_bootmem_huge_page(struct hstate *h, int nid) 3148 { 3149 struct huge_bootmem_page *m = NULL; /* initialize for clang */ 3150 int nr_nodes, node; 3151 3152 /* do node specific alloc */ 3153 if (nid != NUMA_NO_NODE) { 3154 m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h), 3155 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid); 3156 if (!m) 3157 return 0; 3158 goto found; 3159 } 3160 /* allocate from next node when distributing huge pages */ 3161 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { 3162 m = memblock_alloc_try_nid_raw( 3163 huge_page_size(h), huge_page_size(h), 3164 0, MEMBLOCK_ALLOC_ACCESSIBLE, node); 3165 /* 3166 * Use the beginning of the huge page to store the 3167 * huge_bootmem_page struct (until gather_bootmem 3168 * puts them into the mem_map). 3169 */ 3170 if (!m) 3171 return 0; 3172 goto found; 3173 } 3174 3175 found: 3176 /* Put them into a private list first because mem_map is not up yet */ 3177 INIT_LIST_HEAD(&m->list); 3178 list_add(&m->list, &huge_boot_pages); 3179 m->hstate = h; 3180 return 1; 3181 } 3182 3183 /* 3184 * Put bootmem huge pages into the standard lists after mem_map is up. 3185 * Note: This only applies to gigantic (order > MAX_ORDER) pages. 3186 */ 3187 static void __init gather_bootmem_prealloc(void) 3188 { 3189 struct huge_bootmem_page *m; 3190 3191 list_for_each_entry(m, &huge_boot_pages, list) { 3192 struct page *page = virt_to_page(m); 3193 struct folio *folio = page_folio(page); 3194 struct hstate *h = m->hstate; 3195 3196 VM_BUG_ON(!hstate_is_gigantic(h)); 3197 WARN_ON(folio_ref_count(folio) != 1); 3198 if (prep_compound_gigantic_folio(folio, huge_page_order(h))) { 3199 WARN_ON(folio_test_reserved(folio)); 3200 prep_new_hugetlb_folio(h, folio, folio_nid(folio)); 3201 free_huge_page(page); /* add to the hugepage allocator */ 3202 } else { 3203 /* VERY unlikely inflated ref count on a tail page */ 3204 free_gigantic_folio(folio, huge_page_order(h)); 3205 } 3206 3207 /* 3208 * We need to restore the 'stolen' pages to totalram_pages 3209 * in order to fix confusing memory reports from free(1) and 3210 * other side-effects, like CommitLimit going negative. 
3211 */ 3212 adjust_managed_page_count(page, pages_per_huge_page(h)); 3213 cond_resched(); 3214 } 3215 } 3216 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid) 3217 { 3218 unsigned long i; 3219 char buf[32]; 3220 3221 for (i = 0; i < h->max_huge_pages_node[nid]; ++i) { 3222 if (hstate_is_gigantic(h)) { 3223 if (!alloc_bootmem_huge_page(h, nid)) 3224 break; 3225 } else { 3226 struct folio *folio; 3227 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 3228 3229 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, 3230 &node_states[N_MEMORY], NULL); 3231 if (!folio) 3232 break; 3233 free_huge_page(&folio->page); /* free it into the hugepage allocator */ 3234 } 3235 cond_resched(); 3236 } 3237 if (i == h->max_huge_pages_node[nid]) 3238 return; 3239 3240 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3241 pr_warn("HugeTLB: allocating %u of page size %s failed node%d. Only allocated %lu hugepages.\n", 3242 h->max_huge_pages_node[nid], buf, nid, i); 3243 h->max_huge_pages -= (h->max_huge_pages_node[nid] - i); 3244 h->max_huge_pages_node[nid] = i; 3245 } 3246 3247 static void __init hugetlb_hstate_alloc_pages(struct hstate *h) 3248 { 3249 unsigned long i; 3250 nodemask_t *node_alloc_noretry; 3251 bool node_specific_alloc = false; 3252 3253 /* skip gigantic hugepages allocation if hugetlb_cma enabled */ 3254 if (hstate_is_gigantic(h) && hugetlb_cma_size) { 3255 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n"); 3256 return; 3257 } 3258 3259 /* do node specific alloc */ 3260 for_each_online_node(i) { 3261 if (h->max_huge_pages_node[i] > 0) { 3262 hugetlb_hstate_alloc_pages_onenode(h, i); 3263 node_specific_alloc = true; 3264 } 3265 } 3266 3267 if (node_specific_alloc) 3268 return; 3269 3270 /* below will do all node balanced alloc */ 3271 if (!hstate_is_gigantic(h)) { 3272 /* 3273 * Bit mask controlling how hard we retry per-node allocations. 3274 * Ignore errors as lower level routines can deal with 3275 * node_alloc_noretry == NULL. If this kmalloc fails at boot 3276 * time, we are likely in bigger trouble. 3277 */ 3278 node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry), 3279 GFP_KERNEL); 3280 } else { 3281 /* allocations done at boot time */ 3282 node_alloc_noretry = NULL; 3283 } 3284 3285 /* bit mask controlling how hard we retry per-node allocations */ 3286 if (node_alloc_noretry) 3287 nodes_clear(*node_alloc_noretry); 3288 3289 for (i = 0; i < h->max_huge_pages; ++i) { 3290 if (hstate_is_gigantic(h)) { 3291 if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE)) 3292 break; 3293 } else if (!alloc_pool_huge_page(h, 3294 &node_states[N_MEMORY], 3295 node_alloc_noretry)) 3296 break; 3297 cond_resched(); 3298 } 3299 if (i < h->max_huge_pages) { 3300 char buf[32]; 3301 3302 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3303 pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n", 3304 h->max_huge_pages, buf, i); 3305 h->max_huge_pages = i; 3306 } 3307 kfree(node_alloc_noretry); 3308 } 3309 3310 static void __init hugetlb_init_hstates(void) 3311 { 3312 struct hstate *h, *h2; 3313 3314 for_each_hstate(h) { 3315 /* oversize hugepages were init'ed in early boot */ 3316 if (!hstate_is_gigantic(h)) 3317 hugetlb_hstate_alloc_pages(h); 3318 3319 /* 3320 * Set demote order for each hstate. Note that 3321 * h->demote_order is initially 0. 3322 * - We can not demote gigantic pages if runtime freeing 3323 * is not supported, so skip this. 
3324 * - If CMA allocation is possible, we can not demote 3325 * HUGETLB_PAGE_ORDER or smaller size pages. 3326 */ 3327 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 3328 continue; 3329 if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER) 3330 continue; 3331 for_each_hstate(h2) { 3332 if (h2 == h) 3333 continue; 3334 if (h2->order < h->order && 3335 h2->order > h->demote_order) 3336 h->demote_order = h2->order; 3337 } 3338 } 3339 } 3340 3341 static void __init report_hugepages(void) 3342 { 3343 struct hstate *h; 3344 3345 for_each_hstate(h) { 3346 char buf[32]; 3347 3348 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3349 pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n", 3350 buf, h->free_huge_pages); 3351 pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n", 3352 hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf); 3353 } 3354 } 3355 3356 #ifdef CONFIG_HIGHMEM 3357 static void try_to_free_low(struct hstate *h, unsigned long count, 3358 nodemask_t *nodes_allowed) 3359 { 3360 int i; 3361 LIST_HEAD(page_list); 3362 3363 lockdep_assert_held(&hugetlb_lock); 3364 if (hstate_is_gigantic(h)) 3365 return; 3366 3367 /* 3368 * Collect pages to be freed on a list, and free after dropping lock 3369 */ 3370 for_each_node_mask(i, *nodes_allowed) { 3371 struct page *page, *next; 3372 struct list_head *freel = &h->hugepage_freelists[i]; 3373 list_for_each_entry_safe(page, next, freel, lru) { 3374 if (count >= h->nr_huge_pages) 3375 goto out; 3376 if (PageHighMem(page)) 3377 continue; 3378 remove_hugetlb_folio(h, page_folio(page), false); 3379 list_add(&page->lru, &page_list); 3380 } 3381 } 3382 3383 out: 3384 spin_unlock_irq(&hugetlb_lock); 3385 update_and_free_pages_bulk(h, &page_list); 3386 spin_lock_irq(&hugetlb_lock); 3387 } 3388 #else 3389 static inline void try_to_free_low(struct hstate *h, unsigned long count, 3390 nodemask_t *nodes_allowed) 3391 { 3392 } 3393 #endif 3394 3395 /* 3396 * Increment or decrement surplus_huge_pages. Keep node-specific counters 3397 * balanced by operating on them in a round-robin fashion. 3398 * Returns 1 if an adjustment was made. 3399 */ 3400 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, 3401 int delta) 3402 { 3403 int nr_nodes, node; 3404 3405 lockdep_assert_held(&hugetlb_lock); 3406 VM_BUG_ON(delta != -1 && delta != 1); 3407 3408 if (delta < 0) { 3409 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { 3410 if (h->surplus_huge_pages_node[node]) 3411 goto found; 3412 } 3413 } else { 3414 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 3415 if (h->surplus_huge_pages_node[node] < 3416 h->nr_huge_pages_node[node]) 3417 goto found; 3418 } 3419 } 3420 return 0; 3421 3422 found: 3423 h->surplus_huge_pages += delta; 3424 h->surplus_huge_pages_node[node] += delta; 3425 return 1; 3426 } 3427 3428 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) 3429 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, 3430 nodemask_t *nodes_allowed) 3431 { 3432 unsigned long min_count, ret; 3433 struct page *page; 3434 LIST_HEAD(page_list); 3435 NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL); 3436 3437 /* 3438 * Bit mask controlling how hard we retry per-node allocations. 3439 * If we can not allocate the bit mask, do not attempt to allocate 3440 * the requested huge pages. 
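	 *
	 * (Contextual note, not a complete list of callers: this function
	 * backs the nr_hugepages sysctl and sysfs knobs, so an assumed
	 * usage such as "echo 1024 > /proc/sys/vm/nr_hugepages" ends up
	 * here via __nr_hugepages_store_common() below.)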
3441	 */
3442	if (node_alloc_noretry)
3443		nodes_clear(*node_alloc_noretry);
3444	else
3445		return -ENOMEM;
3446
3447	/*
3448	 * The resize_lock mutex prevents concurrent adjustments to the number
3449	 * of pages in the hstate via the proc/sysfs interfaces.
3450	 */
3451	mutex_lock(&h->resize_lock);
3452	flush_free_hpage_work(h);
3453	spin_lock_irq(&hugetlb_lock);
3454
3455	/*
3456	 * Check for a node specific request.
3457	 * Changing the node specific huge page count may require a corresponding
3458	 * change to the global count. In any case, the passed node mask
3459	 * (nodes_allowed) will restrict alloc/free to the specified node.
3460	 */
3461	if (nid != NUMA_NO_NODE) {
3462		unsigned long old_count = count;
3463
3464		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
3465		/*
3466		 * The user may have specified a large count value which caused
3467		 * the above calculation to overflow. In this case, they wanted
3468		 * to allocate as many huge pages as possible. Set count to the
3469		 * largest possible value to align with their intention.
3470		 */
3471		if (count < old_count)
3472			count = ULONG_MAX;
3473	}
3474
3475	/*
3476	 * Runtime allocation of gigantic pages depends on the capability for
3477	 * large page range allocation.
3478	 * If the system does not provide this feature, return an error when
3479	 * the user tries to allocate gigantic pages, but let the user free
3480	 * the boot-time allocated gigantic pages.
3481	 */
3482	if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
3483		if (count > persistent_huge_pages(h)) {
3484			spin_unlock_irq(&hugetlb_lock);
3485			mutex_unlock(&h->resize_lock);
3486			NODEMASK_FREE(node_alloc_noretry);
3487			return -EINVAL;
3488		}
3489		/* Fall through to decrease pool */
3490	}
3491
3492	/*
3493	 * Increase the pool size.
3494	 * First take pages out of surplus state. Then make up the
3495	 * remaining difference by allocating fresh huge pages.
3496	 *
3497	 * We might race with alloc_surplus_huge_page() here and be unable
3498	 * to convert a surplus huge page to a normal huge page. That is
3499	 * not critical, though, it just means the overall size of the
3500	 * pool might be one hugepage larger than it needs to be, but
3501	 * within all the constraints specified by the sysctls.
3502	 */
3503	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
3504		if (!adjust_pool_surplus(h, nodes_allowed, -1))
3505			break;
3506	}
3507
3508	while (count > persistent_huge_pages(h)) {
3509		/*
3510		 * If this allocation races such that we no longer need the
3511		 * page, free_huge_page will handle it by freeing the page
3512		 * and reducing the surplus.
3513		 */
3514		spin_unlock_irq(&hugetlb_lock);
3515
3516		/* yield cpu to avoid soft lockup */
3517		cond_resched();
3518
3519		ret = alloc_pool_huge_page(h, nodes_allowed,
3520						node_alloc_noretry);
3521		spin_lock_irq(&hugetlb_lock);
3522		if (!ret)
3523			goto out;
3524
3525		/* Bail for signals. Probably a ctrl-c from the user. */
3526		if (signal_pending(current))
3527			goto out;
3528	}
3529
3530	/*
3531	 * Decrease the pool size.
3532	 * First return free pages to the buddy allocator (being careful
3533	 * to keep enough around to satisfy reservations). Then place
3534	 * pages into surplus state as needed so the pool will shrink
3535	 * to the desired size as pages become free.
3536	 *
3537	 * By placing pages into the surplus state independent of the
3538	 * overcommit value, we are allowing the surplus pool size to
3539	 * exceed overcommit. There are few sane options here.
Since
3540	 * alloc_surplus_huge_page() is checking the global counter,
3541	 * though, we'll note that we're not allowed to exceed surplus
3542	 * and won't grow the pool anywhere else, not until one of the
3543	 * sysctls is changed or the surplus pages go out of use.
3544	 */
3545	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
3546	min_count = max(count, min_count);
3547	try_to_free_low(h, min_count, nodes_allowed);
3548
3549	/*
3550	 * Collect pages to be removed on a list without dropping the lock.
3551	 */
3552	while (min_count < persistent_huge_pages(h)) {
3553		page = remove_pool_huge_page(h, nodes_allowed, 0);
3554		if (!page)
3555			break;
3556
3557		list_add(&page->lru, &page_list);
3558	}
3559	/* free the pages after dropping lock */
3560	spin_unlock_irq(&hugetlb_lock);
3561	update_and_free_pages_bulk(h, &page_list);
3562	flush_free_hpage_work(h);
3563	spin_lock_irq(&hugetlb_lock);
3564
3565	while (count < persistent_huge_pages(h)) {
3566		if (!adjust_pool_surplus(h, nodes_allowed, 1))
3567			break;
3568	}
3569 out:
3570	h->max_huge_pages = persistent_huge_pages(h);
3571	spin_unlock_irq(&hugetlb_lock);
3572	mutex_unlock(&h->resize_lock);
3573
3574	NODEMASK_FREE(node_alloc_noretry);
3575
3576	return 0;
3577 }
3578
3579 static int demote_free_huge_page(struct hstate *h, struct page *page)
3580 {
3581	int i, nid = page_to_nid(page);
3582	struct hstate *target_hstate;
3583	struct folio *folio = page_folio(page);
3584	struct page *subpage;
3585	int rc = 0;
3586
3587	target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
3588
3589	remove_hugetlb_folio_for_demote(h, folio, false);
3590	spin_unlock_irq(&hugetlb_lock);
3591
3592	rc = hugetlb_vmemmap_restore(h, page);
3593	if (rc) {
3594		/* Allocation of vmemmap failed, we cannot demote the page */
3595		spin_lock_irq(&hugetlb_lock);
3596		set_page_refcounted(page);
3597		add_hugetlb_folio(h, page_folio(page), false);
3598		return rc;
3599	}
3600
3601	/*
3602	 * Use destroy_compound_hugetlb_folio_for_demote for all huge page
3603	 * sizes as it will not ref count pages.
3604	 */
3605	destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h));
3606
3607	/*
3608	 * Taking the target hstate mutex synchronizes with set_max_huge_pages.
3609	 * Without the mutex, pages added to the target hstate could be marked
3610	 * as surplus.
3611	 *
3612	 * Note that we already hold h->resize_lock. To prevent deadlock,
3613	 * use the convention of always taking the larger size hstate mutex first.
3614	 */
3615	mutex_lock(&target_hstate->resize_lock);
3616	for (i = 0; i < pages_per_huge_page(h);
3617				i += pages_per_huge_page(target_hstate)) {
3618		subpage = nth_page(page, i);
3619		folio = page_folio(subpage);
3620		if (hstate_is_gigantic(target_hstate))
3621			prep_compound_gigantic_folio_for_demote(folio,
3622							target_hstate->order);
3623		else
3624			prep_compound_page(subpage, target_hstate->order);
3625		set_page_private(subpage, 0);
3626		prep_new_hugetlb_folio(target_hstate, folio, nid);
3627		free_huge_page(subpage);
3628	}
3629	mutex_unlock(&target_hstate->resize_lock);
3630
3631	spin_lock_irq(&hugetlb_lock);
3632
3633	/*
3634	 * Not absolutely necessary, but for consistency update max_huge_pages
3635	 * based on pool changes for the demoted page.
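	 *
	 * For example, with x86-64 sizes (an illustration, not a
	 * requirement): demoting one 1 GiB page to 2 MiB pages decrements
	 * this hstate's max_huge_pages by 1 and adds 1 GiB / 2 MiB = 512
	 * to the target hstate's max_huge_pages.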
3636 */ 3637 h->max_huge_pages--; 3638 target_hstate->max_huge_pages += 3639 pages_per_huge_page(h) / pages_per_huge_page(target_hstate); 3640 3641 return rc; 3642 } 3643 3644 static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed) 3645 __must_hold(&hugetlb_lock) 3646 { 3647 int nr_nodes, node; 3648 struct page *page; 3649 3650 lockdep_assert_held(&hugetlb_lock); 3651 3652 /* We should never get here if no demote order */ 3653 if (!h->demote_order) { 3654 pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n"); 3655 return -EINVAL; /* internal error */ 3656 } 3657 3658 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 3659 list_for_each_entry(page, &h->hugepage_freelists[node], lru) { 3660 if (PageHWPoison(page)) 3661 continue; 3662 3663 return demote_free_huge_page(h, page); 3664 } 3665 } 3666 3667 /* 3668 * Only way to get here is if all pages on free lists are poisoned. 3669 * Return -EBUSY so that caller will not retry. 3670 */ 3671 return -EBUSY; 3672 } 3673 3674 #define HSTATE_ATTR_RO(_name) \ 3675 static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 3676 3677 #define HSTATE_ATTR_WO(_name) \ 3678 static struct kobj_attribute _name##_attr = __ATTR_WO(_name) 3679 3680 #define HSTATE_ATTR(_name) \ 3681 static struct kobj_attribute _name##_attr = __ATTR_RW(_name) 3682 3683 static struct kobject *hugepages_kobj; 3684 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 3685 3686 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); 3687 3688 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) 3689 { 3690 int i; 3691 3692 for (i = 0; i < HUGE_MAX_HSTATE; i++) 3693 if (hstate_kobjs[i] == kobj) { 3694 if (nidp) 3695 *nidp = NUMA_NO_NODE; 3696 return &hstates[i]; 3697 } 3698 3699 return kobj_to_node_hstate(kobj, nidp); 3700 } 3701 3702 static ssize_t nr_hugepages_show_common(struct kobject *kobj, 3703 struct kobj_attribute *attr, char *buf) 3704 { 3705 struct hstate *h; 3706 unsigned long nr_huge_pages; 3707 int nid; 3708 3709 h = kobj_to_hstate(kobj, &nid); 3710 if (nid == NUMA_NO_NODE) 3711 nr_huge_pages = h->nr_huge_pages; 3712 else 3713 nr_huge_pages = h->nr_huge_pages_node[nid]; 3714 3715 return sysfs_emit(buf, "%lu\n", nr_huge_pages); 3716 } 3717 3718 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy, 3719 struct hstate *h, int nid, 3720 unsigned long count, size_t len) 3721 { 3722 int err; 3723 nodemask_t nodes_allowed, *n_mask; 3724 3725 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 3726 return -EINVAL; 3727 3728 if (nid == NUMA_NO_NODE) { 3729 /* 3730 * global hstate attribute 3731 */ 3732 if (!(obey_mempolicy && 3733 init_nodemask_of_mempolicy(&nodes_allowed))) 3734 n_mask = &node_states[N_MEMORY]; 3735 else 3736 n_mask = &nodes_allowed; 3737 } else { 3738 /* 3739 * Node specific request. count adjustment happens in 3740 * set_max_huge_pages() after acquiring hugetlb_lock. 3741 */ 3742 init_nodemask_of_node(&nodes_allowed, nid); 3743 n_mask = &nodes_allowed; 3744 } 3745 3746 err = set_max_huge_pages(h, count, nid, n_mask); 3747 3748 return err ? 
err : len; 3749 } 3750 3751 static ssize_t nr_hugepages_store_common(bool obey_mempolicy, 3752 struct kobject *kobj, const char *buf, 3753 size_t len) 3754 { 3755 struct hstate *h; 3756 unsigned long count; 3757 int nid; 3758 int err; 3759 3760 err = kstrtoul(buf, 10, &count); 3761 if (err) 3762 return err; 3763 3764 h = kobj_to_hstate(kobj, &nid); 3765 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); 3766 } 3767 3768 static ssize_t nr_hugepages_show(struct kobject *kobj, 3769 struct kobj_attribute *attr, char *buf) 3770 { 3771 return nr_hugepages_show_common(kobj, attr, buf); 3772 } 3773 3774 static ssize_t nr_hugepages_store(struct kobject *kobj, 3775 struct kobj_attribute *attr, const char *buf, size_t len) 3776 { 3777 return nr_hugepages_store_common(false, kobj, buf, len); 3778 } 3779 HSTATE_ATTR(nr_hugepages); 3780 3781 #ifdef CONFIG_NUMA 3782 3783 /* 3784 * hstate attribute for optionally mempolicy-based constraint on persistent 3785 * huge page alloc/free. 3786 */ 3787 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, 3788 struct kobj_attribute *attr, 3789 char *buf) 3790 { 3791 return nr_hugepages_show_common(kobj, attr, buf); 3792 } 3793 3794 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, 3795 struct kobj_attribute *attr, const char *buf, size_t len) 3796 { 3797 return nr_hugepages_store_common(true, kobj, buf, len); 3798 } 3799 HSTATE_ATTR(nr_hugepages_mempolicy); 3800 #endif 3801 3802 3803 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 3804 struct kobj_attribute *attr, char *buf) 3805 { 3806 struct hstate *h = kobj_to_hstate(kobj, NULL); 3807 return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages); 3808 } 3809 3810 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 3811 struct kobj_attribute *attr, const char *buf, size_t count) 3812 { 3813 int err; 3814 unsigned long input; 3815 struct hstate *h = kobj_to_hstate(kobj, NULL); 3816 3817 if (hstate_is_gigantic(h)) 3818 return -EINVAL; 3819 3820 err = kstrtoul(buf, 10, &input); 3821 if (err) 3822 return err; 3823 3824 spin_lock_irq(&hugetlb_lock); 3825 h->nr_overcommit_huge_pages = input; 3826 spin_unlock_irq(&hugetlb_lock); 3827 3828 return count; 3829 } 3830 HSTATE_ATTR(nr_overcommit_hugepages); 3831 3832 static ssize_t free_hugepages_show(struct kobject *kobj, 3833 struct kobj_attribute *attr, char *buf) 3834 { 3835 struct hstate *h; 3836 unsigned long free_huge_pages; 3837 int nid; 3838 3839 h = kobj_to_hstate(kobj, &nid); 3840 if (nid == NUMA_NO_NODE) 3841 free_huge_pages = h->free_huge_pages; 3842 else 3843 free_huge_pages = h->free_huge_pages_node[nid]; 3844 3845 return sysfs_emit(buf, "%lu\n", free_huge_pages); 3846 } 3847 HSTATE_ATTR_RO(free_hugepages); 3848 3849 static ssize_t resv_hugepages_show(struct kobject *kobj, 3850 struct kobj_attribute *attr, char *buf) 3851 { 3852 struct hstate *h = kobj_to_hstate(kobj, NULL); 3853 return sysfs_emit(buf, "%lu\n", h->resv_huge_pages); 3854 } 3855 HSTATE_ATTR_RO(resv_hugepages); 3856 3857 static ssize_t surplus_hugepages_show(struct kobject *kobj, 3858 struct kobj_attribute *attr, char *buf) 3859 { 3860 struct hstate *h; 3861 unsigned long surplus_huge_pages; 3862 int nid; 3863 3864 h = kobj_to_hstate(kobj, &nid); 3865 if (nid == NUMA_NO_NODE) 3866 surplus_huge_pages = h->surplus_huge_pages; 3867 else 3868 surplus_huge_pages = h->surplus_huge_pages_node[nid]; 3869 3870 return sysfs_emit(buf, "%lu\n", surplus_huge_pages); 3871 } 3872 HSTATE_ATTR_RO(surplus_hugepages); 3873 3874 static 
ssize_t demote_store(struct kobject *kobj,
3875	       struct kobj_attribute *attr, const char *buf, size_t len)
3876 {
3877	unsigned long nr_demote;
3878	unsigned long nr_available;
3879	nodemask_t nodes_allowed, *n_mask;
3880	struct hstate *h;
3881	int err;
3882	int nid;
3883
3884	err = kstrtoul(buf, 10, &nr_demote);
3885	if (err)
3886		return err;
3887	h = kobj_to_hstate(kobj, &nid);
3888
3889	if (nid != NUMA_NO_NODE) {
3890		init_nodemask_of_node(&nodes_allowed, nid);
3891		n_mask = &nodes_allowed;
3892	} else {
3893		n_mask = &node_states[N_MEMORY];
3894	}
3895
3896	/* Synchronize with other sysfs operations modifying huge pages */
3897	mutex_lock(&h->resize_lock);
3898	spin_lock_irq(&hugetlb_lock);
3899
3900	while (nr_demote) {
3901		/*
3902		 * Check for available pages to demote each time through the
3903		 * loop as demote_pool_huge_page will drop hugetlb_lock.
3904		 */
3905		if (nid != NUMA_NO_NODE)
3906			nr_available = h->free_huge_pages_node[nid];
3907		else
3908			nr_available = h->free_huge_pages;
3909		nr_available -= h->resv_huge_pages;
3910		if (!nr_available)
3911			break;
3912
3913		err = demote_pool_huge_page(h, n_mask);
3914		if (err)
3915			break;
3916
3917		nr_demote--;
3918	}
3919
3920	spin_unlock_irq(&hugetlb_lock);
3921	mutex_unlock(&h->resize_lock);
3922
3923	if (err)
3924		return err;
3925	return len;
3926 }
3927 HSTATE_ATTR_WO(demote);
3928
3929 static ssize_t demote_size_show(struct kobject *kobj,
3930					struct kobj_attribute *attr, char *buf)
3931 {
3932	struct hstate *h = kobj_to_hstate(kobj, NULL);
3933	unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K;
3934
3935	return sysfs_emit(buf, "%lukB\n", demote_size);
3936 }
3937
3938 static ssize_t demote_size_store(struct kobject *kobj,
3939					struct kobj_attribute *attr,
3940					const char *buf, size_t count)
3941 {
3942	struct hstate *h, *demote_hstate;
3943	unsigned long demote_size;
3944	unsigned int demote_order;
3945
3946	demote_size = (unsigned long)memparse(buf, NULL);
3947
3948	demote_hstate = size_to_hstate(demote_size);
3949	if (!demote_hstate)
3950		return -EINVAL;
3951	demote_order = demote_hstate->order;
3952	if (demote_order < HUGETLB_PAGE_ORDER)
3953		return -EINVAL;
3954
3955	/* demote order must be smaller than hstate order */
3956	h = kobj_to_hstate(kobj, NULL);
3957	if (demote_order >= h->order)
3958		return -EINVAL;
3959
3960	/* resize_lock synchronizes access to demote size and writes */
3961	mutex_lock(&h->resize_lock);
3962	h->demote_order = demote_order;
3963	mutex_unlock(&h->resize_lock);
3964
3965	return count;
3966 }
3967 HSTATE_ATTR(demote_size);
3968
3969 static struct attribute *hstate_attrs[] = {
3970	&nr_hugepages_attr.attr,
3971	&nr_overcommit_hugepages_attr.attr,
3972	&free_hugepages_attr.attr,
3973	&resv_hugepages_attr.attr,
3974	&surplus_hugepages_attr.attr,
3975 #ifdef CONFIG_NUMA
3976	&nr_hugepages_mempolicy_attr.attr,
3977 #endif
3978	NULL,
3979 };
3980
3981 static const struct attribute_group hstate_attr_group = {
3982	.attrs = hstate_attrs,
3983 };
3984
3985 static struct attribute *hstate_demote_attrs[] = {
3986	&demote_size_attr.attr,
3987	&demote_attr.attr,
3988	NULL,
3989 };
3990
3991 static const struct attribute_group hstate_demote_attr_group = {
3992	.attrs = hstate_demote_attrs,
3993 };
3994
3995 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
3996				    struct kobject **hstate_kobjs,
3997				    const struct attribute_group *hstate_attr_group)
3998 {
3999	int retval;
4000	int hi = hstate_index(h);
4001
4002	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
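	/*
	 * Illustrative note: on success this creates the per-hstate sysfs
	 * directory, e.g. /sys/kernel/mm/hugepages/hugepages-2048kB when
	 * parent is the global hugepages kobject (example path for a 2 MiB
	 * hstate on x86-64; per-node callers pass a node device kobject
	 * instead).
	 */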
4003 if (!hstate_kobjs[hi]) 4004 return -ENOMEM; 4005 4006 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); 4007 if (retval) { 4008 kobject_put(hstate_kobjs[hi]); 4009 hstate_kobjs[hi] = NULL; 4010 return retval; 4011 } 4012 4013 if (h->demote_order) { 4014 retval = sysfs_create_group(hstate_kobjs[hi], 4015 &hstate_demote_attr_group); 4016 if (retval) { 4017 pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name); 4018 sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group); 4019 kobject_put(hstate_kobjs[hi]); 4020 hstate_kobjs[hi] = NULL; 4021 return retval; 4022 } 4023 } 4024 4025 return 0; 4026 } 4027 4028 #ifdef CONFIG_NUMA 4029 static bool hugetlb_sysfs_initialized __ro_after_init; 4030 4031 /* 4032 * node_hstate/s - associate per node hstate attributes, via their kobjects, 4033 * with node devices in node_devices[] using a parallel array. The array 4034 * index of a node device or _hstate == node id. 4035 * This is here to avoid any static dependency of the node device driver, in 4036 * the base kernel, on the hugetlb module. 4037 */ 4038 struct node_hstate { 4039 struct kobject *hugepages_kobj; 4040 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 4041 }; 4042 static struct node_hstate node_hstates[MAX_NUMNODES]; 4043 4044 /* 4045 * A subset of global hstate attributes for node devices 4046 */ 4047 static struct attribute *per_node_hstate_attrs[] = { 4048 &nr_hugepages_attr.attr, 4049 &free_hugepages_attr.attr, 4050 &surplus_hugepages_attr.attr, 4051 NULL, 4052 }; 4053 4054 static const struct attribute_group per_node_hstate_attr_group = { 4055 .attrs = per_node_hstate_attrs, 4056 }; 4057 4058 /* 4059 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. 4060 * Returns node id via non-NULL nidp. 4061 */ 4062 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 4063 { 4064 int nid; 4065 4066 for (nid = 0; nid < nr_node_ids; nid++) { 4067 struct node_hstate *nhs = &node_hstates[nid]; 4068 int i; 4069 for (i = 0; i < HUGE_MAX_HSTATE; i++) 4070 if (nhs->hstate_kobjs[i] == kobj) { 4071 if (nidp) 4072 *nidp = nid; 4073 return &hstates[i]; 4074 } 4075 } 4076 4077 BUG(); 4078 return NULL; 4079 } 4080 4081 /* 4082 * Unregister hstate attributes from a single node device. 4083 * No-op if no hstate attributes attached. 4084 */ 4085 void hugetlb_unregister_node(struct node *node) 4086 { 4087 struct hstate *h; 4088 struct node_hstate *nhs = &node_hstates[node->dev.id]; 4089 4090 if (!nhs->hugepages_kobj) 4091 return; /* no hstate attributes */ 4092 4093 for_each_hstate(h) { 4094 int idx = hstate_index(h); 4095 struct kobject *hstate_kobj = nhs->hstate_kobjs[idx]; 4096 4097 if (!hstate_kobj) 4098 continue; 4099 if (h->demote_order) 4100 sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group); 4101 sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group); 4102 kobject_put(hstate_kobj); 4103 nhs->hstate_kobjs[idx] = NULL; 4104 } 4105 4106 kobject_put(nhs->hugepages_kobj); 4107 nhs->hugepages_kobj = NULL; 4108 } 4109 4110 4111 /* 4112 * Register hstate attributes for a single node device. 4113 * No-op if attributes already registered. 
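 *
 * The per-node directory mirrors the global one with a reduced attribute
 * set, e.g. (illustrative path for node 0 and 2 MiB pages):
 *   /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages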
4114 */ 4115 void hugetlb_register_node(struct node *node) 4116 { 4117 struct hstate *h; 4118 struct node_hstate *nhs = &node_hstates[node->dev.id]; 4119 int err; 4120 4121 if (!hugetlb_sysfs_initialized) 4122 return; 4123 4124 if (nhs->hugepages_kobj) 4125 return; /* already allocated */ 4126 4127 nhs->hugepages_kobj = kobject_create_and_add("hugepages", 4128 &node->dev.kobj); 4129 if (!nhs->hugepages_kobj) 4130 return; 4131 4132 for_each_hstate(h) { 4133 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, 4134 nhs->hstate_kobjs, 4135 &per_node_hstate_attr_group); 4136 if (err) { 4137 pr_err("HugeTLB: Unable to add hstate %s for node %d\n", 4138 h->name, node->dev.id); 4139 hugetlb_unregister_node(node); 4140 break; 4141 } 4142 } 4143 } 4144 4145 /* 4146 * hugetlb init time: register hstate attributes for all registered node 4147 * devices of nodes that have memory. All on-line nodes should have 4148 * registered their associated device by this time. 4149 */ 4150 static void __init hugetlb_register_all_nodes(void) 4151 { 4152 int nid; 4153 4154 for_each_online_node(nid) 4155 hugetlb_register_node(node_devices[nid]); 4156 } 4157 #else /* !CONFIG_NUMA */ 4158 4159 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 4160 { 4161 BUG(); 4162 if (nidp) 4163 *nidp = -1; 4164 return NULL; 4165 } 4166 4167 static void hugetlb_register_all_nodes(void) { } 4168 4169 #endif 4170 4171 #ifdef CONFIG_CMA 4172 static void __init hugetlb_cma_check(void); 4173 #else 4174 static inline __init void hugetlb_cma_check(void) 4175 { 4176 } 4177 #endif 4178 4179 static void __init hugetlb_sysfs_init(void) 4180 { 4181 struct hstate *h; 4182 int err; 4183 4184 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 4185 if (!hugepages_kobj) 4186 return; 4187 4188 for_each_hstate(h) { 4189 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, 4190 hstate_kobjs, &hstate_attr_group); 4191 if (err) 4192 pr_err("HugeTLB: Unable to add hstate %s", h->name); 4193 } 4194 4195 #ifdef CONFIG_NUMA 4196 hugetlb_sysfs_initialized = true; 4197 #endif 4198 hugetlb_register_all_nodes(); 4199 } 4200 4201 static int __init hugetlb_init(void) 4202 { 4203 int i; 4204 4205 BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE < 4206 __NR_HPAGEFLAGS); 4207 4208 if (!hugepages_supported()) { 4209 if (hugetlb_max_hstate || default_hstate_max_huge_pages) 4210 pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n"); 4211 return 0; 4212 } 4213 4214 /* 4215 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some 4216 * architectures depend on setup being done here. 4217 */ 4218 hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 4219 if (!parsed_default_hugepagesz) { 4220 /* 4221 * If we did not parse a default huge page size, set 4222 * default_hstate_idx to HPAGE_SIZE hstate. And, if the 4223 * number of huge pages for this default size was implicitly 4224 * specified, set that here as well. 4225 * Note that the implicit setting will overwrite an explicit 4226 * setting. A warning will be printed in this case. 
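	 *
	 * Illustrative example (x86-64, 2 MiB default size): booting with
	 * "hugepages=64 hugepagesz=2M hugepages=128" records the implicit
	 * 64 for the default size and the explicit 128 for the 2 MiB
	 * hstate; the implicit value wins below and both warnings are
	 * printed.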
4227	 */
4228	default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
4229	if (default_hstate_max_huge_pages) {
4230		if (default_hstate.max_huge_pages) {
4231			char buf[32];
4232
4233			string_get_size(huge_page_size(&default_hstate),
4234					1, STRING_UNITS_2, buf, 32);
4235			pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
4236				default_hstate.max_huge_pages, buf);
4237			pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
4238				default_hstate_max_huge_pages);
4239		}
4240		default_hstate.max_huge_pages =
4241			default_hstate_max_huge_pages;
4242
4243		for_each_online_node(i)
4244			default_hstate.max_huge_pages_node[i] =
4245				default_hugepages_in_node[i];
4246	}
4247 }
4248
4249	hugetlb_cma_check();
4250	hugetlb_init_hstates();
4251	gather_bootmem_prealloc();
4252	report_hugepages();
4253
4254	hugetlb_sysfs_init();
4255	hugetlb_cgroup_file_init();
4256
4257 #ifdef CONFIG_SMP
4258	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
4259 #else
4260	num_fault_mutexes = 1;
4261 #endif
4262	hugetlb_fault_mutex_table =
4263		kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
4264			      GFP_KERNEL);
4265	BUG_ON(!hugetlb_fault_mutex_table);
4266
4267	for (i = 0; i < num_fault_mutexes; i++)
4268		mutex_init(&hugetlb_fault_mutex_table[i]);
4269	return 0;
4270 }
4271 subsys_initcall(hugetlb_init);
4272
4273 /* Overwritten by architectures with more huge page sizes */
4274 bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
4275 {
4276	return size == HPAGE_SIZE;
4277 }
4278
4279 void __init hugetlb_add_hstate(unsigned int order)
4280 {
4281	struct hstate *h;
4282	unsigned long i;
4283
4284	if (size_to_hstate(PAGE_SIZE << order)) {
4285		return;
4286	}
4287	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
4288	BUG_ON(order == 0);
4289	h = &hstates[hugetlb_max_hstate++];
4290	mutex_init(&h->resize_lock);
4291	h->order = order;
4292	h->mask = ~(huge_page_size(h) - 1);
4293	for (i = 0; i < MAX_NUMNODES; ++i)
4294		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
4295	INIT_LIST_HEAD(&h->hugepage_activelist);
4296	h->next_nid_to_alloc = first_memory_node;
4297	h->next_nid_to_free = first_memory_node;
4298	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
4299					huge_page_size(h)/SZ_1K);
4300
4301	parsed_hstate = h;
4302 }
4303
4304 bool __init __weak hugetlb_node_alloc_supported(void)
4305 {
4306	return true;
4307 }
4308
4309 static void __init hugepages_clear_pages_in_node(void)
4310 {
4311	if (!hugetlb_max_hstate) {
4312		default_hstate_max_huge_pages = 0;
4313		memset(default_hugepages_in_node, 0,
4314			sizeof(default_hugepages_in_node));
4315	} else {
4316		parsed_hstate->max_huge_pages = 0;
4317		memset(parsed_hstate->max_huge_pages_node, 0,
4318			sizeof(parsed_hstate->max_huge_pages_node));
4319	}
4320 }
4321
4322 /*
4323  * hugepages command line processing
4324  * hugepages normally follows a valid hugepagesz or default_hugepagesz
4325  * specification. If not, ignore the hugepages value. hugepages can also
4326  * be the first huge page command line option in which case it implicitly
4327  * specifies the number of huge pages for the default size.
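 *
 * Illustrative forms (assuming a preceding hugepagesz=2M):
 *   hugepages=512          512 huge pages of the preceding size
 *   hugepages=0:256,1:256  256 huge pages on each of nodes 0 and 1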
4328 */ 4329 static int __init hugepages_setup(char *s) 4330 { 4331 unsigned long *mhp; 4332 static unsigned long *last_mhp; 4333 int node = NUMA_NO_NODE; 4334 int count; 4335 unsigned long tmp; 4336 char *p = s; 4337 4338 if (!parsed_valid_hugepagesz) { 4339 pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s); 4340 parsed_valid_hugepagesz = true; 4341 return 1; 4342 } 4343 4344 /* 4345 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter 4346 * yet, so this hugepages= parameter goes to the "default hstate". 4347 * Otherwise, it goes with the previously parsed hugepagesz or 4348 * default_hugepagesz. 4349 */ 4350 else if (!hugetlb_max_hstate) 4351 mhp = &default_hstate_max_huge_pages; 4352 else 4353 mhp = &parsed_hstate->max_huge_pages; 4354 4355 if (mhp == last_mhp) { 4356 pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s); 4357 return 1; 4358 } 4359 4360 while (*p) { 4361 count = 0; 4362 if (sscanf(p, "%lu%n", &tmp, &count) != 1) 4363 goto invalid; 4364 /* Parameter is node format */ 4365 if (p[count] == ':') { 4366 if (!hugetlb_node_alloc_supported()) { 4367 pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n"); 4368 return 1; 4369 } 4370 if (tmp >= MAX_NUMNODES || !node_online(tmp)) 4371 goto invalid; 4372 node = array_index_nospec(tmp, MAX_NUMNODES); 4373 p += count + 1; 4374 /* Parse hugepages */ 4375 if (sscanf(p, "%lu%n", &tmp, &count) != 1) 4376 goto invalid; 4377 if (!hugetlb_max_hstate) 4378 default_hugepages_in_node[node] = tmp; 4379 else 4380 parsed_hstate->max_huge_pages_node[node] = tmp; 4381 *mhp += tmp; 4382 /* Go to parse next node*/ 4383 if (p[count] == ',') 4384 p += count + 1; 4385 else 4386 break; 4387 } else { 4388 if (p != s) 4389 goto invalid; 4390 *mhp = tmp; 4391 break; 4392 } 4393 } 4394 4395 /* 4396 * Global state is always initialized later in hugetlb_init. 4397 * But we need to allocate gigantic hstates here early to still 4398 * use the bootmem allocator. 4399 */ 4400 if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate)) 4401 hugetlb_hstate_alloc_pages(parsed_hstate); 4402 4403 last_mhp = mhp; 4404 4405 return 1; 4406 4407 invalid: 4408 pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p); 4409 hugepages_clear_pages_in_node(); 4410 return 1; 4411 } 4412 __setup("hugepages=", hugepages_setup); 4413 4414 /* 4415 * hugepagesz command line processing 4416 * A specific huge page size can only be specified once with hugepagesz. 4417 * hugepagesz is followed by hugepages on the command line. The global 4418 * variable 'parsed_valid_hugepagesz' is used to determine if prior 4419 * hugepagesz argument was valid. 4420 */ 4421 static int __init hugepagesz_setup(char *s) 4422 { 4423 unsigned long size; 4424 struct hstate *h; 4425 4426 parsed_valid_hugepagesz = false; 4427 size = (unsigned long)memparse(s, NULL); 4428 4429 if (!arch_hugetlb_valid_size(size)) { 4430 pr_err("HugeTLB: unsupported hugepagesz=%s\n", s); 4431 return 1; 4432 } 4433 4434 h = size_to_hstate(size); 4435 if (h) { 4436 /* 4437 * hstate for this size already exists. This is normally 4438 * an error, but is allowed if the existing hstate is the 4439 * default hstate. More specifically, it is only allowed if 4440 * the number of huge pages for the default hstate was not 4441 * previously specified. 
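		 *
		 * Illustrative example: "default_hugepagesz=2M hugepagesz=2M
		 * hugepages=4" is accepted, while repeating hugepagesz=2M
		 * after a hugepages= value for that size triggers the
		 * "specified twice" warning below.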
4442 */ 4443 if (!parsed_default_hugepagesz || h != &default_hstate || 4444 default_hstate.max_huge_pages) { 4445 pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s); 4446 return 1; 4447 } 4448 4449 /* 4450 * No need to call hugetlb_add_hstate() as hstate already 4451 * exists. But, do set parsed_hstate so that a following 4452 * hugepages= parameter will be applied to this hstate. 4453 */ 4454 parsed_hstate = h; 4455 parsed_valid_hugepagesz = true; 4456 return 1; 4457 } 4458 4459 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); 4460 parsed_valid_hugepagesz = true; 4461 return 1; 4462 } 4463 __setup("hugepagesz=", hugepagesz_setup); 4464 4465 /* 4466 * default_hugepagesz command line input 4467 * Only one instance of default_hugepagesz allowed on command line. 4468 */ 4469 static int __init default_hugepagesz_setup(char *s) 4470 { 4471 unsigned long size; 4472 int i; 4473 4474 parsed_valid_hugepagesz = false; 4475 if (parsed_default_hugepagesz) { 4476 pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s); 4477 return 1; 4478 } 4479 4480 size = (unsigned long)memparse(s, NULL); 4481 4482 if (!arch_hugetlb_valid_size(size)) { 4483 pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s); 4484 return 1; 4485 } 4486 4487 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); 4488 parsed_valid_hugepagesz = true; 4489 parsed_default_hugepagesz = true; 4490 default_hstate_idx = hstate_index(size_to_hstate(size)); 4491 4492 /* 4493 * The number of default huge pages (for this size) could have been 4494 * specified as the first hugetlb parameter: hugepages=X. If so, 4495 * then default_hstate_max_huge_pages is set. If the default huge 4496 * page size is gigantic (>= MAX_ORDER), then the pages must be 4497 * allocated here from bootmem allocator. 
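	 *
	 * Illustrative example: with "hugepages=2 default_hugepagesz=1G"
	 * the two gigantic pages are carved out of bootmem here at parse
	 * time rather than later in hugetlb_init().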
4498 */ 4499 if (default_hstate_max_huge_pages) { 4500 default_hstate.max_huge_pages = default_hstate_max_huge_pages; 4501 for_each_online_node(i) 4502 default_hstate.max_huge_pages_node[i] = 4503 default_hugepages_in_node[i]; 4504 if (hstate_is_gigantic(&default_hstate)) 4505 hugetlb_hstate_alloc_pages(&default_hstate); 4506 default_hstate_max_huge_pages = 0; 4507 } 4508 4509 return 1; 4510 } 4511 __setup("default_hugepagesz=", default_hugepagesz_setup); 4512 4513 static nodemask_t *policy_mbind_nodemask(gfp_t gfp) 4514 { 4515 #ifdef CONFIG_NUMA 4516 struct mempolicy *mpol = get_task_policy(current); 4517 4518 /* 4519 * Only enforce MPOL_BIND policy which overlaps with cpuset policy 4520 * (from policy_nodemask) specifically for hugetlb case 4521 */ 4522 if (mpol->mode == MPOL_BIND && 4523 (apply_policy_zone(mpol, gfp_zone(gfp)) && 4524 cpuset_nodemask_valid_mems_allowed(&mpol->nodes))) 4525 return &mpol->nodes; 4526 #endif 4527 return NULL; 4528 } 4529 4530 static unsigned int allowed_mems_nr(struct hstate *h) 4531 { 4532 int node; 4533 unsigned int nr = 0; 4534 nodemask_t *mbind_nodemask; 4535 unsigned int *array = h->free_huge_pages_node; 4536 gfp_t gfp_mask = htlb_alloc_mask(h); 4537 4538 mbind_nodemask = policy_mbind_nodemask(gfp_mask); 4539 for_each_node_mask(node, cpuset_current_mems_allowed) { 4540 if (!mbind_nodemask || node_isset(node, *mbind_nodemask)) 4541 nr += array[node]; 4542 } 4543 4544 return nr; 4545 } 4546 4547 #ifdef CONFIG_SYSCTL 4548 static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write, 4549 void *buffer, size_t *length, 4550 loff_t *ppos, unsigned long *out) 4551 { 4552 struct ctl_table dup_table; 4553 4554 /* 4555 * In order to avoid races with __do_proc_doulongvec_minmax(), we 4556 * can duplicate the @table and alter the duplicate of it. 
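	 * The on-stack copy lets us point ->data at the caller-provided
	 * variable without ever publishing a transient pointer in the
	 * shared table that a concurrent handler could observe.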
4557 */ 4558 dup_table = *table; 4559 dup_table.data = out; 4560 4561 return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos); 4562 } 4563 4564 static int hugetlb_sysctl_handler_common(bool obey_mempolicy, 4565 struct ctl_table *table, int write, 4566 void *buffer, size_t *length, loff_t *ppos) 4567 { 4568 struct hstate *h = &default_hstate; 4569 unsigned long tmp = h->max_huge_pages; 4570 int ret; 4571 4572 if (!hugepages_supported()) 4573 return -EOPNOTSUPP; 4574 4575 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 4576 &tmp); 4577 if (ret) 4578 goto out; 4579 4580 if (write) 4581 ret = __nr_hugepages_store_common(obey_mempolicy, h, 4582 NUMA_NO_NODE, tmp, *length); 4583 out: 4584 return ret; 4585 } 4586 4587 int hugetlb_sysctl_handler(struct ctl_table *table, int write, 4588 void *buffer, size_t *length, loff_t *ppos) 4589 { 4590 4591 return hugetlb_sysctl_handler_common(false, table, write, 4592 buffer, length, ppos); 4593 } 4594 4595 #ifdef CONFIG_NUMA 4596 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, 4597 void *buffer, size_t *length, loff_t *ppos) 4598 { 4599 return hugetlb_sysctl_handler_common(true, table, write, 4600 buffer, length, ppos); 4601 } 4602 #endif /* CONFIG_NUMA */ 4603 4604 int hugetlb_overcommit_handler(struct ctl_table *table, int write, 4605 void *buffer, size_t *length, loff_t *ppos) 4606 { 4607 struct hstate *h = &default_hstate; 4608 unsigned long tmp; 4609 int ret; 4610 4611 if (!hugepages_supported()) 4612 return -EOPNOTSUPP; 4613 4614 tmp = h->nr_overcommit_huge_pages; 4615 4616 if (write && hstate_is_gigantic(h)) 4617 return -EINVAL; 4618 4619 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 4620 &tmp); 4621 if (ret) 4622 goto out; 4623 4624 if (write) { 4625 spin_lock_irq(&hugetlb_lock); 4626 h->nr_overcommit_huge_pages = tmp; 4627 spin_unlock_irq(&hugetlb_lock); 4628 } 4629 out: 4630 return ret; 4631 } 4632 4633 #endif /* CONFIG_SYSCTL */ 4634 4635 void hugetlb_report_meminfo(struct seq_file *m) 4636 { 4637 struct hstate *h; 4638 unsigned long total = 0; 4639 4640 if (!hugepages_supported()) 4641 return; 4642 4643 for_each_hstate(h) { 4644 unsigned long count = h->nr_huge_pages; 4645 4646 total += huge_page_size(h) * count; 4647 4648 if (h == &default_hstate) 4649 seq_printf(m, 4650 "HugePages_Total: %5lu\n" 4651 "HugePages_Free: %5lu\n" 4652 "HugePages_Rsvd: %5lu\n" 4653 "HugePages_Surp: %5lu\n" 4654 "Hugepagesize: %8lu kB\n", 4655 count, 4656 h->free_huge_pages, 4657 h->resv_huge_pages, 4658 h->surplus_huge_pages, 4659 huge_page_size(h) / SZ_1K); 4660 } 4661 4662 seq_printf(m, "Hugetlb: %8lu kB\n", total / SZ_1K); 4663 } 4664 4665 int hugetlb_report_node_meminfo(char *buf, int len, int nid) 4666 { 4667 struct hstate *h = &default_hstate; 4668 4669 if (!hugepages_supported()) 4670 return 0; 4671 4672 return sysfs_emit_at(buf, len, 4673 "Node %d HugePages_Total: %5u\n" 4674 "Node %d HugePages_Free: %5u\n" 4675 "Node %d HugePages_Surp: %5u\n", 4676 nid, h->nr_huge_pages_node[nid], 4677 nid, h->free_huge_pages_node[nid], 4678 nid, h->surplus_huge_pages_node[nid]); 4679 } 4680 4681 void hugetlb_show_meminfo_node(int nid) 4682 { 4683 struct hstate *h; 4684 4685 if (!hugepages_supported()) 4686 return; 4687 4688 for_each_hstate(h) 4689 printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n", 4690 nid, 4691 h->nr_huge_pages_node[nid], 4692 h->free_huge_pages_node[nid], 4693 h->surplus_huge_pages_node[nid], 4694 huge_page_size(h) / 
SZ_1K);
4695 }
4696
4697 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
4698 {
4699	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
4700		   atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
4701 }
4702
4703 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
4704 unsigned long hugetlb_total_pages(void)
4705 {
4706	struct hstate *h;
4707	unsigned long nr_total_pages = 0;
4708
4709	for_each_hstate(h)
4710		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
4711	return nr_total_pages;
4712 }
4713
4714 static int hugetlb_acct_memory(struct hstate *h, long delta)
4715 {
4716	int ret = -ENOMEM;
4717
4718	if (!delta)
4719		return 0;
4720
4721	spin_lock_irq(&hugetlb_lock);
4722	/*
4723	 * When cpuset is configured, it breaks the strict hugetlb page
4724	 * reservation as the accounting is done on a global variable. Such
4725	 * reservation is completely rubbish in the presence of cpuset because
4726	 * the reservation is not checked against page availability for the
4727	 * current cpuset. An application can still be OOM'ed by the kernel
4728	 * for lack of free hugetlb pages in the cpuset that the task is in.
4729	 * Attempting to enforce strict accounting with cpuset is almost
4730	 * impossible (or too ugly) because cpusets are too fluid: a task
4731	 * or memory node can be dynamically moved between cpusets.
4732	 *
4733	 * The change of semantics for shared hugetlb mapping with cpuset is
4734	 * undesirable. However, in order to preserve some of the semantics,
4735	 * we fall back to check against current free page availability as
4736	 * a best attempt and hopefully to minimize the impact of changing
4737	 * semantics that cpuset has.
4738	 *
4739	 * Apart from cpuset, we also have the memory policy mechanism that
4740	 * determines from which node the kernel will allocate memory
4741	 * in a NUMA system. So, similar to cpuset, we should also consider
4742	 * the memory policy of the current task, just as described
4743	 * above.
4744	 */
4745	if (delta > 0) {
4746		if (gather_surplus_pages(h, delta) < 0)
4747			goto out;
4748
4749		if (delta > allowed_mems_nr(h)) {
4750			return_unused_surplus_pages(h, delta);
4751			goto out;
4752		}
4753	}
4754
4755	ret = 0;
4756	if (delta < 0)
4757		return_unused_surplus_pages(h, (unsigned long) -delta);
4758
4759 out:
4760	spin_unlock_irq(&hugetlb_lock);
4761	return ret;
4762 }
4763
4764 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
4765 {
4766	struct resv_map *resv = vma_resv_map(vma);
4767
4768	/*
4769	 * HPAGE_RESV_OWNER indicates a private mapping.
4770	 * This new VMA should share its sibling's reservation map if present.
4771	 * The VMA will only ever have a valid reservation map pointer where
4772	 * it is being copied for another still existing VMA. As that VMA
4773	 * has a reference to the reservation map it cannot disappear until
4774	 * after this open call completes. It is therefore safe to take a
4775	 * new reference here without additional locking.
4776	 */
4777	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
4778		resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
4779		kref_get(&resv->refs);
4780	}
4781
4782	/*
4783	 * vma_lock structure for sharable mappings is vma specific.
4784	 * Clear old pointer (if copied via vm_area_dup) and allocate
4785	 * new structure. Before clearing, make sure vma_lock is not
4786	 * for this vma.
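	 * (A stale pointer is expected when the vma was duplicated via
	 * vm_area_dup(), e.g. on fork or VMA splitting; a pointer whose
	 * vma field matches means the lock was already allocated for
	 * this vma.)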
4787	 */
4788	if (vma->vm_flags & VM_MAYSHARE) {
4789		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
4790
4791		if (vma_lock) {
4792			if (vma_lock->vma != vma) {
4793				vma->vm_private_data = NULL;
4794				hugetlb_vma_lock_alloc(vma);
4795			} else
4796				pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
4797		} else
4798			hugetlb_vma_lock_alloc(vma);
4799	}
4800 }
4801
4802 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
4803 {
4804	struct hstate *h = hstate_vma(vma);
4805	struct resv_map *resv;
4806	struct hugepage_subpool *spool = subpool_vma(vma);
4807	unsigned long reserve, start, end;
4808	long gbl_reserve;
4809
4810	hugetlb_vma_lock_free(vma);
4811
4812	resv = vma_resv_map(vma);
4813	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4814		return;
4815
4816	start = vma_hugecache_offset(h, vma, vma->vm_start);
4817	end = vma_hugecache_offset(h, vma, vma->vm_end);
4818
4819	reserve = (end - start) - region_count(resv, start, end);
4820	hugetlb_cgroup_uncharge_counter(resv, start, end);
4821	if (reserve) {
4822		/*
4823		 * Decrement reserve counts. The global reserve count may be
4824		 * adjusted if the subpool has a minimum size.
4825		 */
4826		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
4827		hugetlb_acct_memory(h, -gbl_reserve);
4828	}
4829
4830	kref_put(&resv->refs, resv_map_release);
4831 }
4832
4833 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
4834 {
4835	if (addr & ~(huge_page_mask(hstate_vma(vma))))
4836		return -EINVAL;
4837	return 0;
4838 }
4839
4840 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
4841 {
4842	return huge_page_size(hstate_vma(vma));
4843 }
4844
4845 /*
4846  * We cannot handle pagefaults against hugetlb pages at all. They cause
4847  * handle_mm_fault() to try to instantiate regular-sized pages in the
4848  * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
4849  * this far.
4850  */
4851 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
4852 {
4853	BUG();
4854	return 0;
4855 }
4856
4857 /*
4858  * When a new function is introduced to vm_operations_struct and added
4859  * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
4860  * This is because under System V memory model, mappings created via
4861  * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
4862  * and their original vm_ops are overwritten with shm_vm_ops.
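 *
 * Illustrative user-space path: a segment created with
 * shmget(key, size, IPC_CREAT | SHM_HUGETLB | 0600) is backed by a
 * hugetlbfs file, yet shmat() wires up shm_vm_ops rather than the
 * hugetlb_vm_ops defined below.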
4863 */ 4864 const struct vm_operations_struct hugetlb_vm_ops = { 4865 .fault = hugetlb_vm_op_fault, 4866 .open = hugetlb_vm_op_open, 4867 .close = hugetlb_vm_op_close, 4868 .may_split = hugetlb_vm_op_split, 4869 .pagesize = hugetlb_vm_op_pagesize, 4870 }; 4871 4872 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 4873 int writable) 4874 { 4875 pte_t entry; 4876 unsigned int shift = huge_page_shift(hstate_vma(vma)); 4877 4878 if (writable) { 4879 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, 4880 vma->vm_page_prot))); 4881 } else { 4882 entry = huge_pte_wrprotect(mk_huge_pte(page, 4883 vma->vm_page_prot)); 4884 } 4885 entry = pte_mkyoung(entry); 4886 entry = arch_make_huge_pte(entry, shift, vma->vm_flags); 4887 4888 return entry; 4889 } 4890 4891 static void set_huge_ptep_writable(struct vm_area_struct *vma, 4892 unsigned long address, pte_t *ptep) 4893 { 4894 pte_t entry; 4895 4896 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep))); 4897 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) 4898 update_mmu_cache(vma, address, ptep); 4899 } 4900 4901 bool is_hugetlb_entry_migration(pte_t pte) 4902 { 4903 swp_entry_t swp; 4904 4905 if (huge_pte_none(pte) || pte_present(pte)) 4906 return false; 4907 swp = pte_to_swp_entry(pte); 4908 if (is_migration_entry(swp)) 4909 return true; 4910 else 4911 return false; 4912 } 4913 4914 static bool is_hugetlb_entry_hwpoisoned(pte_t pte) 4915 { 4916 swp_entry_t swp; 4917 4918 if (huge_pte_none(pte) || pte_present(pte)) 4919 return false; 4920 swp = pte_to_swp_entry(pte); 4921 if (is_hwpoison_entry(swp)) 4922 return true; 4923 else 4924 return false; 4925 } 4926 4927 static void 4928 hugetlb_install_page(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr, 4929 struct page *new_page) 4930 { 4931 __SetPageUptodate(new_page); 4932 hugepage_add_new_anon_rmap(new_page, vma, addr); 4933 set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1)); 4934 hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm); 4935 SetHPageMigratable(new_page); 4936 } 4937 4938 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 4939 struct vm_area_struct *dst_vma, 4940 struct vm_area_struct *src_vma) 4941 { 4942 pte_t *src_pte, *dst_pte, entry; 4943 struct page *ptepage; 4944 unsigned long addr; 4945 bool cow = is_cow_mapping(src_vma->vm_flags); 4946 struct hstate *h = hstate_vma(src_vma); 4947 unsigned long sz = huge_page_size(h); 4948 unsigned long npages = pages_per_huge_page(h); 4949 struct mmu_notifier_range range; 4950 unsigned long last_addr_mask; 4951 int ret = 0; 4952 4953 if (cow) { 4954 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src_vma, src, 4955 src_vma->vm_start, 4956 src_vma->vm_end); 4957 mmu_notifier_invalidate_range_start(&range); 4958 mmap_assert_write_locked(src); 4959 raw_write_seqcount_begin(&src->write_protect_seq); 4960 } else { 4961 /* 4962 * For shared mappings the vma lock must be held before 4963 * calling huge_pte_offset in the src vma. Otherwise, the 4964 * returned ptep could go away if part of a shared pmd and 4965 * another thread calls huge_pmd_unshare. 
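		 * Holding the lock in read mode suffices here:
		 * huge_pmd_unshare() requires the vma lock in write mode,
		 * so a shared PMD page cannot be unshared (and freed)
		 * underneath us while the read lock is held.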
4966 */ 4967 hugetlb_vma_lock_read(src_vma); 4968 } 4969 4970 last_addr_mask = hugetlb_mask_last_page(h); 4971 for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) { 4972 spinlock_t *src_ptl, *dst_ptl; 4973 src_pte = huge_pte_offset(src, addr, sz); 4974 if (!src_pte) { 4975 addr |= last_addr_mask; 4976 continue; 4977 } 4978 dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz); 4979 if (!dst_pte) { 4980 ret = -ENOMEM; 4981 break; 4982 } 4983 4984 /* 4985 * If the pagetables are shared don't copy or take references. 4986 * 4987 * dst_pte == src_pte is the common case of src/dest sharing. 4988 * However, src could have 'unshared' and dst shares with 4989 * another vma. So page_count of ptep page is checked instead 4990 * to reliably determine whether pte is shared. 4991 */ 4992 if (page_count(virt_to_page(dst_pte)) > 1) { 4993 addr |= last_addr_mask; 4994 continue; 4995 } 4996 4997 dst_ptl = huge_pte_lock(h, dst, dst_pte); 4998 src_ptl = huge_pte_lockptr(h, src, src_pte); 4999 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5000 entry = huge_ptep_get(src_pte); 5001 again: 5002 if (huge_pte_none(entry)) { 5003 /* 5004 * Skip if src entry none. 5005 */ 5006 ; 5007 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) { 5008 bool uffd_wp = huge_pte_uffd_wp(entry); 5009 5010 if (!userfaultfd_wp(dst_vma) && uffd_wp) 5011 entry = huge_pte_clear_uffd_wp(entry); 5012 set_huge_pte_at(dst, addr, dst_pte, entry); 5013 } else if (unlikely(is_hugetlb_entry_migration(entry))) { 5014 swp_entry_t swp_entry = pte_to_swp_entry(entry); 5015 bool uffd_wp = huge_pte_uffd_wp(entry); 5016 5017 if (!is_readable_migration_entry(swp_entry) && cow) { 5018 /* 5019 * COW mappings require pages in both 5020 * parent and child to be set to read. 5021 */ 5022 swp_entry = make_readable_migration_entry( 5023 swp_offset(swp_entry)); 5024 entry = swp_entry_to_pte(swp_entry); 5025 if (userfaultfd_wp(src_vma) && uffd_wp) 5026 entry = huge_pte_mkuffd_wp(entry); 5027 set_huge_pte_at(src, addr, src_pte, entry); 5028 } 5029 if (!userfaultfd_wp(dst_vma) && uffd_wp) 5030 entry = huge_pte_clear_uffd_wp(entry); 5031 set_huge_pte_at(dst, addr, dst_pte, entry); 5032 } else if (unlikely(is_pte_marker(entry))) { 5033 /* 5034 * We copy the pte marker only if the dst vma has 5035 * uffd-wp enabled. 5036 */ 5037 if (userfaultfd_wp(dst_vma)) 5038 set_huge_pte_at(dst, addr, dst_pte, entry); 5039 } else { 5040 entry = huge_ptep_get(src_pte); 5041 ptepage = pte_page(entry); 5042 get_page(ptepage); 5043 5044 /* 5045 * Failing to duplicate the anon rmap is a rare case 5046 * where we see pinned hugetlb pages while they're 5047 * prone to COW. We need to do the COW earlier during 5048 * fork. 5049 * 5050 * When pre-allocating the page or copying data, we 5051 * need to be without the pgtable locks since we could 5052 * sleep during the process. 
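			 *
			 * (Typical trigger: the source page was pinned via
			 * FOLL_PIN, e.g. by an RDMA driver or io_uring
			 * registered buffers, so it must not be shared
			 * COW-style with the child; illustrative examples.)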
5053 */ 5054 if (!PageAnon(ptepage)) { 5055 page_dup_file_rmap(ptepage, true); 5056 } else if (page_try_dup_anon_rmap(ptepage, true, 5057 src_vma)) { 5058 pte_t src_pte_old = entry; 5059 struct page *new; 5060 5061 spin_unlock(src_ptl); 5062 spin_unlock(dst_ptl); 5063 /* Do not use reserve as it's private owned */ 5064 new = alloc_huge_page(dst_vma, addr, 1); 5065 if (IS_ERR(new)) { 5066 put_page(ptepage); 5067 ret = PTR_ERR(new); 5068 break; 5069 } 5070 copy_user_huge_page(new, ptepage, addr, dst_vma, 5071 npages); 5072 put_page(ptepage); 5073 5074 /* Install the new huge page if src pte stable */ 5075 dst_ptl = huge_pte_lock(h, dst, dst_pte); 5076 src_ptl = huge_pte_lockptr(h, src, src_pte); 5077 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5078 entry = huge_ptep_get(src_pte); 5079 if (!pte_same(src_pte_old, entry)) { 5080 restore_reserve_on_error(h, dst_vma, addr, 5081 new); 5082 put_page(new); 5083 /* huge_ptep of dst_pte won't change as in child */ 5084 goto again; 5085 } 5086 hugetlb_install_page(dst_vma, dst_pte, addr, new); 5087 spin_unlock(src_ptl); 5088 spin_unlock(dst_ptl); 5089 continue; 5090 } 5091 5092 if (cow) { 5093 /* 5094 * No need to notify as we are downgrading page 5095 * table protection not changing it to point 5096 * to a new page. 5097 * 5098 * See Documentation/mm/mmu_notifier.rst 5099 */ 5100 huge_ptep_set_wrprotect(src, addr, src_pte); 5101 entry = huge_pte_wrprotect(entry); 5102 } 5103 5104 set_huge_pte_at(dst, addr, dst_pte, entry); 5105 hugetlb_count_add(npages, dst); 5106 } 5107 spin_unlock(src_ptl); 5108 spin_unlock(dst_ptl); 5109 } 5110 5111 if (cow) { 5112 raw_write_seqcount_end(&src->write_protect_seq); 5113 mmu_notifier_invalidate_range_end(&range); 5114 } else { 5115 hugetlb_vma_unlock_read(src_vma); 5116 } 5117 5118 return ret; 5119 } 5120 5121 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr, 5122 unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte) 5123 { 5124 struct hstate *h = hstate_vma(vma); 5125 struct mm_struct *mm = vma->vm_mm; 5126 spinlock_t *src_ptl, *dst_ptl; 5127 pte_t pte; 5128 5129 dst_ptl = huge_pte_lock(h, mm, dst_pte); 5130 src_ptl = huge_pte_lockptr(h, mm, src_pte); 5131 5132 /* 5133 * We don't have to worry about the ordering of src and dst ptlocks 5134 * because exclusive mmap_sem (or the i_mmap_lock) prevents deadlock. 5135 */ 5136 if (src_ptl != dst_ptl) 5137 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5138 5139 pte = huge_ptep_get_and_clear(mm, old_addr, src_pte); 5140 set_huge_pte_at(mm, new_addr, dst_pte, pte); 5141 5142 if (src_ptl != dst_ptl) 5143 spin_unlock(src_ptl); 5144 spin_unlock(dst_ptl); 5145 } 5146 5147 int move_hugetlb_page_tables(struct vm_area_struct *vma, 5148 struct vm_area_struct *new_vma, 5149 unsigned long old_addr, unsigned long new_addr, 5150 unsigned long len) 5151 { 5152 struct hstate *h = hstate_vma(vma); 5153 struct address_space *mapping = vma->vm_file->f_mapping; 5154 unsigned long sz = huge_page_size(h); 5155 struct mm_struct *mm = vma->vm_mm; 5156 unsigned long old_end = old_addr + len; 5157 unsigned long last_addr_mask; 5158 pte_t *src_pte, *dst_pte; 5159 struct mmu_notifier_range range; 5160 bool shared_pmd = false; 5161 5162 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, old_addr, 5163 old_end); 5164 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 5165 /* 5166 * In case of shared PMDs, we should cover the maximum possible 5167 * range. 
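	 * For example, with 2 MiB pages whose PMD page may be shared,
	 * adjust_range_if_pmd_sharing_possible() above widens the range
	 * to the enclosing PUD_SIZE boundaries (1 GiB on x86-64).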
5168 */ 5169 flush_cache_range(vma, range.start, range.end); 5170 5171 mmu_notifier_invalidate_range_start(&range); 5172 last_addr_mask = hugetlb_mask_last_page(h); 5173 /* Prevent race with file truncation */ 5174 hugetlb_vma_lock_write(vma); 5175 i_mmap_lock_write(mapping); 5176 for (; old_addr < old_end; old_addr += sz, new_addr += sz) { 5177 src_pte = huge_pte_offset(mm, old_addr, sz); 5178 if (!src_pte) { 5179 old_addr |= last_addr_mask; 5180 new_addr |= last_addr_mask; 5181 continue; 5182 } 5183 if (huge_pte_none(huge_ptep_get(src_pte))) 5184 continue; 5185 5186 if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) { 5187 shared_pmd = true; 5188 old_addr |= last_addr_mask; 5189 new_addr |= last_addr_mask; 5190 continue; 5191 } 5192 5193 dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz); 5194 if (!dst_pte) 5195 break; 5196 5197 move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte); 5198 } 5199 5200 if (shared_pmd) 5201 flush_tlb_range(vma, range.start, range.end); 5202 else 5203 flush_tlb_range(vma, old_end - len, old_end); 5204 mmu_notifier_invalidate_range_end(&range); 5205 i_mmap_unlock_write(mapping); 5206 hugetlb_vma_unlock_write(vma); 5207 5208 return len + old_addr - old_end; 5209 } 5210 5211 static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 5212 unsigned long start, unsigned long end, 5213 struct page *ref_page, zap_flags_t zap_flags) 5214 { 5215 struct mm_struct *mm = vma->vm_mm; 5216 unsigned long address; 5217 pte_t *ptep; 5218 pte_t pte; 5219 spinlock_t *ptl; 5220 struct page *page; 5221 struct hstate *h = hstate_vma(vma); 5222 unsigned long sz = huge_page_size(h); 5223 unsigned long last_addr_mask; 5224 bool force_flush = false; 5225 5226 WARN_ON(!is_vm_hugetlb_page(vma)); 5227 BUG_ON(start & ~huge_page_mask(h)); 5228 BUG_ON(end & ~huge_page_mask(h)); 5229 5230 /* 5231 * This is a hugetlb vma, all the pte entries should point 5232 * to huge page. 5233 */ 5234 tlb_change_page_size(tlb, sz); 5235 tlb_start_vma(tlb, vma); 5236 5237 last_addr_mask = hugetlb_mask_last_page(h); 5238 address = start; 5239 for (; address < end; address += sz) { 5240 ptep = huge_pte_offset(mm, address, sz); 5241 if (!ptep) { 5242 address |= last_addr_mask; 5243 continue; 5244 } 5245 5246 ptl = huge_pte_lock(h, mm, ptep); 5247 if (huge_pmd_unshare(mm, vma, address, ptep)) { 5248 spin_unlock(ptl); 5249 tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE); 5250 force_flush = true; 5251 address |= last_addr_mask; 5252 continue; 5253 } 5254 5255 pte = huge_ptep_get(ptep); 5256 if (huge_pte_none(pte)) { 5257 spin_unlock(ptl); 5258 continue; 5259 } 5260 5261 /* 5262 * Migrating hugepage or HWPoisoned hugepage is already 5263 * unmapped and its refcount is dropped, so just clear pte here. 5264 */ 5265 if (unlikely(!pte_present(pte))) { 5266 /* 5267 * If the pte was wr-protected by uffd-wp in any of the 5268 * swap forms, meanwhile the caller does not want to 5269 * drop the uffd-wp bit in this zap, then replace the 5270 * pte with a marker. 5271 */ 5272 if (pte_swp_uffd_wp_any(pte) && 5273 !(zap_flags & ZAP_FLAG_DROP_MARKER)) 5274 set_huge_pte_at(mm, address, ptep, 5275 make_pte_marker(PTE_MARKER_UFFD_WP)); 5276 else 5277 huge_pte_clear(mm, address, ptep, sz); 5278 spin_unlock(ptl); 5279 continue; 5280 } 5281 5282 page = pte_page(pte); 5283 /* 5284 * If a reference page is supplied, it is because a specific 5285 * page is being unmapped, not a range. Ensure the page we 5286 * are about to unmap is the actual page of interest. 
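		 * (unmap_ref_private() below is the caller that passes a
		 * specific ref_page, when evicting a page from other
		 * private mappings ahead of a COW.)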
5287		 */
5288		if (ref_page) {
5289			if (page != ref_page) {
5290				spin_unlock(ptl);
5291				continue;
5292			}
5293			/*
5294			 * Mark the VMA as having unmapped its page so that
5295			 * future faults in this VMA will fail rather than
5296			 * looking like data was lost.
5297			 */
5298			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
5299		}
5300
5301		pte = huge_ptep_get_and_clear(mm, address, ptep);
5302		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
5303		if (huge_pte_dirty(pte))
5304			set_page_dirty(page);
5305		/* Leave a uffd-wp pte marker if needed */
5306		if (huge_pte_uffd_wp(pte) &&
5307		    !(zap_flags & ZAP_FLAG_DROP_MARKER))
5308			set_huge_pte_at(mm, address, ptep,
5309					make_pte_marker(PTE_MARKER_UFFD_WP));
5310		hugetlb_count_sub(pages_per_huge_page(h), mm);
5311		page_remove_rmap(page, vma, true);
5312
5313		spin_unlock(ptl);
5314		tlb_remove_page_size(tlb, page, huge_page_size(h));
5315		/*
5316		 * Bail out after unmapping reference page if supplied
5317		 */
5318		if (ref_page)
5319			break;
5320	}
5321	tlb_end_vma(tlb, vma);
5322
5323	/*
5324	 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
5325	 * could defer the flush until now, since by holding i_mmap_rwsem we
5326	 * guaranteed that the last reference would not be dropped. But we must
5327	 * do the flushing before we return, as otherwise i_mmap_rwsem will be
5328	 * dropped and the last reference to the shared PMDs page might be
5329	 * dropped as well.
5330	 *
5331	 * In theory we could defer the freeing of the PMD pages as well, but
5332	 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
5333	 * detect sharing, so we cannot defer the release of the page either.
5334	 * Instead, do flush now.
5335	 */
5336	if (force_flush)
5337		tlb_flush_mmu_tlbonly(tlb);
5338 }
5339
5340 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
5341			  struct vm_area_struct *vma, unsigned long start,
5342			  unsigned long end, struct page *ref_page,
5343			  zap_flags_t zap_flags)
5344 {
5345	hugetlb_vma_lock_write(vma);
5346	i_mmap_lock_write(vma->vm_file->f_mapping);
5347
5348	/* mmu notification performed in caller */
5349	__unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);
5350
5351	if (zap_flags & ZAP_FLAG_UNMAP) {	/* final unmap */
5352		/*
5353		 * Unlock and free the vma lock before releasing i_mmap_rwsem.
5354		 * When the vma_lock is freed, this makes the vma ineligible
5355		 * for pmd sharing. And, i_mmap_rwsem is required to set up
5356		 * pmd sharing. This is important as page tables for this
5357		 * unmapped range will be asynchronously deleted. If the page
5358		 * tables are shared, there will be issues when accessed by
5359		 * someone else.
5360 */ 5361 __hugetlb_vma_unlock_write_free(vma); 5362 i_mmap_unlock_write(vma->vm_file->f_mapping); 5363 } else { 5364 i_mmap_unlock_write(vma->vm_file->f_mapping); 5365 hugetlb_vma_unlock_write(vma); 5366 } 5367 } 5368 5369 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 5370 unsigned long end, struct page *ref_page, 5371 zap_flags_t zap_flags) 5372 { 5373 struct mmu_notifier_range range; 5374 struct mmu_gather tlb; 5375 5376 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, 5377 start, end); 5378 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 5379 mmu_notifier_invalidate_range_start(&range); 5380 tlb_gather_mmu(&tlb, vma->vm_mm); 5381 5382 __unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags); 5383 5384 mmu_notifier_invalidate_range_end(&range); 5385 tlb_finish_mmu(&tlb); 5386 } 5387 5388 /* 5389 * This is called when the original mapper is failing to COW a MAP_PRIVATE 5390 * mapping it owns the reserve page for. The intention is to unmap the page 5391 * from other VMAs and let the children be SIGKILLed if they are faulting the 5392 * same region. 5393 */ 5394 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, 5395 struct page *page, unsigned long address) 5396 { 5397 struct hstate *h = hstate_vma(vma); 5398 struct vm_area_struct *iter_vma; 5399 struct address_space *mapping; 5400 pgoff_t pgoff; 5401 5402 /* 5403 * vm_pgoff is in PAGE_SIZE units, hence the different calculation 5404 * from page cache lookup which is in HPAGE_SIZE units. 5405 */ 5406 address = address & huge_page_mask(h); 5407 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + 5408 vma->vm_pgoff; 5409 mapping = vma->vm_file->f_mapping; 5410 5411 /* 5412 * Take the mapping lock for the duration of the table walk. As 5413 * this mapping should be shared between all the VMAs, 5414 * __unmap_hugepage_range() is called as the lock is already held 5415 */ 5416 i_mmap_lock_write(mapping); 5417 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { 5418 /* Do not unmap the current VMA */ 5419 if (iter_vma == vma) 5420 continue; 5421 5422 /* 5423 * Shared VMAs have their own reserves and do not affect 5424 * MAP_PRIVATE accounting but it is possible that a shared 5425 * VMA is using the same page so check and skip such VMAs. 5426 */ 5427 if (iter_vma->vm_flags & VM_MAYSHARE) 5428 continue; 5429 5430 /* 5431 * Unmap the page from other VMAs without their own reserves. 5432 * They get marked to be SIGKILLed if they fault in these 5433 * areas. This is because a future no-page fault on this VMA 5434 * could insert a zeroed page instead of the data existing 5435 * from the time of fork. This would look like data corruption 5436 */ 5437 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 5438 unmap_hugepage_range(iter_vma, address, 5439 address + huge_page_size(h), page, 0); 5440 } 5441 i_mmap_unlock_write(mapping); 5442 } 5443 5444 /* 5445 * hugetlb_wp() should be called with page lock of the original hugepage held. 5446 * Called with hugetlb_fault_mutex_table held and pte_page locked so we 5447 * cannot race with other handlers or page migration. 5448 * Keep the pte_same checks anyway to make transition from the mutex easier. 
5449 */ 5450 static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma, 5451 unsigned long address, pte_t *ptep, unsigned int flags, 5452 struct page *pagecache_page, spinlock_t *ptl) 5453 { 5454 const bool unshare = flags & FAULT_FLAG_UNSHARE; 5455 pte_t pte; 5456 struct hstate *h = hstate_vma(vma); 5457 struct page *old_page, *new_page; 5458 int outside_reserve = 0; 5459 vm_fault_t ret = 0; 5460 unsigned long haddr = address & huge_page_mask(h); 5461 struct mmu_notifier_range range; 5462 5463 /* 5464 * hugetlb does not support FOLL_FORCE-style write faults that keep the 5465 * PTE mapped R/O such as maybe_mkwrite() would do. 5466 */ 5467 if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE))) 5468 return VM_FAULT_SIGSEGV; 5469 5470 /* Let's take out MAP_SHARED mappings first. */ 5471 if (vma->vm_flags & VM_MAYSHARE) { 5472 set_huge_ptep_writable(vma, haddr, ptep); 5473 return 0; 5474 } 5475 5476 pte = huge_ptep_get(ptep); 5477 old_page = pte_page(pte); 5478 5479 delayacct_wpcopy_start(); 5480 5481 retry_avoidcopy: 5482 /* 5483 * If no-one else is actually using this page, we're the exclusive 5484 * owner and can reuse this page. 5485 */ 5486 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) { 5487 if (!PageAnonExclusive(old_page)) 5488 page_move_anon_rmap(old_page, vma); 5489 if (likely(!unshare)) 5490 set_huge_ptep_writable(vma, haddr, ptep); 5491 5492 delayacct_wpcopy_end(); 5493 return 0; 5494 } 5495 VM_BUG_ON_PAGE(PageAnon(old_page) && PageAnonExclusive(old_page), 5496 old_page); 5497 5498 /* 5499 * If the process that created a MAP_PRIVATE mapping is about to 5500 * perform a COW due to a shared page count, attempt to satisfy 5501 * the allocation without using the existing reserves. The pagecache 5502 * page is used to determine if the reserve at this address was 5503 * consumed or not. If reserves were used, a partial faulted mapping 5504 * at the time of fork() could consume its reserves on COW instead 5505 * of the full address range. 5506 */ 5507 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 5508 old_page != pagecache_page) 5509 outside_reserve = 1; 5510 5511 get_page(old_page); 5512 5513 /* 5514 * Drop page table lock as buddy allocator may be called. It will 5515 * be acquired again before returning to the caller, as expected. 5516 */ 5517 spin_unlock(ptl); 5518 new_page = alloc_huge_page(vma, haddr, outside_reserve); 5519 5520 if (IS_ERR(new_page)) { 5521 /* 5522 * If a process owning a MAP_PRIVATE mapping fails to COW, 5523 * it is due to references held by a child and an insufficient 5524 * huge page pool. To guarantee the original mappers 5525 * reliability, unmap the page from child processes. The child 5526 * may get SIGKILLed if it later faults. 5527 */ 5528 if (outside_reserve) { 5529 struct address_space *mapping = vma->vm_file->f_mapping; 5530 pgoff_t idx; 5531 u32 hash; 5532 5533 put_page(old_page); 5534 /* 5535 * Drop hugetlb_fault_mutex and vma_lock before 5536 * unmapping. unmapping needs to hold vma_lock 5537 * in write mode. Dropping vma_lock in read mode 5538 * here is OK as COW mappings do not interact with 5539 * PMD sharing. 5540 * 5541 * Reacquire both after unmap operation. 
5542 */
5543 idx = vma_hugecache_offset(h, vma, haddr);
5544 hash = hugetlb_fault_mutex_hash(mapping, idx);
5545 hugetlb_vma_unlock_read(vma);
5546 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5547
5548 unmap_ref_private(mm, vma, old_page, haddr);
5549
5550 mutex_lock(&hugetlb_fault_mutex_table[hash]);
5551 hugetlb_vma_lock_read(vma);
5552 spin_lock(ptl);
5553 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5554 if (likely(ptep &&
5555 pte_same(huge_ptep_get(ptep), pte)))
5556 goto retry_avoidcopy;
5557 /*
5558 * A race occurred while re-acquiring the page table
5559 * lock: our job is done.
5560 */
5561 delayacct_wpcopy_end();
5562 return 0;
5563 }
5564
5565 ret = vmf_error(PTR_ERR(new_page));
5566 goto out_release_old;
5567 }
5568
5569 /*
5570 * When the original hugepage is a shared one, it does not have
5571 * anon_vma prepared.
5572 */
5573 if (unlikely(anon_vma_prepare(vma))) {
5574 ret = VM_FAULT_OOM;
5575 goto out_release_all;
5576 }
5577
5578 copy_user_huge_page(new_page, old_page, address, vma,
5579 pages_per_huge_page(h));
5580 __SetPageUptodate(new_page);
5581
5582 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
5583 haddr + huge_page_size(h));
5584 mmu_notifier_invalidate_range_start(&range);
5585
5586 /*
5587 * Retake the page table lock to check for racing updates
5588 * before the page tables are altered.
5589 */
5590 spin_lock(ptl);
5591 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5592 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
5593 /* Break COW or unshare */
5594 huge_ptep_clear_flush(vma, haddr, ptep);
5595 mmu_notifier_invalidate_range(mm, range.start, range.end);
5596 page_remove_rmap(old_page, vma, true);
5597 hugepage_add_new_anon_rmap(new_page, vma, haddr);
5598 set_huge_pte_at(mm, haddr, ptep,
5599 make_huge_pte(vma, new_page, !unshare));
5600 SetHPageMigratable(new_page);
5601 /* Make the old page be freed below */
5602 new_page = old_page;
5603 }
5604 spin_unlock(ptl);
5605 mmu_notifier_invalidate_range_end(&range);
5606 out_release_all:
5607 /*
5608 * No restore in case of successful pagetable update (Break COW or
5609 * unshare).
5610 */
5611 if (new_page != old_page)
5612 restore_reserve_on_error(h, vma, haddr, new_page);
5613 put_page(new_page);
5614 out_release_old:
5615 put_page(old_page);
5616
5617 spin_lock(ptl); /* Caller expects lock to be held */
5618
5619 delayacct_wpcopy_end();
5620 return ret;
5621 }
5622
5623 /*
5624 * Return whether there is a pagecache page to back the given address within
5625 * the VMA. Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
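 * (lock_page() can sleep, which is not allowed under a spinlock, so the
 * check below only takes and drops a transient reference via
 * find_get_page().)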
5626 */ 5627 static bool hugetlbfs_pagecache_present(struct hstate *h, 5628 struct vm_area_struct *vma, unsigned long address) 5629 { 5630 struct address_space *mapping; 5631 pgoff_t idx; 5632 struct page *page; 5633 5634 mapping = vma->vm_file->f_mapping; 5635 idx = vma_hugecache_offset(h, vma, address); 5636 5637 page = find_get_page(mapping, idx); 5638 if (page) 5639 put_page(page); 5640 return page != NULL; 5641 } 5642 5643 int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping, 5644 pgoff_t idx) 5645 { 5646 struct folio *folio = page_folio(page); 5647 struct inode *inode = mapping->host; 5648 struct hstate *h = hstate_inode(inode); 5649 int err; 5650 5651 __folio_set_locked(folio); 5652 err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL); 5653 5654 if (unlikely(err)) { 5655 __folio_clear_locked(folio); 5656 return err; 5657 } 5658 ClearHPageRestoreReserve(page); 5659 5660 /* 5661 * mark folio dirty so that it will not be removed from cache/file 5662 * by non-hugetlbfs specific code paths. 5663 */ 5664 folio_mark_dirty(folio); 5665 5666 spin_lock(&inode->i_lock); 5667 inode->i_blocks += blocks_per_huge_page(h); 5668 spin_unlock(&inode->i_lock); 5669 return 0; 5670 } 5671 5672 static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma, 5673 struct address_space *mapping, 5674 pgoff_t idx, 5675 unsigned int flags, 5676 unsigned long haddr, 5677 unsigned long addr, 5678 unsigned long reason) 5679 { 5680 u32 hash; 5681 struct vm_fault vmf = { 5682 .vma = vma, 5683 .address = haddr, 5684 .real_address = addr, 5685 .flags = flags, 5686 5687 /* 5688 * Hard to debug if it ends up being 5689 * used by a callee that assumes 5690 * something about the other 5691 * uninitialized fields... same as in 5692 * memory.c 5693 */ 5694 }; 5695 5696 /* 5697 * vma_lock and hugetlb_fault_mutex must be dropped before handling 5698 * userfault. Also mmap_lock could be dropped due to handling 5699 * userfault, any vma operation should be careful from here. 5700 */ 5701 hugetlb_vma_unlock_read(vma); 5702 hash = hugetlb_fault_mutex_hash(mapping, idx); 5703 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 5704 return handle_userfault(&vmf, reason); 5705 } 5706 5707 /* 5708 * Recheck pte with pgtable lock. Returns true if pte didn't change, or 5709 * false if pte changed or is changing. 5710 */ 5711 static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, 5712 pte_t *ptep, pte_t old_pte) 5713 { 5714 spinlock_t *ptl; 5715 bool same; 5716 5717 ptl = huge_pte_lock(h, mm, ptep); 5718 same = pte_same(huge_ptep_get(ptep), old_pte); 5719 spin_unlock(ptl); 5720 5721 return same; 5722 } 5723 5724 static vm_fault_t hugetlb_no_page(struct mm_struct *mm, 5725 struct vm_area_struct *vma, 5726 struct address_space *mapping, pgoff_t idx, 5727 unsigned long address, pte_t *ptep, 5728 pte_t old_pte, unsigned int flags) 5729 { 5730 struct hstate *h = hstate_vma(vma); 5731 vm_fault_t ret = VM_FAULT_SIGBUS; 5732 int anon_rmap = 0; 5733 unsigned long size; 5734 struct page *page; 5735 pte_t new_pte; 5736 spinlock_t *ptl; 5737 unsigned long haddr = address & huge_page_mask(h); 5738 bool new_page, new_pagecache_page = false; 5739 u32 hash = hugetlb_fault_mutex_hash(mapping, idx); 5740 5741 /* 5742 * Currently, we are forced to kill the process in the event the 5743 * original mapper has unmapped pages from the child due to a failed 5744 * COW/unsharing. Warn that such a situation has occurred as it may not 5745 * be obvious. 
5746 */
5747 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
5748 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
5749 current->pid);
5750 goto out;
5751 }
5752
5753 /*
5754 * Use the page lock to guard against racing truncation
5755 * before we get page_table_lock.
5756 */
5757 new_page = false;
5758 page = find_lock_page(mapping, idx);
5759 if (!page) {
5760 size = i_size_read(mapping->host) >> huge_page_shift(h);
5761 if (idx >= size)
5762 goto out;
5763 /* Check for page in userfault range */
5764 if (userfaultfd_missing(vma)) {
5765 /*
5766 * Since hugetlb_no_page() was examining the pte
5767 * without the pgtable lock, we need to re-test under
5768 * lock because the pte may not be stable and could
5769 * have changed from under us. Try to detect ptes that
5770 * have changed, or are in the middle of changing, and
5771 * retry as needed.
5772 *
5773 * Note that userfaultfd is actually fine with
5774 * false positives (e.g. caused by a changed pte),
5775 * but not with wrong logical events (e.g. caused by
5776 * reading a pte in the middle of a change). The latter
5777 * can confuse userspace, so strictness is very
5778 * much preferred. E.g., a MISSING event should
5779 * never happen on the page after UFFDIO_COPY has
5780 * correctly installed the page and returned.
5781 */
5782 if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
5783 ret = 0;
5784 goto out;
5785 }
5786
5787 return hugetlb_handle_userfault(vma, mapping, idx, flags,
5788 haddr, address,
5789 VM_UFFD_MISSING);
5790 }
5791
5792 page = alloc_huge_page(vma, haddr, 0);
5793 if (IS_ERR(page)) {
5794 /*
5795 * Returning an error will result in the faulting task
5796 * being sent SIGBUS. The hugetlb fault mutex prevents
5797 * two tasks from racing to fault in the same page,
5798 * which could otherwise result in spurious allocation
5799 * failures. Page migration does not take the fault
5800 * mutex, but does a clear then write of pte's under
5801 * the page table lock. Page fault code could race with
5802 * migration, notice the cleared pte and try to
5803 * allocate a page here. Before returning an error, get
5804 * the ptl and make sure there really is no pte entry.
5805 */
5806 if (hugetlb_pte_stable(h, mm, ptep, old_pte))
5807 ret = vmf_error(PTR_ERR(page));
5808 else
5809 ret = 0;
5810 goto out;
5811 }
5812 clear_huge_page(page, address, pages_per_huge_page(h));
5813 __SetPageUptodate(page);
5814 new_page = true;
5815
5816 if (vma->vm_flags & VM_MAYSHARE) {
5817 int err = hugetlb_add_to_page_cache(page, mapping, idx);
5818 if (err) {
5819 /*
5820 * err can't be -EEXIST (which would imply someone
5821 * else consumed the reservation) since the hugetlb
5822 * fault mutex is held when adding a hugetlb page
5823 * to the page cache. So it's safe to call
5824 * restore_reserve_on_error() here.
5825 */
5826 restore_reserve_on_error(h, vma, haddr, page);
5827 put_page(page);
5828 goto out;
5829 }
5830 new_pagecache_page = true;
5831 } else {
5832 lock_page(page);
5833 if (unlikely(anon_vma_prepare(vma))) {
5834 ret = VM_FAULT_OOM;
5835 goto backout_unlocked;
5836 }
5837 anon_rmap = 1;
5838 }
5839 } else {
5840 /*
5841 * If a memory error occurs between mmap() and fault, the process
5842 * may not have a hwpoisoned swap entry for the errored virtual
5843 * address. So we need to block hugepage faults with the PG_hwpoison bit check.
5844 */
5845 if (unlikely(PageHWPoison(page))) {
5846 ret = VM_FAULT_HWPOISON_LARGE |
5847 VM_FAULT_SET_HINDEX(hstate_index(h));
5848 goto backout_unlocked;
5849 }
5850
5851 /* Check for page in userfault range.
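 * (A minor fault here means the page is already present in the page
 * cache but not yet mapped in this process; userspace is expected to
 * resolve it, e.g. with UFFDIO_CONTINUE, which reaches the
 * MCOPY_ATOMIC_CONTINUE path of hugetlb_mcopy_atomic_pte() below.)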
*/
5852 if (userfaultfd_minor(vma)) {
5853 unlock_page(page);
5854 put_page(page);
5855 /* See comment in userfaultfd_missing() block above */
5856 if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
5857 ret = 0;
5858 goto out;
5859 }
5860 return hugetlb_handle_userfault(vma, mapping, idx, flags,
5861 haddr, address,
5862 VM_UFFD_MINOR);
5863 }
5864 }
5865
5866 /*
5867 * If we are going to COW a private mapping later, we examine the
5868 * pending reservations for this page now. This will ensure that
5869 * any allocations necessary to record that reservation occur outside
5870 * the spinlock.
5871 */
5872 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5873 if (vma_needs_reservation(h, vma, haddr) < 0) {
5874 ret = VM_FAULT_OOM;
5875 goto backout_unlocked;
5876 }
5877 /* Just decrements count, does not deallocate */
5878 vma_end_reservation(h, vma, haddr);
5879 }
5880
5881 ptl = huge_pte_lock(h, mm, ptep);
5882 ret = 0;
5883 /* If pte changed from under us, retry */
5884 if (!pte_same(huge_ptep_get(ptep), old_pte))
5885 goto backout;
5886
5887 if (anon_rmap)
5888 hugepage_add_new_anon_rmap(page, vma, haddr);
5889 else
5890 page_dup_file_rmap(page, true);
5891 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
5892 && (vma->vm_flags & VM_SHARED)));
5893 /*
5894 * If this pte was previously wr-protected, keep it wr-protected even
5895 * if populated.
5896 */
5897 if (unlikely(pte_marker_uffd_wp(old_pte)))
5898 new_pte = huge_pte_wrprotect(huge_pte_mkuffd_wp(new_pte));
5899 set_huge_pte_at(mm, haddr, ptep, new_pte);
5900
5901 hugetlb_count_add(pages_per_huge_page(h), mm);
5902 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5903 /* Optimization, do the COW without a second fault */
5904 ret = hugetlb_wp(mm, vma, address, ptep, flags, page, ptl);
5905 }
5906
5907 spin_unlock(ptl);
5908
5909 /*
5910 * Only set HPageMigratable on newly allocated pages. Existing pages
5911 * found in the pagecache may not have HPageMigratable set if they have
5912 * been isolated for migration.
5913 */
5914 if (new_page)
5915 SetHPageMigratable(page);
5916
5917 unlock_page(page);
5918 out:
5919 hugetlb_vma_unlock_read(vma);
5920 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5921 return ret;
5922
5923 backout:
5924 spin_unlock(ptl);
5925 backout_unlocked:
5926 if (new_page && !new_pagecache_page)
5927 restore_reserve_on_error(h, vma, haddr, page);
5928
5929 unlock_page(page);
5930 put_page(page);
5931 goto out;
5932 }
5933
5934 #ifdef CONFIG_SMP
5935 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
5936 {
5937 unsigned long key[2];
5938 u32 hash;
5939
5940 key[0] = (unsigned long) mapping;
5941 key[1] = idx;
5942
5943 hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
5944
5945 return hash & (num_fault_mutexes - 1);
5946 }
5947 #else
5948 /*
5949 * For uniprocessor systems we always use a single mutex, so just
5950 * return 0 and avoid the hashing overhead.
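 *
 * Either way, callers use the returned value only as an index into
 * hugetlb_fault_mutex_table, e.g. (as in hugetlb_fault() below):
 *
 *	hash = hugetlb_fault_mutex_hash(mapping, idx);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);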
5951 */
5952 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
5953 {
5954 return 0;
5955 }
5956 #endif
5957
5958 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
5959 unsigned long address, unsigned int flags)
5960 {
5961 pte_t *ptep, entry;
5962 spinlock_t *ptl;
5963 vm_fault_t ret;
5964 u32 hash;
5965 pgoff_t idx;
5966 struct page *page = NULL;
5967 struct page *pagecache_page = NULL;
5968 struct hstate *h = hstate_vma(vma);
5969 struct address_space *mapping;
5970 int need_wait_lock = 0;
5971 unsigned long haddr = address & huge_page_mask(h);
5972
5973 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5974 if (ptep) {
5975 /*
5976 * Since we hold no locks, ptep could be stale. That is
5977 * OK as we are only making decisions based on content and
5978 * not actually modifying content here.
5979 */
5980 entry = huge_ptep_get(ptep);
5981 if (unlikely(is_hugetlb_entry_migration(entry))) {
5982 migration_entry_wait_huge(vma, ptep);
5983 return 0;
5984 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
5985 return VM_FAULT_HWPOISON_LARGE |
5986 VM_FAULT_SET_HINDEX(hstate_index(h));
5987 }
5988
5989 /*
5990 * Serialize hugepage allocation and instantiation, so that we don't
5991 * get spurious allocation failures if two CPUs race to instantiate
5992 * the same page in the page cache.
5993 */
5994 mapping = vma->vm_file->f_mapping;
5995 idx = vma_hugecache_offset(h, vma, haddr);
5996 hash = hugetlb_fault_mutex_hash(mapping, idx);
5997 mutex_lock(&hugetlb_fault_mutex_table[hash]);
5998
5999 /*
6000 * Acquire the vma lock before calling huge_pte_alloc and hold it
6001 * until finished with ptep. This prevents huge_pmd_unshare from
6002 * being called elsewhere and making the ptep no longer valid.
6003 *
6004 * ptep could have already been assigned via huge_pte_offset. That
6005 * is OK, as huge_pte_alloc will return the same value unless
6006 * something has changed.
6007 */
6008 hugetlb_vma_lock_read(vma);
6009 ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
6010 if (!ptep) {
6011 hugetlb_vma_unlock_read(vma);
6012 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6013 return VM_FAULT_OOM;
6014 }
6015
6016 entry = huge_ptep_get(ptep);
6017 /* PTE markers should be handled the same way as a none pte */
6018 if (huge_pte_none_mostly(entry))
6019 /*
6020 * hugetlb_no_page will drop the vma lock and hugetlb fault
6021 * mutex internally, which makes us return immediately.
6022 */
6023 return hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
6024 entry, flags);
6025
6026 ret = 0;
6027
6028 /*
6029 * entry could be a migration/hwpoison entry at this point, so this
6030 * check prevents the kernel from going below and assuming that we
6031 * have an active hugepage in the pagecache. This goto defers handling
6032 * to the 2nd page fault, whose is_hugetlb_entry_(migration|hwpoisoned)
6033 * checks will handle it properly.
6034 */
6035 if (!pte_present(entry))
6036 goto out_mutex;
6037
6038 /*
6039 * If we are going to COW/unshare the mapping later, we examine the
6040 * pending reservations for this page now. This will ensure that any
6041 * allocations necessary to record that reservation occur outside the
6042 * spinlock. Also lookup the pagecache page now as it is used to
6043 * determine if a reservation has been consumed.
6044 */
6045 if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
6046 !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(entry)) {
6047 if (vma_needs_reservation(h, vma, haddr) < 0) {
6048 ret = VM_FAULT_OOM;
6049 goto out_mutex;
6050 }
6051 /* Just decrements count, does not deallocate */
6052 vma_end_reservation(h, vma, haddr);
6053
6054 pagecache_page = find_lock_page(mapping, idx);
6055 }
6056
6057 ptl = huge_pte_lock(h, mm, ptep);
6058
6059 /* Check for a racing update before calling hugetlb_wp() */
6060 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
6061 goto out_ptl;
6062
6063 /* Handle userfault-wp first, before trying to lock more pages */
6064 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(ptep)) &&
6065 (flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
6066 struct vm_fault vmf = {
6067 .vma = vma,
6068 .address = haddr,
6069 .real_address = address,
6070 .flags = flags,
6071 };
6072
6073 spin_unlock(ptl);
6074 if (pagecache_page) {
6075 unlock_page(pagecache_page);
6076 put_page(pagecache_page);
6077 }
6078 hugetlb_vma_unlock_read(vma);
6079 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6080 return handle_userfault(&vmf, VM_UFFD_WP);
6081 }
6082
6083 /*
6084 * hugetlb_wp() requires page locks of pte_page(entry) and
6085 * pagecache_page, so here we need to take the former
6086 * when page != pagecache_page or pagecache_page is NULL.
6087 */
6088 page = pte_page(entry);
6089 if (page != pagecache_page)
6090 if (!trylock_page(page)) {
6091 need_wait_lock = 1;
6092 goto out_ptl;
6093 }
6094
6095 get_page(page);
6096
6097 if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
6098 if (!huge_pte_write(entry)) {
6099 ret = hugetlb_wp(mm, vma, address, ptep, flags,
6100 pagecache_page, ptl);
6101 goto out_put_page;
6102 } else if (likely(flags & FAULT_FLAG_WRITE)) {
6103 entry = huge_pte_mkdirty(entry);
6104 }
6105 }
6106 entry = pte_mkyoung(entry);
6107 if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
6108 flags & FAULT_FLAG_WRITE))
6109 update_mmu_cache(vma, haddr, ptep);
6110 out_put_page:
6111 if (page != pagecache_page)
6112 unlock_page(page);
6113 put_page(page);
6114 out_ptl:
6115 spin_unlock(ptl);
6116
6117 if (pagecache_page) {
6118 unlock_page(pagecache_page);
6119 put_page(pagecache_page);
6120 }
6121 out_mutex:
6122 hugetlb_vma_unlock_read(vma);
6123 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6124 /*
6125 * Generally it is safe to hold a refcount while waiting for a page
6126 * lock. Here, however, we only wait to defer the next page fault and
6127 * avoid a busy loop; the page is not used after it is unlocked and
6128 * before the current page fault returns, so we are safe from accessing
6129 * a freed page even though we wait here without taking a refcount.
6130 */
6131 if (need_wait_lock)
6132 wait_on_page_locked(page);
6133 return ret;
6134 }
6135
6136 #ifdef CONFIG_USERFAULTFD
6137 /*
6138 * Used by userfaultfd UFFDIO_COPY. Based on mcopy_atomic_pte with
6139 * modifications for huge pages.
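 *
 * A rough sketch of the caller's retry protocol (see the caller in
 * mm/userfaultfd.c; simplified, error handling omitted):
 *
 *	page = NULL;
 *	ret = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr,
 *				       src_addr, mode, &page, wp_copy);
 *	if (ret == -ENOENT) {
 *		// release mmap_lock, copy the userspace data into 'page',
 *		// then retake mmap_lock and call again with the filled
 *		// page still in 'page'
 *	}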
6140 */
6141 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
6142 pte_t *dst_pte,
6143 struct vm_area_struct *dst_vma,
6144 unsigned long dst_addr,
6145 unsigned long src_addr,
6146 enum mcopy_atomic_mode mode,
6147 struct page **pagep,
6148 bool wp_copy)
6149 {
6150 bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE);
6151 struct hstate *h = hstate_vma(dst_vma);
6152 struct address_space *mapping = dst_vma->vm_file->f_mapping;
6153 pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
6154 unsigned long size;
6155 int vm_shared = dst_vma->vm_flags & VM_SHARED;
6156 pte_t _dst_pte;
6157 spinlock_t *ptl;
6158 int ret = -ENOMEM;
6159 struct page *page;
6160 int writable;
6161 bool page_in_pagecache = false;
6162
6163 if (is_continue) {
6164 ret = -EFAULT;
6165 page = find_lock_page(mapping, idx);
6166 if (!page)
6167 goto out;
6168 page_in_pagecache = true;
6169 } else if (!*pagep) {
6170 /* If a page already exists, then it's UFFDIO_COPY for
6171 * a non-missing case. Return -EEXIST.
6172 */
6173 if (vm_shared &&
6174 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6175 ret = -EEXIST;
6176 goto out;
6177 }
6178
6179 page = alloc_huge_page(dst_vma, dst_addr, 0);
6180 if (IS_ERR(page)) {
6181 ret = -ENOMEM;
6182 goto out;
6183 }
6184
6185 ret = copy_huge_page_from_user(page,
6186 (const void __user *) src_addr,
6187 pages_per_huge_page(h), false);
6188
6189 /* fallback to copy_from_user outside mmap_lock */
6190 if (unlikely(ret)) {
6191 ret = -ENOENT;
6192 /* Free the allocated page which may have
6193 * consumed a reservation.
6194 */
6195 restore_reserve_on_error(h, dst_vma, dst_addr, page);
6196 put_page(page);
6197
6198 /* Allocate a temporary page to hold the copied
6199 * contents.
6200 */
6201 page = alloc_huge_page_vma(h, dst_vma, dst_addr);
6202 if (!page) {
6203 ret = -ENOMEM;
6204 goto out;
6205 }
6206 *pagep = page;
6207 /* Set the outparam pagep and return to the caller to
6208 * copy the contents outside the lock. Don't free the
6209 * page.
6210 */
6211 goto out;
6212 }
6213 } else {
6214 if (vm_shared &&
6215 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6216 put_page(*pagep);
6217 ret = -EEXIST;
6218 *pagep = NULL;
6219 goto out;
6220 }
6221
6222 page = alloc_huge_page(dst_vma, dst_addr, 0);
6223 if (IS_ERR(page)) {
6224 put_page(*pagep);
6225 ret = -ENOMEM;
6226 *pagep = NULL;
6227 goto out;
6228 }
6229 copy_user_huge_page(page, *pagep, dst_addr, dst_vma,
6230 pages_per_huge_page(h));
6231 put_page(*pagep);
6232 *pagep = NULL;
6233 }
6234
6235 /*
6236 * The memory barrier inside __SetPageUptodate makes sure that
6237 * preceding stores to the page contents become visible before
6238 * the set_pte_at() write.
6239 */
6240 __SetPageUptodate(page);
6241
6242 /* Add shared, newly allocated pages to the page cache. */
6243 if (vm_shared && !is_continue) {
6244 size = i_size_read(mapping->host) >> huge_page_shift(h);
6245 ret = -EFAULT;
6246 if (idx >= size)
6247 goto out_release_nounlock;
6248
6249 /*
6250 * Serialization between remove_inode_hugepages() and
6251 * hugetlb_add_to_page_cache() below happens through the
6252 * hugetlb_fault_mutex_table that here must be held by
6253 * the caller.
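 * (Both sides take the mutex for the same (mapping, idx) pair via
 * hugetlb_fault_mutex_hash(), so they always contend on the same
 * entry of the table.)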
6254 */ 6255 ret = hugetlb_add_to_page_cache(page, mapping, idx); 6256 if (ret) 6257 goto out_release_nounlock; 6258 page_in_pagecache = true; 6259 } 6260 6261 ptl = huge_pte_lock(h, dst_mm, dst_pte); 6262 6263 ret = -EIO; 6264 if (PageHWPoison(page)) 6265 goto out_release_unlock; 6266 6267 /* 6268 * We allow to overwrite a pte marker: consider when both MISSING|WP 6269 * registered, we firstly wr-protect a none pte which has no page cache 6270 * page backing it, then access the page. 6271 */ 6272 ret = -EEXIST; 6273 if (!huge_pte_none_mostly(huge_ptep_get(dst_pte))) 6274 goto out_release_unlock; 6275 6276 if (page_in_pagecache) 6277 page_dup_file_rmap(page, true); 6278 else 6279 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr); 6280 6281 /* 6282 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY 6283 * with wp flag set, don't set pte write bit. 6284 */ 6285 if (wp_copy || (is_continue && !vm_shared)) 6286 writable = 0; 6287 else 6288 writable = dst_vma->vm_flags & VM_WRITE; 6289 6290 _dst_pte = make_huge_pte(dst_vma, page, writable); 6291 /* 6292 * Always mark UFFDIO_COPY page dirty; note that this may not be 6293 * extremely important for hugetlbfs for now since swapping is not 6294 * supported, but we should still be clear in that this page cannot be 6295 * thrown away at will, even if write bit not set. 6296 */ 6297 _dst_pte = huge_pte_mkdirty(_dst_pte); 6298 _dst_pte = pte_mkyoung(_dst_pte); 6299 6300 if (wp_copy) 6301 _dst_pte = huge_pte_mkuffd_wp(_dst_pte); 6302 6303 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); 6304 6305 hugetlb_count_add(pages_per_huge_page(h), dst_mm); 6306 6307 /* No need to invalidate - it was non-present before */ 6308 update_mmu_cache(dst_vma, dst_addr, dst_pte); 6309 6310 spin_unlock(ptl); 6311 if (!is_continue) 6312 SetHPageMigratable(page); 6313 if (vm_shared || is_continue) 6314 unlock_page(page); 6315 ret = 0; 6316 out: 6317 return ret; 6318 out_release_unlock: 6319 spin_unlock(ptl); 6320 if (vm_shared || is_continue) 6321 unlock_page(page); 6322 out_release_nounlock: 6323 if (!page_in_pagecache) 6324 restore_reserve_on_error(h, dst_vma, dst_addr, page); 6325 put_page(page); 6326 goto out; 6327 } 6328 #endif /* CONFIG_USERFAULTFD */ 6329 6330 static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma, 6331 int refs, struct page **pages, 6332 struct vm_area_struct **vmas) 6333 { 6334 int nr; 6335 6336 for (nr = 0; nr < refs; nr++) { 6337 if (likely(pages)) 6338 pages[nr] = nth_page(page, nr); 6339 if (vmas) 6340 vmas[nr] = vma; 6341 } 6342 } 6343 6344 static inline bool __follow_hugetlb_must_fault(struct vm_area_struct *vma, 6345 unsigned int flags, pte_t *pte, 6346 bool *unshare) 6347 { 6348 pte_t pteval = huge_ptep_get(pte); 6349 6350 *unshare = false; 6351 if (is_swap_pte(pteval)) 6352 return true; 6353 if (huge_pte_write(pteval)) 6354 return false; 6355 if (flags & FOLL_WRITE) 6356 return true; 6357 if (gup_must_unshare(vma, flags, pte_page(pteval))) { 6358 *unshare = true; 6359 return true; 6360 } 6361 return false; 6362 } 6363 6364 struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma, 6365 unsigned long address, unsigned int flags) 6366 { 6367 struct hstate *h = hstate_vma(vma); 6368 struct mm_struct *mm = vma->vm_mm; 6369 unsigned long haddr = address & huge_page_mask(h); 6370 struct page *page = NULL; 6371 spinlock_t *ptl; 6372 pte_t *pte, entry; 6373 6374 /* 6375 * FOLL_PIN is not supported for follow_page(). Ordinary GUP goes via 6376 * follow_hugetlb_page(). 
6377 */
6378 if (WARN_ON_ONCE(flags & FOLL_PIN))
6379 return NULL;
6380
6381 retry:
6382 pte = huge_pte_offset(mm, haddr, huge_page_size(h));
6383 if (!pte)
6384 return NULL;
6385
6386 ptl = huge_pte_lock(h, mm, pte);
6387 entry = huge_ptep_get(pte);
6388 if (pte_present(entry)) {
6389 page = pte_page(entry) +
6390 ((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
6391 /*
6392 * Note that page may be a sub-page, and with vmemmap
6393 * optimizations the page struct may be read only.
6394 * try_grab_page() will increase the ref count on the
6395 * head page, so this will be OK.
6396 *
6397 * try_grab_page() should always be able to get the page here,
6398 * because we hold the ptl lock and have verified pte_present().
6399 */
6400 if (try_grab_page(page, flags)) {
6401 page = NULL;
6402 goto out;
6403 }
6404 } else {
6405 if (is_hugetlb_entry_migration(entry)) {
6406 spin_unlock(ptl);
6407 __migration_entry_wait_huge(pte, ptl);
6408 goto retry;
6409 }
6410 /*
6411 * A hwpoisoned entry is treated as no_page_table in
6412 * follow_page_mask().
6413 */
6414 }
6415 out:
6416 spin_unlock(ptl);
6417 return page;
6418 }
6419
6420 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
6421 struct page **pages, struct vm_area_struct **vmas,
6422 unsigned long *position, unsigned long *nr_pages,
6423 long i, unsigned int flags, int *locked)
6424 {
6425 unsigned long pfn_offset;
6426 unsigned long vaddr = *position;
6427 unsigned long remainder = *nr_pages;
6428 struct hstate *h = hstate_vma(vma);
6429 int err = -EFAULT, refs;
6430
6431 while (vaddr < vma->vm_end && remainder) {
6432 pte_t *pte;
6433 spinlock_t *ptl = NULL;
6434 bool unshare = false;
6435 int absent;
6436 struct page *page;
6437
6438 /*
6439 * If we have a pending SIGKILL, don't keep faulting pages and
6440 * potentially allocating memory.
6441 */
6442 if (fatal_signal_pending(current)) {
6443 remainder = 0;
6444 break;
6445 }
6446
6447 /*
6448 * Some archs (sparc64, sh*) have multiple pte_t entries for
6449 * each hugepage. We have to make sure we get the
6450 * first, for the page indexing below to work.
6451 *
6452 * Note that the page table lock is not held when pte is null.
6453 */
6454 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
6455 huge_page_size(h));
6456 if (pte)
6457 ptl = huge_pte_lock(h, mm, pte);
6458 absent = !pte || huge_pte_none(huge_ptep_get(pte));
6459
6460 /*
6461 * When coredumping, it suits get_dump_page if we just return
6462 * an error where there's an empty slot with no huge pagecache
6463 * to back it. This way, we avoid allocating a hugepage, and
6464 * the sparse dumpfile avoids allocating disk blocks, but its
6465 * huge holes still show up with zeroes where they need to be.
6466 */
6467 if (absent && (flags & FOLL_DUMP) &&
6468 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
6469 if (pte)
6470 spin_unlock(ptl);
6471 remainder = 0;
6472 break;
6473 }
6474
6475 /*
6476 * We need to call hugetlb_fault for both hugepages under
6477 * migration (in which case hugetlb_fault waits for the
6478 * migration) and hwpoisoned hugepages (in which case we need
6479 * to prevent the caller from accessing them). To do this, we
6480 * use is_swap_pte here instead of is_hugetlb_entry_migration
6481 * and is_hugetlb_entry_hwpoisoned, because it simply covers
6482 * both cases, and because we can't follow correct pages
6483 * directly from any kind of swap entries.
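 * (Both is_hugetlb_entry_migration() and is_hugetlb_entry_hwpoisoned()
 * imply is_swap_pte(): migration and hwpoison entries are both encoded
 * as swap entries in a non-present pte, so the one check covers them.)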
6484 */
6485 if (absent ||
6486 __follow_hugetlb_must_fault(vma, flags, pte, &unshare)) {
6487 vm_fault_t ret;
6488 unsigned int fault_flags = 0;
6489
6490 if (pte)
6491 spin_unlock(ptl);
6492 if (flags & FOLL_WRITE)
6493 fault_flags |= FAULT_FLAG_WRITE;
6494 else if (unshare)
6495 fault_flags |= FAULT_FLAG_UNSHARE;
6496 if (locked) {
6497 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
6498 FAULT_FLAG_KILLABLE;
6499 if (flags & FOLL_INTERRUPTIBLE)
6500 fault_flags |= FAULT_FLAG_INTERRUPTIBLE;
6501 }
6502 if (flags & FOLL_NOWAIT)
6503 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
6504 FAULT_FLAG_RETRY_NOWAIT;
6505 if (flags & FOLL_TRIED) {
6506 /*
6507 * Note: FAULT_FLAG_ALLOW_RETRY and
6508 * FAULT_FLAG_TRIED can co-exist
6509 */
6510 fault_flags |= FAULT_FLAG_TRIED;
6511 }
6512 ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
6513 if (ret & VM_FAULT_ERROR) {
6514 err = vm_fault_to_errno(ret, flags);
6515 remainder = 0;
6516 break;
6517 }
6518 if (ret & VM_FAULT_RETRY) {
6519 if (locked &&
6520 !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
6521 *locked = 0;
6522 *nr_pages = 0;
6523 /*
6524 * VM_FAULT_RETRY must not return an
6525 * error; it will return zero
6526 * instead.
6527 *
6528 * No need to update "position" as the
6529 * caller will not check it after
6530 * *nr_pages is set to 0.
6531 */
6532 return i;
6533 }
6534 continue;
6535 }
6536
6537 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
6538 page = pte_page(huge_ptep_get(pte));
6539
6540 VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
6541 !PageAnonExclusive(page), page);
6542
6543 /*
6544 * If subpage information is not requested, update counters
6545 * and skip the same_page loop below.
6546 */
6547 if (!pages && !vmas && !pfn_offset &&
6548 (vaddr + huge_page_size(h) < vma->vm_end) &&
6549 (remainder >= pages_per_huge_page(h))) {
6550 vaddr += huge_page_size(h);
6551 remainder -= pages_per_huge_page(h);
6552 i += pages_per_huge_page(h);
6553 spin_unlock(ptl);
6554 continue;
6555 }
6556
6557 /* vaddr may not be aligned to PAGE_SIZE */
6558 refs = min3(pages_per_huge_page(h) - pfn_offset, remainder,
6559 (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);
6560
6561 if (pages || vmas)
6562 record_subpages_vmas(nth_page(page, pfn_offset),
6563 vma, refs,
6564 likely(pages) ? pages + i : NULL,
6565 vmas ? vmas + i : NULL);
6566
6567 if (pages) {
6568 /*
6569 * try_grab_folio() should always succeed here,
6570 * because: a) we hold the ptl lock, and b) we've just
6571 * checked that the huge page is present in the page
6572 * tables. If the huge page is present, then the tail
6573 * pages must also be present. The ptl prevents the
6574 * head page and tail pages from being rearranged in
6575 * any way. As this is hugetlb, the pages are never
6576 * p2pdma pages and are always longterm pinnable. So
6577 * this page must be available at this point, unless
6578 * the page refcount overflowed:
6579 */
6580 if (WARN_ON_ONCE(!try_grab_folio(pages[i], refs,
6581 flags))) {
6582 spin_unlock(ptl);
6583 remainder = 0;
6584 err = -ENOMEM;
6585 break;
6586 }
6587 }
6588
6589 vaddr += (refs << PAGE_SHIFT);
6590 remainder -= refs;
6591 i += refs;
6592
6593 spin_unlock(ptl);
6594 }
6595 *nr_pages = remainder;
6596 /*
6597 * Setting position is actually required only if remainder is
6598 * not zero, but it's faster not to add an "if (remainder)"
6599 * branch.
6600 */
6601 *position = vaddr;
6602
6603 return i ?
i : err; 6604 } 6605 6606 unsigned long hugetlb_change_protection(struct vm_area_struct *vma, 6607 unsigned long address, unsigned long end, 6608 pgprot_t newprot, unsigned long cp_flags) 6609 { 6610 struct mm_struct *mm = vma->vm_mm; 6611 unsigned long start = address; 6612 pte_t *ptep; 6613 pte_t pte; 6614 struct hstate *h = hstate_vma(vma); 6615 unsigned long pages = 0, psize = huge_page_size(h); 6616 bool shared_pmd = false; 6617 struct mmu_notifier_range range; 6618 unsigned long last_addr_mask; 6619 bool uffd_wp = cp_flags & MM_CP_UFFD_WP; 6620 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; 6621 6622 /* 6623 * In the case of shared PMDs, the area to flush could be beyond 6624 * start/end. Set range.start/range.end to cover the maximum possible 6625 * range if PMD sharing is possible. 6626 */ 6627 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 6628 0, vma, mm, start, end); 6629 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 6630 6631 BUG_ON(address >= end); 6632 flush_cache_range(vma, range.start, range.end); 6633 6634 mmu_notifier_invalidate_range_start(&range); 6635 hugetlb_vma_lock_write(vma); 6636 i_mmap_lock_write(vma->vm_file->f_mapping); 6637 last_addr_mask = hugetlb_mask_last_page(h); 6638 for (; address < end; address += psize) { 6639 spinlock_t *ptl; 6640 ptep = huge_pte_offset(mm, address, psize); 6641 if (!ptep) { 6642 address |= last_addr_mask; 6643 continue; 6644 } 6645 ptl = huge_pte_lock(h, mm, ptep); 6646 if (huge_pmd_unshare(mm, vma, address, ptep)) { 6647 /* 6648 * When uffd-wp is enabled on the vma, unshare 6649 * shouldn't happen at all. Warn about it if it 6650 * happened due to some reason. 6651 */ 6652 WARN_ON_ONCE(uffd_wp || uffd_wp_resolve); 6653 pages++; 6654 spin_unlock(ptl); 6655 shared_pmd = true; 6656 address |= last_addr_mask; 6657 continue; 6658 } 6659 pte = huge_ptep_get(ptep); 6660 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { 6661 spin_unlock(ptl); 6662 continue; 6663 } 6664 if (unlikely(is_hugetlb_entry_migration(pte))) { 6665 swp_entry_t entry = pte_to_swp_entry(pte); 6666 struct page *page = pfn_swap_entry_to_page(entry); 6667 6668 if (!is_readable_migration_entry(entry)) { 6669 pte_t newpte; 6670 6671 if (PageAnon(page)) 6672 entry = make_readable_exclusive_migration_entry( 6673 swp_offset(entry)); 6674 else 6675 entry = make_readable_migration_entry( 6676 swp_offset(entry)); 6677 newpte = swp_entry_to_pte(entry); 6678 if (uffd_wp) 6679 newpte = pte_swp_mkuffd_wp(newpte); 6680 else if (uffd_wp_resolve) 6681 newpte = pte_swp_clear_uffd_wp(newpte); 6682 set_huge_pte_at(mm, address, ptep, newpte); 6683 pages++; 6684 } 6685 spin_unlock(ptl); 6686 continue; 6687 } 6688 if (unlikely(pte_marker_uffd_wp(pte))) { 6689 /* 6690 * This is changing a non-present pte into a none pte, 6691 * no need for huge_ptep_modify_prot_start/commit(). 
6692 */ 6693 if (uffd_wp_resolve) 6694 huge_pte_clear(mm, address, ptep, psize); 6695 } 6696 if (!huge_pte_none(pte)) { 6697 pte_t old_pte; 6698 unsigned int shift = huge_page_shift(hstate_vma(vma)); 6699 6700 old_pte = huge_ptep_modify_prot_start(vma, address, ptep); 6701 pte = huge_pte_modify(old_pte, newprot); 6702 pte = arch_make_huge_pte(pte, shift, vma->vm_flags); 6703 if (uffd_wp) 6704 pte = huge_pte_mkuffd_wp(huge_pte_wrprotect(pte)); 6705 else if (uffd_wp_resolve) 6706 pte = huge_pte_clear_uffd_wp(pte); 6707 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte); 6708 pages++; 6709 } else { 6710 /* None pte */ 6711 if (unlikely(uffd_wp)) 6712 /* Safe to modify directly (none->non-present). */ 6713 set_huge_pte_at(mm, address, ptep, 6714 make_pte_marker(PTE_MARKER_UFFD_WP)); 6715 } 6716 spin_unlock(ptl); 6717 } 6718 /* 6719 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare 6720 * may have cleared our pud entry and done put_page on the page table: 6721 * once we release i_mmap_rwsem, another task can do the final put_page 6722 * and that page table be reused and filled with junk. If we actually 6723 * did unshare a page of pmds, flush the range corresponding to the pud. 6724 */ 6725 if (shared_pmd) 6726 flush_hugetlb_tlb_range(vma, range.start, range.end); 6727 else 6728 flush_hugetlb_tlb_range(vma, start, end); 6729 /* 6730 * No need to call mmu_notifier_invalidate_range() we are downgrading 6731 * page table protection not changing it to point to a new page. 6732 * 6733 * See Documentation/mm/mmu_notifier.rst 6734 */ 6735 i_mmap_unlock_write(vma->vm_file->f_mapping); 6736 hugetlb_vma_unlock_write(vma); 6737 mmu_notifier_invalidate_range_end(&range); 6738 6739 return pages << h->order; 6740 } 6741 6742 /* Return true if reservation was successful, false otherwise. */ 6743 bool hugetlb_reserve_pages(struct inode *inode, 6744 long from, long to, 6745 struct vm_area_struct *vma, 6746 vm_flags_t vm_flags) 6747 { 6748 long chg, add = -1; 6749 struct hstate *h = hstate_inode(inode); 6750 struct hugepage_subpool *spool = subpool_inode(inode); 6751 struct resv_map *resv_map; 6752 struct hugetlb_cgroup *h_cg = NULL; 6753 long gbl_reserve, regions_needed = 0; 6754 6755 /* This should never happen */ 6756 if (from > to) { 6757 VM_WARN(1, "%s called with a negative range\n", __func__); 6758 return false; 6759 } 6760 6761 /* 6762 * vma specific semaphore used for pmd sharing and fault/truncation 6763 * synchronization 6764 */ 6765 hugetlb_vma_lock_alloc(vma); 6766 6767 /* 6768 * Only apply hugepage reservation if asked. At fault time, an 6769 * attempt will be made for VM_NORESERVE to allocate a page 6770 * without using reserves 6771 */ 6772 if (vm_flags & VM_NORESERVE) 6773 return true; 6774 6775 /* 6776 * Shared mappings base their reservation on the number of pages that 6777 * are already allocated on behalf of the file. Private mappings need 6778 * to reserve the full area even if read-only as mprotect() may be 6779 * called to make the mapping read-write. Assume !vma is a shm mapping 6780 */ 6781 if (!vma || vma->vm_flags & VM_MAYSHARE) { 6782 /* 6783 * resv_map can not be NULL as hugetlb_reserve_pages is only 6784 * called for inodes for which resv_maps were created (see 6785 * hugetlbfs_get_inode). 6786 */ 6787 resv_map = inode_resv_map(inode); 6788 6789 chg = region_chg(resv_map, from, to, ®ions_needed); 6790 } else { 6791 /* Private mapping. 
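 * For example, privately mmap()ing 64 huge pages sets chg = to - from = 64
 * below: the full reservation is charged up front, even if only a few of
 * the pages are ever faulted in.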
*/
6792 resv_map = resv_map_alloc();
6793 if (!resv_map)
6794 goto out_err;
6795
6796 chg = to - from;
6797
6798 set_vma_resv_map(vma, resv_map);
6799 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
6800 }
6801
6802 if (chg < 0)
6803 goto out_err;
6804
6805 if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
6806 chg * pages_per_huge_page(h), &h_cg) < 0)
6807 goto out_err;
6808
6809 if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
6810 /* For private mappings, the hugetlb_cgroup uncharge info hangs
6811 * off the resv_map.
6812 */
6813 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
6814 }
6815
6816 /*
6817 * There must be enough pages in the subpool for the mapping. If
6818 * the subpool has a minimum size, there may be some global
6819 * reservations already in place (gbl_reserve).
6820 */
6821 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
6822 if (gbl_reserve < 0)
6823 goto out_uncharge_cgroup;
6824
6825 /*
6826 * Check that enough hugepages are available for the reservation.
6827 * Hand the pages back to the subpool if they are not.
6828 */
6829 if (hugetlb_acct_memory(h, gbl_reserve) < 0)
6830 goto out_put_pages;
6831
6832 /*
6833 * Account for the reservations made. Shared mappings record regions
6834 * that have reservations as they are shared by multiple VMAs.
6835 * When the last VMA disappears, the region map says how much
6836 * the reservation was and the page cache tells how much of
6837 * the reservation was consumed. Private mappings are per-VMA and
6838 * only the consumed reservations are tracked. When the VMA
6839 * disappears, the original reservation is the VMA size and the
6840 * consumed reservations are stored in the map. Hence, nothing
6841 * else has to be done for private mappings here.
6842 */
6843 if (!vma || vma->vm_flags & VM_MAYSHARE) {
6844 add = region_add(resv_map, from, to, regions_needed, h, h_cg);
6845
6846 if (unlikely(add < 0)) {
6847 hugetlb_acct_memory(h, -gbl_reserve);
6848 goto out_put_pages;
6849 } else if (unlikely(chg > add)) {
6850 /*
6851 * pages in this range were added to the reserve
6852 * map between region_chg and region_add. This
6853 * indicates a race with alloc_huge_page. Adjust
6854 * the subpool and reserve counts modified above
6855 * based on the difference.
6856 */
6857 long rsv_adjust;
6858
6859 /*
6860 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
6861 * reference to h_cg->css. See comment below for detail.
6862 */
6863 hugetlb_cgroup_uncharge_cgroup_rsvd(
6864 hstate_index(h),
6865 (chg - add) * pages_per_huge_page(h), h_cg);
6866
6867 rsv_adjust = hugepage_subpool_put_pages(spool,
6868 chg - add);
6869 hugetlb_acct_memory(h, -rsv_adjust);
6870 } else if (h_cg) {
6871 /*
6872 * The file_regions will hold their own reference to
6873 * h_cg->css. So we should release the reference held
6874 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
6875 * done.
6876 */
6877 hugetlb_cgroup_put_rsvd_cgroup(h_cg);
6878 }
6879 }
6880 return true;
6881
6882 out_put_pages:
6883 /* put back original number of pages, chg */
6884 (void)hugepage_subpool_put_pages(spool, chg);
6885 out_uncharge_cgroup:
6886 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
6887 chg * pages_per_huge_page(h), h_cg);
6888 out_err:
6889 hugetlb_vma_lock_free(vma);
6890 if (!vma || vma->vm_flags & VM_MAYSHARE)
6891 /* Only call region_abort if the region_chg succeeded but the
6892 * region_add failed or didn't run.
6893 */ 6894 if (chg >= 0 && add < 0) 6895 region_abort(resv_map, from, to, regions_needed); 6896 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 6897 kref_put(&resv_map->refs, resv_map_release); 6898 return false; 6899 } 6900 6901 long hugetlb_unreserve_pages(struct inode *inode, long start, long end, 6902 long freed) 6903 { 6904 struct hstate *h = hstate_inode(inode); 6905 struct resv_map *resv_map = inode_resv_map(inode); 6906 long chg = 0; 6907 struct hugepage_subpool *spool = subpool_inode(inode); 6908 long gbl_reserve; 6909 6910 /* 6911 * Since this routine can be called in the evict inode path for all 6912 * hugetlbfs inodes, resv_map could be NULL. 6913 */ 6914 if (resv_map) { 6915 chg = region_del(resv_map, start, end); 6916 /* 6917 * region_del() can fail in the rare case where a region 6918 * must be split and another region descriptor can not be 6919 * allocated. If end == LONG_MAX, it will not fail. 6920 */ 6921 if (chg < 0) 6922 return chg; 6923 } 6924 6925 spin_lock(&inode->i_lock); 6926 inode->i_blocks -= (blocks_per_huge_page(h) * freed); 6927 spin_unlock(&inode->i_lock); 6928 6929 /* 6930 * If the subpool has a minimum size, the number of global 6931 * reservations to be released may be adjusted. 6932 * 6933 * Note that !resv_map implies freed == 0. So (chg - freed) 6934 * won't go negative. 6935 */ 6936 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed)); 6937 hugetlb_acct_memory(h, -gbl_reserve); 6938 6939 return 0; 6940 } 6941 6942 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE 6943 static unsigned long page_table_shareable(struct vm_area_struct *svma, 6944 struct vm_area_struct *vma, 6945 unsigned long addr, pgoff_t idx) 6946 { 6947 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) + 6948 svma->vm_start; 6949 unsigned long sbase = saddr & PUD_MASK; 6950 unsigned long s_end = sbase + PUD_SIZE; 6951 6952 /* Allow segments to share if only one is marked locked */ 6953 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; 6954 unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK; 6955 6956 /* 6957 * match the virtual addresses, permission and the alignment of the 6958 * page table page. 6959 * 6960 * Also, vma_lock (vm_private_data) is required for sharing. 6961 */ 6962 if (pmd_index(addr) != pmd_index(saddr) || 6963 vm_flags != svm_flags || 6964 !range_in_vma(svma, sbase, s_end) || 6965 !svma->vm_private_data) 6966 return 0; 6967 6968 return saddr; 6969 } 6970 6971 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) 6972 { 6973 unsigned long start = addr & PUD_MASK; 6974 unsigned long end = start + PUD_SIZE; 6975 6976 #ifdef CONFIG_USERFAULTFD 6977 if (uffd_disable_huge_pmd_share(vma)) 6978 return false; 6979 #endif 6980 /* 6981 * check on proper vm_flags and page table alignment 6982 */ 6983 if (!(vma->vm_flags & VM_MAYSHARE)) 6984 return false; 6985 if (!vma->vm_private_data) /* vma lock required for sharing */ 6986 return false; 6987 if (!range_in_vma(vma, start, end)) 6988 return false; 6989 return true; 6990 } 6991 6992 /* 6993 * Determine if start,end range within vma could be mapped by shared pmd. 6994 * If yes, adjust start and end to cover range associated with possible 6995 * shared pmd mappings. 
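 *
 * Worked example (assuming PUD_SIZE == 1GiB): for a vma spanning
 * [1GiB, 4GiB), a range of [1.5GiB, 2.5GiB) is widened to [1GiB, 3GiB),
 * so that any PUD-level entries shared anywhere within it are covered.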
6996 */
6997 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
6998 unsigned long *start, unsigned long *end)
6999 {
7000 unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
7001 v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
7002
7003 /*
7004 * The vma needs to span at least one aligned PUD size, and the range
7005 * must be at least partially within it.
7006 */
7007 if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
7008 (*end <= v_start) || (*start >= v_end))
7009 return;
7010
7011 /* Extend the range to be PUD aligned for a worst case scenario */
7012 if (*start > v_start)
7013 *start = ALIGN_DOWN(*start, PUD_SIZE);
7014
7015 if (*end < v_end)
7016 *end = ALIGN(*end, PUD_SIZE);
7017 }
7018
7019 /*
7020 * Search for a shareable pmd page for hugetlb. In any case, calls pmd_alloc()
7021 * and returns the corresponding pte. While this is not necessary for the
7022 * !shared pmd case because we can allocate the pmd later as well, it makes the
7023 * code much cleaner. pmd allocation is essential for the shared case because
7024 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
7025 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
7026 * bad pmd for sharing.
7027 */
7028 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
7029 unsigned long addr, pud_t *pud)
7030 {
7031 struct address_space *mapping = vma->vm_file->f_mapping;
7032 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
7033 vma->vm_pgoff;
7034 struct vm_area_struct *svma;
7035 unsigned long saddr;
7036 pte_t *spte = NULL;
7037 pte_t *pte;
7038 spinlock_t *ptl;
7039
7040 i_mmap_lock_read(mapping);
7041 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
7042 if (svma == vma)
7043 continue;
7044
7045 saddr = page_table_shareable(svma, vma, addr, idx);
7046 if (saddr) {
7047 spte = huge_pte_offset(svma->vm_mm, saddr,
7048 vma_mmu_pagesize(svma));
7049 if (spte) {
7050 get_page(virt_to_page(spte));
7051 break;
7052 }
7053 }
7054 }
7055
7056 if (!spte)
7057 goto out;
7058
7059 ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
7060 if (pud_none(*pud)) {
7061 pud_populate(mm, pud,
7062 (pmd_t *)((unsigned long)spte & PAGE_MASK));
7063 mm_inc_nr_pmds(mm);
7064 } else {
7065 put_page(virt_to_page(spte));
7066 }
7067 spin_unlock(ptl);
7068 out:
7069 pte = (pte_t *)pmd_alloc(mm, pud, addr);
7070 i_mmap_unlock_read(mapping);
7071 return pte;
7072 }
7073
7074 /*
7075 * Unmap a huge page backed by a shared pte.
7076 *
7077 * The hugetlb pte page is ref counted at the time of mapping. If the pte is
7078 * shared, as indicated by page_count > 1, unmap is achieved by clearing the
7079 * pud and decrementing the ref count. If count == 1, the pte page is not shared.
7080 *
7081 * Called with page table lock held.
7082 * 7083 * returns: 1 successfully unmapped a shared pte page 7084 * 0 the underlying pte page is not shared, or it is the last user 7085 */ 7086 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, 7087 unsigned long addr, pte_t *ptep) 7088 { 7089 pgd_t *pgd = pgd_offset(mm, addr); 7090 p4d_t *p4d = p4d_offset(pgd, addr); 7091 pud_t *pud = pud_offset(p4d, addr); 7092 7093 i_mmap_assert_write_locked(vma->vm_file->f_mapping); 7094 hugetlb_vma_assert_locked(vma); 7095 BUG_ON(page_count(virt_to_page(ptep)) == 0); 7096 if (page_count(virt_to_page(ptep)) == 1) 7097 return 0; 7098 7099 pud_clear(pud); 7100 put_page(virt_to_page(ptep)); 7101 mm_dec_nr_pmds(mm); 7102 return 1; 7103 } 7104 7105 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ 7106 7107 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, 7108 unsigned long addr, pud_t *pud) 7109 { 7110 return NULL; 7111 } 7112 7113 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, 7114 unsigned long addr, pte_t *ptep) 7115 { 7116 return 0; 7117 } 7118 7119 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, 7120 unsigned long *start, unsigned long *end) 7121 { 7122 } 7123 7124 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) 7125 { 7126 return false; 7127 } 7128 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ 7129 7130 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB 7131 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, 7132 unsigned long addr, unsigned long sz) 7133 { 7134 pgd_t *pgd; 7135 p4d_t *p4d; 7136 pud_t *pud; 7137 pte_t *pte = NULL; 7138 7139 pgd = pgd_offset(mm, addr); 7140 p4d = p4d_alloc(mm, pgd, addr); 7141 if (!p4d) 7142 return NULL; 7143 pud = pud_alloc(mm, p4d, addr); 7144 if (pud) { 7145 if (sz == PUD_SIZE) { 7146 pte = (pte_t *)pud; 7147 } else { 7148 BUG_ON(sz != PMD_SIZE); 7149 if (want_pmd_share(vma, addr) && pud_none(*pud)) 7150 pte = huge_pmd_share(mm, vma, addr, pud); 7151 else 7152 pte = (pte_t *)pmd_alloc(mm, pud, addr); 7153 } 7154 } 7155 BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte)); 7156 7157 return pte; 7158 } 7159 7160 /* 7161 * huge_pte_offset() - Walk the page table to resolve the hugepage 7162 * entry at address @addr 7163 * 7164 * Return: Pointer to page table entry (PUD or PMD) for 7165 * address @addr, or NULL if a !p*d_present() entry is encountered and the 7166 * size @sz doesn't match the hugepage size at this level of the page 7167 * table. 7168 */ 7169 pte_t *huge_pte_offset(struct mm_struct *mm, 7170 unsigned long addr, unsigned long sz) 7171 { 7172 pgd_t *pgd; 7173 p4d_t *p4d; 7174 pud_t *pud; 7175 pmd_t *pmd; 7176 7177 pgd = pgd_offset(mm, addr); 7178 if (!pgd_present(*pgd)) 7179 return NULL; 7180 p4d = p4d_offset(pgd, addr); 7181 if (!p4d_present(*p4d)) 7182 return NULL; 7183 7184 pud = pud_offset(p4d, addr); 7185 if (sz == PUD_SIZE) 7186 /* must be pud huge, non-present or none */ 7187 return (pte_t *)pud; 7188 if (!pud_present(*pud)) 7189 return NULL; 7190 /* must have a valid entry and size to go further */ 7191 7192 pmd = pmd_offset(pud, addr); 7193 /* must be pmd huge, non-present or none */ 7194 return (pte_t *)pmd; 7195 } 7196 7197 /* 7198 * Return a mask that can be used to update an address to the last huge 7199 * page in a page table page mapping size. Used to skip non-present 7200 * page table entries when linearly scanning address ranges. Architectures 7201 * with unique huge page to page table relationships can define their own 7202 * version of this routine. 
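 *
 * For example, for PMD_SIZE huge pages the mask below is
 * PUD_SIZE - PMD_SIZE (0x3fe00000 with 2MiB PMDs and 1GiB PUDs):
 * doing 'address |= mask' moves the address to the last PMD slot of the
 * current PUD region, so the next 'address += psize' step in the
 * caller's loop starts at the following PUD boundary.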
7203 */ 7204 unsigned long hugetlb_mask_last_page(struct hstate *h) 7205 { 7206 unsigned long hp_size = huge_page_size(h); 7207 7208 if (hp_size == PUD_SIZE) 7209 return P4D_SIZE - PUD_SIZE; 7210 else if (hp_size == PMD_SIZE) 7211 return PUD_SIZE - PMD_SIZE; 7212 else 7213 return 0UL; 7214 } 7215 7216 #else 7217 7218 /* See description above. Architectures can provide their own version. */ 7219 __weak unsigned long hugetlb_mask_last_page(struct hstate *h) 7220 { 7221 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE 7222 if (huge_page_size(h) == PMD_SIZE) 7223 return PUD_SIZE - PMD_SIZE; 7224 #endif 7225 return 0UL; 7226 } 7227 7228 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ 7229 7230 /* 7231 * These functions are overwritable if your architecture needs its own 7232 * behavior. 7233 */ 7234 int isolate_hugetlb(struct page *page, struct list_head *list) 7235 { 7236 int ret = 0; 7237 7238 spin_lock_irq(&hugetlb_lock); 7239 if (!PageHeadHuge(page) || 7240 !HPageMigratable(page) || 7241 !get_page_unless_zero(page)) { 7242 ret = -EBUSY; 7243 goto unlock; 7244 } 7245 ClearHPageMigratable(page); 7246 list_move_tail(&page->lru, list); 7247 unlock: 7248 spin_unlock_irq(&hugetlb_lock); 7249 return ret; 7250 } 7251 7252 int get_hwpoison_huge_page(struct page *page, bool *hugetlb, bool unpoison) 7253 { 7254 int ret = 0; 7255 7256 *hugetlb = false; 7257 spin_lock_irq(&hugetlb_lock); 7258 if (PageHeadHuge(page)) { 7259 *hugetlb = true; 7260 if (HPageFreed(page)) 7261 ret = 0; 7262 else if (HPageMigratable(page) || unpoison) 7263 ret = get_page_unless_zero(page); 7264 else 7265 ret = -EBUSY; 7266 } 7267 spin_unlock_irq(&hugetlb_lock); 7268 return ret; 7269 } 7270 7271 int get_huge_page_for_hwpoison(unsigned long pfn, int flags, 7272 bool *migratable_cleared) 7273 { 7274 int ret; 7275 7276 spin_lock_irq(&hugetlb_lock); 7277 ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared); 7278 spin_unlock_irq(&hugetlb_lock); 7279 return ret; 7280 } 7281 7282 void putback_active_hugepage(struct page *page) 7283 { 7284 spin_lock_irq(&hugetlb_lock); 7285 SetHPageMigratable(page); 7286 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist); 7287 spin_unlock_irq(&hugetlb_lock); 7288 put_page(page); 7289 } 7290 7291 void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason) 7292 { 7293 struct hstate *h = folio_hstate(old_folio); 7294 7295 hugetlb_cgroup_migrate(old_folio, new_folio); 7296 set_page_owner_migrate_reason(&new_folio->page, reason); 7297 7298 /* 7299 * transfer temporary state of the new hugetlb folio. This is 7300 * reverse to other transitions because the newpage is going to 7301 * be final while the old one will be freed so it takes over 7302 * the temporary status. 7303 * 7304 * Also note that we have to transfer the per-node surplus state 7305 * here as well otherwise the global surplus count will not match 7306 * the per-node's. 7307 */ 7308 if (folio_test_hugetlb_temporary(new_folio)) { 7309 int old_nid = folio_nid(old_folio); 7310 int new_nid = folio_nid(new_folio); 7311 7312 folio_set_hugetlb_temporary(old_folio); 7313 folio_clear_hugetlb_temporary(new_folio); 7314 7315 7316 /* 7317 * There is no need to transfer the per-node surplus state 7318 * when we do not cross the node. 
7319 */
7320 if (new_nid == old_nid)
7321 return;
7322 spin_lock_irq(&hugetlb_lock);
7323 if (h->surplus_huge_pages_node[old_nid]) {
7324 h->surplus_huge_pages_node[old_nid]--;
7325 h->surplus_huge_pages_node[new_nid]++;
7326 }
7327 spin_unlock_irq(&hugetlb_lock);
7328 }
7329 }
7330
7331 /*
7332 * This function will unconditionally remove all the shared pmd pgtable entries
7333 * within the specific vma for a hugetlbfs memory range.
7334 */
7335 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
7336 {
7337 struct hstate *h = hstate_vma(vma);
7338 unsigned long sz = huge_page_size(h);
7339 struct mm_struct *mm = vma->vm_mm;
7340 struct mmu_notifier_range range;
7341 unsigned long address, start, end;
7342 spinlock_t *ptl;
7343 pte_t *ptep;
7344
7345 if (!(vma->vm_flags & VM_MAYSHARE))
7346 return;
7347
7348 start = ALIGN(vma->vm_start, PUD_SIZE);
7349 end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
7350
7351 if (start >= end)
7352 return;
7353
7354 flush_cache_range(vma, start, end);
7355 /*
7356 * No need to call adjust_range_if_pmd_sharing_possible(), because
7357 * we have already done the PUD_SIZE alignment.
7358 */
7359 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
7360 start, end);
7361 mmu_notifier_invalidate_range_start(&range);
7362 hugetlb_vma_lock_write(vma);
7363 i_mmap_lock_write(vma->vm_file->f_mapping);
7364 for (address = start; address < end; address += PUD_SIZE) {
7365 ptep = huge_pte_offset(mm, address, sz);
7366 if (!ptep)
7367 continue;
7368 ptl = huge_pte_lock(h, mm, ptep);
7369 huge_pmd_unshare(mm, vma, address, ptep);
7370 spin_unlock(ptl);
7371 }
7372 flush_hugetlb_tlb_range(vma, start, end);
7373 i_mmap_unlock_write(vma->vm_file->f_mapping);
7374 hugetlb_vma_unlock_write(vma);
7375 /*
7376 * No need to call mmu_notifier_invalidate_range(), see
7377 * Documentation/mm/mmu_notifier.rst.
7378 */
7379 mmu_notifier_invalidate_range_end(&range);
7380 }
7381
7382 #ifdef CONFIG_CMA
7383 static bool cma_reserve_called __initdata;
7384
7385 static int __init cmdline_parse_hugetlb_cma(char *p)
7386 {
7387 int nid, count = 0;
7388 unsigned long tmp;
7389 char *s = p;
7390
7391 while (*s) {
7392 if (sscanf(s, "%lu%n", &tmp, &count) != 1)
7393 break;
7394
7395 if (s[count] == ':') {
7396 if (tmp >= MAX_NUMNODES)
7397 break;
7398 nid = array_index_nospec(tmp, MAX_NUMNODES);
7399
7400 s += count + 1;
7401 tmp = memparse(s, &s);
7402 hugetlb_cma_size_in_node[nid] = tmp;
7403 hugetlb_cma_size += tmp;
7404
7405 /*
7406 * Skip the separator if we have one, otherwise
7407 * break the parsing.
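 *
 * Accepted forms are thus, e.g.:
 *	hugetlb_cma=4G		(one total size, spread across online nodes)
 *	hugetlb_cma=0:2G,1:2G	(explicit per-node sizes)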
7408 */ 7409 if (*s == ',') 7410 s++; 7411 else 7412 break; 7413 } else { 7414 hugetlb_cma_size = memparse(p, &p); 7415 break; 7416 } 7417 } 7418 7419 return 0; 7420 } 7421 7422 early_param("hugetlb_cma", cmdline_parse_hugetlb_cma); 7423 7424 void __init hugetlb_cma_reserve(int order) 7425 { 7426 unsigned long size, reserved, per_node; 7427 bool node_specific_cma_alloc = false; 7428 int nid; 7429 7430 cma_reserve_called = true; 7431 7432 if (!hugetlb_cma_size) 7433 return; 7434 7435 for (nid = 0; nid < MAX_NUMNODES; nid++) { 7436 if (hugetlb_cma_size_in_node[nid] == 0) 7437 continue; 7438 7439 if (!node_online(nid)) { 7440 pr_warn("hugetlb_cma: invalid node %d specified\n", nid); 7441 hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; 7442 hugetlb_cma_size_in_node[nid] = 0; 7443 continue; 7444 } 7445 7446 if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) { 7447 pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n", 7448 nid, (PAGE_SIZE << order) / SZ_1M); 7449 hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; 7450 hugetlb_cma_size_in_node[nid] = 0; 7451 } else { 7452 node_specific_cma_alloc = true; 7453 } 7454 } 7455 7456 /* Validate the CMA size again in case some invalid nodes specified. */ 7457 if (!hugetlb_cma_size) 7458 return; 7459 7460 if (hugetlb_cma_size < (PAGE_SIZE << order)) { 7461 pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n", 7462 (PAGE_SIZE << order) / SZ_1M); 7463 hugetlb_cma_size = 0; 7464 return; 7465 } 7466 7467 if (!node_specific_cma_alloc) { 7468 /* 7469 * If 3 GB area is requested on a machine with 4 numa nodes, 7470 * let's allocate 1 GB on first three nodes and ignore the last one. 7471 */ 7472 per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes); 7473 pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n", 7474 hugetlb_cma_size / SZ_1M, per_node / SZ_1M); 7475 } 7476 7477 reserved = 0; 7478 for_each_online_node(nid) { 7479 int res; 7480 char name[CMA_MAX_NAME]; 7481 7482 if (node_specific_cma_alloc) { 7483 if (hugetlb_cma_size_in_node[nid] == 0) 7484 continue; 7485 7486 size = hugetlb_cma_size_in_node[nid]; 7487 } else { 7488 size = min(per_node, hugetlb_cma_size - reserved); 7489 } 7490 7491 size = round_up(size, PAGE_SIZE << order); 7492 7493 snprintf(name, sizeof(name), "hugetlb%d", nid); 7494 /* 7495 * Note that 'order per bit' is based on smallest size that 7496 * may be returned to CMA allocator in the case of 7497 * huge page demotion. 7498 */ 7499 res = cma_declare_contiguous_nid(0, size, 0, 7500 PAGE_SIZE << HUGETLB_PAGE_ORDER, 7501 0, false, name, 7502 &hugetlb_cma[nid], nid); 7503 if (res) { 7504 pr_warn("hugetlb_cma: reservation failed: err %d, node %d", 7505 res, nid); 7506 continue; 7507 } 7508 7509 reserved += size; 7510 pr_info("hugetlb_cma: reserved %lu MiB on node %d\n", 7511 size / SZ_1M, nid); 7512 7513 if (reserved >= hugetlb_cma_size) 7514 break; 7515 } 7516 7517 if (!reserved) 7518 /* 7519 * hugetlb_cma_size is used to determine if allocations from 7520 * cma are possible. Set to zero if no cma regions are set up. 7521 */ 7522 hugetlb_cma_size = 0; 7523 } 7524 7525 static void __init hugetlb_cma_check(void) 7526 { 7527 if (!hugetlb_cma_size || cma_reserve_called) 7528 return; 7529 7530 pr_warn("hugetlb_cma: the option isn't supported by current arch\n"); 7531 } 7532 7533 #endif /* CONFIG_CMA */ 7534