// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/minmax.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>
#include <linux/migrate.h>
#include <linux/nospec.h>
#include <linux/delayacct.h>
#include <linux/memory.h>
#include <linux/mm_inline.h>
#include <linux/padata.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/page_owner.h>
#include "internal.h"
#include "hugetlb_vmemmap.h"
#include "hugetlb_cma.h"
#include <linux/page-isolation.h>

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

__initdata nodemask_t hugetlb_bootmem_nodes;
__initdata struct list_head huge_boot_pages[MAX_NUMNODES];
static unsigned long hstate_boot_nrinvalid[HUGE_MAX_HSTATE] __initdata;

/*
 * Due to ordering constraints across the init code for various
 * architectures, hugetlb hstate cmdline parameters can't simply
 * be early_param. early_param might call the setup function
 * before valid hugetlb page sizes are determined, leading to
 * incorrect rejection of valid hugepagesz= options.
 *
 * So, record the parameters early and consume them whenever the
 * init code is ready for them, by calling hugetlb_parse_params().
74 */ 75 76 /* one (hugepagesz=,hugepages=) pair per hstate, one default_hugepagesz */ 77 #define HUGE_MAX_CMDLINE_ARGS (2 * HUGE_MAX_HSTATE + 1) 78 struct hugetlb_cmdline { 79 char *val; 80 int (*setup)(char *val); 81 }; 82 83 /* for command line parsing */ 84 static struct hstate * __initdata parsed_hstate; 85 static unsigned long __initdata default_hstate_max_huge_pages; 86 static bool __initdata parsed_valid_hugepagesz = true; 87 static bool __initdata parsed_default_hugepagesz; 88 static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata; 89 static unsigned long hugepage_allocation_threads __initdata; 90 91 static char hstate_cmdline_buf[COMMAND_LINE_SIZE] __initdata; 92 static int hstate_cmdline_index __initdata; 93 static struct hugetlb_cmdline hugetlb_params[HUGE_MAX_CMDLINE_ARGS] __initdata; 94 static int hugetlb_param_index __initdata; 95 static __init int hugetlb_add_param(char *s, int (*setup)(char *val)); 96 static __init void hugetlb_parse_params(void); 97 98 #define hugetlb_early_param(str, func) \ 99 static __init int func##args(char *s) \ 100 { \ 101 return hugetlb_add_param(s, func); \ 102 } \ 103 early_param(str, func##args) 104 105 /* 106 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages, 107 * free_huge_pages, and surplus_huge_pages. 108 */ 109 __cacheline_aligned_in_smp DEFINE_SPINLOCK(hugetlb_lock); 110 111 /* 112 * Serializes faults on the same logical page. This is used to 113 * prevent spurious OOMs when the hugepage pool is fully utilized. 114 */ 115 static int num_fault_mutexes __ro_after_init; 116 struct mutex *hugetlb_fault_mutex_table __ro_after_init; 117 118 /* Forward declaration */ 119 static int hugetlb_acct_memory(struct hstate *h, long delta); 120 static void hugetlb_vma_lock_free(struct vm_area_struct *vma); 121 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma); 122 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma); 123 static void hugetlb_unshare_pmds(struct vm_area_struct *vma, 124 unsigned long start, unsigned long end); 125 static struct resv_map *vma_resv_map(struct vm_area_struct *vma); 126 127 static void hugetlb_free_folio(struct folio *folio) 128 { 129 if (folio_test_hugetlb_cma(folio)) { 130 hugetlb_cma_free_folio(folio); 131 return; 132 } 133 134 folio_put(folio); 135 } 136 137 static inline bool subpool_is_free(struct hugepage_subpool *spool) 138 { 139 if (spool->count) 140 return false; 141 if (spool->max_hpages != -1) 142 return spool->used_hpages == 0; 143 if (spool->min_hpages != -1) 144 return spool->rsv_hpages == spool->min_hpages; 145 146 return true; 147 } 148 149 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool, 150 unsigned long irq_flags) 151 { 152 spin_unlock_irqrestore(&spool->lock, irq_flags); 153 154 /* If no pages are used, and no other handles to the subpool 155 * remain, give up any reservations based on minimum size and 156 * free the subpool */ 157 if (subpool_is_free(spool)) { 158 if (spool->min_hpages != -1) 159 hugetlb_acct_memory(spool->hstate, 160 -spool->min_hpages); 161 kfree(spool); 162 } 163 } 164 165 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, 166 long min_hpages) 167 { 168 struct hugepage_subpool *spool; 169 170 spool = kzalloc(sizeof(*spool), GFP_KERNEL); 171 if (!spool) 172 return NULL; 173 174 spin_lock_init(&spool->lock); 175 spool->count = 1; 176 spool->max_hpages = max_hpages; 177 spool->hstate = h; 178 spool->min_hpages = min_hpages; 179 180 if (min_hpages != -1 && 
hugetlb_acct_memory(h, min_hpages)) { 181 kfree(spool); 182 return NULL; 183 } 184 spool->rsv_hpages = min_hpages; 185 186 return spool; 187 } 188 189 void hugepage_put_subpool(struct hugepage_subpool *spool) 190 { 191 unsigned long flags; 192 193 spin_lock_irqsave(&spool->lock, flags); 194 BUG_ON(!spool->count); 195 spool->count--; 196 unlock_or_release_subpool(spool, flags); 197 } 198 199 /* 200 * Subpool accounting for allocating and reserving pages. 201 * Return -ENOMEM if there are not enough resources to satisfy the 202 * request. Otherwise, return the number of pages by which the 203 * global pools must be adjusted (upward). The returned value may 204 * only be different than the passed value (delta) in the case where 205 * a subpool minimum size must be maintained. 206 */ 207 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool, 208 long delta) 209 { 210 long ret = delta; 211 212 if (!spool) 213 return ret; 214 215 spin_lock_irq(&spool->lock); 216 217 if (spool->max_hpages != -1) { /* maximum size accounting */ 218 if ((spool->used_hpages + delta) <= spool->max_hpages) 219 spool->used_hpages += delta; 220 else { 221 ret = -ENOMEM; 222 goto unlock_ret; 223 } 224 } 225 226 /* minimum size accounting */ 227 if (spool->min_hpages != -1 && spool->rsv_hpages) { 228 if (delta > spool->rsv_hpages) { 229 /* 230 * Asking for more reserves than those already taken on 231 * behalf of subpool. Return difference. 232 */ 233 ret = delta - spool->rsv_hpages; 234 spool->rsv_hpages = 0; 235 } else { 236 ret = 0; /* reserves already accounted for */ 237 spool->rsv_hpages -= delta; 238 } 239 } 240 241 unlock_ret: 242 spin_unlock_irq(&spool->lock); 243 return ret; 244 } 245 246 /* 247 * Subpool accounting for freeing and unreserving pages. 248 * Return the number of global page reservations that must be dropped. 249 * The return value may only be different than the passed value (delta) 250 * in the case where a subpool minimum size must be maintained. 251 */ 252 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool, 253 long delta) 254 { 255 long ret = delta; 256 unsigned long flags; 257 258 if (!spool) 259 return delta; 260 261 spin_lock_irqsave(&spool->lock, flags); 262 263 if (spool->max_hpages != -1) /* maximum size accounting */ 264 spool->used_hpages -= delta; 265 266 /* minimum size accounting */ 267 if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) { 268 if (spool->rsv_hpages + delta <= spool->min_hpages) 269 ret = 0; 270 else 271 ret = spool->rsv_hpages + delta - spool->min_hpages; 272 273 spool->rsv_hpages += delta; 274 if (spool->rsv_hpages > spool->min_hpages) 275 spool->rsv_hpages = spool->min_hpages; 276 } 277 278 /* 279 * If hugetlbfs_put_super couldn't free spool due to an outstanding 280 * quota reference, free it now. 
281 */ 282 unlock_or_release_subpool(spool, flags); 283 284 return ret; 285 } 286 287 static inline struct hugepage_subpool *subpool_inode(struct inode *inode) 288 { 289 return HUGETLBFS_SB(inode->i_sb)->spool; 290 } 291 292 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma) 293 { 294 return subpool_inode(file_inode(vma->vm_file)); 295 } 296 297 /* 298 * hugetlb vma_lock helper routines 299 */ 300 void hugetlb_vma_lock_read(struct vm_area_struct *vma) 301 { 302 if (__vma_shareable_lock(vma)) { 303 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 304 305 down_read(&vma_lock->rw_sema); 306 } else if (__vma_private_lock(vma)) { 307 struct resv_map *resv_map = vma_resv_map(vma); 308 309 down_read(&resv_map->rw_sema); 310 } 311 } 312 313 void hugetlb_vma_unlock_read(struct vm_area_struct *vma) 314 { 315 if (__vma_shareable_lock(vma)) { 316 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 317 318 up_read(&vma_lock->rw_sema); 319 } else if (__vma_private_lock(vma)) { 320 struct resv_map *resv_map = vma_resv_map(vma); 321 322 up_read(&resv_map->rw_sema); 323 } 324 } 325 326 void hugetlb_vma_lock_write(struct vm_area_struct *vma) 327 { 328 if (__vma_shareable_lock(vma)) { 329 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 330 331 down_write(&vma_lock->rw_sema); 332 } else if (__vma_private_lock(vma)) { 333 struct resv_map *resv_map = vma_resv_map(vma); 334 335 down_write(&resv_map->rw_sema); 336 } 337 } 338 339 void hugetlb_vma_unlock_write(struct vm_area_struct *vma) 340 { 341 if (__vma_shareable_lock(vma)) { 342 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 343 344 up_write(&vma_lock->rw_sema); 345 } else if (__vma_private_lock(vma)) { 346 struct resv_map *resv_map = vma_resv_map(vma); 347 348 up_write(&resv_map->rw_sema); 349 } 350 } 351 352 int hugetlb_vma_trylock_write(struct vm_area_struct *vma) 353 { 354 355 if (__vma_shareable_lock(vma)) { 356 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 357 358 return down_write_trylock(&vma_lock->rw_sema); 359 } else if (__vma_private_lock(vma)) { 360 struct resv_map *resv_map = vma_resv_map(vma); 361 362 return down_write_trylock(&resv_map->rw_sema); 363 } 364 365 return 1; 366 } 367 368 void hugetlb_vma_assert_locked(struct vm_area_struct *vma) 369 { 370 if (__vma_shareable_lock(vma)) { 371 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 372 373 lockdep_assert_held(&vma_lock->rw_sema); 374 } else if (__vma_private_lock(vma)) { 375 struct resv_map *resv_map = vma_resv_map(vma); 376 377 lockdep_assert_held(&resv_map->rw_sema); 378 } 379 } 380 381 void hugetlb_vma_lock_release(struct kref *kref) 382 { 383 struct hugetlb_vma_lock *vma_lock = container_of(kref, 384 struct hugetlb_vma_lock, refs); 385 386 kfree(vma_lock); 387 } 388 389 static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock) 390 { 391 struct vm_area_struct *vma = vma_lock->vma; 392 393 /* 394 * vma_lock structure may or not be released as a result of put, 395 * it certainly will no longer be attached to vma so clear pointer. 396 * Semaphore synchronizes access to vma_lock->vma field. 
397 */ 398 vma_lock->vma = NULL; 399 vma->vm_private_data = NULL; 400 up_write(&vma_lock->rw_sema); 401 kref_put(&vma_lock->refs, hugetlb_vma_lock_release); 402 } 403 404 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma) 405 { 406 if (__vma_shareable_lock(vma)) { 407 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 408 409 __hugetlb_vma_unlock_write_put(vma_lock); 410 } else if (__vma_private_lock(vma)) { 411 struct resv_map *resv_map = vma_resv_map(vma); 412 413 /* no free for anon vmas, but still need to unlock */ 414 up_write(&resv_map->rw_sema); 415 } 416 } 417 418 static void hugetlb_vma_lock_free(struct vm_area_struct *vma) 419 { 420 /* 421 * Only present in sharable vmas. 422 */ 423 if (!vma || !__vma_shareable_lock(vma)) 424 return; 425 426 if (vma->vm_private_data) { 427 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 428 429 down_write(&vma_lock->rw_sema); 430 __hugetlb_vma_unlock_write_put(vma_lock); 431 } 432 } 433 434 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma) 435 { 436 struct hugetlb_vma_lock *vma_lock; 437 438 /* Only establish in (flags) sharable vmas */ 439 if (!vma || !(vma->vm_flags & VM_MAYSHARE)) 440 return; 441 442 /* Should never get here with non-NULL vm_private_data */ 443 if (vma->vm_private_data) 444 return; 445 446 vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL); 447 if (!vma_lock) { 448 /* 449 * If we can not allocate structure, then vma can not 450 * participate in pmd sharing. This is only a possible 451 * performance enhancement and memory saving issue. 452 * However, the lock is also used to synchronize page 453 * faults with truncation. If the lock is not present, 454 * unlikely races could leave pages in a file past i_size 455 * until the file is removed. Warn in the unlikely case of 456 * allocation failure. 457 */ 458 pr_warn_once("HugeTLB: unable to allocate vma specific lock\n"); 459 return; 460 } 461 462 kref_init(&vma_lock->refs); 463 init_rwsem(&vma_lock->rw_sema); 464 vma_lock->vma = vma; 465 vma->vm_private_data = vma_lock; 466 } 467 468 /* Helper that removes a struct file_region from the resv_map cache and returns 469 * it for use. 470 */ 471 static struct file_region * 472 get_file_region_entry_from_cache(struct resv_map *resv, long from, long to) 473 { 474 struct file_region *nrg; 475 476 VM_BUG_ON(resv->region_cache_count <= 0); 477 478 resv->region_cache_count--; 479 nrg = list_first_entry(&resv->region_cache, struct file_region, link); 480 list_del(&nrg->link); 481 482 nrg->from = from; 483 nrg->to = to; 484 485 return nrg; 486 } 487 488 static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg, 489 struct file_region *rg) 490 { 491 #ifdef CONFIG_CGROUP_HUGETLB 492 nrg->reservation_counter = rg->reservation_counter; 493 nrg->css = rg->css; 494 if (rg->css) 495 css_get(rg->css); 496 #endif 497 } 498 499 /* Helper that records hugetlb_cgroup uncharge info. */ 500 static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg, 501 struct hstate *h, 502 struct resv_map *resv, 503 struct file_region *nrg) 504 { 505 #ifdef CONFIG_CGROUP_HUGETLB 506 if (h_cg) { 507 nrg->reservation_counter = 508 &h_cg->rsvd_hugepage[hstate_index(h)]; 509 nrg->css = &h_cg->css; 510 /* 511 * The caller will hold exactly one h_cg->css reference for the 512 * whole contiguous reservation region. But this area might be 513 * scattered when there are already some file_regions reside in 514 * it. As a result, many file_regions may share only one css 515 * reference. 
In order to ensure that one file_region must hold 516 * exactly one h_cg->css reference, we should do css_get for 517 * each file_region and leave the reference held by caller 518 * untouched. 519 */ 520 css_get(&h_cg->css); 521 if (!resv->pages_per_hpage) 522 resv->pages_per_hpage = pages_per_huge_page(h); 523 /* pages_per_hpage should be the same for all entries in 524 * a resv_map. 525 */ 526 VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h)); 527 } else { 528 nrg->reservation_counter = NULL; 529 nrg->css = NULL; 530 } 531 #endif 532 } 533 534 static void put_uncharge_info(struct file_region *rg) 535 { 536 #ifdef CONFIG_CGROUP_HUGETLB 537 if (rg->css) 538 css_put(rg->css); 539 #endif 540 } 541 542 static bool has_same_uncharge_info(struct file_region *rg, 543 struct file_region *org) 544 { 545 #ifdef CONFIG_CGROUP_HUGETLB 546 return rg->reservation_counter == org->reservation_counter && 547 rg->css == org->css; 548 549 #else 550 return true; 551 #endif 552 } 553 554 static void coalesce_file_region(struct resv_map *resv, struct file_region *rg) 555 { 556 struct file_region *nrg, *prg; 557 558 prg = list_prev_entry(rg, link); 559 if (&prg->link != &resv->regions && prg->to == rg->from && 560 has_same_uncharge_info(prg, rg)) { 561 prg->to = rg->to; 562 563 list_del(&rg->link); 564 put_uncharge_info(rg); 565 kfree(rg); 566 567 rg = prg; 568 } 569 570 nrg = list_next_entry(rg, link); 571 if (&nrg->link != &resv->regions && nrg->from == rg->to && 572 has_same_uncharge_info(nrg, rg)) { 573 nrg->from = rg->from; 574 575 list_del(&rg->link); 576 put_uncharge_info(rg); 577 kfree(rg); 578 } 579 } 580 581 static inline long 582 hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from, 583 long to, struct hstate *h, struct hugetlb_cgroup *cg, 584 long *regions_needed) 585 { 586 struct file_region *nrg; 587 588 if (!regions_needed) { 589 nrg = get_file_region_entry_from_cache(map, from, to); 590 record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg); 591 list_add(&nrg->link, rg); 592 coalesce_file_region(map, nrg); 593 } else 594 *regions_needed += 1; 595 596 return to - from; 597 } 598 599 /* 600 * Must be called with resv->lock held. 601 * 602 * Calling this with regions_needed != NULL will count the number of pages 603 * to be added but will not modify the linked list. And regions_needed will 604 * indicate the number of file_regions needed in the cache to carry out to add 605 * the regions for this range. 606 */ 607 static long add_reservation_in_range(struct resv_map *resv, long f, long t, 608 struct hugetlb_cgroup *h_cg, 609 struct hstate *h, long *regions_needed) 610 { 611 long add = 0; 612 struct list_head *head = &resv->regions; 613 long last_accounted_offset = f; 614 struct file_region *iter, *trg = NULL; 615 struct list_head *rg = NULL; 616 617 if (regions_needed) 618 *regions_needed = 0; 619 620 /* In this loop, we essentially handle an entry for the range 621 * [last_accounted_offset, iter->from), at every iteration, with some 622 * bounds checking. 623 */ 624 list_for_each_entry_safe(iter, trg, head, link) { 625 /* Skip irrelevant regions that start before our range. */ 626 if (iter->from < f) { 627 /* If this region ends after the last accounted offset, 628 * then we need to update last_accounted_offset. 629 */ 630 if (iter->to > last_accounted_offset) 631 last_accounted_offset = iter->to; 632 continue; 633 } 634 635 /* When we find a region that starts beyond our range, we've 636 * finished. 
637 */ 638 if (iter->from >= t) { 639 rg = iter->link.prev; 640 break; 641 } 642 643 /* Add an entry for last_accounted_offset -> iter->from, and 644 * update last_accounted_offset. 645 */ 646 if (iter->from > last_accounted_offset) 647 add += hugetlb_resv_map_add(resv, iter->link.prev, 648 last_accounted_offset, 649 iter->from, h, h_cg, 650 regions_needed); 651 652 last_accounted_offset = iter->to; 653 } 654 655 /* Handle the case where our range extends beyond 656 * last_accounted_offset. 657 */ 658 if (!rg) 659 rg = head->prev; 660 if (last_accounted_offset < t) 661 add += hugetlb_resv_map_add(resv, rg, last_accounted_offset, 662 t, h, h_cg, regions_needed); 663 664 return add; 665 } 666 667 /* Must be called with resv->lock acquired. Will drop lock to allocate entries. 668 */ 669 static int allocate_file_region_entries(struct resv_map *resv, 670 int regions_needed) 671 __must_hold(&resv->lock) 672 { 673 LIST_HEAD(allocated_regions); 674 int to_allocate = 0, i = 0; 675 struct file_region *trg = NULL, *rg = NULL; 676 677 VM_BUG_ON(regions_needed < 0); 678 679 /* 680 * Check for sufficient descriptors in the cache to accommodate 681 * the number of in progress add operations plus regions_needed. 682 * 683 * This is a while loop because when we drop the lock, some other call 684 * to region_add or region_del may have consumed some region_entries, 685 * so we keep looping here until we finally have enough entries for 686 * (adds_in_progress + regions_needed). 687 */ 688 while (resv->region_cache_count < 689 (resv->adds_in_progress + regions_needed)) { 690 to_allocate = resv->adds_in_progress + regions_needed - 691 resv->region_cache_count; 692 693 /* At this point, we should have enough entries in the cache 694 * for all the existing adds_in_progress. We should only be 695 * needing to allocate for regions_needed. 696 */ 697 VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress); 698 699 spin_unlock(&resv->lock); 700 for (i = 0; i < to_allocate; i++) { 701 trg = kmalloc(sizeof(*trg), GFP_KERNEL); 702 if (!trg) 703 goto out_of_memory; 704 list_add(&trg->link, &allocated_regions); 705 } 706 707 spin_lock(&resv->lock); 708 709 list_splice(&allocated_regions, &resv->region_cache); 710 resv->region_cache_count += to_allocate; 711 } 712 713 return 0; 714 715 out_of_memory: 716 list_for_each_entry_safe(rg, trg, &allocated_regions, link) { 717 list_del(&rg->link); 718 kfree(rg); 719 } 720 return -ENOMEM; 721 } 722 723 /* 724 * Add the huge page range represented by [f, t) to the reserve 725 * map. Regions will be taken from the cache to fill in this range. 726 * Sufficient regions should exist in the cache due to the previous 727 * call to region_chg with the same range, but in some cases the cache will not 728 * have sufficient entries due to races with other code doing region_add or 729 * region_del. The extra needed entries will be allocated. 730 * 731 * regions_needed is the out value provided by a previous call to region_chg. 732 * 733 * Return the number of new huge pages added to the map. This number is greater 734 * than or equal to zero. If file_region entries needed to be allocated for 735 * this operation and we were not able to allocate, it returns -ENOMEM. 736 * region_add of regions of length 1 never allocate file_regions and cannot 737 * fail; region_chg will always allocate at least 1 entry and a region_add for 738 * 1 page will only require at most 1 entry. 
739 */ 740 static long region_add(struct resv_map *resv, long f, long t, 741 long in_regions_needed, struct hstate *h, 742 struct hugetlb_cgroup *h_cg) 743 { 744 long add = 0, actual_regions_needed = 0; 745 746 spin_lock(&resv->lock); 747 retry: 748 749 /* Count how many regions are actually needed to execute this add. */ 750 add_reservation_in_range(resv, f, t, NULL, NULL, 751 &actual_regions_needed); 752 753 /* 754 * Check for sufficient descriptors in the cache to accommodate 755 * this add operation. Note that actual_regions_needed may be greater 756 * than in_regions_needed, as the resv_map may have been modified since 757 * the region_chg call. In this case, we need to make sure that we 758 * allocate extra entries, such that we have enough for all the 759 * existing adds_in_progress, plus the excess needed for this 760 * operation. 761 */ 762 if (actual_regions_needed > in_regions_needed && 763 resv->region_cache_count < 764 resv->adds_in_progress + 765 (actual_regions_needed - in_regions_needed)) { 766 /* region_add operation of range 1 should never need to 767 * allocate file_region entries. 768 */ 769 VM_BUG_ON(t - f <= 1); 770 771 if (allocate_file_region_entries( 772 resv, actual_regions_needed - in_regions_needed)) { 773 return -ENOMEM; 774 } 775 776 goto retry; 777 } 778 779 add = add_reservation_in_range(resv, f, t, h_cg, h, NULL); 780 781 resv->adds_in_progress -= in_regions_needed; 782 783 spin_unlock(&resv->lock); 784 return add; 785 } 786 787 /* 788 * Examine the existing reserve map and determine how many 789 * huge pages in the specified range [f, t) are NOT currently 790 * represented. This routine is called before a subsequent 791 * call to region_add that will actually modify the reserve 792 * map to add the specified range [f, t). region_chg does 793 * not change the number of huge pages represented by the 794 * map. A number of new file_region structures is added to the cache as a 795 * placeholder, for the subsequent region_add call to use. At least 1 796 * file_region structure is added. 797 * 798 * out_regions_needed is the number of regions added to the 799 * resv->adds_in_progress. This value needs to be provided to a follow up call 800 * to region_add or region_abort for proper accounting. 801 * 802 * Returns the number of huge pages that need to be added to the existing 803 * reservation map for the range [f, t). This number is greater or equal to 804 * zero. -ENOMEM is returned if a new file_region structure or cache entry 805 * is needed and can not be allocated. 806 */ 807 static long region_chg(struct resv_map *resv, long f, long t, 808 long *out_regions_needed) 809 { 810 long chg = 0; 811 812 spin_lock(&resv->lock); 813 814 /* Count how many hugepages in this range are NOT represented. */ 815 chg = add_reservation_in_range(resv, f, t, NULL, NULL, 816 out_regions_needed); 817 818 if (*out_regions_needed == 0) 819 *out_regions_needed = 1; 820 821 if (allocate_file_region_entries(resv, *out_regions_needed)) 822 return -ENOMEM; 823 824 resv->adds_in_progress += *out_regions_needed; 825 826 spin_unlock(&resv->lock); 827 return chg; 828 } 829 830 /* 831 * Abort the in progress add operation. The adds_in_progress field 832 * of the resv_map keeps track of the operations in progress between 833 * calls to region_chg and region_add. Operations are sometimes 834 * aborted after the call to region_chg. In such cases, region_abort 835 * is called to decrement the adds_in_progress counter. 
regions_needed
 * is the value returned by the region_chg call, and is used to decrement
 * the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine. They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t,
			 long regions_needed)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress -= regions_needed;
	spin_unlock(&resv->lock);
}

/*
 * Delete the specified range [f, t) from the reserve map. If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted. Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more. In the
 * case where a region must be split, a new region descriptor must
 * be allocated. If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM. Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted. file_region
		 * ranges are normally of the form [from, to). However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to. Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
893 */ 894 if (!nrg && 895 resv->region_cache_count > resv->adds_in_progress) { 896 nrg = list_first_entry(&resv->region_cache, 897 struct file_region, 898 link); 899 list_del(&nrg->link); 900 resv->region_cache_count--; 901 } 902 903 if (!nrg) { 904 spin_unlock(&resv->lock); 905 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); 906 if (!nrg) 907 return -ENOMEM; 908 goto retry; 909 } 910 911 del += t - f; 912 hugetlb_cgroup_uncharge_file_region( 913 resv, rg, t - f, false); 914 915 /* New entry for end of split region */ 916 nrg->from = t; 917 nrg->to = rg->to; 918 919 copy_hugetlb_cgroup_uncharge_info(nrg, rg); 920 921 INIT_LIST_HEAD(&nrg->link); 922 923 /* Original entry is trimmed */ 924 rg->to = f; 925 926 list_add(&nrg->link, &rg->link); 927 nrg = NULL; 928 break; 929 } 930 931 if (f <= rg->from && t >= rg->to) { /* Remove entire region */ 932 del += rg->to - rg->from; 933 hugetlb_cgroup_uncharge_file_region(resv, rg, 934 rg->to - rg->from, true); 935 list_del(&rg->link); 936 kfree(rg); 937 continue; 938 } 939 940 if (f <= rg->from) { /* Trim beginning of region */ 941 hugetlb_cgroup_uncharge_file_region(resv, rg, 942 t - rg->from, false); 943 944 del += t - rg->from; 945 rg->from = t; 946 } else { /* Trim end of region */ 947 hugetlb_cgroup_uncharge_file_region(resv, rg, 948 rg->to - f, false); 949 950 del += rg->to - f; 951 rg->to = f; 952 } 953 } 954 955 spin_unlock(&resv->lock); 956 kfree(nrg); 957 return del; 958 } 959 960 /* 961 * A rare out of memory error was encountered which prevented removal of 962 * the reserve map region for a page. The huge page itself was free'ed 963 * and removed from the page cache. This routine will adjust the subpool 964 * usage count, and the global reserve count if needed. By incrementing 965 * these counts, the reserve map entry which could not be deleted will 966 * appear as a "reserved" entry instead of simply dangling with incorrect 967 * counts. 968 */ 969 void hugetlb_fix_reserve_counts(struct inode *inode) 970 { 971 struct hugepage_subpool *spool = subpool_inode(inode); 972 long rsv_adjust; 973 bool reserved = false; 974 975 rsv_adjust = hugepage_subpool_get_pages(spool, 1); 976 if (rsv_adjust > 0) { 977 struct hstate *h = hstate_inode(inode); 978 979 if (!hugetlb_acct_memory(h, 1)) 980 reserved = true; 981 } else if (!rsv_adjust) { 982 reserved = true; 983 } 984 985 if (!reserved) 986 pr_warn("hugetlb: Huge Page Reserved count may go negative.\n"); 987 } 988 989 /* 990 * Count and return the number of huge pages in the reserve map 991 * that intersect with the range [f, t). 992 */ 993 static long region_count(struct resv_map *resv, long f, long t) 994 { 995 struct list_head *head = &resv->regions; 996 struct file_region *rg; 997 long chg = 0; 998 999 spin_lock(&resv->lock); 1000 /* Locate each segment we overlap with, and count that overlap. */ 1001 list_for_each_entry(rg, head, link) { 1002 long seg_from; 1003 long seg_to; 1004 1005 if (rg->to <= f) 1006 continue; 1007 if (rg->from >= t) 1008 break; 1009 1010 seg_from = max(rg->from, f); 1011 seg_to = min(rg->to, t); 1012 1013 chg += seg_to - seg_from; 1014 } 1015 spin_unlock(&resv->lock); 1016 1017 return chg; 1018 } 1019 1020 /* 1021 * Convert the address within this vma to the page offset within 1022 * the mapping, huge page units here. 
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

/**
 * vma_kernel_pagesize - Page size granularity for this VMA.
 * @vma: The user mapping.
 *
 * Folios in this VMA will be aligned to, and at least the size of, the
 * number of bytes returned by this function.
 *
 * Return: The default size of the folios allocated when backing a VMA.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}

/*
 * Flags for MAP_PRIVATE reservations. These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK     (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have its future faults succeed.
 *
 * With the exception of hugetlb_dup_vma_private(), which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping. A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and it persists even
 * after the page is instantiated. A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * a reservation, i.e. where pages have been instantiated.
1086 */ 1087 static unsigned long get_vma_private_data(struct vm_area_struct *vma) 1088 { 1089 return (unsigned long)vma->vm_private_data; 1090 } 1091 1092 static void set_vma_private_data(struct vm_area_struct *vma, 1093 unsigned long value) 1094 { 1095 vma->vm_private_data = (void *)value; 1096 } 1097 1098 static void 1099 resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map, 1100 struct hugetlb_cgroup *h_cg, 1101 struct hstate *h) 1102 { 1103 #ifdef CONFIG_CGROUP_HUGETLB 1104 if (!h_cg || !h) { 1105 resv_map->reservation_counter = NULL; 1106 resv_map->pages_per_hpage = 0; 1107 resv_map->css = NULL; 1108 } else { 1109 resv_map->reservation_counter = 1110 &h_cg->rsvd_hugepage[hstate_index(h)]; 1111 resv_map->pages_per_hpage = pages_per_huge_page(h); 1112 resv_map->css = &h_cg->css; 1113 } 1114 #endif 1115 } 1116 1117 struct resv_map *resv_map_alloc(void) 1118 { 1119 struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL); 1120 struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL); 1121 1122 if (!resv_map || !rg) { 1123 kfree(resv_map); 1124 kfree(rg); 1125 return NULL; 1126 } 1127 1128 kref_init(&resv_map->refs); 1129 spin_lock_init(&resv_map->lock); 1130 INIT_LIST_HEAD(&resv_map->regions); 1131 init_rwsem(&resv_map->rw_sema); 1132 1133 resv_map->adds_in_progress = 0; 1134 /* 1135 * Initialize these to 0. On shared mappings, 0's here indicate these 1136 * fields don't do cgroup accounting. On private mappings, these will be 1137 * re-initialized to the proper values, to indicate that hugetlb cgroup 1138 * reservations are to be un-charged from here. 1139 */ 1140 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL); 1141 1142 INIT_LIST_HEAD(&resv_map->region_cache); 1143 list_add(&rg->link, &resv_map->region_cache); 1144 resv_map->region_cache_count = 1; 1145 1146 return resv_map; 1147 } 1148 1149 void resv_map_release(struct kref *ref) 1150 { 1151 struct resv_map *resv_map = container_of(ref, struct resv_map, refs); 1152 struct list_head *head = &resv_map->region_cache; 1153 struct file_region *rg, *trg; 1154 1155 /* Clear out any active regions before we release the map. */ 1156 region_del(resv_map, 0, LONG_MAX); 1157 1158 /* ... and any entries left in the cache */ 1159 list_for_each_entry_safe(rg, trg, head, link) { 1160 list_del(&rg->link); 1161 kfree(rg); 1162 } 1163 1164 VM_BUG_ON(resv_map->adds_in_progress); 1165 1166 kfree(resv_map); 1167 } 1168 1169 static inline struct resv_map *inode_resv_map(struct inode *inode) 1170 { 1171 /* 1172 * At inode evict time, i_mapping may not point to the original 1173 * address space within the inode. This original address space 1174 * contains the pointer to the resv_map. So, always use the 1175 * address space embedded within the inode. 1176 * The VERY common case is inode->mapping == &inode->i_data but, 1177 * this may not be true for device special inodes. 
1178 */ 1179 return (struct resv_map *)(&inode->i_data)->i_private_data; 1180 } 1181 1182 static struct resv_map *vma_resv_map(struct vm_area_struct *vma) 1183 { 1184 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); 1185 if (vma->vm_flags & VM_MAYSHARE) { 1186 struct address_space *mapping = vma->vm_file->f_mapping; 1187 struct inode *inode = mapping->host; 1188 1189 return inode_resv_map(inode); 1190 1191 } else { 1192 return (struct resv_map *)(get_vma_private_data(vma) & 1193 ~HPAGE_RESV_MASK); 1194 } 1195 } 1196 1197 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) 1198 { 1199 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); 1200 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); 1201 1202 set_vma_private_data(vma, (unsigned long)map); 1203 } 1204 1205 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) 1206 { 1207 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); 1208 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); 1209 1210 set_vma_private_data(vma, get_vma_private_data(vma) | flags); 1211 } 1212 1213 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) 1214 { 1215 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); 1216 1217 return (get_vma_private_data(vma) & flag) != 0; 1218 } 1219 1220 bool __vma_private_lock(struct vm_area_struct *vma) 1221 { 1222 return !(vma->vm_flags & VM_MAYSHARE) && 1223 get_vma_private_data(vma) & ~HPAGE_RESV_MASK && 1224 is_vma_resv_set(vma, HPAGE_RESV_OWNER); 1225 } 1226 1227 void hugetlb_dup_vma_private(struct vm_area_struct *vma) 1228 { 1229 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); 1230 /* 1231 * Clear vm_private_data 1232 * - For shared mappings this is a per-vma semaphore that may be 1233 * allocated in a subsequent call to hugetlb_vm_op_open. 1234 * Before clearing, make sure pointer is not associated with vma 1235 * as this will leak the structure. This is the case when called 1236 * via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already 1237 * been called to allocate a new structure. 1238 * - For MAP_PRIVATE mappings, this is the reserve map which does 1239 * not apply to children. Faults generated by the children are 1240 * not guaranteed to succeed, even if read-only. 1241 */ 1242 if (vma->vm_flags & VM_MAYSHARE) { 1243 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 1244 1245 if (vma_lock && vma_lock->vma != vma) 1246 vma->vm_private_data = NULL; 1247 } else 1248 vma->vm_private_data = NULL; 1249 } 1250 1251 /* 1252 * Reset and decrement one ref on hugepage private reservation. 1253 * Called with mm->mmap_lock writer semaphore held. 1254 * This function should be only used by mremap and operate on 1255 * same sized vma. It should never come here with last ref on the 1256 * reservation. 1257 */ 1258 void clear_vma_resv_huge_pages(struct vm_area_struct *vma) 1259 { 1260 /* 1261 * Clear the old hugetlb private page reservation. 1262 * It has already been transferred to new_vma. 1263 * 1264 * During a mremap() operation of a hugetlb vma we call move_vma() 1265 * which copies vma into new_vma and unmaps vma. After the copy 1266 * operation both new_vma and vma share a reference to the resv_map 1267 * struct, and at that point vma is about to be unmapped. We don't 1268 * want to return the reservation to the pool at unmap of vma because 1269 * the reservation still lives on in new_vma, so simply decrement the 1270 * ref here and remove the resv_map reference from this vma. 
 */
	struct resv_map *reservations = vma_resv_map(vma);

	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
		kref_put(&reservations->refs, resv_map_release);
	}

	hugetlb_dup_vma_private(vma);
}

static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
{
	int nid = folio_nid(folio);

	lockdep_assert_held(&hugetlb_lock);
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);

	list_move(&folio->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
	folio_set_hugetlb_freed(folio);
}

static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
							int nid)
{
	struct folio *folio;
	bool pin = !!(current->flags & PF_MEMALLOC_PIN);

	lockdep_assert_held(&hugetlb_lock);
	list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
		if (pin && !folio_is_longterm_pinnable(folio))
			continue;

		if (folio_test_hwpoison(folio))
			continue;

		if (is_migrate_isolate_page(&folio->page))
			continue;

		list_move(&folio->lru, &h->hugepage_activelist);
		folio_ref_unfreeze(folio, 1);
		folio_clear_hugetlb_freed(folio);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		return folio;
	}

	return NULL;
}

static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
							int nid, nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = NUMA_NO_NODE;

	/* 'nid' should not be NUMA_NO_NODE. Try to catch any misuse of it and rectify. */
	if (nid == NUMA_NO_NODE)
		nid = numa_node_id();

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct folio *folio;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * No need to ask again on the same node. Pool is node rather
		 * than zone aware.
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		folio = dequeue_hugetlb_folio_node_exact(h, node);
		if (folio)
			return folio;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}

static unsigned long available_huge_pages(struct hstate *h)
{
	return h->free_huge_pages - h->resv_huge_pages;
}

static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, long gbl_chg)
{
	struct folio *folio = NULL;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * gbl_chg==1 means the allocation requires a new page that was not
	 * reserved before. Make sure there's at least one free page.
1381 */ 1382 if (gbl_chg && !available_huge_pages(h)) 1383 goto err; 1384 1385 gfp_mask = htlb_alloc_mask(h); 1386 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask); 1387 1388 if (mpol_is_preferred_many(mpol)) { 1389 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, 1390 nid, nodemask); 1391 1392 /* Fallback to all nodes if page==NULL */ 1393 nodemask = NULL; 1394 } 1395 1396 if (!folio) 1397 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, 1398 nid, nodemask); 1399 1400 mpol_cond_put(mpol); 1401 return folio; 1402 1403 err: 1404 return NULL; 1405 } 1406 1407 /* 1408 * common helper functions for hstate_next_node_to_{alloc|free}. 1409 * We may have allocated or freed a huge page based on a different 1410 * nodes_allowed previously, so h->next_node_to_{alloc|free} might 1411 * be outside of *nodes_allowed. Ensure that we use an allowed 1412 * node for alloc or free. 1413 */ 1414 static int next_node_allowed(int nid, nodemask_t *nodes_allowed) 1415 { 1416 nid = next_node_in(nid, *nodes_allowed); 1417 VM_BUG_ON(nid >= MAX_NUMNODES); 1418 1419 return nid; 1420 } 1421 1422 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed) 1423 { 1424 if (!node_isset(nid, *nodes_allowed)) 1425 nid = next_node_allowed(nid, nodes_allowed); 1426 return nid; 1427 } 1428 1429 /* 1430 * returns the previously saved node ["this node"] from which to 1431 * allocate a persistent huge page for the pool and advance the 1432 * next node from which to allocate, handling wrap at end of node 1433 * mask. 1434 */ 1435 static int hstate_next_node_to_alloc(int *next_node, 1436 nodemask_t *nodes_allowed) 1437 { 1438 int nid; 1439 1440 VM_BUG_ON(!nodes_allowed); 1441 1442 nid = get_valid_node_allowed(*next_node, nodes_allowed); 1443 *next_node = next_node_allowed(nid, nodes_allowed); 1444 1445 return nid; 1446 } 1447 1448 /* 1449 * helper for remove_pool_hugetlb_folio() - return the previously saved 1450 * node ["this node"] from which to free a huge page. Advance the 1451 * next node id whether or not we find a free huge page to free so 1452 * that the next attempt to free addresses the next node. 
1453 */ 1454 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed) 1455 { 1456 int nid; 1457 1458 VM_BUG_ON(!nodes_allowed); 1459 1460 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed); 1461 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed); 1462 1463 return nid; 1464 } 1465 1466 #define for_each_node_mask_to_alloc(next_node, nr_nodes, node, mask) \ 1467 for (nr_nodes = nodes_weight(*mask); \ 1468 nr_nodes > 0 && \ 1469 ((node = hstate_next_node_to_alloc(next_node, mask)) || 1); \ 1470 nr_nodes--) 1471 1472 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \ 1473 for (nr_nodes = nodes_weight(*mask); \ 1474 nr_nodes > 0 && \ 1475 ((node = hstate_next_node_to_free(hs, mask)) || 1); \ 1476 nr_nodes--) 1477 1478 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE 1479 #ifdef CONFIG_CONTIG_ALLOC 1480 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, 1481 int nid, nodemask_t *nodemask) 1482 { 1483 struct folio *folio; 1484 int order = huge_page_order(h); 1485 bool retried = false; 1486 1487 if (nid == NUMA_NO_NODE) 1488 nid = numa_mem_id(); 1489 retry: 1490 folio = hugetlb_cma_alloc_folio(h, gfp_mask, nid, nodemask); 1491 if (!folio) { 1492 if (hugetlb_cma_exclusive_alloc()) 1493 return NULL; 1494 1495 folio = folio_alloc_gigantic(order, gfp_mask, nid, nodemask); 1496 if (!folio) 1497 return NULL; 1498 } 1499 1500 if (folio_ref_freeze(folio, 1)) 1501 return folio; 1502 1503 pr_warn("HugeTLB: unexpected refcount on PFN %lu\n", folio_pfn(folio)); 1504 hugetlb_free_folio(folio); 1505 if (!retried) { 1506 retried = true; 1507 goto retry; 1508 } 1509 return NULL; 1510 } 1511 1512 #else /* !CONFIG_CONTIG_ALLOC */ 1513 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, 1514 int nid, nodemask_t *nodemask) 1515 { 1516 return NULL; 1517 } 1518 #endif /* CONFIG_CONTIG_ALLOC */ 1519 1520 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */ 1521 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, 1522 int nid, nodemask_t *nodemask) 1523 { 1524 return NULL; 1525 } 1526 #endif 1527 1528 /* 1529 * Remove hugetlb folio from lists. 1530 * If vmemmap exists for the folio, clear the hugetlb flag so that the 1531 * folio appears as just a compound page. Otherwise, wait until after 1532 * allocating vmemmap to clear the flag. 1533 * 1534 * Must be called with hugetlb lock held. 1535 */ 1536 static void remove_hugetlb_folio(struct hstate *h, struct folio *folio, 1537 bool adjust_surplus) 1538 { 1539 int nid = folio_nid(folio); 1540 1541 VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio); 1542 VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio); 1543 1544 lockdep_assert_held(&hugetlb_lock); 1545 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 1546 return; 1547 1548 list_del(&folio->lru); 1549 1550 if (folio_test_hugetlb_freed(folio)) { 1551 folio_clear_hugetlb_freed(folio); 1552 h->free_huge_pages--; 1553 h->free_huge_pages_node[nid]--; 1554 } 1555 if (adjust_surplus) { 1556 h->surplus_huge_pages--; 1557 h->surplus_huge_pages_node[nid]--; 1558 } 1559 1560 /* 1561 * We can only clear the hugetlb flag after allocating vmemmap 1562 * pages. Otherwise, someone (memory error handling) may try to write 1563 * to tail struct pages. 
1564 */ 1565 if (!folio_test_hugetlb_vmemmap_optimized(folio)) 1566 __folio_clear_hugetlb(folio); 1567 1568 h->nr_huge_pages--; 1569 h->nr_huge_pages_node[nid]--; 1570 } 1571 1572 static void add_hugetlb_folio(struct hstate *h, struct folio *folio, 1573 bool adjust_surplus) 1574 { 1575 int nid = folio_nid(folio); 1576 1577 VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio); 1578 1579 lockdep_assert_held(&hugetlb_lock); 1580 1581 INIT_LIST_HEAD(&folio->lru); 1582 h->nr_huge_pages++; 1583 h->nr_huge_pages_node[nid]++; 1584 1585 if (adjust_surplus) { 1586 h->surplus_huge_pages++; 1587 h->surplus_huge_pages_node[nid]++; 1588 } 1589 1590 __folio_set_hugetlb(folio); 1591 folio_change_private(folio, NULL); 1592 /* 1593 * We have to set hugetlb_vmemmap_optimized again as above 1594 * folio_change_private(folio, NULL) cleared it. 1595 */ 1596 folio_set_hugetlb_vmemmap_optimized(folio); 1597 1598 arch_clear_hugetlb_flags(folio); 1599 enqueue_hugetlb_folio(h, folio); 1600 } 1601 1602 static void __update_and_free_hugetlb_folio(struct hstate *h, 1603 struct folio *folio) 1604 { 1605 bool clear_flag = folio_test_hugetlb_vmemmap_optimized(folio); 1606 1607 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 1608 return; 1609 1610 /* 1611 * If we don't know which subpages are hwpoisoned, we can't free 1612 * the hugepage, so it's leaked intentionally. 1613 */ 1614 if (folio_test_hugetlb_raw_hwp_unreliable(folio)) 1615 return; 1616 1617 /* 1618 * If folio is not vmemmap optimized (!clear_flag), then the folio 1619 * is no longer identified as a hugetlb page. hugetlb_vmemmap_restore_folio 1620 * can only be passed hugetlb pages and will BUG otherwise. 1621 */ 1622 if (clear_flag && hugetlb_vmemmap_restore_folio(h, folio)) { 1623 spin_lock_irq(&hugetlb_lock); 1624 /* 1625 * If we cannot allocate vmemmap pages, just refuse to free the 1626 * page and put the page back on the hugetlb free list and treat 1627 * as a surplus page. 1628 */ 1629 add_hugetlb_folio(h, folio, true); 1630 spin_unlock_irq(&hugetlb_lock); 1631 return; 1632 } 1633 1634 /* 1635 * If vmemmap pages were allocated above, then we need to clear the 1636 * hugetlb flag under the hugetlb lock. 1637 */ 1638 if (folio_test_hugetlb(folio)) { 1639 spin_lock_irq(&hugetlb_lock); 1640 __folio_clear_hugetlb(folio); 1641 spin_unlock_irq(&hugetlb_lock); 1642 } 1643 1644 /* 1645 * Move PageHWPoison flag from head page to the raw error pages, 1646 * which makes any healthy subpages reusable. 1647 */ 1648 if (unlikely(folio_test_hwpoison(folio))) 1649 folio_clear_hugetlb_hwpoison(folio); 1650 1651 folio_ref_unfreeze(folio, 1); 1652 1653 hugetlb_free_folio(folio); 1654 } 1655 1656 /* 1657 * As update_and_free_hugetlb_folio() can be called under any context, so we cannot 1658 * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the 1659 * actual freeing in a workqueue to prevent from using GFP_ATOMIC to allocate 1660 * the vmemmap pages. 1661 * 1662 * free_hpage_workfn() locklessly retrieves the linked list of pages to be 1663 * freed and frees them one-by-one. As the page->mapping pointer is going 1664 * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node 1665 * structure of a lockless linked list of huge pages to be freed. 
 */
static LLIST_HEAD(hpage_freelist);

static void free_hpage_workfn(struct work_struct *work)
{
	struct llist_node *node;

	node = llist_del_all(&hpage_freelist);

	while (node) {
		struct folio *folio;
		struct hstate *h;

		folio = container_of((struct address_space **)node,
				     struct folio, mapping);
		node = node->next;
		folio->mapping = NULL;
		/*
		 * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
		 * folio_hstate() is going to trigger because a previous call to
		 * remove_hugetlb_folio() will clear the hugetlb bit, so do
		 * not use folio_hstate() directly.
		 */
		h = size_to_hstate(folio_size(folio));

		__update_and_free_hugetlb_folio(h, folio);

		cond_resched();
	}
}
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);

static inline void flush_free_hpage_work(struct hstate *h)
{
	if (hugetlb_vmemmap_optimizable(h))
		flush_work(&free_hpage_work);
}

static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
				 bool atomic)
{
	if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
		__update_and_free_hugetlb_folio(h, folio);
		return;
	}

	/*
	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
	 *
	 * Only call schedule_work() if hpage_freelist was previously
	 * empty. Otherwise, schedule_work() has already been called but the
	 * workfn hasn't retrieved the list yet.
	 */
	if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
		schedule_work(&free_hpage_work);
}

static void bulk_vmemmap_restore_error(struct hstate *h,
					struct list_head *folio_list,
					struct list_head *non_hvo_folios)
{
	struct folio *folio, *t_folio;

	if (!list_empty(non_hvo_folios)) {
		/*
		 * Free any restored hugetlb pages so that restore of the
		 * entire list can be retried.
		 * The idea is that in the common case of ENOMEM errors freeing
		 * hugetlb pages with vmemmap we will free up memory so that we
		 * can allocate vmemmap for more hugetlb pages.
		 */
		list_for_each_entry_safe(folio, t_folio, non_hvo_folios, lru) {
			list_del(&folio->lru);
			spin_lock_irq(&hugetlb_lock);
			__folio_clear_hugetlb(folio);
			spin_unlock_irq(&hugetlb_lock);
			update_and_free_hugetlb_folio(h, folio, false);
			cond_resched();
		}
	} else {
		/*
		 * In the case where there are no folios which can be
		 * immediately freed, we loop through the list trying to restore
		 * vmemmap individually in the hope that someone elsewhere may
		 * have done something to cause success (such as freeing some
		 * memory). If unable to restore a hugetlb page, the hugetlb
		 * page is made a surplus page and removed from the list.
		 * If we are able to restore vmemmap and free one hugetlb page,
		 * we quit processing the list to retry the bulk operation.
 */
		list_for_each_entry_safe(folio, t_folio, folio_list, lru)
			if (hugetlb_vmemmap_restore_folio(h, folio)) {
				list_del(&folio->lru);
				spin_lock_irq(&hugetlb_lock);
				add_hugetlb_folio(h, folio, true);
				spin_unlock_irq(&hugetlb_lock);
			} else {
				list_del(&folio->lru);
				spin_lock_irq(&hugetlb_lock);
				__folio_clear_hugetlb(folio);
				spin_unlock_irq(&hugetlb_lock);
				update_and_free_hugetlb_folio(h, folio, false);
				cond_resched();
				break;
			}
	}
}

static void update_and_free_pages_bulk(struct hstate *h,
					struct list_head *folio_list)
{
	long ret;
	struct folio *folio, *t_folio;
	LIST_HEAD(non_hvo_folios);

	/*
	 * First allocate required vmemmap (if necessary) for all folios.
	 * Carefully handle errors and free up any available hugetlb pages
	 * in an effort to make forward progress.
	 */
retry:
	ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
	if (ret < 0) {
		bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios);
		goto retry;
	}

	/*
	 * At this point, list should be empty, ret should be >= 0 and there
	 * should only be pages on the non_hvo_folios list.
	 * Do note that the non_hvo_folios list could be empty.
	 * Without HVO enabled, ret will be 0 and there is no need to call
	 * __folio_clear_hugetlb as this was done previously.
	 */
	VM_WARN_ON(!list_empty(folio_list));
	VM_WARN_ON(ret < 0);
	if (!list_empty(&non_hvo_folios) && ret) {
		spin_lock_irq(&hugetlb_lock);
		list_for_each_entry(folio, &non_hvo_folios, lru)
			__folio_clear_hugetlb(folio);
		spin_unlock_irq(&hugetlb_lock);
	}

	list_for_each_entry_safe(folio, t_folio, &non_hvo_folios, lru) {
		update_and_free_hugetlb_folio(h, folio, false);
		cond_resched();
	}
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

void free_huge_folio(struct folio *folio)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * generic mm code.
	 */
	struct hstate *h = folio_hstate(folio);
	int nid = folio_nid(folio);
	struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
	bool restore_reserve;
	unsigned long flags;

	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
	VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);

	hugetlb_set_folio_subpool(folio, NULL);
	if (folio_test_anon(folio))
		__ClearPageAnonExclusive(&folio->page);
	folio->mapping = NULL;
	restore_reserve = folio_test_hugetlb_restore_reserve(folio);
	folio_clear_hugetlb_restore_reserve(folio);

	/*
	 * If HPageRestoreReserve was set on page, page allocation consumed a
	 * reservation. If the page was associated with a subpool, there
	 * would have been a page reserved in the subpool before allocation
	 * via hugepage_subpool_get_pages(). Since we are 'restoring' the
	 * reservation, do not call hugepage_subpool_put_pages() as this will
	 * remove the reserved page from the subpool.
	 */
	if (!restore_reserve) {
		/*
		 * A return code of zero implies that the subpool will be
		 * under its minimum size if the reservation is not restored
		 * after the page is freed. Therefore, force restore_reserve
		 * operation.
1862 */ 1863 if (hugepage_subpool_put_pages(spool, 1) == 0) 1864 restore_reserve = true; 1865 } 1866 1867 spin_lock_irqsave(&hugetlb_lock, flags); 1868 folio_clear_hugetlb_migratable(folio); 1869 hugetlb_cgroup_uncharge_folio(hstate_index(h), 1870 pages_per_huge_page(h), folio); 1871 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), 1872 pages_per_huge_page(h), folio); 1873 lruvec_stat_mod_folio(folio, NR_HUGETLB, -pages_per_huge_page(h)); 1874 mem_cgroup_uncharge(folio); 1875 if (restore_reserve) 1876 h->resv_huge_pages++; 1877 1878 if (folio_test_hugetlb_temporary(folio)) { 1879 remove_hugetlb_folio(h, folio, false); 1880 spin_unlock_irqrestore(&hugetlb_lock, flags); 1881 update_and_free_hugetlb_folio(h, folio, true); 1882 } else if (h->surplus_huge_pages_node[nid]) { 1883 /* remove the page from active list */ 1884 remove_hugetlb_folio(h, folio, true); 1885 spin_unlock_irqrestore(&hugetlb_lock, flags); 1886 update_and_free_hugetlb_folio(h, folio, true); 1887 } else { 1888 arch_clear_hugetlb_flags(folio); 1889 enqueue_hugetlb_folio(h, folio); 1890 spin_unlock_irqrestore(&hugetlb_lock, flags); 1891 } 1892 } 1893 1894 /* 1895 * Must be called with the hugetlb lock held 1896 */ 1897 static void __prep_account_new_huge_page(struct hstate *h, int nid) 1898 { 1899 lockdep_assert_held(&hugetlb_lock); 1900 h->nr_huge_pages++; 1901 h->nr_huge_pages_node[nid]++; 1902 } 1903 1904 static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio) 1905 { 1906 __folio_set_hugetlb(folio); 1907 INIT_LIST_HEAD(&folio->lru); 1908 hugetlb_set_folio_subpool(folio, NULL); 1909 set_hugetlb_cgroup(folio, NULL); 1910 set_hugetlb_cgroup_rsvd(folio, NULL); 1911 } 1912 1913 static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio) 1914 { 1915 init_new_hugetlb_folio(h, folio); 1916 hugetlb_vmemmap_optimize_folio(h, folio); 1917 } 1918 1919 static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid) 1920 { 1921 __prep_new_hugetlb_folio(h, folio); 1922 spin_lock_irq(&hugetlb_lock); 1923 __prep_account_new_huge_page(h, nid); 1924 spin_unlock_irq(&hugetlb_lock); 1925 } 1926 1927 /* 1928 * Find and lock address space (mapping) in write mode. 1929 * 1930 * Upon entry, the folio is locked which means that folio_mapping() is 1931 * stable. Due to locking order, we can only trylock_write. If we can 1932 * not get the lock, simply return NULL to caller. 1933 */ 1934 struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio) 1935 { 1936 struct address_space *mapping = folio_mapping(folio); 1937 1938 if (!mapping) 1939 return mapping; 1940 1941 if (i_mmap_trylock_write(mapping)) 1942 return mapping; 1943 1944 return NULL; 1945 } 1946 1947 static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h, 1948 gfp_t gfp_mask, int nid, nodemask_t *nmask, 1949 nodemask_t *node_alloc_noretry) 1950 { 1951 int order = huge_page_order(h); 1952 struct folio *folio; 1953 bool alloc_try_hard = true; 1954 1955 /* 1956 * By default we always try hard to allocate the folio with 1957 * __GFP_RETRY_MAYFAIL flag. However, if we are allocating folios in 1958 * a loop (to adjust global huge page counts) and previous allocation 1959 * failed, do not continue to try hard on the same node. Use the 1960 * node_alloc_noretry bitmap to manage this state information. 
1961 */ 1962 if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry)) 1963 alloc_try_hard = false; 1964 if (alloc_try_hard) 1965 gfp_mask |= __GFP_RETRY_MAYFAIL; 1966 if (nid == NUMA_NO_NODE) 1967 nid = numa_mem_id(); 1968 1969 folio = (struct folio *)__alloc_frozen_pages(gfp_mask, order, nid, nmask); 1970 1971 /* 1972 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a 1973 * folio this indicates an overall state change. Clear bit so 1974 * that we resume normal 'try hard' allocations. 1975 */ 1976 if (node_alloc_noretry && folio && !alloc_try_hard) 1977 node_clear(nid, *node_alloc_noretry); 1978 1979 /* 1980 * If we tried hard to get a folio but failed, set bit so that 1981 * subsequent attempts will not try as hard until there is an 1982 * overall state change. 1983 */ 1984 if (node_alloc_noretry && !folio && alloc_try_hard) 1985 node_set(nid, *node_alloc_noretry); 1986 1987 if (!folio) { 1988 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); 1989 return NULL; 1990 } 1991 1992 __count_vm_event(HTLB_BUDDY_PGALLOC); 1993 return folio; 1994 } 1995 1996 static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h, 1997 gfp_t gfp_mask, int nid, nodemask_t *nmask, 1998 nodemask_t *node_alloc_noretry) 1999 { 2000 struct folio *folio; 2001 2002 if (hstate_is_gigantic(h)) 2003 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask); 2004 else 2005 folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, node_alloc_noretry); 2006 if (folio) 2007 init_new_hugetlb_folio(h, folio); 2008 return folio; 2009 } 2010 2011 /* 2012 * Common helper to allocate a fresh hugetlb page. All specific allocators 2013 * should use this function to get new hugetlb pages 2014 * 2015 * Note that returned page is 'frozen': ref count of head page and all tail 2016 * pages is zero. 2017 */ 2018 static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h, 2019 gfp_t gfp_mask, int nid, nodemask_t *nmask) 2020 { 2021 struct folio *folio; 2022 2023 if (hstate_is_gigantic(h)) 2024 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask); 2025 else 2026 folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, NULL); 2027 if (!folio) 2028 return NULL; 2029 2030 prep_new_hugetlb_folio(h, folio, folio_nid(folio)); 2031 return folio; 2032 } 2033 2034 static void prep_and_add_allocated_folios(struct hstate *h, 2035 struct list_head *folio_list) 2036 { 2037 unsigned long flags; 2038 struct folio *folio, *tmp_f; 2039 2040 /* Send list for bulk vmemmap optimization processing */ 2041 hugetlb_vmemmap_optimize_folios(h, folio_list); 2042 2043 /* Add all new pool pages to free lists in one lock cycle */ 2044 spin_lock_irqsave(&hugetlb_lock, flags); 2045 list_for_each_entry_safe(folio, tmp_f, folio_list, lru) { 2046 __prep_account_new_huge_page(h, folio_nid(folio)); 2047 enqueue_hugetlb_folio(h, folio); 2048 } 2049 spin_unlock_irqrestore(&hugetlb_lock, flags); 2050 } 2051 2052 /* 2053 * Allocates a fresh hugetlb page in a node interleaved manner. The page 2054 * will later be added to the appropriate hugetlb pool. 
 */
static struct folio *alloc_pool_huge_folio(struct hstate *h,
					nodemask_t *nodes_allowed,
					nodemask_t *node_alloc_noretry,
					int *next_node)
{
	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
	int nr_nodes, node;

	for_each_node_mask_to_alloc(next_node, nr_nodes, node, nodes_allowed) {
		struct folio *folio;

		folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, node,
					nodes_allowed, node_alloc_noretry);
		if (folio)
			return folio;
	}

	return NULL;
}

/*
 * Remove huge page from pool from next node to free. Attempt to keep
 * persistent huge pages more or less balanced over allowed nodes.
 * This routine only 'removes' the hugetlb page. The caller must make
 * an additional call to free the page to low level allocators.
 * Called with hugetlb_lock locked.
 */
static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
		nodemask_t *nodes_allowed, bool acct_surplus)
{
	int nr_nodes, node;
	struct folio *folio = NULL;

	lockdep_assert_held(&hugetlb_lock);
	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
		    !list_empty(&h->hugepage_freelists[node])) {
			folio = list_entry(h->hugepage_freelists[node].next,
					   struct folio, lru);
			remove_hugetlb_folio(h, folio, acct_surplus);
			break;
		}
	}

	return folio;
}

/*
 * Dissolve a given free hugetlb folio into free buddy pages. This function
 * does nothing for in-use hugetlb folios and non-hugetlb folios.
 * This function returns values like below:
 *
 *  -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
 *           when the system is under memory pressure and the feature of
 *           freeing unused vmemmap pages associated with each hugetlb page
 *           is enabled.
 *  -EBUSY:  failed to dissolve free hugepages or the hugepage is in-use
 *           (allocated or reserved.)
 *       0:  successfully dissolved free hugepages or the page is not a
 *           hugepage (considered as already dissolved)
 */
int dissolve_free_hugetlb_folio(struct folio *folio)
{
	int rc = -EBUSY;

retry:
	/* Not to disrupt normal path by vainly holding hugetlb_lock */
	if (!folio_test_hugetlb(folio))
		return 0;

	spin_lock_irq(&hugetlb_lock);
	if (!folio_test_hugetlb(folio)) {
		rc = 0;
		goto out;
	}

	if (!folio_ref_count(folio)) {
		struct hstate *h = folio_hstate(folio);
		bool adjust_surplus = false;

		if (!available_huge_pages(h))
			goto out;

		/*
		 * We should make sure that the page is already on the free list
		 * when it is dissolved.
		 */
		if (unlikely(!folio_test_hugetlb_freed(folio))) {
			spin_unlock_irq(&hugetlb_lock);
			cond_resched();

			/*
			 * Theoretically, we should return -EBUSY when we
			 * encounter this race. In fact, we have a chance to
			 * successfully dissolve the page if we retry, because
			 * the race window is quite small. Seizing this
			 * opportunity improves the success rate of dissolving
			 * the page.
			 */
			goto retry;
		}

		if (h->surplus_huge_pages_node[folio_nid(folio)])
			adjust_surplus = true;
		remove_hugetlb_folio(h, folio, adjust_surplus);
		h->max_huge_pages--;
		spin_unlock_irq(&hugetlb_lock);

		/*
		 * Normally update_and_free_hugetlb_folio will allocate required vmemmap
		 * before freeing the page. update_and_free_hugetlb_folio will fail to
		 * free the page if it can not allocate required vmemmap. We
		 * need to adjust max_huge_pages if the page is not freed.
		 * Attempt to allocate vmemmap here so that we can take
		 * appropriate action on failure.
		 *
		 * The folio_test_hugetlb check here is because
		 * remove_hugetlb_folio will clear hugetlb folio flag for
		 * non-vmemmap optimized hugetlb folios.
		 */
		if (folio_test_hugetlb(folio)) {
			rc = hugetlb_vmemmap_restore_folio(h, folio);
			if (rc) {
				spin_lock_irq(&hugetlb_lock);
				add_hugetlb_folio(h, folio, adjust_surplus);
				h->max_huge_pages++;
				goto out;
			}
		} else
			rc = 0;

		update_and_free_hugetlb_folio(h, folio, false);
		return rc;
	}
out:
	spin_unlock_irq(&hugetlb_lock);
	return rc;
}
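
/*
 * Illustrative sketch only (not part of this file): how a hypothetical
 * caller might consume the return values documented above. The helper
 * name try_dissolve_one() is made up for this example.
 *
 *	static int try_dissolve_one(struct folio *folio)
 *	{
 *		int rc = dissolve_free_hugetlb_folio(folio);
 *
 *		switch (rc) {
 *		case 0:		// dissolved, or was not a hugetlb folio
 *			return 0;
 *		case -EBUSY:	// in use or reserved; caller may retry later
 *		case -ENOMEM:	// vmemmap restore failed under memory pressure
 *		default:
 *			return rc;
 *		}
 *	}
 */
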
/*
 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that this will dissolve a free gigantic hugepage completely, if any
 * part of it lies within the given range.
 * Also note that if dissolve_free_hugetlb_folio() returns with an error, all
 * free hugetlb folios that were dissolved before that error are lost.
 */
int dissolve_free_hugetlb_folios(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct folio *folio;
	int rc = 0;
	unsigned int order;
	struct hstate *h;

	if (!hugepages_supported())
		return rc;

	order = huge_page_order(&default_hstate);
	for_each_hstate(h)
		order = min(order, huge_page_order(h));

	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
		folio = pfn_folio(pfn);
		rc = dissolve_free_hugetlb_folio(folio);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Allocates a fresh surplus page from the page allocator.
 */
static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
				gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
	struct folio *folio = NULL;

	if (hstate_is_gigantic(h))
		return NULL;

	spin_lock_irq(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
		goto out_unlock;
	spin_unlock_irq(&hugetlb_lock);

	folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
	if (!folio)
		return NULL;

	hugetlb_vmemmap_optimize_folio(h, folio);

	spin_lock_irq(&hugetlb_lock);
	/*
	 * nr_huge_pages needs to be adjusted within the same lock cycle
	 * as surplus_pages, otherwise it might confuse
	 * persistent_huge_pages() momentarily.
	 */
	__prep_account_new_huge_page(h, folio_nid(folio));

	/*
	 * We could have raced with the pool size change.
	 * Double check that and simply deallocate the new page
	 * if we would end up overcommitting the surpluses.
Abuse 2266 * temporary page to workaround the nasty free_huge_folio 2267 * codeflow 2268 */ 2269 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { 2270 folio_set_hugetlb_temporary(folio); 2271 spin_unlock_irq(&hugetlb_lock); 2272 free_huge_folio(folio); 2273 return NULL; 2274 } 2275 2276 h->surplus_huge_pages++; 2277 h->surplus_huge_pages_node[folio_nid(folio)]++; 2278 2279 out_unlock: 2280 spin_unlock_irq(&hugetlb_lock); 2281 2282 return folio; 2283 } 2284 2285 static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, 2286 int nid, nodemask_t *nmask) 2287 { 2288 struct folio *folio; 2289 2290 if (hstate_is_gigantic(h)) 2291 return NULL; 2292 2293 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask); 2294 if (!folio) 2295 return NULL; 2296 2297 /* fresh huge pages are frozen */ 2298 folio_ref_unfreeze(folio, 1); 2299 /* 2300 * We do not account these pages as surplus because they are only 2301 * temporary and will be released properly on the last reference 2302 */ 2303 folio_set_hugetlb_temporary(folio); 2304 2305 return folio; 2306 } 2307 2308 /* 2309 * Use the VMA's mpolicy to allocate a huge page from the buddy. 2310 */ 2311 static 2312 struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h, 2313 struct vm_area_struct *vma, unsigned long addr) 2314 { 2315 struct folio *folio = NULL; 2316 struct mempolicy *mpol; 2317 gfp_t gfp_mask = htlb_alloc_mask(h); 2318 int nid; 2319 nodemask_t *nodemask; 2320 2321 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask); 2322 if (mpol_is_preferred_many(mpol)) { 2323 gfp_t gfp = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 2324 2325 folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask); 2326 2327 /* Fallback to all nodes if page==NULL */ 2328 nodemask = NULL; 2329 } 2330 2331 if (!folio) 2332 folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask); 2333 mpol_cond_put(mpol); 2334 return folio; 2335 } 2336 2337 struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid, 2338 nodemask_t *nmask, gfp_t gfp_mask) 2339 { 2340 struct folio *folio; 2341 2342 spin_lock_irq(&hugetlb_lock); 2343 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, preferred_nid, 2344 nmask); 2345 if (folio) { 2346 VM_BUG_ON(!h->resv_huge_pages); 2347 h->resv_huge_pages--; 2348 } 2349 2350 spin_unlock_irq(&hugetlb_lock); 2351 return folio; 2352 } 2353 2354 /* folio migration callback function */ 2355 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, 2356 nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback) 2357 { 2358 spin_lock_irq(&hugetlb_lock); 2359 if (available_huge_pages(h)) { 2360 struct folio *folio; 2361 2362 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, 2363 preferred_nid, nmask); 2364 if (folio) { 2365 spin_unlock_irq(&hugetlb_lock); 2366 return folio; 2367 } 2368 } 2369 spin_unlock_irq(&hugetlb_lock); 2370 2371 /* We cannot fallback to other nodes, as we could break the per-node pool. 
*/ 2372 if (!allow_alloc_fallback) 2373 gfp_mask |= __GFP_THISNODE; 2374 2375 return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask); 2376 } 2377 2378 static nodemask_t *policy_mbind_nodemask(gfp_t gfp) 2379 { 2380 #ifdef CONFIG_NUMA 2381 struct mempolicy *mpol = get_task_policy(current); 2382 2383 /* 2384 * Only enforce MPOL_BIND policy which overlaps with cpuset policy 2385 * (from policy_nodemask) specifically for hugetlb case 2386 */ 2387 if (mpol->mode == MPOL_BIND && 2388 (apply_policy_zone(mpol, gfp_zone(gfp)) && 2389 cpuset_nodemask_valid_mems_allowed(&mpol->nodes))) 2390 return &mpol->nodes; 2391 #endif 2392 return NULL; 2393 } 2394 2395 /* 2396 * Increase the hugetlb pool such that it can accommodate a reservation 2397 * of size 'delta'. 2398 */ 2399 static int gather_surplus_pages(struct hstate *h, long delta) 2400 __must_hold(&hugetlb_lock) 2401 { 2402 LIST_HEAD(surplus_list); 2403 struct folio *folio, *tmp; 2404 int ret; 2405 long i; 2406 long needed, allocated; 2407 bool alloc_ok = true; 2408 nodemask_t *mbind_nodemask, alloc_nodemask; 2409 2410 mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h)); 2411 if (mbind_nodemask) 2412 nodes_and(alloc_nodemask, *mbind_nodemask, cpuset_current_mems_allowed); 2413 else 2414 alloc_nodemask = cpuset_current_mems_allowed; 2415 2416 lockdep_assert_held(&hugetlb_lock); 2417 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; 2418 if (needed <= 0) { 2419 h->resv_huge_pages += delta; 2420 return 0; 2421 } 2422 2423 allocated = 0; 2424 2425 ret = -ENOMEM; 2426 retry: 2427 spin_unlock_irq(&hugetlb_lock); 2428 for (i = 0; i < needed; i++) { 2429 folio = NULL; 2430 2431 /* 2432 * It is okay to use NUMA_NO_NODE because we use numa_mem_id() 2433 * down the road to pick the current node if that is the case. 2434 */ 2435 folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), 2436 NUMA_NO_NODE, &alloc_nodemask); 2437 if (!folio) { 2438 alloc_ok = false; 2439 break; 2440 } 2441 list_add(&folio->lru, &surplus_list); 2442 cond_resched(); 2443 } 2444 allocated += i; 2445 2446 /* 2447 * After retaking hugetlb_lock, we need to recalculate 'needed' 2448 * because either resv_huge_pages or free_huge_pages may have changed. 2449 */ 2450 spin_lock_irq(&hugetlb_lock); 2451 needed = (h->resv_huge_pages + delta) - 2452 (h->free_huge_pages + allocated); 2453 if (needed > 0) { 2454 if (alloc_ok) 2455 goto retry; 2456 /* 2457 * We were not able to allocate enough pages to 2458 * satisfy the entire reservation so we free what 2459 * we've allocated so far. 2460 */ 2461 goto free; 2462 } 2463 /* 2464 * The surplus_list now contains _at_least_ the number of extra pages 2465 * needed to accommodate the reservation. Add the appropriate number 2466 * of pages to the hugetlb pool and free the extras back to the buddy 2467 * allocator. Commit the entire reservation here to prevent another 2468 * process from stealing the pages as they are added to the pool but 2469 * before they are reserved. 2470 */ 2471 needed += allocated; 2472 h->resv_huge_pages += delta; 2473 ret = 0; 2474 2475 /* Free the needed pages to the hugetlb pool */ 2476 list_for_each_entry_safe(folio, tmp, &surplus_list, lru) { 2477 if ((--needed) < 0) 2478 break; 2479 /* Add the page to the hugetlb allocator */ 2480 enqueue_hugetlb_folio(h, folio); 2481 } 2482 free: 2483 spin_unlock_irq(&hugetlb_lock); 2484 2485 /* 2486 * Free unnecessary surplus pages to the buddy allocator. 2487 * Pages have no ref count, call free_huge_folio directly. 
2488 */ 2489 list_for_each_entry_safe(folio, tmp, &surplus_list, lru) 2490 free_huge_folio(folio); 2491 spin_lock_irq(&hugetlb_lock); 2492 2493 return ret; 2494 } 2495 2496 /* 2497 * This routine has two main purposes: 2498 * 1) Decrement the reservation count (resv_huge_pages) by the value passed 2499 * in unused_resv_pages. This corresponds to the prior adjustments made 2500 * to the associated reservation map. 2501 * 2) Free any unused surplus pages that may have been allocated to satisfy 2502 * the reservation. As many as unused_resv_pages may be freed. 2503 */ 2504 static void return_unused_surplus_pages(struct hstate *h, 2505 unsigned long unused_resv_pages) 2506 { 2507 unsigned long nr_pages; 2508 LIST_HEAD(page_list); 2509 2510 lockdep_assert_held(&hugetlb_lock); 2511 /* Uncommit the reservation */ 2512 h->resv_huge_pages -= unused_resv_pages; 2513 2514 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 2515 goto out; 2516 2517 /* 2518 * Part (or even all) of the reservation could have been backed 2519 * by pre-allocated pages. Only free surplus pages. 2520 */ 2521 nr_pages = min(unused_resv_pages, h->surplus_huge_pages); 2522 2523 /* 2524 * We want to release as many surplus pages as possible, spread 2525 * evenly across all nodes with memory. Iterate across these nodes 2526 * until we can no longer free unreserved surplus pages. This occurs 2527 * when the nodes with surplus pages have no free pages. 2528 * remove_pool_hugetlb_folio() will balance the freed pages across the 2529 * on-line nodes with memory and will handle the hstate accounting. 2530 */ 2531 while (nr_pages--) { 2532 struct folio *folio; 2533 2534 folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], 1); 2535 if (!folio) 2536 goto out; 2537 2538 list_add(&folio->lru, &page_list); 2539 } 2540 2541 out: 2542 spin_unlock_irq(&hugetlb_lock); 2543 update_and_free_pages_bulk(h, &page_list); 2544 spin_lock_irq(&hugetlb_lock); 2545 } 2546 2547 2548 /* 2549 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation 2550 * are used by the huge page allocation routines to manage reservations. 2551 * 2552 * vma_needs_reservation is called to determine if the huge page at addr 2553 * within the vma has an associated reservation. If a reservation is 2554 * needed, the value 1 is returned. The caller is then responsible for 2555 * managing the global reservation and subpool usage counts. After 2556 * the huge page has been allocated, vma_commit_reservation is called 2557 * to add the page to the reservation map. If the page allocation fails, 2558 * the reservation must be ended instead of committed. vma_end_reservation 2559 * is called in such cases. 2560 * 2561 * In the normal case, vma_commit_reservation returns the same value 2562 * as the preceding vma_needs_reservation call. The only time this 2563 * is not the case is if a reserve map was changed between calls. It 2564 * is the responsibility of the caller to notice the difference and 2565 * take appropriate action. 2566 * 2567 * vma_add_reservation is used in error paths where a reservation must 2568 * be restored when a newly allocated huge page must be freed. It is 2569 * to be called after calling vma_needs_reservation to determine if a 2570 * reservation exists. 2571 * 2572 * vma_del_reservation is used in error paths where an entry in the reserve 2573 * map was created during huge page allocation and must be removed. It is to 2574 * be called after calling vma_needs_reservation to determine if a reservation 2575 * exists. 
2576 */ 2577 enum vma_resv_mode { 2578 VMA_NEEDS_RESV, 2579 VMA_COMMIT_RESV, 2580 VMA_END_RESV, 2581 VMA_ADD_RESV, 2582 VMA_DEL_RESV, 2583 }; 2584 static long __vma_reservation_common(struct hstate *h, 2585 struct vm_area_struct *vma, unsigned long addr, 2586 enum vma_resv_mode mode) 2587 { 2588 struct resv_map *resv; 2589 pgoff_t idx; 2590 long ret; 2591 long dummy_out_regions_needed; 2592 2593 resv = vma_resv_map(vma); 2594 if (!resv) 2595 return 1; 2596 2597 idx = vma_hugecache_offset(h, vma, addr); 2598 switch (mode) { 2599 case VMA_NEEDS_RESV: 2600 ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed); 2601 /* We assume that vma_reservation_* routines always operate on 2602 * 1 page, and that adding to resv map a 1 page entry can only 2603 * ever require 1 region. 2604 */ 2605 VM_BUG_ON(dummy_out_regions_needed != 1); 2606 break; 2607 case VMA_COMMIT_RESV: 2608 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2609 /* region_add calls of range 1 should never fail. */ 2610 VM_BUG_ON(ret < 0); 2611 break; 2612 case VMA_END_RESV: 2613 region_abort(resv, idx, idx + 1, 1); 2614 ret = 0; 2615 break; 2616 case VMA_ADD_RESV: 2617 if (vma->vm_flags & VM_MAYSHARE) { 2618 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2619 /* region_add calls of range 1 should never fail. */ 2620 VM_BUG_ON(ret < 0); 2621 } else { 2622 region_abort(resv, idx, idx + 1, 1); 2623 ret = region_del(resv, idx, idx + 1); 2624 } 2625 break; 2626 case VMA_DEL_RESV: 2627 if (vma->vm_flags & VM_MAYSHARE) { 2628 region_abort(resv, idx, idx + 1, 1); 2629 ret = region_del(resv, idx, idx + 1); 2630 } else { 2631 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2632 /* region_add calls of range 1 should never fail. */ 2633 VM_BUG_ON(ret < 0); 2634 } 2635 break; 2636 default: 2637 BUG(); 2638 } 2639 2640 if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV) 2641 return ret; 2642 /* 2643 * We know private mapping must have HPAGE_RESV_OWNER set. 2644 * 2645 * In most cases, reserves always exist for private mappings. 2646 * However, a file associated with mapping could have been 2647 * hole punched or truncated after reserves were consumed. 2648 * As subsequent fault on such a range will not use reserves. 2649 * Subtle - The reserve map for private mappings has the 2650 * opposite meaning than that of shared mappings. If NO 2651 * entry is in the reserve map, it means a reservation exists. 2652 * If an entry exists in the reserve map, it means the 2653 * reservation has already been consumed. As a result, the 2654 * return value of this routine is the opposite of the 2655 * value returned from reserve map manipulation routines above. 
2656 */ 2657 if (ret > 0) 2658 return 0; 2659 if (ret == 0) 2660 return 1; 2661 return ret; 2662 } 2663 2664 static long vma_needs_reservation(struct hstate *h, 2665 struct vm_area_struct *vma, unsigned long addr) 2666 { 2667 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); 2668 } 2669 2670 static long vma_commit_reservation(struct hstate *h, 2671 struct vm_area_struct *vma, unsigned long addr) 2672 { 2673 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); 2674 } 2675 2676 static void vma_end_reservation(struct hstate *h, 2677 struct vm_area_struct *vma, unsigned long addr) 2678 { 2679 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); 2680 } 2681 2682 static long vma_add_reservation(struct hstate *h, 2683 struct vm_area_struct *vma, unsigned long addr) 2684 { 2685 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); 2686 } 2687 2688 static long vma_del_reservation(struct hstate *h, 2689 struct vm_area_struct *vma, unsigned long addr) 2690 { 2691 return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV); 2692 } 2693 2694 /* 2695 * This routine is called to restore reservation information on error paths. 2696 * It should ONLY be called for folios allocated via alloc_hugetlb_folio(), 2697 * and the hugetlb mutex should remain held when calling this routine. 2698 * 2699 * It handles two specific cases: 2700 * 1) A reservation was in place and the folio consumed the reservation. 2701 * hugetlb_restore_reserve is set in the folio. 2702 * 2) No reservation was in place for the page, so hugetlb_restore_reserve is 2703 * not set. However, alloc_hugetlb_folio always updates the reserve map. 2704 * 2705 * In case 1, free_huge_folio later in the error path will increment the 2706 * global reserve count. But, free_huge_folio does not have enough context 2707 * to adjust the reservation map. This case deals primarily with private 2708 * mappings. Adjust the reserve map here to be consistent with global 2709 * reserve count adjustments to be made by free_huge_folio. Make sure the 2710 * reserve map indicates there is a reservation present. 2711 * 2712 * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio. 2713 */ 2714 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, 2715 unsigned long address, struct folio *folio) 2716 { 2717 long rc = vma_needs_reservation(h, vma, address); 2718 2719 if (folio_test_hugetlb_restore_reserve(folio)) { 2720 if (unlikely(rc < 0)) 2721 /* 2722 * Rare out of memory condition in reserve map 2723 * manipulation. Clear hugetlb_restore_reserve so 2724 * that global reserve count will not be incremented 2725 * by free_huge_folio. This will make it appear 2726 * as though the reservation for this folio was 2727 * consumed. This may prevent the task from 2728 * faulting in the folio at a later time. This 2729 * is better than inconsistent global huge page 2730 * accounting of reserve counts. 2731 */ 2732 folio_clear_hugetlb_restore_reserve(folio); 2733 else if (rc) 2734 (void)vma_add_reservation(h, vma, address); 2735 else 2736 vma_end_reservation(h, vma, address); 2737 } else { 2738 if (!rc) { 2739 /* 2740 * This indicates there is an entry in the reserve map 2741 * not added by alloc_hugetlb_folio. We know it was added 2742 * before the alloc_hugetlb_folio call, otherwise 2743 * hugetlb_restore_reserve would be set on the folio. 2744 * Remove the entry so that a subsequent allocation 2745 * does not consume a reservation. 
2746 */ 2747 rc = vma_del_reservation(h, vma, address); 2748 if (rc < 0) 2749 /* 2750 * VERY rare out of memory condition. Since 2751 * we can not delete the entry, set 2752 * hugetlb_restore_reserve so that the reserve 2753 * count will be incremented when the folio 2754 * is freed. This reserve will be consumed 2755 * on a subsequent allocation. 2756 */ 2757 folio_set_hugetlb_restore_reserve(folio); 2758 } else if (rc < 0) { 2759 /* 2760 * Rare out of memory condition from 2761 * vma_needs_reservation call. Memory allocation is 2762 * only attempted if a new entry is needed. Therefore, 2763 * this implies there is not an entry in the 2764 * reserve map. 2765 * 2766 * For shared mappings, no entry in the map indicates 2767 * no reservation. We are done. 2768 */ 2769 if (!(vma->vm_flags & VM_MAYSHARE)) 2770 /* 2771 * For private mappings, no entry indicates 2772 * a reservation is present. Since we can 2773 * not add an entry, set hugetlb_restore_reserve 2774 * on the folio so reserve count will be 2775 * incremented when freed. This reserve will 2776 * be consumed on a subsequent allocation. 2777 */ 2778 folio_set_hugetlb_restore_reserve(folio); 2779 } else 2780 /* 2781 * No reservation present, do nothing 2782 */ 2783 vma_end_reservation(h, vma, address); 2784 } 2785 } 2786 2787 /* 2788 * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve 2789 * the old one 2790 * @h: struct hstate old page belongs to 2791 * @old_folio: Old folio to dissolve 2792 * @list: List to isolate the page in case we need to 2793 * Returns 0 on success, otherwise negated error. 2794 */ 2795 static int alloc_and_dissolve_hugetlb_folio(struct hstate *h, 2796 struct folio *old_folio, struct list_head *list) 2797 { 2798 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 2799 int nid = folio_nid(old_folio); 2800 struct folio *new_folio = NULL; 2801 int ret = 0; 2802 2803 retry: 2804 spin_lock_irq(&hugetlb_lock); 2805 if (!folio_test_hugetlb(old_folio)) { 2806 /* 2807 * Freed from under us. Drop new_folio too. 2808 */ 2809 goto free_new; 2810 } else if (folio_ref_count(old_folio)) { 2811 bool isolated; 2812 2813 /* 2814 * Someone has grabbed the folio, try to isolate it here. 2815 * Fail with -EBUSY if not possible. 2816 */ 2817 spin_unlock_irq(&hugetlb_lock); 2818 isolated = folio_isolate_hugetlb(old_folio, list); 2819 ret = isolated ? 0 : -EBUSY; 2820 spin_lock_irq(&hugetlb_lock); 2821 goto free_new; 2822 } else if (!folio_test_hugetlb_freed(old_folio)) { 2823 /* 2824 * Folio's refcount is 0 but it has not been enqueued in the 2825 * freelist yet. Race window is small, so we can succeed here if 2826 * we retry. 2827 */ 2828 spin_unlock_irq(&hugetlb_lock); 2829 cond_resched(); 2830 goto retry; 2831 } else { 2832 if (!new_folio) { 2833 spin_unlock_irq(&hugetlb_lock); 2834 new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, 2835 NULL, NULL); 2836 if (!new_folio) 2837 return -ENOMEM; 2838 __prep_new_hugetlb_folio(h, new_folio); 2839 goto retry; 2840 } 2841 2842 /* 2843 * Ok, old_folio is still a genuine free hugepage. Remove it from 2844 * the freelist and decrease the counters. These will be 2845 * incremented again when calling __prep_account_new_huge_page() 2846 * and enqueue_hugetlb_folio() for new_folio. The counters will 2847 * remain stable since this happens under the lock. 2848 */ 2849 remove_hugetlb_folio(h, old_folio, false); 2850 2851 /* 2852 * Ref count on new_folio is already zero as it was dropped 2853 * earlier. It can be directly added to the pool free list. 
 */
		__prep_account_new_huge_page(h, nid);
		enqueue_hugetlb_folio(h, new_folio);

		/*
		 * Folio has been replaced, we can safely free the old one.
		 */
		spin_unlock_irq(&hugetlb_lock);
		update_and_free_hugetlb_folio(h, old_folio, false);
	}

	return ret;

free_new:
	spin_unlock_irq(&hugetlb_lock);
	if (new_folio)
		update_and_free_hugetlb_folio(h, new_folio, false);

	return ret;
}

int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
{
	struct hstate *h;
	int ret = -EBUSY;

	/*
	 * The page might have been dissolved from under our feet, so make sure
	 * to carefully check the state under the lock.
	 * Return success when racing as if we dissolved the page ourselves.
	 */
	spin_lock_irq(&hugetlb_lock);
	if (folio_test_hugetlb(folio)) {
		h = folio_hstate(folio);
	} else {
		spin_unlock_irq(&hugetlb_lock);
		return 0;
	}
	spin_unlock_irq(&hugetlb_lock);

	/*
	 * Fence off gigantic pages as there is a cyclic dependency between
	 * alloc_contig_range and them. Return -ENOMEM as this has the effect
	 * of bailing out right away without further retrying.
	 */
	if (hstate_is_gigantic(h))
		return -ENOMEM;

	if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list))
		ret = 0;
	else if (!folio_ref_count(folio))
		ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);

	return ret;
}

/*
 * replace_free_hugepage_folios - Replace free hugepage folios in a given pfn
 * range with new folios.
 * @start_pfn: start pfn of the given pfn range
 * @end_pfn: end pfn of the given pfn range
 * Returns 0 on success, otherwise negated error.
 */
int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
{
	struct hstate *h;
	struct folio *folio;
	int ret = 0;

	LIST_HEAD(isolate_list);

	while (start_pfn < end_pfn) {
		folio = pfn_folio(start_pfn);

		/*
		 * The folio might have been dissolved from under our feet, so make sure
		 * to carefully check the state under the lock.
		 */
		spin_lock_irq(&hugetlb_lock);
		if (folio_test_hugetlb(folio)) {
			h = folio_hstate(folio);
		} else {
			spin_unlock_irq(&hugetlb_lock);
			start_pfn++;
			continue;
		}
		spin_unlock_irq(&hugetlb_lock);

		if (!folio_ref_count(folio)) {
			ret = alloc_and_dissolve_hugetlb_folio(h, folio,
							       &isolate_list);
			if (ret)
				break;

			putback_movable_pages(&isolate_list);
		}
		start_pfn++;
	}

	return ret;
}

void wait_for_freed_hugetlb_folios(void)
{
	if (llist_empty(&hpage_freelist))
		return;

	flush_work(&free_hpage_work);
}
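
/*
 * Illustrative sketch only (not a real call site): a simplified view of the
 * reservation protocol documented earlier and followed by
 * alloc_hugetlb_folio() below for the non-enforced cases. The folio
 * allocation step is elided.
 *
 *	long chg = vma_needs_reservation(h, vma, addr);
 *
 *	if (chg < 0)
 *		return ERR_PTR(-ENOMEM);	// reserve map allocation failed
 *	folio = ...;				// allocate; chg == 0 means a reservation exists
 *	if (folio)
 *		vma_commit_reservation(h, vma, addr);
 *	else
 *		vma_end_reservation(h, vma, addr);
 */
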
typedef enum {
	/*
	 * For either 0/1: we checked the per-vma resv map, and one resv
	 * count either can be reused (0), or an extra needed (1).
	 */
	MAP_CHG_REUSE = 0,
	MAP_CHG_NEEDED = 1,
	/*
	 * The per-vma resv count cannot be used, hence a new resv
	 * count is enforced.
	 *
	 * NOTE: This is mostly identical to MAP_CHG_NEEDED, except
	 * that currently vma_needs_reservation() has an unwanted side
	 * effect to either use end() or commit() to complete the
	 * transaction. Hence it needs to differentiate from NEEDED.
	 */
	MAP_CHG_ENFORCED = 2,
} map_chg_state;

/*
 * NOTE! "cow_from_owner" represents a very hacky usage only used in CoW
 * faults of hugetlb private mappings on top of a non-page-cache folio (in
 * which case even if there's a private vma resv map it won't cover such
 * allocation). New call sites should (probably) never set it to true!!
 * When it's set, the allocation will bypass all vma level reservations.
 */
struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
				  unsigned long addr, bool cow_from_owner)
{
	struct hugepage_subpool *spool = subpool_vma(vma);
	struct hstate *h = hstate_vma(vma);
	struct folio *folio;
	long retval, gbl_chg, gbl_reserve;
	map_chg_state map_chg;
	int ret, idx;
	struct hugetlb_cgroup *h_cg = NULL;
	gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL;

	idx = hstate_index(h);

	/* Do we need a separate per-vma reservation? */
	if (cow_from_owner) {
		/*
		 * Special case! Since it's a CoW on top of a reserved
		 * page, the private resv map doesn't count. So it cannot
		 * consume the per-vma resv map even if it's reserved.
		 */
		map_chg = MAP_CHG_ENFORCED;
	} else {
		/*
		 * Examine the region/reserve map to determine if the process
		 * has a reservation for the page to be allocated. A return
		 * code of zero indicates a reservation exists (no change).
		 */
		retval = vma_needs_reservation(h, vma, addr);
		if (retval < 0)
			return ERR_PTR(-ENOMEM);
		map_chg = retval ? MAP_CHG_NEEDED : MAP_CHG_REUSE;
	}

	/*
	 * Do we need a separate global reservation?
	 *
	 * Processes that did not create the mapping will have no
	 * reserves as indicated by the region/reserve map. Check
	 * that the allocation will not exceed the subpool limit.
	 * Or if it can get one from the pool reservation directly.
	 */
	if (map_chg) {
		gbl_chg = hugepage_subpool_get_pages(spool, 1);
		if (gbl_chg < 0)
			goto out_end_reservation;
	} else {
		/*
		 * If we have the vma reservation ready, no need for extra
		 * global reservation.
		 */
		gbl_chg = 0;
	}

	/*
	 * If this allocation is not consuming a per-vma reservation,
	 * charge the hugetlb cgroup now.
	 */
	if (map_chg) {
		ret = hugetlb_cgroup_charge_cgroup_rsvd(
			idx, pages_per_huge_page(h), &h_cg);
		if (ret)
			goto out_subpool_put;
	}

	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
	if (ret)
		goto out_uncharge_cgroup_reservation;

	spin_lock_irq(&hugetlb_lock);
	/*
	 * gbl_chg is passed to indicate whether or not a page must be taken
	 * from the global free pool (global change). gbl_chg == 0 indicates
	 * a reservation exists for the allocation.
	 */
	folio = dequeue_hugetlb_folio_vma(h, vma, addr, gbl_chg);
	if (!folio) {
		spin_unlock_irq(&hugetlb_lock);
		folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
		if (!folio)
			goto out_uncharge_cgroup;
		spin_lock_irq(&hugetlb_lock);
		list_add(&folio->lru, &h->hugepage_activelist);
		folio_ref_unfreeze(folio, 1);
		/* Fall through */
	}

	/*
	 * Either dequeued or buddy-allocated folio needs to add special
	 * mark to the folio when it consumes a global reservation.
3080 */ 3081 if (!gbl_chg) { 3082 folio_set_hugetlb_restore_reserve(folio); 3083 h->resv_huge_pages--; 3084 } 3085 3086 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio); 3087 /* If allocation is not consuming a reservation, also store the 3088 * hugetlb_cgroup pointer on the page. 3089 */ 3090 if (map_chg) { 3091 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h), 3092 h_cg, folio); 3093 } 3094 3095 spin_unlock_irq(&hugetlb_lock); 3096 3097 hugetlb_set_folio_subpool(folio, spool); 3098 3099 if (map_chg != MAP_CHG_ENFORCED) { 3100 /* commit() is only needed if the map_chg is not enforced */ 3101 retval = vma_commit_reservation(h, vma, addr); 3102 /* 3103 * Check for possible race conditions. When it happens.. 3104 * The page was added to the reservation map between 3105 * vma_needs_reservation and vma_commit_reservation. 3106 * This indicates a race with hugetlb_reserve_pages. 3107 * Adjust for the subpool count incremented above AND 3108 * in hugetlb_reserve_pages for the same page. Also, 3109 * the reservation count added in hugetlb_reserve_pages 3110 * no longer applies. 3111 */ 3112 if (unlikely(map_chg == MAP_CHG_NEEDED && retval == 0)) { 3113 long rsv_adjust; 3114 3115 rsv_adjust = hugepage_subpool_put_pages(spool, 1); 3116 hugetlb_acct_memory(h, -rsv_adjust); 3117 if (map_chg) { 3118 spin_lock_irq(&hugetlb_lock); 3119 hugetlb_cgroup_uncharge_folio_rsvd( 3120 hstate_index(h), pages_per_huge_page(h), 3121 folio); 3122 spin_unlock_irq(&hugetlb_lock); 3123 } 3124 } 3125 } 3126 3127 ret = mem_cgroup_charge_hugetlb(folio, gfp); 3128 /* 3129 * Unconditionally increment NR_HUGETLB here. If it turns out that 3130 * mem_cgroup_charge_hugetlb failed, then immediately free the page and 3131 * decrement NR_HUGETLB. 3132 */ 3133 lruvec_stat_mod_folio(folio, NR_HUGETLB, pages_per_huge_page(h)); 3134 3135 if (ret == -ENOMEM) { 3136 free_huge_folio(folio); 3137 return ERR_PTR(-ENOMEM); 3138 } 3139 3140 return folio; 3141 3142 out_uncharge_cgroup: 3143 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); 3144 out_uncharge_cgroup_reservation: 3145 if (map_chg) 3146 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h), 3147 h_cg); 3148 out_subpool_put: 3149 /* 3150 * put page to subpool iff the quota of subpool's rsv_hpages is used 3151 * during hugepage_subpool_get_pages. 3152 */ 3153 if (map_chg && !gbl_chg) { 3154 gbl_reserve = hugepage_subpool_put_pages(spool, 1); 3155 hugetlb_acct_memory(h, -gbl_reserve); 3156 } 3157 3158 3159 out_end_reservation: 3160 if (map_chg != MAP_CHG_ENFORCED) 3161 vma_end_reservation(h, vma, addr); 3162 return ERR_PTR(-ENOSPC); 3163 } 3164 3165 static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact) 3166 { 3167 struct huge_bootmem_page *m; 3168 int listnode = nid; 3169 3170 if (hugetlb_early_cma(h)) 3171 m = hugetlb_cma_alloc_bootmem(h, &listnode, node_exact); 3172 else { 3173 if (node_exact) 3174 m = memblock_alloc_exact_nid_raw(huge_page_size(h), 3175 huge_page_size(h), 0, 3176 MEMBLOCK_ALLOC_ACCESSIBLE, nid); 3177 else { 3178 m = memblock_alloc_try_nid_raw(huge_page_size(h), 3179 huge_page_size(h), 0, 3180 MEMBLOCK_ALLOC_ACCESSIBLE, nid); 3181 /* 3182 * For pre-HVO to work correctly, pages need to be on 3183 * the list for the node they were actually allocated 3184 * from. That node may be different in the case of 3185 * fallback by memblock_alloc_try_nid_raw. So, 3186 * extract the actual node first. 
3187 */ 3188 if (m) 3189 listnode = early_pfn_to_nid(PHYS_PFN(virt_to_phys(m))); 3190 } 3191 3192 if (m) { 3193 m->flags = 0; 3194 m->cma = NULL; 3195 } 3196 } 3197 3198 if (m) { 3199 /* 3200 * Use the beginning of the huge page to store the 3201 * huge_bootmem_page struct (until gather_bootmem 3202 * puts them into the mem_map). 3203 * 3204 * Put them into a private list first because mem_map 3205 * is not up yet. 3206 */ 3207 INIT_LIST_HEAD(&m->list); 3208 list_add(&m->list, &huge_boot_pages[listnode]); 3209 m->hstate = h; 3210 } 3211 3212 return m; 3213 } 3214 3215 int alloc_bootmem_huge_page(struct hstate *h, int nid) 3216 __attribute__ ((weak, alias("__alloc_bootmem_huge_page"))); 3217 int __alloc_bootmem_huge_page(struct hstate *h, int nid) 3218 { 3219 struct huge_bootmem_page *m = NULL; /* initialize for clang */ 3220 int nr_nodes, node = nid; 3221 3222 /* do node specific alloc */ 3223 if (nid != NUMA_NO_NODE) { 3224 m = alloc_bootmem(h, node, true); 3225 if (!m) 3226 return 0; 3227 goto found; 3228 } 3229 3230 /* allocate from next node when distributing huge pages */ 3231 for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, 3232 &hugetlb_bootmem_nodes) { 3233 m = alloc_bootmem(h, node, false); 3234 if (!m) 3235 return 0; 3236 goto found; 3237 } 3238 3239 found: 3240 3241 /* 3242 * Only initialize the head struct page in memmap_init_reserved_pages, 3243 * rest of the struct pages will be initialized by the HugeTLB 3244 * subsystem itself. 3245 * The head struct page is used to get folio information by the HugeTLB 3246 * subsystem like zone id and node id. 3247 */ 3248 memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE), 3249 huge_page_size(h) - PAGE_SIZE); 3250 3251 return 1; 3252 } 3253 3254 /* Initialize [start_page:end_page_number] tail struct pages of a hugepage */ 3255 static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio, 3256 unsigned long start_page_number, 3257 unsigned long end_page_number) 3258 { 3259 enum zone_type zone = zone_idx(folio_zone(folio)); 3260 int nid = folio_nid(folio); 3261 unsigned long head_pfn = folio_pfn(folio); 3262 unsigned long pfn, end_pfn = head_pfn + end_page_number; 3263 int ret; 3264 3265 for (pfn = head_pfn + start_page_number; pfn < end_pfn; pfn++) { 3266 struct page *page = pfn_to_page(pfn); 3267 3268 __init_single_page(page, pfn, zone, nid); 3269 prep_compound_tail((struct page *)folio, pfn - head_pfn); 3270 ret = page_ref_freeze(page, 1); 3271 VM_BUG_ON(!ret); 3272 } 3273 } 3274 3275 static void __init hugetlb_folio_init_vmemmap(struct folio *folio, 3276 struct hstate *h, 3277 unsigned long nr_pages) 3278 { 3279 int ret; 3280 3281 /* Prepare folio head */ 3282 __folio_clear_reserved(folio); 3283 __folio_set_head(folio); 3284 ret = folio_ref_freeze(folio, 1); 3285 VM_BUG_ON(!ret); 3286 /* Initialize the necessary tail struct pages */ 3287 hugetlb_folio_init_tail_vmemmap(folio, 1, nr_pages); 3288 prep_compound_head((struct page *)folio, huge_page_order(h)); 3289 } 3290 3291 static bool __init hugetlb_bootmem_page_prehvo(struct huge_bootmem_page *m) 3292 { 3293 return m->flags & HUGE_BOOTMEM_HVO; 3294 } 3295 3296 static bool __init hugetlb_bootmem_page_earlycma(struct huge_bootmem_page *m) 3297 { 3298 return m->flags & HUGE_BOOTMEM_CMA; 3299 } 3300 3301 /* 3302 * memblock-allocated pageblocks might not have the migrate type set 3303 * if marked with the 'noinit' flag. 
Set it to the default (MIGRATE_MOVABLE) 3304 * here, or MIGRATE_CMA if this was a page allocated through an early CMA 3305 * reservation. 3306 * 3307 * In case of vmemmap optimized folios, the tail vmemmap pages are mapped 3308 * read-only, but that's ok - for sparse vmemmap this does not write to 3309 * the page structure. 3310 */ 3311 static void __init hugetlb_bootmem_init_migratetype(struct folio *folio, 3312 struct hstate *h) 3313 { 3314 unsigned long nr_pages = pages_per_huge_page(h), i; 3315 3316 WARN_ON_ONCE(!pageblock_aligned(folio_pfn(folio))); 3317 3318 for (i = 0; i < nr_pages; i += pageblock_nr_pages) { 3319 if (folio_test_hugetlb_cma(folio)) 3320 init_cma_pageblock(folio_page(folio, i)); 3321 else 3322 set_pageblock_migratetype(folio_page(folio, i), 3323 MIGRATE_MOVABLE); 3324 } 3325 } 3326 3327 static void __init prep_and_add_bootmem_folios(struct hstate *h, 3328 struct list_head *folio_list) 3329 { 3330 unsigned long flags; 3331 struct folio *folio, *tmp_f; 3332 3333 /* Send list for bulk vmemmap optimization processing */ 3334 hugetlb_vmemmap_optimize_bootmem_folios(h, folio_list); 3335 3336 list_for_each_entry_safe(folio, tmp_f, folio_list, lru) { 3337 if (!folio_test_hugetlb_vmemmap_optimized(folio)) { 3338 /* 3339 * If HVO fails, initialize all tail struct pages 3340 * We do not worry about potential long lock hold 3341 * time as this is early in boot and there should 3342 * be no contention. 3343 */ 3344 hugetlb_folio_init_tail_vmemmap(folio, 3345 HUGETLB_VMEMMAP_RESERVE_PAGES, 3346 pages_per_huge_page(h)); 3347 } 3348 hugetlb_bootmem_init_migratetype(folio, h); 3349 /* Subdivide locks to achieve better parallel performance */ 3350 spin_lock_irqsave(&hugetlb_lock, flags); 3351 __prep_account_new_huge_page(h, folio_nid(folio)); 3352 enqueue_hugetlb_folio(h, folio); 3353 spin_unlock_irqrestore(&hugetlb_lock, flags); 3354 } 3355 } 3356 3357 bool __init hugetlb_bootmem_page_zones_valid(int nid, 3358 struct huge_bootmem_page *m) 3359 { 3360 unsigned long start_pfn; 3361 bool valid; 3362 3363 if (m->flags & HUGE_BOOTMEM_ZONES_VALID) { 3364 /* 3365 * Already validated, skip check. 3366 */ 3367 return true; 3368 } 3369 3370 if (hugetlb_bootmem_page_earlycma(m)) { 3371 valid = cma_validate_zones(m->cma); 3372 goto out; 3373 } 3374 3375 start_pfn = virt_to_phys(m) >> PAGE_SHIFT; 3376 3377 valid = !pfn_range_intersects_zones(nid, start_pfn, 3378 pages_per_huge_page(m->hstate)); 3379 out: 3380 if (!valid) 3381 hstate_boot_nrinvalid[hstate_index(m->hstate)]++; 3382 3383 return valid; 3384 } 3385 3386 /* 3387 * Free a bootmem page that was found to be invalid (intersecting with 3388 * multiple zones). 3389 * 3390 * Since it intersects with multiple zones, we can't just do a free 3391 * operation on all pages at once, but instead have to walk all 3392 * pages, freeing them one by one. 3393 */ 3394 static void __init hugetlb_bootmem_free_invalid_page(int nid, struct page *page, 3395 struct hstate *h) 3396 { 3397 unsigned long npages = pages_per_huge_page(h); 3398 unsigned long pfn; 3399 3400 while (npages--) { 3401 pfn = page_to_pfn(page); 3402 __init_page_from_nid(pfn, nid); 3403 free_reserved_page(page); 3404 page++; 3405 } 3406 } 3407 3408 /* 3409 * Put bootmem huge pages into the standard lists after mem_map is up. 3410 * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages. 
3411 */ 3412 static void __init gather_bootmem_prealloc_node(unsigned long nid) 3413 { 3414 LIST_HEAD(folio_list); 3415 struct huge_bootmem_page *m, *tm; 3416 struct hstate *h = NULL, *prev_h = NULL; 3417 3418 list_for_each_entry_safe(m, tm, &huge_boot_pages[nid], list) { 3419 struct page *page = virt_to_page(m); 3420 struct folio *folio = (void *)page; 3421 3422 h = m->hstate; 3423 if (!hugetlb_bootmem_page_zones_valid(nid, m)) { 3424 /* 3425 * Can't use this page. Initialize the 3426 * page structures if that hasn't already 3427 * been done, and give them to the page 3428 * allocator. 3429 */ 3430 hugetlb_bootmem_free_invalid_page(nid, page, h); 3431 continue; 3432 } 3433 3434 /* 3435 * It is possible to have multiple huge page sizes (hstates) 3436 * in this list. If so, process each size separately. 3437 */ 3438 if (h != prev_h && prev_h != NULL) 3439 prep_and_add_bootmem_folios(prev_h, &folio_list); 3440 prev_h = h; 3441 3442 VM_BUG_ON(!hstate_is_gigantic(h)); 3443 WARN_ON(folio_ref_count(folio) != 1); 3444 3445 hugetlb_folio_init_vmemmap(folio, h, 3446 HUGETLB_VMEMMAP_RESERVE_PAGES); 3447 init_new_hugetlb_folio(h, folio); 3448 3449 if (hugetlb_bootmem_page_prehvo(m)) 3450 /* 3451 * If pre-HVO was done, just set the 3452 * flag, the HVO code will then skip 3453 * this folio. 3454 */ 3455 folio_set_hugetlb_vmemmap_optimized(folio); 3456 3457 if (hugetlb_bootmem_page_earlycma(m)) 3458 folio_set_hugetlb_cma(folio); 3459 3460 list_add(&folio->lru, &folio_list); 3461 3462 /* 3463 * We need to restore the 'stolen' pages to totalram_pages 3464 * in order to fix confusing memory reports from free(1) and 3465 * other side-effects, like CommitLimit going negative. 3466 * 3467 * For CMA pages, this is done in init_cma_pageblock 3468 * (via hugetlb_bootmem_init_migratetype), so skip it here. 3469 */ 3470 if (!folio_test_hugetlb_cma(folio)) 3471 adjust_managed_page_count(page, pages_per_huge_page(h)); 3472 cond_resched(); 3473 } 3474 3475 prep_and_add_bootmem_folios(h, &folio_list); 3476 } 3477 3478 static void __init gather_bootmem_prealloc_parallel(unsigned long start, 3479 unsigned long end, void *arg) 3480 { 3481 int nid; 3482 3483 for (nid = start; nid < end; nid++) 3484 gather_bootmem_prealloc_node(nid); 3485 } 3486 3487 static void __init gather_bootmem_prealloc(void) 3488 { 3489 struct padata_mt_job job = { 3490 .thread_fn = gather_bootmem_prealloc_parallel, 3491 .fn_arg = NULL, 3492 .start = 0, 3493 .size = nr_node_ids, 3494 .align = 1, 3495 .min_chunk = 1, 3496 .max_threads = num_node_state(N_MEMORY), 3497 .numa_aware = true, 3498 }; 3499 3500 padata_do_multithreaded(&job); 3501 } 3502 3503 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid) 3504 { 3505 unsigned long i; 3506 char buf[32]; 3507 LIST_HEAD(folio_list); 3508 3509 for (i = 0; i < h->max_huge_pages_node[nid]; ++i) { 3510 if (hstate_is_gigantic(h)) { 3511 if (!alloc_bootmem_huge_page(h, nid)) 3512 break; 3513 } else { 3514 struct folio *folio; 3515 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 3516 3517 folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, 3518 &node_states[N_MEMORY], NULL); 3519 if (!folio) 3520 break; 3521 list_add(&folio->lru, &folio_list); 3522 } 3523 cond_resched(); 3524 } 3525 3526 if (!list_empty(&folio_list)) 3527 prep_and_add_allocated_folios(h, &folio_list); 3528 3529 if (i == h->max_huge_pages_node[nid]) 3530 return; 3531 3532 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3533 pr_warn("HugeTLB: allocating %u of page size %s failed node%d. 
Only allocated %lu hugepages.\n",
		h->max_huge_pages_node[nid], buf, nid, i);
	h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
	h->max_huge_pages_node[nid] = i;
}

static bool __init hugetlb_hstate_alloc_pages_specific_nodes(struct hstate *h)
{
	int i;
	bool node_specific_alloc = false;

	for_each_online_node(i) {
		if (h->max_huge_pages_node[i] > 0) {
			hugetlb_hstate_alloc_pages_onenode(h, i);
			node_specific_alloc = true;
		}
	}

	return node_specific_alloc;
}

static void __init hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated, struct hstate *h)
{
	if (allocated < h->max_huge_pages) {
		char buf[32];

		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
		pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
			h->max_huge_pages, buf, allocated);
		h->max_huge_pages = allocated;
	}
}

static void __init hugetlb_pages_alloc_boot_node(unsigned long start, unsigned long end, void *arg)
{
	struct hstate *h = (struct hstate *)arg;
	int i, num = end - start;
	nodemask_t node_alloc_noretry;
	LIST_HEAD(folio_list);
	int next_node = first_online_node;

	/* Bit mask controlling how hard we retry per-node allocations. */
	nodes_clear(node_alloc_noretry);

	for (i = 0; i < num; ++i) {
		struct folio *folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
						&node_alloc_noretry, &next_node);
		if (!folio)
			break;

		list_move(&folio->lru, &folio_list);
		cond_resched();
	}

	prep_and_add_allocated_folios(h, &folio_list);
}

static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h)
{
	unsigned long i;

	for (i = 0; i < h->max_huge_pages; ++i) {
		if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
			break;
		cond_resched();
	}

	return i;
}

static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
{
	struct padata_mt_job job = {
		.fn_arg		= h,
		.align		= 1,
		.numa_aware	= true
	};

	unsigned long jiffies_start;
	unsigned long jiffies_end;

	job.thread_fn	= hugetlb_pages_alloc_boot_node;
	job.start	= 0;
	job.size	= h->max_huge_pages;

	/*
	 * job.max_threads is 25% of the available cpu threads by default.
	 *
	 * On large servers with terabytes of memory, huge page allocation
	 * can consume a considerable amount of time.
	 *
	 * Tests below show how long it takes to allocate 1 TiB of memory with
	 * 2MiB huge pages. Using more threads can significantly improve
	 * allocation time.
	 *
	 * +-----------------------+-------+-------+-------+-------+-------+
	 * | threads               |   8   |  16   |  32   |  64   |  128  |
	 * +-----------------------+-------+-------+-------+-------+-------+
	 * | skylake      144 cpus |   44s |   22s |   16s |   19s |   20s |
	 * | cascade lake 192 cpus |   39s |   20s |   11s |   10s |    9s |
	 * +-----------------------+-------+-------+-------+-------+-------+
	 */
	if (hugepage_allocation_threads == 0) {
		hugepage_allocation_threads = num_online_cpus() / 4;
		hugepage_allocation_threads = max(hugepage_allocation_threads, 1);
	}

	job.max_threads	= hugepage_allocation_threads;
	job.min_chunk	= h->max_huge_pages / hugepage_allocation_threads;

	jiffies_start = jiffies;
	padata_do_multithreaded(&job);
	jiffies_end = jiffies;

	pr_info("HugeTLB: allocation took %dms with hugepage_allocation_threads=%ld\n",
		jiffies_to_msecs(jiffies_end - jiffies_start),
		hugepage_allocation_threads);

	return h->nr_huge_pages;
}
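
/*
 * Boot-time pool sizing is driven by kernel command line parameters parsed
 * in this file (hugepagesz=, hugepages=, default_hugepagesz=). A hedged
 * example, for illustration only (see
 * Documentation/admin-guide/mm/hugetlbpage.rst for the authoritative
 * syntax):
 *
 *	default_hugepagesz=2M hugepagesz=2M hugepages=512
 *	hugepagesz=1G hugepages=0:2,1:2
 *
 * The first pair requests 512 2MiB pages spread across nodes; the second
 * uses the per-node form to request two 1GiB pages on node 0 and two on
 * node 1.
 */
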
/*
 * NOTE: this routine is called in different contexts for gigantic and
 * non-gigantic pages.
 * - For gigantic pages, this is called early in the boot process and
 *   pages are allocated from memblock or something similar.
 *   Gigantic pages are actually added to pools later with the routine
 *   gather_bootmem_prealloc.
 * - For non-gigantic pages, this is called later in the boot process after
 *   all of mm is up and functional. Pages are allocated from buddy and
 *   then added to hugetlb pools.
 */
static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long allocated;

	/*
	 * Skip gigantic hugepages allocation if early CMA
	 * reservations are not available.
	 */
	if (hstate_is_gigantic(h) && hugetlb_cma_total_size() &&
	    !hugetlb_early_cma(h)) {
		pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
		return;
	}

	/* do node specific alloc */
	if (hugetlb_hstate_alloc_pages_specific_nodes(h))
		return;

	/* below will do all node balanced alloc */
	if (hstate_is_gigantic(h))
		allocated = hugetlb_gigantic_pages_alloc_boot(h);
	else
		allocated = hugetlb_pages_alloc_boot(h);

	hugetlb_hstate_alloc_pages_errcheck(allocated, h);
}

static void __init hugetlb_init_hstates(void)
{
	struct hstate *h, *h2;

	for_each_hstate(h) {
		/*
		 * Always reset to first_memory_node here, even if
		 * next_nid_to_alloc was set before - we can't
		 * reference hugetlb_bootmem_nodes after init, and
		 * first_memory_node is right for all further allocations.
		 */
		h->next_nid_to_alloc = first_memory_node;
		h->next_nid_to_free = first_memory_node;

		/* oversize hugepages were init'ed in early boot */
		if (!hstate_is_gigantic(h))
			hugetlb_hstate_alloc_pages(h);

		/*
		 * Set demote order for each hstate. Note that
		 * h->demote_order is initially 0.
		 * - We cannot demote gigantic pages if runtime freeing
		 *   is not supported, so skip this.
		 * - If CMA allocation is possible, we cannot demote
		 *   HUGETLB_PAGE_ORDER or smaller size pages.
3716 */ 3717 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 3718 continue; 3719 if (hugetlb_cma_total_size() && h->order <= HUGETLB_PAGE_ORDER) 3720 continue; 3721 for_each_hstate(h2) { 3722 if (h2 == h) 3723 continue; 3724 if (h2->order < h->order && 3725 h2->order > h->demote_order) 3726 h->demote_order = h2->order; 3727 } 3728 } 3729 } 3730 3731 static void __init report_hugepages(void) 3732 { 3733 struct hstate *h; 3734 unsigned long nrinvalid; 3735 3736 for_each_hstate(h) { 3737 char buf[32]; 3738 3739 nrinvalid = hstate_boot_nrinvalid[hstate_index(h)]; 3740 h->max_huge_pages -= nrinvalid; 3741 3742 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3743 pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n", 3744 buf, h->nr_huge_pages); 3745 if (nrinvalid) 3746 pr_info("HugeTLB: %s page size: %lu invalid page%s discarded\n", 3747 buf, nrinvalid, nrinvalid > 1 ? "s" : ""); 3748 pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n", 3749 hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf); 3750 } 3751 } 3752 3753 #ifdef CONFIG_HIGHMEM 3754 static void try_to_free_low(struct hstate *h, unsigned long count, 3755 nodemask_t *nodes_allowed) 3756 { 3757 int i; 3758 LIST_HEAD(page_list); 3759 3760 lockdep_assert_held(&hugetlb_lock); 3761 if (hstate_is_gigantic(h)) 3762 return; 3763 3764 /* 3765 * Collect pages to be freed on a list, and free after dropping lock 3766 */ 3767 for_each_node_mask(i, *nodes_allowed) { 3768 struct folio *folio, *next; 3769 struct list_head *freel = &h->hugepage_freelists[i]; 3770 list_for_each_entry_safe(folio, next, freel, lru) { 3771 if (count >= h->nr_huge_pages) 3772 goto out; 3773 if (folio_test_highmem(folio)) 3774 continue; 3775 remove_hugetlb_folio(h, folio, false); 3776 list_add(&folio->lru, &page_list); 3777 } 3778 } 3779 3780 out: 3781 spin_unlock_irq(&hugetlb_lock); 3782 update_and_free_pages_bulk(h, &page_list); 3783 spin_lock_irq(&hugetlb_lock); 3784 } 3785 #else 3786 static inline void try_to_free_low(struct hstate *h, unsigned long count, 3787 nodemask_t *nodes_allowed) 3788 { 3789 } 3790 #endif 3791 3792 /* 3793 * Increment or decrement surplus_huge_pages. Keep node-specific counters 3794 * balanced by operating on them in a round-robin fashion. 3795 * Returns 1 if an adjustment was made. 
3796 */ 3797 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, 3798 int delta) 3799 { 3800 int nr_nodes, node; 3801 3802 lockdep_assert_held(&hugetlb_lock); 3803 VM_BUG_ON(delta != -1 && delta != 1); 3804 3805 if (delta < 0) { 3806 for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, nodes_allowed) { 3807 if (h->surplus_huge_pages_node[node]) 3808 goto found; 3809 } 3810 } else { 3811 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 3812 if (h->surplus_huge_pages_node[node] < 3813 h->nr_huge_pages_node[node]) 3814 goto found; 3815 } 3816 } 3817 return 0; 3818 3819 found: 3820 h->surplus_huge_pages += delta; 3821 h->surplus_huge_pages_node[node] += delta; 3822 return 1; 3823 } 3824 3825 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) 3826 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, 3827 nodemask_t *nodes_allowed) 3828 { 3829 unsigned long persistent_free_count; 3830 unsigned long min_count; 3831 unsigned long allocated; 3832 struct folio *folio; 3833 LIST_HEAD(page_list); 3834 NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL); 3835 3836 /* 3837 * Bit mask controlling how hard we retry per-node allocations. 3838 * If we can not allocate the bit mask, do not attempt to allocate 3839 * the requested huge pages. 3840 */ 3841 if (node_alloc_noretry) 3842 nodes_clear(*node_alloc_noretry); 3843 else 3844 return -ENOMEM; 3845 3846 /* 3847 * resize_lock mutex prevents concurrent adjustments to number of 3848 * pages in hstate via the proc/sysfs interfaces. 3849 */ 3850 mutex_lock(&h->resize_lock); 3851 flush_free_hpage_work(h); 3852 spin_lock_irq(&hugetlb_lock); 3853 3854 /* 3855 * Check for a node specific request. 3856 * Changing node specific huge page count may require a corresponding 3857 * change to the global count. In any case, the passed node mask 3858 * (nodes_allowed) will restrict alloc/free to the specified node. 3859 */ 3860 if (nid != NUMA_NO_NODE) { 3861 unsigned long old_count = count; 3862 3863 count += persistent_huge_pages(h) - 3864 (h->nr_huge_pages_node[nid] - 3865 h->surplus_huge_pages_node[nid]); 3866 /* 3867 * User may have specified a large count value which caused the 3868 * above calculation to overflow. In this case, they wanted 3869 * to allocate as many huge pages as possible. Set count to 3870 * largest possible value to align with their intention. 3871 */ 3872 if (count < old_count) 3873 count = ULONG_MAX; 3874 } 3875 3876 /* 3877 * Gigantic pages runtime allocation depend on the capability for large 3878 * page range allocation. 3879 * If the system does not provide this feature, return an error when 3880 * the user tries to allocate gigantic pages but let the user free the 3881 * boottime allocated gigantic pages. 3882 */ 3883 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) { 3884 if (count > persistent_huge_pages(h)) { 3885 spin_unlock_irq(&hugetlb_lock); 3886 mutex_unlock(&h->resize_lock); 3887 NODEMASK_FREE(node_alloc_noretry); 3888 return -EINVAL; 3889 } 3890 /* Fall through to decrease pool */ 3891 } 3892 3893 /* 3894 * Increase the pool size 3895 * First take pages out of surplus state. Then make up the 3896 * remaining difference by allocating fresh huge pages. 3897 * 3898 * We might race with alloc_surplus_hugetlb_folio() here and be unable 3899 * to convert a surplus huge page to a normal huge page. 
That is 3900 * not critical, though, it just means the overall size of the 3901 * pool might be one hugepage larger than it needs to be, but 3902 * within all the constraints specified by the sysctls. 3903 */ 3904 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { 3905 if (!adjust_pool_surplus(h, nodes_allowed, -1)) 3906 break; 3907 } 3908 3909 allocated = 0; 3910 while (count > (persistent_huge_pages(h) + allocated)) { 3911 /* 3912 * If this allocation races such that we no longer need the 3913 * page, free_huge_folio will handle it by freeing the page 3914 * and reducing the surplus. 3915 */ 3916 spin_unlock_irq(&hugetlb_lock); 3917 3918 /* yield cpu to avoid soft lockup */ 3919 cond_resched(); 3920 3921 folio = alloc_pool_huge_folio(h, nodes_allowed, 3922 node_alloc_noretry, 3923 &h->next_nid_to_alloc); 3924 if (!folio) { 3925 prep_and_add_allocated_folios(h, &page_list); 3926 spin_lock_irq(&hugetlb_lock); 3927 goto out; 3928 } 3929 3930 list_add(&folio->lru, &page_list); 3931 allocated++; 3932 3933 /* Bail for signals. Probably ctrl-c from user */ 3934 if (signal_pending(current)) { 3935 prep_and_add_allocated_folios(h, &page_list); 3936 spin_lock_irq(&hugetlb_lock); 3937 goto out; 3938 } 3939 3940 spin_lock_irq(&hugetlb_lock); 3941 } 3942 3943 /* Add allocated pages to the pool */ 3944 if (!list_empty(&page_list)) { 3945 spin_unlock_irq(&hugetlb_lock); 3946 prep_and_add_allocated_folios(h, &page_list); 3947 spin_lock_irq(&hugetlb_lock); 3948 } 3949 3950 /* 3951 * Decrease the pool size 3952 * First return free pages to the buddy allocator (being careful 3953 * to keep enough around to satisfy reservations). Then place 3954 * pages into surplus state as needed so the pool will shrink 3955 * to the desired size as pages become free. 3956 * 3957 * By placing pages into the surplus state independent of the 3958 * overcommit value, we are allowing the surplus pool size to 3959 * exceed overcommit. There are few sane options here. Since 3960 * alloc_surplus_hugetlb_folio() is checking the global counter, 3961 * though, we'll note that we're not allowed to exceed surplus 3962 * and won't grow the pool anywhere else. Not until one of the 3963 * sysctls are changed, or the surplus pages go out of use. 3964 * 3965 * min_count is the expected number of persistent pages, we 3966 * shouldn't calculate min_count by using 3967 * resv_huge_pages + persistent_huge_pages() - free_huge_pages, 3968 * because there may exist free surplus huge pages, and this will 3969 * lead to subtracting twice. Free surplus huge pages come from HVO 3970 * failing to restore vmemmap, see comments in the callers of 3971 * hugetlb_vmemmap_restore_folio(). Thus, we should calculate 3972 * persistent free count first. 
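 * For example (illustrative numbers): with 8 persistent pages, 2 surplus
 * pages, 9 free pages and 3 reserved pages, 2 of the free pages are free
 * surplus pages, so persistent_free_count is 9 - 2 = 7 and min_count is
 * 3 + 8 - 7 = 4, before being clamped to at least 'count' below.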
3973 */ 3974 persistent_free_count = h->free_huge_pages; 3975 if (h->free_huge_pages > persistent_huge_pages(h)) { 3976 if (h->free_huge_pages > h->surplus_huge_pages) 3977 persistent_free_count -= h->surplus_huge_pages; 3978 else 3979 persistent_free_count = 0; 3980 } 3981 min_count = h->resv_huge_pages + persistent_huge_pages(h) - persistent_free_count; 3982 min_count = max(count, min_count); 3983 try_to_free_low(h, min_count, nodes_allowed); 3984 3985 /* 3986 * Collect pages to be removed on list without dropping lock 3987 */ 3988 while (min_count < persistent_huge_pages(h)) { 3989 folio = remove_pool_hugetlb_folio(h, nodes_allowed, 0); 3990 if (!folio) 3991 break; 3992 3993 list_add(&folio->lru, &page_list); 3994 } 3995 /* free the pages after dropping lock */ 3996 spin_unlock_irq(&hugetlb_lock); 3997 update_and_free_pages_bulk(h, &page_list); 3998 flush_free_hpage_work(h); 3999 spin_lock_irq(&hugetlb_lock); 4000 4001 while (count < persistent_huge_pages(h)) { 4002 if (!adjust_pool_surplus(h, nodes_allowed, 1)) 4003 break; 4004 } 4005 out: 4006 h->max_huge_pages = persistent_huge_pages(h); 4007 spin_unlock_irq(&hugetlb_lock); 4008 mutex_unlock(&h->resize_lock); 4009 4010 NODEMASK_FREE(node_alloc_noretry); 4011 4012 return 0; 4013 } 4014 4015 static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst, 4016 struct list_head *src_list) 4017 { 4018 long rc; 4019 struct folio *folio, *next; 4020 LIST_HEAD(dst_list); 4021 LIST_HEAD(ret_list); 4022 4023 rc = hugetlb_vmemmap_restore_folios(src, src_list, &ret_list); 4024 list_splice_init(&ret_list, src_list); 4025 4026 /* 4027 * Taking target hstate mutex synchronizes with set_max_huge_pages. 4028 * Without the mutex, pages added to target hstate could be marked 4029 * as surplus. 4030 * 4031 * Note that we already hold src->resize_lock. To prevent deadlock, 4032 * use the convention of always taking larger size hstate mutex first. 
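 * For example, when demoting 1 GiB folios to 2 MiB folios, the caller
 * already holds the 1 GiB (src) hstate's resize_lock and we take the
 * 2 MiB (dst) hstate's resize_lock here.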
4033 */ 4034 mutex_lock(&dst->resize_lock); 4035 4036 list_for_each_entry_safe(folio, next, src_list, lru) { 4037 int i; 4038 bool cma; 4039 4040 if (folio_test_hugetlb_vmemmap_optimized(folio)) 4041 continue; 4042 4043 cma = folio_test_hugetlb_cma(folio); 4044 4045 list_del(&folio->lru); 4046 4047 split_page_owner(&folio->page, huge_page_order(src), huge_page_order(dst)); 4048 pgalloc_tag_split(folio, huge_page_order(src), huge_page_order(dst)); 4049 4050 for (i = 0; i < pages_per_huge_page(src); i += pages_per_huge_page(dst)) { 4051 struct page *page = folio_page(folio, i); 4052 /* Careful: see __split_huge_page_tail() */ 4053 struct folio *new_folio = (struct folio *)page; 4054 4055 clear_compound_head(page); 4056 prep_compound_page(page, dst->order); 4057 4058 new_folio->mapping = NULL; 4059 init_new_hugetlb_folio(dst, new_folio); 4060 /* Copy the CMA flag so that it is freed correctly */ 4061 if (cma) 4062 folio_set_hugetlb_cma(new_folio); 4063 list_add(&new_folio->lru, &dst_list); 4064 } 4065 } 4066 4067 prep_and_add_allocated_folios(dst, &dst_list); 4068 4069 mutex_unlock(&dst->resize_lock); 4070 4071 return rc; 4072 } 4073 4074 static long demote_pool_huge_page(struct hstate *src, nodemask_t *nodes_allowed, 4075 unsigned long nr_to_demote) 4076 __must_hold(&hugetlb_lock) 4077 { 4078 int nr_nodes, node; 4079 struct hstate *dst; 4080 long rc = 0; 4081 long nr_demoted = 0; 4082 4083 lockdep_assert_held(&hugetlb_lock); 4084 4085 /* We should never get here if no demote order */ 4086 if (!src->demote_order) { 4087 pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n"); 4088 return -EINVAL; /* internal error */ 4089 } 4090 dst = size_to_hstate(PAGE_SIZE << src->demote_order); 4091 4092 for_each_node_mask_to_free(src, nr_nodes, node, nodes_allowed) { 4093 LIST_HEAD(list); 4094 struct folio *folio, *next; 4095 4096 list_for_each_entry_safe(folio, next, &src->hugepage_freelists[node], lru) { 4097 if (folio_test_hwpoison(folio)) 4098 continue; 4099 4100 remove_hugetlb_folio(src, folio, false); 4101 list_add(&folio->lru, &list); 4102 4103 if (++nr_demoted == nr_to_demote) 4104 break; 4105 } 4106 4107 spin_unlock_irq(&hugetlb_lock); 4108 4109 rc = demote_free_hugetlb_folios(src, dst, &list); 4110 4111 spin_lock_irq(&hugetlb_lock); 4112 4113 list_for_each_entry_safe(folio, next, &list, lru) { 4114 list_del(&folio->lru); 4115 add_hugetlb_folio(src, folio, false); 4116 4117 nr_demoted--; 4118 } 4119 4120 if (rc < 0 || nr_demoted == nr_to_demote) 4121 break; 4122 } 4123 4124 /* 4125 * Not absolutely necessary, but for consistency update max_huge_pages 4126 * based on pool changes for the demoted page. 4127 */ 4128 src->max_huge_pages -= nr_demoted; 4129 dst->max_huge_pages += nr_demoted << (huge_page_order(src) - huge_page_order(dst)); 4130 4131 if (rc < 0) 4132 return rc; 4133 4134 if (nr_demoted) 4135 return nr_demoted; 4136 /* 4137 * Only way to get here is if all pages on free lists are poisoned. 4138 * Return -EBUSY so that caller will not retry. 
4139 */ 4140 return -EBUSY; 4141 } 4142 4143 #define HSTATE_ATTR_RO(_name) \ 4144 static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 4145 4146 #define HSTATE_ATTR_WO(_name) \ 4147 static struct kobj_attribute _name##_attr = __ATTR_WO(_name) 4148 4149 #define HSTATE_ATTR(_name) \ 4150 static struct kobj_attribute _name##_attr = __ATTR_RW(_name) 4151 4152 static struct kobject *hugepages_kobj; 4153 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 4154 4155 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); 4156 4157 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) 4158 { 4159 int i; 4160 4161 for (i = 0; i < HUGE_MAX_HSTATE; i++) 4162 if (hstate_kobjs[i] == kobj) { 4163 if (nidp) 4164 *nidp = NUMA_NO_NODE; 4165 return &hstates[i]; 4166 } 4167 4168 return kobj_to_node_hstate(kobj, nidp); 4169 } 4170 4171 static ssize_t nr_hugepages_show_common(struct kobject *kobj, 4172 struct kobj_attribute *attr, char *buf) 4173 { 4174 struct hstate *h; 4175 unsigned long nr_huge_pages; 4176 int nid; 4177 4178 h = kobj_to_hstate(kobj, &nid); 4179 if (nid == NUMA_NO_NODE) 4180 nr_huge_pages = h->nr_huge_pages; 4181 else 4182 nr_huge_pages = h->nr_huge_pages_node[nid]; 4183 4184 return sysfs_emit(buf, "%lu\n", nr_huge_pages); 4185 } 4186 4187 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy, 4188 struct hstate *h, int nid, 4189 unsigned long count, size_t len) 4190 { 4191 int err; 4192 nodemask_t nodes_allowed, *n_mask; 4193 4194 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 4195 return -EINVAL; 4196 4197 if (nid == NUMA_NO_NODE) { 4198 /* 4199 * global hstate attribute 4200 */ 4201 if (!(obey_mempolicy && 4202 init_nodemask_of_mempolicy(&nodes_allowed))) 4203 n_mask = &node_states[N_MEMORY]; 4204 else 4205 n_mask = &nodes_allowed; 4206 } else { 4207 /* 4208 * Node specific request. count adjustment happens in 4209 * set_max_huge_pages() after acquiring hugetlb_lock. 4210 */ 4211 init_nodemask_of_node(&nodes_allowed, nid); 4212 n_mask = &nodes_allowed; 4213 } 4214 4215 err = set_max_huge_pages(h, count, nid, n_mask); 4216 4217 return err ? err : len; 4218 } 4219 4220 static ssize_t nr_hugepages_store_common(bool obey_mempolicy, 4221 struct kobject *kobj, const char *buf, 4222 size_t len) 4223 { 4224 struct hstate *h; 4225 unsigned long count; 4226 int nid; 4227 int err; 4228 4229 err = kstrtoul(buf, 10, &count); 4230 if (err) 4231 return err; 4232 4233 h = kobj_to_hstate(kobj, &nid); 4234 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); 4235 } 4236 4237 static ssize_t nr_hugepages_show(struct kobject *kobj, 4238 struct kobj_attribute *attr, char *buf) 4239 { 4240 return nr_hugepages_show_common(kobj, attr, buf); 4241 } 4242 4243 static ssize_t nr_hugepages_store(struct kobject *kobj, 4244 struct kobj_attribute *attr, const char *buf, size_t len) 4245 { 4246 return nr_hugepages_store_common(false, kobj, buf, len); 4247 } 4248 HSTATE_ATTR(nr_hugepages); 4249 4250 #ifdef CONFIG_NUMA 4251 4252 /* 4253 * hstate attribute for optionally mempolicy-based constraint on persistent 4254 * huge page alloc/free. 
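 * For example (assuming a 2 MiB hstate), running
 *   numactl -m 0,1 sh -c 'echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy'
 * adjusts the persistent pool to 64 pages, allocating or freeing pages
 * only on nodes 0 and 1.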
4255 */ 4256 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, 4257 struct kobj_attribute *attr, 4258 char *buf) 4259 { 4260 return nr_hugepages_show_common(kobj, attr, buf); 4261 } 4262 4263 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, 4264 struct kobj_attribute *attr, const char *buf, size_t len) 4265 { 4266 return nr_hugepages_store_common(true, kobj, buf, len); 4267 } 4268 HSTATE_ATTR(nr_hugepages_mempolicy); 4269 #endif 4270 4271 4272 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 4273 struct kobj_attribute *attr, char *buf) 4274 { 4275 struct hstate *h = kobj_to_hstate(kobj, NULL); 4276 return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages); 4277 } 4278 4279 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 4280 struct kobj_attribute *attr, const char *buf, size_t count) 4281 { 4282 int err; 4283 unsigned long input; 4284 struct hstate *h = kobj_to_hstate(kobj, NULL); 4285 4286 if (hstate_is_gigantic(h)) 4287 return -EINVAL; 4288 4289 err = kstrtoul(buf, 10, &input); 4290 if (err) 4291 return err; 4292 4293 spin_lock_irq(&hugetlb_lock); 4294 h->nr_overcommit_huge_pages = input; 4295 spin_unlock_irq(&hugetlb_lock); 4296 4297 return count; 4298 } 4299 HSTATE_ATTR(nr_overcommit_hugepages); 4300 4301 static ssize_t free_hugepages_show(struct kobject *kobj, 4302 struct kobj_attribute *attr, char *buf) 4303 { 4304 struct hstate *h; 4305 unsigned long free_huge_pages; 4306 int nid; 4307 4308 h = kobj_to_hstate(kobj, &nid); 4309 if (nid == NUMA_NO_NODE) 4310 free_huge_pages = h->free_huge_pages; 4311 else 4312 free_huge_pages = h->free_huge_pages_node[nid]; 4313 4314 return sysfs_emit(buf, "%lu\n", free_huge_pages); 4315 } 4316 HSTATE_ATTR_RO(free_hugepages); 4317 4318 static ssize_t resv_hugepages_show(struct kobject *kobj, 4319 struct kobj_attribute *attr, char *buf) 4320 { 4321 struct hstate *h = kobj_to_hstate(kobj, NULL); 4322 return sysfs_emit(buf, "%lu\n", h->resv_huge_pages); 4323 } 4324 HSTATE_ATTR_RO(resv_hugepages); 4325 4326 static ssize_t surplus_hugepages_show(struct kobject *kobj, 4327 struct kobj_attribute *attr, char *buf) 4328 { 4329 struct hstate *h; 4330 unsigned long surplus_huge_pages; 4331 int nid; 4332 4333 h = kobj_to_hstate(kobj, &nid); 4334 if (nid == NUMA_NO_NODE) 4335 surplus_huge_pages = h->surplus_huge_pages; 4336 else 4337 surplus_huge_pages = h->surplus_huge_pages_node[nid]; 4338 4339 return sysfs_emit(buf, "%lu\n", surplus_huge_pages); 4340 } 4341 HSTATE_ATTR_RO(surplus_hugepages); 4342 4343 static ssize_t demote_store(struct kobject *kobj, 4344 struct kobj_attribute *attr, const char *buf, size_t len) 4345 { 4346 unsigned long nr_demote; 4347 unsigned long nr_available; 4348 nodemask_t nodes_allowed, *n_mask; 4349 struct hstate *h; 4350 int err; 4351 int nid; 4352 4353 err = kstrtoul(buf, 10, &nr_demote); 4354 if (err) 4355 return err; 4356 h = kobj_to_hstate(kobj, &nid); 4357 4358 if (nid != NUMA_NO_NODE) { 4359 init_nodemask_of_node(&nodes_allowed, nid); 4360 n_mask = &nodes_allowed; 4361 } else { 4362 n_mask = &node_states[N_MEMORY]; 4363 } 4364 4365 /* Synchronize with other sysfs operations modifying huge pages */ 4366 mutex_lock(&h->resize_lock); 4367 spin_lock_irq(&hugetlb_lock); 4368 4369 while (nr_demote) { 4370 long rc; 4371 4372 /* 4373 * Check for available pages to demote each time thorough the 4374 * loop as demote_pool_huge_page will drop hugetlb_lock. 
4375 */ 4376 if (nid != NUMA_NO_NODE) 4377 nr_available = h->free_huge_pages_node[nid]; 4378 else 4379 nr_available = h->free_huge_pages; 4380 nr_available -= h->resv_huge_pages; 4381 if (!nr_available) 4382 break; 4383 4384 rc = demote_pool_huge_page(h, n_mask, nr_demote); 4385 if (rc < 0) { 4386 err = rc; 4387 break; 4388 } 4389 4390 nr_demote -= rc; 4391 } 4392 4393 spin_unlock_irq(&hugetlb_lock); 4394 mutex_unlock(&h->resize_lock); 4395 4396 if (err) 4397 return err; 4398 return len; 4399 } 4400 HSTATE_ATTR_WO(demote); 4401 4402 static ssize_t demote_size_show(struct kobject *kobj, 4403 struct kobj_attribute *attr, char *buf) 4404 { 4405 struct hstate *h = kobj_to_hstate(kobj, NULL); 4406 unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K; 4407 4408 return sysfs_emit(buf, "%lukB\n", demote_size); 4409 } 4410 4411 static ssize_t demote_size_store(struct kobject *kobj, 4412 struct kobj_attribute *attr, 4413 const char *buf, size_t count) 4414 { 4415 struct hstate *h, *demote_hstate; 4416 unsigned long demote_size; 4417 unsigned int demote_order; 4418 4419 demote_size = (unsigned long)memparse(buf, NULL); 4420 4421 demote_hstate = size_to_hstate(demote_size); 4422 if (!demote_hstate) 4423 return -EINVAL; 4424 demote_order = demote_hstate->order; 4425 if (demote_order < HUGETLB_PAGE_ORDER) 4426 return -EINVAL; 4427 4428 /* demote order must be smaller than hstate order */ 4429 h = kobj_to_hstate(kobj, NULL); 4430 if (demote_order >= h->order) 4431 return -EINVAL; 4432 4433 /* resize_lock synchronizes access to demote size and writes */ 4434 mutex_lock(&h->resize_lock); 4435 h->demote_order = demote_order; 4436 mutex_unlock(&h->resize_lock); 4437 4438 return count; 4439 } 4440 HSTATE_ATTR(demote_size); 4441 4442 static struct attribute *hstate_attrs[] = { 4443 &nr_hugepages_attr.attr, 4444 &nr_overcommit_hugepages_attr.attr, 4445 &free_hugepages_attr.attr, 4446 &resv_hugepages_attr.attr, 4447 &surplus_hugepages_attr.attr, 4448 #ifdef CONFIG_NUMA 4449 &nr_hugepages_mempolicy_attr.attr, 4450 #endif 4451 NULL, 4452 }; 4453 4454 static const struct attribute_group hstate_attr_group = { 4455 .attrs = hstate_attrs, 4456 }; 4457 4458 static struct attribute *hstate_demote_attrs[] = { 4459 &demote_size_attr.attr, 4460 &demote_attr.attr, 4461 NULL, 4462 }; 4463 4464 static const struct attribute_group hstate_demote_attr_group = { 4465 .attrs = hstate_demote_attrs, 4466 }; 4467 4468 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, 4469 struct kobject **hstate_kobjs, 4470 const struct attribute_group *hstate_attr_group) 4471 { 4472 int retval; 4473 int hi = hstate_index(h); 4474 4475 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); 4476 if (!hstate_kobjs[hi]) 4477 return -ENOMEM; 4478 4479 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); 4480 if (retval) { 4481 kobject_put(hstate_kobjs[hi]); 4482 hstate_kobjs[hi] = NULL; 4483 return retval; 4484 } 4485 4486 if (h->demote_order) { 4487 retval = sysfs_create_group(hstate_kobjs[hi], 4488 &hstate_demote_attr_group); 4489 if (retval) { 4490 pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name); 4491 sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group); 4492 kobject_put(hstate_kobjs[hi]); 4493 hstate_kobjs[hi] = NULL; 4494 return retval; 4495 } 4496 } 4497 4498 return 0; 4499 } 4500 4501 #ifdef CONFIG_NUMA 4502 static bool hugetlb_sysfs_initialized __ro_after_init; 4503 4504 /* 4505 * node_hstate/s - associate per node hstate attributes, via their kobjects, 4506 * 
with node devices in node_devices[] using a parallel array. The array 4507 * index of a node device or _hstate == node id. 4508 * This is here to avoid any static dependency of the node device driver, in 4509 * the base kernel, on the hugetlb module. 4510 */ 4511 struct node_hstate { 4512 struct kobject *hugepages_kobj; 4513 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 4514 }; 4515 static struct node_hstate node_hstates[MAX_NUMNODES]; 4516 4517 /* 4518 * A subset of global hstate attributes for node devices 4519 */ 4520 static struct attribute *per_node_hstate_attrs[] = { 4521 &nr_hugepages_attr.attr, 4522 &free_hugepages_attr.attr, 4523 &surplus_hugepages_attr.attr, 4524 NULL, 4525 }; 4526 4527 static const struct attribute_group per_node_hstate_attr_group = { 4528 .attrs = per_node_hstate_attrs, 4529 }; 4530 4531 /* 4532 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. 4533 * Returns node id via non-NULL nidp. 4534 */ 4535 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 4536 { 4537 int nid; 4538 4539 for (nid = 0; nid < nr_node_ids; nid++) { 4540 struct node_hstate *nhs = &node_hstates[nid]; 4541 int i; 4542 for (i = 0; i < HUGE_MAX_HSTATE; i++) 4543 if (nhs->hstate_kobjs[i] == kobj) { 4544 if (nidp) 4545 *nidp = nid; 4546 return &hstates[i]; 4547 } 4548 } 4549 4550 BUG(); 4551 return NULL; 4552 } 4553 4554 /* 4555 * Unregister hstate attributes from a single node device. 4556 * No-op if no hstate attributes attached. 4557 */ 4558 void hugetlb_unregister_node(struct node *node) 4559 { 4560 struct hstate *h; 4561 struct node_hstate *nhs = &node_hstates[node->dev.id]; 4562 4563 if (!nhs->hugepages_kobj) 4564 return; /* no hstate attributes */ 4565 4566 for_each_hstate(h) { 4567 int idx = hstate_index(h); 4568 struct kobject *hstate_kobj = nhs->hstate_kobjs[idx]; 4569 4570 if (!hstate_kobj) 4571 continue; 4572 if (h->demote_order) 4573 sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group); 4574 sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group); 4575 kobject_put(hstate_kobj); 4576 nhs->hstate_kobjs[idx] = NULL; 4577 } 4578 4579 kobject_put(nhs->hugepages_kobj); 4580 nhs->hugepages_kobj = NULL; 4581 } 4582 4583 4584 /* 4585 * Register hstate attributes for a single node device. 4586 * No-op if attributes already registered. 4587 */ 4588 void hugetlb_register_node(struct node *node) 4589 { 4590 struct hstate *h; 4591 struct node_hstate *nhs = &node_hstates[node->dev.id]; 4592 int err; 4593 4594 if (!hugetlb_sysfs_initialized) 4595 return; 4596 4597 if (nhs->hugepages_kobj) 4598 return; /* already allocated */ 4599 4600 nhs->hugepages_kobj = kobject_create_and_add("hugepages", 4601 &node->dev.kobj); 4602 if (!nhs->hugepages_kobj) 4603 return; 4604 4605 for_each_hstate(h) { 4606 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, 4607 nhs->hstate_kobjs, 4608 &per_node_hstate_attr_group); 4609 if (err) { 4610 pr_err("HugeTLB: Unable to add hstate %s for node %d\n", 4611 h->name, node->dev.id); 4612 hugetlb_unregister_node(node); 4613 break; 4614 } 4615 } 4616 } 4617 4618 /* 4619 * hugetlb init time: register hstate attributes for all registered node 4620 * devices of nodes that have memory. All on-line nodes should have 4621 * registered their associated device by this time. 
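 * Nodes that come online later (e.g. via memory hotplug) are expected to be
 * handled by the node device driver calling hugetlb_register_node() /
 * hugetlb_unregister_node() directly.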
4622 */ 4623 static void __init hugetlb_register_all_nodes(void) 4624 { 4625 int nid; 4626 4627 for_each_online_node(nid) 4628 hugetlb_register_node(node_devices[nid]); 4629 } 4630 #else /* !CONFIG_NUMA */ 4631 4632 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 4633 { 4634 BUG(); 4635 if (nidp) 4636 *nidp = -1; 4637 return NULL; 4638 } 4639 4640 static void hugetlb_register_all_nodes(void) { } 4641 4642 #endif 4643 4644 static void __init hugetlb_sysfs_init(void) 4645 { 4646 struct hstate *h; 4647 int err; 4648 4649 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 4650 if (!hugepages_kobj) 4651 return; 4652 4653 for_each_hstate(h) { 4654 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, 4655 hstate_kobjs, &hstate_attr_group); 4656 if (err) 4657 pr_err("HugeTLB: Unable to add hstate %s\n", h->name); 4658 } 4659 4660 #ifdef CONFIG_NUMA 4661 hugetlb_sysfs_initialized = true; 4662 #endif 4663 hugetlb_register_all_nodes(); 4664 } 4665 4666 #ifdef CONFIG_SYSCTL 4667 static void hugetlb_sysctl_init(void); 4668 #else 4669 static inline void hugetlb_sysctl_init(void) { } 4670 #endif 4671 4672 static int __init hugetlb_init(void) 4673 { 4674 int i; 4675 4676 BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE < 4677 __NR_HPAGEFLAGS); 4678 4679 if (!hugepages_supported()) { 4680 if (hugetlb_max_hstate || default_hstate_max_huge_pages) 4681 pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n"); 4682 return 0; 4683 } 4684 4685 /* 4686 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some 4687 * architectures depend on setup being done here. 4688 */ 4689 hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 4690 if (!parsed_default_hugepagesz) { 4691 /* 4692 * If we did not parse a default huge page size, set 4693 * default_hstate_idx to HPAGE_SIZE hstate. And, if the 4694 * number of huge pages for this default size was implicitly 4695 * specified, set that here as well. 4696 * Note that the implicit setting will overwrite an explicit 4697 * setting. A warning will be printed in this case. 
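 * For example, on x86_64 (2 MiB default huge page size), booting with
 * "hugepages=512 hugepagesz=2M hugepages=256" ends up with 512 default-size
 * pages: the implicit 512, specified before any hugepagesz=, overrides the
 * explicit 256, and the warnings below report the ignored value.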
4698 */ 4699 default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE)); 4700 if (default_hstate_max_huge_pages) { 4701 if (default_hstate.max_huge_pages) { 4702 char buf[32]; 4703 4704 string_get_size(huge_page_size(&default_hstate), 4705 1, STRING_UNITS_2, buf, 32); 4706 pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n", 4707 default_hstate.max_huge_pages, buf); 4708 pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n", 4709 default_hstate_max_huge_pages); 4710 } 4711 default_hstate.max_huge_pages = 4712 default_hstate_max_huge_pages; 4713 4714 for_each_online_node(i) 4715 default_hstate.max_huge_pages_node[i] = 4716 default_hugepages_in_node[i]; 4717 } 4718 } 4719 4720 hugetlb_cma_check(); 4721 hugetlb_init_hstates(); 4722 gather_bootmem_prealloc(); 4723 report_hugepages(); 4724 4725 hugetlb_sysfs_init(); 4726 hugetlb_cgroup_file_init(); 4727 hugetlb_sysctl_init(); 4728 4729 #ifdef CONFIG_SMP 4730 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus()); 4731 #else 4732 num_fault_mutexes = 1; 4733 #endif 4734 hugetlb_fault_mutex_table = 4735 kmalloc_array(num_fault_mutexes, sizeof(struct mutex), 4736 GFP_KERNEL); 4737 BUG_ON(!hugetlb_fault_mutex_table); 4738 4739 for (i = 0; i < num_fault_mutexes; i++) 4740 mutex_init(&hugetlb_fault_mutex_table[i]); 4741 return 0; 4742 } 4743 subsys_initcall(hugetlb_init); 4744 4745 /* Overwritten by architectures with more huge page sizes */ 4746 bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size) 4747 { 4748 return size == HPAGE_SIZE; 4749 } 4750 4751 void __init hugetlb_add_hstate(unsigned int order) 4752 { 4753 struct hstate *h; 4754 unsigned long i; 4755 4756 if (size_to_hstate(PAGE_SIZE << order)) { 4757 return; 4758 } 4759 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE); 4760 BUG_ON(order < order_base_2(__NR_USED_SUBPAGE)); 4761 h = &hstates[hugetlb_max_hstate++]; 4762 __mutex_init(&h->resize_lock, "resize mutex", &h->resize_key); 4763 h->order = order; 4764 h->mask = ~(huge_page_size(h) - 1); 4765 for (i = 0; i < MAX_NUMNODES; ++i) 4766 INIT_LIST_HEAD(&h->hugepage_freelists[i]); 4767 INIT_LIST_HEAD(&h->hugepage_activelist); 4768 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", 4769 huge_page_size(h)/SZ_1K); 4770 4771 parsed_hstate = h; 4772 } 4773 4774 bool __init __weak hugetlb_node_alloc_supported(void) 4775 { 4776 return true; 4777 } 4778 4779 static void __init hugepages_clear_pages_in_node(void) 4780 { 4781 if (!hugetlb_max_hstate) { 4782 default_hstate_max_huge_pages = 0; 4783 memset(default_hugepages_in_node, 0, 4784 sizeof(default_hugepages_in_node)); 4785 } else { 4786 parsed_hstate->max_huge_pages = 0; 4787 memset(parsed_hstate->max_huge_pages_node, 0, 4788 sizeof(parsed_hstate->max_huge_pages_node)); 4789 } 4790 } 4791 4792 static __init int hugetlb_add_param(char *s, int (*setup)(char *)) 4793 { 4794 size_t len; 4795 char *p; 4796 4797 if (hugetlb_param_index >= HUGE_MAX_CMDLINE_ARGS) 4798 return -EINVAL; 4799 4800 len = strlen(s) + 1; 4801 if (len + hstate_cmdline_index > sizeof(hstate_cmdline_buf)) 4802 return -EINVAL; 4803 4804 p = &hstate_cmdline_buf[hstate_cmdline_index]; 4805 memcpy(p, s, len); 4806 hstate_cmdline_index += len; 4807 4808 hugetlb_params[hugetlb_param_index].val = p; 4809 hugetlb_params[hugetlb_param_index].setup = setup; 4810 4811 hugetlb_param_index++; 4812 4813 return 0; 4814 } 4815 4816 static __init void hugetlb_parse_params(void) 4817 { 4818 int i; 4819 struct hugetlb_cmdline *hcp; 4820 4821 for (i = 0; i < hugetlb_param_index; 
i++) { 4822 hcp = &hugetlb_params[i]; 4823 4824 hcp->setup(hcp->val); 4825 } 4826 4827 hugetlb_cma_validate_params(); 4828 } 4829 4830 /* 4831 * hugepages command line processing 4832 * hugepages normally follows a valid hugepagesz or default_hugepagesz 4833 * specification. If not, ignore the hugepages value. hugepages can also 4834 * be the first huge page command line option in which case it implicitly 4835 * specifies the number of huge pages for the default size. 4836 */ 4837 static int __init hugepages_setup(char *s) 4838 { 4839 unsigned long *mhp; 4840 static unsigned long *last_mhp; 4841 int node = NUMA_NO_NODE; 4842 int count; 4843 unsigned long tmp; 4844 char *p = s; 4845 4846 if (!parsed_valid_hugepagesz) { 4847 pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s); 4848 parsed_valid_hugepagesz = true; 4849 return -EINVAL; 4850 } 4851 4852 /* 4853 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter 4854 * yet, so this hugepages= parameter goes to the "default hstate". 4855 * Otherwise, it goes with the previously parsed hugepagesz or 4856 * default_hugepagesz. 4857 */ 4858 else if (!hugetlb_max_hstate) 4859 mhp = &default_hstate_max_huge_pages; 4860 else 4861 mhp = &parsed_hstate->max_huge_pages; 4862 4863 if (mhp == last_mhp) { 4864 pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s); 4865 return 1; 4866 } 4867 4868 while (*p) { 4869 count = 0; 4870 if (sscanf(p, "%lu%n", &tmp, &count) != 1) 4871 goto invalid; 4872 /* Parameter is node format */ 4873 if (p[count] == ':') { 4874 if (!hugetlb_node_alloc_supported()) { 4875 pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n"); 4876 return 1; 4877 } 4878 if (tmp >= MAX_NUMNODES || !node_online(tmp)) 4879 goto invalid; 4880 node = array_index_nospec(tmp, MAX_NUMNODES); 4881 p += count + 1; 4882 /* Parse hugepages */ 4883 if (sscanf(p, "%lu%n", &tmp, &count) != 1) 4884 goto invalid; 4885 if (!hugetlb_max_hstate) 4886 default_hugepages_in_node[node] = tmp; 4887 else 4888 parsed_hstate->max_huge_pages_node[node] = tmp; 4889 *mhp += tmp; 4890 /* Go to parse next node */ 4891 if (p[count] == ',') 4892 p += count + 1; 4893 else 4894 break; 4895 } else { 4896 if (p != s) 4897 goto invalid; 4898 *mhp = tmp; 4899 break; 4900 } 4901 } 4902 4903 last_mhp = mhp; 4904 4905 return 0; 4906 4907 invalid: 4908 pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p); 4909 hugepages_clear_pages_in_node(); 4910 return -EINVAL; 4911 } 4912 hugetlb_early_param("hugepages", hugepages_setup); 4913 4914 /* 4915 * hugepagesz command line processing 4916 * A specific huge page size can only be specified once with hugepagesz. 4917 * hugepagesz is followed by hugepages on the command line. The global 4918 * variable 'parsed_valid_hugepagesz' is used to determine if prior 4919 * hugepagesz argument was valid. 4920 */ 4921 static int __init hugepagesz_setup(char *s) 4922 { 4923 unsigned long size; 4924 struct hstate *h; 4925 4926 parsed_valid_hugepagesz = false; 4927 size = (unsigned long)memparse(s, NULL); 4928 4929 if (!arch_hugetlb_valid_size(size)) { 4930 pr_err("HugeTLB: unsupported hugepagesz=%s\n", s); 4931 return -EINVAL; 4932 } 4933 4934 h = size_to_hstate(size); 4935 if (h) { 4936 /* 4937 * hstate for this size already exists. This is normally 4938 * an error, but is allowed if the existing hstate is the 4939 * default hstate.
More specifically, it is only allowed if 4940 * the number of huge pages for the default hstate was not 4941 * previously specified. 4942 */ 4943 if (!parsed_default_hugepagesz || h != &default_hstate || 4944 default_hstate.max_huge_pages) { 4945 pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s); 4946 return -EINVAL; 4947 } 4948 4949 /* 4950 * No need to call hugetlb_add_hstate() as hstate already 4951 * exists. But, do set parsed_hstate so that a following 4952 * hugepages= parameter will be applied to this hstate. 4953 */ 4954 parsed_hstate = h; 4955 parsed_valid_hugepagesz = true; 4956 return 0; 4957 } 4958 4959 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); 4960 parsed_valid_hugepagesz = true; 4961 return 0; 4962 } 4963 hugetlb_early_param("hugepagesz", hugepagesz_setup); 4964 4965 /* 4966 * default_hugepagesz command line input 4967 * Only one instance of default_hugepagesz allowed on command line. 4968 */ 4969 static int __init default_hugepagesz_setup(char *s) 4970 { 4971 unsigned long size; 4972 int i; 4973 4974 parsed_valid_hugepagesz = false; 4975 if (parsed_default_hugepagesz) { 4976 pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s); 4977 return -EINVAL; 4978 } 4979 4980 size = (unsigned long)memparse(s, NULL); 4981 4982 if (!arch_hugetlb_valid_size(size)) { 4983 pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s); 4984 return -EINVAL; 4985 } 4986 4987 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); 4988 parsed_valid_hugepagesz = true; 4989 parsed_default_hugepagesz = true; 4990 default_hstate_idx = hstate_index(size_to_hstate(size)); 4991 4992 /* 4993 * The number of default huge pages (for this size) could have been 4994 * specified as the first hugetlb parameter: hugepages=X. If so, 4995 * then default_hstate_max_huge_pages is set. If the default huge 4996 * page size is gigantic (> MAX_PAGE_ORDER), then the pages must be 4997 * allocated here from bootmem allocator. 4998 */ 4999 if (default_hstate_max_huge_pages) { 5000 default_hstate.max_huge_pages = default_hstate_max_huge_pages; 5001 /* 5002 * Since this is an early parameter, we can't check 5003 * NUMA node state yet, so loop through MAX_NUMNODES. 
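 * For example, booting with "hugepages=0:512,1:512 default_hugepagesz=1G"
 * records 512 pages each for nodes 0 and 1 in default_hugepages_in_node[]
 * first; the loop below then copies those counts to the 1 GiB hstate.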
5004 */ 5005 for (i = 0; i < MAX_NUMNODES; i++) { 5006 if (default_hugepages_in_node[i] != 0) 5007 default_hstate.max_huge_pages_node[i] = 5008 default_hugepages_in_node[i]; 5009 } 5010 default_hstate_max_huge_pages = 0; 5011 } 5012 5013 return 0; 5014 } 5015 hugetlb_early_param("default_hugepagesz", default_hugepagesz_setup); 5016 5017 void __init hugetlb_bootmem_set_nodes(void) 5018 { 5019 int i, nid; 5020 unsigned long start_pfn, end_pfn; 5021 5022 if (!nodes_empty(hugetlb_bootmem_nodes)) 5023 return; 5024 5025 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 5026 if (end_pfn > start_pfn) 5027 node_set(nid, hugetlb_bootmem_nodes); 5028 } 5029 } 5030 5031 static bool __hugetlb_bootmem_allocated __initdata; 5032 5033 bool __init hugetlb_bootmem_allocated(void) 5034 { 5035 return __hugetlb_bootmem_allocated; 5036 } 5037 5038 void __init hugetlb_bootmem_alloc(void) 5039 { 5040 struct hstate *h; 5041 int i; 5042 5043 if (__hugetlb_bootmem_allocated) 5044 return; 5045 5046 hugetlb_bootmem_set_nodes(); 5047 5048 for (i = 0; i < MAX_NUMNODES; i++) 5049 INIT_LIST_HEAD(&huge_boot_pages[i]); 5050 5051 hugetlb_parse_params(); 5052 5053 for_each_hstate(h) { 5054 h->next_nid_to_alloc = first_online_node; 5055 5056 if (hstate_is_gigantic(h)) 5057 hugetlb_hstate_alloc_pages(h); 5058 } 5059 5060 __hugetlb_bootmem_allocated = true; 5061 } 5062 5063 /* 5064 * hugepage_alloc_threads command line parsing. 5065 * 5066 * When set, use this specific number of threads for the boot 5067 * allocation of hugepages. 5068 */ 5069 static int __init hugepage_alloc_threads_setup(char *s) 5070 { 5071 unsigned long allocation_threads; 5072 5073 if (kstrtoul(s, 0, &allocation_threads) != 0) 5074 return 1; 5075 5076 if (allocation_threads == 0) 5077 return 1; 5078 5079 hugepage_allocation_threads = allocation_threads; 5080 5081 return 1; 5082 } 5083 __setup("hugepage_alloc_threads=", hugepage_alloc_threads_setup); 5084 5085 static unsigned int allowed_mems_nr(struct hstate *h) 5086 { 5087 int node; 5088 unsigned int nr = 0; 5089 nodemask_t *mbind_nodemask; 5090 unsigned int *array = h->free_huge_pages_node; 5091 gfp_t gfp_mask = htlb_alloc_mask(h); 5092 5093 mbind_nodemask = policy_mbind_nodemask(gfp_mask); 5094 for_each_node_mask(node, cpuset_current_mems_allowed) { 5095 if (!mbind_nodemask || node_isset(node, *mbind_nodemask)) 5096 nr += array[node]; 5097 } 5098 5099 return nr; 5100 } 5101 5102 #ifdef CONFIG_SYSCTL 5103 static int proc_hugetlb_doulongvec_minmax(const struct ctl_table *table, int write, 5104 void *buffer, size_t *length, 5105 loff_t *ppos, unsigned long *out) 5106 { 5107 struct ctl_table dup_table; 5108 5109 /* 5110 * In order to avoid races with __do_proc_doulongvec_minmax(), we 5111 * can duplicate the @table and alter the duplicate of it. 
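 * (The hugetlb entries in hugetlb_table that use this helper have
 * .data == NULL; the value is passed in and out via @out instead.)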
5112 */ 5113 dup_table = *table; 5114 dup_table.data = out; 5115 5116 return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos); 5117 } 5118 5119 static int hugetlb_sysctl_handler_common(bool obey_mempolicy, 5120 const struct ctl_table *table, int write, 5121 void *buffer, size_t *length, loff_t *ppos) 5122 { 5123 struct hstate *h = &default_hstate; 5124 unsigned long tmp = h->max_huge_pages; 5125 int ret; 5126 5127 if (!hugepages_supported()) 5128 return -EOPNOTSUPP; 5129 5130 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 5131 &tmp); 5132 if (ret) 5133 goto out; 5134 5135 if (write) 5136 ret = __nr_hugepages_store_common(obey_mempolicy, h, 5137 NUMA_NO_NODE, tmp, *length); 5138 out: 5139 return ret; 5140 } 5141 5142 static int hugetlb_sysctl_handler(const struct ctl_table *table, int write, 5143 void *buffer, size_t *length, loff_t *ppos) 5144 { 5145 5146 return hugetlb_sysctl_handler_common(false, table, write, 5147 buffer, length, ppos); 5148 } 5149 5150 #ifdef CONFIG_NUMA 5151 static int hugetlb_mempolicy_sysctl_handler(const struct ctl_table *table, int write, 5152 void *buffer, size_t *length, loff_t *ppos) 5153 { 5154 return hugetlb_sysctl_handler_common(true, table, write, 5155 buffer, length, ppos); 5156 } 5157 #endif /* CONFIG_NUMA */ 5158 5159 static int hugetlb_overcommit_handler(const struct ctl_table *table, int write, 5160 void *buffer, size_t *length, loff_t *ppos) 5161 { 5162 struct hstate *h = &default_hstate; 5163 unsigned long tmp; 5164 int ret; 5165 5166 if (!hugepages_supported()) 5167 return -EOPNOTSUPP; 5168 5169 tmp = h->nr_overcommit_huge_pages; 5170 5171 if (write && hstate_is_gigantic(h)) 5172 return -EINVAL; 5173 5174 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 5175 &tmp); 5176 if (ret) 5177 goto out; 5178 5179 if (write) { 5180 spin_lock_irq(&hugetlb_lock); 5181 h->nr_overcommit_huge_pages = tmp; 5182 spin_unlock_irq(&hugetlb_lock); 5183 } 5184 out: 5185 return ret; 5186 } 5187 5188 static const struct ctl_table hugetlb_table[] = { 5189 { 5190 .procname = "nr_hugepages", 5191 .data = NULL, 5192 .maxlen = sizeof(unsigned long), 5193 .mode = 0644, 5194 .proc_handler = hugetlb_sysctl_handler, 5195 }, 5196 #ifdef CONFIG_NUMA 5197 { 5198 .procname = "nr_hugepages_mempolicy", 5199 .data = NULL, 5200 .maxlen = sizeof(unsigned long), 5201 .mode = 0644, 5202 .proc_handler = &hugetlb_mempolicy_sysctl_handler, 5203 }, 5204 #endif 5205 { 5206 .procname = "hugetlb_shm_group", 5207 .data = &sysctl_hugetlb_shm_group, 5208 .maxlen = sizeof(gid_t), 5209 .mode = 0644, 5210 .proc_handler = proc_dointvec, 5211 }, 5212 { 5213 .procname = "nr_overcommit_hugepages", 5214 .data = NULL, 5215 .maxlen = sizeof(unsigned long), 5216 .mode = 0644, 5217 .proc_handler = hugetlb_overcommit_handler, 5218 }, 5219 }; 5220 5221 static void __init hugetlb_sysctl_init(void) 5222 { 5223 register_sysctl_init("vm", hugetlb_table); 5224 } 5225 #endif /* CONFIG_SYSCTL */ 5226 5227 void hugetlb_report_meminfo(struct seq_file *m) 5228 { 5229 struct hstate *h; 5230 unsigned long total = 0; 5231 5232 if (!hugepages_supported()) 5233 return; 5234 5235 for_each_hstate(h) { 5236 unsigned long count = h->nr_huge_pages; 5237 5238 total += huge_page_size(h) * count; 5239 5240 if (h == &default_hstate) 5241 seq_printf(m, 5242 "HugePages_Total: %5lu\n" 5243 "HugePages_Free: %5lu\n" 5244 "HugePages_Rsvd: %5lu\n" 5245 "HugePages_Surp: %5lu\n" 5246 "Hugepagesize: %8lu kB\n", 5247 count, 5248 h->free_huge_pages, 5249 h->resv_huge_pages, 5250 
h->surplus_huge_pages, 5251 huge_page_size(h) / SZ_1K); 5252 } 5253 5254 seq_printf(m, "Hugetlb: %8lu kB\n", total / SZ_1K); 5255 } 5256 5257 int hugetlb_report_node_meminfo(char *buf, int len, int nid) 5258 { 5259 struct hstate *h = &default_hstate; 5260 5261 if (!hugepages_supported()) 5262 return 0; 5263 5264 return sysfs_emit_at(buf, len, 5265 "Node %d HugePages_Total: %5u\n" 5266 "Node %d HugePages_Free: %5u\n" 5267 "Node %d HugePages_Surp: %5u\n", 5268 nid, h->nr_huge_pages_node[nid], 5269 nid, h->free_huge_pages_node[nid], 5270 nid, h->surplus_huge_pages_node[nid]); 5271 } 5272 5273 void hugetlb_show_meminfo_node(int nid) 5274 { 5275 struct hstate *h; 5276 5277 if (!hugepages_supported()) 5278 return; 5279 5280 for_each_hstate(h) 5281 printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n", 5282 nid, 5283 h->nr_huge_pages_node[nid], 5284 h->free_huge_pages_node[nid], 5285 h->surplus_huge_pages_node[nid], 5286 huge_page_size(h) / SZ_1K); 5287 } 5288 5289 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm) 5290 { 5291 seq_printf(m, "HugetlbPages:\t%8lu kB\n", 5292 K(atomic_long_read(&mm->hugetlb_usage))); 5293 } 5294 5295 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */ 5296 unsigned long hugetlb_total_pages(void) 5297 { 5298 struct hstate *h; 5299 unsigned long nr_total_pages = 0; 5300 5301 for_each_hstate(h) 5302 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); 5303 return nr_total_pages; 5304 } 5305 5306 static int hugetlb_acct_memory(struct hstate *h, long delta) 5307 { 5308 int ret = -ENOMEM; 5309 5310 if (!delta) 5311 return 0; 5312 5313 spin_lock_irq(&hugetlb_lock); 5314 /* 5315 * When cpuset is configured, it breaks the strict hugetlb page 5316 * reservation as the accounting is done on a global variable. Such 5317 * reservation is completely rubbish in the presence of cpuset because 5318 * the reservation is not checked against page availability for the 5319 * current cpuset. Application can still potentially OOM'ed by kernel 5320 * with lack of free htlb page in cpuset that the task is in. 5321 * Attempt to enforce strict accounting with cpuset is almost 5322 * impossible (or too ugly) because cpuset is too fluid that 5323 * task or memory node can be dynamically moved between cpusets. 5324 * 5325 * The change of semantics for shared hugetlb mapping with cpuset is 5326 * undesirable. However, in order to preserve some of the semantics, 5327 * we fall back to check against current free page availability as 5328 * a best attempt and hopefully to minimize the impact of changing 5329 * semantics that cpuset has. 5330 * 5331 * Apart from cpuset, we also have memory policy mechanism that 5332 * also determines from which node the kernel will allocate memory 5333 * in a NUMA system. So similar to cpuset, we also should consider 5334 * the memory policy of the current task. Similar to the description 5335 * above. 5336 */ 5337 if (delta > 0) { 5338 if (gather_surplus_pages(h, delta) < 0) 5339 goto out; 5340 5341 if (delta > allowed_mems_nr(h)) { 5342 return_unused_surplus_pages(h, delta); 5343 goto out; 5344 } 5345 } 5346 5347 ret = 0; 5348 if (delta < 0) 5349 return_unused_surplus_pages(h, (unsigned long) -delta); 5350 5351 out: 5352 spin_unlock_irq(&hugetlb_lock); 5353 return ret; 5354 } 5355 5356 static void hugetlb_vm_op_open(struct vm_area_struct *vma) 5357 { 5358 struct resv_map *resv = vma_resv_map(vma); 5359 5360 /* 5361 * HPAGE_RESV_OWNER indicates a private mapping. 
5362 * This new VMA should share its sibling's reservation map if present. 5363 * The VMA will only ever have a valid reservation map pointer where 5364 * it is being copied for another still existing VMA. As that VMA 5365 * has a reference to the reservation map it cannot disappear until 5366 * after this open call completes. It is therefore safe to take a 5367 * new reference here without additional locking. 5368 */ 5369 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 5370 resv_map_dup_hugetlb_cgroup_uncharge_info(resv); 5371 kref_get(&resv->refs); 5372 } 5373 5374 /* 5375 * vma_lock structure for sharable mappings is vma specific. 5376 * Clear old pointer (if copied via vm_area_dup) and allocate 5377 * new structure. Before clearing, make sure vma_lock is not 5378 * for this vma. 5379 */ 5380 if (vma->vm_flags & VM_MAYSHARE) { 5381 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 5382 5383 if (vma_lock) { 5384 if (vma_lock->vma != vma) { 5385 vma->vm_private_data = NULL; 5386 hugetlb_vma_lock_alloc(vma); 5387 } else 5388 pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__); 5389 } else 5390 hugetlb_vma_lock_alloc(vma); 5391 } 5392 } 5393 5394 static void hugetlb_vm_op_close(struct vm_area_struct *vma) 5395 { 5396 struct hstate *h = hstate_vma(vma); 5397 struct resv_map *resv; 5398 struct hugepage_subpool *spool = subpool_vma(vma); 5399 unsigned long reserve, start, end; 5400 long gbl_reserve; 5401 5402 hugetlb_vma_lock_free(vma); 5403 5404 resv = vma_resv_map(vma); 5405 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 5406 return; 5407 5408 start = vma_hugecache_offset(h, vma, vma->vm_start); 5409 end = vma_hugecache_offset(h, vma, vma->vm_end); 5410 5411 reserve = (end - start) - region_count(resv, start, end); 5412 hugetlb_cgroup_uncharge_counter(resv, start, end); 5413 if (reserve) { 5414 /* 5415 * Decrement reserve counts. The global reserve count may be 5416 * adjusted if the subpool has a minimum size. 5417 */ 5418 gbl_reserve = hugepage_subpool_put_pages(spool, reserve); 5419 hugetlb_acct_memory(h, -gbl_reserve); 5420 } 5421 5422 kref_put(&resv->refs, resv_map_release); 5423 } 5424 5425 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr) 5426 { 5427 if (addr & ~(huge_page_mask(hstate_vma(vma)))) 5428 return -EINVAL; 5429 5430 /* 5431 * PMD sharing is only possible for PUD_SIZE-aligned address ranges 5432 * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this 5433 * split, unshare PMDs in the PUD_SIZE interval surrounding addr now. 5434 */ 5435 if (addr & ~PUD_MASK) { 5436 /* 5437 * hugetlb_vm_op_split is called right before we attempt to 5438 * split the VMA. We will need to unshare PMDs in the old and 5439 * new VMAs, so let's unshare before we split. 5440 */ 5441 unsigned long floor = addr & PUD_MASK; 5442 unsigned long ceil = floor + PUD_SIZE; 5443 5444 if (floor >= vma->vm_start && ceil <= vma->vm_end) 5445 hugetlb_unshare_pmds(vma, floor, ceil); 5446 } 5447 5448 return 0; 5449 } 5450 5451 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma) 5452 { 5453 return huge_page_size(hstate_vma(vma)); 5454 } 5455 5456 /* 5457 * We cannot handle pagefaults against hugetlb pages at all. They cause 5458 * handle_mm_fault() to try to instantiate regular-sized pages in the 5459 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get 5460 * this far.
5461 */ 5462 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf) 5463 { 5464 BUG(); 5465 return 0; 5466 } 5467 5468 /* 5469 * When a new function is introduced to vm_operations_struct and added 5470 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops. 5471 * This is because under System V memory model, mappings created via 5472 * shmget/shmat with "huge page" specified are backed by hugetlbfs files, 5473 * their original vm_ops are overwritten with shm_vm_ops. 5474 */ 5475 const struct vm_operations_struct hugetlb_vm_ops = { 5476 .fault = hugetlb_vm_op_fault, 5477 .open = hugetlb_vm_op_open, 5478 .close = hugetlb_vm_op_close, 5479 .may_split = hugetlb_vm_op_split, 5480 .pagesize = hugetlb_vm_op_pagesize, 5481 }; 5482 5483 static pte_t make_huge_pte(struct vm_area_struct *vma, struct folio *folio, 5484 bool try_mkwrite) 5485 { 5486 pte_t entry = folio_mk_pte(folio, vma->vm_page_prot); 5487 unsigned int shift = huge_page_shift(hstate_vma(vma)); 5488 5489 if (try_mkwrite && (vma->vm_flags & VM_WRITE)) { 5490 entry = pte_mkwrite_novma(pte_mkdirty(entry)); 5491 } else { 5492 entry = pte_wrprotect(entry); 5493 } 5494 entry = pte_mkyoung(entry); 5495 entry = arch_make_huge_pte(entry, shift, vma->vm_flags); 5496 5497 return entry; 5498 } 5499 5500 static void set_huge_ptep_writable(struct vm_area_struct *vma, 5501 unsigned long address, pte_t *ptep) 5502 { 5503 pte_t entry; 5504 5505 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(vma->vm_mm, address, ptep))); 5506 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) 5507 update_mmu_cache(vma, address, ptep); 5508 } 5509 5510 static void set_huge_ptep_maybe_writable(struct vm_area_struct *vma, 5511 unsigned long address, pte_t *ptep) 5512 { 5513 if (vma->vm_flags & VM_WRITE) 5514 set_huge_ptep_writable(vma, address, ptep); 5515 } 5516 5517 bool is_hugetlb_entry_migration(pte_t pte) 5518 { 5519 swp_entry_t swp; 5520 5521 if (huge_pte_none(pte) || pte_present(pte)) 5522 return false; 5523 swp = pte_to_swp_entry(pte); 5524 if (is_migration_entry(swp)) 5525 return true; 5526 else 5527 return false; 5528 } 5529 5530 bool is_hugetlb_entry_hwpoisoned(pte_t pte) 5531 { 5532 swp_entry_t swp; 5533 5534 if (huge_pte_none(pte) || pte_present(pte)) 5535 return false; 5536 swp = pte_to_swp_entry(pte); 5537 if (is_hwpoison_entry(swp)) 5538 return true; 5539 else 5540 return false; 5541 } 5542 5543 static void 5544 hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr, 5545 struct folio *new_folio, pte_t old, unsigned long sz) 5546 { 5547 pte_t newpte = make_huge_pte(vma, new_folio, true); 5548 5549 __folio_mark_uptodate(new_folio); 5550 hugetlb_add_new_anon_rmap(new_folio, vma, addr); 5551 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old)) 5552 newpte = huge_pte_mkuffd_wp(newpte); 5553 set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz); 5554 hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm); 5555 folio_set_hugetlb_migratable(new_folio); 5556 } 5557 5558 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 5559 struct vm_area_struct *dst_vma, 5560 struct vm_area_struct *src_vma) 5561 { 5562 pte_t *src_pte, *dst_pte, entry; 5563 struct folio *pte_folio; 5564 unsigned long addr; 5565 bool cow = is_cow_mapping(src_vma->vm_flags); 5566 struct hstate *h = hstate_vma(src_vma); 5567 unsigned long sz = huge_page_size(h); 5568 unsigned long npages = pages_per_huge_page(h); 5569 struct mmu_notifier_range range; 5570 unsigned long last_addr_mask; 5571 int ret = 0; 
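	/*
	 * For COW (private) mappings the source PTEs will be write-protected
	 * below, so take write_protect_seq and start an mmu notifier
	 * invalidation so other walkers see a consistent view. Shared
	 * mappings only need the source VMA lock (see the else branch).
	 */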
5572 5573 if (cow) { 5574 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src, 5575 src_vma->vm_start, 5576 src_vma->vm_end); 5577 mmu_notifier_invalidate_range_start(&range); 5578 vma_assert_write_locked(src_vma); 5579 raw_write_seqcount_begin(&src->write_protect_seq); 5580 } else { 5581 /* 5582 * For shared mappings the vma lock must be held before 5583 * calling hugetlb_walk() in the src vma. Otherwise, the 5584 * returned ptep could go away if part of a shared pmd and 5585 * another thread calls huge_pmd_unshare. 5586 */ 5587 hugetlb_vma_lock_read(src_vma); 5588 } 5589 5590 last_addr_mask = hugetlb_mask_last_page(h); 5591 for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) { 5592 spinlock_t *src_ptl, *dst_ptl; 5593 src_pte = hugetlb_walk(src_vma, addr, sz); 5594 if (!src_pte) { 5595 addr |= last_addr_mask; 5596 continue; 5597 } 5598 dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz); 5599 if (!dst_pte) { 5600 ret = -ENOMEM; 5601 break; 5602 } 5603 5604 /* 5605 * If the pagetables are shared don't copy or take references. 5606 * 5607 * dst_pte == src_pte is the common case of src/dest sharing. 5608 * However, src could have 'unshared' and dst shares with 5609 * another vma. So page_count of ptep page is checked instead 5610 * to reliably determine whether pte is shared. 5611 */ 5612 if (page_count(virt_to_page(dst_pte)) > 1) { 5613 addr |= last_addr_mask; 5614 continue; 5615 } 5616 5617 dst_ptl = huge_pte_lock(h, dst, dst_pte); 5618 src_ptl = huge_pte_lockptr(h, src, src_pte); 5619 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5620 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); 5621 again: 5622 if (huge_pte_none(entry)) { 5623 /* 5624 * Skip if src entry none. 5625 */ 5626 ; 5627 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) { 5628 if (!userfaultfd_wp(dst_vma)) 5629 entry = huge_pte_clear_uffd_wp(entry); 5630 set_huge_pte_at(dst, addr, dst_pte, entry, sz); 5631 } else if (unlikely(is_hugetlb_entry_migration(entry))) { 5632 swp_entry_t swp_entry = pte_to_swp_entry(entry); 5633 bool uffd_wp = pte_swp_uffd_wp(entry); 5634 5635 if (!is_readable_migration_entry(swp_entry) && cow) { 5636 /* 5637 * COW mappings require pages in both 5638 * parent and child to be set to read. 5639 */ 5640 swp_entry = make_readable_migration_entry( 5641 swp_offset(swp_entry)); 5642 entry = swp_entry_to_pte(swp_entry); 5643 if (userfaultfd_wp(src_vma) && uffd_wp) 5644 entry = pte_swp_mkuffd_wp(entry); 5645 set_huge_pte_at(src, addr, src_pte, entry, sz); 5646 } 5647 if (!userfaultfd_wp(dst_vma)) 5648 entry = huge_pte_clear_uffd_wp(entry); 5649 set_huge_pte_at(dst, addr, dst_pte, entry, sz); 5650 } else if (unlikely(is_pte_marker(entry))) { 5651 pte_marker marker = copy_pte_marker( 5652 pte_to_swp_entry(entry), dst_vma); 5653 5654 if (marker) 5655 set_huge_pte_at(dst, addr, dst_pte, 5656 make_pte_marker(marker), sz); 5657 } else { 5658 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); 5659 pte_folio = page_folio(pte_page(entry)); 5660 folio_get(pte_folio); 5661 5662 /* 5663 * Failing to duplicate the anon rmap is a rare case 5664 * where we see pinned hugetlb pages while they're 5665 * prone to COW. We need to do the COW earlier during 5666 * fork. 5667 * 5668 * When pre-allocating the page or copying data, we 5669 * need to be without the pgtable locks since we could 5670 * sleep during the process. 
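 * That is why src_ptl and dst_ptl are dropped before allocating and
 * copying the new folio below, and re-taken before re-checking that
 * the source PTE is unchanged.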
5671 */ 5672 if (!folio_test_anon(pte_folio)) { 5673 hugetlb_add_file_rmap(pte_folio); 5674 } else if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) { 5675 pte_t src_pte_old = entry; 5676 struct folio *new_folio; 5677 5678 spin_unlock(src_ptl); 5679 spin_unlock(dst_ptl); 5680 /* Do not use reserve as it's private owned */ 5681 new_folio = alloc_hugetlb_folio(dst_vma, addr, false); 5682 if (IS_ERR(new_folio)) { 5683 folio_put(pte_folio); 5684 ret = PTR_ERR(new_folio); 5685 break; 5686 } 5687 ret = copy_user_large_folio(new_folio, pte_folio, 5688 addr, dst_vma); 5689 folio_put(pte_folio); 5690 if (ret) { 5691 folio_put(new_folio); 5692 break; 5693 } 5694 5695 /* Install the new hugetlb folio if src pte stable */ 5696 dst_ptl = huge_pte_lock(h, dst, dst_pte); 5697 src_ptl = huge_pte_lockptr(h, src, src_pte); 5698 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5699 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); 5700 if (!pte_same(src_pte_old, entry)) { 5701 restore_reserve_on_error(h, dst_vma, addr, 5702 new_folio); 5703 folio_put(new_folio); 5704 /* huge_ptep of dst_pte won't change as in child */ 5705 goto again; 5706 } 5707 hugetlb_install_folio(dst_vma, dst_pte, addr, 5708 new_folio, src_pte_old, sz); 5709 spin_unlock(src_ptl); 5710 spin_unlock(dst_ptl); 5711 continue; 5712 } 5713 5714 if (cow) { 5715 /* 5716 * No need to notify as we are downgrading page 5717 * table protection not changing it to point 5718 * to a new page. 5719 * 5720 * See Documentation/mm/mmu_notifier.rst 5721 */ 5722 huge_ptep_set_wrprotect(src, addr, src_pte); 5723 entry = huge_pte_wrprotect(entry); 5724 } 5725 5726 if (!userfaultfd_wp(dst_vma)) 5727 entry = huge_pte_clear_uffd_wp(entry); 5728 5729 set_huge_pte_at(dst, addr, dst_pte, entry, sz); 5730 hugetlb_count_add(npages, dst); 5731 } 5732 spin_unlock(src_ptl); 5733 spin_unlock(dst_ptl); 5734 } 5735 5736 if (cow) { 5737 raw_write_seqcount_end(&src->write_protect_seq); 5738 mmu_notifier_invalidate_range_end(&range); 5739 } else { 5740 hugetlb_vma_unlock_read(src_vma); 5741 } 5742 5743 return ret; 5744 } 5745 5746 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr, 5747 unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte, 5748 unsigned long sz) 5749 { 5750 bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma); 5751 struct hstate *h = hstate_vma(vma); 5752 struct mm_struct *mm = vma->vm_mm; 5753 spinlock_t *src_ptl, *dst_ptl; 5754 pte_t pte; 5755 5756 dst_ptl = huge_pte_lock(h, mm, dst_pte); 5757 src_ptl = huge_pte_lockptr(h, mm, src_pte); 5758 5759 /* 5760 * We don't have to worry about the ordering of src and dst ptlocks 5761 * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock. 
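 *
 * dst_ptl is always taken first; src_ptl is only taken, nested, when it
 * is a different spinlock (they can resolve to the same lock, e.g.
 * mm->page_table_lock when split pmd ptlocks are disabled, hence the
 * inequality check).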
5762 */ 5763 if (src_ptl != dst_ptl) 5764 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5765 5766 pte = huge_ptep_get_and_clear(mm, old_addr, src_pte, sz); 5767 5768 if (need_clear_uffd_wp && pte_marker_uffd_wp(pte)) 5769 huge_pte_clear(mm, new_addr, dst_pte, sz); 5770 else { 5771 if (need_clear_uffd_wp) { 5772 if (pte_present(pte)) 5773 pte = huge_pte_clear_uffd_wp(pte); 5774 else if (is_swap_pte(pte)) 5775 pte = pte_swp_clear_uffd_wp(pte); 5776 } 5777 set_huge_pte_at(mm, new_addr, dst_pte, pte, sz); 5778 } 5779 5780 if (src_ptl != dst_ptl) 5781 spin_unlock(src_ptl); 5782 spin_unlock(dst_ptl); 5783 } 5784 5785 int move_hugetlb_page_tables(struct vm_area_struct *vma, 5786 struct vm_area_struct *new_vma, 5787 unsigned long old_addr, unsigned long new_addr, 5788 unsigned long len) 5789 { 5790 struct hstate *h = hstate_vma(vma); 5791 struct address_space *mapping = vma->vm_file->f_mapping; 5792 unsigned long sz = huge_page_size(h); 5793 struct mm_struct *mm = vma->vm_mm; 5794 unsigned long old_end = old_addr + len; 5795 unsigned long last_addr_mask; 5796 pte_t *src_pte, *dst_pte; 5797 struct mmu_notifier_range range; 5798 bool shared_pmd = false; 5799 5800 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr, 5801 old_end); 5802 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 5803 /* 5804 * In case of shared PMDs, we should cover the maximum possible 5805 * range. 5806 */ 5807 flush_cache_range(vma, range.start, range.end); 5808 5809 mmu_notifier_invalidate_range_start(&range); 5810 last_addr_mask = hugetlb_mask_last_page(h); 5811 /* Prevent race with file truncation */ 5812 hugetlb_vma_lock_write(vma); 5813 i_mmap_lock_write(mapping); 5814 for (; old_addr < old_end; old_addr += sz, new_addr += sz) { 5815 src_pte = hugetlb_walk(vma, old_addr, sz); 5816 if (!src_pte) { 5817 old_addr |= last_addr_mask; 5818 new_addr |= last_addr_mask; 5819 continue; 5820 } 5821 if (huge_pte_none(huge_ptep_get(mm, old_addr, src_pte))) 5822 continue; 5823 5824 if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) { 5825 shared_pmd = true; 5826 old_addr |= last_addr_mask; 5827 new_addr |= last_addr_mask; 5828 continue; 5829 } 5830 5831 dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz); 5832 if (!dst_pte) 5833 break; 5834 5835 move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz); 5836 } 5837 5838 if (shared_pmd) 5839 flush_hugetlb_tlb_range(vma, range.start, range.end); 5840 else 5841 flush_hugetlb_tlb_range(vma, old_end - len, old_end); 5842 mmu_notifier_invalidate_range_end(&range); 5843 i_mmap_unlock_write(mapping); 5844 hugetlb_vma_unlock_write(vma); 5845 5846 return len + old_addr - old_end; 5847 } 5848 5849 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 5850 unsigned long start, unsigned long end, 5851 struct folio *folio, zap_flags_t zap_flags) 5852 { 5853 struct mm_struct *mm = vma->vm_mm; 5854 const bool folio_provided = !!folio; 5855 unsigned long address; 5856 pte_t *ptep; 5857 pte_t pte; 5858 spinlock_t *ptl; 5859 struct hstate *h = hstate_vma(vma); 5860 unsigned long sz = huge_page_size(h); 5861 bool adjust_reservation = false; 5862 unsigned long last_addr_mask; 5863 bool force_flush = false; 5864 5865 WARN_ON(!is_vm_hugetlb_page(vma)); 5866 BUG_ON(start & ~huge_page_mask(h)); 5867 BUG_ON(end & ~huge_page_mask(h)); 5868 5869 /* 5870 * This is a hugetlb vma, all the pte entries should point 5871 * to huge page. 
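 *
 * The loop steps by huge_page_size().  When hugetlb_walk() finds no
 * page table, "address |= last_addr_mask" jumps to the last huge page
 * covered by the missing table so the whole hole is skipped in one
 * iteration; with 2MB pages and the generic hugetlb_mask_last_page(),
 * for example, the mask is PUD_SIZE - PMD_SIZE and the loop skips to
 * the final 2MB slot of the PUD-sized region (1GB on x86-64).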
5872 */ 5873 tlb_change_page_size(tlb, sz); 5874 tlb_start_vma(tlb, vma); 5875 5876 last_addr_mask = hugetlb_mask_last_page(h); 5877 address = start; 5878 for (; address < end; address += sz) { 5879 ptep = hugetlb_walk(vma, address, sz); 5880 if (!ptep) { 5881 address |= last_addr_mask; 5882 continue; 5883 } 5884 5885 ptl = huge_pte_lock(h, mm, ptep); 5886 if (huge_pmd_unshare(mm, vma, address, ptep)) { 5887 spin_unlock(ptl); 5888 tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE); 5889 force_flush = true; 5890 address |= last_addr_mask; 5891 continue; 5892 } 5893 5894 pte = huge_ptep_get(mm, address, ptep); 5895 if (huge_pte_none(pte)) { 5896 spin_unlock(ptl); 5897 continue; 5898 } 5899 5900 /* 5901 * Migrating hugepage or HWPoisoned hugepage is already 5902 * unmapped and its refcount is dropped, so just clear pte here. 5903 */ 5904 if (unlikely(!pte_present(pte))) { 5905 /* 5906 * If the pte was wr-protected by uffd-wp in any of the 5907 * swap forms, meanwhile the caller does not want to 5908 * drop the uffd-wp bit in this zap, then replace the 5909 * pte with a marker. 5910 */ 5911 if (pte_swp_uffd_wp_any(pte) && 5912 !(zap_flags & ZAP_FLAG_DROP_MARKER)) 5913 set_huge_pte_at(mm, address, ptep, 5914 make_pte_marker(PTE_MARKER_UFFD_WP), 5915 sz); 5916 else 5917 huge_pte_clear(mm, address, ptep, sz); 5918 spin_unlock(ptl); 5919 continue; 5920 } 5921 5922 /* 5923 * If a folio is supplied, it is because a specific 5924 * folio is being unmapped, not a range. Ensure the folio we 5925 * are about to unmap is the actual folio of interest. 5926 */ 5927 if (folio_provided) { 5928 if (folio != page_folio(pte_page(pte))) { 5929 spin_unlock(ptl); 5930 continue; 5931 } 5932 /* 5933 * Mark the VMA as having unmapped its page so that 5934 * future faults in this VMA will fail rather than 5935 * looking like data was lost 5936 */ 5937 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 5938 } else { 5939 folio = page_folio(pte_page(pte)); 5940 } 5941 5942 pte = huge_ptep_get_and_clear(mm, address, ptep, sz); 5943 tlb_remove_huge_tlb_entry(h, tlb, ptep, address); 5944 if (huge_pte_dirty(pte)) 5945 folio_mark_dirty(folio); 5946 /* Leave a uffd-wp pte marker if needed */ 5947 if (huge_pte_uffd_wp(pte) && 5948 !(zap_flags & ZAP_FLAG_DROP_MARKER)) 5949 set_huge_pte_at(mm, address, ptep, 5950 make_pte_marker(PTE_MARKER_UFFD_WP), 5951 sz); 5952 hugetlb_count_sub(pages_per_huge_page(h), mm); 5953 hugetlb_remove_rmap(folio); 5954 5955 /* 5956 * Restore the reservation for anonymous page, otherwise the 5957 * backing page could be stolen by someone. 5958 * If there we are freeing a surplus, do not set the restore 5959 * reservation bit. 5960 */ 5961 if (!h->surplus_huge_pages && __vma_private_lock(vma) && 5962 folio_test_anon(folio)) { 5963 folio_set_hugetlb_restore_reserve(folio); 5964 /* Reservation to be adjusted after the spin lock */ 5965 adjust_reservation = true; 5966 } 5967 5968 spin_unlock(ptl); 5969 5970 /* 5971 * Adjust the reservation for the region that will have the 5972 * reserve restored. Keep in mind that vma_needs_reservation() changes 5973 * resv->adds_in_progress if it succeeds. If this is not done, 5974 * do_exit() will not see it, and will keep the reservation 5975 * forever. 5976 */ 5977 if (adjust_reservation) { 5978 int rc = vma_needs_reservation(h, vma, address); 5979 5980 if (rc < 0) 5981 /* Pressumably allocate_file_region_entries failed 5982 * to allocate a file_region struct. 
Clear 5983 * hugetlb_restore_reserve so that global reserve 5984 * count will not be incremented by free_huge_folio. 5985 * Act as if we consumed the reservation. 5986 */ 5987 folio_clear_hugetlb_restore_reserve(folio); 5988 else if (rc) 5989 vma_add_reservation(h, vma, address); 5990 } 5991 5992 tlb_remove_page_size(tlb, folio_page(folio, 0), 5993 folio_size(folio)); 5994 /* 5995 * If we were instructed to unmap a specific folio, we're done. 5996 */ 5997 if (folio_provided) 5998 break; 5999 } 6000 tlb_end_vma(tlb, vma); 6001 6002 /* 6003 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We 6004 * could defer the flush until now, since by holding i_mmap_rwsem we 6005 * guaranteed that the last refernece would not be dropped. But we must 6006 * do the flushing before we return, as otherwise i_mmap_rwsem will be 6007 * dropped and the last reference to the shared PMDs page might be 6008 * dropped as well. 6009 * 6010 * In theory we could defer the freeing of the PMD pages as well, but 6011 * huge_pmd_unshare() relies on the exact page_count for the PMD page to 6012 * detect sharing, so we cannot defer the release of the page either. 6013 * Instead, do flush now. 6014 */ 6015 if (force_flush) 6016 tlb_flush_mmu_tlbonly(tlb); 6017 } 6018 6019 void __hugetlb_zap_begin(struct vm_area_struct *vma, 6020 unsigned long *start, unsigned long *end) 6021 { 6022 if (!vma->vm_file) /* hugetlbfs_file_mmap error */ 6023 return; 6024 6025 adjust_range_if_pmd_sharing_possible(vma, start, end); 6026 hugetlb_vma_lock_write(vma); 6027 if (vma->vm_file) 6028 i_mmap_lock_write(vma->vm_file->f_mapping); 6029 } 6030 6031 void __hugetlb_zap_end(struct vm_area_struct *vma, 6032 struct zap_details *details) 6033 { 6034 zap_flags_t zap_flags = details ? details->zap_flags : 0; 6035 6036 if (!vma->vm_file) /* hugetlbfs_file_mmap error */ 6037 return; 6038 6039 if (zap_flags & ZAP_FLAG_UNMAP) { /* final unmap */ 6040 /* 6041 * Unlock and free the vma lock before releasing i_mmap_rwsem. 6042 * When the vma_lock is freed, this makes the vma ineligible 6043 * for pmd sharing. And, i_mmap_rwsem is required to set up 6044 * pmd sharing. This is important as page tables for this 6045 * unmapped range will be asynchrously deleted. If the page 6046 * tables are shared, there will be issues when accessed by 6047 * someone else. 6048 */ 6049 __hugetlb_vma_unlock_write_free(vma); 6050 } else { 6051 hugetlb_vma_unlock_write(vma); 6052 } 6053 6054 if (vma->vm_file) 6055 i_mmap_unlock_write(vma->vm_file->f_mapping); 6056 } 6057 6058 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 6059 unsigned long end, struct folio *folio, 6060 zap_flags_t zap_flags) 6061 { 6062 struct mmu_notifier_range range; 6063 struct mmu_gather tlb; 6064 6065 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 6066 start, end); 6067 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 6068 mmu_notifier_invalidate_range_start(&range); 6069 tlb_gather_mmu(&tlb, vma->vm_mm); 6070 6071 __unmap_hugepage_range(&tlb, vma, start, end, 6072 folio, zap_flags); 6073 6074 mmu_notifier_invalidate_range_end(&range); 6075 tlb_finish_mmu(&tlb); 6076 } 6077 6078 /* 6079 * This is called when the original mapper is failing to COW a MAP_PRIVATE 6080 * mapping it owns the reserve page for. The intention is to unmap the page 6081 * from other VMAs and let the children be SIGKILLed if they are faulting the 6082 * same region. 
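 *
 * Only other MAP_PRIVATE vmas that do not own a reservation
 * (HPAGE_RESV_OWNER not set) are unmapped; shared vmas and the faulting
 * vma itself are skipped by the interval tree walk below.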
6083 */ 6084 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, 6085 struct folio *folio, unsigned long address) 6086 { 6087 struct hstate *h = hstate_vma(vma); 6088 struct vm_area_struct *iter_vma; 6089 struct address_space *mapping; 6090 pgoff_t pgoff; 6091 6092 /* 6093 * vm_pgoff is in PAGE_SIZE units, hence the different calculation 6094 * from page cache lookup which is in HPAGE_SIZE units. 6095 */ 6096 address = address & huge_page_mask(h); 6097 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + 6098 vma->vm_pgoff; 6099 mapping = vma->vm_file->f_mapping; 6100 6101 /* 6102 * Take the mapping lock for the duration of the table walk. As 6103 * this mapping should be shared between all the VMAs, 6104 * __unmap_hugepage_range() is called as the lock is already held 6105 */ 6106 i_mmap_lock_write(mapping); 6107 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { 6108 /* Do not unmap the current VMA */ 6109 if (iter_vma == vma) 6110 continue; 6111 6112 /* 6113 * Shared VMAs have their own reserves and do not affect 6114 * MAP_PRIVATE accounting but it is possible that a shared 6115 * VMA is using the same page so check and skip such VMAs. 6116 */ 6117 if (iter_vma->vm_flags & VM_MAYSHARE) 6118 continue; 6119 6120 /* 6121 * Unmap the page from other VMAs without their own reserves. 6122 * They get marked to be SIGKILLed if they fault in these 6123 * areas. This is because a future no-page fault on this VMA 6124 * could insert a zeroed page instead of the data existing 6125 * from the time of fork. This would look like data corruption 6126 */ 6127 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 6128 unmap_hugepage_range(iter_vma, address, 6129 address + huge_page_size(h), 6130 folio, 0); 6131 } 6132 i_mmap_unlock_write(mapping); 6133 } 6134 6135 /* 6136 * hugetlb_wp() should be called with page lock of the original hugepage held. 6137 * Called with hugetlb_fault_mutex_table held and pte_page locked so we 6138 * cannot race with other handlers or page migration. 6139 * Keep the pte_same checks anyway to make transition from the mutex easier. 6140 */ 6141 static vm_fault_t hugetlb_wp(struct folio *pagecache_folio, 6142 struct vm_fault *vmf) 6143 { 6144 struct vm_area_struct *vma = vmf->vma; 6145 struct mm_struct *mm = vma->vm_mm; 6146 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 6147 pte_t pte = huge_ptep_get(mm, vmf->address, vmf->pte); 6148 struct hstate *h = hstate_vma(vma); 6149 struct folio *old_folio; 6150 struct folio *new_folio; 6151 bool cow_from_owner = 0; 6152 vm_fault_t ret = 0; 6153 struct mmu_notifier_range range; 6154 6155 /* 6156 * Never handle CoW for uffd-wp protected pages. It should be only 6157 * handled when the uffd-wp protection is removed. 6158 * 6159 * Note that only the CoW optimization path (in hugetlb_no_page()) 6160 * can trigger this, because hugetlb_fault() will always resolve 6161 * uffd-wp bit first. 6162 */ 6163 if (!unshare && huge_pte_uffd_wp(pte)) 6164 return 0; 6165 6166 /* Let's take out MAP_SHARED mappings first. */ 6167 if (vma->vm_flags & VM_MAYSHARE) { 6168 set_huge_ptep_writable(vma, vmf->address, vmf->pte); 6169 return 0; 6170 } 6171 6172 old_folio = page_folio(pte_page(pte)); 6173 6174 delayacct_wpcopy_start(); 6175 6176 retry_avoidcopy: 6177 /* 6178 * If no-one else is actually using this page, we're the exclusive 6179 * owner and can reuse this page. 
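 *
 * "No-one else" means an anon folio with folio_mapcount() == 1: in that
 * case the existing pte is simply made writable again (or left alone
 * for an unshare request) instead of copying the folio.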
6180 * 6181 * Note that we don't rely on the (safer) folio refcount here, because 6182 * copying the hugetlb folio when there are unexpected (temporary) 6183 * folio references could harm simple fork()+exit() users when 6184 * we run out of free hugetlb folios: we would have to kill processes 6185 * in scenarios that used to work. As a side effect, there can still 6186 * be leaks between processes, for example, with FOLL_GET users. 6187 */ 6188 if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) { 6189 if (!PageAnonExclusive(&old_folio->page)) { 6190 folio_move_anon_rmap(old_folio, vma); 6191 SetPageAnonExclusive(&old_folio->page); 6192 } 6193 if (likely(!unshare)) 6194 set_huge_ptep_maybe_writable(vma, vmf->address, 6195 vmf->pte); 6196 6197 delayacct_wpcopy_end(); 6198 return 0; 6199 } 6200 VM_BUG_ON_PAGE(folio_test_anon(old_folio) && 6201 PageAnonExclusive(&old_folio->page), &old_folio->page); 6202 6203 /* 6204 * If the process that created a MAP_PRIVATE mapping is about to 6205 * perform a COW due to a shared page count, attempt to satisfy 6206 * the allocation without using the existing reserves. The pagecache 6207 * page is used to determine if the reserve at this address was 6208 * consumed or not. If reserves were used, a partial faulted mapping 6209 * at the time of fork() could consume its reserves on COW instead 6210 * of the full address range. 6211 */ 6212 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 6213 old_folio != pagecache_folio) 6214 cow_from_owner = true; 6215 6216 folio_get(old_folio); 6217 6218 /* 6219 * Drop page table lock as buddy allocator may be called. It will 6220 * be acquired again before returning to the caller, as expected. 6221 */ 6222 spin_unlock(vmf->ptl); 6223 new_folio = alloc_hugetlb_folio(vma, vmf->address, cow_from_owner); 6224 6225 if (IS_ERR(new_folio)) { 6226 /* 6227 * If a process owning a MAP_PRIVATE mapping fails to COW, 6228 * it is due to references held by a child and an insufficient 6229 * huge page pool. To guarantee the original mappers 6230 * reliability, unmap the page from child processes. The child 6231 * may get SIGKILLed if it later faults. 6232 */ 6233 if (cow_from_owner) { 6234 struct address_space *mapping = vma->vm_file->f_mapping; 6235 pgoff_t idx; 6236 u32 hash; 6237 6238 folio_put(old_folio); 6239 /* 6240 * Drop hugetlb_fault_mutex and vma_lock before 6241 * unmapping. unmapping needs to hold vma_lock 6242 * in write mode. Dropping vma_lock in read mode 6243 * here is OK as COW mappings do not interact with 6244 * PMD sharing. 6245 * 6246 * Reacquire both after unmap operation. 6247 */ 6248 idx = vma_hugecache_offset(h, vma, vmf->address); 6249 hash = hugetlb_fault_mutex_hash(mapping, idx); 6250 hugetlb_vma_unlock_read(vma); 6251 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6252 6253 unmap_ref_private(mm, vma, old_folio, vmf->address); 6254 6255 mutex_lock(&hugetlb_fault_mutex_table[hash]); 6256 hugetlb_vma_lock_read(vma); 6257 spin_lock(vmf->ptl); 6258 vmf->pte = hugetlb_walk(vma, vmf->address, 6259 huge_page_size(h)); 6260 if (likely(vmf->pte && 6261 pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) 6262 goto retry_avoidcopy; 6263 /* 6264 * race occurs while re-acquiring page table 6265 * lock, and our job is done. 6266 */ 6267 delayacct_wpcopy_end(); 6268 return 0; 6269 } 6270 6271 ret = vmf_error(PTR_ERR(new_folio)); 6272 goto out_release_old; 6273 } 6274 6275 /* 6276 * When the original hugepage is shared one, it does not have 6277 * anon_vma prepared. 
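 *
 * __vmf_anon_prepare() can fail with VM_FAULT_RETRY when the anon_vma
 * cannot be set up under the per-VMA lock; the callers detect this and
 * release the per-VMA lock with vma_end_read() (see the VM_FAULT_RETRY
 * checks in hugetlb_no_page() and hugetlb_fault()).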
6278 */ 6279 ret = __vmf_anon_prepare(vmf); 6280 if (unlikely(ret)) 6281 goto out_release_all; 6282 6283 if (copy_user_large_folio(new_folio, old_folio, vmf->real_address, vma)) { 6284 ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h)); 6285 goto out_release_all; 6286 } 6287 __folio_mark_uptodate(new_folio); 6288 6289 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, vmf->address, 6290 vmf->address + huge_page_size(h)); 6291 mmu_notifier_invalidate_range_start(&range); 6292 6293 /* 6294 * Retake the page table lock to check for racing updates 6295 * before the page tables are altered 6296 */ 6297 spin_lock(vmf->ptl); 6298 vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h)); 6299 if (likely(vmf->pte && pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) { 6300 pte_t newpte = make_huge_pte(vma, new_folio, !unshare); 6301 6302 /* Break COW or unshare */ 6303 huge_ptep_clear_flush(vma, vmf->address, vmf->pte); 6304 hugetlb_remove_rmap(old_folio); 6305 hugetlb_add_new_anon_rmap(new_folio, vma, vmf->address); 6306 if (huge_pte_uffd_wp(pte)) 6307 newpte = huge_pte_mkuffd_wp(newpte); 6308 set_huge_pte_at(mm, vmf->address, vmf->pte, newpte, 6309 huge_page_size(h)); 6310 folio_set_hugetlb_migratable(new_folio); 6311 /* Make the old page be freed below */ 6312 new_folio = old_folio; 6313 } 6314 spin_unlock(vmf->ptl); 6315 mmu_notifier_invalidate_range_end(&range); 6316 out_release_all: 6317 /* 6318 * No restore in case of successful pagetable update (Break COW or 6319 * unshare) 6320 */ 6321 if (new_folio != old_folio) 6322 restore_reserve_on_error(h, vma, vmf->address, new_folio); 6323 folio_put(new_folio); 6324 out_release_old: 6325 folio_put(old_folio); 6326 6327 spin_lock(vmf->ptl); /* Caller expects lock to be held */ 6328 6329 delayacct_wpcopy_end(); 6330 return ret; 6331 } 6332 6333 /* 6334 * Return whether there is a pagecache page to back given address within VMA. 6335 */ 6336 bool hugetlbfs_pagecache_present(struct hstate *h, 6337 struct vm_area_struct *vma, unsigned long address) 6338 { 6339 struct address_space *mapping = vma->vm_file->f_mapping; 6340 pgoff_t idx = linear_page_index(vma, address); 6341 struct folio *folio; 6342 6343 folio = filemap_get_folio(mapping, idx); 6344 if (IS_ERR(folio)) 6345 return false; 6346 folio_put(folio); 6347 return true; 6348 } 6349 6350 int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping, 6351 pgoff_t idx) 6352 { 6353 struct inode *inode = mapping->host; 6354 struct hstate *h = hstate_inode(inode); 6355 int err; 6356 6357 idx <<= huge_page_order(h); 6358 __folio_set_locked(folio); 6359 err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL); 6360 6361 if (unlikely(err)) { 6362 __folio_clear_locked(folio); 6363 return err; 6364 } 6365 folio_clear_hugetlb_restore_reserve(folio); 6366 6367 /* 6368 * mark folio dirty so that it will not be removed from cache/file 6369 * by non-hugetlbfs specific code paths. 6370 */ 6371 folio_mark_dirty(folio); 6372 6373 spin_lock(&inode->i_lock); 6374 inode->i_blocks += blocks_per_huge_page(h); 6375 spin_unlock(&inode->i_lock); 6376 return 0; 6377 } 6378 6379 static inline vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf, 6380 struct address_space *mapping, 6381 unsigned long reason) 6382 { 6383 u32 hash; 6384 6385 /* 6386 * vma_lock and hugetlb_fault_mutex must be dropped before handling 6387 * userfault. Also mmap_lock could be dropped due to handling 6388 * userfault, any vma operation should be careful from here. 
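 *
 * The locks are released in the reverse of the order in which
 * hugetlb_fault() acquired them: the hugetlb vma read lock first, then
 * the fault mutex for this (mapping, pgoff) hash bucket.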
6389 */ 6390 hugetlb_vma_unlock_read(vmf->vma); 6391 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff); 6392 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6393 return handle_userfault(vmf, reason); 6394 } 6395 6396 /* 6397 * Recheck pte with pgtable lock. Returns true if pte didn't change, or 6398 * false if pte changed or is changing. 6399 */ 6400 static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, unsigned long addr, 6401 pte_t *ptep, pte_t old_pte) 6402 { 6403 spinlock_t *ptl; 6404 bool same; 6405 6406 ptl = huge_pte_lock(h, mm, ptep); 6407 same = pte_same(huge_ptep_get(mm, addr, ptep), old_pte); 6408 spin_unlock(ptl); 6409 6410 return same; 6411 } 6412 6413 static vm_fault_t hugetlb_no_page(struct address_space *mapping, 6414 struct vm_fault *vmf) 6415 { 6416 struct vm_area_struct *vma = vmf->vma; 6417 struct mm_struct *mm = vma->vm_mm; 6418 struct hstate *h = hstate_vma(vma); 6419 vm_fault_t ret = VM_FAULT_SIGBUS; 6420 int anon_rmap = 0; 6421 unsigned long size; 6422 struct folio *folio; 6423 pte_t new_pte; 6424 bool new_folio, new_pagecache_folio = false; 6425 u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff); 6426 6427 /* 6428 * Currently, we are forced to kill the process in the event the 6429 * original mapper has unmapped pages from the child due to a failed 6430 * COW/unsharing. Warn that such a situation has occurred as it may not 6431 * be obvious. 6432 */ 6433 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 6434 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n", 6435 current->pid); 6436 goto out; 6437 } 6438 6439 /* 6440 * Use page lock to guard against racing truncation 6441 * before we get page_table_lock. 6442 */ 6443 new_folio = false; 6444 folio = filemap_lock_hugetlb_folio(h, mapping, vmf->pgoff); 6445 if (IS_ERR(folio)) { 6446 size = i_size_read(mapping->host) >> huge_page_shift(h); 6447 if (vmf->pgoff >= size) 6448 goto out; 6449 /* Check for page in userfault range */ 6450 if (userfaultfd_missing(vma)) { 6451 /* 6452 * Since hugetlb_no_page() was examining pte 6453 * without pgtable lock, we need to re-test under 6454 * lock because the pte may not be stable and could 6455 * have changed from under us. Try to detect 6456 * either changed or during-changing ptes and retry 6457 * properly when needed. 6458 * 6459 * Note that userfaultfd is actually fine with 6460 * false positives (e.g. caused by pte changed), 6461 * but not wrong logical events (e.g. caused by 6462 * reading a pte during changing). The latter can 6463 * confuse the userspace, so the strictness is very 6464 * much preferred. E.g., MISSING event should 6465 * never happen on the page after UFFDIO_COPY has 6466 * correctly installed the page and returned. 6467 */ 6468 if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) { 6469 ret = 0; 6470 goto out; 6471 } 6472 6473 return hugetlb_handle_userfault(vmf, mapping, 6474 VM_UFFD_MISSING); 6475 } 6476 6477 if (!(vma->vm_flags & VM_MAYSHARE)) { 6478 ret = __vmf_anon_prepare(vmf); 6479 if (unlikely(ret)) 6480 goto out; 6481 } 6482 6483 folio = alloc_hugetlb_folio(vma, vmf->address, false); 6484 if (IS_ERR(folio)) { 6485 /* 6486 * Returning error will result in faulting task being 6487 * sent SIGBUS. The hugetlb fault mutex prevents two 6488 * tasks from racing to fault in the same page which 6489 * could result in false unable to allocate errors. 6490 * Page migration does not take the fault mutex, but 6491 * does a clear then write of pte's under page table 6492 * lock. 
Page fault code could race with migration, 6493 * notice the clear pte and try to allocate a page 6494 * here. Before returning error, get ptl and make 6495 * sure there really is no pte entry. 6496 */ 6497 if (hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) 6498 ret = vmf_error(PTR_ERR(folio)); 6499 else 6500 ret = 0; 6501 goto out; 6502 } 6503 folio_zero_user(folio, vmf->real_address); 6504 __folio_mark_uptodate(folio); 6505 new_folio = true; 6506 6507 if (vma->vm_flags & VM_MAYSHARE) { 6508 int err = hugetlb_add_to_page_cache(folio, mapping, 6509 vmf->pgoff); 6510 if (err) { 6511 /* 6512 * err can't be -EEXIST which implies someone 6513 * else consumed the reservation since hugetlb 6514 * fault mutex is held when add a hugetlb page 6515 * to the page cache. So it's safe to call 6516 * restore_reserve_on_error() here. 6517 */ 6518 restore_reserve_on_error(h, vma, vmf->address, 6519 folio); 6520 folio_put(folio); 6521 ret = VM_FAULT_SIGBUS; 6522 goto out; 6523 } 6524 new_pagecache_folio = true; 6525 } else { 6526 folio_lock(folio); 6527 anon_rmap = 1; 6528 } 6529 } else { 6530 /* 6531 * If memory error occurs between mmap() and fault, some process 6532 * don't have hwpoisoned swap entry for errored virtual address. 6533 * So we need to block hugepage fault by PG_hwpoison bit check. 6534 */ 6535 if (unlikely(folio_test_hwpoison(folio))) { 6536 ret = VM_FAULT_HWPOISON_LARGE | 6537 VM_FAULT_SET_HINDEX(hstate_index(h)); 6538 goto backout_unlocked; 6539 } 6540 6541 /* Check for page in userfault range. */ 6542 if (userfaultfd_minor(vma)) { 6543 folio_unlock(folio); 6544 folio_put(folio); 6545 /* See comment in userfaultfd_missing() block above */ 6546 if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) { 6547 ret = 0; 6548 goto out; 6549 } 6550 return hugetlb_handle_userfault(vmf, mapping, 6551 VM_UFFD_MINOR); 6552 } 6553 } 6554 6555 /* 6556 * If we are going to COW a private mapping later, we examine the 6557 * pending reservations for this page now. This will ensure that 6558 * any allocations necessary to record that reservation occur outside 6559 * the spinlock. 6560 */ 6561 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 6562 if (vma_needs_reservation(h, vma, vmf->address) < 0) { 6563 ret = VM_FAULT_OOM; 6564 goto backout_unlocked; 6565 } 6566 /* Just decrements count, does not deallocate */ 6567 vma_end_reservation(h, vma, vmf->address); 6568 } 6569 6570 vmf->ptl = huge_pte_lock(h, mm, vmf->pte); 6571 ret = 0; 6572 /* If pte changed from under us, retry */ 6573 if (!pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), vmf->orig_pte)) 6574 goto backout; 6575 6576 if (anon_rmap) 6577 hugetlb_add_new_anon_rmap(folio, vma, vmf->address); 6578 else 6579 hugetlb_add_file_rmap(folio); 6580 new_pte = make_huge_pte(vma, folio, vma->vm_flags & VM_SHARED); 6581 /* 6582 * If this pte was previously wr-protected, keep it wr-protected even 6583 * if populated. 6584 */ 6585 if (unlikely(pte_marker_uffd_wp(vmf->orig_pte))) 6586 new_pte = huge_pte_mkuffd_wp(new_pte); 6587 set_huge_pte_at(mm, vmf->address, vmf->pte, new_pte, huge_page_size(h)); 6588 6589 hugetlb_count_add(pages_per_huge_page(h), mm); 6590 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 6591 /* Optimization, do the COW without a second fault */ 6592 ret = hugetlb_wp(folio, vmf); 6593 } 6594 6595 spin_unlock(vmf->ptl); 6596 6597 /* 6598 * Only set hugetlb_migratable in newly allocated pages. 
Existing pages 6599 * found in the pagecache may not have hugetlb_migratable if they have 6600 * been isolated for migration. 6601 */ 6602 if (new_folio) 6603 folio_set_hugetlb_migratable(folio); 6604 6605 folio_unlock(folio); 6606 out: 6607 hugetlb_vma_unlock_read(vma); 6608 6609 /* 6610 * We must check to release the per-VMA lock. __vmf_anon_prepare() is 6611 * the only way ret can be set to VM_FAULT_RETRY. 6612 */ 6613 if (unlikely(ret & VM_FAULT_RETRY)) 6614 vma_end_read(vma); 6615 6616 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6617 return ret; 6618 6619 backout: 6620 spin_unlock(vmf->ptl); 6621 backout_unlocked: 6622 if (new_folio && !new_pagecache_folio) 6623 restore_reserve_on_error(h, vma, vmf->address, folio); 6624 6625 folio_unlock(folio); 6626 folio_put(folio); 6627 goto out; 6628 } 6629 6630 #ifdef CONFIG_SMP 6631 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) 6632 { 6633 unsigned long key[2]; 6634 u32 hash; 6635 6636 key[0] = (unsigned long) mapping; 6637 key[1] = idx; 6638 6639 hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0); 6640 6641 return hash & (num_fault_mutexes - 1); 6642 } 6643 #else 6644 /* 6645 * For uniprocessor systems we always use a single mutex, so just 6646 * return 0 and avoid the hashing overhead. 6647 */ 6648 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) 6649 { 6650 return 0; 6651 } 6652 #endif 6653 6654 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 6655 unsigned long address, unsigned int flags) 6656 { 6657 vm_fault_t ret; 6658 u32 hash; 6659 struct folio *folio = NULL; 6660 struct folio *pagecache_folio = NULL; 6661 struct hstate *h = hstate_vma(vma); 6662 struct address_space *mapping; 6663 int need_wait_lock = 0; 6664 struct vm_fault vmf = { 6665 .vma = vma, 6666 .address = address & huge_page_mask(h), 6667 .real_address = address, 6668 .flags = flags, 6669 .pgoff = vma_hugecache_offset(h, vma, 6670 address & huge_page_mask(h)), 6671 /* TODO: Track hugetlb faults using vm_fault */ 6672 6673 /* 6674 * Some fields may not be initialized, be careful as it may 6675 * be hard to debug if called functions make assumptions 6676 */ 6677 }; 6678 6679 /* 6680 * Serialize hugepage allocation and instantiation, so that we don't 6681 * get spurious allocation failures if two CPUs race to instantiate 6682 * the same page in the page cache. 6683 */ 6684 mapping = vma->vm_file->f_mapping; 6685 hash = hugetlb_fault_mutex_hash(mapping, vmf.pgoff); 6686 mutex_lock(&hugetlb_fault_mutex_table[hash]); 6687 6688 /* 6689 * Acquire vma lock before calling huge_pte_alloc and hold 6690 * until finished with vmf.pte. This prevents huge_pmd_unshare from 6691 * being called elsewhere and making the vmf.pte no longer valid. 6692 */ 6693 hugetlb_vma_lock_read(vma); 6694 vmf.pte = huge_pte_alloc(mm, vma, vmf.address, huge_page_size(h)); 6695 if (!vmf.pte) { 6696 hugetlb_vma_unlock_read(vma); 6697 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6698 return VM_FAULT_OOM; 6699 } 6700 6701 vmf.orig_pte = huge_ptep_get(mm, vmf.address, vmf.pte); 6702 if (huge_pte_none_mostly(vmf.orig_pte)) { 6703 if (is_pte_marker(vmf.orig_pte)) { 6704 pte_marker marker = 6705 pte_marker_get(pte_to_swp_entry(vmf.orig_pte)); 6706 6707 if (marker & PTE_MARKER_POISONED) { 6708 ret = VM_FAULT_HWPOISON_LARGE | 6709 VM_FAULT_SET_HINDEX(hstate_index(h)); 6710 goto out_mutex; 6711 } else if (WARN_ON_ONCE(marker & PTE_MARKER_GUARD)) { 6712 /* This isn't supported in hugetlb. 
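 * Guard pte markers (from MADV_GUARD_INSTALL) should never
 * appear on a hugetlb pte, so warn once and fail the fault
 * with SIGSEGV.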
*/ 6713 ret = VM_FAULT_SIGSEGV; 6714 goto out_mutex; 6715 } 6716 } 6717 6718 /* 6719 * Other PTE markers should be handled the same way as none PTE. 6720 * 6721 * hugetlb_no_page will drop vma lock and hugetlb fault 6722 * mutex internally, which make us return immediately. 6723 */ 6724 return hugetlb_no_page(mapping, &vmf); 6725 } 6726 6727 ret = 0; 6728 6729 /* 6730 * vmf.orig_pte could be a migration/hwpoison vmf.orig_pte at this 6731 * point, so this check prevents the kernel from going below assuming 6732 * that we have an active hugepage in pagecache. This goto expects 6733 * the 2nd page fault, and is_hugetlb_entry_(migration|hwpoisoned) 6734 * check will properly handle it. 6735 */ 6736 if (!pte_present(vmf.orig_pte)) { 6737 if (unlikely(is_hugetlb_entry_migration(vmf.orig_pte))) { 6738 /* 6739 * Release the hugetlb fault lock now, but retain 6740 * the vma lock, because it is needed to guard the 6741 * huge_pte_lockptr() later in 6742 * migration_entry_wait_huge(). The vma lock will 6743 * be released there. 6744 */ 6745 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6746 migration_entry_wait_huge(vma, vmf.address, vmf.pte); 6747 return 0; 6748 } else if (unlikely(is_hugetlb_entry_hwpoisoned(vmf.orig_pte))) 6749 ret = VM_FAULT_HWPOISON_LARGE | 6750 VM_FAULT_SET_HINDEX(hstate_index(h)); 6751 goto out_mutex; 6752 } 6753 6754 /* 6755 * If we are going to COW/unshare the mapping later, we examine the 6756 * pending reservations for this page now. This will ensure that any 6757 * allocations necessary to record that reservation occur outside the 6758 * spinlock. Also lookup the pagecache page now as it is used to 6759 * determine if a reservation has been consumed. 6760 */ 6761 if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) && 6762 !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(vmf.orig_pte)) { 6763 if (vma_needs_reservation(h, vma, vmf.address) < 0) { 6764 ret = VM_FAULT_OOM; 6765 goto out_mutex; 6766 } 6767 /* Just decrements count, does not deallocate */ 6768 vma_end_reservation(h, vma, vmf.address); 6769 6770 pagecache_folio = filemap_lock_hugetlb_folio(h, mapping, 6771 vmf.pgoff); 6772 if (IS_ERR(pagecache_folio)) 6773 pagecache_folio = NULL; 6774 } 6775 6776 vmf.ptl = huge_pte_lock(h, mm, vmf.pte); 6777 6778 /* Check for a racing update before calling hugetlb_wp() */ 6779 if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(mm, vmf.address, vmf.pte)))) 6780 goto out_ptl; 6781 6782 /* Handle userfault-wp first, before trying to lock more pages */ 6783 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(mm, vmf.address, vmf.pte)) && 6784 (flags & FAULT_FLAG_WRITE) && !huge_pte_write(vmf.orig_pte)) { 6785 if (!userfaultfd_wp_async(vma)) { 6786 spin_unlock(vmf.ptl); 6787 if (pagecache_folio) { 6788 folio_unlock(pagecache_folio); 6789 folio_put(pagecache_folio); 6790 } 6791 hugetlb_vma_unlock_read(vma); 6792 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6793 return handle_userfault(&vmf, VM_UFFD_WP); 6794 } 6795 6796 vmf.orig_pte = huge_pte_clear_uffd_wp(vmf.orig_pte); 6797 set_huge_pte_at(mm, vmf.address, vmf.pte, vmf.orig_pte, 6798 huge_page_size(hstate_vma(vma))); 6799 /* Fallthrough to CoW */ 6800 } 6801 6802 /* 6803 * hugetlb_wp() requires page locks of pte_page(vmf.orig_pte) and 6804 * pagecache_folio, so here we need take the former one 6805 * when folio != pagecache_folio or !pagecache_folio. 
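 *
 * Only a trylock is attempted: if the folio is locked elsewhere the
 * fault backs out, drops its locks, waits for the folio lock at the end
 * of this function (need_wait_lock) and lets the fault be retried.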
6806 */ 6807 folio = page_folio(pte_page(vmf.orig_pte)); 6808 if (folio != pagecache_folio) 6809 if (!folio_trylock(folio)) { 6810 need_wait_lock = 1; 6811 goto out_ptl; 6812 } 6813 6814 folio_get(folio); 6815 6816 if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { 6817 if (!huge_pte_write(vmf.orig_pte)) { 6818 ret = hugetlb_wp(pagecache_folio, &vmf); 6819 goto out_put_page; 6820 } else if (likely(flags & FAULT_FLAG_WRITE)) { 6821 vmf.orig_pte = huge_pte_mkdirty(vmf.orig_pte); 6822 } 6823 } 6824 vmf.orig_pte = pte_mkyoung(vmf.orig_pte); 6825 if (huge_ptep_set_access_flags(vma, vmf.address, vmf.pte, vmf.orig_pte, 6826 flags & FAULT_FLAG_WRITE)) 6827 update_mmu_cache(vma, vmf.address, vmf.pte); 6828 out_put_page: 6829 if (folio != pagecache_folio) 6830 folio_unlock(folio); 6831 folio_put(folio); 6832 out_ptl: 6833 spin_unlock(vmf.ptl); 6834 6835 if (pagecache_folio) { 6836 folio_unlock(pagecache_folio); 6837 folio_put(pagecache_folio); 6838 } 6839 out_mutex: 6840 hugetlb_vma_unlock_read(vma); 6841 6842 /* 6843 * We must check to release the per-VMA lock. __vmf_anon_prepare() in 6844 * hugetlb_wp() is the only way ret can be set to VM_FAULT_RETRY. 6845 */ 6846 if (unlikely(ret & VM_FAULT_RETRY)) 6847 vma_end_read(vma); 6848 6849 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6850 /* 6851 * Generally it's safe to hold refcount during waiting page lock. But 6852 * here we just wait to defer the next page fault to avoid busy loop and 6853 * the page is not used after unlocked before returning from the current 6854 * page fault. So we are safe from accessing freed page, even if we wait 6855 * here without taking refcount. 6856 */ 6857 if (need_wait_lock) 6858 folio_wait_locked(folio); 6859 return ret; 6860 } 6861 6862 #ifdef CONFIG_USERFAULTFD 6863 /* 6864 * Can probably be eliminated, but still used by hugetlb_mfill_atomic_pte(). 6865 */ 6866 static struct folio *alloc_hugetlb_folio_vma(struct hstate *h, 6867 struct vm_area_struct *vma, unsigned long address) 6868 { 6869 struct mempolicy *mpol; 6870 nodemask_t *nodemask; 6871 struct folio *folio; 6872 gfp_t gfp_mask; 6873 int node; 6874 6875 gfp_mask = htlb_alloc_mask(h); 6876 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask); 6877 /* 6878 * This is used to allocate a temporary hugetlb to hold the copied 6879 * content, which will then be copied again to the final hugetlb 6880 * consuming a reservation. Set the alloc_fallback to false to indicate 6881 * that breaking the per-node hugetlb pool is not allowed in this case. 6882 */ 6883 folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask, false); 6884 mpol_cond_put(mpol); 6885 6886 return folio; 6887 } 6888 6889 /* 6890 * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte 6891 * with modifications for hugetlb pages. 
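 *
 * Handles MFILL_ATOMIC_POISON (install a poison marker),
 * MFILL_ATOMIC_CONTINUE (map the folio already in the page cache) and
 * UFFDIO_COPY (allocate a hugetlb folio and copy from user space).
 * When the atomic copy from user space fails, -ENOENT is returned and a
 * temporary folio is handed back through *foliop; roughly (not
 * verbatim), the caller in mm/userfaultfd.c then does:
 *
 *	err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
 *				       src_addr, flags, &folio);
 *	if (err == -ENOENT) {
 *		... drop mmap_lock ...
 *		copy_folio_from_user(folio, (const void __user *)src_addr,
 *				     true);
 *		... retake mmap_lock, revalidate dst_vma and retry ...
 *	}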
6892 */ 6893 int hugetlb_mfill_atomic_pte(pte_t *dst_pte, 6894 struct vm_area_struct *dst_vma, 6895 unsigned long dst_addr, 6896 unsigned long src_addr, 6897 uffd_flags_t flags, 6898 struct folio **foliop) 6899 { 6900 struct mm_struct *dst_mm = dst_vma->vm_mm; 6901 bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE); 6902 bool wp_enabled = (flags & MFILL_ATOMIC_WP); 6903 struct hstate *h = hstate_vma(dst_vma); 6904 struct address_space *mapping = dst_vma->vm_file->f_mapping; 6905 pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr); 6906 unsigned long size = huge_page_size(h); 6907 int vm_shared = dst_vma->vm_flags & VM_SHARED; 6908 pte_t _dst_pte; 6909 spinlock_t *ptl; 6910 int ret = -ENOMEM; 6911 struct folio *folio; 6912 bool folio_in_pagecache = false; 6913 6914 if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) { 6915 ptl = huge_pte_lock(h, dst_mm, dst_pte); 6916 6917 /* Don't overwrite any existing PTEs (even markers) */ 6918 if (!huge_pte_none(huge_ptep_get(dst_mm, dst_addr, dst_pte))) { 6919 spin_unlock(ptl); 6920 return -EEXIST; 6921 } 6922 6923 _dst_pte = make_pte_marker(PTE_MARKER_POISONED); 6924 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size); 6925 6926 /* No need to invalidate - it was non-present before */ 6927 update_mmu_cache(dst_vma, dst_addr, dst_pte); 6928 6929 spin_unlock(ptl); 6930 return 0; 6931 } 6932 6933 if (is_continue) { 6934 ret = -EFAULT; 6935 folio = filemap_lock_hugetlb_folio(h, mapping, idx); 6936 if (IS_ERR(folio)) 6937 goto out; 6938 folio_in_pagecache = true; 6939 } else if (!*foliop) { 6940 /* If a folio already exists, then it's UFFDIO_COPY for 6941 * a non-missing case. Return -EEXIST. 6942 */ 6943 if (vm_shared && 6944 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { 6945 ret = -EEXIST; 6946 goto out; 6947 } 6948 6949 folio = alloc_hugetlb_folio(dst_vma, dst_addr, false); 6950 if (IS_ERR(folio)) { 6951 ret = -ENOMEM; 6952 goto out; 6953 } 6954 6955 ret = copy_folio_from_user(folio, (const void __user *) src_addr, 6956 false); 6957 6958 /* fallback to copy_from_user outside mmap_lock */ 6959 if (unlikely(ret)) { 6960 ret = -ENOENT; 6961 /* Free the allocated folio which may have 6962 * consumed a reservation. 6963 */ 6964 restore_reserve_on_error(h, dst_vma, dst_addr, folio); 6965 folio_put(folio); 6966 6967 /* Allocate a temporary folio to hold the copied 6968 * contents. 6969 */ 6970 folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr); 6971 if (!folio) { 6972 ret = -ENOMEM; 6973 goto out; 6974 } 6975 *foliop = folio; 6976 /* Set the outparam foliop and return to the caller to 6977 * copy the contents outside the lock. Don't free the 6978 * folio. 6979 */ 6980 goto out; 6981 } 6982 } else { 6983 if (vm_shared && 6984 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { 6985 folio_put(*foliop); 6986 ret = -EEXIST; 6987 *foliop = NULL; 6988 goto out; 6989 } 6990 6991 folio = alloc_hugetlb_folio(dst_vma, dst_addr, false); 6992 if (IS_ERR(folio)) { 6993 folio_put(*foliop); 6994 ret = -ENOMEM; 6995 *foliop = NULL; 6996 goto out; 6997 } 6998 ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma); 6999 folio_put(*foliop); 7000 *foliop = NULL; 7001 if (ret) { 7002 folio_put(folio); 7003 goto out; 7004 } 7005 } 7006 7007 /* 7008 * If we just allocated a new page, we need a memory barrier to ensure 7009 * that preceding stores to the page become visible before the 7010 * set_pte_at() write. The memory barrier inside __folio_mark_uptodate 7011 * is what we need. 
7012 * 7013 * In the case where we have not allocated a new page (is_continue), 7014 * the page must already be uptodate. UFFDIO_CONTINUE already includes 7015 * an earlier smp_wmb() to ensure that prior stores will be visible 7016 * before the set_pte_at() write. 7017 */ 7018 if (!is_continue) 7019 __folio_mark_uptodate(folio); 7020 else 7021 WARN_ON_ONCE(!folio_test_uptodate(folio)); 7022 7023 /* Add shared, newly allocated pages to the page cache. */ 7024 if (vm_shared && !is_continue) { 7025 ret = -EFAULT; 7026 if (idx >= (i_size_read(mapping->host) >> huge_page_shift(h))) 7027 goto out_release_nounlock; 7028 7029 /* 7030 * Serialization between remove_inode_hugepages() and 7031 * hugetlb_add_to_page_cache() below happens through the 7032 * hugetlb_fault_mutex_table that here must be hold by 7033 * the caller. 7034 */ 7035 ret = hugetlb_add_to_page_cache(folio, mapping, idx); 7036 if (ret) 7037 goto out_release_nounlock; 7038 folio_in_pagecache = true; 7039 } 7040 7041 ptl = huge_pte_lock(h, dst_mm, dst_pte); 7042 7043 ret = -EIO; 7044 if (folio_test_hwpoison(folio)) 7045 goto out_release_unlock; 7046 7047 /* 7048 * We allow to overwrite a pte marker: consider when both MISSING|WP 7049 * registered, we firstly wr-protect a none pte which has no page cache 7050 * page backing it, then access the page. 7051 */ 7052 ret = -EEXIST; 7053 if (!huge_pte_none_mostly(huge_ptep_get(dst_mm, dst_addr, dst_pte))) 7054 goto out_release_unlock; 7055 7056 if (folio_in_pagecache) 7057 hugetlb_add_file_rmap(folio); 7058 else 7059 hugetlb_add_new_anon_rmap(folio, dst_vma, dst_addr); 7060 7061 /* 7062 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY 7063 * with wp flag set, don't set pte write bit. 7064 */ 7065 _dst_pte = make_huge_pte(dst_vma, folio, 7066 !wp_enabled && !(is_continue && !vm_shared)); 7067 /* 7068 * Always mark UFFDIO_COPY page dirty; note that this may not be 7069 * extremely important for hugetlbfs for now since swapping is not 7070 * supported, but we should still be clear in that this page cannot be 7071 * thrown away at will, even if write bit not set. 
7072 */ 7073 _dst_pte = huge_pte_mkdirty(_dst_pte); 7074 _dst_pte = pte_mkyoung(_dst_pte); 7075 7076 if (wp_enabled) 7077 _dst_pte = huge_pte_mkuffd_wp(_dst_pte); 7078 7079 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size); 7080 7081 hugetlb_count_add(pages_per_huge_page(h), dst_mm); 7082 7083 /* No need to invalidate - it was non-present before */ 7084 update_mmu_cache(dst_vma, dst_addr, dst_pte); 7085 7086 spin_unlock(ptl); 7087 if (!is_continue) 7088 folio_set_hugetlb_migratable(folio); 7089 if (vm_shared || is_continue) 7090 folio_unlock(folio); 7091 ret = 0; 7092 out: 7093 return ret; 7094 out_release_unlock: 7095 spin_unlock(ptl); 7096 if (vm_shared || is_continue) 7097 folio_unlock(folio); 7098 out_release_nounlock: 7099 if (!folio_in_pagecache) 7100 restore_reserve_on_error(h, dst_vma, dst_addr, folio); 7101 folio_put(folio); 7102 goto out; 7103 } 7104 #endif /* CONFIG_USERFAULTFD */ 7105 7106 long hugetlb_change_protection(struct vm_area_struct *vma, 7107 unsigned long address, unsigned long end, 7108 pgprot_t newprot, unsigned long cp_flags) 7109 { 7110 struct mm_struct *mm = vma->vm_mm; 7111 unsigned long start = address; 7112 pte_t *ptep; 7113 pte_t pte; 7114 struct hstate *h = hstate_vma(vma); 7115 long pages = 0, psize = huge_page_size(h); 7116 bool shared_pmd = false; 7117 struct mmu_notifier_range range; 7118 unsigned long last_addr_mask; 7119 bool uffd_wp = cp_flags & MM_CP_UFFD_WP; 7120 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; 7121 7122 /* 7123 * In the case of shared PMDs, the area to flush could be beyond 7124 * start/end. Set range.start/range.end to cover the maximum possible 7125 * range if PMD sharing is possible. 7126 */ 7127 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 7128 0, mm, start, end); 7129 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 7130 7131 BUG_ON(address >= end); 7132 flush_cache_range(vma, range.start, range.end); 7133 7134 mmu_notifier_invalidate_range_start(&range); 7135 hugetlb_vma_lock_write(vma); 7136 i_mmap_lock_write(vma->vm_file->f_mapping); 7137 last_addr_mask = hugetlb_mask_last_page(h); 7138 for (; address < end; address += psize) { 7139 spinlock_t *ptl; 7140 ptep = hugetlb_walk(vma, address, psize); 7141 if (!ptep) { 7142 if (!uffd_wp) { 7143 address |= last_addr_mask; 7144 continue; 7145 } 7146 /* 7147 * Userfaultfd wr-protect requires pgtable 7148 * pre-allocations to install pte markers. 7149 */ 7150 ptep = huge_pte_alloc(mm, vma, address, psize); 7151 if (!ptep) { 7152 pages = -ENOMEM; 7153 break; 7154 } 7155 } 7156 ptl = huge_pte_lock(h, mm, ptep); 7157 if (huge_pmd_unshare(mm, vma, address, ptep)) { 7158 /* 7159 * When uffd-wp is enabled on the vma, unshare 7160 * shouldn't happen at all. Warn about it if it 7161 * happened due to some reason. 7162 */ 7163 WARN_ON_ONCE(uffd_wp || uffd_wp_resolve); 7164 pages++; 7165 spin_unlock(ptl); 7166 shared_pmd = true; 7167 address |= last_addr_mask; 7168 continue; 7169 } 7170 pte = huge_ptep_get(mm, address, ptep); 7171 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { 7172 /* Nothing to do. 
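 * A hwpoisoned entry is left untouched: protection changes
 * do not apply to a page that has already been poisoned.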
*/ 7173 } else if (unlikely(is_hugetlb_entry_migration(pte))) { 7174 swp_entry_t entry = pte_to_swp_entry(pte); 7175 struct page *page = pfn_swap_entry_to_page(entry); 7176 pte_t newpte = pte; 7177 7178 if (is_writable_migration_entry(entry)) { 7179 if (PageAnon(page)) 7180 entry = make_readable_exclusive_migration_entry( 7181 swp_offset(entry)); 7182 else 7183 entry = make_readable_migration_entry( 7184 swp_offset(entry)); 7185 newpte = swp_entry_to_pte(entry); 7186 pages++; 7187 } 7188 7189 if (uffd_wp) 7190 newpte = pte_swp_mkuffd_wp(newpte); 7191 else if (uffd_wp_resolve) 7192 newpte = pte_swp_clear_uffd_wp(newpte); 7193 if (!pte_same(pte, newpte)) 7194 set_huge_pte_at(mm, address, ptep, newpte, psize); 7195 } else if (unlikely(is_pte_marker(pte))) { 7196 /* 7197 * Do nothing on a poison marker; page is 7198 * corrupted, permissons do not apply. Here 7199 * pte_marker_uffd_wp()==true implies !poison 7200 * because they're mutual exclusive. 7201 */ 7202 if (pte_marker_uffd_wp(pte) && uffd_wp_resolve) 7203 /* Safe to modify directly (non-present->none). */ 7204 huge_pte_clear(mm, address, ptep, psize); 7205 } else if (!huge_pte_none(pte)) { 7206 pte_t old_pte; 7207 unsigned int shift = huge_page_shift(hstate_vma(vma)); 7208 7209 old_pte = huge_ptep_modify_prot_start(vma, address, ptep); 7210 pte = huge_pte_modify(old_pte, newprot); 7211 pte = arch_make_huge_pte(pte, shift, vma->vm_flags); 7212 if (uffd_wp) 7213 pte = huge_pte_mkuffd_wp(pte); 7214 else if (uffd_wp_resolve) 7215 pte = huge_pte_clear_uffd_wp(pte); 7216 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte); 7217 pages++; 7218 } else { 7219 /* None pte */ 7220 if (unlikely(uffd_wp)) 7221 /* Safe to modify directly (none->non-present). */ 7222 set_huge_pte_at(mm, address, ptep, 7223 make_pte_marker(PTE_MARKER_UFFD_WP), 7224 psize); 7225 } 7226 spin_unlock(ptl); 7227 } 7228 /* 7229 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare 7230 * may have cleared our pud entry and done put_page on the page table: 7231 * once we release i_mmap_rwsem, another task can do the final put_page 7232 * and that page table be reused and filled with junk. If we actually 7233 * did unshare a page of pmds, flush the range corresponding to the pud. 7234 */ 7235 if (shared_pmd) 7236 flush_hugetlb_tlb_range(vma, range.start, range.end); 7237 else 7238 flush_hugetlb_tlb_range(vma, start, end); 7239 /* 7240 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs() we are 7241 * downgrading page table protection not changing it to point to a new 7242 * page. 7243 * 7244 * See Documentation/mm/mmu_notifier.rst 7245 */ 7246 i_mmap_unlock_write(vma->vm_file->f_mapping); 7247 hugetlb_vma_unlock_write(vma); 7248 mmu_notifier_invalidate_range_end(&range); 7249 7250 return pages > 0 ? (pages << h->order) : pages; 7251 } 7252 7253 /* Return true if reservation was successful, false otherwise. 
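 *
 * The from/to range is in units of huge pages, not bytes.  An
 * illustrative (not verbatim) mmap-time caller in hugetlbfs looks like:
 *
 *	if (!hugetlb_reserve_pages(inode,
 *			vma->vm_pgoff >> huge_page_order(h),
 *			len >> huge_page_shift(h),
 *			vma, vma->vm_flags))
 *		return -ENOMEM;
 *
 * where h = hstate_inode(inode) and len is the mapping length in bytes.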
*/ 7254 bool hugetlb_reserve_pages(struct inode *inode, 7255 long from, long to, 7256 struct vm_area_struct *vma, 7257 vm_flags_t vm_flags) 7258 { 7259 long chg = -1, add = -1, spool_resv, gbl_resv; 7260 struct hstate *h = hstate_inode(inode); 7261 struct hugepage_subpool *spool = subpool_inode(inode); 7262 struct resv_map *resv_map; 7263 struct hugetlb_cgroup *h_cg = NULL; 7264 long gbl_reserve, regions_needed = 0; 7265 7266 /* This should never happen */ 7267 if (from > to) { 7268 VM_WARN(1, "%s called with a negative range\n", __func__); 7269 return false; 7270 } 7271 7272 /* 7273 * vma specific semaphore used for pmd sharing and fault/truncation 7274 * synchronization 7275 */ 7276 hugetlb_vma_lock_alloc(vma); 7277 7278 /* 7279 * Only apply hugepage reservation if asked. At fault time, an 7280 * attempt will be made for VM_NORESERVE to allocate a page 7281 * without using reserves 7282 */ 7283 if (vm_flags & VM_NORESERVE) 7284 return true; 7285 7286 /* 7287 * Shared mappings base their reservation on the number of pages that 7288 * are already allocated on behalf of the file. Private mappings need 7289 * to reserve the full area even if read-only as mprotect() may be 7290 * called to make the mapping read-write. Assume !vma is a shm mapping 7291 */ 7292 if (!vma || vma->vm_flags & VM_MAYSHARE) { 7293 /* 7294 * resv_map can not be NULL as hugetlb_reserve_pages is only 7295 * called for inodes for which resv_maps were created (see 7296 * hugetlbfs_get_inode). 7297 */ 7298 resv_map = inode_resv_map(inode); 7299 7300 chg = region_chg(resv_map, from, to, ®ions_needed); 7301 } else { 7302 /* Private mapping. */ 7303 resv_map = resv_map_alloc(); 7304 if (!resv_map) 7305 goto out_err; 7306 7307 chg = to - from; 7308 7309 set_vma_resv_map(vma, resv_map); 7310 set_vma_resv_flags(vma, HPAGE_RESV_OWNER); 7311 } 7312 7313 if (chg < 0) 7314 goto out_err; 7315 7316 if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h), 7317 chg * pages_per_huge_page(h), &h_cg) < 0) 7318 goto out_err; 7319 7320 if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) { 7321 /* For private mappings, the hugetlb_cgroup uncharge info hangs 7322 * of the resv_map. 7323 */ 7324 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h); 7325 } 7326 7327 /* 7328 * There must be enough pages in the subpool for the mapping. If 7329 * the subpool has a minimum size, there may be some global 7330 * reservations already in place (gbl_reserve). 7331 */ 7332 gbl_reserve = hugepage_subpool_get_pages(spool, chg); 7333 if (gbl_reserve < 0) 7334 goto out_uncharge_cgroup; 7335 7336 /* 7337 * Check enough hugepages are available for the reservation. 7338 * Hand the pages back to the subpool if there are not 7339 */ 7340 if (hugetlb_acct_memory(h, gbl_reserve) < 0) 7341 goto out_put_pages; 7342 7343 /* 7344 * Account for the reservations made. Shared mappings record regions 7345 * that have reservations as they are shared by multiple VMAs. 7346 * When the last VMA disappears, the region map says how much 7347 * the reservation was and the page cache tells how much of 7348 * the reservation was consumed. Private mappings are per-VMA and 7349 * only the consumed reservations are tracked. When the VMA 7350 * disappears, the original reservation is the VMA size and the 7351 * consumed reservations are stored in the map. 
Hence, nothing 7352 * else has to be done for private mappings here 7353 */ 7354 if (!vma || vma->vm_flags & VM_MAYSHARE) { 7355 add = region_add(resv_map, from, to, regions_needed, h, h_cg); 7356 7357 if (unlikely(add < 0)) { 7358 hugetlb_acct_memory(h, -gbl_reserve); 7359 goto out_put_pages; 7360 } else if (unlikely(chg > add)) { 7361 /* 7362 * pages in this range were added to the reserve 7363 * map between region_chg and region_add. This 7364 * indicates a race with alloc_hugetlb_folio. Adjust 7365 * the subpool and reserve counts modified above 7366 * based on the difference. 7367 */ 7368 long rsv_adjust; 7369 7370 /* 7371 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the 7372 * reference to h_cg->css. See comment below for detail. 7373 */ 7374 hugetlb_cgroup_uncharge_cgroup_rsvd( 7375 hstate_index(h), 7376 (chg - add) * pages_per_huge_page(h), h_cg); 7377 7378 rsv_adjust = hugepage_subpool_put_pages(spool, 7379 chg - add); 7380 hugetlb_acct_memory(h, -rsv_adjust); 7381 } else if (h_cg) { 7382 /* 7383 * The file_regions will hold their own reference to 7384 * h_cg->css. So we should release the reference held 7385 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are 7386 * done. 7387 */ 7388 hugetlb_cgroup_put_rsvd_cgroup(h_cg); 7389 } 7390 } 7391 return true; 7392 7393 out_put_pages: 7394 spool_resv = chg - gbl_reserve; 7395 if (spool_resv) { 7396 /* put sub pool's reservation back, chg - gbl_reserve */ 7397 gbl_resv = hugepage_subpool_put_pages(spool, spool_resv); 7398 /* 7399 * subpool's reserved pages can not be put back due to race, 7400 * return to hstate. 7401 */ 7402 hugetlb_acct_memory(h, -gbl_resv); 7403 } 7404 out_uncharge_cgroup: 7405 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h), 7406 chg * pages_per_huge_page(h), h_cg); 7407 out_err: 7408 hugetlb_vma_lock_free(vma); 7409 if (!vma || vma->vm_flags & VM_MAYSHARE) 7410 /* Only call region_abort if the region_chg succeeded but the 7411 * region_add failed or didn't run. 7412 */ 7413 if (chg >= 0 && add < 0) 7414 region_abort(resv_map, from, to, regions_needed); 7415 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 7416 kref_put(&resv_map->refs, resv_map_release); 7417 set_vma_resv_map(vma, NULL); 7418 } 7419 return false; 7420 } 7421 7422 long hugetlb_unreserve_pages(struct inode *inode, long start, long end, 7423 long freed) 7424 { 7425 struct hstate *h = hstate_inode(inode); 7426 struct resv_map *resv_map = inode_resv_map(inode); 7427 long chg = 0; 7428 struct hugepage_subpool *spool = subpool_inode(inode); 7429 long gbl_reserve; 7430 7431 /* 7432 * Since this routine can be called in the evict inode path for all 7433 * hugetlbfs inodes, resv_map could be NULL. 7434 */ 7435 if (resv_map) { 7436 chg = region_del(resv_map, start, end); 7437 /* 7438 * region_del() can fail in the rare case where a region 7439 * must be split and another region descriptor can not be 7440 * allocated. If end == LONG_MAX, it will not fail. 7441 */ 7442 if (chg < 0) 7443 return chg; 7444 } 7445 7446 spin_lock(&inode->i_lock); 7447 inode->i_blocks -= (blocks_per_huge_page(h) * freed); 7448 spin_unlock(&inode->i_lock); 7449 7450 /* 7451 * If the subpool has a minimum size, the number of global 7452 * reservations to be released may be adjusted. 7453 * 7454 * Note that !resv_map implies freed == 0. So (chg - freed) 7455 * won't go negative. 
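 *
 * hugepage_subpool_put_pages() returns how many of the released
 * reservations must also be given back to the global pool; only that
 * amount is passed to hugetlb_acct_memory() below.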
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
								long freed)
{
	struct hstate *h = hstate_inode(inode);
	struct resv_map *resv_map = inode_resv_map(inode);
	long chg = 0;
	struct hugepage_subpool *spool = subpool_inode(inode);
	long gbl_reserve;

	/*
	 * Since this routine can be called in the evict inode path for all
	 * hugetlbfs inodes, resv_map could be NULL.
	 */
	if (resv_map) {
		chg = region_del(resv_map, start, end);
		/*
		 * region_del() can fail in the rare case where a region
		 * must be split and another region descriptor can not be
		 * allocated.  If end == LONG_MAX, it will not fail.
		 */
		if (chg < 0)
			return chg;
	}

	spin_lock(&inode->i_lock);
	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
	spin_unlock(&inode->i_lock);

	/*
	 * If the subpool has a minimum size, the number of global
	 * reservations to be released may be adjusted.
	 *
	 * Note that !resv_map implies freed == 0. So (chg - freed)
	 * won't go negative.
	 */
	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
	hugetlb_acct_memory(h, -gbl_reserve);

	return 0;
}

#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/* Allow segments to share if only one is marked locked */
	unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED_MASK;
	unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED_MASK;

	/*
	 * match the virtual addresses, permission and the alignment of the
	 * page table page.
	 *
	 * Also, vma_lock (vm_private_data) is required for sharing.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vm_flags != svm_flags ||
	    !range_in_vma(svma, sbase, s_end) ||
	    !svma->vm_private_data)
		return 0;

	return saddr;
}

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long start = addr & PUD_MASK;
	unsigned long end = start + PUD_SIZE;

#ifdef CONFIG_USERFAULTFD
	if (uffd_disable_huge_pmd_share(vma))
		return false;
#endif
	/*
	 * check on proper vm_flags and page table alignment
	 */
	if (!(vma->vm_flags & VM_MAYSHARE))
		return false;
	if (!vma->vm_private_data)	/* vma lock required for sharing */
		return false;
	if (!range_in_vma(vma, start, end))
		return false;
	return true;
}

/*
 * Determine if start,end range within vma could be mapped by shared pmd.
 * If yes, adjust start and end to cover range associated with possible
 * shared pmd mappings.
 */
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
	unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
		v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);

	/*
	 * vma needs to span at least one aligned PUD size, and the range
	 * must be at least partially within it.
	 */
	if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
		(*end <= v_start) || (*start >= v_end))
		return;

	/* Extend the range to be PUD aligned for a worst case scenario */
	if (*start > v_start)
		*start = ALIGN_DOWN(*start, PUD_SIZE);

	if (*end < v_end)
		*end = ALIGN(*end, PUD_SIZE);
}

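/*
 * Example of the adjustment above, assuming PUD_SIZE is 1GB as on x86-64
 * (the value is architecture-dependent): for a VM_MAYSHARE vma covering
 * [1GB, 3GB), a request to operate on [1GB + 256MB, 1GB + 512MB) is widened
 * to [1GB, 2GB), since a shared PMD page would map that entire PUD-aligned
 * region.
 */
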
/*
 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
 * and returns the corresponding pte. While this is not necessary for the
 * !shared pmd case because we can allocate the pmd later as well, it makes the
 * code much cleaner. pmd allocation is essential for the shared case because
 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
 * bad pmd for sharing.
 */
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	struct vm_area_struct *svma;
	unsigned long saddr;
	pte_t *spte = NULL;
	pte_t *pte;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
		if (svma == vma)
			continue;

		saddr = page_table_shareable(svma, vma, addr, idx);
		if (saddr) {
			spte = hugetlb_walk(svma, saddr,
					    vma_mmu_pagesize(svma));
			if (spte) {
				ptdesc_pmd_pts_inc(virt_to_ptdesc(spte));
				break;
			}
		}
	}

	if (!spte)
		goto out;

	spin_lock(&mm->page_table_lock);
	if (pud_none(*pud)) {
		pud_populate(mm, pud,
				(pmd_t *)((unsigned long)spte & PAGE_MASK));
		mm_inc_nr_pmds(mm);
	} else {
		ptdesc_pmd_pts_dec(virt_to_ptdesc(spte));
	}
	spin_unlock(&mm->page_table_lock);
out:
	pte = (pte_t *)pmd_alloc(mm, pud, addr);
	i_mmap_unlock_read(mapping);
	return pte;
}

/*
 * unmap huge page backed by shared pte.
 *
 * Called with page table lock held.
 *
 * returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep)
{
	unsigned long sz = huge_page_size(hstate_vma(vma));
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);

	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
	hugetlb_vma_assert_locked(vma);
	if (sz != PMD_SIZE)
		return 0;
	if (!ptdesc_pmd_pts_count(virt_to_ptdesc(ptep)))
		return 0;

	pud_clear(pud);
	ptdesc_pmd_pts_dec(virt_to_ptdesc(ptep));
	mm_dec_nr_pmds(mm);
	return 1;
}

#else /* !CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud)
{
	return NULL;
}

int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep)
{
	return 0;
}

void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{
	return false;
}
#endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (pud) {
		if (sz == PUD_SIZE) {
			pte = (pte_t *)pud;
		} else {
			BUG_ON(sz != PMD_SIZE);
			if (want_pmd_share(vma, addr) && pud_none(*pud))
				pte = huge_pmd_share(mm, vma, addr, pud);
			else
				pte = (pte_t *)pmd_alloc(mm, pud, addr);
		}
	}

	if (pte) {
		pte_t pteval = ptep_get_lockless(pte);

		BUG_ON(pte_present(pteval) && !pte_huge(pteval));
	}

	return pte;
}

/*
 * huge_pte_offset() - Walk the page table to resolve the hugepage
 * entry at address @addr
 *
 * Return: Pointer to page table entry (PUD or PMD) for
 * address @addr, or NULL if a !p*d_present() entry is encountered and the
 * size @sz doesn't match the hugepage size at this level of the page
 * table.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (sz == PUD_SIZE)
		/* must be pud huge, non-present or none */
		return (pte_t *)pud;
	if (!pud_present(*pud))
		return NULL;
	/* must have a valid entry and size to go further */

	pmd = pmd_offset(pud, addr);
	/* must be pmd huge, non-present or none */
	return (pte_t *)pmd;
}

/*
 * Return a mask that can be used to update an address to the last huge
 * page in a page table page mapping size.  Used to skip non-present
 * page table entries when linearly scanning address ranges.  Architectures
 * with unique huge page to page table relationships can define their own
 * version of this routine.
 */
unsigned long hugetlb_mask_last_page(struct hstate *h)
{
	unsigned long hp_size = huge_page_size(h);

	if (hp_size == PUD_SIZE)
		return P4D_SIZE - PUD_SIZE;
	else if (hp_size == PMD_SIZE)
		return PUD_SIZE - PMD_SIZE;
	else
		return 0UL;
}

#else

/* See description above.  Architectures can provide their own version. */
__weak unsigned long hugetlb_mask_last_page(struct hstate *h)
{
#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
	if (huge_page_size(h) == PMD_SIZE)
		return PUD_SIZE - PMD_SIZE;
#endif
	return 0UL;
}

#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */

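/*
 * Example of how callers use hugetlb_mask_last_page() (values assume x86-64
 * with 4-level paging, where PMD_SIZE is 2MB and PUD_SIZE is 1GB): a walker
 * scanning a 2MB hstate keeps last_addr_mask = PUD_SIZE - PMD_SIZE. When
 * hugetlb_walk() finds no page table at an address, doing
 * "address |= last_addr_mask" moves it to the last 2MB slot of the current
 * 1GB region, so the loop's usual "address += sz" step then lands on the
 * next PUD entry instead of probing each remaining empty 2MB slot.
 */
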
/**
 * folio_isolate_hugetlb - try to isolate an allocated hugetlb folio
 * @folio: the folio to isolate
 * @list: the list to add the folio to on success
 *
 * Isolate an allocated (refcount > 0) hugetlb folio, marking it as
 * isolated/non-migratable, and moving it from the active list to the
 * given list.
 *
 * Isolation will fail if @folio is not an allocated hugetlb folio, or if
 * it is already isolated/non-migratable.
 *
 * On success, an additional folio reference is taken that must be dropped
 * using folio_putback_hugetlb() to undo the isolation.
 *
 * Return: True if isolation worked, otherwise False.
 */
bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list)
{
	bool ret = true;

	spin_lock_irq(&hugetlb_lock);
	if (!folio_test_hugetlb(folio) ||
	    !folio_test_hugetlb_migratable(folio) ||
	    !folio_try_get(folio)) {
		ret = false;
		goto unlock;
	}
	folio_clear_hugetlb_migratable(folio);
	list_move_tail(&folio->lru, list);
unlock:
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}

int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{
	int ret = 0;

	*hugetlb = false;
	spin_lock_irq(&hugetlb_lock);
	if (folio_test_hugetlb(folio)) {
		*hugetlb = true;
		if (folio_test_hugetlb_freed(folio))
			ret = 0;
		else if (folio_test_hugetlb_migratable(folio) || unpoison)
			ret = folio_try_get(folio);
		else
			ret = -EBUSY;
	}
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}

int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
				bool *migratable_cleared)
{
	int ret;

	spin_lock_irq(&hugetlb_lock);
	ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared);
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}

/**
 * folio_putback_hugetlb - unisolate a hugetlb folio
 * @folio: the isolated hugetlb folio
 *
 * Putback/un-isolate the hugetlb folio that was previously isolated using
 * folio_isolate_hugetlb(): marking it non-isolated/migratable and putting it
 * back onto the active list.
 *
 * Will drop the additional folio reference obtained through
 * folio_isolate_hugetlb().
 */
void folio_putback_hugetlb(struct folio *folio)
{
	spin_lock_irq(&hugetlb_lock);
	folio_set_hugetlb_migratable(folio);
	list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist);
	spin_unlock_irq(&hugetlb_lock);
	folio_put(folio);
}

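/*
 * Sketch of the expected isolate/putback pairing (simplified; real users are
 * the migration and memory-failure paths, which add error handling):
 *
 *	LIST_HEAD(pagelist);
 *
 *	if (folio_isolate_hugetlb(folio, &pagelist)) {
 *		... migrate or otherwise process the folios on pagelist ...
 *		folio_putback_hugetlb(folio);	// if the folio was not consumed
 *	}
 *
 * The extra reference taken at isolation keeps the folio from being freed
 * while it sits on the private list.
 */
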
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
{
	struct hstate *h = folio_hstate(old_folio);

	hugetlb_cgroup_migrate(old_folio, new_folio);
	set_page_owner_migrate_reason(&new_folio->page, reason);

	/*
	 * transfer temporary state of the new hugetlb folio. This is
	 * reverse to other transitions because the newpage is going to
	 * be final while the old one will be freed so it takes over
	 * the temporary status.
	 *
	 * Also note that we have to transfer the per-node surplus state
	 * here as well otherwise the global surplus count will not match
	 * the per-node's.
	 */
	if (folio_test_hugetlb_temporary(new_folio)) {
		int old_nid = folio_nid(old_folio);
		int new_nid = folio_nid(new_folio);

		folio_set_hugetlb_temporary(old_folio);
		folio_clear_hugetlb_temporary(new_folio);

		/*
		 * There is no need to transfer the per-node surplus state
		 * when we do not cross the node.
		 */
		if (new_nid == old_nid)
			return;
		spin_lock_irq(&hugetlb_lock);
		if (h->surplus_huge_pages_node[old_nid]) {
			h->surplus_huge_pages_node[old_nid]--;
			h->surplus_huge_pages_node[new_nid]++;
		}
		spin_unlock_irq(&hugetlb_lock);
	}

	/*
	 * Our old folio is isolated and has "migratable" cleared until it
	 * is putback. As migration succeeded, set the new folio "migratable"
	 * and add it to the active list.
	 */
	spin_lock_irq(&hugetlb_lock);
	folio_set_hugetlb_migratable(new_folio);
	list_move_tail(&new_folio->lru, &(folio_hstate(new_folio))->hugepage_activelist);
	spin_unlock_irq(&hugetlb_lock);
}

static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
				   unsigned long start,
				   unsigned long end)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	unsigned long address;
	spinlock_t *ptl;
	pte_t *ptep;

	if (!(vma->vm_flags & VM_MAYSHARE))
		return;

	if (start >= end)
		return;

	flush_cache_range(vma, start, end);
	/*
	 * No need to call adjust_range_if_pmd_sharing_possible(), because
	 * we have already done the PUD_SIZE alignment.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
				start, end);
	mmu_notifier_invalidate_range_start(&range);
	hugetlb_vma_lock_write(vma);
	i_mmap_lock_write(vma->vm_file->f_mapping);
	for (address = start; address < end; address += PUD_SIZE) {
		ptep = hugetlb_walk(vma, address, sz);
		if (!ptep)
			continue;
		ptl = huge_pte_lock(h, mm, ptep);
		huge_pmd_unshare(mm, vma, address, ptep);
		spin_unlock(ptl);
	}
	flush_hugetlb_tlb_range(vma, start, end);
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	hugetlb_vma_unlock_write(vma);
	/*
	 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see
	 * Documentation/mm/mmu_notifier.rst.
	 */
	mmu_notifier_invalidate_range_end(&range);
}

/*
 * This function will unconditionally remove all the shared pmd pgtable entries
 * within the specific vma for a hugetlbfs memory range.
 */
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
{
	hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
			ALIGN_DOWN(vma->vm_end, PUD_SIZE));
}

/*
 * For hugetlb, mremap() is an odd edge case - while the VMA copying is
 * performed, we permit both the old and new VMAs to reference the same
 * reservation.
 *
 * We fix this up after the operation succeeds, or if a newly allocated VMA
 * is closed as a result of a failure to allocate memory.
 */
void fixup_hugetlb_reservations(struct vm_area_struct *vma)
{
	if (is_vm_hugetlb_page(vma))
		clear_vma_resv_huge_pages(vma);
}