// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/minmax.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>
#include <linux/migrate.h>
#include <linux/nospec.h>
#include <linux/delayacct.h>
#include <linux/memory.h>
#include <linux/mm_inline.h>
#include <linux/padata.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/page_owner.h>
#include "internal.h"
#include "hugetlb_vmemmap.h"
#include "hugetlb_cma.h"
#include <linux/page-isolation.h>

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

__initdata struct list_head huge_boot_pages[MAX_NUMNODES];
static unsigned long hstate_boot_nrinvalid[HUGE_MAX_HSTATE] __initdata;

/*
 * Due to ordering constraints across the init code for various
 * architectures, hugetlb hstate cmdline parameters can't simply
 * be early_param. early_param might call the setup function
 * before valid hugetlb page sizes are determined, leading to
 * incorrect rejection of valid hugepagesz= options.
 *
 * So, record the parameters early and consume them whenever the
 * init code is ready for them, by calling hugetlb_parse_params().
 */

/* one (hugepagesz=,hugepages=) pair per hstate, one default_hugepagesz */
#define HUGE_MAX_CMDLINE_ARGS	(2 * HUGE_MAX_HSTATE + 1)
struct hugetlb_cmdline {
	char *val;
	int (*setup)(char *val);
};

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static bool __initdata parsed_valid_hugepagesz = true;
static bool __initdata parsed_default_hugepagesz;
static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
static unsigned long hugepage_allocation_threads __initdata;

static char hstate_cmdline_buf[COMMAND_LINE_SIZE] __initdata;
static int hstate_cmdline_index __initdata;
static struct hugetlb_cmdline hugetlb_params[HUGE_MAX_CMDLINE_ARGS] __initdata;
static int hugetlb_param_index __initdata;
static __init int hugetlb_add_param(char *s, int (*setup)(char *val));
static __init void hugetlb_parse_params(void);

#define hugetlb_early_param(str, func)					\
static __init int func##args(char *s)					\
{									\
	return hugetlb_add_param(s, func);				\
}									\
early_param(str, func##args)

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page. This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes __ro_after_init;
struct mutex *hugetlb_fault_mutex_table __ro_after_init;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
static struct resv_map *vma_resv_map(struct vm_area_struct *vma);

static void hugetlb_free_folio(struct folio *folio)
{
	if (folio_test_hugetlb_cma(folio)) {
		hugetlb_cma_free_folio(folio);
		return;
	}

	folio_put(folio);
}

static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
	if (spool->count)
		return false;
	if (spool->max_hpages != -1)
		return spool->used_hpages == 0;
	if (spool->min_hpages != -1)
		return spool->rsv_hpages == spool->min_hpages;

	return true;
}

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
						unsigned long irq_flags)
{
	spin_unlock_irqrestore(&spool->lock, irq_flags);

	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
	if (subpool_is_free(spool)) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	unsigned long flags;

	spin_lock_irqsave(&spool->lock, flags);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool, flags);
}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request. Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward). The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock_irq(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool. Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock_irq(&spool->lock);
	return ret;
}

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;
	unsigned long flags;

	if (!spool)
		return delta;

	spin_lock_irqsave(&spool->lock, flags);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool, flags);

	return ret;
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/*
 * hugetlb vma_lock helper routines
 */
void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_read(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		down_read(&resv_map->rw_sema);
	}
}

void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		up_read(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		up_read(&resv_map->rw_sema);
	}
}

void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_write(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		down_write(&resv_map->rw_sema);
	}
}

void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		up_write(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		up_write(&resv_map->rw_sema);
	}
}

int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{

	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		return down_write_trylock(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		return down_write_trylock(&resv_map->rw_sema);
	}

	return 1;
}

void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		lockdep_assert_held(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		lockdep_assert_held(&resv_map->rw_sema);
	}
}

void hugetlb_vma_lock_release(struct kref *kref)
{
	struct hugetlb_vma_lock *vma_lock = container_of(kref,
			struct hugetlb_vma_lock, refs);

	kfree(vma_lock);
}

static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
{
	struct vm_area_struct *vma = vma_lock->vma;

	/*
	 * vma_lock structure may or may not be released as a result of put,
	 * it certainly will no longer be attached to vma so clear pointer.
	 * Semaphore synchronizes access to vma_lock->vma field.
	 */
	vma_lock->vma = NULL;
	vma->vm_private_data = NULL;
	up_write(&vma_lock->rw_sema);
	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
}

static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		__hugetlb_vma_unlock_write_put(vma_lock);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		/* no free for anon vmas, but still need to unlock */
		up_write(&resv_map->rw_sema);
	}
}

static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
{
	/*
	 * Only present in sharable vmas.
	 */
	if (!vma || !__vma_shareable_lock(vma))
		return;

	if (vma->vm_private_data) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_write(&vma_lock->rw_sema);
		__hugetlb_vma_unlock_write_put(vma_lock);
	}
}

static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
{
	struct hugetlb_vma_lock *vma_lock;

	/* Only establish in (flags) sharable vmas */
	if (!vma || !(vma->vm_flags & VM_MAYSHARE))
		return;

	/* Should never get here with non-NULL vm_private_data */
	if (vma->vm_private_data)
		return;

	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
	if (!vma_lock) {
		/*
		 * If we can not allocate structure, then vma can not
		 * participate in pmd sharing. This is only a possible
		 * performance enhancement and memory saving issue.
		 * However, the lock is also used to synchronize page
		 * faults with truncation. If the lock is not present,
		 * unlikely races could leave pages in a file past i_size
		 * until the file is removed. Warn in the unlikely case of
		 * allocation failure.
		 */
		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
		return;
	}

	kref_init(&vma_lock->refs);
	init_rwsem(&vma_lock->rw_sema);
	vma_lock->vma = vma;
	vma->vm_private_data = vma_lock;
}

/* Helper that removes a struct file_region from the resv_map cache and returns
 * it for use.
 */
static struct file_region *
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
{
	struct file_region *nrg;

	VM_BUG_ON(resv->region_cache_count <= 0);

	resv->region_cache_count--;
	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
	list_del(&nrg->link);

	nrg->from = from;
	nrg->to = to;

	return nrg;
}

static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
					      struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	nrg->reservation_counter = rg->reservation_counter;
	nrg->css = rg->css;
	if (rg->css)
		css_get(rg->css);
#endif
}

/* Helper that records hugetlb_cgroup uncharge info. */
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
						struct hstate *h,
						struct resv_map *resv,
						struct file_region *nrg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (h_cg) {
		nrg->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		nrg->css = &h_cg->css;
		/*
		 * The caller will hold exactly one h_cg->css reference for the
		 * whole contiguous reservation region. But this area might be
		 * scattered when there are already some file_regions reside in
		 * it. As a result, many file_regions may share only one css
		 * reference. In order to ensure that one file_region must hold
		 * exactly one h_cg->css reference, we should do css_get for
		 * each file_region and leave the reference held by caller
		 * untouched.
		 */
		css_get(&h_cg->css);
		if (!resv->pages_per_hpage)
			resv->pages_per_hpage = pages_per_huge_page(h);
		/* pages_per_hpage should be the same for all entries in
		 * a resv_map.
		 */
		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
	} else {
		nrg->reservation_counter = NULL;
		nrg->css = NULL;
	}
#endif
}

static void put_uncharge_info(struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (rg->css)
		css_put(rg->css);
#endif
}

static bool has_same_uncharge_info(struct file_region *rg,
				   struct file_region *org)
{
#ifdef CONFIG_CGROUP_HUGETLB
	return rg->reservation_counter == org->reservation_counter &&
	       rg->css == org->css;

#else
	return true;
#endif
}

static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{
	struct file_region *nrg, *prg;

	prg = list_prev_entry(rg, link);
	if (&prg->link != &resv->regions && prg->to == rg->from &&
	    has_same_uncharge_info(prg, rg)) {
		prg->to = rg->to;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);

		rg = prg;
	}

	nrg = list_next_entry(rg, link);
	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
	    has_same_uncharge_info(nrg, rg)) {
		nrg->from = rg->from;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);
	}
}

static inline long
hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
		     long *regions_needed)
{
	struct file_region *nrg;

	if (!regions_needed) {
		nrg = get_file_region_entry_from_cache(map, from, to);
		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
		list_add(&nrg->link, rg);
		coalesce_file_region(map, nrg);
	} else
		*regions_needed += 1;

	return to - from;
}

/*
 * Must be called with resv->lock held.
 *
 * Calling this with regions_needed != NULL will count the number of pages
 * to be added but will not modify the linked list. And regions_needed will
 * indicate the number of file_regions needed in the cache to carry out to add
 * the regions for this range.
 */
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
				     struct hugetlb_cgroup *h_cg,
				     struct hstate *h, long *regions_needed)
{
	long add = 0;
	struct list_head *head = &resv->regions;
	long last_accounted_offset = f;
	struct file_region *iter, *trg = NULL;
	struct list_head *rg = NULL;

	if (regions_needed)
		*regions_needed = 0;

	/* In this loop, we essentially handle an entry for the range
	 * [last_accounted_offset, iter->from), at every iteration, with some
	 * bounds checking.
	 */
	list_for_each_entry_safe(iter, trg, head, link) {
		/* Skip irrelevant regions that start before our range. */
		if (iter->from < f) {
			/* If this region ends after the last accounted offset,
			 * then we need to update last_accounted_offset.
			 */
			if (iter->to > last_accounted_offset)
				last_accounted_offset = iter->to;
			continue;
		}

		/* When we find a region that starts beyond our range, we've
		 * finished.
		 */
		if (iter->from >= t) {
			rg = iter->link.prev;
			break;
		}

		/* Add an entry for last_accounted_offset -> iter->from, and
		 * update last_accounted_offset.
		 */
		if (iter->from > last_accounted_offset)
			add += hugetlb_resv_map_add(resv, iter->link.prev,
						    last_accounted_offset,
						    iter->from, h, h_cg,
						    regions_needed);

		last_accounted_offset = iter->to;
	}

	/* Handle the case where our range extends beyond
	 * last_accounted_offset.
	 */
	if (!rg)
		rg = head->prev;
	if (last_accounted_offset < t)
		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
					    t, h, h_cg, regions_needed);

	return add;
}

/* Must be called with resv->lock acquired. Will drop lock to allocate entries.
 */
static int allocate_file_region_entries(struct resv_map *resv,
					int regions_needed)
	__must_hold(&resv->lock)
{
	LIST_HEAD(allocated_regions);
	int to_allocate = 0, i = 0;
	struct file_region *trg = NULL, *rg = NULL;

	VM_BUG_ON(regions_needed < 0);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations plus regions_needed.
	 *
	 * This is a while loop because when we drop the lock, some other call
	 * to region_add or region_del may have consumed some region_entries,
	 * so we keep looping here until we finally have enough entries for
	 * (adds_in_progress + regions_needed).
	 */
	while (resv->region_cache_count <
	       (resv->adds_in_progress + regions_needed)) {
		to_allocate = resv->adds_in_progress + regions_needed -
			      resv->region_cache_count;

		/* At this point, we should have enough entries in the cache
		 * for all the existing adds_in_progress. We should only be
		 * needing to allocate for regions_needed.
		 */
		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);

		spin_unlock(&resv->lock);
		for (i = 0; i < to_allocate; i++) {
			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
			if (!trg)
				goto out_of_memory;
			list_add(&trg->link, &allocated_regions);
		}

		spin_lock(&resv->lock);

		list_splice(&allocated_regions, &resv->region_cache);
		resv->region_cache_count += to_allocate;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
		list_del(&rg->link);
		kfree(rg);
	}
	return -ENOMEM;
}

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map. Regions will be taken from the cache to fill in this range.
 * Sufficient regions should exist in the cache due to the previous
 * call to region_chg with the same range, but in some cases the cache will not
 * have sufficient entries due to races with other code doing region_add or
 * region_del. The extra needed entries will be allocated.
 *
 * regions_needed is the out value provided by a previous call to region_chg.
 *
 * Return the number of new huge pages added to the map. This number is greater
 * than or equal to zero. If file_region entries needed to be allocated for
 * this operation and we were not able to allocate, it returns -ENOMEM.
 * region_add of regions of length 1 never allocate file_regions and cannot
 * fail; region_chg will always allocate at least 1 entry and a region_add for
 * 1 page will only require at most 1 entry.
 */
static long region_add(struct resv_map *resv, long f, long t,
		       long in_regions_needed, struct hstate *h,
		       struct hugetlb_cgroup *h_cg)
{
	long add = 0, actual_regions_needed = 0;

	spin_lock(&resv->lock);
retry:

	/* Count how many regions are actually needed to execute this add. */
	add_reservation_in_range(resv, f, t, NULL, NULL,
				 &actual_regions_needed);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * this add operation. Note that actual_regions_needed may be greater
	 * than in_regions_needed, as the resv_map may have been modified since
	 * the region_chg call. In this case, we need to make sure that we
	 * allocate extra entries, such that we have enough for all the
	 * existing adds_in_progress, plus the excess needed for this
	 * operation.
	 */
	if (actual_regions_needed > in_regions_needed &&
	    resv->region_cache_count <
		    resv->adds_in_progress +
			    (actual_regions_needed - in_regions_needed)) {
		/* region_add operation of range 1 should never need to
		 * allocate file_region entries.
		 */
		VM_BUG_ON(t - f <= 1);

		if (allocate_file_region_entries(
			    resv, actual_regions_needed - in_regions_needed)) {
			return -ENOMEM;
		}

		goto retry;
	}

	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);

	resv->adds_in_progress -= in_regions_needed;

	spin_unlock(&resv->lock);
	return add;
}

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented. This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t). region_chg does
 * not change the number of huge pages represented by the
 * map. A number of new file_region structures is added to the cache as a
 * placeholder, for the subsequent region_add call to use. At least 1
 * file_region structure is added.
 *
 * out_regions_needed is the number of regions added to the
 * resv->adds_in_progress. This value needs to be provided to a follow up call
 * to region_add or region_abort for proper accounting.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t). This number is greater or equal to
 * zero. -ENOMEM is returned if a new file_region structure or cache entry
 * is needed and can not be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t,
		       long *out_regions_needed)
{
	long chg = 0;

	spin_lock(&resv->lock);

	/* Count how many hugepages in this range are NOT represented. */
	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
				       out_regions_needed);

	if (*out_regions_needed == 0)
		*out_regions_needed = 1;

	if (allocate_file_region_entries(resv, *out_regions_needed))
		return -ENOMEM;

	resv->adds_in_progress += *out_regions_needed;

	spin_unlock(&resv->lock);
	return chg;
}
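
/*
 * Typical reservation sequence (an illustrative sketch derived from the
 * comments above, not copied from any particular caller): region_chg()
 * is called first to count the pages that are not yet represented and to
 * pre-allocate cache entries, then exactly one of region_add() or
 * region_abort() consumes the out_regions_needed value it produced:
 *
 *	long needed;
 *	long chg = region_chg(resv, f, t, &needed);
 *	...
 *	if (commit)
 *		region_add(resv, f, t, needed, h, h_cg);
 *	else
 *		region_abort(resv, f, t, needed);
 *
 * This keeps resv->adds_in_progress balanced across the two-step update.
 */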

/*
 * Abort the in progress add operation. The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add. Operations are sometimes
 * aborted after the call to region_chg. In such cases, region_abort
 * is called to decrement the adds_in_progress counter. regions_needed
 * is the value returned by the region_chg call, it is used to decrement
 * the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine. They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t,
			 long regions_needed)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress -= regions_needed;
	spin_unlock(&resv->lock);
}

/*
 * Delete the specified range [f, t) from the reserve map. If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted. Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more. In the
 * case where a region must be split, a new region descriptor must
 * be allocated. If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM. Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted. file_region
		 * ranges are normally of the form [from, to). However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to. Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
						       struct file_region,
						       link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;
			hugetlb_cgroup_uncharge_file_region(
				resv, rg, t - f, false);

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;

			copy_hugetlb_cgroup_uncharge_info(nrg, rg);

			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - rg->from, true);
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {		/* Trim beginning of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    t - rg->from, false);

			del += t - rg->from;
			rg->from = t;
		} else {			/* Trim end of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - f, false);

			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}

/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page. The huge page itself was free'ed
 * and removed from the page cache. This routine will adjust the subpool
 * usage count, and the global reserve count if needed. By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;
	bool reserved = false;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust > 0) {
		struct hstate *h = hstate_inode(inode);

		if (!hugetlb_acct_memory(h, 1))
			reserved = true;
	} else if (!rsv_adjust) {
		reserved = true;
	}

	if (!reserved)
		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, huge page units here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

/**
 * vma_kernel_pagesize - Page size granularity for this VMA.
 * @vma: The user mapping.
 *
 * Folios in this VMA will be aligned to, and at least the size of the
 * number of bytes returned by this function.
 *
 * Return: The default size of the folios allocated when backing a VMA.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}

/*
 * Flags for MAP_PRIVATE reservations. These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of hugetlb_dup_vma_private() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping. A shared mapping has a region map associated
 * with the underlying file, this region map represents the backing file
 * pages which have ever had a reservation assigned, and this persists even
 * after the page is instantiated. A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it, this region map represents those offsets which have consumed
 * reservation ie. where pages have been instantiated.
 */
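
/*
 * For example (illustrative only, not lifted from a specific caller): a
 * MAP_PRIVATE owner vma keeps its resv_map pointer in vm_private_data with
 * HPAGE_RESV_OWNER set in the low bits, so
 * get_vma_private_data(vma) & ~HPAGE_RESV_MASK recovers the pointer while
 * get_vma_private_data(vma) & HPAGE_RESV_OWNER tests the ownership flag,
 * as done by vma_resv_map() and is_vma_resv_set() below.
 */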
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
					unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

static void
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
					  struct hugetlb_cgroup *h_cg,
					  struct hstate *h)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (!h_cg || !h) {
		resv_map->reservation_counter = NULL;
		resv_map->pages_per_hpage = 0;
		resv_map->css = NULL;
	} else {
		resv_map->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		resv_map->pages_per_hpage = pages_per_huge_page(h);
		resv_map->css = &h_cg->css;
	}
#endif
}

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);
	init_rwsem(&resv_map->rw_sema);

	resv_map->adds_in_progress = 0;
	/*
	 * Initialize these to 0. On shared mappings, 0's here indicate these
	 * fields don't do cgroup accounting. On private mappings, these will be
	 * re-initialized to the proper values, to indicate that hugetlb cgroup
	 * reservations are to be un-charged from here.
	 */
	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	/*
	 * At inode evict time, i_mapping may not point to the original
	 * address space within the inode. This original address space
	 * contains the pointer to the resv_map. So, always use the
	 * address space embedded within the inode.
	 * The VERY common case is inode->mapping == &inode->i_data but,
	 * this may not be true for device special inodes.
	 */
	return (struct resv_map *)(&inode->i_data)->i_private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}

bool __vma_private_lock(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & VM_MAYSHARE) &&
		get_vma_private_data(vma) & ~HPAGE_RESV_MASK &&
		is_vma_resv_set(vma, HPAGE_RESV_OWNER);
}

void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	/*
	 * Clear vm_private_data
	 * - For shared mappings this is a per-vma semaphore that may be
	 *   allocated in a subsequent call to hugetlb_vm_op_open.
	 *   Before clearing, make sure pointer is not associated with vma
	 *   as this will leak the structure. This is the case when called
	 *   via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
	 *   been called to allocate a new structure.
	 * - For MAP_PRIVATE mappings, this is the reserve map which does
	 *   not apply to children. Faults generated by the children are
	 *   not guaranteed to succeed, even if read-only.
	 */
	if (vma->vm_flags & VM_MAYSHARE) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		if (vma_lock && vma_lock->vma != vma)
			vma->vm_private_data = NULL;
	} else
		vma->vm_private_data = NULL;
}

/*
 * Reset and decrement one ref on hugepage private reservation.
 * Called with mm->mmap_lock writer semaphore held.
 * This function should be only used by move_vma() and operate on
 * same sized vma. It should never come here with last ref on the
 * reservation.
 */
void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	/*
	 * Clear the old hugetlb private page reservation.
	 * It has already been transferred to new_vma.
	 *
	 * During a mremap() operation of a hugetlb vma we call move_vma()
	 * which copies vma into new_vma and unmaps vma. After the copy
	 * operation both new_vma and vma share a reference to the resv_map
	 * struct, and at that point vma is about to be unmapped. We don't
	 * want to return the reservation to the pool at unmap of vma because
	 * the reservation still lives on in new_vma, so simply decrement the
	 * ref here and remove the resv_map reference from this vma.
	 */
	struct resv_map *reservations = vma_resv_map(vma);

	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
		kref_put(&reservations->refs, resv_map_release);
	}

	hugetlb_dup_vma_private(vma);
}

static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
{
	int nid = folio_nid(folio);

	lockdep_assert_held(&hugetlb_lock);
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);

	list_move(&folio->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
	folio_set_hugetlb_freed(folio);
}

static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
							int nid)
{
	struct folio *folio;
	bool pin = !!(current->flags & PF_MEMALLOC_PIN);

	lockdep_assert_held(&hugetlb_lock);
	list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
		if (pin && !folio_is_longterm_pinnable(folio))
			continue;

		if (folio_test_hwpoison(folio))
			continue;

		if (is_migrate_isolate_page(&folio->page))
			continue;

		list_move(&folio->lru, &h->hugepage_activelist);
		folio_ref_unfreeze(folio, 1);
		folio_clear_hugetlb_freed(folio);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		return folio;
	}

	return NULL;
}

static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
							int nid, nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = NUMA_NO_NODE;

	/* 'nid' should not be NUMA_NO_NODE. Try to catch any misuse of it and rectify. */
	if (nid == NUMA_NO_NODE)
		nid = numa_node_id();

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct folio *folio;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * no need to ask again on the same node. Pool is node rather than
		 * zone aware
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		folio = dequeue_hugetlb_folio_node_exact(h, node);
		if (folio)
			return folio;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}

static unsigned long available_huge_pages(struct hstate *h)
{
	return h->free_huge_pages - h->resv_huge_pages;
}

static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, long gbl_chg)
{
	struct folio *folio = NULL;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * gbl_chg==1 means the allocation requires a new page that was not
	 * reserved before. Making sure there's at least one free page.
	 */
	if (gbl_chg && !available_huge_pages(h))
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);

	if (mpol_is_preferred_many(mpol)) {
		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
							nid, nodemask);

		/* Fallback to all nodes if page==NULL */
		nodemask = NULL;
	}

	if (!folio)
		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
							nid, nodemask);

	mpol_cond_put(mpol);
	return folio;

err:
	return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed. Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(int *next_node,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(*next_node, nodes_allowed);
	*next_node = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * helper for remove_pool_hugetlb_folio() - return the previously saved
 * node ["this node"] from which to free a huge page. Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(next_node, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);					\
		nr_nodes > 0 &&							\
		((node = hstate_next_node_to_alloc(next_node, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)			\
	for (nr_nodes = nodes_weight(*mask);					\
		nr_nodes > 0 &&							\
		((node = hstate_next_node_to_free(hs, mask)) || 1);		\
		nr_nodes--)

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
#ifdef CONFIG_CONTIG_ALLOC
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	struct folio *folio;
	int order = huge_page_order(h);
	bool retried = false;

	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();
retry:
	folio = hugetlb_cma_alloc_folio(h, gfp_mask, nid, nodemask);
	if (!folio) {
		if (hugetlb_cma_exclusive_alloc())
			return NULL;

		folio = folio_alloc_gigantic(order, gfp_mask, nid, nodemask);
		if (!folio)
			return NULL;
	}

	if (folio_ref_freeze(folio, 1))
		return folio;

	pr_warn("HugeTLB: unexpected refcount on PFN %lu\n", folio_pfn(folio));
	hugetlb_free_folio(folio);
	if (!retried) {
		retried = true;
		goto retry;
	}
	return NULL;
}

#else /* !CONFIG_CONTIG_ALLOC */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
#endif

/*
 * Remove hugetlb folio from lists.
 * If vmemmap exists for the folio, clear the hugetlb flag so that the
 * folio appears as just a compound page. Otherwise, wait until after
 * allocating vmemmap to clear the flag.
 *
 * Must be called with hugetlb lock held.
 */
static void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
							bool adjust_surplus)
{
	int nid = folio_nid(folio);

	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);

	lockdep_assert_held(&hugetlb_lock);
	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	list_del(&folio->lru);

	if (folio_test_hugetlb_freed(folio)) {
		folio_clear_hugetlb_freed(folio);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
	}
	if (adjust_surplus) {
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	}

	/*
	 * We can only clear the hugetlb flag after allocating vmemmap
	 * pages. Otherwise, someone (memory error handling) may try to write
	 * to tail struct pages.
	 */
	if (!folio_test_hugetlb_vmemmap_optimized(folio))
		__folio_clear_hugetlb(folio);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[nid]--;
}

static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
			      bool adjust_surplus)
{
	int nid = folio_nid(folio);

	VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio);

	lockdep_assert_held(&hugetlb_lock);

	INIT_LIST_HEAD(&folio->lru);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;

	if (adjust_surplus) {
		h->surplus_huge_pages++;
		h->surplus_huge_pages_node[nid]++;
	}

	__folio_set_hugetlb(folio);
	folio_change_private(folio, NULL);
	/*
	 * We have to set hugetlb_vmemmap_optimized again as above
	 * folio_change_private(folio, NULL) cleared it.
	 */
	folio_set_hugetlb_vmemmap_optimized(folio);

	arch_clear_hugetlb_flags(folio);
	enqueue_hugetlb_folio(h, folio);
}

static void __update_and_free_hugetlb_folio(struct hstate *h,
						struct folio *folio)
{
	bool clear_flag = folio_test_hugetlb_vmemmap_optimized(folio);

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	/*
	 * If we don't know which subpages are hwpoisoned, we can't free
	 * the hugepage, so it's leaked intentionally.
	 */
	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return;

	/*
	 * If folio is not vmemmap optimized (!clear_flag), then the folio
	 * is no longer identified as a hugetlb page. hugetlb_vmemmap_restore_folio
	 * can only be passed hugetlb pages and will BUG otherwise.
	 */
	if (clear_flag && hugetlb_vmemmap_restore_folio(h, folio)) {
		spin_lock_irq(&hugetlb_lock);
		/*
		 * If we cannot allocate vmemmap pages, just refuse to free the
		 * page and put the page back on the hugetlb free list and treat
		 * as a surplus page.
		 */
		add_hugetlb_folio(h, folio, true);
		spin_unlock_irq(&hugetlb_lock);
		return;
	}

	/*
	 * If vmemmap pages were allocated above, then we need to clear the
	 * hugetlb flag under the hugetlb lock.
	 */
	if (folio_test_hugetlb(folio)) {
		spin_lock_irq(&hugetlb_lock);
		__folio_clear_hugetlb(folio);
		spin_unlock_irq(&hugetlb_lock);
	}

	/*
	 * Move PageHWPoison flag from head page to the raw error pages,
	 * which makes any healthy subpages reusable.
	 */
	if (unlikely(folio_test_hwpoison(folio)))
		folio_clear_hugetlb_hwpoison(folio);

	folio_ref_unfreeze(folio, 1);

	hugetlb_free_folio(folio);
}

/*
 * As update_and_free_hugetlb_folio() can be called under any context, we cannot
 * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
 * actual freeing in a workqueue to prevent from using GFP_ATOMIC to allocate
 * the vmemmap pages.
 *
 * free_hpage_workfn() locklessly retrieves the linked list of pages to be
 * freed and frees them one-by-one. As the page->mapping pointer is going
 * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
 * structure of a lockless linked list of huge pages to be freed.
 */
static LLIST_HEAD(hpage_freelist);

static void free_hpage_workfn(struct work_struct *work)
{
	struct llist_node *node;

	node = llist_del_all(&hpage_freelist);

	while (node) {
		struct folio *folio;
		struct hstate *h;

		folio = container_of((struct address_space **)node,
				     struct folio, mapping);
		node = node->next;
		folio->mapping = NULL;
		/*
		 * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
		 * folio_hstate() is going to trigger because a previous call to
		 * remove_hugetlb_folio() will clear the hugetlb bit, so do
		 * not use folio_hstate() directly.
		 */
		h = size_to_hstate(folio_size(folio));

		__update_and_free_hugetlb_folio(h, folio);

		cond_resched();
	}
}
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);

static inline void flush_free_hpage_work(struct hstate *h)
{
	if (hugetlb_vmemmap_optimizable(h))
		flush_work(&free_hpage_work);
}

static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
				 bool atomic)
{
	if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
		__update_and_free_hugetlb_folio(h, folio);
		return;
	}

	/*
	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
	 *
	 * Only call schedule_work() if hpage_freelist is previously
	 * empty. Otherwise, schedule_work() had been called but the workfn
	 * hasn't retrieved the list yet.
	 */
	if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
		schedule_work(&free_hpage_work);
}

static void bulk_vmemmap_restore_error(struct hstate *h,
				       struct list_head *folio_list,
				       struct list_head *non_hvo_folios)
{
	struct folio *folio, *t_folio;

	if (!list_empty(non_hvo_folios)) {
		/*
		 * Free any restored hugetlb pages so that restore of the
		 * entire list can be retried.
		 * The idea is that in the common case of ENOMEM errors freeing
		 * hugetlb pages with vmemmap we will free up memory so that we
		 * can allocate vmemmap for more hugetlb pages.
		 */
		list_for_each_entry_safe(folio, t_folio, non_hvo_folios, lru) {
			list_del(&folio->lru);
			spin_lock_irq(&hugetlb_lock);
			__folio_clear_hugetlb(folio);
			spin_unlock_irq(&hugetlb_lock);
			update_and_free_hugetlb_folio(h, folio, false);
			cond_resched();
		}
	} else {
		/*
		 * In the case where there are no folios which can be
		 * immediately freed, we loop through the list trying to restore
		 * vmemmap individually in the hope that someone elsewhere may
		 * have done something to cause success (such as freeing some
		 * memory). If unable to restore a hugetlb page, the hugetlb
		 * page is made a surplus page and removed from the list.
		 * If we are able to restore vmemmap and free one hugetlb page, we
		 * quit processing the list to retry the bulk operation.
		 */
		list_for_each_entry_safe(folio, t_folio, folio_list, lru)
			if (hugetlb_vmemmap_restore_folio(h, folio)) {
				list_del(&folio->lru);
				spin_lock_irq(&hugetlb_lock);
				add_hugetlb_folio(h, folio, true);
				spin_unlock_irq(&hugetlb_lock);
			} else {
				list_del(&folio->lru);
				spin_lock_irq(&hugetlb_lock);
				__folio_clear_hugetlb(folio);
				spin_unlock_irq(&hugetlb_lock);
				update_and_free_hugetlb_folio(h, folio, false);
				cond_resched();
				break;
			}
	}
}

static void update_and_free_pages_bulk(struct hstate *h,
						struct list_head *folio_list)
{
	long ret;
	struct folio *folio, *t_folio;
	LIST_HEAD(non_hvo_folios);

	/*
	 * First allocate required vmemmap (if necessary) for all folios.
	 * Carefully handle errors and free up any available hugetlb pages
	 * in an effort to make forward progress.
	 */
retry:
	ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
	if (ret < 0) {
		bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios);
		goto retry;
	}

	/*
	 * At this point, list should be empty, ret should be >= 0 and there
	 * should only be pages on the non_hvo_folios list.
	 * Do note that the non_hvo_folios list could be empty.
	 * Without HVO enabled, ret will be 0 and there is no need to call
	 * __folio_clear_hugetlb as this was done previously.
	 */
	VM_WARN_ON(!list_empty(folio_list));
	VM_WARN_ON(ret < 0);
	if (!list_empty(&non_hvo_folios) && ret) {
		spin_lock_irq(&hugetlb_lock);
		list_for_each_entry(folio, &non_hvo_folios, lru)
			__folio_clear_hugetlb(folio);
		spin_unlock_irq(&hugetlb_lock);
	}

	list_for_each_entry_safe(folio, t_folio, &non_hvo_folios, lru) {
		update_and_free_hugetlb_folio(h, folio, false);
		cond_resched();
	}
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

void free_huge_folio(struct folio *folio)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * generic mm code.
	 */
	struct hstate *h = folio_hstate(folio);
	int nid = folio_nid(folio);
	struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
	bool restore_reserve;
	unsigned long flags;

	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
	VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);

	hugetlb_set_folio_subpool(folio, NULL);
	if (folio_test_anon(folio))
		__ClearPageAnonExclusive(&folio->page);
	folio->mapping = NULL;
	restore_reserve = folio_test_hugetlb_restore_reserve(folio);
	folio_clear_hugetlb_restore_reserve(folio);

	/*
	 * If HPageRestoreReserve was set on page, page allocation consumed a
	 * reservation. If the page was associated with a subpool, there
	 * would have been a page reserved in the subpool before allocation
	 * via hugepage_subpool_get_pages(). Since we are 'restoring' the
	 * reservation, do not call hugepage_subpool_put_pages() as this will
	 * remove the reserved page from the subpool.
	 */
	if (!restore_reserve) {
		/*
		 * A return code of zero implies that the subpool will be
		 * under its minimum size if the reservation is not restored
		 * after page is free. Therefore, force restore_reserve
		 * operation.
1861 */ 1862 if (hugepage_subpool_put_pages(spool, 1) == 0) 1863 restore_reserve = true; 1864 } 1865 1866 spin_lock_irqsave(&hugetlb_lock, flags); 1867 folio_clear_hugetlb_migratable(folio); 1868 hugetlb_cgroup_uncharge_folio(hstate_index(h), 1869 pages_per_huge_page(h), folio); 1870 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), 1871 pages_per_huge_page(h), folio); 1872 lruvec_stat_mod_folio(folio, NR_HUGETLB, -pages_per_huge_page(h)); 1873 mem_cgroup_uncharge(folio); 1874 if (restore_reserve) 1875 h->resv_huge_pages++; 1876 1877 if (folio_test_hugetlb_temporary(folio)) { 1878 remove_hugetlb_folio(h, folio, false); 1879 spin_unlock_irqrestore(&hugetlb_lock, flags); 1880 update_and_free_hugetlb_folio(h, folio, true); 1881 } else if (h->surplus_huge_pages_node[nid]) { 1882 /* remove the page from active list */ 1883 remove_hugetlb_folio(h, folio, true); 1884 spin_unlock_irqrestore(&hugetlb_lock, flags); 1885 update_and_free_hugetlb_folio(h, folio, true); 1886 } else { 1887 arch_clear_hugetlb_flags(folio); 1888 enqueue_hugetlb_folio(h, folio); 1889 spin_unlock_irqrestore(&hugetlb_lock, flags); 1890 } 1891 } 1892 1893 /* 1894 * Must be called with the hugetlb lock held 1895 */ 1896 static void __prep_account_new_huge_page(struct hstate *h, int nid) 1897 { 1898 lockdep_assert_held(&hugetlb_lock); 1899 h->nr_huge_pages++; 1900 h->nr_huge_pages_node[nid]++; 1901 } 1902 1903 static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio) 1904 { 1905 __folio_set_hugetlb(folio); 1906 INIT_LIST_HEAD(&folio->lru); 1907 hugetlb_set_folio_subpool(folio, NULL); 1908 set_hugetlb_cgroup(folio, NULL); 1909 set_hugetlb_cgroup_rsvd(folio, NULL); 1910 } 1911 1912 static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio) 1913 { 1914 init_new_hugetlb_folio(h, folio); 1915 hugetlb_vmemmap_optimize_folio(h, folio); 1916 } 1917 1918 static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid) 1919 { 1920 __prep_new_hugetlb_folio(h, folio); 1921 spin_lock_irq(&hugetlb_lock); 1922 __prep_account_new_huge_page(h, nid); 1923 spin_unlock_irq(&hugetlb_lock); 1924 } 1925 1926 /* 1927 * Find and lock address space (mapping) in write mode. 1928 * 1929 * Upon entry, the folio is locked which means that folio_mapping() is 1930 * stable. Due to locking order, we can only trylock_write. If we can 1931 * not get the lock, simply return NULL to caller. 1932 */ 1933 struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio) 1934 { 1935 struct address_space *mapping = folio_mapping(folio); 1936 1937 if (!mapping) 1938 return mapping; 1939 1940 if (i_mmap_trylock_write(mapping)) 1941 return mapping; 1942 1943 return NULL; 1944 } 1945 1946 static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h, 1947 gfp_t gfp_mask, int nid, nodemask_t *nmask, 1948 nodemask_t *node_alloc_noretry) 1949 { 1950 int order = huge_page_order(h); 1951 struct folio *folio; 1952 bool alloc_try_hard = true; 1953 bool retry = true; 1954 1955 /* 1956 * By default we always try hard to allocate the folio with 1957 * __GFP_RETRY_MAYFAIL flag. However, if we are allocating folios in 1958 * a loop (to adjust global huge page counts) and previous allocation 1959 * failed, do not continue to try hard on the same node. Use the 1960 * node_alloc_noretry bitmap to manage this state information. 
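	 *
	 * For reference: __GFP_RETRY_MAYFAIL asks the page allocator to retry
	 * reclaim and compaction aggressively but to return NULL rather than
	 * invoke the OOM killer, which is the behaviour wanted for these
	 * opportunistic pool-sizing allocations.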
1961 */ 1962 if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry)) 1963 alloc_try_hard = false; 1964 if (alloc_try_hard) 1965 gfp_mask |= __GFP_RETRY_MAYFAIL; 1966 if (nid == NUMA_NO_NODE) 1967 nid = numa_mem_id(); 1968 retry: 1969 folio = __folio_alloc(gfp_mask, order, nid, nmask); 1970 /* Ensure hugetlb folio won't have large_rmappable flag set. */ 1971 if (folio) 1972 folio_clear_large_rmappable(folio); 1973 1974 if (folio && !folio_ref_freeze(folio, 1)) { 1975 folio_put(folio); 1976 if (retry) { /* retry once */ 1977 retry = false; 1978 goto retry; 1979 } 1980 /* WOW! twice in a row. */ 1981 pr_warn("HugeTLB unexpected inflated folio ref count\n"); 1982 folio = NULL; 1983 } 1984 1985 /* 1986 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a 1987 * folio this indicates an overall state change. Clear bit so 1988 * that we resume normal 'try hard' allocations. 1989 */ 1990 if (node_alloc_noretry && folio && !alloc_try_hard) 1991 node_clear(nid, *node_alloc_noretry); 1992 1993 /* 1994 * If we tried hard to get a folio but failed, set bit so that 1995 * subsequent attempts will not try as hard until there is an 1996 * overall state change. 1997 */ 1998 if (node_alloc_noretry && !folio && alloc_try_hard) 1999 node_set(nid, *node_alloc_noretry); 2000 2001 if (!folio) { 2002 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); 2003 return NULL; 2004 } 2005 2006 __count_vm_event(HTLB_BUDDY_PGALLOC); 2007 return folio; 2008 } 2009 2010 static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h, 2011 gfp_t gfp_mask, int nid, nodemask_t *nmask, 2012 nodemask_t *node_alloc_noretry) 2013 { 2014 struct folio *folio; 2015 2016 if (hstate_is_gigantic(h)) 2017 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask); 2018 else 2019 folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, node_alloc_noretry); 2020 if (folio) 2021 init_new_hugetlb_folio(h, folio); 2022 return folio; 2023 } 2024 2025 /* 2026 * Common helper to allocate a fresh hugetlb page. All specific allocators 2027 * should use this function to get new hugetlb pages 2028 * 2029 * Note that returned page is 'frozen': ref count of head page and all tail 2030 * pages is zero. 2031 */ 2032 static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h, 2033 gfp_t gfp_mask, int nid, nodemask_t *nmask) 2034 { 2035 struct folio *folio; 2036 2037 if (hstate_is_gigantic(h)) 2038 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask); 2039 else 2040 folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, NULL); 2041 if (!folio) 2042 return NULL; 2043 2044 prep_new_hugetlb_folio(h, folio, folio_nid(folio)); 2045 return folio; 2046 } 2047 2048 static void prep_and_add_allocated_folios(struct hstate *h, 2049 struct list_head *folio_list) 2050 { 2051 unsigned long flags; 2052 struct folio *folio, *tmp_f; 2053 2054 /* Send list for bulk vmemmap optimization processing */ 2055 hugetlb_vmemmap_optimize_folios(h, folio_list); 2056 2057 /* Add all new pool pages to free lists in one lock cycle */ 2058 spin_lock_irqsave(&hugetlb_lock, flags); 2059 list_for_each_entry_safe(folio, tmp_f, folio_list, lru) { 2060 __prep_account_new_huge_page(h, folio_nid(folio)); 2061 enqueue_hugetlb_folio(h, folio); 2062 } 2063 spin_unlock_irqrestore(&hugetlb_lock, flags); 2064 } 2065 2066 /* 2067 * Allocates a fresh hugetlb page in a node interleaved manner. The page 2068 * will later be added to the appropriate hugetlb pool. 
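 *
 * For illustration only (a sketch, not literal kernel code), a caller that
 * grows the pool, such as hugetlb_pages_alloc_boot_node() further below,
 * typically looks like:
 *
 *	nodemask_t noretry;
 *	int next_node = first_online_node;
 *	LIST_HEAD(folio_list);
 *
 *	nodes_clear(noretry);
 *	while (<more pages are wanted>) {
 *		folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
 *					      &noretry, &next_node);
 *		if (!folio)
 *			break;
 *		list_move(&folio->lru, &folio_list);
 *		cond_resched();
 *	}
 *	prep_and_add_allocated_folios(h, &folio_list);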
2069 */ 2070 static struct folio *alloc_pool_huge_folio(struct hstate *h, 2071 nodemask_t *nodes_allowed, 2072 nodemask_t *node_alloc_noretry, 2073 int *next_node) 2074 { 2075 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 2076 int nr_nodes, node; 2077 2078 for_each_node_mask_to_alloc(next_node, nr_nodes, node, nodes_allowed) { 2079 struct folio *folio; 2080 2081 folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, node, 2082 nodes_allowed, node_alloc_noretry); 2083 if (folio) 2084 return folio; 2085 } 2086 2087 return NULL; 2088 } 2089 2090 /* 2091 * Remove huge page from pool from next node to free. Attempt to keep 2092 * persistent huge pages more or less balanced over allowed nodes. 2093 * This routine only 'removes' the hugetlb page. The caller must make 2094 * an additional call to free the page to low level allocators. 2095 * Called with hugetlb_lock locked. 2096 */ 2097 static struct folio *remove_pool_hugetlb_folio(struct hstate *h, 2098 nodemask_t *nodes_allowed, bool acct_surplus) 2099 { 2100 int nr_nodes, node; 2101 struct folio *folio = NULL; 2102 2103 lockdep_assert_held(&hugetlb_lock); 2104 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 2105 /* 2106 * If we're returning unused surplus pages, only examine 2107 * nodes with surplus pages. 2108 */ 2109 if ((!acct_surplus || h->surplus_huge_pages_node[node]) && 2110 !list_empty(&h->hugepage_freelists[node])) { 2111 folio = list_entry(h->hugepage_freelists[node].next, 2112 struct folio, lru); 2113 remove_hugetlb_folio(h, folio, acct_surplus); 2114 break; 2115 } 2116 } 2117 2118 return folio; 2119 } 2120 2121 /* 2122 * Dissolve a given free hugetlb folio into free buddy pages. This function 2123 * does nothing for in-use hugetlb folios and non-hugetlb folios. 2124 * This function returns values like below: 2125 * 2126 * -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages 2127 * when the system is under memory pressure and the feature of 2128 * freeing unused vmemmap pages associated with each hugetlb page 2129 * is enabled. 2130 * -EBUSY: failed to dissolved free hugepages or the hugepage is in-use 2131 * (allocated or reserved.) 2132 * 0: successfully dissolved free hugepages or the page is not a 2133 * hugepage (considered as already dissolved) 2134 */ 2135 int dissolve_free_hugetlb_folio(struct folio *folio) 2136 { 2137 int rc = -EBUSY; 2138 2139 retry: 2140 /* Not to disrupt normal path by vainly holding hugetlb_lock */ 2141 if (!folio_test_hugetlb(folio)) 2142 return 0; 2143 2144 spin_lock_irq(&hugetlb_lock); 2145 if (!folio_test_hugetlb(folio)) { 2146 rc = 0; 2147 goto out; 2148 } 2149 2150 if (!folio_ref_count(folio)) { 2151 struct hstate *h = folio_hstate(folio); 2152 bool adjust_surplus = false; 2153 2154 if (!available_huge_pages(h)) 2155 goto out; 2156 2157 /* 2158 * We should make sure that the page is already on the free list 2159 * when it is dissolved. 2160 */ 2161 if (unlikely(!folio_test_hugetlb_freed(folio))) { 2162 spin_unlock_irq(&hugetlb_lock); 2163 cond_resched(); 2164 2165 /* 2166 * Theoretically, we should return -EBUSY when we 2167 * encounter this race. In fact, we have a chance 2168 * to successfully dissolve the page if we do a 2169 * retry. Because the race window is quite small. 2170 * If we seize this opportunity, it is an optimization 2171 * for increasing the success rate of dissolving page. 
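			 * (free_huge_folio() sets the "freed" flag, via
			 * enqueue_hugetlb_folio(), only once the folio is back
			 * on a free list, so after a short cond_resched() the
			 * flag is normally visible and the retry succeeds.)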
			 */
			goto retry;
		}

		if (h->surplus_huge_pages_node[folio_nid(folio)])
			adjust_surplus = true;
		remove_hugetlb_folio(h, folio, adjust_surplus);
		h->max_huge_pages--;
		spin_unlock_irq(&hugetlb_lock);

		/*
		 * Normally update_and_free_hugetlb_folio will allocate required
		 * vmemmap before freeing the page.  update_and_free_hugetlb_folio
		 * will fail to free the page if it can not allocate required
		 * vmemmap.  We need to adjust max_huge_pages if the page is not
		 * freed.  Attempt to allocate vmemmap here so that we can take
		 * appropriate action on failure.
		 *
		 * The folio_test_hugetlb check here is because
		 * remove_hugetlb_folio will clear hugetlb folio flag for
		 * non-vmemmap optimized hugetlb folios.
		 */
		if (folio_test_hugetlb(folio)) {
			rc = hugetlb_vmemmap_restore_folio(h, folio);
			if (rc) {
				spin_lock_irq(&hugetlb_lock);
				add_hugetlb_folio(h, folio, adjust_surplus);
				h->max_huge_pages++;
				goto out;
			}
		} else
			rc = 0;

		update_and_free_hugetlb_folio(h, folio, false);
		return rc;
	}
out:
	spin_unlock_irq(&hugetlb_lock);
	return rc;
}

/*
 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that this will dissolve a free gigantic hugepage completely, if any
 * part of it lies within the given range.
 * Also note that if dissolve_free_hugetlb_folio() returns with an error, all
 * free hugetlb folios that were dissolved before that error are lost.
 */
int dissolve_free_hugetlb_folios(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct folio *folio;
	int rc = 0;
	unsigned int order;
	struct hstate *h;

	if (!hugepages_supported())
		return rc;

	order = huge_page_order(&default_hstate);
	for_each_hstate(h)
		order = min(order, huge_page_order(h));

	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
		folio = pfn_folio(pfn);
		rc = dissolve_free_hugetlb_folio(folio);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Allocates a fresh surplus page from the page allocator.
 */
static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
	struct folio *folio = NULL;

	if (hstate_is_gigantic(h))
		return NULL;

	spin_lock_irq(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
		goto out_unlock;
	spin_unlock_irq(&hugetlb_lock);

	folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
	if (!folio)
		return NULL;

	hugetlb_vmemmap_optimize_folio(h, folio);

	spin_lock_irq(&hugetlb_lock);
	/*
	 * nr_huge_pages needs to be adjusted within the same lock cycle
	 * as surplus_pages, otherwise it might confuse
	 * persistent_huge_pages() momentarily.
	 */
	__prep_account_new_huge_page(h, folio_nid(folio));

	/*
	 * We could have raced with the pool size change.
	 * Double check that and simply deallocate the new page
	 * if we would end up overcommitting the surpluses.
Abuse 2280 * temporary page to workaround the nasty free_huge_folio 2281 * codeflow 2282 */ 2283 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { 2284 folio_set_hugetlb_temporary(folio); 2285 spin_unlock_irq(&hugetlb_lock); 2286 free_huge_folio(folio); 2287 return NULL; 2288 } 2289 2290 h->surplus_huge_pages++; 2291 h->surplus_huge_pages_node[folio_nid(folio)]++; 2292 2293 out_unlock: 2294 spin_unlock_irq(&hugetlb_lock); 2295 2296 return folio; 2297 } 2298 2299 static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, 2300 int nid, nodemask_t *nmask) 2301 { 2302 struct folio *folio; 2303 2304 if (hstate_is_gigantic(h)) 2305 return NULL; 2306 2307 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask); 2308 if (!folio) 2309 return NULL; 2310 2311 /* fresh huge pages are frozen */ 2312 folio_ref_unfreeze(folio, 1); 2313 /* 2314 * We do not account these pages as surplus because they are only 2315 * temporary and will be released properly on the last reference 2316 */ 2317 folio_set_hugetlb_temporary(folio); 2318 2319 return folio; 2320 } 2321 2322 /* 2323 * Use the VMA's mpolicy to allocate a huge page from the buddy. 2324 */ 2325 static 2326 struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h, 2327 struct vm_area_struct *vma, unsigned long addr) 2328 { 2329 struct folio *folio = NULL; 2330 struct mempolicy *mpol; 2331 gfp_t gfp_mask = htlb_alloc_mask(h); 2332 int nid; 2333 nodemask_t *nodemask; 2334 2335 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask); 2336 if (mpol_is_preferred_many(mpol)) { 2337 gfp_t gfp = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 2338 2339 folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask); 2340 2341 /* Fallback to all nodes if page==NULL */ 2342 nodemask = NULL; 2343 } 2344 2345 if (!folio) 2346 folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask); 2347 mpol_cond_put(mpol); 2348 return folio; 2349 } 2350 2351 struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid, 2352 nodemask_t *nmask, gfp_t gfp_mask) 2353 { 2354 struct folio *folio; 2355 2356 spin_lock_irq(&hugetlb_lock); 2357 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, preferred_nid, 2358 nmask); 2359 if (folio) { 2360 VM_BUG_ON(!h->resv_huge_pages); 2361 h->resv_huge_pages--; 2362 } 2363 2364 spin_unlock_irq(&hugetlb_lock); 2365 return folio; 2366 } 2367 2368 /* folio migration callback function */ 2369 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, 2370 nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback) 2371 { 2372 spin_lock_irq(&hugetlb_lock); 2373 if (available_huge_pages(h)) { 2374 struct folio *folio; 2375 2376 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, 2377 preferred_nid, nmask); 2378 if (folio) { 2379 spin_unlock_irq(&hugetlb_lock); 2380 return folio; 2381 } 2382 } 2383 spin_unlock_irq(&hugetlb_lock); 2384 2385 /* We cannot fallback to other nodes, as we could break the per-node pool. 
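	 * For illustration only: a migration caller that must preserve the
	 * per-node pool passes allow_alloc_fallback == false, e.g.
	 *
	 *	folio = alloc_hugetlb_folio_nodemask(h, preferred_nid, nmask,
	 *					     gfp_mask, false);
	 *
	 * so that the temporary folio allocated below is not allowed to fall
	 * back to a node other than preferred_nid.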
*/ 2386 if (!allow_alloc_fallback) 2387 gfp_mask |= __GFP_THISNODE; 2388 2389 return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask); 2390 } 2391 2392 static nodemask_t *policy_mbind_nodemask(gfp_t gfp) 2393 { 2394 #ifdef CONFIG_NUMA 2395 struct mempolicy *mpol = get_task_policy(current); 2396 2397 /* 2398 * Only enforce MPOL_BIND policy which overlaps with cpuset policy 2399 * (from policy_nodemask) specifically for hugetlb case 2400 */ 2401 if (mpol->mode == MPOL_BIND && 2402 (apply_policy_zone(mpol, gfp_zone(gfp)) && 2403 cpuset_nodemask_valid_mems_allowed(&mpol->nodes))) 2404 return &mpol->nodes; 2405 #endif 2406 return NULL; 2407 } 2408 2409 /* 2410 * Increase the hugetlb pool such that it can accommodate a reservation 2411 * of size 'delta'. 2412 */ 2413 static int gather_surplus_pages(struct hstate *h, long delta) 2414 __must_hold(&hugetlb_lock) 2415 { 2416 LIST_HEAD(surplus_list); 2417 struct folio *folio, *tmp; 2418 int ret; 2419 long i; 2420 long needed, allocated; 2421 bool alloc_ok = true; 2422 int node; 2423 nodemask_t *mbind_nodemask, alloc_nodemask; 2424 2425 mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h)); 2426 if (mbind_nodemask) 2427 nodes_and(alloc_nodemask, *mbind_nodemask, cpuset_current_mems_allowed); 2428 else 2429 alloc_nodemask = cpuset_current_mems_allowed; 2430 2431 lockdep_assert_held(&hugetlb_lock); 2432 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; 2433 if (needed <= 0) { 2434 h->resv_huge_pages += delta; 2435 return 0; 2436 } 2437 2438 allocated = 0; 2439 2440 ret = -ENOMEM; 2441 retry: 2442 spin_unlock_irq(&hugetlb_lock); 2443 for (i = 0; i < needed; i++) { 2444 folio = NULL; 2445 2446 /* Prioritize current node */ 2447 if (node_isset(numa_mem_id(), alloc_nodemask)) 2448 folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), 2449 numa_mem_id(), NULL); 2450 2451 if (!folio) { 2452 for_each_node_mask(node, alloc_nodemask) { 2453 if (node == numa_mem_id()) 2454 continue; 2455 folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), 2456 node, NULL); 2457 if (folio) 2458 break; 2459 } 2460 } 2461 if (!folio) { 2462 alloc_ok = false; 2463 break; 2464 } 2465 list_add(&folio->lru, &surplus_list); 2466 cond_resched(); 2467 } 2468 allocated += i; 2469 2470 /* 2471 * After retaking hugetlb_lock, we need to recalculate 'needed' 2472 * because either resv_huge_pages or free_huge_pages may have changed. 2473 */ 2474 spin_lock_irq(&hugetlb_lock); 2475 needed = (h->resv_huge_pages + delta) - 2476 (h->free_huge_pages + allocated); 2477 if (needed > 0) { 2478 if (alloc_ok) 2479 goto retry; 2480 /* 2481 * We were not able to allocate enough pages to 2482 * satisfy the entire reservation so we free what 2483 * we've allocated so far. 2484 */ 2485 goto free; 2486 } 2487 /* 2488 * The surplus_list now contains _at_least_ the number of extra pages 2489 * needed to accommodate the reservation. Add the appropriate number 2490 * of pages to the hugetlb pool and free the extras back to the buddy 2491 * allocator. Commit the entire reservation here to prevent another 2492 * process from stealing the pages as they are added to the pool but 2493 * before they are reserved. 
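	 *
	 * Worked example (illustrative numbers only): with delta = 4,
	 * resv_huge_pages = 10 and free_huge_pages = 8, 'needed' starts at 6
	 * and 6 surplus folios are allocated.  If 2 pages were freed elsewhere
	 * while the lock was dropped, the recomputed 'needed' is -2, so after
	 * the 'needed += allocated' below only 4 folios are enqueued and the
	 * remaining 2 go back to the buddy allocator.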
2494 */ 2495 needed += allocated; 2496 h->resv_huge_pages += delta; 2497 ret = 0; 2498 2499 /* Free the needed pages to the hugetlb pool */ 2500 list_for_each_entry_safe(folio, tmp, &surplus_list, lru) { 2501 if ((--needed) < 0) 2502 break; 2503 /* Add the page to the hugetlb allocator */ 2504 enqueue_hugetlb_folio(h, folio); 2505 } 2506 free: 2507 spin_unlock_irq(&hugetlb_lock); 2508 2509 /* 2510 * Free unnecessary surplus pages to the buddy allocator. 2511 * Pages have no ref count, call free_huge_folio directly. 2512 */ 2513 list_for_each_entry_safe(folio, tmp, &surplus_list, lru) 2514 free_huge_folio(folio); 2515 spin_lock_irq(&hugetlb_lock); 2516 2517 return ret; 2518 } 2519 2520 /* 2521 * This routine has two main purposes: 2522 * 1) Decrement the reservation count (resv_huge_pages) by the value passed 2523 * in unused_resv_pages. This corresponds to the prior adjustments made 2524 * to the associated reservation map. 2525 * 2) Free any unused surplus pages that may have been allocated to satisfy 2526 * the reservation. As many as unused_resv_pages may be freed. 2527 */ 2528 static void return_unused_surplus_pages(struct hstate *h, 2529 unsigned long unused_resv_pages) 2530 { 2531 unsigned long nr_pages; 2532 LIST_HEAD(page_list); 2533 2534 lockdep_assert_held(&hugetlb_lock); 2535 /* Uncommit the reservation */ 2536 h->resv_huge_pages -= unused_resv_pages; 2537 2538 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 2539 goto out; 2540 2541 /* 2542 * Part (or even all) of the reservation could have been backed 2543 * by pre-allocated pages. Only free surplus pages. 2544 */ 2545 nr_pages = min(unused_resv_pages, h->surplus_huge_pages); 2546 2547 /* 2548 * We want to release as many surplus pages as possible, spread 2549 * evenly across all nodes with memory. Iterate across these nodes 2550 * until we can no longer free unreserved surplus pages. This occurs 2551 * when the nodes with surplus pages have no free pages. 2552 * remove_pool_hugetlb_folio() will balance the freed pages across the 2553 * on-line nodes with memory and will handle the hstate accounting. 2554 */ 2555 while (nr_pages--) { 2556 struct folio *folio; 2557 2558 folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], 1); 2559 if (!folio) 2560 goto out; 2561 2562 list_add(&folio->lru, &page_list); 2563 } 2564 2565 out: 2566 spin_unlock_irq(&hugetlb_lock); 2567 update_and_free_pages_bulk(h, &page_list); 2568 spin_lock_irq(&hugetlb_lock); 2569 } 2570 2571 2572 /* 2573 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation 2574 * are used by the huge page allocation routines to manage reservations. 2575 * 2576 * vma_needs_reservation is called to determine if the huge page at addr 2577 * within the vma has an associated reservation. If a reservation is 2578 * needed, the value 1 is returned. The caller is then responsible for 2579 * managing the global reservation and subpool usage counts. After 2580 * the huge page has been allocated, vma_commit_reservation is called 2581 * to add the page to the reservation map. If the page allocation fails, 2582 * the reservation must be ended instead of committed. vma_end_reservation 2583 * is called in such cases. 2584 * 2585 * In the normal case, vma_commit_reservation returns the same value 2586 * as the preceding vma_needs_reservation call. The only time this 2587 * is not the case is if a reserve map was changed between calls. It 2588 * is the responsibility of the caller to notice the difference and 2589 * take appropriate action. 
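 *
 * As an illustrative sketch (not literal kernel code), the usual sequence in
 * an allocation path is:
 *
 *	if (vma_needs_reservation(h, vma, addr) < 0)
 *		return ERR_PTR(-ENOMEM);
 *	folio = <allocate a huge page>;
 *	if (!folio) {
 *		vma_end_reservation(h, vma, addr);
 *		return ERR_PTR(-ENOSPC);
 *	}
 *	vma_commit_reservation(h, vma, addr);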
2590 * 2591 * vma_add_reservation is used in error paths where a reservation must 2592 * be restored when a newly allocated huge page must be freed. It is 2593 * to be called after calling vma_needs_reservation to determine if a 2594 * reservation exists. 2595 * 2596 * vma_del_reservation is used in error paths where an entry in the reserve 2597 * map was created during huge page allocation and must be removed. It is to 2598 * be called after calling vma_needs_reservation to determine if a reservation 2599 * exists. 2600 */ 2601 enum vma_resv_mode { 2602 VMA_NEEDS_RESV, 2603 VMA_COMMIT_RESV, 2604 VMA_END_RESV, 2605 VMA_ADD_RESV, 2606 VMA_DEL_RESV, 2607 }; 2608 static long __vma_reservation_common(struct hstate *h, 2609 struct vm_area_struct *vma, unsigned long addr, 2610 enum vma_resv_mode mode) 2611 { 2612 struct resv_map *resv; 2613 pgoff_t idx; 2614 long ret; 2615 long dummy_out_regions_needed; 2616 2617 resv = vma_resv_map(vma); 2618 if (!resv) 2619 return 1; 2620 2621 idx = vma_hugecache_offset(h, vma, addr); 2622 switch (mode) { 2623 case VMA_NEEDS_RESV: 2624 ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed); 2625 /* We assume that vma_reservation_* routines always operate on 2626 * 1 page, and that adding to resv map a 1 page entry can only 2627 * ever require 1 region. 2628 */ 2629 VM_BUG_ON(dummy_out_regions_needed != 1); 2630 break; 2631 case VMA_COMMIT_RESV: 2632 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2633 /* region_add calls of range 1 should never fail. */ 2634 VM_BUG_ON(ret < 0); 2635 break; 2636 case VMA_END_RESV: 2637 region_abort(resv, idx, idx + 1, 1); 2638 ret = 0; 2639 break; 2640 case VMA_ADD_RESV: 2641 if (vma->vm_flags & VM_MAYSHARE) { 2642 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2643 /* region_add calls of range 1 should never fail. */ 2644 VM_BUG_ON(ret < 0); 2645 } else { 2646 region_abort(resv, idx, idx + 1, 1); 2647 ret = region_del(resv, idx, idx + 1); 2648 } 2649 break; 2650 case VMA_DEL_RESV: 2651 if (vma->vm_flags & VM_MAYSHARE) { 2652 region_abort(resv, idx, idx + 1, 1); 2653 ret = region_del(resv, idx, idx + 1); 2654 } else { 2655 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2656 /* region_add calls of range 1 should never fail. */ 2657 VM_BUG_ON(ret < 0); 2658 } 2659 break; 2660 default: 2661 BUG(); 2662 } 2663 2664 if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV) 2665 return ret; 2666 /* 2667 * We know private mapping must have HPAGE_RESV_OWNER set. 2668 * 2669 * In most cases, reserves always exist for private mappings. 2670 * However, a file associated with mapping could have been 2671 * hole punched or truncated after reserves were consumed. 2672 * As subsequent fault on such a range will not use reserves. 2673 * Subtle - The reserve map for private mappings has the 2674 * opposite meaning than that of shared mappings. If NO 2675 * entry is in the reserve map, it means a reservation exists. 2676 * If an entry exists in the reserve map, it means the 2677 * reservation has already been consumed. As a result, the 2678 * return value of this routine is the opposite of the 2679 * value returned from reserve map manipulation routines above. 
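	 *
	 * For example, for a private mapping a region_chg() result of 1 (no
	 * entry yet) is returned as 0 here (the reservation is still
	 * available), while a result of 0 (entry already present) is returned
	 * as 1 (the reservation was already consumed).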
2680 */ 2681 if (ret > 0) 2682 return 0; 2683 if (ret == 0) 2684 return 1; 2685 return ret; 2686 } 2687 2688 static long vma_needs_reservation(struct hstate *h, 2689 struct vm_area_struct *vma, unsigned long addr) 2690 { 2691 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); 2692 } 2693 2694 static long vma_commit_reservation(struct hstate *h, 2695 struct vm_area_struct *vma, unsigned long addr) 2696 { 2697 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); 2698 } 2699 2700 static void vma_end_reservation(struct hstate *h, 2701 struct vm_area_struct *vma, unsigned long addr) 2702 { 2703 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); 2704 } 2705 2706 static long vma_add_reservation(struct hstate *h, 2707 struct vm_area_struct *vma, unsigned long addr) 2708 { 2709 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); 2710 } 2711 2712 static long vma_del_reservation(struct hstate *h, 2713 struct vm_area_struct *vma, unsigned long addr) 2714 { 2715 return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV); 2716 } 2717 2718 /* 2719 * This routine is called to restore reservation information on error paths. 2720 * It should ONLY be called for folios allocated via alloc_hugetlb_folio(), 2721 * and the hugetlb mutex should remain held when calling this routine. 2722 * 2723 * It handles two specific cases: 2724 * 1) A reservation was in place and the folio consumed the reservation. 2725 * hugetlb_restore_reserve is set in the folio. 2726 * 2) No reservation was in place for the page, so hugetlb_restore_reserve is 2727 * not set. However, alloc_hugetlb_folio always updates the reserve map. 2728 * 2729 * In case 1, free_huge_folio later in the error path will increment the 2730 * global reserve count. But, free_huge_folio does not have enough context 2731 * to adjust the reservation map. This case deals primarily with private 2732 * mappings. Adjust the reserve map here to be consistent with global 2733 * reserve count adjustments to be made by free_huge_folio. Make sure the 2734 * reserve map indicates there is a reservation present. 2735 * 2736 * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio. 2737 */ 2738 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, 2739 unsigned long address, struct folio *folio) 2740 { 2741 long rc = vma_needs_reservation(h, vma, address); 2742 2743 if (folio_test_hugetlb_restore_reserve(folio)) { 2744 if (unlikely(rc < 0)) 2745 /* 2746 * Rare out of memory condition in reserve map 2747 * manipulation. Clear hugetlb_restore_reserve so 2748 * that global reserve count will not be incremented 2749 * by free_huge_folio. This will make it appear 2750 * as though the reservation for this folio was 2751 * consumed. This may prevent the task from 2752 * faulting in the folio at a later time. This 2753 * is better than inconsistent global huge page 2754 * accounting of reserve counts. 2755 */ 2756 folio_clear_hugetlb_restore_reserve(folio); 2757 else if (rc) 2758 (void)vma_add_reservation(h, vma, address); 2759 else 2760 vma_end_reservation(h, vma, address); 2761 } else { 2762 if (!rc) { 2763 /* 2764 * This indicates there is an entry in the reserve map 2765 * not added by alloc_hugetlb_folio. We know it was added 2766 * before the alloc_hugetlb_folio call, otherwise 2767 * hugetlb_restore_reserve would be set on the folio. 2768 * Remove the entry so that a subsequent allocation 2769 * does not consume a reservation. 
2770 */ 2771 rc = vma_del_reservation(h, vma, address); 2772 if (rc < 0) 2773 /* 2774 * VERY rare out of memory condition. Since 2775 * we can not delete the entry, set 2776 * hugetlb_restore_reserve so that the reserve 2777 * count will be incremented when the folio 2778 * is freed. This reserve will be consumed 2779 * on a subsequent allocation. 2780 */ 2781 folio_set_hugetlb_restore_reserve(folio); 2782 } else if (rc < 0) { 2783 /* 2784 * Rare out of memory condition from 2785 * vma_needs_reservation call. Memory allocation is 2786 * only attempted if a new entry is needed. Therefore, 2787 * this implies there is not an entry in the 2788 * reserve map. 2789 * 2790 * For shared mappings, no entry in the map indicates 2791 * no reservation. We are done. 2792 */ 2793 if (!(vma->vm_flags & VM_MAYSHARE)) 2794 /* 2795 * For private mappings, no entry indicates 2796 * a reservation is present. Since we can 2797 * not add an entry, set hugetlb_restore_reserve 2798 * on the folio so reserve count will be 2799 * incremented when freed. This reserve will 2800 * be consumed on a subsequent allocation. 2801 */ 2802 folio_set_hugetlb_restore_reserve(folio); 2803 } else 2804 /* 2805 * No reservation present, do nothing 2806 */ 2807 vma_end_reservation(h, vma, address); 2808 } 2809 } 2810 2811 /* 2812 * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve 2813 * the old one 2814 * @h: struct hstate old page belongs to 2815 * @old_folio: Old folio to dissolve 2816 * @list: List to isolate the page in case we need to 2817 * Returns 0 on success, otherwise negated error. 2818 */ 2819 static int alloc_and_dissolve_hugetlb_folio(struct hstate *h, 2820 struct folio *old_folio, struct list_head *list) 2821 { 2822 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 2823 int nid = folio_nid(old_folio); 2824 struct folio *new_folio = NULL; 2825 int ret = 0; 2826 2827 retry: 2828 spin_lock_irq(&hugetlb_lock); 2829 if (!folio_test_hugetlb(old_folio)) { 2830 /* 2831 * Freed from under us. Drop new_folio too. 2832 */ 2833 goto free_new; 2834 } else if (folio_ref_count(old_folio)) { 2835 bool isolated; 2836 2837 /* 2838 * Someone has grabbed the folio, try to isolate it here. 2839 * Fail with -EBUSY if not possible. 2840 */ 2841 spin_unlock_irq(&hugetlb_lock); 2842 isolated = folio_isolate_hugetlb(old_folio, list); 2843 ret = isolated ? 0 : -EBUSY; 2844 spin_lock_irq(&hugetlb_lock); 2845 goto free_new; 2846 } else if (!folio_test_hugetlb_freed(old_folio)) { 2847 /* 2848 * Folio's refcount is 0 but it has not been enqueued in the 2849 * freelist yet. Race window is small, so we can succeed here if 2850 * we retry. 2851 */ 2852 spin_unlock_irq(&hugetlb_lock); 2853 cond_resched(); 2854 goto retry; 2855 } else { 2856 if (!new_folio) { 2857 spin_unlock_irq(&hugetlb_lock); 2858 new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, 2859 NULL, NULL); 2860 if (!new_folio) 2861 return -ENOMEM; 2862 __prep_new_hugetlb_folio(h, new_folio); 2863 goto retry; 2864 } 2865 2866 /* 2867 * Ok, old_folio is still a genuine free hugepage. Remove it from 2868 * the freelist and decrease the counters. These will be 2869 * incremented again when calling __prep_account_new_huge_page() 2870 * and enqueue_hugetlb_folio() for new_folio. The counters will 2871 * remain stable since this happens under the lock. 2872 */ 2873 remove_hugetlb_folio(h, old_folio, false); 2874 2875 /* 2876 * Ref count on new_folio is already zero as it was dropped 2877 * earlier. It can be directly added to the pool free list. 
		 */
		__prep_account_new_huge_page(h, nid);
		enqueue_hugetlb_folio(h, new_folio);

		/*
		 * Folio has been replaced, we can safely free the old one.
		 */
		spin_unlock_irq(&hugetlb_lock);
		update_and_free_hugetlb_folio(h, old_folio, false);
	}

	return ret;

free_new:
	spin_unlock_irq(&hugetlb_lock);
	if (new_folio)
		update_and_free_hugetlb_folio(h, new_folio, false);

	return ret;
}

int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
{
	struct hstate *h;
	int ret = -EBUSY;

	/*
	 * The page might have been dissolved from under our feet, so make sure
	 * to carefully check the state under the lock.
	 * Return success when racing as if we dissolved the page ourselves.
	 */
	spin_lock_irq(&hugetlb_lock);
	if (folio_test_hugetlb(folio)) {
		h = folio_hstate(folio);
	} else {
		spin_unlock_irq(&hugetlb_lock);
		return 0;
	}
	spin_unlock_irq(&hugetlb_lock);

	/*
	 * Fence off gigantic pages as there is a cyclic dependency between
	 * alloc_contig_range and them. Return -ENOMEM as this has the effect
	 * of bailing out right away without further retrying.
	 */
	if (hstate_is_gigantic(h))
		return -ENOMEM;

	if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list))
		ret = 0;
	else if (!folio_ref_count(folio))
		ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);

	return ret;
}

/*
 * replace_free_hugepage_folios - Replace free hugepage folios in a given pfn
 * range with new folios.
 * @start_pfn: start pfn of the given pfn range
 * @end_pfn: end pfn of the given pfn range
 * Returns 0 on success, otherwise negated error.
 */
int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
{
	struct hstate *h;
	struct folio *folio;
	int ret = 0;

	LIST_HEAD(isolate_list);

	while (start_pfn < end_pfn) {
		folio = pfn_folio(start_pfn);
		if (folio_test_hugetlb(folio)) {
			h = folio_hstate(folio);
		} else {
			start_pfn++;
			continue;
		}

		if (!folio_ref_count(folio)) {
			ret = alloc_and_dissolve_hugetlb_folio(h, folio,
							       &isolate_list);
			if (ret)
				break;

			putback_movable_pages(&isolate_list);
		}
		start_pfn++;
	}

	return ret;
}

void wait_for_freed_hugetlb_folios(void)
{
	if (llist_empty(&hpage_freelist))
		return;

	flush_work(&free_hpage_work);
}

typedef enum {
	/*
	 * For either 0/1: we checked the per-vma resv map, and one resv
	 * count either can be reused (0), or an extra needed (1).
	 */
	MAP_CHG_REUSE = 0,
	MAP_CHG_NEEDED = 1,
	/*
	 * The per-vma resv count cannot be used, hence a new resv
	 * count is enforced.
	 *
	 * NOTE: This is mostly identical to MAP_CHG_NEEDED, except
	 * that currently vma_needs_reservation() has an unwanted side
	 * effect of requiring either end() or commit() to complete the
	 * transaction.  Hence it needs to differentiate from NEEDED.
	 */
	MAP_CHG_ENFORCED = 2,
} map_chg_state;

/*
 * NOTE! "cow_from_owner" represents a very hacky usage only used in CoW
 * faults of hugetlb private mappings on top of a non-page-cache folio (in
 * which case even if there's a private vma resv map it won't cover such
 * allocation).
New call sites should (probably) never set it to true!! 3004 * When it's set, the allocation will bypass all vma level reservations. 3005 */ 3006 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, 3007 unsigned long addr, bool cow_from_owner) 3008 { 3009 struct hugepage_subpool *spool = subpool_vma(vma); 3010 struct hstate *h = hstate_vma(vma); 3011 struct folio *folio; 3012 long retval, gbl_chg, gbl_reserve; 3013 map_chg_state map_chg; 3014 int ret, idx; 3015 struct hugetlb_cgroup *h_cg = NULL; 3016 gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL; 3017 3018 idx = hstate_index(h); 3019 3020 /* Whether we need a separate per-vma reservation? */ 3021 if (cow_from_owner) { 3022 /* 3023 * Special case! Since it's a CoW on top of a reserved 3024 * page, the private resv map doesn't count. So it cannot 3025 * consume the per-vma resv map even if it's reserved. 3026 */ 3027 map_chg = MAP_CHG_ENFORCED; 3028 } else { 3029 /* 3030 * Examine the region/reserve map to determine if the process 3031 * has a reservation for the page to be allocated. A return 3032 * code of zero indicates a reservation exists (no change). 3033 */ 3034 retval = vma_needs_reservation(h, vma, addr); 3035 if (retval < 0) 3036 return ERR_PTR(-ENOMEM); 3037 map_chg = retval ? MAP_CHG_NEEDED : MAP_CHG_REUSE; 3038 } 3039 3040 /* 3041 * Whether we need a separate global reservation? 3042 * 3043 * Processes that did not create the mapping will have no 3044 * reserves as indicated by the region/reserve map. Check 3045 * that the allocation will not exceed the subpool limit. 3046 * Or if it can get one from the pool reservation directly. 3047 */ 3048 if (map_chg) { 3049 gbl_chg = hugepage_subpool_get_pages(spool, 1); 3050 if (gbl_chg < 0) 3051 goto out_end_reservation; 3052 } else { 3053 /* 3054 * If we have the vma reservation ready, no need for extra 3055 * global reservation. 3056 */ 3057 gbl_chg = 0; 3058 } 3059 3060 /* 3061 * If this allocation is not consuming a per-vma reservation, 3062 * charge the hugetlb cgroup now. 3063 */ 3064 if (map_chg) { 3065 ret = hugetlb_cgroup_charge_cgroup_rsvd( 3066 idx, pages_per_huge_page(h), &h_cg); 3067 if (ret) 3068 goto out_subpool_put; 3069 } 3070 3071 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); 3072 if (ret) 3073 goto out_uncharge_cgroup_reservation; 3074 3075 spin_lock_irq(&hugetlb_lock); 3076 /* 3077 * glb_chg is passed to indicate whether or not a page must be taken 3078 * from the global free pool (global change). gbl_chg == 0 indicates 3079 * a reservation exists for the allocation. 3080 */ 3081 folio = dequeue_hugetlb_folio_vma(h, vma, addr, gbl_chg); 3082 if (!folio) { 3083 spin_unlock_irq(&hugetlb_lock); 3084 folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr); 3085 if (!folio) 3086 goto out_uncharge_cgroup; 3087 spin_lock_irq(&hugetlb_lock); 3088 list_add(&folio->lru, &h->hugepage_activelist); 3089 folio_ref_unfreeze(folio, 1); 3090 /* Fall through */ 3091 } 3092 3093 /* 3094 * Either dequeued or buddy-allocated folio needs to add special 3095 * mark to the folio when it consumes a global reservation. 3096 */ 3097 if (!gbl_chg) { 3098 folio_set_hugetlb_restore_reserve(folio); 3099 h->resv_huge_pages--; 3100 } 3101 3102 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio); 3103 /* If allocation is not consuming a reservation, also store the 3104 * hugetlb_cgroup pointer on the page. 
3105 */ 3106 if (map_chg) { 3107 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h), 3108 h_cg, folio); 3109 } 3110 3111 spin_unlock_irq(&hugetlb_lock); 3112 3113 hugetlb_set_folio_subpool(folio, spool); 3114 3115 if (map_chg != MAP_CHG_ENFORCED) { 3116 /* commit() is only needed if the map_chg is not enforced */ 3117 retval = vma_commit_reservation(h, vma, addr); 3118 /* 3119 * Check for possible race conditions. When it happens.. 3120 * The page was added to the reservation map between 3121 * vma_needs_reservation and vma_commit_reservation. 3122 * This indicates a race with hugetlb_reserve_pages. 3123 * Adjust for the subpool count incremented above AND 3124 * in hugetlb_reserve_pages for the same page. Also, 3125 * the reservation count added in hugetlb_reserve_pages 3126 * no longer applies. 3127 */ 3128 if (unlikely(map_chg == MAP_CHG_NEEDED && retval == 0)) { 3129 long rsv_adjust; 3130 3131 rsv_adjust = hugepage_subpool_put_pages(spool, 1); 3132 hugetlb_acct_memory(h, -rsv_adjust); 3133 if (map_chg) { 3134 spin_lock_irq(&hugetlb_lock); 3135 hugetlb_cgroup_uncharge_folio_rsvd( 3136 hstate_index(h), pages_per_huge_page(h), 3137 folio); 3138 spin_unlock_irq(&hugetlb_lock); 3139 } 3140 } 3141 } 3142 3143 ret = mem_cgroup_charge_hugetlb(folio, gfp); 3144 /* 3145 * Unconditionally increment NR_HUGETLB here. If it turns out that 3146 * mem_cgroup_charge_hugetlb failed, then immediately free the page and 3147 * decrement NR_HUGETLB. 3148 */ 3149 lruvec_stat_mod_folio(folio, NR_HUGETLB, pages_per_huge_page(h)); 3150 3151 if (ret == -ENOMEM) { 3152 free_huge_folio(folio); 3153 return ERR_PTR(-ENOMEM); 3154 } 3155 3156 return folio; 3157 3158 out_uncharge_cgroup: 3159 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); 3160 out_uncharge_cgroup_reservation: 3161 if (map_chg) 3162 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h), 3163 h_cg); 3164 out_subpool_put: 3165 /* 3166 * put page to subpool iff the quota of subpool's rsv_hpages is used 3167 * during hugepage_subpool_get_pages. 3168 */ 3169 if (map_chg && !gbl_chg) { 3170 gbl_reserve = hugepage_subpool_put_pages(spool, 1); 3171 hugetlb_acct_memory(h, -gbl_reserve); 3172 } 3173 3174 3175 out_end_reservation: 3176 if (map_chg != MAP_CHG_ENFORCED) 3177 vma_end_reservation(h, vma, addr); 3178 return ERR_PTR(-ENOSPC); 3179 } 3180 3181 static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact) 3182 { 3183 struct huge_bootmem_page *m; 3184 int listnode = nid; 3185 3186 if (hugetlb_early_cma(h)) 3187 m = hugetlb_cma_alloc_bootmem(h, &listnode, node_exact); 3188 else { 3189 if (node_exact) 3190 m = memblock_alloc_exact_nid_raw(huge_page_size(h), 3191 huge_page_size(h), 0, 3192 MEMBLOCK_ALLOC_ACCESSIBLE, nid); 3193 else { 3194 m = memblock_alloc_try_nid_raw(huge_page_size(h), 3195 huge_page_size(h), 0, 3196 MEMBLOCK_ALLOC_ACCESSIBLE, nid); 3197 /* 3198 * For pre-HVO to work correctly, pages need to be on 3199 * the list for the node they were actually allocated 3200 * from. That node may be different in the case of 3201 * fallback by memblock_alloc_try_nid_raw. So, 3202 * extract the actual node first. 3203 */ 3204 if (m) 3205 listnode = early_pfn_to_nid(PHYS_PFN(virt_to_phys(m))); 3206 } 3207 3208 if (m) { 3209 m->flags = 0; 3210 m->cma = NULL; 3211 } 3212 } 3213 3214 if (m) { 3215 /* 3216 * Use the beginning of the huge page to store the 3217 * huge_bootmem_page struct (until gather_bootmem 3218 * puts them into the mem_map). 
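		 *
		 * As an illustration only, gather_bootmem_prealloc_node()
		 * later recovers the folio from this stashed struct with:
		 *
		 *	struct page *page = virt_to_page(m);
		 *	struct folio *folio = (void *)page;
		 *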
3219 * 3220 * Put them into a private list first because mem_map 3221 * is not up yet. 3222 */ 3223 INIT_LIST_HEAD(&m->list); 3224 list_add(&m->list, &huge_boot_pages[listnode]); 3225 m->hstate = h; 3226 } 3227 3228 return m; 3229 } 3230 3231 int alloc_bootmem_huge_page(struct hstate *h, int nid) 3232 __attribute__ ((weak, alias("__alloc_bootmem_huge_page"))); 3233 int __alloc_bootmem_huge_page(struct hstate *h, int nid) 3234 { 3235 struct huge_bootmem_page *m = NULL; /* initialize for clang */ 3236 int nr_nodes, node = nid; 3237 3238 /* do node specific alloc */ 3239 if (nid != NUMA_NO_NODE) { 3240 m = alloc_bootmem(h, node, true); 3241 if (!m) 3242 return 0; 3243 goto found; 3244 } 3245 3246 /* allocate from next node when distributing huge pages */ 3247 for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, &node_states[N_ONLINE]) { 3248 m = alloc_bootmem(h, node, false); 3249 if (!m) 3250 return 0; 3251 goto found; 3252 } 3253 3254 found: 3255 3256 /* 3257 * Only initialize the head struct page in memmap_init_reserved_pages, 3258 * rest of the struct pages will be initialized by the HugeTLB 3259 * subsystem itself. 3260 * The head struct page is used to get folio information by the HugeTLB 3261 * subsystem like zone id and node id. 3262 */ 3263 memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE), 3264 huge_page_size(h) - PAGE_SIZE); 3265 3266 return 1; 3267 } 3268 3269 /* Initialize [start_page:end_page_number] tail struct pages of a hugepage */ 3270 static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio, 3271 unsigned long start_page_number, 3272 unsigned long end_page_number) 3273 { 3274 enum zone_type zone = zone_idx(folio_zone(folio)); 3275 int nid = folio_nid(folio); 3276 unsigned long head_pfn = folio_pfn(folio); 3277 unsigned long pfn, end_pfn = head_pfn + end_page_number; 3278 int ret; 3279 3280 for (pfn = head_pfn + start_page_number; pfn < end_pfn; pfn++) { 3281 struct page *page = pfn_to_page(pfn); 3282 3283 __init_single_page(page, pfn, zone, nid); 3284 prep_compound_tail((struct page *)folio, pfn - head_pfn); 3285 ret = page_ref_freeze(page, 1); 3286 VM_BUG_ON(!ret); 3287 } 3288 } 3289 3290 static void __init hugetlb_folio_init_vmemmap(struct folio *folio, 3291 struct hstate *h, 3292 unsigned long nr_pages) 3293 { 3294 int ret; 3295 3296 /* Prepare folio head */ 3297 __folio_clear_reserved(folio); 3298 __folio_set_head(folio); 3299 ret = folio_ref_freeze(folio, 1); 3300 VM_BUG_ON(!ret); 3301 /* Initialize the necessary tail struct pages */ 3302 hugetlb_folio_init_tail_vmemmap(folio, 1, nr_pages); 3303 prep_compound_head((struct page *)folio, huge_page_order(h)); 3304 } 3305 3306 static bool __init hugetlb_bootmem_page_prehvo(struct huge_bootmem_page *m) 3307 { 3308 return m->flags & HUGE_BOOTMEM_HVO; 3309 } 3310 3311 static bool __init hugetlb_bootmem_page_earlycma(struct huge_bootmem_page *m) 3312 { 3313 return m->flags & HUGE_BOOTMEM_CMA; 3314 } 3315 3316 /* 3317 * memblock-allocated pageblocks might not have the migrate type set 3318 * if marked with the 'noinit' flag. Set it to the default (MIGRATE_MOVABLE) 3319 * here, or MIGRATE_CMA if this was a page allocated through an early CMA 3320 * reservation. 3321 * 3322 * In case of vmemmap optimized folios, the tail vmemmap pages are mapped 3323 * read-only, but that's ok - for sparse vmemmap this does not write to 3324 * the page structure. 
3325 */ 3326 static void __init hugetlb_bootmem_init_migratetype(struct folio *folio, 3327 struct hstate *h) 3328 { 3329 unsigned long nr_pages = pages_per_huge_page(h), i; 3330 3331 WARN_ON_ONCE(!pageblock_aligned(folio_pfn(folio))); 3332 3333 for (i = 0; i < nr_pages; i += pageblock_nr_pages) { 3334 if (folio_test_hugetlb_cma(folio)) 3335 init_cma_pageblock(folio_page(folio, i)); 3336 else 3337 set_pageblock_migratetype(folio_page(folio, i), 3338 MIGRATE_MOVABLE); 3339 } 3340 } 3341 3342 static void __init prep_and_add_bootmem_folios(struct hstate *h, 3343 struct list_head *folio_list) 3344 { 3345 unsigned long flags; 3346 struct folio *folio, *tmp_f; 3347 3348 /* Send list for bulk vmemmap optimization processing */ 3349 hugetlb_vmemmap_optimize_bootmem_folios(h, folio_list); 3350 3351 list_for_each_entry_safe(folio, tmp_f, folio_list, lru) { 3352 if (!folio_test_hugetlb_vmemmap_optimized(folio)) { 3353 /* 3354 * If HVO fails, initialize all tail struct pages 3355 * We do not worry about potential long lock hold 3356 * time as this is early in boot and there should 3357 * be no contention. 3358 */ 3359 hugetlb_folio_init_tail_vmemmap(folio, 3360 HUGETLB_VMEMMAP_RESERVE_PAGES, 3361 pages_per_huge_page(h)); 3362 } 3363 hugetlb_bootmem_init_migratetype(folio, h); 3364 /* Subdivide locks to achieve better parallel performance */ 3365 spin_lock_irqsave(&hugetlb_lock, flags); 3366 __prep_account_new_huge_page(h, folio_nid(folio)); 3367 enqueue_hugetlb_folio(h, folio); 3368 spin_unlock_irqrestore(&hugetlb_lock, flags); 3369 } 3370 } 3371 3372 bool __init hugetlb_bootmem_page_zones_valid(int nid, 3373 struct huge_bootmem_page *m) 3374 { 3375 unsigned long start_pfn; 3376 bool valid; 3377 3378 if (m->flags & HUGE_BOOTMEM_ZONES_VALID) { 3379 /* 3380 * Already validated, skip check. 3381 */ 3382 return true; 3383 } 3384 3385 if (hugetlb_bootmem_page_earlycma(m)) { 3386 valid = cma_validate_zones(m->cma); 3387 goto out; 3388 } 3389 3390 start_pfn = virt_to_phys(m) >> PAGE_SHIFT; 3391 3392 valid = !pfn_range_intersects_zones(nid, start_pfn, 3393 pages_per_huge_page(m->hstate)); 3394 out: 3395 if (!valid) 3396 hstate_boot_nrinvalid[hstate_index(m->hstate)]++; 3397 3398 return valid; 3399 } 3400 3401 /* 3402 * Free a bootmem page that was found to be invalid (intersecting with 3403 * multiple zones). 3404 * 3405 * Since it intersects with multiple zones, we can't just do a free 3406 * operation on all pages at once, but instead have to walk all 3407 * pages, freeing them one by one. 3408 */ 3409 static void __init hugetlb_bootmem_free_invalid_page(int nid, struct page *page, 3410 struct hstate *h) 3411 { 3412 unsigned long npages = pages_per_huge_page(h); 3413 unsigned long pfn; 3414 3415 while (npages--) { 3416 pfn = page_to_pfn(page); 3417 __init_page_from_nid(pfn, nid); 3418 free_reserved_page(page); 3419 page++; 3420 } 3421 } 3422 3423 /* 3424 * Put bootmem huge pages into the standard lists after mem_map is up. 3425 * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages. 3426 */ 3427 static void __init gather_bootmem_prealloc_node(unsigned long nid) 3428 { 3429 LIST_HEAD(folio_list); 3430 struct huge_bootmem_page *m, *tm; 3431 struct hstate *h = NULL, *prev_h = NULL; 3432 3433 list_for_each_entry_safe(m, tm, &huge_boot_pages[nid], list) { 3434 struct page *page = virt_to_page(m); 3435 struct folio *folio = (void *)page; 3436 3437 h = m->hstate; 3438 if (!hugetlb_bootmem_page_zones_valid(nid, m)) { 3439 /* 3440 * Can't use this page. 
Initialize the 3441 * page structures if that hasn't already 3442 * been done, and give them to the page 3443 * allocator. 3444 */ 3445 hugetlb_bootmem_free_invalid_page(nid, page, h); 3446 continue; 3447 } 3448 3449 /* 3450 * It is possible to have multiple huge page sizes (hstates) 3451 * in this list. If so, process each size separately. 3452 */ 3453 if (h != prev_h && prev_h != NULL) 3454 prep_and_add_bootmem_folios(prev_h, &folio_list); 3455 prev_h = h; 3456 3457 VM_BUG_ON(!hstate_is_gigantic(h)); 3458 WARN_ON(folio_ref_count(folio) != 1); 3459 3460 hugetlb_folio_init_vmemmap(folio, h, 3461 HUGETLB_VMEMMAP_RESERVE_PAGES); 3462 init_new_hugetlb_folio(h, folio); 3463 3464 if (hugetlb_bootmem_page_prehvo(m)) 3465 /* 3466 * If pre-HVO was done, just set the 3467 * flag, the HVO code will then skip 3468 * this folio. 3469 */ 3470 folio_set_hugetlb_vmemmap_optimized(folio); 3471 3472 if (hugetlb_bootmem_page_earlycma(m)) 3473 folio_set_hugetlb_cma(folio); 3474 3475 list_add(&folio->lru, &folio_list); 3476 3477 /* 3478 * We need to restore the 'stolen' pages to totalram_pages 3479 * in order to fix confusing memory reports from free(1) and 3480 * other side-effects, like CommitLimit going negative. 3481 * 3482 * For CMA pages, this is done in init_cma_pageblock 3483 * (via hugetlb_bootmem_init_migratetype), so skip it here. 3484 */ 3485 if (!folio_test_hugetlb_cma(folio)) 3486 adjust_managed_page_count(page, pages_per_huge_page(h)); 3487 cond_resched(); 3488 } 3489 3490 prep_and_add_bootmem_folios(h, &folio_list); 3491 } 3492 3493 static void __init gather_bootmem_prealloc_parallel(unsigned long start, 3494 unsigned long end, void *arg) 3495 { 3496 int nid; 3497 3498 for (nid = start; nid < end; nid++) 3499 gather_bootmem_prealloc_node(nid); 3500 } 3501 3502 static void __init gather_bootmem_prealloc(void) 3503 { 3504 struct padata_mt_job job = { 3505 .thread_fn = gather_bootmem_prealloc_parallel, 3506 .fn_arg = NULL, 3507 .start = 0, 3508 .size = nr_node_ids, 3509 .align = 1, 3510 .min_chunk = 1, 3511 .max_threads = num_node_state(N_MEMORY), 3512 .numa_aware = true, 3513 }; 3514 3515 padata_do_multithreaded(&job); 3516 } 3517 3518 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid) 3519 { 3520 unsigned long i; 3521 char buf[32]; 3522 LIST_HEAD(folio_list); 3523 3524 for (i = 0; i < h->max_huge_pages_node[nid]; ++i) { 3525 if (hstate_is_gigantic(h)) { 3526 if (!alloc_bootmem_huge_page(h, nid)) 3527 break; 3528 } else { 3529 struct folio *folio; 3530 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 3531 3532 folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, 3533 &node_states[N_MEMORY], NULL); 3534 if (!folio) 3535 break; 3536 list_add(&folio->lru, &folio_list); 3537 } 3538 cond_resched(); 3539 } 3540 3541 if (!list_empty(&folio_list)) 3542 prep_and_add_allocated_folios(h, &folio_list); 3543 3544 if (i == h->max_huge_pages_node[nid]) 3545 return; 3546 3547 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3548 pr_warn("HugeTLB: allocating %u of page size %s failed node%d. 
Only allocated %lu hugepages.\n",
		h->max_huge_pages_node[nid], buf, nid, i);
	h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
	h->max_huge_pages_node[nid] = i;
}

static bool __init hugetlb_hstate_alloc_pages_specific_nodes(struct hstate *h)
{
	int i;
	bool node_specific_alloc = false;

	for_each_online_node(i) {
		if (h->max_huge_pages_node[i] > 0) {
			hugetlb_hstate_alloc_pages_onenode(h, i);
			node_specific_alloc = true;
		}
	}

	return node_specific_alloc;
}

static void __init hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated, struct hstate *h)
{
	if (allocated < h->max_huge_pages) {
		char buf[32];

		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
			h->max_huge_pages, buf, allocated);
		h->max_huge_pages = allocated;
	}
}

static void __init hugetlb_pages_alloc_boot_node(unsigned long start, unsigned long end, void *arg)
{
	struct hstate *h = (struct hstate *)arg;
	int i, num = end - start;
	nodemask_t node_alloc_noretry;
	LIST_HEAD(folio_list);
	int next_node = first_online_node;

	/* Bit mask controlling how hard we retry per-node allocations. */
	nodes_clear(node_alloc_noretry);

	for (i = 0; i < num; ++i) {
		struct folio *folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
						&node_alloc_noretry, &next_node);
		if (!folio)
			break;

		list_move(&folio->lru, &folio_list);
		cond_resched();
	}

	prep_and_add_allocated_folios(h, &folio_list);
}

static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h)
{
	unsigned long i;

	for (i = 0; i < h->max_huge_pages; ++i) {
		if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
			break;
		cond_resched();
	}

	return i;
}

static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
{
	struct padata_mt_job job = {
		.fn_arg		= h,
		.align		= 1,
		.numa_aware	= true
	};

	unsigned long jiffies_start;
	unsigned long jiffies_end;

	job.thread_fn	= hugetlb_pages_alloc_boot_node;
	job.start	= 0;
	job.size	= h->max_huge_pages;

	/*
	 * job.max_threads is 25% of the available cpu threads by default.
	 *
	 * On large servers with terabytes of memory, huge page allocation
	 * can consume a considerable amount of time.
	 *
	 * Tests below show how long it takes to allocate 1 TiB of memory with
	 * 2MiB huge pages. Using more threads can significantly improve allocation time.
3641 * 3642 * +-----------------------+-------+-------+-------+-------+-------+ 3643 * | threads | 8 | 16 | 32 | 64 | 128 | 3644 * +-----------------------+-------+-------+-------+-------+-------+ 3645 * | skylake 144 cpus | 44s | 22s | 16s | 19s | 20s | 3646 * | cascade lake 192 cpus | 39s | 20s | 11s | 10s | 9s | 3647 * +-----------------------+-------+-------+-------+-------+-------+ 3648 */ 3649 if (hugepage_allocation_threads == 0) { 3650 hugepage_allocation_threads = num_online_cpus() / 4; 3651 hugepage_allocation_threads = max(hugepage_allocation_threads, 1); 3652 } 3653 3654 job.max_threads = hugepage_allocation_threads; 3655 job.min_chunk = h->max_huge_pages / hugepage_allocation_threads; 3656 3657 jiffies_start = jiffies; 3658 padata_do_multithreaded(&job); 3659 jiffies_end = jiffies; 3660 3661 pr_info("HugeTLB: allocation took %dms with hugepage_allocation_threads=%ld\n", 3662 jiffies_to_msecs(jiffies_end - jiffies_start), 3663 hugepage_allocation_threads); 3664 3665 return h->nr_huge_pages; 3666 } 3667 3668 /* 3669 * NOTE: this routine is called in different contexts for gigantic and 3670 * non-gigantic pages. 3671 * - For gigantic pages, this is called early in the boot process and 3672 * pages are allocated from memblock allocated or something similar. 3673 * Gigantic pages are actually added to pools later with the routine 3674 * gather_bootmem_prealloc. 3675 * - For non-gigantic pages, this is called later in the boot process after 3676 * all of mm is up and functional. Pages are allocated from buddy and 3677 * then added to hugetlb pools. 3678 */ 3679 static void __init hugetlb_hstate_alloc_pages(struct hstate *h) 3680 { 3681 unsigned long allocated; 3682 3683 /* 3684 * Skip gigantic hugepages allocation if early CMA 3685 * reservations are not available. 3686 */ 3687 if (hstate_is_gigantic(h) && hugetlb_cma_total_size() && 3688 !hugetlb_early_cma(h)) { 3689 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n"); 3690 return; 3691 } 3692 3693 /* do node specific alloc */ 3694 if (hugetlb_hstate_alloc_pages_specific_nodes(h)) 3695 return; 3696 3697 /* below will do all node balanced alloc */ 3698 if (hstate_is_gigantic(h)) 3699 allocated = hugetlb_gigantic_pages_alloc_boot(h); 3700 else 3701 allocated = hugetlb_pages_alloc_boot(h); 3702 3703 hugetlb_hstate_alloc_pages_errcheck(allocated, h); 3704 } 3705 3706 static void __init hugetlb_init_hstates(void) 3707 { 3708 struct hstate *h, *h2; 3709 3710 for_each_hstate(h) { 3711 /* oversize hugepages were init'ed in early boot */ 3712 if (!hstate_is_gigantic(h)) 3713 hugetlb_hstate_alloc_pages(h); 3714 3715 /* 3716 * Set demote order for each hstate. Note that 3717 * h->demote_order is initially 0. 3718 * - We can not demote gigantic pages if runtime freeing 3719 * is not supported, so skip this. 3720 * - If CMA allocation is possible, we can not demote 3721 * HUGETLB_PAGE_ORDER or smaller size pages. 
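	 *
	 * As an example (x86_64 with 4K base pages, and assuming runtime
	 * freeing of gigantic pages is supported and CMA does not forbid
	 * demotion): the 1G hstate ends up with demote_order == 9, i.e. it
	 * demotes to 2M pages, while the 2M hstate keeps demote_order == 0
	 * because no smaller hstate exists.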
3722 */ 3723 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 3724 continue; 3725 if (hugetlb_cma_total_size() && h->order <= HUGETLB_PAGE_ORDER) 3726 continue; 3727 for_each_hstate(h2) { 3728 if (h2 == h) 3729 continue; 3730 if (h2->order < h->order && 3731 h2->order > h->demote_order) 3732 h->demote_order = h2->order; 3733 } 3734 } 3735 } 3736 3737 static void __init report_hugepages(void) 3738 { 3739 struct hstate *h; 3740 unsigned long nrinvalid; 3741 3742 for_each_hstate(h) { 3743 char buf[32]; 3744 3745 nrinvalid = hstate_boot_nrinvalid[hstate_index(h)]; 3746 h->max_huge_pages -= nrinvalid; 3747 3748 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3749 pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n", 3750 buf, h->free_huge_pages); 3751 if (nrinvalid) 3752 pr_info("HugeTLB: %s page size: %lu invalid page%s discarded\n", 3753 buf, nrinvalid, nrinvalid > 1 ? "s" : ""); 3754 pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n", 3755 hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf); 3756 } 3757 } 3758 3759 #ifdef CONFIG_HIGHMEM 3760 static void try_to_free_low(struct hstate *h, unsigned long count, 3761 nodemask_t *nodes_allowed) 3762 { 3763 int i; 3764 LIST_HEAD(page_list); 3765 3766 lockdep_assert_held(&hugetlb_lock); 3767 if (hstate_is_gigantic(h)) 3768 return; 3769 3770 /* 3771 * Collect pages to be freed on a list, and free after dropping lock 3772 */ 3773 for_each_node_mask(i, *nodes_allowed) { 3774 struct folio *folio, *next; 3775 struct list_head *freel = &h->hugepage_freelists[i]; 3776 list_for_each_entry_safe(folio, next, freel, lru) { 3777 if (count >= h->nr_huge_pages) 3778 goto out; 3779 if (folio_test_highmem(folio)) 3780 continue; 3781 remove_hugetlb_folio(h, folio, false); 3782 list_add(&folio->lru, &page_list); 3783 } 3784 } 3785 3786 out: 3787 spin_unlock_irq(&hugetlb_lock); 3788 update_and_free_pages_bulk(h, &page_list); 3789 spin_lock_irq(&hugetlb_lock); 3790 } 3791 #else 3792 static inline void try_to_free_low(struct hstate *h, unsigned long count, 3793 nodemask_t *nodes_allowed) 3794 { 3795 } 3796 #endif 3797 3798 /* 3799 * Increment or decrement surplus_huge_pages. Keep node-specific counters 3800 * balanced by operating on them in a round-robin fashion. 3801 * Returns 1 if an adjustment was made. 
3802 */ 3803 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, 3804 int delta) 3805 { 3806 int nr_nodes, node; 3807 3808 lockdep_assert_held(&hugetlb_lock); 3809 VM_BUG_ON(delta != -1 && delta != 1); 3810 3811 if (delta < 0) { 3812 for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, nodes_allowed) { 3813 if (h->surplus_huge_pages_node[node]) 3814 goto found; 3815 } 3816 } else { 3817 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 3818 if (h->surplus_huge_pages_node[node] < 3819 h->nr_huge_pages_node[node]) 3820 goto found; 3821 } 3822 } 3823 return 0; 3824 3825 found: 3826 h->surplus_huge_pages += delta; 3827 h->surplus_huge_pages_node[node] += delta; 3828 return 1; 3829 } 3830 3831 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) 3832 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, 3833 nodemask_t *nodes_allowed) 3834 { 3835 unsigned long persistent_free_count; 3836 unsigned long min_count; 3837 unsigned long allocated; 3838 struct folio *folio; 3839 LIST_HEAD(page_list); 3840 NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL); 3841 3842 /* 3843 * Bit mask controlling how hard we retry per-node allocations. 3844 * If we can not allocate the bit mask, do not attempt to allocate 3845 * the requested huge pages. 3846 */ 3847 if (node_alloc_noretry) 3848 nodes_clear(*node_alloc_noretry); 3849 else 3850 return -ENOMEM; 3851 3852 /* 3853 * resize_lock mutex prevents concurrent adjustments to number of 3854 * pages in hstate via the proc/sysfs interfaces. 3855 */ 3856 mutex_lock(&h->resize_lock); 3857 flush_free_hpage_work(h); 3858 spin_lock_irq(&hugetlb_lock); 3859 3860 /* 3861 * Check for a node specific request. 3862 * Changing node specific huge page count may require a corresponding 3863 * change to the global count. In any case, the passed node mask 3864 * (nodes_allowed) will restrict alloc/free to the specified node. 3865 */ 3866 if (nid != NUMA_NO_NODE) { 3867 unsigned long old_count = count; 3868 3869 count += persistent_huge_pages(h) - 3870 (h->nr_huge_pages_node[nid] - 3871 h->surplus_huge_pages_node[nid]); 3872 /* 3873 * User may have specified a large count value which caused the 3874 * above calculation to overflow. In this case, they wanted 3875 * to allocate as many huge pages as possible. Set count to 3876 * largest possible value to align with their intention. 3877 */ 3878 if (count < old_count) 3879 count = ULONG_MAX; 3880 } 3881 3882 /* 3883 * Gigantic pages runtime allocation depend on the capability for large 3884 * page range allocation. 3885 * If the system does not provide this feature, return an error when 3886 * the user tries to allocate gigantic pages but let the user free the 3887 * boottime allocated gigantic pages. 3888 */ 3889 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) { 3890 if (count > persistent_huge_pages(h)) { 3891 spin_unlock_irq(&hugetlb_lock); 3892 mutex_unlock(&h->resize_lock); 3893 NODEMASK_FREE(node_alloc_noretry); 3894 return -EINVAL; 3895 } 3896 /* Fall through to decrease pool */ 3897 } 3898 3899 /* 3900 * Increase the pool size 3901 * First take pages out of surplus state. Then make up the 3902 * remaining difference by allocating fresh huge pages. 3903 * 3904 * We might race with alloc_surplus_hugetlb_folio() here and be unable 3905 * to convert a surplus huge page to a normal huge page. 
That is 3906 * not critical, though, it just means the overall size of the 3907 * pool might be one hugepage larger than it needs to be, but 3908 * within all the constraints specified by the sysctls. 3909 */ 3910 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { 3911 if (!adjust_pool_surplus(h, nodes_allowed, -1)) 3912 break; 3913 } 3914 3915 allocated = 0; 3916 while (count > (persistent_huge_pages(h) + allocated)) { 3917 /* 3918 * If this allocation races such that we no longer need the 3919 * page, free_huge_folio will handle it by freeing the page 3920 * and reducing the surplus. 3921 */ 3922 spin_unlock_irq(&hugetlb_lock); 3923 3924 /* yield cpu to avoid soft lockup */ 3925 cond_resched(); 3926 3927 folio = alloc_pool_huge_folio(h, nodes_allowed, 3928 node_alloc_noretry, 3929 &h->next_nid_to_alloc); 3930 if (!folio) { 3931 prep_and_add_allocated_folios(h, &page_list); 3932 spin_lock_irq(&hugetlb_lock); 3933 goto out; 3934 } 3935 3936 list_add(&folio->lru, &page_list); 3937 allocated++; 3938 3939 /* Bail for signals. Probably ctrl-c from user */ 3940 if (signal_pending(current)) { 3941 prep_and_add_allocated_folios(h, &page_list); 3942 spin_lock_irq(&hugetlb_lock); 3943 goto out; 3944 } 3945 3946 spin_lock_irq(&hugetlb_lock); 3947 } 3948 3949 /* Add allocated pages to the pool */ 3950 if (!list_empty(&page_list)) { 3951 spin_unlock_irq(&hugetlb_lock); 3952 prep_and_add_allocated_folios(h, &page_list); 3953 spin_lock_irq(&hugetlb_lock); 3954 } 3955 3956 /* 3957 * Decrease the pool size 3958 * First return free pages to the buddy allocator (being careful 3959 * to keep enough around to satisfy reservations). Then place 3960 * pages into surplus state as needed so the pool will shrink 3961 * to the desired size as pages become free. 3962 * 3963 * By placing pages into the surplus state independent of the 3964 * overcommit value, we are allowing the surplus pool size to 3965 * exceed overcommit. There are few sane options here. Since 3966 * alloc_surplus_hugetlb_folio() is checking the global counter, 3967 * though, we'll note that we're not allowed to exceed surplus 3968 * and won't grow the pool anywhere else. Not until one of the 3969 * sysctls are changed, or the surplus pages go out of use. 3970 * 3971 * min_count is the expected number of persistent pages, we 3972 * shouldn't calculate min_count by using 3973 * resv_huge_pages + persistent_huge_pages() - free_huge_pages, 3974 * because there may exist free surplus huge pages, and this will 3975 * lead to subtracting twice. Free surplus huge pages come from HVO 3976 * failing to restore vmemmap, see comments in the callers of 3977 * hugetlb_vmemmap_restore_folio(). Thus, we should calculate 3978 * persistent free count first. 
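 * A purely illustrative example of the computation below: with
 * nr_huge_pages = 10 and surplus_huge_pages = 3 (i.e. 7 persistent pages),
 * free_huge_pages = 8 and resv_huge_pages = 2, the persistent free count
 * is 8 - 3 = 5 and min_count = 2 + 7 - 5 = 4, before being raised to at
 * least the requested count.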
3979 */ 3980 persistent_free_count = h->free_huge_pages; 3981 if (h->free_huge_pages > persistent_huge_pages(h)) { 3982 if (h->free_huge_pages > h->surplus_huge_pages) 3983 persistent_free_count -= h->surplus_huge_pages; 3984 else 3985 persistent_free_count = 0; 3986 } 3987 min_count = h->resv_huge_pages + persistent_huge_pages(h) - persistent_free_count; 3988 min_count = max(count, min_count); 3989 try_to_free_low(h, min_count, nodes_allowed); 3990 3991 /* 3992 * Collect pages to be removed on list without dropping lock 3993 */ 3994 while (min_count < persistent_huge_pages(h)) { 3995 folio = remove_pool_hugetlb_folio(h, nodes_allowed, 0); 3996 if (!folio) 3997 break; 3998 3999 list_add(&folio->lru, &page_list); 4000 } 4001 /* free the pages after dropping lock */ 4002 spin_unlock_irq(&hugetlb_lock); 4003 update_and_free_pages_bulk(h, &page_list); 4004 flush_free_hpage_work(h); 4005 spin_lock_irq(&hugetlb_lock); 4006 4007 while (count < persistent_huge_pages(h)) { 4008 if (!adjust_pool_surplus(h, nodes_allowed, 1)) 4009 break; 4010 } 4011 out: 4012 h->max_huge_pages = persistent_huge_pages(h); 4013 spin_unlock_irq(&hugetlb_lock); 4014 mutex_unlock(&h->resize_lock); 4015 4016 NODEMASK_FREE(node_alloc_noretry); 4017 4018 return 0; 4019 } 4020 4021 static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst, 4022 struct list_head *src_list) 4023 { 4024 long rc; 4025 struct folio *folio, *next; 4026 LIST_HEAD(dst_list); 4027 LIST_HEAD(ret_list); 4028 4029 rc = hugetlb_vmemmap_restore_folios(src, src_list, &ret_list); 4030 list_splice_init(&ret_list, src_list); 4031 4032 /* 4033 * Taking target hstate mutex synchronizes with set_max_huge_pages. 4034 * Without the mutex, pages added to target hstate could be marked 4035 * as surplus. 4036 * 4037 * Note that we already hold src->resize_lock. To prevent deadlock, 4038 * use the convention of always taking larger size hstate mutex first. 
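 * For example (illustrative): when demoting 1 GiB folios to 2 MiB folios,
 * the caller already holds the 1 GiB hstate's resize_lock and only the
 * smaller 2 MiB hstate's resize_lock is taken here, never the reverse.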
4039 */ 4040 mutex_lock(&dst->resize_lock); 4041 4042 list_for_each_entry_safe(folio, next, src_list, lru) { 4043 int i; 4044 bool cma; 4045 4046 if (folio_test_hugetlb_vmemmap_optimized(folio)) 4047 continue; 4048 4049 cma = folio_test_hugetlb_cma(folio); 4050 4051 list_del(&folio->lru); 4052 4053 split_page_owner(&folio->page, huge_page_order(src), huge_page_order(dst)); 4054 pgalloc_tag_split(folio, huge_page_order(src), huge_page_order(dst)); 4055 4056 for (i = 0; i < pages_per_huge_page(src); i += pages_per_huge_page(dst)) { 4057 struct page *page = folio_page(folio, i); 4058 /* Careful: see __split_huge_page_tail() */ 4059 struct folio *new_folio = (struct folio *)page; 4060 4061 clear_compound_head(page); 4062 prep_compound_page(page, dst->order); 4063 4064 new_folio->mapping = NULL; 4065 init_new_hugetlb_folio(dst, new_folio); 4066 /* Copy the CMA flag so that it is freed correctly */ 4067 if (cma) 4068 folio_set_hugetlb_cma(new_folio); 4069 list_add(&new_folio->lru, &dst_list); 4070 } 4071 } 4072 4073 prep_and_add_allocated_folios(dst, &dst_list); 4074 4075 mutex_unlock(&dst->resize_lock); 4076 4077 return rc; 4078 } 4079 4080 static long demote_pool_huge_page(struct hstate *src, nodemask_t *nodes_allowed, 4081 unsigned long nr_to_demote) 4082 __must_hold(&hugetlb_lock) 4083 { 4084 int nr_nodes, node; 4085 struct hstate *dst; 4086 long rc = 0; 4087 long nr_demoted = 0; 4088 4089 lockdep_assert_held(&hugetlb_lock); 4090 4091 /* We should never get here if no demote order */ 4092 if (!src->demote_order) { 4093 pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n"); 4094 return -EINVAL; /* internal error */ 4095 } 4096 dst = size_to_hstate(PAGE_SIZE << src->demote_order); 4097 4098 for_each_node_mask_to_free(src, nr_nodes, node, nodes_allowed) { 4099 LIST_HEAD(list); 4100 struct folio *folio, *next; 4101 4102 list_for_each_entry_safe(folio, next, &src->hugepage_freelists[node], lru) { 4103 if (folio_test_hwpoison(folio)) 4104 continue; 4105 4106 remove_hugetlb_folio(src, folio, false); 4107 list_add(&folio->lru, &list); 4108 4109 if (++nr_demoted == nr_to_demote) 4110 break; 4111 } 4112 4113 spin_unlock_irq(&hugetlb_lock); 4114 4115 rc = demote_free_hugetlb_folios(src, dst, &list); 4116 4117 spin_lock_irq(&hugetlb_lock); 4118 4119 list_for_each_entry_safe(folio, next, &list, lru) { 4120 list_del(&folio->lru); 4121 add_hugetlb_folio(src, folio, false); 4122 4123 nr_demoted--; 4124 } 4125 4126 if (rc < 0 || nr_demoted == nr_to_demote) 4127 break; 4128 } 4129 4130 /* 4131 * Not absolutely necessary, but for consistency update max_huge_pages 4132 * based on pool changes for the demoted page. 4133 */ 4134 src->max_huge_pages -= nr_demoted; 4135 dst->max_huge_pages += nr_demoted << (huge_page_order(src) - huge_page_order(dst)); 4136 4137 if (rc < 0) 4138 return rc; 4139 4140 if (nr_demoted) 4141 return nr_demoted; 4142 /* 4143 * Only way to get here is if all pages on free lists are poisoned. 4144 * Return -EBUSY so that caller will not retry. 
4145 */ 4146 return -EBUSY; 4147 } 4148 4149 #define HSTATE_ATTR_RO(_name) \ 4150 static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 4151 4152 #define HSTATE_ATTR_WO(_name) \ 4153 static struct kobj_attribute _name##_attr = __ATTR_WO(_name) 4154 4155 #define HSTATE_ATTR(_name) \ 4156 static struct kobj_attribute _name##_attr = __ATTR_RW(_name) 4157 4158 static struct kobject *hugepages_kobj; 4159 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 4160 4161 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); 4162 4163 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) 4164 { 4165 int i; 4166 4167 for (i = 0; i < HUGE_MAX_HSTATE; i++) 4168 if (hstate_kobjs[i] == kobj) { 4169 if (nidp) 4170 *nidp = NUMA_NO_NODE; 4171 return &hstates[i]; 4172 } 4173 4174 return kobj_to_node_hstate(kobj, nidp); 4175 } 4176 4177 static ssize_t nr_hugepages_show_common(struct kobject *kobj, 4178 struct kobj_attribute *attr, char *buf) 4179 { 4180 struct hstate *h; 4181 unsigned long nr_huge_pages; 4182 int nid; 4183 4184 h = kobj_to_hstate(kobj, &nid); 4185 if (nid == NUMA_NO_NODE) 4186 nr_huge_pages = h->nr_huge_pages; 4187 else 4188 nr_huge_pages = h->nr_huge_pages_node[nid]; 4189 4190 return sysfs_emit(buf, "%lu\n", nr_huge_pages); 4191 } 4192 4193 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy, 4194 struct hstate *h, int nid, 4195 unsigned long count, size_t len) 4196 { 4197 int err; 4198 nodemask_t nodes_allowed, *n_mask; 4199 4200 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 4201 return -EINVAL; 4202 4203 if (nid == NUMA_NO_NODE) { 4204 /* 4205 * global hstate attribute 4206 */ 4207 if (!(obey_mempolicy && 4208 init_nodemask_of_mempolicy(&nodes_allowed))) 4209 n_mask = &node_states[N_MEMORY]; 4210 else 4211 n_mask = &nodes_allowed; 4212 } else { 4213 /* 4214 * Node specific request. count adjustment happens in 4215 * set_max_huge_pages() after acquiring hugetlb_lock. 4216 */ 4217 init_nodemask_of_node(&nodes_allowed, nid); 4218 n_mask = &nodes_allowed; 4219 } 4220 4221 err = set_max_huge_pages(h, count, nid, n_mask); 4222 4223 return err ? err : len; 4224 } 4225 4226 static ssize_t nr_hugepages_store_common(bool obey_mempolicy, 4227 struct kobject *kobj, const char *buf, 4228 size_t len) 4229 { 4230 struct hstate *h; 4231 unsigned long count; 4232 int nid; 4233 int err; 4234 4235 err = kstrtoul(buf, 10, &count); 4236 if (err) 4237 return err; 4238 4239 h = kobj_to_hstate(kobj, &nid); 4240 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); 4241 } 4242 4243 static ssize_t nr_hugepages_show(struct kobject *kobj, 4244 struct kobj_attribute *attr, char *buf) 4245 { 4246 return nr_hugepages_show_common(kobj, attr, buf); 4247 } 4248 4249 static ssize_t nr_hugepages_store(struct kobject *kobj, 4250 struct kobj_attribute *attr, const char *buf, size_t len) 4251 { 4252 return nr_hugepages_store_common(false, kobj, buf, len); 4253 } 4254 HSTATE_ATTR(nr_hugepages); 4255 4256 #ifdef CONFIG_NUMA 4257 4258 /* 4259 * hstate attribute for optionally mempolicy-based constraint on persistent 4260 * huge page alloc/free. 
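 * An illustrative invocation (the path assumes a 2 MiB hstate):
 *   numactl -m 0 sh -c 'echo 32 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy'
 * constrains the pool adjustment to node 0 via the task mempolicy, whereas
 * writing plain nr_hugepages ignores the task mempolicy.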
4261 */ 4262 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, 4263 struct kobj_attribute *attr, 4264 char *buf) 4265 { 4266 return nr_hugepages_show_common(kobj, attr, buf); 4267 } 4268 4269 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, 4270 struct kobj_attribute *attr, const char *buf, size_t len) 4271 { 4272 return nr_hugepages_store_common(true, kobj, buf, len); 4273 } 4274 HSTATE_ATTR(nr_hugepages_mempolicy); 4275 #endif 4276 4277 4278 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 4279 struct kobj_attribute *attr, char *buf) 4280 { 4281 struct hstate *h = kobj_to_hstate(kobj, NULL); 4282 return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages); 4283 } 4284 4285 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 4286 struct kobj_attribute *attr, const char *buf, size_t count) 4287 { 4288 int err; 4289 unsigned long input; 4290 struct hstate *h = kobj_to_hstate(kobj, NULL); 4291 4292 if (hstate_is_gigantic(h)) 4293 return -EINVAL; 4294 4295 err = kstrtoul(buf, 10, &input); 4296 if (err) 4297 return err; 4298 4299 spin_lock_irq(&hugetlb_lock); 4300 h->nr_overcommit_huge_pages = input; 4301 spin_unlock_irq(&hugetlb_lock); 4302 4303 return count; 4304 } 4305 HSTATE_ATTR(nr_overcommit_hugepages); 4306 4307 static ssize_t free_hugepages_show(struct kobject *kobj, 4308 struct kobj_attribute *attr, char *buf) 4309 { 4310 struct hstate *h; 4311 unsigned long free_huge_pages; 4312 int nid; 4313 4314 h = kobj_to_hstate(kobj, &nid); 4315 if (nid == NUMA_NO_NODE) 4316 free_huge_pages = h->free_huge_pages; 4317 else 4318 free_huge_pages = h->free_huge_pages_node[nid]; 4319 4320 return sysfs_emit(buf, "%lu\n", free_huge_pages); 4321 } 4322 HSTATE_ATTR_RO(free_hugepages); 4323 4324 static ssize_t resv_hugepages_show(struct kobject *kobj, 4325 struct kobj_attribute *attr, char *buf) 4326 { 4327 struct hstate *h = kobj_to_hstate(kobj, NULL); 4328 return sysfs_emit(buf, "%lu\n", h->resv_huge_pages); 4329 } 4330 HSTATE_ATTR_RO(resv_hugepages); 4331 4332 static ssize_t surplus_hugepages_show(struct kobject *kobj, 4333 struct kobj_attribute *attr, char *buf) 4334 { 4335 struct hstate *h; 4336 unsigned long surplus_huge_pages; 4337 int nid; 4338 4339 h = kobj_to_hstate(kobj, &nid); 4340 if (nid == NUMA_NO_NODE) 4341 surplus_huge_pages = h->surplus_huge_pages; 4342 else 4343 surplus_huge_pages = h->surplus_huge_pages_node[nid]; 4344 4345 return sysfs_emit(buf, "%lu\n", surplus_huge_pages); 4346 } 4347 HSTATE_ATTR_RO(surplus_hugepages); 4348 4349 static ssize_t demote_store(struct kobject *kobj, 4350 struct kobj_attribute *attr, const char *buf, size_t len) 4351 { 4352 unsigned long nr_demote; 4353 unsigned long nr_available; 4354 nodemask_t nodes_allowed, *n_mask; 4355 struct hstate *h; 4356 int err; 4357 int nid; 4358 4359 err = kstrtoul(buf, 10, &nr_demote); 4360 if (err) 4361 return err; 4362 h = kobj_to_hstate(kobj, &nid); 4363 4364 if (nid != NUMA_NO_NODE) { 4365 init_nodemask_of_node(&nodes_allowed, nid); 4366 n_mask = &nodes_allowed; 4367 } else { 4368 n_mask = &node_states[N_MEMORY]; 4369 } 4370 4371 /* Synchronize with other sysfs operations modifying huge pages */ 4372 mutex_lock(&h->resize_lock); 4373 spin_lock_irq(&hugetlb_lock); 4374 4375 while (nr_demote) { 4376 long rc; 4377 4378 /* 4379 * Check for available pages to demote each time thorough the 4380 * loop as demote_pool_huge_page will drop hugetlb_lock. 
4381 */ 4382 if (nid != NUMA_NO_NODE) 4383 nr_available = h->free_huge_pages_node[nid]; 4384 else 4385 nr_available = h->free_huge_pages; 4386 nr_available -= h->resv_huge_pages; 4387 if (!nr_available) 4388 break; 4389 4390 rc = demote_pool_huge_page(h, n_mask, nr_demote); 4391 if (rc < 0) { 4392 err = rc; 4393 break; 4394 } 4395 4396 nr_demote -= rc; 4397 } 4398 4399 spin_unlock_irq(&hugetlb_lock); 4400 mutex_unlock(&h->resize_lock); 4401 4402 if (err) 4403 return err; 4404 return len; 4405 } 4406 HSTATE_ATTR_WO(demote); 4407 4408 static ssize_t demote_size_show(struct kobject *kobj, 4409 struct kobj_attribute *attr, char *buf) 4410 { 4411 struct hstate *h = kobj_to_hstate(kobj, NULL); 4412 unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K; 4413 4414 return sysfs_emit(buf, "%lukB\n", demote_size); 4415 } 4416 4417 static ssize_t demote_size_store(struct kobject *kobj, 4418 struct kobj_attribute *attr, 4419 const char *buf, size_t count) 4420 { 4421 struct hstate *h, *demote_hstate; 4422 unsigned long demote_size; 4423 unsigned int demote_order; 4424 4425 demote_size = (unsigned long)memparse(buf, NULL); 4426 4427 demote_hstate = size_to_hstate(demote_size); 4428 if (!demote_hstate) 4429 return -EINVAL; 4430 demote_order = demote_hstate->order; 4431 if (demote_order < HUGETLB_PAGE_ORDER) 4432 return -EINVAL; 4433 4434 /* demote order must be smaller than hstate order */ 4435 h = kobj_to_hstate(kobj, NULL); 4436 if (demote_order >= h->order) 4437 return -EINVAL; 4438 4439 /* resize_lock synchronizes access to demote size and writes */ 4440 mutex_lock(&h->resize_lock); 4441 h->demote_order = demote_order; 4442 mutex_unlock(&h->resize_lock); 4443 4444 return count; 4445 } 4446 HSTATE_ATTR(demote_size); 4447 4448 static struct attribute *hstate_attrs[] = { 4449 &nr_hugepages_attr.attr, 4450 &nr_overcommit_hugepages_attr.attr, 4451 &free_hugepages_attr.attr, 4452 &resv_hugepages_attr.attr, 4453 &surplus_hugepages_attr.attr, 4454 #ifdef CONFIG_NUMA 4455 &nr_hugepages_mempolicy_attr.attr, 4456 #endif 4457 NULL, 4458 }; 4459 4460 static const struct attribute_group hstate_attr_group = { 4461 .attrs = hstate_attrs, 4462 }; 4463 4464 static struct attribute *hstate_demote_attrs[] = { 4465 &demote_size_attr.attr, 4466 &demote_attr.attr, 4467 NULL, 4468 }; 4469 4470 static const struct attribute_group hstate_demote_attr_group = { 4471 .attrs = hstate_demote_attrs, 4472 }; 4473 4474 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, 4475 struct kobject **hstate_kobjs, 4476 const struct attribute_group *hstate_attr_group) 4477 { 4478 int retval; 4479 int hi = hstate_index(h); 4480 4481 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); 4482 if (!hstate_kobjs[hi]) 4483 return -ENOMEM; 4484 4485 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); 4486 if (retval) { 4487 kobject_put(hstate_kobjs[hi]); 4488 hstate_kobjs[hi] = NULL; 4489 return retval; 4490 } 4491 4492 if (h->demote_order) { 4493 retval = sysfs_create_group(hstate_kobjs[hi], 4494 &hstate_demote_attr_group); 4495 if (retval) { 4496 pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name); 4497 sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group); 4498 kobject_put(hstate_kobjs[hi]); 4499 hstate_kobjs[hi] = NULL; 4500 return retval; 4501 } 4502 } 4503 4504 return 0; 4505 } 4506 4507 #ifdef CONFIG_NUMA 4508 static bool hugetlb_sysfs_initialized __ro_after_init; 4509 4510 /* 4511 * node_hstate/s - associate per node hstate attributes, via their kobjects, 4512 * 
with node devices in node_devices[] using a parallel array. The array 4513 * index of a node device or _hstate == node id. 4514 * This is here to avoid any static dependency of the node device driver, in 4515 * the base kernel, on the hugetlb module. 4516 */ 4517 struct node_hstate { 4518 struct kobject *hugepages_kobj; 4519 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 4520 }; 4521 static struct node_hstate node_hstates[MAX_NUMNODES]; 4522 4523 /* 4524 * A subset of global hstate attributes for node devices 4525 */ 4526 static struct attribute *per_node_hstate_attrs[] = { 4527 &nr_hugepages_attr.attr, 4528 &free_hugepages_attr.attr, 4529 &surplus_hugepages_attr.attr, 4530 NULL, 4531 }; 4532 4533 static const struct attribute_group per_node_hstate_attr_group = { 4534 .attrs = per_node_hstate_attrs, 4535 }; 4536 4537 /* 4538 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. 4539 * Returns node id via non-NULL nidp. 4540 */ 4541 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 4542 { 4543 int nid; 4544 4545 for (nid = 0; nid < nr_node_ids; nid++) { 4546 struct node_hstate *nhs = &node_hstates[nid]; 4547 int i; 4548 for (i = 0; i < HUGE_MAX_HSTATE; i++) 4549 if (nhs->hstate_kobjs[i] == kobj) { 4550 if (nidp) 4551 *nidp = nid; 4552 return &hstates[i]; 4553 } 4554 } 4555 4556 BUG(); 4557 return NULL; 4558 } 4559 4560 /* 4561 * Unregister hstate attributes from a single node device. 4562 * No-op if no hstate attributes attached. 4563 */ 4564 void hugetlb_unregister_node(struct node *node) 4565 { 4566 struct hstate *h; 4567 struct node_hstate *nhs = &node_hstates[node->dev.id]; 4568 4569 if (!nhs->hugepages_kobj) 4570 return; /* no hstate attributes */ 4571 4572 for_each_hstate(h) { 4573 int idx = hstate_index(h); 4574 struct kobject *hstate_kobj = nhs->hstate_kobjs[idx]; 4575 4576 if (!hstate_kobj) 4577 continue; 4578 if (h->demote_order) 4579 sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group); 4580 sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group); 4581 kobject_put(hstate_kobj); 4582 nhs->hstate_kobjs[idx] = NULL; 4583 } 4584 4585 kobject_put(nhs->hugepages_kobj); 4586 nhs->hugepages_kobj = NULL; 4587 } 4588 4589 4590 /* 4591 * Register hstate attributes for a single node device. 4592 * No-op if attributes already registered. 4593 */ 4594 void hugetlb_register_node(struct node *node) 4595 { 4596 struct hstate *h; 4597 struct node_hstate *nhs = &node_hstates[node->dev.id]; 4598 int err; 4599 4600 if (!hugetlb_sysfs_initialized) 4601 return; 4602 4603 if (nhs->hugepages_kobj) 4604 return; /* already allocated */ 4605 4606 nhs->hugepages_kobj = kobject_create_and_add("hugepages", 4607 &node->dev.kobj); 4608 if (!nhs->hugepages_kobj) 4609 return; 4610 4611 for_each_hstate(h) { 4612 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, 4613 nhs->hstate_kobjs, 4614 &per_node_hstate_attr_group); 4615 if (err) { 4616 pr_err("HugeTLB: Unable to add hstate %s for node %d\n", 4617 h->name, node->dev.id); 4618 hugetlb_unregister_node(node); 4619 break; 4620 } 4621 } 4622 } 4623 4624 /* 4625 * hugetlb init time: register hstate attributes for all registered node 4626 * devices of nodes that have memory. All on-line nodes should have 4627 * registered their associated device by this time. 
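 * The per node attributes then show up under paths such as (illustrative,
 * assuming a 2 MiB hstate):
 *   /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages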
4628 */ 4629 static void __init hugetlb_register_all_nodes(void) 4630 { 4631 int nid; 4632 4633 for_each_online_node(nid) 4634 hugetlb_register_node(node_devices[nid]); 4635 } 4636 #else /* !CONFIG_NUMA */ 4637 4638 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 4639 { 4640 BUG(); 4641 if (nidp) 4642 *nidp = -1; 4643 return NULL; 4644 } 4645 4646 static void hugetlb_register_all_nodes(void) { } 4647 4648 #endif 4649 4650 static void __init hugetlb_sysfs_init(void) 4651 { 4652 struct hstate *h; 4653 int err; 4654 4655 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 4656 if (!hugepages_kobj) 4657 return; 4658 4659 for_each_hstate(h) { 4660 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, 4661 hstate_kobjs, &hstate_attr_group); 4662 if (err) 4663 pr_err("HugeTLB: Unable to add hstate %s\n", h->name); 4664 } 4665 4666 #ifdef CONFIG_NUMA 4667 hugetlb_sysfs_initialized = true; 4668 #endif 4669 hugetlb_register_all_nodes(); 4670 } 4671 4672 #ifdef CONFIG_SYSCTL 4673 static void hugetlb_sysctl_init(void); 4674 #else 4675 static inline void hugetlb_sysctl_init(void) { } 4676 #endif 4677 4678 static int __init hugetlb_init(void) 4679 { 4680 int i; 4681 4682 BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE < 4683 __NR_HPAGEFLAGS); 4684 4685 if (!hugepages_supported()) { 4686 if (hugetlb_max_hstate || default_hstate_max_huge_pages) 4687 pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n"); 4688 return 0; 4689 } 4690 4691 /* 4692 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some 4693 * architectures depend on setup being done here. 4694 */ 4695 hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 4696 if (!parsed_default_hugepagesz) { 4697 /* 4698 * If we did not parse a default huge page size, set 4699 * default_hstate_idx to HPAGE_SIZE hstate. And, if the 4700 * number of huge pages for this default size was implicitly 4701 * specified, set that here as well. 4702 * Note that the implicit setting will overwrite an explicit 4703 * setting. A warning will be printed in this case. 
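 * An illustrative example (assuming a 2 MiB default size): with
 * "hugepages=256 hugepagesz=2M hugepages=128" on the command line, both
 * values end up targeting the default hstate; the implicit 256 wins and
 * the explicit 128 is ignored with a warning.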
4704 */ 4705 default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE)); 4706 if (default_hstate_max_huge_pages) { 4707 if (default_hstate.max_huge_pages) { 4708 char buf[32]; 4709 4710 string_get_size(huge_page_size(&default_hstate), 4711 1, STRING_UNITS_2, buf, 32); 4712 pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n", 4713 default_hstate.max_huge_pages, buf); 4714 pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n", 4715 default_hstate_max_huge_pages); 4716 } 4717 default_hstate.max_huge_pages = 4718 default_hstate_max_huge_pages; 4719 4720 for_each_online_node(i) 4721 default_hstate.max_huge_pages_node[i] = 4722 default_hugepages_in_node[i]; 4723 } 4724 } 4725 4726 hugetlb_cma_check(); 4727 hugetlb_init_hstates(); 4728 gather_bootmem_prealloc(); 4729 report_hugepages(); 4730 4731 hugetlb_sysfs_init(); 4732 hugetlb_cgroup_file_init(); 4733 hugetlb_sysctl_init(); 4734 4735 #ifdef CONFIG_SMP 4736 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus()); 4737 #else 4738 num_fault_mutexes = 1; 4739 #endif 4740 hugetlb_fault_mutex_table = 4741 kmalloc_array(num_fault_mutexes, sizeof(struct mutex), 4742 GFP_KERNEL); 4743 BUG_ON(!hugetlb_fault_mutex_table); 4744 4745 for (i = 0; i < num_fault_mutexes; i++) 4746 mutex_init(&hugetlb_fault_mutex_table[i]); 4747 return 0; 4748 } 4749 subsys_initcall(hugetlb_init); 4750 4751 /* Overwritten by architectures with more huge page sizes */ 4752 bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size) 4753 { 4754 return size == HPAGE_SIZE; 4755 } 4756 4757 void __init hugetlb_add_hstate(unsigned int order) 4758 { 4759 struct hstate *h; 4760 unsigned long i; 4761 4762 if (size_to_hstate(PAGE_SIZE << order)) { 4763 return; 4764 } 4765 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE); 4766 BUG_ON(order < order_base_2(__NR_USED_SUBPAGE)); 4767 h = &hstates[hugetlb_max_hstate++]; 4768 __mutex_init(&h->resize_lock, "resize mutex", &h->resize_key); 4769 h->order = order; 4770 h->mask = ~(huge_page_size(h) - 1); 4771 for (i = 0; i < MAX_NUMNODES; ++i) 4772 INIT_LIST_HEAD(&h->hugepage_freelists[i]); 4773 INIT_LIST_HEAD(&h->hugepage_activelist); 4774 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", 4775 huge_page_size(h)/SZ_1K); 4776 4777 parsed_hstate = h; 4778 } 4779 4780 bool __init __weak hugetlb_node_alloc_supported(void) 4781 { 4782 return true; 4783 } 4784 4785 static void __init hugepages_clear_pages_in_node(void) 4786 { 4787 if (!hugetlb_max_hstate) { 4788 default_hstate_max_huge_pages = 0; 4789 memset(default_hugepages_in_node, 0, 4790 sizeof(default_hugepages_in_node)); 4791 } else { 4792 parsed_hstate->max_huge_pages = 0; 4793 memset(parsed_hstate->max_huge_pages_node, 0, 4794 sizeof(parsed_hstate->max_huge_pages_node)); 4795 } 4796 } 4797 4798 static __init int hugetlb_add_param(char *s, int (*setup)(char *)) 4799 { 4800 size_t len; 4801 char *p; 4802 4803 if (hugetlb_param_index >= HUGE_MAX_CMDLINE_ARGS) 4804 return -EINVAL; 4805 4806 len = strlen(s) + 1; 4807 if (len + hstate_cmdline_index > sizeof(hstate_cmdline_buf)) 4808 return -EINVAL; 4809 4810 p = &hstate_cmdline_buf[hstate_cmdline_index]; 4811 memcpy(p, s, len); 4812 hstate_cmdline_index += len; 4813 4814 hugetlb_params[hugetlb_param_index].val = p; 4815 hugetlb_params[hugetlb_param_index].setup = setup; 4816 4817 hugetlb_param_index++; 4818 4819 return 0; 4820 } 4821 4822 static __init void hugetlb_parse_params(void) 4823 { 4824 int i; 4825 struct hugetlb_cmdline *hcp; 4826 4827 for (i = 0; i < hugetlb_param_index; 
i++) { 4828 hcp = &hugetlb_params[i]; 4829 4830 hcp->setup(hcp->val); 4831 } 4832 4833 hugetlb_cma_validate_params(); 4834 } 4835 4836 /* 4837 * hugepages command line processing 4838 * hugepages normally follows a valid hugepagsz or default_hugepagsz 4839 * specification. If not, ignore the hugepages value. hugepages can also 4840 * be the first huge page command line option in which case it implicitly 4841 * specifies the number of huge pages for the default size. 4842 */ 4843 static int __init hugepages_setup(char *s) 4844 { 4845 unsigned long *mhp; 4846 static unsigned long *last_mhp; 4847 int node = NUMA_NO_NODE; 4848 int count; 4849 unsigned long tmp; 4850 char *p = s; 4851 4852 if (!parsed_valid_hugepagesz) { 4853 pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s); 4854 parsed_valid_hugepagesz = true; 4855 return -EINVAL; 4856 } 4857 4858 /* 4859 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter 4860 * yet, so this hugepages= parameter goes to the "default hstate". 4861 * Otherwise, it goes with the previously parsed hugepagesz or 4862 * default_hugepagesz. 4863 */ 4864 else if (!hugetlb_max_hstate) 4865 mhp = &default_hstate_max_huge_pages; 4866 else 4867 mhp = &parsed_hstate->max_huge_pages; 4868 4869 if (mhp == last_mhp) { 4870 pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s); 4871 return 1; 4872 } 4873 4874 while (*p) { 4875 count = 0; 4876 if (sscanf(p, "%lu%n", &tmp, &count) != 1) 4877 goto invalid; 4878 /* Parameter is node format */ 4879 if (p[count] == ':') { 4880 if (!hugetlb_node_alloc_supported()) { 4881 pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n"); 4882 return 1; 4883 } 4884 if (tmp >= MAX_NUMNODES || !node_online(tmp)) 4885 goto invalid; 4886 node = array_index_nospec(tmp, MAX_NUMNODES); 4887 p += count + 1; 4888 /* Parse hugepages */ 4889 if (sscanf(p, "%lu%n", &tmp, &count) != 1) 4890 goto invalid; 4891 if (!hugetlb_max_hstate) 4892 default_hugepages_in_node[node] = tmp; 4893 else 4894 parsed_hstate->max_huge_pages_node[node] = tmp; 4895 *mhp += tmp; 4896 /* Go to parse next node*/ 4897 if (p[count] == ',') 4898 p += count + 1; 4899 else 4900 break; 4901 } else { 4902 if (p != s) 4903 goto invalid; 4904 *mhp = tmp; 4905 break; 4906 } 4907 } 4908 4909 last_mhp = mhp; 4910 4911 return 0; 4912 4913 invalid: 4914 pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p); 4915 hugepages_clear_pages_in_node(); 4916 return -EINVAL; 4917 } 4918 hugetlb_early_param("hugepages", hugepages_setup); 4919 4920 /* 4921 * hugepagesz command line processing 4922 * A specific huge page size can only be specified once with hugepagesz. 4923 * hugepagesz is followed by hugepages on the command line. The global 4924 * variable 'parsed_valid_hugepagesz' is used to determine if prior 4925 * hugepagesz argument was valid. 4926 */ 4927 static int __init hugepagesz_setup(char *s) 4928 { 4929 unsigned long size; 4930 struct hstate *h; 4931 4932 parsed_valid_hugepagesz = false; 4933 size = (unsigned long)memparse(s, NULL); 4934 4935 if (!arch_hugetlb_valid_size(size)) { 4936 pr_err("HugeTLB: unsupported hugepagesz=%s\n", s); 4937 return -EINVAL; 4938 } 4939 4940 h = size_to_hstate(size); 4941 if (h) { 4942 /* 4943 * hstate for this size already exists. This is normally 4944 * an error, but is allowed if the existing hstate is the 4945 * default hstate. 
More specifically, it is only allowed if 4946 * the number of huge pages for the default hstate was not 4947 * previously specified. 4948 */ 4949 if (!parsed_default_hugepagesz || h != &default_hstate || 4950 default_hstate.max_huge_pages) { 4951 pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s); 4952 return -EINVAL; 4953 } 4954 4955 /* 4956 * No need to call hugetlb_add_hstate() as hstate already 4957 * exists. But, do set parsed_hstate so that a following 4958 * hugepages= parameter will be applied to this hstate. 4959 */ 4960 parsed_hstate = h; 4961 parsed_valid_hugepagesz = true; 4962 return 0; 4963 } 4964 4965 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); 4966 parsed_valid_hugepagesz = true; 4967 return 0; 4968 } 4969 hugetlb_early_param("hugepagesz", hugepagesz_setup); 4970 4971 /* 4972 * default_hugepagesz command line input 4973 * Only one instance of default_hugepagesz allowed on command line. 4974 */ 4975 static int __init default_hugepagesz_setup(char *s) 4976 { 4977 unsigned long size; 4978 int i; 4979 4980 parsed_valid_hugepagesz = false; 4981 if (parsed_default_hugepagesz) { 4982 pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s); 4983 return -EINVAL; 4984 } 4985 4986 size = (unsigned long)memparse(s, NULL); 4987 4988 if (!arch_hugetlb_valid_size(size)) { 4989 pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s); 4990 return -EINVAL; 4991 } 4992 4993 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); 4994 parsed_valid_hugepagesz = true; 4995 parsed_default_hugepagesz = true; 4996 default_hstate_idx = hstate_index(size_to_hstate(size)); 4997 4998 /* 4999 * The number of default huge pages (for this size) could have been 5000 * specified as the first hugetlb parameter: hugepages=X. If so, 5001 * then default_hstate_max_huge_pages is set. If the default huge 5002 * page size is gigantic (> MAX_PAGE_ORDER), then the pages must be 5003 * allocated here from bootmem allocator. 5004 */ 5005 if (default_hstate_max_huge_pages) { 5006 default_hstate.max_huge_pages = default_hstate_max_huge_pages; 5007 /* 5008 * Since this is an early parameter, we can't check 5009 * NUMA node state yet, so loop through MAX_NUMNODES. 5010 */ 5011 for (i = 0; i < MAX_NUMNODES; i++) { 5012 if (default_hugepages_in_node[i] != 0) 5013 default_hstate.max_huge_pages_node[i] = 5014 default_hugepages_in_node[i]; 5015 } 5016 default_hstate_max_huge_pages = 0; 5017 } 5018 5019 return 0; 5020 } 5021 hugetlb_early_param("default_hugepagesz", default_hugepagesz_setup); 5022 5023 static bool __hugetlb_bootmem_allocated __initdata; 5024 5025 bool __init hugetlb_bootmem_allocated(void) 5026 { 5027 return __hugetlb_bootmem_allocated; 5028 } 5029 5030 void __init hugetlb_bootmem_alloc(void) 5031 { 5032 struct hstate *h; 5033 int i; 5034 5035 if (__hugetlb_bootmem_allocated) 5036 return; 5037 5038 for (i = 0; i < MAX_NUMNODES; i++) 5039 INIT_LIST_HEAD(&huge_boot_pages[i]); 5040 5041 hugetlb_parse_params(); 5042 5043 for_each_hstate(h) { 5044 h->next_nid_to_alloc = first_online_node; 5045 h->next_nid_to_free = first_online_node; 5046 5047 if (hstate_is_gigantic(h)) 5048 hugetlb_hstate_alloc_pages(h); 5049 } 5050 5051 __hugetlb_bootmem_allocated = true; 5052 } 5053 5054 /* 5055 * hugepage_alloc_threads command line parsing. 5056 * 5057 * When set, use this specific number of threads for the boot 5058 * allocation of hugepages. 
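 * For example (illustrative): booting with "hugepage_alloc_threads=16"
 * uses 16 threads for the boot-time pool allocation instead of the
 * default of 25% of the online CPUs.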
5059 */ 5060 static int __init hugepage_alloc_threads_setup(char *s) 5061 { 5062 unsigned long allocation_threads; 5063 5064 if (kstrtoul(s, 0, &allocation_threads) != 0) 5065 return 1; 5066 5067 if (allocation_threads == 0) 5068 return 1; 5069 5070 hugepage_allocation_threads = allocation_threads; 5071 5072 return 1; 5073 } 5074 __setup("hugepage_alloc_threads=", hugepage_alloc_threads_setup); 5075 5076 static unsigned int allowed_mems_nr(struct hstate *h) 5077 { 5078 int node; 5079 unsigned int nr = 0; 5080 nodemask_t *mbind_nodemask; 5081 unsigned int *array = h->free_huge_pages_node; 5082 gfp_t gfp_mask = htlb_alloc_mask(h); 5083 5084 mbind_nodemask = policy_mbind_nodemask(gfp_mask); 5085 for_each_node_mask(node, cpuset_current_mems_allowed) { 5086 if (!mbind_nodemask || node_isset(node, *mbind_nodemask)) 5087 nr += array[node]; 5088 } 5089 5090 return nr; 5091 } 5092 5093 #ifdef CONFIG_SYSCTL 5094 static int proc_hugetlb_doulongvec_minmax(const struct ctl_table *table, int write, 5095 void *buffer, size_t *length, 5096 loff_t *ppos, unsigned long *out) 5097 { 5098 struct ctl_table dup_table; 5099 5100 /* 5101 * In order to avoid races with __do_proc_doulongvec_minmax(), we 5102 * can duplicate the @table and alter the duplicate of it. 5103 */ 5104 dup_table = *table; 5105 dup_table.data = out; 5106 5107 return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos); 5108 } 5109 5110 static int hugetlb_sysctl_handler_common(bool obey_mempolicy, 5111 const struct ctl_table *table, int write, 5112 void *buffer, size_t *length, loff_t *ppos) 5113 { 5114 struct hstate *h = &default_hstate; 5115 unsigned long tmp = h->max_huge_pages; 5116 int ret; 5117 5118 if (!hugepages_supported()) 5119 return -EOPNOTSUPP; 5120 5121 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 5122 &tmp); 5123 if (ret) 5124 goto out; 5125 5126 if (write) 5127 ret = __nr_hugepages_store_common(obey_mempolicy, h, 5128 NUMA_NO_NODE, tmp, *length); 5129 out: 5130 return ret; 5131 } 5132 5133 static int hugetlb_sysctl_handler(const struct ctl_table *table, int write, 5134 void *buffer, size_t *length, loff_t *ppos) 5135 { 5136 5137 return hugetlb_sysctl_handler_common(false, table, write, 5138 buffer, length, ppos); 5139 } 5140 5141 #ifdef CONFIG_NUMA 5142 static int hugetlb_mempolicy_sysctl_handler(const struct ctl_table *table, int write, 5143 void *buffer, size_t *length, loff_t *ppos) 5144 { 5145 return hugetlb_sysctl_handler_common(true, table, write, 5146 buffer, length, ppos); 5147 } 5148 #endif /* CONFIG_NUMA */ 5149 5150 static int hugetlb_overcommit_handler(const struct ctl_table *table, int write, 5151 void *buffer, size_t *length, loff_t *ppos) 5152 { 5153 struct hstate *h = &default_hstate; 5154 unsigned long tmp; 5155 int ret; 5156 5157 if (!hugepages_supported()) 5158 return -EOPNOTSUPP; 5159 5160 tmp = h->nr_overcommit_huge_pages; 5161 5162 if (write && hstate_is_gigantic(h)) 5163 return -EINVAL; 5164 5165 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 5166 &tmp); 5167 if (ret) 5168 goto out; 5169 5170 if (write) { 5171 spin_lock_irq(&hugetlb_lock); 5172 h->nr_overcommit_huge_pages = tmp; 5173 spin_unlock_irq(&hugetlb_lock); 5174 } 5175 out: 5176 return ret; 5177 } 5178 5179 static const struct ctl_table hugetlb_table[] = { 5180 { 5181 .procname = "nr_hugepages", 5182 .data = NULL, 5183 .maxlen = sizeof(unsigned long), 5184 .mode = 0644, 5185 .proc_handler = hugetlb_sysctl_handler, 5186 }, 5187 #ifdef CONFIG_NUMA 5188 { 5189 .procname = 
"nr_hugepages_mempolicy", 5190 .data = NULL, 5191 .maxlen = sizeof(unsigned long), 5192 .mode = 0644, 5193 .proc_handler = &hugetlb_mempolicy_sysctl_handler, 5194 }, 5195 #endif 5196 { 5197 .procname = "hugetlb_shm_group", 5198 .data = &sysctl_hugetlb_shm_group, 5199 .maxlen = sizeof(gid_t), 5200 .mode = 0644, 5201 .proc_handler = proc_dointvec, 5202 }, 5203 { 5204 .procname = "nr_overcommit_hugepages", 5205 .data = NULL, 5206 .maxlen = sizeof(unsigned long), 5207 .mode = 0644, 5208 .proc_handler = hugetlb_overcommit_handler, 5209 }, 5210 }; 5211 5212 static void __init hugetlb_sysctl_init(void) 5213 { 5214 register_sysctl_init("vm", hugetlb_table); 5215 } 5216 #endif /* CONFIG_SYSCTL */ 5217 5218 void hugetlb_report_meminfo(struct seq_file *m) 5219 { 5220 struct hstate *h; 5221 unsigned long total = 0; 5222 5223 if (!hugepages_supported()) 5224 return; 5225 5226 for_each_hstate(h) { 5227 unsigned long count = h->nr_huge_pages; 5228 5229 total += huge_page_size(h) * count; 5230 5231 if (h == &default_hstate) 5232 seq_printf(m, 5233 "HugePages_Total: %5lu\n" 5234 "HugePages_Free: %5lu\n" 5235 "HugePages_Rsvd: %5lu\n" 5236 "HugePages_Surp: %5lu\n" 5237 "Hugepagesize: %8lu kB\n", 5238 count, 5239 h->free_huge_pages, 5240 h->resv_huge_pages, 5241 h->surplus_huge_pages, 5242 huge_page_size(h) / SZ_1K); 5243 } 5244 5245 seq_printf(m, "Hugetlb: %8lu kB\n", total / SZ_1K); 5246 } 5247 5248 int hugetlb_report_node_meminfo(char *buf, int len, int nid) 5249 { 5250 struct hstate *h = &default_hstate; 5251 5252 if (!hugepages_supported()) 5253 return 0; 5254 5255 return sysfs_emit_at(buf, len, 5256 "Node %d HugePages_Total: %5u\n" 5257 "Node %d HugePages_Free: %5u\n" 5258 "Node %d HugePages_Surp: %5u\n", 5259 nid, h->nr_huge_pages_node[nid], 5260 nid, h->free_huge_pages_node[nid], 5261 nid, h->surplus_huge_pages_node[nid]); 5262 } 5263 5264 void hugetlb_show_meminfo_node(int nid) 5265 { 5266 struct hstate *h; 5267 5268 if (!hugepages_supported()) 5269 return; 5270 5271 for_each_hstate(h) 5272 printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n", 5273 nid, 5274 h->nr_huge_pages_node[nid], 5275 h->free_huge_pages_node[nid], 5276 h->surplus_huge_pages_node[nid], 5277 huge_page_size(h) / SZ_1K); 5278 } 5279 5280 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm) 5281 { 5282 seq_printf(m, "HugetlbPages:\t%8lu kB\n", 5283 K(atomic_long_read(&mm->hugetlb_usage))); 5284 } 5285 5286 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */ 5287 unsigned long hugetlb_total_pages(void) 5288 { 5289 struct hstate *h; 5290 unsigned long nr_total_pages = 0; 5291 5292 for_each_hstate(h) 5293 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); 5294 return nr_total_pages; 5295 } 5296 5297 static int hugetlb_acct_memory(struct hstate *h, long delta) 5298 { 5299 int ret = -ENOMEM; 5300 5301 if (!delta) 5302 return 0; 5303 5304 spin_lock_irq(&hugetlb_lock); 5305 /* 5306 * When cpuset is configured, it breaks the strict hugetlb page 5307 * reservation as the accounting is done on a global variable. Such 5308 * reservation is completely rubbish in the presence of cpuset because 5309 * the reservation is not checked against page availability for the 5310 * current cpuset. Application can still potentially OOM'ed by kernel 5311 * with lack of free htlb page in cpuset that the task is in. 
5312 * Attempt to enforce strict accounting with cpuset is almost 5313 * impossible (or too ugly) because cpuset is so fluid that 5314 * task or memory node can be dynamically moved between cpusets. 5315 * 5316 * The change of semantics for shared hugetlb mapping with cpuset is 5317 * undesirable. However, in order to preserve some of the semantics, 5318 * we fall back to check against current free page availability as 5319 * a best attempt and hopefully to minimize the impact of changing 5320 * semantics that cpuset has. 5321 * 5322 * Apart from cpuset, we also have the memory policy mechanism that 5323 * also determines from which node the kernel will allocate memory 5324 * in a NUMA system. So similar to cpuset, we also should consider 5325 * the memory policy of the current task. Similar to the description 5326 * above. 5327 */ 5328 if (delta > 0) { 5329 if (gather_surplus_pages(h, delta) < 0) 5330 goto out; 5331 5332 if (delta > allowed_mems_nr(h)) { 5333 return_unused_surplus_pages(h, delta); 5334 goto out; 5335 } 5336 } 5337 5338 ret = 0; 5339 if (delta < 0) 5340 return_unused_surplus_pages(h, (unsigned long) -delta); 5341 5342 out: 5343 spin_unlock_irq(&hugetlb_lock); 5344 return ret; 5345 } 5346 5347 static void hugetlb_vm_op_open(struct vm_area_struct *vma) 5348 { 5349 struct resv_map *resv = vma_resv_map(vma); 5350 5351 /* 5352 * HPAGE_RESV_OWNER indicates a private mapping. 5353 * This new VMA should share its sibling's reservation map if present. 5354 * The VMA will only ever have a valid reservation map pointer where 5355 * it is being copied for another still existing VMA. As that VMA 5356 * has a reference to the reservation map it cannot disappear until 5357 * after this open call completes. It is therefore safe to take a 5358 * new reference here without additional locking. 5359 */ 5360 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 5361 resv_map_dup_hugetlb_cgroup_uncharge_info(resv); 5362 kref_get(&resv->refs); 5363 } 5364 5365 /* 5366 * vma_lock structure for sharable mappings is vma specific. 5367 * Clear old pointer (if copied via vm_area_dup) and allocate 5368 * new structure. Before clearing, make sure vma_lock is not 5369 * for this vma. 5370 */ 5371 if (vma->vm_flags & VM_MAYSHARE) { 5372 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 5373 5374 if (vma_lock) { 5375 if (vma_lock->vma != vma) { 5376 vma->vm_private_data = NULL; 5377 hugetlb_vma_lock_alloc(vma); 5378 } else 5379 pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__); 5380 } else 5381 hugetlb_vma_lock_alloc(vma); 5382 } 5383 } 5384 5385 static void hugetlb_vm_op_close(struct vm_area_struct *vma) 5386 { 5387 struct hstate *h = hstate_vma(vma); 5388 struct resv_map *resv; 5389 struct hugepage_subpool *spool = subpool_vma(vma); 5390 unsigned long reserve, start, end; 5391 long gbl_reserve; 5392 5393 hugetlb_vma_lock_free(vma); 5394 5395 resv = vma_resv_map(vma); 5396 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 5397 return; 5398 5399 start = vma_hugecache_offset(h, vma, vma->vm_start); 5400 end = vma_hugecache_offset(h, vma, vma->vm_end); 5401 5402 reserve = (end - start) - region_count(resv, start, end); 5403 hugetlb_cgroup_uncharge_counter(resv, start, end); 5404 if (reserve) { 5405 /* 5406 * Decrement reserve counts. The global reserve count may be 5407 * adjusted if the subpool has a minimum size. 
5408 */ 5409 gbl_reserve = hugepage_subpool_put_pages(spool, reserve); 5410 hugetlb_acct_memory(h, -gbl_reserve); 5411 } 5412 5413 kref_put(&resv->refs, resv_map_release); 5414 } 5415 5416 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr) 5417 { 5418 if (addr & ~(huge_page_mask(hstate_vma(vma)))) 5419 return -EINVAL; 5420 5421 /* 5422 * PMD sharing is only possible for PUD_SIZE-aligned address ranges 5423 * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this 5424 * split, unshare PMDs in the PUD_SIZE interval surrounding addr now. 5425 */ 5426 if (addr & ~PUD_MASK) { 5427 /* 5428 * hugetlb_vm_op_split is called right before we attempt to 5429 * split the VMA. We will need to unshare PMDs in the old and 5430 * new VMAs, so let's unshare before we split. 5431 */ 5432 unsigned long floor = addr & PUD_MASK; 5433 unsigned long ceil = floor + PUD_SIZE; 5434 5435 if (floor >= vma->vm_start && ceil <= vma->vm_end) 5436 hugetlb_unshare_pmds(vma, floor, ceil); 5437 } 5438 5439 return 0; 5440 } 5441 5442 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma) 5443 { 5444 return huge_page_size(hstate_vma(vma)); 5445 } 5446 5447 /* 5448 * We cannot handle pagefaults against hugetlb pages at all. They cause 5449 * handle_mm_fault() to try to instantiate regular-sized pages in the 5450 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get 5451 * this far. 5452 */ 5453 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf) 5454 { 5455 BUG(); 5456 return 0; 5457 } 5458 5459 /* 5460 * When a new function is introduced to vm_operations_struct and added 5461 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops. 5462 * This is because under the System V memory model, mappings created via 5463 * shmget/shmat with "huge page" specified are backed by hugetlbfs files, 5464 * and their original vm_ops are overwritten with shm_vm_ops. 
5465 */ 5466 const struct vm_operations_struct hugetlb_vm_ops = { 5467 .fault = hugetlb_vm_op_fault, 5468 .open = hugetlb_vm_op_open, 5469 .close = hugetlb_vm_op_close, 5470 .may_split = hugetlb_vm_op_split, 5471 .pagesize = hugetlb_vm_op_pagesize, 5472 }; 5473 5474 static pte_t make_huge_pte(struct vm_area_struct *vma, struct folio *folio, 5475 bool try_mkwrite) 5476 { 5477 pte_t entry = folio_mk_pte(folio, vma->vm_page_prot); 5478 unsigned int shift = huge_page_shift(hstate_vma(vma)); 5479 5480 if (try_mkwrite && (vma->vm_flags & VM_WRITE)) { 5481 entry = pte_mkwrite_novma(pte_mkdirty(entry)); 5482 } else { 5483 entry = pte_wrprotect(entry); 5484 } 5485 entry = pte_mkyoung(entry); 5486 entry = arch_make_huge_pte(entry, shift, vma->vm_flags); 5487 5488 return entry; 5489 } 5490 5491 static void set_huge_ptep_writable(struct vm_area_struct *vma, 5492 unsigned long address, pte_t *ptep) 5493 { 5494 pte_t entry; 5495 5496 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(vma->vm_mm, address, ptep))); 5497 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) 5498 update_mmu_cache(vma, address, ptep); 5499 } 5500 5501 static void set_huge_ptep_maybe_writable(struct vm_area_struct *vma, 5502 unsigned long address, pte_t *ptep) 5503 { 5504 if (vma->vm_flags & VM_WRITE) 5505 set_huge_ptep_writable(vma, address, ptep); 5506 } 5507 5508 bool is_hugetlb_entry_migration(pte_t pte) 5509 { 5510 swp_entry_t swp; 5511 5512 if (huge_pte_none(pte) || pte_present(pte)) 5513 return false; 5514 swp = pte_to_swp_entry(pte); 5515 if (is_migration_entry(swp)) 5516 return true; 5517 else 5518 return false; 5519 } 5520 5521 bool is_hugetlb_entry_hwpoisoned(pte_t pte) 5522 { 5523 swp_entry_t swp; 5524 5525 if (huge_pte_none(pte) || pte_present(pte)) 5526 return false; 5527 swp = pte_to_swp_entry(pte); 5528 if (is_hwpoison_entry(swp)) 5529 return true; 5530 else 5531 return false; 5532 } 5533 5534 static void 5535 hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr, 5536 struct folio *new_folio, pte_t old, unsigned long sz) 5537 { 5538 pte_t newpte = make_huge_pte(vma, new_folio, true); 5539 5540 __folio_mark_uptodate(new_folio); 5541 hugetlb_add_new_anon_rmap(new_folio, vma, addr); 5542 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old)) 5543 newpte = huge_pte_mkuffd_wp(newpte); 5544 set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz); 5545 hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm); 5546 folio_set_hugetlb_migratable(new_folio); 5547 } 5548 5549 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 5550 struct vm_area_struct *dst_vma, 5551 struct vm_area_struct *src_vma) 5552 { 5553 pte_t *src_pte, *dst_pte, entry; 5554 struct folio *pte_folio; 5555 unsigned long addr; 5556 bool cow = is_cow_mapping(src_vma->vm_flags); 5557 struct hstate *h = hstate_vma(src_vma); 5558 unsigned long sz = huge_page_size(h); 5559 unsigned long npages = pages_per_huge_page(h); 5560 struct mmu_notifier_range range; 5561 unsigned long last_addr_mask; 5562 int ret = 0; 5563 5564 if (cow) { 5565 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src, 5566 src_vma->vm_start, 5567 src_vma->vm_end); 5568 mmu_notifier_invalidate_range_start(&range); 5569 vma_assert_write_locked(src_vma); 5570 raw_write_seqcount_begin(&src->write_protect_seq); 5571 } else { 5572 /* 5573 * For shared mappings the vma lock must be held before 5574 * calling hugetlb_walk() in the src vma. 
Otherwise, the 5575 * returned ptep could go away if part of a shared pmd and 5576 * another thread calls huge_pmd_unshare. 5577 */ 5578 hugetlb_vma_lock_read(src_vma); 5579 } 5580 5581 last_addr_mask = hugetlb_mask_last_page(h); 5582 for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) { 5583 spinlock_t *src_ptl, *dst_ptl; 5584 src_pte = hugetlb_walk(src_vma, addr, sz); 5585 if (!src_pte) { 5586 addr |= last_addr_mask; 5587 continue; 5588 } 5589 dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz); 5590 if (!dst_pte) { 5591 ret = -ENOMEM; 5592 break; 5593 } 5594 5595 /* 5596 * If the pagetables are shared don't copy or take references. 5597 * 5598 * dst_pte == src_pte is the common case of src/dest sharing. 5599 * However, src could have 'unshared' and dst shares with 5600 * another vma. So page_count of ptep page is checked instead 5601 * to reliably determine whether pte is shared. 5602 */ 5603 if (page_count(virt_to_page(dst_pte)) > 1) { 5604 addr |= last_addr_mask; 5605 continue; 5606 } 5607 5608 dst_ptl = huge_pte_lock(h, dst, dst_pte); 5609 src_ptl = huge_pte_lockptr(h, src, src_pte); 5610 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5611 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); 5612 again: 5613 if (huge_pte_none(entry)) { 5614 /* 5615 * Skip if src entry none. 5616 */ 5617 ; 5618 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) { 5619 if (!userfaultfd_wp(dst_vma)) 5620 entry = huge_pte_clear_uffd_wp(entry); 5621 set_huge_pte_at(dst, addr, dst_pte, entry, sz); 5622 } else if (unlikely(is_hugetlb_entry_migration(entry))) { 5623 swp_entry_t swp_entry = pte_to_swp_entry(entry); 5624 bool uffd_wp = pte_swp_uffd_wp(entry); 5625 5626 if (!is_readable_migration_entry(swp_entry) && cow) { 5627 /* 5628 * COW mappings require pages in both 5629 * parent and child to be set to read. 5630 */ 5631 swp_entry = make_readable_migration_entry( 5632 swp_offset(swp_entry)); 5633 entry = swp_entry_to_pte(swp_entry); 5634 if (userfaultfd_wp(src_vma) && uffd_wp) 5635 entry = pte_swp_mkuffd_wp(entry); 5636 set_huge_pte_at(src, addr, src_pte, entry, sz); 5637 } 5638 if (!userfaultfd_wp(dst_vma)) 5639 entry = huge_pte_clear_uffd_wp(entry); 5640 set_huge_pte_at(dst, addr, dst_pte, entry, sz); 5641 } else if (unlikely(is_pte_marker(entry))) { 5642 pte_marker marker = copy_pte_marker( 5643 pte_to_swp_entry(entry), dst_vma); 5644 5645 if (marker) 5646 set_huge_pte_at(dst, addr, dst_pte, 5647 make_pte_marker(marker), sz); 5648 } else { 5649 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); 5650 pte_folio = page_folio(pte_page(entry)); 5651 folio_get(pte_folio); 5652 5653 /* 5654 * Failing to duplicate the anon rmap is a rare case 5655 * where we see pinned hugetlb pages while they're 5656 * prone to COW. We need to do the COW earlier during 5657 * fork. 5658 * 5659 * When pre-allocating the page or copying data, we 5660 * need to be without the pgtable locks since we could 5661 * sleep during the process. 
5662 */ 5663 if (!folio_test_anon(pte_folio)) { 5664 hugetlb_add_file_rmap(pte_folio); 5665 } else if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) { 5666 pte_t src_pte_old = entry; 5667 struct folio *new_folio; 5668 5669 spin_unlock(src_ptl); 5670 spin_unlock(dst_ptl); 5671 /* Do not use reserve as it's private owned */ 5672 new_folio = alloc_hugetlb_folio(dst_vma, addr, false); 5673 if (IS_ERR(new_folio)) { 5674 folio_put(pte_folio); 5675 ret = PTR_ERR(new_folio); 5676 break; 5677 } 5678 ret = copy_user_large_folio(new_folio, pte_folio, 5679 addr, dst_vma); 5680 folio_put(pte_folio); 5681 if (ret) { 5682 folio_put(new_folio); 5683 break; 5684 } 5685 5686 /* Install the new hugetlb folio if src pte stable */ 5687 dst_ptl = huge_pte_lock(h, dst, dst_pte); 5688 src_ptl = huge_pte_lockptr(h, src, src_pte); 5689 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5690 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); 5691 if (!pte_same(src_pte_old, entry)) { 5692 restore_reserve_on_error(h, dst_vma, addr, 5693 new_folio); 5694 folio_put(new_folio); 5695 /* huge_ptep of dst_pte won't change as in child */ 5696 goto again; 5697 } 5698 hugetlb_install_folio(dst_vma, dst_pte, addr, 5699 new_folio, src_pte_old, sz); 5700 spin_unlock(src_ptl); 5701 spin_unlock(dst_ptl); 5702 continue; 5703 } 5704 5705 if (cow) { 5706 /* 5707 * No need to notify as we are downgrading page 5708 * table protection not changing it to point 5709 * to a new page. 5710 * 5711 * See Documentation/mm/mmu_notifier.rst 5712 */ 5713 huge_ptep_set_wrprotect(src, addr, src_pte); 5714 entry = huge_pte_wrprotect(entry); 5715 } 5716 5717 if (!userfaultfd_wp(dst_vma)) 5718 entry = huge_pte_clear_uffd_wp(entry); 5719 5720 set_huge_pte_at(dst, addr, dst_pte, entry, sz); 5721 hugetlb_count_add(npages, dst); 5722 } 5723 spin_unlock(src_ptl); 5724 spin_unlock(dst_ptl); 5725 } 5726 5727 if (cow) { 5728 raw_write_seqcount_end(&src->write_protect_seq); 5729 mmu_notifier_invalidate_range_end(&range); 5730 } else { 5731 hugetlb_vma_unlock_read(src_vma); 5732 } 5733 5734 return ret; 5735 } 5736 5737 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr, 5738 unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte, 5739 unsigned long sz) 5740 { 5741 bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma); 5742 struct hstate *h = hstate_vma(vma); 5743 struct mm_struct *mm = vma->vm_mm; 5744 spinlock_t *src_ptl, *dst_ptl; 5745 pte_t pte; 5746 5747 dst_ptl = huge_pte_lock(h, mm, dst_pte); 5748 src_ptl = huge_pte_lockptr(h, mm, src_pte); 5749 5750 /* 5751 * We don't have to worry about the ordering of src and dst ptlocks 5752 * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock. 
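 * When both entries hash to the same split page table lock (or to the
 * mm-wide page_table_lock), src_ptl == dst_ptl and the lock is taken and
 * released only once below.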
5753 */ 5754 if (src_ptl != dst_ptl) 5755 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5756 5757 pte = huge_ptep_get_and_clear(mm, old_addr, src_pte, sz); 5758 5759 if (need_clear_uffd_wp && pte_marker_uffd_wp(pte)) 5760 huge_pte_clear(mm, new_addr, dst_pte, sz); 5761 else { 5762 if (need_clear_uffd_wp) { 5763 if (pte_present(pte)) 5764 pte = huge_pte_clear_uffd_wp(pte); 5765 else if (is_swap_pte(pte)) 5766 pte = pte_swp_clear_uffd_wp(pte); 5767 } 5768 set_huge_pte_at(mm, new_addr, dst_pte, pte, sz); 5769 } 5770 5771 if (src_ptl != dst_ptl) 5772 spin_unlock(src_ptl); 5773 spin_unlock(dst_ptl); 5774 } 5775 5776 int move_hugetlb_page_tables(struct vm_area_struct *vma, 5777 struct vm_area_struct *new_vma, 5778 unsigned long old_addr, unsigned long new_addr, 5779 unsigned long len) 5780 { 5781 struct hstate *h = hstate_vma(vma); 5782 struct address_space *mapping = vma->vm_file->f_mapping; 5783 unsigned long sz = huge_page_size(h); 5784 struct mm_struct *mm = vma->vm_mm; 5785 unsigned long old_end = old_addr + len; 5786 unsigned long last_addr_mask; 5787 pte_t *src_pte, *dst_pte; 5788 struct mmu_notifier_range range; 5789 bool shared_pmd = false; 5790 5791 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr, 5792 old_end); 5793 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 5794 /* 5795 * In case of shared PMDs, we should cover the maximum possible 5796 * range. 5797 */ 5798 flush_cache_range(vma, range.start, range.end); 5799 5800 mmu_notifier_invalidate_range_start(&range); 5801 last_addr_mask = hugetlb_mask_last_page(h); 5802 /* Prevent race with file truncation */ 5803 hugetlb_vma_lock_write(vma); 5804 i_mmap_lock_write(mapping); 5805 for (; old_addr < old_end; old_addr += sz, new_addr += sz) { 5806 src_pte = hugetlb_walk(vma, old_addr, sz); 5807 if (!src_pte) { 5808 old_addr |= last_addr_mask; 5809 new_addr |= last_addr_mask; 5810 continue; 5811 } 5812 if (huge_pte_none(huge_ptep_get(mm, old_addr, src_pte))) 5813 continue; 5814 5815 if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) { 5816 shared_pmd = true; 5817 old_addr |= last_addr_mask; 5818 new_addr |= last_addr_mask; 5819 continue; 5820 } 5821 5822 dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz); 5823 if (!dst_pte) 5824 break; 5825 5826 move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz); 5827 } 5828 5829 if (shared_pmd) 5830 flush_hugetlb_tlb_range(vma, range.start, range.end); 5831 else 5832 flush_hugetlb_tlb_range(vma, old_end - len, old_end); 5833 mmu_notifier_invalidate_range_end(&range); 5834 i_mmap_unlock_write(mapping); 5835 hugetlb_vma_unlock_write(vma); 5836 5837 return len + old_addr - old_end; 5838 } 5839 5840 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 5841 unsigned long start, unsigned long end, 5842 struct page *ref_page, zap_flags_t zap_flags) 5843 { 5844 struct mm_struct *mm = vma->vm_mm; 5845 unsigned long address; 5846 pte_t *ptep; 5847 pte_t pte; 5848 spinlock_t *ptl; 5849 struct page *page; 5850 struct hstate *h = hstate_vma(vma); 5851 unsigned long sz = huge_page_size(h); 5852 bool adjust_reservation = false; 5853 unsigned long last_addr_mask; 5854 bool force_flush = false; 5855 5856 WARN_ON(!is_vm_hugetlb_page(vma)); 5857 BUG_ON(start & ~huge_page_mask(h)); 5858 BUG_ON(end & ~huge_page_mask(h)); 5859 5860 /* 5861 * This is a hugetlb vma, all the pte entries should point 5862 * to huge page. 
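 *
 * A sketch of how this is typically driven (roughly what
 * unmap_hugepage_range() below does; ref_page and zap_flags vary by caller):
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, start, end);
 *	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
 *	mmu_notifier_invalidate_range_start(&range);
 *	tlb_gather_mmu(&tlb, mm);
 *	__unmap_hugepage_range(&tlb, vma, start, end, NULL, 0);
 *	mmu_notifier_invalidate_range_end(&range);
 *	tlb_finish_mmu(&tlb);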
5863 */ 5864 tlb_change_page_size(tlb, sz); 5865 tlb_start_vma(tlb, vma); 5866 5867 last_addr_mask = hugetlb_mask_last_page(h); 5868 address = start; 5869 for (; address < end; address += sz) { 5870 ptep = hugetlb_walk(vma, address, sz); 5871 if (!ptep) { 5872 address |= last_addr_mask; 5873 continue; 5874 } 5875 5876 ptl = huge_pte_lock(h, mm, ptep); 5877 if (huge_pmd_unshare(mm, vma, address, ptep)) { 5878 spin_unlock(ptl); 5879 tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE); 5880 force_flush = true; 5881 address |= last_addr_mask; 5882 continue; 5883 } 5884 5885 pte = huge_ptep_get(mm, address, ptep); 5886 if (huge_pte_none(pte)) { 5887 spin_unlock(ptl); 5888 continue; 5889 } 5890 5891 /* 5892 * Migrating hugepage or HWPoisoned hugepage is already 5893 * unmapped and its refcount is dropped, so just clear pte here. 5894 */ 5895 if (unlikely(!pte_present(pte))) { 5896 /* 5897 * If the pte was wr-protected by uffd-wp in any of the 5898 * swap forms, meanwhile the caller does not want to 5899 * drop the uffd-wp bit in this zap, then replace the 5900 * pte with a marker. 5901 */ 5902 if (pte_swp_uffd_wp_any(pte) && 5903 !(zap_flags & ZAP_FLAG_DROP_MARKER)) 5904 set_huge_pte_at(mm, address, ptep, 5905 make_pte_marker(PTE_MARKER_UFFD_WP), 5906 sz); 5907 else 5908 huge_pte_clear(mm, address, ptep, sz); 5909 spin_unlock(ptl); 5910 continue; 5911 } 5912 5913 page = pte_page(pte); 5914 /* 5915 * If a reference page is supplied, it is because a specific 5916 * page is being unmapped, not a range. Ensure the page we 5917 * are about to unmap is the actual page of interest. 5918 */ 5919 if (ref_page) { 5920 if (page != ref_page) { 5921 spin_unlock(ptl); 5922 continue; 5923 } 5924 /* 5925 * Mark the VMA as having unmapped its page so that 5926 * future faults in this VMA will fail rather than 5927 * looking like data was lost 5928 */ 5929 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 5930 } 5931 5932 pte = huge_ptep_get_and_clear(mm, address, ptep, sz); 5933 tlb_remove_huge_tlb_entry(h, tlb, ptep, address); 5934 if (huge_pte_dirty(pte)) 5935 set_page_dirty(page); 5936 /* Leave a uffd-wp pte marker if needed */ 5937 if (huge_pte_uffd_wp(pte) && 5938 !(zap_flags & ZAP_FLAG_DROP_MARKER)) 5939 set_huge_pte_at(mm, address, ptep, 5940 make_pte_marker(PTE_MARKER_UFFD_WP), 5941 sz); 5942 hugetlb_count_sub(pages_per_huge_page(h), mm); 5943 hugetlb_remove_rmap(page_folio(page)); 5944 5945 /* 5946 * Restore the reservation for anonymous page, otherwise the 5947 * backing page could be stolen by someone. 5948 * If there we are freeing a surplus, do not set the restore 5949 * reservation bit. 5950 */ 5951 if (!h->surplus_huge_pages && __vma_private_lock(vma) && 5952 folio_test_anon(page_folio(page))) { 5953 folio_set_hugetlb_restore_reserve(page_folio(page)); 5954 /* Reservation to be adjusted after the spin lock */ 5955 adjust_reservation = true; 5956 } 5957 5958 spin_unlock(ptl); 5959 5960 /* 5961 * Adjust the reservation for the region that will have the 5962 * reserve restored. Keep in mind that vma_needs_reservation() changes 5963 * resv->adds_in_progress if it succeeds. If this is not done, 5964 * do_exit() will not see it, and will keep the reservation 5965 * forever. 5966 */ 5967 if (adjust_reservation) { 5968 int rc = vma_needs_reservation(h, vma, address); 5969 5970 if (rc < 0) 5971 /* Pressumably allocate_file_region_entries failed 5972 * to allocate a file_region struct. Clear 5973 * hugetlb_restore_reserve so that global reserve 5974 * count will not be incremented by free_huge_folio. 
5975 * Act as if we consumed the reservation. 5976 */ 5977 folio_clear_hugetlb_restore_reserve(page_folio(page)); 5978 else if (rc) 5979 vma_add_reservation(h, vma, address); 5980 } 5981 5982 tlb_remove_page_size(tlb, page, huge_page_size(h)); 5983 /* 5984 * Bail out after unmapping reference page if supplied 5985 */ 5986 if (ref_page) 5987 break; 5988 } 5989 tlb_end_vma(tlb, vma); 5990 5991 /* 5992 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We 5993 * could defer the flush until now, since by holding i_mmap_rwsem we 5994 * guaranteed that the last refernece would not be dropped. But we must 5995 * do the flushing before we return, as otherwise i_mmap_rwsem will be 5996 * dropped and the last reference to the shared PMDs page might be 5997 * dropped as well. 5998 * 5999 * In theory we could defer the freeing of the PMD pages as well, but 6000 * huge_pmd_unshare() relies on the exact page_count for the PMD page to 6001 * detect sharing, so we cannot defer the release of the page either. 6002 * Instead, do flush now. 6003 */ 6004 if (force_flush) 6005 tlb_flush_mmu_tlbonly(tlb); 6006 } 6007 6008 void __hugetlb_zap_begin(struct vm_area_struct *vma, 6009 unsigned long *start, unsigned long *end) 6010 { 6011 if (!vma->vm_file) /* hugetlbfs_file_mmap error */ 6012 return; 6013 6014 adjust_range_if_pmd_sharing_possible(vma, start, end); 6015 hugetlb_vma_lock_write(vma); 6016 if (vma->vm_file) 6017 i_mmap_lock_write(vma->vm_file->f_mapping); 6018 } 6019 6020 void __hugetlb_zap_end(struct vm_area_struct *vma, 6021 struct zap_details *details) 6022 { 6023 zap_flags_t zap_flags = details ? details->zap_flags : 0; 6024 6025 if (!vma->vm_file) /* hugetlbfs_file_mmap error */ 6026 return; 6027 6028 if (zap_flags & ZAP_FLAG_UNMAP) { /* final unmap */ 6029 /* 6030 * Unlock and free the vma lock before releasing i_mmap_rwsem. 6031 * When the vma_lock is freed, this makes the vma ineligible 6032 * for pmd sharing. And, i_mmap_rwsem is required to set up 6033 * pmd sharing. This is important as page tables for this 6034 * unmapped range will be asynchrously deleted. If the page 6035 * tables are shared, there will be issues when accessed by 6036 * someone else. 6037 */ 6038 __hugetlb_vma_unlock_write_free(vma); 6039 } else { 6040 hugetlb_vma_unlock_write(vma); 6041 } 6042 6043 if (vma->vm_file) 6044 i_mmap_unlock_write(vma->vm_file->f_mapping); 6045 } 6046 6047 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 6048 unsigned long end, struct page *ref_page, 6049 zap_flags_t zap_flags) 6050 { 6051 struct mmu_notifier_range range; 6052 struct mmu_gather tlb; 6053 6054 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 6055 start, end); 6056 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 6057 mmu_notifier_invalidate_range_start(&range); 6058 tlb_gather_mmu(&tlb, vma->vm_mm); 6059 6060 __unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags); 6061 6062 mmu_notifier_invalidate_range_end(&range); 6063 tlb_finish_mmu(&tlb); 6064 } 6065 6066 /* 6067 * This is called when the original mapper is failing to COW a MAP_PRIVATE 6068 * mapping it owns the reserve page for. The intention is to unmap the page 6069 * from other VMAs and let the children be SIGKILLed if they are faulting the 6070 * same region. 
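 *
 * This is invoked from hugetlb_wp() below when the allocation for a COW
 * fails for the reservation owner (cow_from_owner), after the fault mutex
 * and the vma lock have been dropped.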
6071 */ 6072 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, 6073 struct page *page, unsigned long address) 6074 { 6075 struct hstate *h = hstate_vma(vma); 6076 struct vm_area_struct *iter_vma; 6077 struct address_space *mapping; 6078 pgoff_t pgoff; 6079 6080 /* 6081 * vm_pgoff is in PAGE_SIZE units, hence the different calculation 6082 * from page cache lookup which is in HPAGE_SIZE units. 6083 */ 6084 address = address & huge_page_mask(h); 6085 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + 6086 vma->vm_pgoff; 6087 mapping = vma->vm_file->f_mapping; 6088 6089 /* 6090 * Take the mapping lock for the duration of the table walk. As 6091 * this mapping should be shared between all the VMAs, 6092 * __unmap_hugepage_range() is called as the lock is already held 6093 */ 6094 i_mmap_lock_write(mapping); 6095 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { 6096 /* Do not unmap the current VMA */ 6097 if (iter_vma == vma) 6098 continue; 6099 6100 /* 6101 * Shared VMAs have their own reserves and do not affect 6102 * MAP_PRIVATE accounting but it is possible that a shared 6103 * VMA is using the same page so check and skip such VMAs. 6104 */ 6105 if (iter_vma->vm_flags & VM_MAYSHARE) 6106 continue; 6107 6108 /* 6109 * Unmap the page from other VMAs without their own reserves. 6110 * They get marked to be SIGKILLed if they fault in these 6111 * areas. This is because a future no-page fault on this VMA 6112 * could insert a zeroed page instead of the data existing 6113 * from the time of fork. This would look like data corruption 6114 */ 6115 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 6116 unmap_hugepage_range(iter_vma, address, 6117 address + huge_page_size(h), page, 0); 6118 } 6119 i_mmap_unlock_write(mapping); 6120 } 6121 6122 /* 6123 * hugetlb_wp() should be called with page lock of the original hugepage held. 6124 * Called with hugetlb_fault_mutex_table held and pte_page locked so we 6125 * cannot race with other handlers or page migration. 6126 * Keep the pte_same checks anyway to make transition from the mutex easier. 6127 */ 6128 static vm_fault_t hugetlb_wp(struct folio *pagecache_folio, 6129 struct vm_fault *vmf) 6130 { 6131 struct vm_area_struct *vma = vmf->vma; 6132 struct mm_struct *mm = vma->vm_mm; 6133 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 6134 pte_t pte = huge_ptep_get(mm, vmf->address, vmf->pte); 6135 struct hstate *h = hstate_vma(vma); 6136 struct folio *old_folio; 6137 struct folio *new_folio; 6138 bool cow_from_owner = 0; 6139 vm_fault_t ret = 0; 6140 struct mmu_notifier_range range; 6141 6142 /* 6143 * Never handle CoW for uffd-wp protected pages. It should be only 6144 * handled when the uffd-wp protection is removed. 6145 * 6146 * Note that only the CoW optimization path (in hugetlb_no_page()) 6147 * can trigger this, because hugetlb_fault() will always resolve 6148 * uffd-wp bit first. 6149 */ 6150 if (!unshare && huge_pte_uffd_wp(pte)) 6151 return 0; 6152 6153 /* Let's take out MAP_SHARED mappings first. */ 6154 if (vma->vm_flags & VM_MAYSHARE) { 6155 set_huge_ptep_writable(vma, vmf->address, vmf->pte); 6156 return 0; 6157 } 6158 6159 old_folio = page_folio(pte_page(pte)); 6160 6161 delayacct_wpcopy_start(); 6162 6163 retry_avoidcopy: 6164 /* 6165 * If no-one else is actually using this page, we're the exclusive 6166 * owner and can reuse this page. 
6167 * 6168 * Note that we don't rely on the (safer) folio refcount here, because 6169 * copying the hugetlb folio when there are unexpected (temporary) 6170 * folio references could harm simple fork()+exit() users when 6171 * we run out of free hugetlb folios: we would have to kill processes 6172 * in scenarios that used to work. As a side effect, there can still 6173 * be leaks between processes, for example, with FOLL_GET users. 6174 */ 6175 if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) { 6176 if (!PageAnonExclusive(&old_folio->page)) { 6177 folio_move_anon_rmap(old_folio, vma); 6178 SetPageAnonExclusive(&old_folio->page); 6179 } 6180 if (likely(!unshare)) 6181 set_huge_ptep_maybe_writable(vma, vmf->address, 6182 vmf->pte); 6183 6184 delayacct_wpcopy_end(); 6185 return 0; 6186 } 6187 VM_BUG_ON_PAGE(folio_test_anon(old_folio) && 6188 PageAnonExclusive(&old_folio->page), &old_folio->page); 6189 6190 /* 6191 * If the process that created a MAP_PRIVATE mapping is about to 6192 * perform a COW due to a shared page count, attempt to satisfy 6193 * the allocation without using the existing reserves. The pagecache 6194 * page is used to determine if the reserve at this address was 6195 * consumed or not. If reserves were used, a partial faulted mapping 6196 * at the time of fork() could consume its reserves on COW instead 6197 * of the full address range. 6198 */ 6199 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 6200 old_folio != pagecache_folio) 6201 cow_from_owner = true; 6202 6203 folio_get(old_folio); 6204 6205 /* 6206 * Drop page table lock as buddy allocator may be called. It will 6207 * be acquired again before returning to the caller, as expected. 6208 */ 6209 spin_unlock(vmf->ptl); 6210 new_folio = alloc_hugetlb_folio(vma, vmf->address, cow_from_owner); 6211 6212 if (IS_ERR(new_folio)) { 6213 /* 6214 * If a process owning a MAP_PRIVATE mapping fails to COW, 6215 * it is due to references held by a child and an insufficient 6216 * huge page pool. To guarantee the original mappers 6217 * reliability, unmap the page from child processes. The child 6218 * may get SIGKILLed if it later faults. 6219 */ 6220 if (cow_from_owner) { 6221 struct address_space *mapping = vma->vm_file->f_mapping; 6222 pgoff_t idx; 6223 u32 hash; 6224 6225 folio_put(old_folio); 6226 /* 6227 * Drop hugetlb_fault_mutex and vma_lock before 6228 * unmapping. unmapping needs to hold vma_lock 6229 * in write mode. Dropping vma_lock in read mode 6230 * here is OK as COW mappings do not interact with 6231 * PMD sharing. 6232 * 6233 * Reacquire both after unmap operation. 6234 */ 6235 idx = vma_hugecache_offset(h, vma, vmf->address); 6236 hash = hugetlb_fault_mutex_hash(mapping, idx); 6237 hugetlb_vma_unlock_read(vma); 6238 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6239 6240 unmap_ref_private(mm, vma, &old_folio->page, 6241 vmf->address); 6242 6243 mutex_lock(&hugetlb_fault_mutex_table[hash]); 6244 hugetlb_vma_lock_read(vma); 6245 spin_lock(vmf->ptl); 6246 vmf->pte = hugetlb_walk(vma, vmf->address, 6247 huge_page_size(h)); 6248 if (likely(vmf->pte && 6249 pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) 6250 goto retry_avoidcopy; 6251 /* 6252 * race occurs while re-acquiring page table 6253 * lock, and our job is done. 6254 */ 6255 delayacct_wpcopy_end(); 6256 return 0; 6257 } 6258 6259 ret = vmf_error(PTR_ERR(new_folio)); 6260 goto out_release_old; 6261 } 6262 6263 /* 6264 * When the original hugepage is shared one, it does not have 6265 * anon_vma prepared. 
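 * __vmf_anon_prepare() is also the only place here that can return
 * VM_FAULT_RETRY (when the anon_vma cannot be set up under the per-VMA
 * lock); the fault exit paths check for that and call vma_end_read().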
6266 */ 6267 ret = __vmf_anon_prepare(vmf); 6268 if (unlikely(ret)) 6269 goto out_release_all; 6270 6271 if (copy_user_large_folio(new_folio, old_folio, vmf->real_address, vma)) { 6272 ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h)); 6273 goto out_release_all; 6274 } 6275 __folio_mark_uptodate(new_folio); 6276 6277 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, vmf->address, 6278 vmf->address + huge_page_size(h)); 6279 mmu_notifier_invalidate_range_start(&range); 6280 6281 /* 6282 * Retake the page table lock to check for racing updates 6283 * before the page tables are altered 6284 */ 6285 spin_lock(vmf->ptl); 6286 vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h)); 6287 if (likely(vmf->pte && pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) { 6288 pte_t newpte = make_huge_pte(vma, new_folio, !unshare); 6289 6290 /* Break COW or unshare */ 6291 huge_ptep_clear_flush(vma, vmf->address, vmf->pte); 6292 hugetlb_remove_rmap(old_folio); 6293 hugetlb_add_new_anon_rmap(new_folio, vma, vmf->address); 6294 if (huge_pte_uffd_wp(pte)) 6295 newpte = huge_pte_mkuffd_wp(newpte); 6296 set_huge_pte_at(mm, vmf->address, vmf->pte, newpte, 6297 huge_page_size(h)); 6298 folio_set_hugetlb_migratable(new_folio); 6299 /* Make the old page be freed below */ 6300 new_folio = old_folio; 6301 } 6302 spin_unlock(vmf->ptl); 6303 mmu_notifier_invalidate_range_end(&range); 6304 out_release_all: 6305 /* 6306 * No restore in case of successful pagetable update (Break COW or 6307 * unshare) 6308 */ 6309 if (new_folio != old_folio) 6310 restore_reserve_on_error(h, vma, vmf->address, new_folio); 6311 folio_put(new_folio); 6312 out_release_old: 6313 folio_put(old_folio); 6314 6315 spin_lock(vmf->ptl); /* Caller expects lock to be held */ 6316 6317 delayacct_wpcopy_end(); 6318 return ret; 6319 } 6320 6321 /* 6322 * Return whether there is a pagecache page to back given address within VMA. 6323 */ 6324 bool hugetlbfs_pagecache_present(struct hstate *h, 6325 struct vm_area_struct *vma, unsigned long address) 6326 { 6327 struct address_space *mapping = vma->vm_file->f_mapping; 6328 pgoff_t idx = linear_page_index(vma, address); 6329 struct folio *folio; 6330 6331 folio = filemap_get_folio(mapping, idx); 6332 if (IS_ERR(folio)) 6333 return false; 6334 folio_put(folio); 6335 return true; 6336 } 6337 6338 int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping, 6339 pgoff_t idx) 6340 { 6341 struct inode *inode = mapping->host; 6342 struct hstate *h = hstate_inode(inode); 6343 int err; 6344 6345 idx <<= huge_page_order(h); 6346 __folio_set_locked(folio); 6347 err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL); 6348 6349 if (unlikely(err)) { 6350 __folio_clear_locked(folio); 6351 return err; 6352 } 6353 folio_clear_hugetlb_restore_reserve(folio); 6354 6355 /* 6356 * mark folio dirty so that it will not be removed from cache/file 6357 * by non-hugetlbfs specific code paths. 6358 */ 6359 folio_mark_dirty(folio); 6360 6361 spin_lock(&inode->i_lock); 6362 inode->i_blocks += blocks_per_huge_page(h); 6363 spin_unlock(&inode->i_lock); 6364 return 0; 6365 } 6366 6367 static inline vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf, 6368 struct address_space *mapping, 6369 unsigned long reason) 6370 { 6371 u32 hash; 6372 6373 /* 6374 * vma_lock and hugetlb_fault_mutex must be dropped before handling 6375 * userfault. Also mmap_lock could be dropped due to handling 6376 * userfault, any vma operation should be careful from here. 
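 * The order used below is: hugetlb_vma_unlock_read(), then the
 * hugetlb_fault_mutex_table entry for (mapping, pgoff), and only then
 * handle_userfault(), which may itself drop mmap_lock.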
6377 */ 6378 hugetlb_vma_unlock_read(vmf->vma); 6379 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff); 6380 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6381 return handle_userfault(vmf, reason); 6382 } 6383 6384 /* 6385 * Recheck pte with pgtable lock. Returns true if pte didn't change, or 6386 * false if pte changed or is changing. 6387 */ 6388 static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, unsigned long addr, 6389 pte_t *ptep, pte_t old_pte) 6390 { 6391 spinlock_t *ptl; 6392 bool same; 6393 6394 ptl = huge_pte_lock(h, mm, ptep); 6395 same = pte_same(huge_ptep_get(mm, addr, ptep), old_pte); 6396 spin_unlock(ptl); 6397 6398 return same; 6399 } 6400 6401 static vm_fault_t hugetlb_no_page(struct address_space *mapping, 6402 struct vm_fault *vmf) 6403 { 6404 struct vm_area_struct *vma = vmf->vma; 6405 struct mm_struct *mm = vma->vm_mm; 6406 struct hstate *h = hstate_vma(vma); 6407 vm_fault_t ret = VM_FAULT_SIGBUS; 6408 int anon_rmap = 0; 6409 unsigned long size; 6410 struct folio *folio; 6411 pte_t new_pte; 6412 bool new_folio, new_pagecache_folio = false; 6413 u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff); 6414 6415 /* 6416 * Currently, we are forced to kill the process in the event the 6417 * original mapper has unmapped pages from the child due to a failed 6418 * COW/unsharing. Warn that such a situation has occurred as it may not 6419 * be obvious. 6420 */ 6421 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 6422 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n", 6423 current->pid); 6424 goto out; 6425 } 6426 6427 /* 6428 * Use page lock to guard against racing truncation 6429 * before we get page_table_lock. 6430 */ 6431 new_folio = false; 6432 folio = filemap_lock_hugetlb_folio(h, mapping, vmf->pgoff); 6433 if (IS_ERR(folio)) { 6434 size = i_size_read(mapping->host) >> huge_page_shift(h); 6435 if (vmf->pgoff >= size) 6436 goto out; 6437 /* Check for page in userfault range */ 6438 if (userfaultfd_missing(vma)) { 6439 /* 6440 * Since hugetlb_no_page() was examining pte 6441 * without pgtable lock, we need to re-test under 6442 * lock because the pte may not be stable and could 6443 * have changed from under us. Try to detect 6444 * either changed or during-changing ptes and retry 6445 * properly when needed. 6446 * 6447 * Note that userfaultfd is actually fine with 6448 * false positives (e.g. caused by pte changed), 6449 * but not wrong logical events (e.g. caused by 6450 * reading a pte during changing). The latter can 6451 * confuse the userspace, so the strictness is very 6452 * much preferred. E.g., MISSING event should 6453 * never happen on the page after UFFDIO_COPY has 6454 * correctly installed the page and returned. 6455 */ 6456 if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) { 6457 ret = 0; 6458 goto out; 6459 } 6460 6461 return hugetlb_handle_userfault(vmf, mapping, 6462 VM_UFFD_MISSING); 6463 } 6464 6465 if (!(vma->vm_flags & VM_MAYSHARE)) { 6466 ret = __vmf_anon_prepare(vmf); 6467 if (unlikely(ret)) 6468 goto out; 6469 } 6470 6471 folio = alloc_hugetlb_folio(vma, vmf->address, false); 6472 if (IS_ERR(folio)) { 6473 /* 6474 * Returning error will result in faulting task being 6475 * sent SIGBUS. The hugetlb fault mutex prevents two 6476 * tasks from racing to fault in the same page which 6477 * could result in false unable to allocate errors. 6478 * Page migration does not take the fault mutex, but 6479 * does a clear then write of pte's under page table 6480 * lock. 
Page fault code could race with migration, 6481 * notice the clear pte and try to allocate a page 6482 * here. Before returning error, get ptl and make 6483 * sure there really is no pte entry. 6484 */ 6485 if (hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) 6486 ret = vmf_error(PTR_ERR(folio)); 6487 else 6488 ret = 0; 6489 goto out; 6490 } 6491 folio_zero_user(folio, vmf->real_address); 6492 __folio_mark_uptodate(folio); 6493 new_folio = true; 6494 6495 if (vma->vm_flags & VM_MAYSHARE) { 6496 int err = hugetlb_add_to_page_cache(folio, mapping, 6497 vmf->pgoff); 6498 if (err) { 6499 /* 6500 * err can't be -EEXIST which implies someone 6501 * else consumed the reservation since hugetlb 6502 * fault mutex is held when add a hugetlb page 6503 * to the page cache. So it's safe to call 6504 * restore_reserve_on_error() here. 6505 */ 6506 restore_reserve_on_error(h, vma, vmf->address, 6507 folio); 6508 folio_put(folio); 6509 ret = VM_FAULT_SIGBUS; 6510 goto out; 6511 } 6512 new_pagecache_folio = true; 6513 } else { 6514 folio_lock(folio); 6515 anon_rmap = 1; 6516 } 6517 } else { 6518 /* 6519 * If memory error occurs between mmap() and fault, some process 6520 * don't have hwpoisoned swap entry for errored virtual address. 6521 * So we need to block hugepage fault by PG_hwpoison bit check. 6522 */ 6523 if (unlikely(folio_test_hwpoison(folio))) { 6524 ret = VM_FAULT_HWPOISON_LARGE | 6525 VM_FAULT_SET_HINDEX(hstate_index(h)); 6526 goto backout_unlocked; 6527 } 6528 6529 /* Check for page in userfault range. */ 6530 if (userfaultfd_minor(vma)) { 6531 folio_unlock(folio); 6532 folio_put(folio); 6533 /* See comment in userfaultfd_missing() block above */ 6534 if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) { 6535 ret = 0; 6536 goto out; 6537 } 6538 return hugetlb_handle_userfault(vmf, mapping, 6539 VM_UFFD_MINOR); 6540 } 6541 } 6542 6543 /* 6544 * If we are going to COW a private mapping later, we examine the 6545 * pending reservations for this page now. This will ensure that 6546 * any allocations necessary to record that reservation occur outside 6547 * the spinlock. 6548 */ 6549 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 6550 if (vma_needs_reservation(h, vma, vmf->address) < 0) { 6551 ret = VM_FAULT_OOM; 6552 goto backout_unlocked; 6553 } 6554 /* Just decrements count, does not deallocate */ 6555 vma_end_reservation(h, vma, vmf->address); 6556 } 6557 6558 vmf->ptl = huge_pte_lock(h, mm, vmf->pte); 6559 ret = 0; 6560 /* If pte changed from under us, retry */ 6561 if (!pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), vmf->orig_pte)) 6562 goto backout; 6563 6564 if (anon_rmap) 6565 hugetlb_add_new_anon_rmap(folio, vma, vmf->address); 6566 else 6567 hugetlb_add_file_rmap(folio); 6568 new_pte = make_huge_pte(vma, folio, vma->vm_flags & VM_SHARED); 6569 /* 6570 * If this pte was previously wr-protected, keep it wr-protected even 6571 * if populated. 6572 */ 6573 if (unlikely(pte_marker_uffd_wp(vmf->orig_pte))) 6574 new_pte = huge_pte_mkuffd_wp(new_pte); 6575 set_huge_pte_at(mm, vmf->address, vmf->pte, new_pte, huge_page_size(h)); 6576 6577 hugetlb_count_add(pages_per_huge_page(h), mm); 6578 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 6579 /* Optimization, do the COW without a second fault */ 6580 ret = hugetlb_wp(folio, vmf); 6581 } 6582 6583 spin_unlock(vmf->ptl); 6584 6585 /* 6586 * Only set hugetlb_migratable in newly allocated pages. 
Existing pages 6587 * found in the pagecache may not have hugetlb_migratable if they have 6588 * been isolated for migration. 6589 */ 6590 if (new_folio) 6591 folio_set_hugetlb_migratable(folio); 6592 6593 folio_unlock(folio); 6594 out: 6595 hugetlb_vma_unlock_read(vma); 6596 6597 /* 6598 * We must check to release the per-VMA lock. __vmf_anon_prepare() is 6599 * the only way ret can be set to VM_FAULT_RETRY. 6600 */ 6601 if (unlikely(ret & VM_FAULT_RETRY)) 6602 vma_end_read(vma); 6603 6604 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6605 return ret; 6606 6607 backout: 6608 spin_unlock(vmf->ptl); 6609 backout_unlocked: 6610 if (new_folio && !new_pagecache_folio) 6611 restore_reserve_on_error(h, vma, vmf->address, folio); 6612 6613 folio_unlock(folio); 6614 folio_put(folio); 6615 goto out; 6616 } 6617 6618 #ifdef CONFIG_SMP 6619 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) 6620 { 6621 unsigned long key[2]; 6622 u32 hash; 6623 6624 key[0] = (unsigned long) mapping; 6625 key[1] = idx; 6626 6627 hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0); 6628 6629 return hash & (num_fault_mutexes - 1); 6630 } 6631 #else 6632 /* 6633 * For uniprocessor systems we always use a single mutex, so just 6634 * return 0 and avoid the hashing overhead. 6635 */ 6636 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) 6637 { 6638 return 0; 6639 } 6640 #endif 6641 6642 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 6643 unsigned long address, unsigned int flags) 6644 { 6645 vm_fault_t ret; 6646 u32 hash; 6647 struct folio *folio = NULL; 6648 struct folio *pagecache_folio = NULL; 6649 struct hstate *h = hstate_vma(vma); 6650 struct address_space *mapping; 6651 int need_wait_lock = 0; 6652 struct vm_fault vmf = { 6653 .vma = vma, 6654 .address = address & huge_page_mask(h), 6655 .real_address = address, 6656 .flags = flags, 6657 .pgoff = vma_hugecache_offset(h, vma, 6658 address & huge_page_mask(h)), 6659 /* TODO: Track hugetlb faults using vm_fault */ 6660 6661 /* 6662 * Some fields may not be initialized, be careful as it may 6663 * be hard to debug if called functions make assumptions 6664 */ 6665 }; 6666 6667 /* 6668 * Serialize hugepage allocation and instantiation, so that we don't 6669 * get spurious allocation failures if two CPUs race to instantiate 6670 * the same page in the page cache. 6671 */ 6672 mapping = vma->vm_file->f_mapping; 6673 hash = hugetlb_fault_mutex_hash(mapping, vmf.pgoff); 6674 mutex_lock(&hugetlb_fault_mutex_table[hash]); 6675 6676 /* 6677 * Acquire vma lock before calling huge_pte_alloc and hold 6678 * until finished with vmf.pte. This prevents huge_pmd_unshare from 6679 * being called elsewhere and making the vmf.pte no longer valid. 6680 */ 6681 hugetlb_vma_lock_read(vma); 6682 vmf.pte = huge_pte_alloc(mm, vma, vmf.address, huge_page_size(h)); 6683 if (!vmf.pte) { 6684 hugetlb_vma_unlock_read(vma); 6685 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6686 return VM_FAULT_OOM; 6687 } 6688 6689 vmf.orig_pte = huge_ptep_get(mm, vmf.address, vmf.pte); 6690 if (huge_pte_none_mostly(vmf.orig_pte)) { 6691 if (is_pte_marker(vmf.orig_pte)) { 6692 pte_marker marker = 6693 pte_marker_get(pte_to_swp_entry(vmf.orig_pte)); 6694 6695 if (marker & PTE_MARKER_POISONED) { 6696 ret = VM_FAULT_HWPOISON_LARGE | 6697 VM_FAULT_SET_HINDEX(hstate_index(h)); 6698 goto out_mutex; 6699 } else if (WARN_ON_ONCE(marker & PTE_MARKER_GUARD)) { 6700 /* This isn't supported in hugetlb. 
*/ 6701 ret = VM_FAULT_SIGSEGV; 6702 goto out_mutex; 6703 } 6704 } 6705 6706 /* 6707 * Other PTE markers should be handled the same way as none PTE. 6708 * 6709 * hugetlb_no_page will drop vma lock and hugetlb fault 6710 * mutex internally, which make us return immediately. 6711 */ 6712 return hugetlb_no_page(mapping, &vmf); 6713 } 6714 6715 ret = 0; 6716 6717 /* 6718 * vmf.orig_pte could be a migration/hwpoison vmf.orig_pte at this 6719 * point, so this check prevents the kernel from going below assuming 6720 * that we have an active hugepage in pagecache. This goto expects 6721 * the 2nd page fault, and is_hugetlb_entry_(migration|hwpoisoned) 6722 * check will properly handle it. 6723 */ 6724 if (!pte_present(vmf.orig_pte)) { 6725 if (unlikely(is_hugetlb_entry_migration(vmf.orig_pte))) { 6726 /* 6727 * Release the hugetlb fault lock now, but retain 6728 * the vma lock, because it is needed to guard the 6729 * huge_pte_lockptr() later in 6730 * migration_entry_wait_huge(). The vma lock will 6731 * be released there. 6732 */ 6733 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6734 migration_entry_wait_huge(vma, vmf.address, vmf.pte); 6735 return 0; 6736 } else if (unlikely(is_hugetlb_entry_hwpoisoned(vmf.orig_pte))) 6737 ret = VM_FAULT_HWPOISON_LARGE | 6738 VM_FAULT_SET_HINDEX(hstate_index(h)); 6739 goto out_mutex; 6740 } 6741 6742 /* 6743 * If we are going to COW/unshare the mapping later, we examine the 6744 * pending reservations for this page now. This will ensure that any 6745 * allocations necessary to record that reservation occur outside the 6746 * spinlock. Also lookup the pagecache page now as it is used to 6747 * determine if a reservation has been consumed. 6748 */ 6749 if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) && 6750 !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(vmf.orig_pte)) { 6751 if (vma_needs_reservation(h, vma, vmf.address) < 0) { 6752 ret = VM_FAULT_OOM; 6753 goto out_mutex; 6754 } 6755 /* Just decrements count, does not deallocate */ 6756 vma_end_reservation(h, vma, vmf.address); 6757 6758 pagecache_folio = filemap_lock_hugetlb_folio(h, mapping, 6759 vmf.pgoff); 6760 if (IS_ERR(pagecache_folio)) 6761 pagecache_folio = NULL; 6762 } 6763 6764 vmf.ptl = huge_pte_lock(h, mm, vmf.pte); 6765 6766 /* Check for a racing update before calling hugetlb_wp() */ 6767 if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(mm, vmf.address, vmf.pte)))) 6768 goto out_ptl; 6769 6770 /* Handle userfault-wp first, before trying to lock more pages */ 6771 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(mm, vmf.address, vmf.pte)) && 6772 (flags & FAULT_FLAG_WRITE) && !huge_pte_write(vmf.orig_pte)) { 6773 if (!userfaultfd_wp_async(vma)) { 6774 spin_unlock(vmf.ptl); 6775 if (pagecache_folio) { 6776 folio_unlock(pagecache_folio); 6777 folio_put(pagecache_folio); 6778 } 6779 hugetlb_vma_unlock_read(vma); 6780 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6781 return handle_userfault(&vmf, VM_UFFD_WP); 6782 } 6783 6784 vmf.orig_pte = huge_pte_clear_uffd_wp(vmf.orig_pte); 6785 set_huge_pte_at(mm, vmf.address, vmf.pte, vmf.orig_pte, 6786 huge_page_size(hstate_vma(vma))); 6787 /* Fallthrough to CoW */ 6788 } 6789 6790 /* 6791 * hugetlb_wp() requires page locks of pte_page(vmf.orig_pte) and 6792 * pagecache_folio, so here we need take the former one 6793 * when folio != pagecache_folio or !pagecache_folio. 
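 * The folio is only trylocked below; on contention we set need_wait_lock,
 * back out dropping the locks, and folio_wait_locked() just before
 * returning, so the faulting access is retried instead of sleeping with
 * the page table lock held.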
6794 */ 6795 folio = page_folio(pte_page(vmf.orig_pte)); 6796 if (folio != pagecache_folio) 6797 if (!folio_trylock(folio)) { 6798 need_wait_lock = 1; 6799 goto out_ptl; 6800 } 6801 6802 folio_get(folio); 6803 6804 if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { 6805 if (!huge_pte_write(vmf.orig_pte)) { 6806 ret = hugetlb_wp(pagecache_folio, &vmf); 6807 goto out_put_page; 6808 } else if (likely(flags & FAULT_FLAG_WRITE)) { 6809 vmf.orig_pte = huge_pte_mkdirty(vmf.orig_pte); 6810 } 6811 } 6812 vmf.orig_pte = pte_mkyoung(vmf.orig_pte); 6813 if (huge_ptep_set_access_flags(vma, vmf.address, vmf.pte, vmf.orig_pte, 6814 flags & FAULT_FLAG_WRITE)) 6815 update_mmu_cache(vma, vmf.address, vmf.pte); 6816 out_put_page: 6817 if (folio != pagecache_folio) 6818 folio_unlock(folio); 6819 folio_put(folio); 6820 out_ptl: 6821 spin_unlock(vmf.ptl); 6822 6823 if (pagecache_folio) { 6824 folio_unlock(pagecache_folio); 6825 folio_put(pagecache_folio); 6826 } 6827 out_mutex: 6828 hugetlb_vma_unlock_read(vma); 6829 6830 /* 6831 * We must check to release the per-VMA lock. __vmf_anon_prepare() in 6832 * hugetlb_wp() is the only way ret can be set to VM_FAULT_RETRY. 6833 */ 6834 if (unlikely(ret & VM_FAULT_RETRY)) 6835 vma_end_read(vma); 6836 6837 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6838 /* 6839 * Generally it's safe to hold refcount during waiting page lock. But 6840 * here we just wait to defer the next page fault to avoid busy loop and 6841 * the page is not used after unlocked before returning from the current 6842 * page fault. So we are safe from accessing freed page, even if we wait 6843 * here without taking refcount. 6844 */ 6845 if (need_wait_lock) 6846 folio_wait_locked(folio); 6847 return ret; 6848 } 6849 6850 #ifdef CONFIG_USERFAULTFD 6851 /* 6852 * Can probably be eliminated, but still used by hugetlb_mfill_atomic_pte(). 6853 */ 6854 static struct folio *alloc_hugetlb_folio_vma(struct hstate *h, 6855 struct vm_area_struct *vma, unsigned long address) 6856 { 6857 struct mempolicy *mpol; 6858 nodemask_t *nodemask; 6859 struct folio *folio; 6860 gfp_t gfp_mask; 6861 int node; 6862 6863 gfp_mask = htlb_alloc_mask(h); 6864 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask); 6865 /* 6866 * This is used to allocate a temporary hugetlb to hold the copied 6867 * content, which will then be copied again to the final hugetlb 6868 * consuming a reservation. Set the alloc_fallback to false to indicate 6869 * that breaking the per-node hugetlb pool is not allowed in this case. 6870 */ 6871 folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask, false); 6872 mpol_cond_put(mpol); 6873 6874 return folio; 6875 } 6876 6877 /* 6878 * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte 6879 * with modifications for hugetlb pages. 
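 *
 * When the in-atomic copy from userspace fails, a temporary folio is
 * stashed in *foliop and -ENOENT is returned; the caller is expected to
 * finish the copy outside mmap_lock and call in again with *foliop still
 * set, which is handled by the final branch below.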
6880 */ 6881 int hugetlb_mfill_atomic_pte(pte_t *dst_pte, 6882 struct vm_area_struct *dst_vma, 6883 unsigned long dst_addr, 6884 unsigned long src_addr, 6885 uffd_flags_t flags, 6886 struct folio **foliop) 6887 { 6888 struct mm_struct *dst_mm = dst_vma->vm_mm; 6889 bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE); 6890 bool wp_enabled = (flags & MFILL_ATOMIC_WP); 6891 struct hstate *h = hstate_vma(dst_vma); 6892 struct address_space *mapping = dst_vma->vm_file->f_mapping; 6893 pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr); 6894 unsigned long size = huge_page_size(h); 6895 int vm_shared = dst_vma->vm_flags & VM_SHARED; 6896 pte_t _dst_pte; 6897 spinlock_t *ptl; 6898 int ret = -ENOMEM; 6899 struct folio *folio; 6900 bool folio_in_pagecache = false; 6901 6902 if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) { 6903 ptl = huge_pte_lock(h, dst_mm, dst_pte); 6904 6905 /* Don't overwrite any existing PTEs (even markers) */ 6906 if (!huge_pte_none(huge_ptep_get(dst_mm, dst_addr, dst_pte))) { 6907 spin_unlock(ptl); 6908 return -EEXIST; 6909 } 6910 6911 _dst_pte = make_pte_marker(PTE_MARKER_POISONED); 6912 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size); 6913 6914 /* No need to invalidate - it was non-present before */ 6915 update_mmu_cache(dst_vma, dst_addr, dst_pte); 6916 6917 spin_unlock(ptl); 6918 return 0; 6919 } 6920 6921 if (is_continue) { 6922 ret = -EFAULT; 6923 folio = filemap_lock_hugetlb_folio(h, mapping, idx); 6924 if (IS_ERR(folio)) 6925 goto out; 6926 folio_in_pagecache = true; 6927 } else if (!*foliop) { 6928 /* If a folio already exists, then it's UFFDIO_COPY for 6929 * a non-missing case. Return -EEXIST. 6930 */ 6931 if (vm_shared && 6932 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { 6933 ret = -EEXIST; 6934 goto out; 6935 } 6936 6937 folio = alloc_hugetlb_folio(dst_vma, dst_addr, false); 6938 if (IS_ERR(folio)) { 6939 ret = -ENOMEM; 6940 goto out; 6941 } 6942 6943 ret = copy_folio_from_user(folio, (const void __user *) src_addr, 6944 false); 6945 6946 /* fallback to copy_from_user outside mmap_lock */ 6947 if (unlikely(ret)) { 6948 ret = -ENOENT; 6949 /* Free the allocated folio which may have 6950 * consumed a reservation. 6951 */ 6952 restore_reserve_on_error(h, dst_vma, dst_addr, folio); 6953 folio_put(folio); 6954 6955 /* Allocate a temporary folio to hold the copied 6956 * contents. 6957 */ 6958 folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr); 6959 if (!folio) { 6960 ret = -ENOMEM; 6961 goto out; 6962 } 6963 *foliop = folio; 6964 /* Set the outparam foliop and return to the caller to 6965 * copy the contents outside the lock. Don't free the 6966 * folio. 6967 */ 6968 goto out; 6969 } 6970 } else { 6971 if (vm_shared && 6972 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { 6973 folio_put(*foliop); 6974 ret = -EEXIST; 6975 *foliop = NULL; 6976 goto out; 6977 } 6978 6979 folio = alloc_hugetlb_folio(dst_vma, dst_addr, false); 6980 if (IS_ERR(folio)) { 6981 folio_put(*foliop); 6982 ret = -ENOMEM; 6983 *foliop = NULL; 6984 goto out; 6985 } 6986 ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma); 6987 folio_put(*foliop); 6988 *foliop = NULL; 6989 if (ret) { 6990 folio_put(folio); 6991 goto out; 6992 } 6993 } 6994 6995 /* 6996 * If we just allocated a new page, we need a memory barrier to ensure 6997 * that preceding stores to the page become visible before the 6998 * set_pte_at() write. The memory barrier inside __folio_mark_uptodate 6999 * is what we need. 
7000 * 7001 * In the case where we have not allocated a new page (is_continue), 7002 * the page must already be uptodate. UFFDIO_CONTINUE already includes 7003 * an earlier smp_wmb() to ensure that prior stores will be visible 7004 * before the set_pte_at() write. 7005 */ 7006 if (!is_continue) 7007 __folio_mark_uptodate(folio); 7008 else 7009 WARN_ON_ONCE(!folio_test_uptodate(folio)); 7010 7011 /* Add shared, newly allocated pages to the page cache. */ 7012 if (vm_shared && !is_continue) { 7013 ret = -EFAULT; 7014 if (idx >= (i_size_read(mapping->host) >> huge_page_shift(h))) 7015 goto out_release_nounlock; 7016 7017 /* 7018 * Serialization between remove_inode_hugepages() and 7019 * hugetlb_add_to_page_cache() below happens through the 7020 * hugetlb_fault_mutex_table that here must be held by 7021 * the caller. 7022 */ 7023 ret = hugetlb_add_to_page_cache(folio, mapping, idx); 7024 if (ret) 7025 goto out_release_nounlock; 7026 folio_in_pagecache = true; 7027 } 7028 7029 ptl = huge_pte_lock(h, dst_mm, dst_pte); 7030 7031 ret = -EIO; 7032 if (folio_test_hwpoison(folio)) 7033 goto out_release_unlock; 7034 7035 /* 7036 * We allow overwriting a pte marker: consider when both MISSING|WP are 7037 * registered, we first wr-protect a none pte which has no page cache 7038 * page backing it, then access the page. 7039 */ 7040 ret = -EEXIST; 7041 if (!huge_pte_none_mostly(huge_ptep_get(dst_mm, dst_addr, dst_pte))) 7042 goto out_release_unlock; 7043 7044 if (folio_in_pagecache) 7045 hugetlb_add_file_rmap(folio); 7046 else 7047 hugetlb_add_new_anon_rmap(folio, dst_vma, dst_addr); 7048 7049 /* 7050 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY 7051 * with wp flag set, don't set pte write bit. 7052 */ 7053 _dst_pte = make_huge_pte(dst_vma, folio, 7054 !wp_enabled && !(is_continue && !vm_shared)); 7055 /* 7056 * Always mark UFFDIO_COPY page dirty; note that this may not be 7057 * extremely important for hugetlbfs for now since swapping is not 7058 * supported, but we should still be clear that this page cannot be 7059 * thrown away at will, even if write bit not set.
7060 */ 7061 _dst_pte = huge_pte_mkdirty(_dst_pte); 7062 _dst_pte = pte_mkyoung(_dst_pte); 7063 7064 if (wp_enabled) 7065 _dst_pte = huge_pte_mkuffd_wp(_dst_pte); 7066 7067 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size); 7068 7069 hugetlb_count_add(pages_per_huge_page(h), dst_mm); 7070 7071 /* No need to invalidate - it was non-present before */ 7072 update_mmu_cache(dst_vma, dst_addr, dst_pte); 7073 7074 spin_unlock(ptl); 7075 if (!is_continue) 7076 folio_set_hugetlb_migratable(folio); 7077 if (vm_shared || is_continue) 7078 folio_unlock(folio); 7079 ret = 0; 7080 out: 7081 return ret; 7082 out_release_unlock: 7083 spin_unlock(ptl); 7084 if (vm_shared || is_continue) 7085 folio_unlock(folio); 7086 out_release_nounlock: 7087 if (!folio_in_pagecache) 7088 restore_reserve_on_error(h, dst_vma, dst_addr, folio); 7089 folio_put(folio); 7090 goto out; 7091 } 7092 #endif /* CONFIG_USERFAULTFD */ 7093 7094 long hugetlb_change_protection(struct vm_area_struct *vma, 7095 unsigned long address, unsigned long end, 7096 pgprot_t newprot, unsigned long cp_flags) 7097 { 7098 struct mm_struct *mm = vma->vm_mm; 7099 unsigned long start = address; 7100 pte_t *ptep; 7101 pte_t pte; 7102 struct hstate *h = hstate_vma(vma); 7103 long pages = 0, psize = huge_page_size(h); 7104 bool shared_pmd = false; 7105 struct mmu_notifier_range range; 7106 unsigned long last_addr_mask; 7107 bool uffd_wp = cp_flags & MM_CP_UFFD_WP; 7108 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; 7109 7110 /* 7111 * In the case of shared PMDs, the area to flush could be beyond 7112 * start/end. Set range.start/range.end to cover the maximum possible 7113 * range if PMD sharing is possible. 7114 */ 7115 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 7116 0, mm, start, end); 7117 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 7118 7119 BUG_ON(address >= end); 7120 flush_cache_range(vma, range.start, range.end); 7121 7122 mmu_notifier_invalidate_range_start(&range); 7123 hugetlb_vma_lock_write(vma); 7124 i_mmap_lock_write(vma->vm_file->f_mapping); 7125 last_addr_mask = hugetlb_mask_last_page(h); 7126 for (; address < end; address += psize) { 7127 spinlock_t *ptl; 7128 ptep = hugetlb_walk(vma, address, psize); 7129 if (!ptep) { 7130 if (!uffd_wp) { 7131 address |= last_addr_mask; 7132 continue; 7133 } 7134 /* 7135 * Userfaultfd wr-protect requires pgtable 7136 * pre-allocations to install pte markers. 7137 */ 7138 ptep = huge_pte_alloc(mm, vma, address, psize); 7139 if (!ptep) { 7140 pages = -ENOMEM; 7141 break; 7142 } 7143 } 7144 ptl = huge_pte_lock(h, mm, ptep); 7145 if (huge_pmd_unshare(mm, vma, address, ptep)) { 7146 /* 7147 * When uffd-wp is enabled on the vma, unshare 7148 * shouldn't happen at all. Warn about it if it 7149 * happened due to some reason. 7150 */ 7151 WARN_ON_ONCE(uffd_wp || uffd_wp_resolve); 7152 pages++; 7153 spin_unlock(ptl); 7154 shared_pmd = true; 7155 address |= last_addr_mask; 7156 continue; 7157 } 7158 pte = huge_ptep_get(mm, address, ptep); 7159 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { 7160 /* Nothing to do. 
*/ 7161 } else if (unlikely(is_hugetlb_entry_migration(pte))) { 7162 swp_entry_t entry = pte_to_swp_entry(pte); 7163 struct page *page = pfn_swap_entry_to_page(entry); 7164 pte_t newpte = pte; 7165 7166 if (is_writable_migration_entry(entry)) { 7167 if (PageAnon(page)) 7168 entry = make_readable_exclusive_migration_entry( 7169 swp_offset(entry)); 7170 else 7171 entry = make_readable_migration_entry( 7172 swp_offset(entry)); 7173 newpte = swp_entry_to_pte(entry); 7174 pages++; 7175 } 7176 7177 if (uffd_wp) 7178 newpte = pte_swp_mkuffd_wp(newpte); 7179 else if (uffd_wp_resolve) 7180 newpte = pte_swp_clear_uffd_wp(newpte); 7181 if (!pte_same(pte, newpte)) 7182 set_huge_pte_at(mm, address, ptep, newpte, psize); 7183 } else if (unlikely(is_pte_marker(pte))) { 7184 /* 7185 * Do nothing on a poison marker; page is 7186 * corrupted, permissions do not apply. Here 7187 * pte_marker_uffd_wp()==true implies !poison 7188 * because they're mutually exclusive. 7189 */ 7190 if (pte_marker_uffd_wp(pte) && uffd_wp_resolve) 7191 /* Safe to modify directly (non-present->none). */ 7192 huge_pte_clear(mm, address, ptep, psize); 7193 } else if (!huge_pte_none(pte)) { 7194 pte_t old_pte; 7195 unsigned int shift = huge_page_shift(hstate_vma(vma)); 7196 7197 old_pte = huge_ptep_modify_prot_start(vma, address, ptep); 7198 pte = huge_pte_modify(old_pte, newprot); 7199 pte = arch_make_huge_pte(pte, shift, vma->vm_flags); 7200 if (uffd_wp) 7201 pte = huge_pte_mkuffd_wp(pte); 7202 else if (uffd_wp_resolve) 7203 pte = huge_pte_clear_uffd_wp(pte); 7204 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte); 7205 pages++; 7206 } else { 7207 /* None pte */ 7208 if (unlikely(uffd_wp)) 7209 /* Safe to modify directly (none->non-present). */ 7210 set_huge_pte_at(mm, address, ptep, 7211 make_pte_marker(PTE_MARKER_UFFD_WP), 7212 psize); 7213 } 7214 spin_unlock(ptl); 7215 } 7216 /* 7217 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare 7218 * may have cleared our pud entry and done put_page on the page table: 7219 * once we release i_mmap_rwsem, another task can do the final put_page 7220 * and that page table could be reused and filled with junk. If we actually 7221 * did unshare a page of pmds, flush the range corresponding to the pud. 7222 */ 7223 if (shared_pmd) 7224 flush_hugetlb_tlb_range(vma, range.start, range.end); 7225 else 7226 flush_hugetlb_tlb_range(vma, start, end); 7227 /* 7228 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs() as we are 7229 * downgrading page table protection, not changing it to point to a new 7230 * page. 7231 * 7232 * See Documentation/mm/mmu_notifier.rst 7233 */ 7234 i_mmap_unlock_write(vma->vm_file->f_mapping); 7235 hugetlb_vma_unlock_write(vma); 7236 mmu_notifier_invalidate_range_end(&range); 7237 7238 return pages > 0 ? (pages << h->order) : pages; 7239 } 7240 7241 /* Return true if reservation was successful, false otherwise.
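 * from/to are in units of huge pages. A sketch of the expected use by the
 * hugetlbfs mmap/fallocate paths (exact call sites may differ slightly):
 *
 *	if (!hugetlb_reserve_pages(inode, from, to, vma, vm_flags))
 *		return -ENOMEM;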
*/ 7242 bool hugetlb_reserve_pages(struct inode *inode, 7243 long from, long to, 7244 struct vm_area_struct *vma, 7245 vm_flags_t vm_flags) 7246 { 7247 long chg = -1, add = -1, spool_resv, gbl_resv; 7248 struct hstate *h = hstate_inode(inode); 7249 struct hugepage_subpool *spool = subpool_inode(inode); 7250 struct resv_map *resv_map; 7251 struct hugetlb_cgroup *h_cg = NULL; 7252 long gbl_reserve, regions_needed = 0; 7253 7254 /* This should never happen */ 7255 if (from > to) { 7256 VM_WARN(1, "%s called with a negative range\n", __func__); 7257 return false; 7258 } 7259 7260 /* 7261 * vma specific semaphore used for pmd sharing and fault/truncation 7262 * synchronization 7263 */ 7264 hugetlb_vma_lock_alloc(vma); 7265 7266 /* 7267 * Only apply hugepage reservation if asked. At fault time, an 7268 * attempt will be made for VM_NORESERVE to allocate a page 7269 * without using reserves 7270 */ 7271 if (vm_flags & VM_NORESERVE) 7272 return true; 7273 7274 /* 7275 * Shared mappings base their reservation on the number of pages that 7276 * are already allocated on behalf of the file. Private mappings need 7277 * to reserve the full area even if read-only as mprotect() may be 7278 * called to make the mapping read-write. Assume !vma is a shm mapping 7279 */ 7280 if (!vma || vma->vm_flags & VM_MAYSHARE) { 7281 /* 7282 * resv_map can not be NULL as hugetlb_reserve_pages is only 7283 * called for inodes for which resv_maps were created (see 7284 * hugetlbfs_get_inode). 7285 */ 7286 resv_map = inode_resv_map(inode); 7287 7288 chg = region_chg(resv_map, from, to, ®ions_needed); 7289 } else { 7290 /* Private mapping. */ 7291 resv_map = resv_map_alloc(); 7292 if (!resv_map) 7293 goto out_err; 7294 7295 chg = to - from; 7296 7297 set_vma_resv_map(vma, resv_map); 7298 set_vma_resv_flags(vma, HPAGE_RESV_OWNER); 7299 } 7300 7301 if (chg < 0) 7302 goto out_err; 7303 7304 if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h), 7305 chg * pages_per_huge_page(h), &h_cg) < 0) 7306 goto out_err; 7307 7308 if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) { 7309 /* For private mappings, the hugetlb_cgroup uncharge info hangs 7310 * of the resv_map. 7311 */ 7312 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h); 7313 } 7314 7315 /* 7316 * There must be enough pages in the subpool for the mapping. If 7317 * the subpool has a minimum size, there may be some global 7318 * reservations already in place (gbl_reserve). 7319 */ 7320 gbl_reserve = hugepage_subpool_get_pages(spool, chg); 7321 if (gbl_reserve < 0) 7322 goto out_uncharge_cgroup; 7323 7324 /* 7325 * Check enough hugepages are available for the reservation. 7326 * Hand the pages back to the subpool if there are not 7327 */ 7328 if (hugetlb_acct_memory(h, gbl_reserve) < 0) 7329 goto out_put_pages; 7330 7331 /* 7332 * Account for the reservations made. Shared mappings record regions 7333 * that have reservations as they are shared by multiple VMAs. 7334 * When the last VMA disappears, the region map says how much 7335 * the reservation was and the page cache tells how much of 7336 * the reservation was consumed. Private mappings are per-VMA and 7337 * only the consumed reservations are tracked. When the VMA 7338 * disappears, the original reservation is the VMA size and the 7339 * consumed reservations are stored in the map. 
Hence, nothing 7340 * else has to be done for private mappings here 7341 */ 7342 if (!vma || vma->vm_flags & VM_MAYSHARE) { 7343 add = region_add(resv_map, from, to, regions_needed, h, h_cg); 7344 7345 if (unlikely(add < 0)) { 7346 hugetlb_acct_memory(h, -gbl_reserve); 7347 goto out_put_pages; 7348 } else if (unlikely(chg > add)) { 7349 /* 7350 * pages in this range were added to the reserve 7351 * map between region_chg and region_add. This 7352 * indicates a race with alloc_hugetlb_folio. Adjust 7353 * the subpool and reserve counts modified above 7354 * based on the difference. 7355 */ 7356 long rsv_adjust; 7357 7358 /* 7359 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the 7360 * reference to h_cg->css. See comment below for detail. 7361 */ 7362 hugetlb_cgroup_uncharge_cgroup_rsvd( 7363 hstate_index(h), 7364 (chg - add) * pages_per_huge_page(h), h_cg); 7365 7366 rsv_adjust = hugepage_subpool_put_pages(spool, 7367 chg - add); 7368 hugetlb_acct_memory(h, -rsv_adjust); 7369 } else if (h_cg) { 7370 /* 7371 * The file_regions will hold their own reference to 7372 * h_cg->css. So we should release the reference held 7373 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are 7374 * done. 7375 */ 7376 hugetlb_cgroup_put_rsvd_cgroup(h_cg); 7377 } 7378 } 7379 return true; 7380 7381 out_put_pages: 7382 spool_resv = chg - gbl_reserve; 7383 if (spool_resv) { 7384 /* put sub pool's reservation back, chg - gbl_reserve */ 7385 gbl_resv = hugepage_subpool_put_pages(spool, spool_resv); 7386 /* 7387 * subpool's reserved pages can not be put back due to race, 7388 * return to hstate. 7389 */ 7390 hugetlb_acct_memory(h, -gbl_resv); 7391 } 7392 out_uncharge_cgroup: 7393 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h), 7394 chg * pages_per_huge_page(h), h_cg); 7395 out_err: 7396 hugetlb_vma_lock_free(vma); 7397 if (!vma || vma->vm_flags & VM_MAYSHARE) 7398 /* Only call region_abort if the region_chg succeeded but the 7399 * region_add failed or didn't run. 7400 */ 7401 if (chg >= 0 && add < 0) 7402 region_abort(resv_map, from, to, regions_needed); 7403 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 7404 kref_put(&resv_map->refs, resv_map_release); 7405 set_vma_resv_map(vma, NULL); 7406 } 7407 return false; 7408 } 7409 7410 long hugetlb_unreserve_pages(struct inode *inode, long start, long end, 7411 long freed) 7412 { 7413 struct hstate *h = hstate_inode(inode); 7414 struct resv_map *resv_map = inode_resv_map(inode); 7415 long chg = 0; 7416 struct hugepage_subpool *spool = subpool_inode(inode); 7417 long gbl_reserve; 7418 7419 /* 7420 * Since this routine can be called in the evict inode path for all 7421 * hugetlbfs inodes, resv_map could be NULL. 7422 */ 7423 if (resv_map) { 7424 chg = region_del(resv_map, start, end); 7425 /* 7426 * region_del() can fail in the rare case where a region 7427 * must be split and another region descriptor can not be 7428 * allocated. If end == LONG_MAX, it will not fail. 7429 */ 7430 if (chg < 0) 7431 return chg; 7432 } 7433 7434 spin_lock(&inode->i_lock); 7435 inode->i_blocks -= (blocks_per_huge_page(h) * freed); 7436 spin_unlock(&inode->i_lock); 7437 7438 /* 7439 * If the subpool has a minimum size, the number of global 7440 * reservations to be released may be adjusted. 7441 * 7442 * Note that !resv_map implies freed == 0. So (chg - freed) 7443 * won't go negative. 
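 * For example, if region_del() removed 4 reserved pages from the map but
 * only 3 pages had actually been instantiated and freed, the single
 * unconsumed reservation is handed back to the subpool (and, through
 * hugetlb_acct_memory(), to the global pool) below.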
#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
static unsigned long page_table_shareable(struct vm_area_struct *svma,
                                          struct vm_area_struct *vma,
                                          unsigned long addr, pgoff_t idx)
{
        unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
                              svma->vm_start;
        unsigned long sbase = saddr & PUD_MASK;
        unsigned long s_end = sbase + PUD_SIZE;

        /* Allow segments to share if only one is marked locked */
        unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED_MASK;
        unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED_MASK;

        /*
         * match the virtual addresses, permission and the alignment of the
         * page table page.
         *
         * Also, vma_lock (vm_private_data) is required for sharing.
         */
        if (pmd_index(addr) != pmd_index(saddr) ||
            vm_flags != svm_flags ||
            !range_in_vma(svma, sbase, s_end) ||
            !svma->vm_private_data)
                return 0;

        return saddr;
}

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{
        unsigned long start = addr & PUD_MASK;
        unsigned long end = start + PUD_SIZE;

#ifdef CONFIG_USERFAULTFD
        if (uffd_disable_huge_pmd_share(vma))
                return false;
#endif
        /*
         * check on proper vm_flags and page table alignment
         */
        if (!(vma->vm_flags & VM_MAYSHARE))
                return false;
        if (!vma->vm_private_data)      /* vma lock required for sharing */
                return false;
        if (!range_in_vma(vma, start, end))
                return false;
        return true;
}

/*
 * Determine if start,end range within vma could be mapped by shared pmd.
 * If yes, adjust start and end to cover range associated with possible
 * shared pmd mappings.
 */
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                                          unsigned long *start, unsigned long *end)
{
        unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
                      v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);

        /*
         * vma needs to span at least one aligned PUD size, and the range
         * must be at least partially within it.
         */
        if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
            (*end <= v_start) || (*start >= v_end))
                return;

        /* Extend the range to be PUD aligned for a worst case scenario */
        if (*start > v_start)
                *start = ALIGN_DOWN(*start, PUD_SIZE);

        if (*end < v_end)
                *end = ALIGN(*end, PUD_SIZE);
}
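
/*
 * Example (assuming a 1 GiB PUD_SIZE): for a VM_MAYSHARE vma covering
 * [1 GiB, 4 GiB), a caller range of [1.5 GiB, 1.75 GiB) is widened to
 * [1 GiB, 2 GiB), so unmap/invalidate operations also cover any PMD
 * page table that might be shared for that PUD-aligned region.
 */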

/*
 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
 * and returns the corresponding pte. While this is not necessary for the
 * !shared pmd case because we can allocate the pmd later as well, it makes the
 * code much cleaner. pmd allocation is essential for the shared case because
 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
 * bad pmd for sharing.
 */
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, pud_t *pud)
{
        struct address_space *mapping = vma->vm_file->f_mapping;
        pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
                      vma->vm_pgoff;
        struct vm_area_struct *svma;
        unsigned long saddr;
        pte_t *spte = NULL;
        pte_t *pte;

        i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
                if (svma == vma)
                        continue;

                saddr = page_table_shareable(svma, vma, addr, idx);
                if (saddr) {
                        spte = hugetlb_walk(svma, saddr,
                                            vma_mmu_pagesize(svma));
                        if (spte) {
                                ptdesc_pmd_pts_inc(virt_to_ptdesc(spte));
                                break;
                        }
                }
        }

        if (!spte)
                goto out;

        spin_lock(&mm->page_table_lock);
        if (pud_none(*pud)) {
                pud_populate(mm, pud,
                             (pmd_t *)((unsigned long)spte & PAGE_MASK));
                mm_inc_nr_pmds(mm);
        } else {
                ptdesc_pmd_pts_dec(virt_to_ptdesc(spte));
        }
        spin_unlock(&mm->page_table_lock);
out:
        pte = (pte_t *)pmd_alloc(mm, pud, addr);
        i_mmap_unlock_read(mapping);
        return pte;
}

/*
 * unmap huge page backed by shared pte.
 *
 * Called with page table lock held.
 *
 * returns: 1 successfully unmapped a shared pte page
 *          0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
                     unsigned long addr, pte_t *ptep)
{
        unsigned long sz = huge_page_size(hstate_vma(vma));
        pgd_t *pgd = pgd_offset(mm, addr);
        p4d_t *p4d = p4d_offset(pgd, addr);
        pud_t *pud = pud_offset(p4d, addr);

        i_mmap_assert_write_locked(vma->vm_file->f_mapping);
        hugetlb_vma_assert_locked(vma);
        if (sz != PMD_SIZE)
                return 0;
        if (!ptdesc_pmd_pts_count(virt_to_ptdesc(ptep)))
                return 0;

        pud_clear(pud);
        ptdesc_pmd_pts_dec(virt_to_ptdesc(ptep));
        mm_dec_nr_pmds(mm);
        return 1;
}

#else /* !CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, pud_t *pud)
{
        return NULL;
}

int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
                     unsigned long addr, pte_t *ptep)
{
        return 0;
}

void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                                          unsigned long *start, unsigned long *end)
{
}

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{
        return false;
}
#endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
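/*
 * Allocate (or find) the page table entry that will map a huge page of
 * size @sz at @addr: the PUD itself for PUD-sized pages, otherwise a
 * PMD, shared with other mappings of the same file range when
 * want_pmd_share() allows it. Returns NULL if an intermediate page
 * table level could not be allocated.
 */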
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        p4d = p4d_alloc(mm, pgd, addr);
        if (!p4d)
                return NULL;
        pud = pud_alloc(mm, p4d, addr);
        if (pud) {
                if (sz == PUD_SIZE) {
                        pte = (pte_t *)pud;
                } else {
                        BUG_ON(sz != PMD_SIZE);
                        if (want_pmd_share(vma, addr) && pud_none(*pud))
                                pte = huge_pmd_share(mm, vma, addr, pud);
                        else
                                pte = (pte_t *)pmd_alloc(mm, pud, addr);
                }
        }

        if (pte) {
                pte_t pteval = ptep_get_lockless(pte);

                BUG_ON(pte_present(pteval) && !pte_huge(pteval));
        }

        return pte;
}

/*
 * huge_pte_offset() - Walk the page table to resolve the hugepage
 * entry at address @addr
 *
 * Return: Pointer to page table entry (PUD or PMD) for
 * address @addr, or NULL if a !p*d_present() entry is encountered and the
 * size @sz doesn't match the hugepage size at this level of the page
 * table.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        if (!pgd_present(*pgd))
                return NULL;
        p4d = p4d_offset(pgd, addr);
        if (!p4d_present(*p4d))
                return NULL;

        pud = pud_offset(p4d, addr);
        if (sz == PUD_SIZE)
                /* must be pud huge, non-present or none */
                return (pte_t *)pud;
        if (!pud_present(*pud))
                return NULL;
        /* must have a valid entry and size to go further */

        pmd = pmd_offset(pud, addr);
        /* must be pmd huge, non-present or none */
        return (pte_t *)pmd;
}

/*
 * Return a mask that can be used to update an address to the last huge
 * page in a page table page mapping size. Used to skip non-present
 * page table entries when linearly scanning address ranges. Architectures
 * with unique huge page to page table relationships can define their own
 * version of this routine.
 */
unsigned long hugetlb_mask_last_page(struct hstate *h)
{
        unsigned long hp_size = huge_page_size(h);

        if (hp_size == PUD_SIZE)
                return P4D_SIZE - PUD_SIZE;
        else if (hp_size == PMD_SIZE)
                return PUD_SIZE - PMD_SIZE;
        else
                return 0UL;
}

#else

/* See description above. Architectures can provide their own version. */
__weak unsigned long hugetlb_mask_last_page(struct hstate *h)
{
#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
        if (huge_page_size(h) == PMD_SIZE)
                return PUD_SIZE - PMD_SIZE;
#endif
        return 0UL;
}

#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
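
/*
 * For example, with 4 KiB base pages on x86-64 (PMD_SIZE = 2 MiB,
 * PUD_SIZE = 1 GiB), hugetlb_mask_last_page() for a 2 MiB hstate is
 * PUD_SIZE - PMD_SIZE: or'ing it into an address moves a linear scan
 * to the last 2 MiB slot of the current PUD, so a non-present PUD can
 * be skipped in one step instead of one PMD entry at a time.
 */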

/**
 * folio_isolate_hugetlb - try to isolate an allocated hugetlb folio
 * @folio: the folio to isolate
 * @list: the list to add the folio to on success
 *
 * Isolate an allocated (refcount > 0) hugetlb folio, marking it as
 * isolated/non-migratable, and moving it from the active list to the
 * given list.
 *
 * Isolation will fail if @folio is not an allocated hugetlb folio, or if
 * it is already isolated/non-migratable.
 *
 * On success, an additional folio reference is taken that must be dropped
 * using folio_putback_hugetlb() to undo the isolation.
 *
 * Return: True if isolation worked, otherwise False.
 */
bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list)
{
        bool ret = true;

        spin_lock_irq(&hugetlb_lock);
        if (!folio_test_hugetlb(folio) ||
            !folio_test_hugetlb_migratable(folio) ||
            !folio_try_get(folio)) {
                ret = false;
                goto unlock;
        }
        folio_clear_hugetlb_migratable(folio);
        list_move_tail(&folio->lru, list);
unlock:
        spin_unlock_irq(&hugetlb_lock);
        return ret;
}

int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{
        int ret = 0;

        *hugetlb = false;
        spin_lock_irq(&hugetlb_lock);
        if (folio_test_hugetlb(folio)) {
                *hugetlb = true;
                if (folio_test_hugetlb_freed(folio))
                        ret = 0;
                else if (folio_test_hugetlb_migratable(folio) || unpoison)
                        ret = folio_try_get(folio);
                else
                        ret = -EBUSY;
        }
        spin_unlock_irq(&hugetlb_lock);
        return ret;
}

int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
                               bool *migratable_cleared)
{
        int ret;

        spin_lock_irq(&hugetlb_lock);
        ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared);
        spin_unlock_irq(&hugetlb_lock);
        return ret;
}

/**
 * folio_putback_hugetlb - unisolate a hugetlb folio
 * @folio: the isolated hugetlb folio
 *
 * Putback/un-isolate the hugetlb folio that was previously isolated using
 * folio_isolate_hugetlb(): marking it non-isolated/migratable and putting it
 * back onto the active list.
 *
 * Will drop the additional folio reference obtained through
 * folio_isolate_hugetlb().
 */
void folio_putback_hugetlb(struct folio *folio)
{
        spin_lock_irq(&hugetlb_lock);
        folio_set_hugetlb_migratable(folio);
        list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist);
        spin_unlock_irq(&hugetlb_lock);
        folio_put(folio);
}
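
/*
 * Finalize hugetlb folio migration: transfer cgroup charges and the
 * migrate reason to the new folio, let the old folio inherit any
 * "temporary" status (fixing up per-node surplus counts if the folios
 * sit on different nodes), and mark the new folio migratable again.
 */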
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
{
        struct hstate *h = folio_hstate(old_folio);

        hugetlb_cgroup_migrate(old_folio, new_folio);
        set_page_owner_migrate_reason(&new_folio->page, reason);

        /*
         * transfer temporary state of the new hugetlb folio. This is
         * reverse to other transitions because the new folio is going to
         * be final while the old one will be freed, so it takes over
         * the temporary status.
         *
         * Also note that we have to transfer the per-node surplus state
         * here as well, otherwise the global surplus count will not match
         * the per-node counts.
         */
        if (folio_test_hugetlb_temporary(new_folio)) {
                int old_nid = folio_nid(old_folio);
                int new_nid = folio_nid(new_folio);

                folio_set_hugetlb_temporary(old_folio);
                folio_clear_hugetlb_temporary(new_folio);

                /*
                 * There is no need to transfer the per-node surplus state
                 * when we do not cross the node.
                 */
                if (new_nid == old_nid)
                        return;
                spin_lock_irq(&hugetlb_lock);
                if (h->surplus_huge_pages_node[old_nid]) {
                        h->surplus_huge_pages_node[old_nid]--;
                        h->surplus_huge_pages_node[new_nid]++;
                }
                spin_unlock_irq(&hugetlb_lock);
        }

        /*
         * Our old folio is isolated and has "migratable" cleared until it
         * is put back. As migration succeeded, set the new folio "migratable"
         * and add it to the active list.
         */
        spin_lock_irq(&hugetlb_lock);
        folio_set_hugetlb_migratable(new_folio);
        list_move_tail(&new_folio->lru, &(folio_hstate(new_folio))->hugepage_activelist);
        spin_unlock_irq(&hugetlb_lock);
}

static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
                                 unsigned long start,
                                 unsigned long end)
{
        struct hstate *h = hstate_vma(vma);
        unsigned long sz = huge_page_size(h);
        struct mm_struct *mm = vma->vm_mm;
        struct mmu_notifier_range range;
        unsigned long address;
        spinlock_t *ptl;
        pte_t *ptep;

        if (!(vma->vm_flags & VM_MAYSHARE))
                return;

        if (start >= end)
                return;

        flush_cache_range(vma, start, end);
        /*
         * No need to call adjust_range_if_pmd_sharing_possible(), because
         * we have already done the PUD_SIZE alignment.
         */
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
                                start, end);
        mmu_notifier_invalidate_range_start(&range);
        hugetlb_vma_lock_write(vma);
        i_mmap_lock_write(vma->vm_file->f_mapping);
        for (address = start; address < end; address += PUD_SIZE) {
                ptep = hugetlb_walk(vma, address, sz);
                if (!ptep)
                        continue;
                ptl = huge_pte_lock(h, mm, ptep);
                huge_pmd_unshare(mm, vma, address, ptep);
                spin_unlock(ptl);
        }
        flush_hugetlb_tlb_range(vma, start, end);
        i_mmap_unlock_write(vma->vm_file->f_mapping);
        hugetlb_vma_unlock_write(vma);
        /*
         * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see
         * Documentation/mm/mmu_notifier.rst.
         */
        mmu_notifier_invalidate_range_end(&range);
}

/*
 * This function will unconditionally remove all the shared pmd pgtable entries
 * within the specific vma for a hugetlbfs memory range.
 */
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
{
        hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
                             ALIGN_DOWN(vma->vm_end, PUD_SIZE));
}