// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>
#include <linux/migrate.h>
#include <linux/nospec.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/page_owner.h>
#include "internal.h"
#include "hugetlb_vmemmap.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
static bool hugetlb_cma_page(struct page *page, unsigned int order)
{
	return cma_pages_valid(hugetlb_cma[page_to_nid(page)], page,
				1 << order);
}
#else
static bool hugetlb_cma_page(struct page *page, unsigned int order)
{
	return false;
}
#endif
static unsigned long hugetlb_cma_size __initdata;

/*
 * Minimum page order among possible hugepage sizes, set to a proper value
 * at boot time.
 */
static unsigned int minimum_order __read_mostly = UINT_MAX;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static bool __initdata parsed_valid_hugepagesz = true;
static bool __initdata parsed_default_hugepagesz;
static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
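 *
 * A faulting path typically does something like the following
 * (illustrative sketch only; hugetlb_fault_mutex_hash() is defined
 * further down in this file):
 *
 *	hash = hugetlb_fault_mutex_hash(mapping, idx);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... handle the fault ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);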
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);

static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
	if (spool->count)
		return false;
	if (spool->max_hpages != -1)
		return spool->used_hpages == 0;
	if (spool->min_hpages != -1)
		return spool->rsv_hpages == spool->min_hpages;

	return true;
}

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
						unsigned long irq_flags)
{
	spin_unlock_irqrestore(&spool->lock, irq_flags);

	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
	if (subpool_is_free(spool)) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	unsigned long flags;

	spin_lock_irqsave(&spool->lock, flags);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool, flags);
}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock_irq(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock_irq(&spool->lock);
	return ret;
}

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
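 *
 * Illustrative example (editor's sketch, not taken from a particular
 * caller): with max_hpages == 4, min_hpages == 2, used_hpages == 2 and
 * rsv_hpages == 1, putting one page drops used_hpages below the
 * minimum, so the freed page is retained as a subpool reserve
 * (rsv_hpages becomes 2) and 0 is returned: the caller must not drop
 * a global reservation for it.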
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;
	unsigned long flags;

	if (!spool)
		return delta;

	spin_lock_irqsave(&spool->lock, flags);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool, flags);

	return ret;
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/* Helper that removes a struct file_region from the resv_map cache and returns
 * it for use.
 */
static struct file_region *
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
{
	struct file_region *nrg = NULL;

	VM_BUG_ON(resv->region_cache_count <= 0);

	resv->region_cache_count--;
	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
	list_del(&nrg->link);

	nrg->from = from;
	nrg->to = to;

	return nrg;
}

static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
					      struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	nrg->reservation_counter = rg->reservation_counter;
	nrg->css = rg->css;
	if (rg->css)
		css_get(rg->css);
#endif
}

/* Helper that records hugetlb_cgroup uncharge info. */
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
						struct hstate *h,
						struct resv_map *resv,
						struct file_region *nrg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (h_cg) {
		nrg->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		nrg->css = &h_cg->css;
		/*
		 * The caller will hold exactly one h_cg->css reference for the
		 * whole contiguous reservation region. But this area might be
		 * scattered when there are already some file_regions residing
		 * in it. As a result, many file_regions may share only one css
		 * reference. In order to ensure that one file_region must hold
		 * exactly one h_cg->css reference, we should do css_get for
		 * each file_region and leave the reference held by caller
		 * untouched.
		 */
		css_get(&h_cg->css);
		if (!resv->pages_per_hpage)
			resv->pages_per_hpage = pages_per_huge_page(h);
		/* pages_per_hpage should be the same for all entries in
		 * a resv_map.
		 */
		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
	} else {
		nrg->reservation_counter = NULL;
		nrg->css = NULL;
	}
#endif
}

static void put_uncharge_info(struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (rg->css)
		css_put(rg->css);
#endif
}

static bool has_same_uncharge_info(struct file_region *rg,
				   struct file_region *org)
{
#ifdef CONFIG_CGROUP_HUGETLB
	return rg->reservation_counter == org->reservation_counter &&
	       rg->css == org->css;

#else
	return true;
#endif
}

static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{
	struct file_region *nrg = NULL, *prg = NULL;

	prg = list_prev_entry(rg, link);
	if (&prg->link != &resv->regions && prg->to == rg->from &&
	    has_same_uncharge_info(prg, rg)) {
		prg->to = rg->to;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);

		rg = prg;
	}

	nrg = list_next_entry(rg, link);
	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
	    has_same_uncharge_info(nrg, rg)) {
		nrg->from = rg->from;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);
	}
}

static inline long
hugetlb_resv_map_add(struct resv_map *map, struct file_region *rg, long from,
		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
		     long *regions_needed)
{
	struct file_region *nrg;

	if (!regions_needed) {
		nrg = get_file_region_entry_from_cache(map, from, to);
		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
		list_add(&nrg->link, rg->link.prev);
		coalesce_file_region(map, nrg);
	} else
		*regions_needed += 1;

	return to - from;
}

/*
 * Must be called with resv->lock held.
 *
 * Calling this with regions_needed != NULL will count the number of pages
 * to be added but will not modify the linked list. In that case,
 * regions_needed will indicate the number of file_regions needed in the
 * cache to add the regions for this range.
 */
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
				     struct hugetlb_cgroup *h_cg,
				     struct hstate *h, long *regions_needed)
{
	long add = 0;
	struct list_head *head = &resv->regions;
	long last_accounted_offset = f;
	struct file_region *rg = NULL, *trg = NULL;

	if (regions_needed)
		*regions_needed = 0;

	/* In this loop, we essentially handle an entry for the range
	 * [last_accounted_offset, rg->from), at every iteration, with some
	 * bounds checking.
	 */
	list_for_each_entry_safe(rg, trg, head, link) {
		/* Skip irrelevant regions that start before our range. */
		if (rg->from < f) {
			/* If this region ends after the last accounted offset,
			 * then we need to update last_accounted_offset.
			 */
			if (rg->to > last_accounted_offset)
				last_accounted_offset = rg->to;
			continue;
		}

		/* When we find a region that starts beyond our range, we've
		 * finished.
		 */
		if (rg->from >= t)
			break;

		/* Add an entry for last_accounted_offset -> rg->from, and
		 * update last_accounted_offset.
		 */
		if (rg->from > last_accounted_offset)
			add += hugetlb_resv_map_add(resv, rg,
						    last_accounted_offset,
						    rg->from, h, h_cg,
						    regions_needed);

		last_accounted_offset = rg->to;
	}

	/* Handle the case where our range extends beyond
	 * last_accounted_offset.
	 */
	if (last_accounted_offset < t)
		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
					    t, h, h_cg, regions_needed);

	return add;
}

/* Must be called with resv->lock acquired. Will drop lock to allocate entries.
 */
static int allocate_file_region_entries(struct resv_map *resv,
					int regions_needed)
	__must_hold(&resv->lock)
{
	struct list_head allocated_regions;
	int to_allocate = 0, i = 0;
	struct file_region *trg = NULL, *rg = NULL;

	VM_BUG_ON(regions_needed < 0);

	INIT_LIST_HEAD(&allocated_regions);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations plus regions_needed.
	 *
	 * This is a while loop because when we drop the lock, some other call
	 * to region_add or region_del may have consumed some region_entries,
	 * so we keep looping here until we finally have enough entries for
	 * (adds_in_progress + regions_needed).
	 */
	while (resv->region_cache_count <
	       (resv->adds_in_progress + regions_needed)) {
		to_allocate = resv->adds_in_progress + regions_needed -
			      resv->region_cache_count;

		/* At this point, we should have enough entries in the cache
		 * for all the existing adds_in_progress. We should only be
		 * needing to allocate for regions_needed.
		 */
		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);

		spin_unlock(&resv->lock);
		for (i = 0; i < to_allocate; i++) {
			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
			if (!trg)
				goto out_of_memory;
			list_add(&trg->link, &allocated_regions);
		}

		spin_lock(&resv->lock);

		list_splice(&allocated_regions, &resv->region_cache);
		resv->region_cache_count += to_allocate;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
		list_del(&rg->link);
		kfree(rg);
	}
	return -ENOMEM;
}

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  Regions will be taken from the cache to fill in this range.
 * Sufficient regions should exist in the cache due to the previous
 * call to region_chg with the same range, but in some cases the cache will not
 * have sufficient entries due to races with other code doing region_add or
 * region_del.  The extra needed entries will be allocated.
 *
 * regions_needed is the out value provided by a previous call to region_chg.
 *
 * Return the number of new huge pages added to the map.  This number is
 * greater than or equal to zero.  If file_region entries needed to be
 * allocated for this operation and we were not able to allocate, it returns
 * -ENOMEM.  region_add of regions of length 1 never allocates file_regions
 * and cannot fail; region_chg will always allocate at least 1 entry and a
 * region_add for 1 page will only require at most 1 entry.
 */
static long region_add(struct resv_map *resv, long f, long t,
		       long in_regions_needed, struct hstate *h,
		       struct hugetlb_cgroup *h_cg)
{
	long add = 0, actual_regions_needed = 0;

	spin_lock(&resv->lock);
retry:

	/* Count how many regions are actually needed to execute this add. */
	add_reservation_in_range(resv, f, t, NULL, NULL,
				 &actual_regions_needed);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * this add operation.
	 * Note that actual_regions_needed may be greater
	 * than in_regions_needed, as the resv_map may have been modified since
	 * the region_chg call. In this case, we need to make sure that we
	 * allocate extra entries, such that we have enough for all the
	 * existing adds_in_progress, plus the excess needed for this
	 * operation.
	 */
	if (actual_regions_needed > in_regions_needed &&
	    resv->region_cache_count <
		resv->adds_in_progress +
		(actual_regions_needed - in_regions_needed)) {
		/* region_add operation of range 1 should never need to
		 * allocate file_region entries.
		 */
		VM_BUG_ON(t - f <= 1);

		if (allocate_file_region_entries(
			    resv, actual_regions_needed - in_regions_needed)) {
			return -ENOMEM;
		}

		goto retry;
	}

	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);

	resv->adds_in_progress -= in_regions_needed;

	spin_unlock(&resv->lock);
	return add;
}

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  A number of new file_region structures is added to the cache as a
 * placeholder, for the subsequent region_add call to use. At least 1
 * file_region structure is added.
 *
 * out_regions_needed is the number of regions added to the
 * resv->adds_in_progress.  This value needs to be provided to a follow up call
 * to region_add or region_abort for proper accounting.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater than or
 * equal to zero.  -ENOMEM is returned if a new file_region structure or
 * cache entry is needed and cannot be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t,
		       long *out_regions_needed)
{
	long chg = 0;

	spin_lock(&resv->lock);

	/* Count how many hugepages in this range are NOT represented. */
	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
				       out_regions_needed);

	if (*out_regions_needed == 0)
		*out_regions_needed = 1;

	if (allocate_file_region_entries(resv, *out_regions_needed))
		return -ENOMEM;

	resv->adds_in_progress += *out_regions_needed;

	spin_unlock(&resv->lock);
	return chg;
}

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter.  regions_needed
 * is the value returned by the region_chg call; it is used to decrement
 * the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
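 *
 * Typical calling sequence (illustrative sketch; the real callers are
 * the reservation routines further down in this file):
 *
 *	chg = region_chg(resv, f, t, &regions_needed);
 *	if (chg < 0)
 *		return chg;
 *	... charge cgroup/subpool for chg pages ...
 *	if (the charge failed)
 *		region_abort(resv, f, t, regions_needed);
 *	else
 *		region_add(resv, f, t, regions_needed, h, h_cg);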
 */
static void region_abort(struct resv_map *resv, long f, long t,
			 long regions_needed)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress -= regions_needed;
	spin_unlock(&resv->lock);
}

/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to).  However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.  Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
						       struct file_region,
						       link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;
			hugetlb_cgroup_uncharge_file_region(
				resv, rg, t - f, false);

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;

			copy_hugetlb_cgroup_uncharge_info(nrg, rg);

			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - rg->from, true);
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {	/* Trim beginning of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    t - rg->from, false);

			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - f, false);

			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}

/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.
 * The huge page itself was freed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;
	bool reserved = false;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust > 0) {
		struct hstate *h = hstate_inode(inode);

		if (!hugetlb_acct_memory(h, 1))
			reserved = true;
	} else if (!rsv_adjust) {
		reserved = true;
	}

	if (!reserved)
		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have its future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held.  It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and this persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have
 * consumed a reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

static void
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
					  struct hugetlb_cgroup *h_cg,
					  struct hstate *h)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (!h_cg || !h) {
		resv_map->reservation_counter = NULL;
		resv_map->pages_per_hpage = 0;
		resv_map->css = NULL;
	} else {
		resv_map->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		resv_map->pages_per_hpage = pages_per_huge_page(h);
		resv_map->css = &h_cg->css;
	}
#endif
}

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);

	resv_map->adds_in_progress = 0;
	/*
	 * Initialize these to 0. On shared mappings, 0's here indicate these
	 * fields don't do cgroup accounting. On private mappings, these will be
	 * re-initialized to the proper values, to indicate that hugetlb cgroup
	 * reservations are to be un-charged from here.
	 */
	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	/*
	 * At inode evict time, i_mapping may not point to the original
	 * address space within the inode.  This original address space
	 * contains the pointer to the resv_map.
	 * So, always use the address space embedded within the inode.
	 * The VERY common case is inode->mapping == &inode->i_data, but
	 * this may not be true for device special inodes.
	 */
	return (struct resv_map *)(&inode->i_data)->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}

/*
 * Reset and decrement one ref on hugepage private reservation.
 * Called with mm->mmap_sem writer semaphore held.
 * This function should only be used by move_vma() and operate on
 * same sized vma. It should never come here with last ref on the
 * reservation.
 */
void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	/*
	 * Clear the old hugetlb private page reservation.
	 * It has already been transferred to new_vma.
	 *
	 * During a mremap() operation of a hugetlb vma we call move_vma()
	 * which copies vma into new_vma and unmaps vma. After the copy
	 * operation both new_vma and vma share a reference to the resv_map
	 * struct, and at that point vma is about to be unmapped. We don't
	 * want to return the reservation to the pool at unmap of vma because
	 * the reservation still lives on in new_vma, so simply decrement the
	 * ref here and remove the resv_map reference from this vma.
	 */
	struct resv_map *reservations = vma_resv_map(vma);

	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
		kref_put(&reservations->refs, resv_map_release);
	}

	reset_vma_resv_huge_pages(vma);
}

/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by another process
		 * (chg == 0), so we should decrement the reserved count.
		 * Without decrementing, the reserve count remains after the
		 * inode is released, because this allocated page will go into
		 * the page cache and be regarded as coming from the reserved
		 * pool when it is released.  Currently, we don't have any
		 * other solution to deal with this situation properly, so add
		 * a work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return true;
		else
			return false;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE) {
		/*
		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
		 * be a region map for all pages.  The only situation where
		 * there is no region map is if a hole was punched via
		 * fallocate.  In this case, there really are no reserves to
		 * use.  This situation is indicated if chg != 0.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Like the shared case above, a hole punch or truncate
		 * could have been performed on the private mapping.
		 * Examine the value of chg to determine if reserves
		 * actually exist or were previously consumed.
		 * Very Subtle - The value of chg comes from a previous
		 * call to vma_needs_reserves().  The reserve map for
		 * private mappings has different (opposite) semantics
		 * than that of shared mappings.  vma_needs_reserves()
		 * has already taken this difference in semantics into
		 * account.  Therefore, the meaning of chg is the same
		 * as in the shared case above.  Code could easily be
		 * combined, but keeping it separate draws attention to
		 * subtle differences.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	return false;
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);

	lockdep_assert_held(&hugetlb_lock);
	VM_BUG_ON_PAGE(page_count(page), page);

	list_move(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
	SetHPageFreed(page);
}

static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
{
	struct page *page;
	bool pin = !!(current->flags & PF_MEMALLOC_PIN);

	lockdep_assert_held(&hugetlb_lock);
	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
		if (pin && !is_pinnable_page(page))
			continue;

		if (PageHWPoison(page))
			continue;

		list_move(&page->lru, &h->hugepage_activelist);
		set_page_refcounted(page);
		ClearHPageFreed(page);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		return page;
	}

	return NULL;
}

static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
							nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = NUMA_NO_NODE;

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct page *page;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * no need to ask again on the same node.
		 * Pool is node rather than zone aware.
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		page = dequeue_huge_page_node_exact(h, node);
		if (page)
			return page;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct page *page = NULL;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves.  This check ensures that reservations are
	 * not "stolen".  The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma, chg) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);

	if (mpol_is_preferred_many(mpol)) {
		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);

		/* Fallback to all nodes if page==NULL */
		nodemask = NULL;
	}

	if (!page)
		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);

	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
		SetHPageRestoreReserve(page);
		h->resv_huge_pages--;
	}

	mpol_cond_put(mpol);
	return page;

err:
	return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * helper for remove_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
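 *
 * Illustrative example (editor's sketch): with *nodes_allowed == {0,2}
 * and h->next_nid_to_free == 1, the saved node is first advanced to 2
 * (the next allowed node), 2 is returned, and next_nid_to_free is
 * advanced to 0 for the following attempt, wrapping around the node
 * mask.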
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)

/* used to demote non-gigantic huge pages as well */
static void __destroy_compound_gigantic_page(struct page *page,
					unsigned int order, bool demote)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	atomic_set(compound_mapcount_ptr(page), 0);
	atomic_set(compound_pincount_ptr(page), 0);

	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		p->mapping = NULL;
		clear_compound_head(p);
		if (!demote)
			set_page_refcounted(p);
	}

	set_compound_order(page, 0);
#ifdef CONFIG_64BIT
	page[1].compound_nr = 0;
#endif
	__ClearPageHead(page);
}

static void destroy_compound_hugetlb_page_for_demote(struct page *page,
					unsigned int order)
{
	__destroy_compound_gigantic_page(page, order, true);
}

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_page(struct page *page,
					unsigned int order)
{
	__destroy_compound_gigantic_page(page, order, false);
}

static void free_gigantic_page(struct page *page, unsigned int order)
{
	/*
	 * If the page isn't allocated using the cma allocator,
	 * cma_release() returns false.
	 */
#ifdef CONFIG_CMA
	if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
		return;
#endif

	free_contig_range(page_to_pfn(page), 1 << order);
}

#ifdef CONFIG_CONTIG_ALLOC
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	unsigned long nr_pages = pages_per_huge_page(h);
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

#ifdef CONFIG_CMA
	{
		struct page *page;
		int node;

		if (hugetlb_cma[nid]) {
			page = cma_alloc(hugetlb_cma[nid], nr_pages,
					huge_page_order(h), true);
			if (page)
				return page;
		}

		if (!(gfp_mask & __GFP_THISNODE)) {
			for_each_node_mask(node, *nodemask) {
				if (node == nid || !hugetlb_cma[node])
					continue;

				page = cma_alloc(hugetlb_cma[node], nr_pages,
						huge_page_order(h), true);
				if (page)
					return page;
			}
		}
	}
#endif

	return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
}

#else /* !CONFIG_CONTIG_ALLOC */
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
static inline void free_gigantic_page(struct page *page, unsigned int order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
						unsigned int order) { }
#endif

/*
 * Remove hugetlb page from lists, and update dtor so that page appears
 * as just a compound page.
 *
 * A reference is held on the page, except in the case of demote.
 *
 * Must be called with hugetlb lock held.
 */
static void __remove_hugetlb_page(struct hstate *h, struct page *page,
							bool adjust_surplus,
							bool demote)
{
	int nid = page_to_nid(page);

	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);

	lockdep_assert_held(&hugetlb_lock);
	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	list_del(&page->lru);

	if (HPageFreed(page)) {
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
	}
	if (adjust_surplus) {
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	}

	/*
	 * Very subtle
	 *
	 * For non-gigantic pages set the destructor to the normal compound
	 * page dtor.  This is needed in case someone takes an additional
	 * temporary ref to the page, and freeing is delayed until they drop
	 * their reference.
	 *
	 * For gigantic pages set the destructor to the null dtor.  This
	 * destructor will never be called.  Before freeing the gigantic
	 * page destroy_compound_gigantic_page will turn the compound page
	 * into a simple group of pages.  After this the destructor does not
	 * apply.
	 *
	 * This handles the case where more than one ref is held when and
	 * after update_and_free_page is called.
	 *
	 * In the case of demote we do not ref count the page as it will soon
	 * be turned into a page of smaller size.
	 */
	if (!demote)
		set_page_refcounted(page);
	if (hstate_is_gigantic(h))
		set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
	else
		set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[nid]--;
}

static void remove_hugetlb_page(struct hstate *h, struct page *page,
							bool adjust_surplus)
{
	__remove_hugetlb_page(h, page, adjust_surplus, false);
}

static void remove_hugetlb_page_for_demote(struct hstate *h, struct page *page,
							bool adjust_surplus)
{
	__remove_hugetlb_page(h, page, adjust_surplus, true);
}

static void add_hugetlb_page(struct hstate *h, struct page *page,
			     bool adjust_surplus)
{
	int zeroed;
	int nid = page_to_nid(page);

	VM_BUG_ON_PAGE(!HPageVmemmapOptimized(page), page);

	lockdep_assert_held(&hugetlb_lock);

	INIT_LIST_HEAD(&page->lru);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;

	if (adjust_surplus) {
		h->surplus_huge_pages++;
		h->surplus_huge_pages_node[nid]++;
	}

	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
	set_page_private(page, 0);
	SetHPageVmemmapOptimized(page);

	/*
	 * This page is about to be managed by the hugetlb allocator and
	 * should have no users.  Drop our reference, and check for others
	 * just in case.
	 */
	zeroed = put_page_testzero(page);
	if (!zeroed)
		/*
		 * It is VERY unlikely someone else has taken a ref on
		 * the page.  In this case, we simply return as the
		 * hugetlb destructor (free_huge_page) will be called
		 * when this other ref is dropped.
		 */
		return;

	arch_clear_hugepage_flags(page);
	enqueue_huge_page(h, page);
}

static void __update_and_free_page(struct hstate *h, struct page *page)
{
	int i;
	struct page *subpage = page;

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	if (alloc_huge_page_vmemmap(h, page)) {
		spin_lock_irq(&hugetlb_lock);
		/*
		 * If we cannot allocate vmemmap pages, just refuse to free the
		 * page, put the page back on the hugetlb free list and treat
		 * it as a surplus page.
		 */
		add_hugetlb_page(h, page, true);
		spin_unlock_irq(&hugetlb_lock);
		return;
	}

	for (i = 0; i < pages_per_huge_page(h);
	     i++, subpage = mem_map_next(subpage, page, i)) {
		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_private |
				1 << PG_writeback);
	}

	/*
	 * Non-gigantic pages demoted from CMA allocated gigantic pages
	 * need to be given back to CMA in free_gigantic_page.
	 */
	if (hstate_is_gigantic(h) ||
	    hugetlb_cma_page(page, huge_page_order(h))) {
		destroy_compound_gigantic_page(page, huge_page_order(h));
		free_gigantic_page(page, huge_page_order(h));
	} else {
		__free_pages(page, huge_page_order(h));
	}
}

/*
 * Because update_and_free_page() can be called from any context, we cannot
 * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the actual
 * freeing to a workqueue to avoid using GFP_ATOMIC to allocate the
 * vmemmap pages.
 *
 * free_hpage_workfn() locklessly retrieves the linked list of pages to be
 * freed and frees them one-by-one.
 * As the page->mapping pointer is going to be cleared in
 * free_hpage_workfn() anyway, it is reused as the llist_node structure of
 * a lockless linked list of huge pages to be freed.
 */
static LLIST_HEAD(hpage_freelist);

static void free_hpage_workfn(struct work_struct *work)
{
	struct llist_node *node;

	node = llist_del_all(&hpage_freelist);

	while (node) {
		struct page *page;
		struct hstate *h;

		page = container_of((struct address_space **)node,
				     struct page, mapping);
		node = node->next;
		page->mapping = NULL;
		/*
		 * The VM_BUG_ON_PAGE(!PageHuge(page), page) in page_hstate()
		 * is going to trigger because a previous call to
		 * remove_hugetlb_page() will set_compound_page_dtor(page,
		 * NULL_COMPOUND_DTOR), so do not use page_hstate() directly.
		 */
		h = size_to_hstate(page_size(page));

		__update_and_free_page(h, page);

		cond_resched();
	}
}
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);

static inline void flush_free_hpage_work(struct hstate *h)
{
	if (free_vmemmap_pages_per_hpage(h))
		flush_work(&free_hpage_work);
}

static void update_and_free_page(struct hstate *h, struct page *page,
				 bool atomic)
{
	if (!HPageVmemmapOptimized(page) || !atomic) {
		__update_and_free_page(h, page);
		return;
	}

	/*
	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
	 *
	 * Only call schedule_work() if hpage_freelist is previously
	 * empty.  Otherwise, schedule_work() had been called but the workfn
	 * hasn't retrieved the list yet.
	 */
	if (llist_add((struct llist_node *)&page->mapping, &hpage_freelist))
		schedule_work(&free_hpage_work);
}

static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
{
	struct page *page, *t_page;

	list_for_each_entry_safe(page, t_page, list, lru) {
		update_and_free_page(h, page, false);
		cond_resched();
	}
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct hugepage_subpool *spool = hugetlb_page_subpool(page);
	bool restore_reserve;
	unsigned long flags;

	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(page_mapcount(page), page);

	hugetlb_set_page_subpool(page, NULL);
	page->mapping = NULL;
	restore_reserve = HPageRestoreReserve(page);
	ClearHPageRestoreReserve(page);

	/*
	 * If HPageRestoreReserve was set on page, page allocation consumed a
	 * reservation.  If the page was associated with a subpool, there
	 * would have been a page reserved in the subpool before allocation
	 * via hugepage_subpool_get_pages().  Since we are 'restoring' the
	 * reservation, do not call hugepage_subpool_put_pages() as this will
	 * remove the reserved page from the subpool.
	 */
	if (!restore_reserve) {
		/*
		 * A return code of zero implies that the subpool will be
		 * under its minimum size if the reservation is not restored
		 * after the page is freed.
		 * Therefore, force the restore_reserve
		 * operation.
		 */
		if (hugepage_subpool_put_pages(spool, 1) == 0)
			restore_reserve = true;
	}

	spin_lock_irqsave(&hugetlb_lock, flags);
	ClearHPageMigratable(page);
	hugetlb_cgroup_uncharge_page(hstate_index(h),
				     pages_per_huge_page(h), page);
	hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
					  pages_per_huge_page(h), page);
	if (restore_reserve)
		h->resv_huge_pages++;

	if (HPageTemporary(page)) {
		remove_hugetlb_page(h, page, false);
		spin_unlock_irqrestore(&hugetlb_lock, flags);
		update_and_free_page(h, page, true);
	} else if (h->surplus_huge_pages_node[nid]) {
		/* remove the page from active list */
		remove_hugetlb_page(h, page, true);
		spin_unlock_irqrestore(&hugetlb_lock, flags);
		update_and_free_page(h, page, true);
	} else {
		arch_clear_hugepage_flags(page);
		enqueue_huge_page(h, page);
		spin_unlock_irqrestore(&hugetlb_lock, flags);
	}
}

/*
 * Must be called with the hugetlb lock held
 */
static void __prep_account_new_huge_page(struct hstate *h, int nid)
{
	lockdep_assert_held(&hugetlb_lock);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
}

static void __prep_new_huge_page(struct hstate *h, struct page *page)
{
	free_huge_page_vmemmap(h, page);
	INIT_LIST_HEAD(&page->lru);
	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
	hugetlb_set_page_subpool(page, NULL);
	set_hugetlb_cgroup(page, NULL);
	set_hugetlb_cgroup_rsvd(page, NULL);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	__prep_new_huge_page(h, page);
	spin_lock_irq(&hugetlb_lock);
	__prep_account_new_huge_page(h, nid);
	spin_unlock_irq(&hugetlb_lock);
}

static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
								bool demote)
{
	int i, j;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__ClearPageReserved(page);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		/*
		 * For gigantic hugepages allocated through bootmem at
		 * boot, it's safer to be consistent with the not-gigantic
		 * hugepages and clear the PG_reserved bit from all tail pages
		 * too.  Otherwise drivers using get_user_pages() to access tail
		 * pages may get the reference counting wrong if they see
		 * PG_reserved set on a tail page (despite the head page not
		 * having PG_reserved set).  Enforcing this consistency between
		 * head and tail pages allows drivers to optimize away a check
		 * on the head page when they need to know if put_page() is
		 * needed after get_user_pages().
		 */
		__ClearPageReserved(p);
		/*
		 * Subtle and very unlikely
		 *
		 * Gigantic 'page allocators' such as memblock or cma will
		 * return a set of pages with each page ref counted.  We need
		 * to turn this set of pages into a compound page with tail
		 * page ref counts set to zero.  Code such as speculative page
		 * cache adding could take a ref on a 'to be' tail page.
		 * We need to respect any increased ref count, and only set
		 * the ref count to zero if count is currently 1.  If count
		 * is not 1, we return an error.
		 * An error return indicates that the set of pages cannot be
		 * converted to a gigantic page.  The caller who allocated the
		 * pages should then discard the pages using the appropriate
		 * free interface.
		 *
		 * In the case of demote, the ref count will be zero.
		 */
		if (!demote) {
			if (!page_ref_freeze(p, 1)) {
				pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n");
				goto out_error;
			}
		} else {
			VM_BUG_ON_PAGE(page_count(p), p);
		}
		set_compound_head(p, page);
	}
	atomic_set(compound_mapcount_ptr(page), -1);
	atomic_set(compound_pincount_ptr(page), 0);
	return true;

out_error:
	/* undo tail page modifications made above */
	p = page + 1;
	for (j = 1; j < i; j++, p = mem_map_next(p, page, j)) {
		clear_compound_head(p);
		set_page_refcounted(p);
	}
	/* need to clear PG_reserved on remaining tail pages */
	for (; j < nr_pages; j++, p = mem_map_next(p, page, j))
		__ClearPageReserved(p);
	set_compound_order(page, 0);
#ifdef CONFIG_64BIT
	page[1].compound_nr = 0;
#endif
	__ClearPageHead(page);
	return false;
}

static bool prep_compound_gigantic_page(struct page *page, unsigned int order)
{
	return __prep_compound_gigantic_page(page, order, false);
}

static bool prep_compound_gigantic_page_for_demote(struct page *page,
							unsigned int order)
{
	return __prep_compound_gigantic_page(page, order, true);
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
}
EXPORT_SYMBOL_GPL(PageHuge);

/*
 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
 * normal or transparent huge pages.
 */
int PageHeadHuge(struct page *page_head)
{
	if (!PageHead(page_head))
		return 0;

	return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
}
EXPORT_SYMBOL_GPL(PageHeadHuge);

/*
 * Find and lock address space (mapping) in write mode.
 *
 * Upon entry, the page is locked which means that page_mapping() is
 * stable.  Due to locking order, we can only trylock_write.  If we
 * cannot get the lock, simply return NULL to the caller.
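 *
 * Sketch of a hypothetical caller (the page must already be locked):
 *
 *	mapping = hugetlb_page_mapping_lock_write(hpage);
 *	if (mapping) {
 *		... walk mapping->i_mmap here ...
 *		i_mmap_unlock_write(mapping);
 *	}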
1870 */ 1871 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage) 1872 { 1873 struct address_space *mapping = page_mapping(hpage); 1874 1875 if (!mapping) 1876 return mapping; 1877 1878 if (i_mmap_trylock_write(mapping)) 1879 return mapping; 1880 1881 return NULL; 1882 } 1883 1884 pgoff_t hugetlb_basepage_index(struct page *page) 1885 { 1886 struct page *page_head = compound_head(page); 1887 pgoff_t index = page_index(page_head); 1888 unsigned long compound_idx; 1889 1890 if (compound_order(page_head) >= MAX_ORDER) 1891 compound_idx = page_to_pfn(page) - page_to_pfn(page_head); 1892 else 1893 compound_idx = page - page_head; 1894 1895 return (index << compound_order(page_head)) + compound_idx; 1896 } 1897 1898 static struct page *alloc_buddy_huge_page(struct hstate *h, 1899 gfp_t gfp_mask, int nid, nodemask_t *nmask, 1900 nodemask_t *node_alloc_noretry) 1901 { 1902 int order = huge_page_order(h); 1903 struct page *page; 1904 bool alloc_try_hard = true; 1905 1906 /* 1907 * By default we always try hard to allocate the page with 1908 * __GFP_RETRY_MAYFAIL flag. However, if we are allocating pages in 1909 * a loop (to adjust global huge page counts) and previous allocation 1910 * failed, do not continue to try hard on the same node. Use the 1911 * node_alloc_noretry bitmap to manage this state information. 1912 */ 1913 if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry)) 1914 alloc_try_hard = false; 1915 gfp_mask |= __GFP_COMP|__GFP_NOWARN; 1916 if (alloc_try_hard) 1917 gfp_mask |= __GFP_RETRY_MAYFAIL; 1918 if (nid == NUMA_NO_NODE) 1919 nid = numa_mem_id(); 1920 page = __alloc_pages(gfp_mask, order, nid, nmask); 1921 if (page) 1922 __count_vm_event(HTLB_BUDDY_PGALLOC); 1923 else 1924 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); 1925 1926 /* 1927 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page this 1928 * indicates an overall state change. Clear bit so that we resume 1929 * normal 'try hard' allocations. 1930 */ 1931 if (node_alloc_noretry && page && !alloc_try_hard) 1932 node_clear(nid, *node_alloc_noretry); 1933 1934 /* 1935 * If we tried hard to get a page but failed, set bit so that 1936 * subsequent attempts will not try as hard until there is an 1937 * overall state change. 1938 */ 1939 if (node_alloc_noretry && !page && alloc_try_hard) 1940 node_set(nid, *node_alloc_noretry); 1941 1942 return page; 1943 } 1944 1945 /* 1946 * Common helper to allocate a fresh hugetlb page. All specific allocators 1947 * should use this function to get new hugetlb pages 1948 */ 1949 static struct page *alloc_fresh_huge_page(struct hstate *h, 1950 gfp_t gfp_mask, int nid, nodemask_t *nmask, 1951 nodemask_t *node_alloc_noretry) 1952 { 1953 struct page *page; 1954 bool retry = false; 1955 1956 retry: 1957 if (hstate_is_gigantic(h)) 1958 page = alloc_gigantic_page(h, gfp_mask, nid, nmask); 1959 else 1960 page = alloc_buddy_huge_page(h, gfp_mask, 1961 nid, nmask, node_alloc_noretry); 1962 if (!page) 1963 return NULL; 1964 1965 if (hstate_is_gigantic(h)) { 1966 if (!prep_compound_gigantic_page(page, huge_page_order(h))) { 1967 /* 1968 * Rare failure to convert pages to compound page. 1969 * Free pages and try again - ONCE! 1970 */ 1971 free_gigantic_page(page, huge_page_order(h)); 1972 if (!retry) { 1973 retry = true; 1974 goto retry; 1975 } 1976 return NULL; 1977 } 1978 } 1979 prep_new_huge_page(h, page, page_to_nid(page)); 1980 1981 return page; 1982 } 1983 1984 /* 1985 * Allocates a fresh page to the hugetlb allocator pool in the node interleaved 1986 * manner. 
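 *
 * Returns 1 if a page was allocated and handed to the pool (the final
 * put_page() sends the hugetlb page through free_huge_page(), which
 * enqueues it on the free list), or 0 if no allowed node could satisfy
 * the allocation.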
1987 */ 1988 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, 1989 nodemask_t *node_alloc_noretry) 1990 { 1991 struct page *page; 1992 int nr_nodes, node; 1993 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 1994 1995 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { 1996 page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed, 1997 node_alloc_noretry); 1998 if (page) 1999 break; 2000 } 2001 2002 if (!page) 2003 return 0; 2004 2005 put_page(page); /* free it into the hugepage allocator */ 2006 2007 return 1; 2008 } 2009 2010 /* 2011 * Remove huge page from pool from next node to free. Attempt to keep 2012 * persistent huge pages more or less balanced over allowed nodes. 2013 * This routine only 'removes' the hugetlb page. The caller must make 2014 * an additional call to free the page to low level allocators. 2015 * Called with hugetlb_lock locked. 2016 */ 2017 static struct page *remove_pool_huge_page(struct hstate *h, 2018 nodemask_t *nodes_allowed, 2019 bool acct_surplus) 2020 { 2021 int nr_nodes, node; 2022 struct page *page = NULL; 2023 2024 lockdep_assert_held(&hugetlb_lock); 2025 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 2026 /* 2027 * If we're returning unused surplus pages, only examine 2028 * nodes with surplus pages. 2029 */ 2030 if ((!acct_surplus || h->surplus_huge_pages_node[node]) && 2031 !list_empty(&h->hugepage_freelists[node])) { 2032 page = list_entry(h->hugepage_freelists[node].next, 2033 struct page, lru); 2034 remove_hugetlb_page(h, page, acct_surplus); 2035 break; 2036 } 2037 } 2038 2039 return page; 2040 } 2041 2042 /* 2043 * Dissolve a given free hugepage into free buddy pages. This function does 2044 * nothing for in-use hugepages and non-hugepages. 2045 * This function returns values like below: 2046 * 2047 * -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages 2048 * when the system is under memory pressure and the feature of 2049 * freeing unused vmemmap pages associated with each hugetlb page 2050 * is enabled. 2051 * -EBUSY: failed to dissolved free hugepages or the hugepage is in-use 2052 * (allocated or reserved.) 2053 * 0: successfully dissolved free hugepages or the page is not a 2054 * hugepage (considered as already dissolved) 2055 */ 2056 int dissolve_free_huge_page(struct page *page) 2057 { 2058 int rc = -EBUSY; 2059 2060 retry: 2061 /* Not to disrupt normal path by vainly holding hugetlb_lock */ 2062 if (!PageHuge(page)) 2063 return 0; 2064 2065 spin_lock_irq(&hugetlb_lock); 2066 if (!PageHuge(page)) { 2067 rc = 0; 2068 goto out; 2069 } 2070 2071 if (!page_count(page)) { 2072 struct page *head = compound_head(page); 2073 struct hstate *h = page_hstate(head); 2074 if (h->free_huge_pages - h->resv_huge_pages == 0) 2075 goto out; 2076 2077 /* 2078 * We should make sure that the page is already on the free list 2079 * when it is dissolved. 2080 */ 2081 if (unlikely(!HPageFreed(head))) { 2082 spin_unlock_irq(&hugetlb_lock); 2083 cond_resched(); 2084 2085 /* 2086 * Theoretically, we should return -EBUSY when we 2087 * encounter this race. In fact, we have a chance 2088 * to successfully dissolve the page if we do a 2089 * retry. Because the race window is quite small. 2090 * If we seize this opportunity, it is an optimization 2091 * for increasing the success rate of dissolving page. 
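 * (If the transient state persists we simply come back here and
 * re-examine the page under hugetlb_lock; dissolution only proceeds
 * once the page is actually observed on the free list.)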
2092 */ 2093 goto retry; 2094 } 2095 2096 remove_hugetlb_page(h, head, false); 2097 h->max_huge_pages--; 2098 spin_unlock_irq(&hugetlb_lock); 2099 2100 /* 2101 * Normally update_and_free_page will allocate required vmemmap 2102 * before freeing the page. update_and_free_page will fail to 2103 * free the page if it can not allocate required vmemmap. We 2104 * need to adjust max_huge_pages if the page is not freed. 2105 * Attempt to allocate vmemmap here so that we can take 2106 * appropriate action on failure. 2107 */ 2108 rc = alloc_huge_page_vmemmap(h, head); 2109 if (!rc) { 2110 /* 2111 * Move PageHWPoison flag from head page to the raw 2112 * error page, which makes any subpages rather than 2113 * the error page reusable. 2114 */ 2115 if (PageHWPoison(head) && page != head) { 2116 SetPageHWPoison(page); 2117 ClearPageHWPoison(head); 2118 } 2119 update_and_free_page(h, head, false); 2120 } else { 2121 spin_lock_irq(&hugetlb_lock); 2122 add_hugetlb_page(h, head, false); 2123 h->max_huge_pages++; 2124 spin_unlock_irq(&hugetlb_lock); 2125 } 2126 2127 return rc; 2128 } 2129 out: 2130 spin_unlock_irq(&hugetlb_lock); 2131 return rc; 2132 } 2133 2134 /* 2135 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to 2136 * make specified memory blocks removable from the system. 2137 * Note that this will dissolve a free gigantic hugepage completely, if any 2138 * part of it lies within the given range. 2139 * Also note that if dissolve_free_huge_page() returns with an error, all 2140 * free hugepages that were dissolved before that error are lost. 2141 */ 2142 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn) 2143 { 2144 unsigned long pfn; 2145 struct page *page; 2146 int rc = 0; 2147 2148 if (!hugepages_supported()) 2149 return rc; 2150 2151 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) { 2152 page = pfn_to_page(pfn); 2153 rc = dissolve_free_huge_page(page); 2154 if (rc) 2155 break; 2156 } 2157 2158 return rc; 2159 } 2160 2161 /* 2162 * Allocates a fresh surplus page from the page allocator. 2163 */ 2164 static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask, 2165 int nid, nodemask_t *nmask, bool zero_ref) 2166 { 2167 struct page *page = NULL; 2168 bool retry = false; 2169 2170 if (hstate_is_gigantic(h)) 2171 return NULL; 2172 2173 spin_lock_irq(&hugetlb_lock); 2174 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) 2175 goto out_unlock; 2176 spin_unlock_irq(&hugetlb_lock); 2177 2178 retry: 2179 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL); 2180 if (!page) 2181 return NULL; 2182 2183 spin_lock_irq(&hugetlb_lock); 2184 /* 2185 * We could have raced with the pool size change. 2186 * Double check that and simply deallocate the new page 2187 * if we would end up overcommitting the surplus. Abuse the 2188 * temporary page flag to work around the nasty free_huge_page 2189 * code flow. 2190 */ 2191 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { 2192 SetHPageTemporary(page); 2193 spin_unlock_irq(&hugetlb_lock); 2194 put_page(page); 2195 return NULL; 2196 } 2197 2198 if (zero_ref) { 2199 /* 2200 * Caller requires a page with zero ref count. 2201 * We will drop ref count here. If someone else is holding 2202 * a ref, the page will be freed when they drop it. Abuse 2203 * temporary page flag to accomplish this. 2204 */ 2205 SetHPageTemporary(page); 2206 if (!put_page_testzero(page)) { 2207 /* 2208 * Unexpected inflated ref count on freshly allocated 2209 * huge page. Retry once.
2210 */ 2211 pr_info("HugeTLB unexpected inflated ref count on freshly allocated page\n"); 2212 spin_unlock_irq(&hugetlb_lock); 2213 if (retry) 2214 return NULL; 2215 2216 retry = true; 2217 goto retry; 2218 } 2219 ClearHPageTemporary(page); 2220 } 2221 2222 h->surplus_huge_pages++; 2223 h->surplus_huge_pages_node[page_to_nid(page)]++; 2224 2225 out_unlock: 2226 spin_unlock_irq(&hugetlb_lock); 2227 2228 return page; 2229 } 2230 2231 static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, 2232 int nid, nodemask_t *nmask) 2233 { 2234 struct page *page; 2235 2236 if (hstate_is_gigantic(h)) 2237 return NULL; 2238 2239 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL); 2240 if (!page) 2241 return NULL; 2242 2243 /* 2244 * We do not account these pages as surplus because they are only 2245 * temporary and will be released properly on the last reference 2246 */ 2247 SetHPageTemporary(page); 2248 2249 return page; 2250 } 2251 2252 /* 2253 * Use the VMA's mpolicy to allocate a huge page from the buddy. 2254 */ 2255 static 2256 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h, 2257 struct vm_area_struct *vma, unsigned long addr) 2258 { 2259 struct page *page = NULL; 2260 struct mempolicy *mpol; 2261 gfp_t gfp_mask = htlb_alloc_mask(h); 2262 int nid; 2263 nodemask_t *nodemask; 2264 2265 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask); 2266 if (mpol_is_preferred_many(mpol)) { 2267 gfp_t gfp = gfp_mask | __GFP_NOWARN; 2268 2269 gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 2270 page = alloc_surplus_huge_page(h, gfp, nid, nodemask, false); 2271 2272 /* Fallback to all nodes if page==NULL */ 2273 nodemask = NULL; 2274 } 2275 2276 if (!page) 2277 page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask, false); 2278 mpol_cond_put(mpol); 2279 return page; 2280 } 2281 2282 /* page migration callback function */ 2283 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, 2284 nodemask_t *nmask, gfp_t gfp_mask) 2285 { 2286 spin_lock_irq(&hugetlb_lock); 2287 if (h->free_huge_pages - h->resv_huge_pages > 0) { 2288 struct page *page; 2289 2290 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask); 2291 if (page) { 2292 spin_unlock_irq(&hugetlb_lock); 2293 return page; 2294 } 2295 } 2296 spin_unlock_irq(&hugetlb_lock); 2297 2298 return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask); 2299 } 2300 2301 /* mempolicy aware migration callback */ 2302 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, 2303 unsigned long address) 2304 { 2305 struct mempolicy *mpol; 2306 nodemask_t *nodemask; 2307 struct page *page; 2308 gfp_t gfp_mask; 2309 int node; 2310 2311 gfp_mask = htlb_alloc_mask(h); 2312 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask); 2313 page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask); 2314 mpol_cond_put(mpol); 2315 2316 return page; 2317 } 2318 2319 /* 2320 * Increase the hugetlb pool such that it can accommodate a reservation 2321 * of size 'delta'. 
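 *
 * Roughly: while free pages plus newly allocated surplus pages cannot
 * cover resv_huge_pages + delta, allocate surplus pages outside the lock,
 * then re-check under the lock (the counts may have changed), enqueue the
 * pages that are still needed and free the extras back to the buddy
 * allocator. Returns 0 on success, -ENOMEM if the shortfall could not be
 * made up.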
2322 */ 2323 static int gather_surplus_pages(struct hstate *h, long delta) 2324 __must_hold(&hugetlb_lock) 2325 { 2326 struct list_head surplus_list; 2327 struct page *page, *tmp; 2328 int ret; 2329 long i; 2330 long needed, allocated; 2331 bool alloc_ok = true; 2332 2333 lockdep_assert_held(&hugetlb_lock); 2334 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; 2335 if (needed <= 0) { 2336 h->resv_huge_pages += delta; 2337 return 0; 2338 } 2339 2340 allocated = 0; 2341 INIT_LIST_HEAD(&surplus_list); 2342 2343 ret = -ENOMEM; 2344 retry: 2345 spin_unlock_irq(&hugetlb_lock); 2346 for (i = 0; i < needed; i++) { 2347 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h), 2348 NUMA_NO_NODE, NULL, true); 2349 if (!page) { 2350 alloc_ok = false; 2351 break; 2352 } 2353 list_add(&page->lru, &surplus_list); 2354 cond_resched(); 2355 } 2356 allocated += i; 2357 2358 /* 2359 * After retaking hugetlb_lock, we need to recalculate 'needed' 2360 * because either resv_huge_pages or free_huge_pages may have changed. 2361 */ 2362 spin_lock_irq(&hugetlb_lock); 2363 needed = (h->resv_huge_pages + delta) - 2364 (h->free_huge_pages + allocated); 2365 if (needed > 0) { 2366 if (alloc_ok) 2367 goto retry; 2368 /* 2369 * We were not able to allocate enough pages to 2370 * satisfy the entire reservation so we free what 2371 * we've allocated so far. 2372 */ 2373 goto free; 2374 } 2375 /* 2376 * The surplus_list now contains _at_least_ the number of extra pages 2377 * needed to accommodate the reservation. Add the appropriate number 2378 * of pages to the hugetlb pool and free the extras back to the buddy 2379 * allocator. Commit the entire reservation here to prevent another 2380 * process from stealing the pages as they are added to the pool but 2381 * before they are reserved. 2382 */ 2383 needed += allocated; 2384 h->resv_huge_pages += delta; 2385 ret = 0; 2386 2387 /* Free the needed pages to the hugetlb pool */ 2388 list_for_each_entry_safe(page, tmp, &surplus_list, lru) { 2389 if ((--needed) < 0) 2390 break; 2391 /* Add the page to the hugetlb allocator */ 2392 enqueue_huge_page(h, page); 2393 } 2394 free: 2395 spin_unlock_irq(&hugetlb_lock); 2396 2397 /* 2398 * Free unnecessary surplus pages to the buddy allocator. 2399 * Pages have no ref count, call free_huge_page directly. 2400 */ 2401 list_for_each_entry_safe(page, tmp, &surplus_list, lru) 2402 free_huge_page(page); 2403 spin_lock_irq(&hugetlb_lock); 2404 2405 return ret; 2406 } 2407 2408 /* 2409 * This routine has two main purposes: 2410 * 1) Decrement the reservation count (resv_huge_pages) by the value passed 2411 * in unused_resv_pages. This corresponds to the prior adjustments made 2412 * to the associated reservation map. 2413 * 2) Free any unused surplus pages that may have been allocated to satisfy 2414 * the reservation. As many as unused_resv_pages may be freed. 2415 */ 2416 static void return_unused_surplus_pages(struct hstate *h, 2417 unsigned long unused_resv_pages) 2418 { 2419 unsigned long nr_pages; 2420 struct page *page; 2421 LIST_HEAD(page_list); 2422 2423 lockdep_assert_held(&hugetlb_lock); 2424 /* Uncommit the reservation */ 2425 h->resv_huge_pages -= unused_resv_pages; 2426 2427 /* Cannot return gigantic pages currently */ 2428 if (hstate_is_gigantic(h)) 2429 goto out; 2430 2431 /* 2432 * Part (or even all) of the reservation could have been backed 2433 * by pre-allocated pages. Only free surplus pages. 
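 * The cap below (the smaller of the unused reservation and
 * surplus_huge_pages) keeps persistent pages that happen to back the
 * reservation in the pool.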
2434 */ 2435 nr_pages = min(unused_resv_pages, h->surplus_huge_pages); 2436 2437 /* 2438 * We want to release as many surplus pages as possible, spread 2439 * evenly across all nodes with memory. Iterate across these nodes 2440 * until we can no longer free unreserved surplus pages. This occurs 2441 * when the nodes with surplus pages have no free pages. 2442 * remove_pool_huge_page() will balance the freed pages across the 2443 * on-line nodes with memory and will handle the hstate accounting. 2444 */ 2445 while (nr_pages--) { 2446 page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1); 2447 if (!page) 2448 goto out; 2449 2450 list_add(&page->lru, &page_list); 2451 } 2452 2453 out: 2454 spin_unlock_irq(&hugetlb_lock); 2455 update_and_free_pages_bulk(h, &page_list); 2456 spin_lock_irq(&hugetlb_lock); 2457 } 2458 2459 2460 /* 2461 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation 2462 * are used by the huge page allocation routines to manage reservations. 2463 * 2464 * vma_needs_reservation is called to determine if the huge page at addr 2465 * within the vma has an associated reservation. If a reservation is 2466 * needed, the value 1 is returned. The caller is then responsible for 2467 * managing the global reservation and subpool usage counts. After 2468 * the huge page has been allocated, vma_commit_reservation is called 2469 * to add the page to the reservation map. If the page allocation fails, 2470 * the reservation must be ended instead of committed. vma_end_reservation 2471 * is called in such cases. 2472 * 2473 * In the normal case, vma_commit_reservation returns the same value 2474 * as the preceding vma_needs_reservation call. The only time this 2475 * is not the case is if a reserve map was changed between calls. It 2476 * is the responsibility of the caller to notice the difference and 2477 * take appropriate action. 2478 * 2479 * vma_add_reservation is used in error paths where a reservation must 2480 * be restored when a newly allocated huge page must be freed. It is 2481 * to be called after calling vma_needs_reservation to determine if a 2482 * reservation exists. 2483 * 2484 * vma_del_reservation is used in error paths where an entry in the reserve 2485 * map was created during huge page allocation and must be removed. It is to 2486 * be called after calling vma_needs_reservation to determine if a reservation 2487 * exists. 2488 */ 2489 enum vma_resv_mode { 2490 VMA_NEEDS_RESV, 2491 VMA_COMMIT_RESV, 2492 VMA_END_RESV, 2493 VMA_ADD_RESV, 2494 VMA_DEL_RESV, 2495 }; 2496 static long __vma_reservation_common(struct hstate *h, 2497 struct vm_area_struct *vma, unsigned long addr, 2498 enum vma_resv_mode mode) 2499 { 2500 struct resv_map *resv; 2501 pgoff_t idx; 2502 long ret; 2503 long dummy_out_regions_needed; 2504 2505 resv = vma_resv_map(vma); 2506 if (!resv) 2507 return 1; 2508 2509 idx = vma_hugecache_offset(h, vma, addr); 2510 switch (mode) { 2511 case VMA_NEEDS_RESV: 2512 ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed); 2513 /* We assume that vma_reservation_* routines always operate on 2514 * 1 page, and that adding to resv map a 1 page entry can only 2515 * ever require 1 region. 2516 */ 2517 VM_BUG_ON(dummy_out_regions_needed != 1); 2518 break; 2519 case VMA_COMMIT_RESV: 2520 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2521 /* region_add calls of range 1 should never fail. 
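 * The preceding region_chg() for this page preallocated the single
 * region descriptor that can be needed here, so the add cannot require
 * memory.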
*/ 2522 VM_BUG_ON(ret < 0); 2523 break; 2524 case VMA_END_RESV: 2525 region_abort(resv, idx, idx + 1, 1); 2526 ret = 0; 2527 break; 2528 case VMA_ADD_RESV: 2529 if (vma->vm_flags & VM_MAYSHARE) { 2530 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2531 /* region_add calls of range 1 should never fail. */ 2532 VM_BUG_ON(ret < 0); 2533 } else { 2534 region_abort(resv, idx, idx + 1, 1); 2535 ret = region_del(resv, idx, idx + 1); 2536 } 2537 break; 2538 case VMA_DEL_RESV: 2539 if (vma->vm_flags & VM_MAYSHARE) { 2540 region_abort(resv, idx, idx + 1, 1); 2541 ret = region_del(resv, idx, idx + 1); 2542 } else { 2543 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2544 /* region_add calls of range 1 should never fail. */ 2545 VM_BUG_ON(ret < 0); 2546 } 2547 break; 2548 default: 2549 BUG(); 2550 } 2551 2552 if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV) 2553 return ret; 2554 /* 2555 * We know private mapping must have HPAGE_RESV_OWNER set. 2556 * 2557 * In most cases, reserves always exist for private mappings. 2558 * However, a file associated with mapping could have been 2559 * hole punched or truncated after reserves were consumed. 2560 * As subsequent fault on such a range will not use reserves. 2561 * Subtle - The reserve map for private mappings has the 2562 * opposite meaning than that of shared mappings. If NO 2563 * entry is in the reserve map, it means a reservation exists. 2564 * If an entry exists in the reserve map, it means the 2565 * reservation has already been consumed. As a result, the 2566 * return value of this routine is the opposite of the 2567 * value returned from reserve map manipulation routines above. 2568 */ 2569 if (ret > 0) 2570 return 0; 2571 if (ret == 0) 2572 return 1; 2573 return ret; 2574 } 2575 2576 static long vma_needs_reservation(struct hstate *h, 2577 struct vm_area_struct *vma, unsigned long addr) 2578 { 2579 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); 2580 } 2581 2582 static long vma_commit_reservation(struct hstate *h, 2583 struct vm_area_struct *vma, unsigned long addr) 2584 { 2585 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); 2586 } 2587 2588 static void vma_end_reservation(struct hstate *h, 2589 struct vm_area_struct *vma, unsigned long addr) 2590 { 2591 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); 2592 } 2593 2594 static long vma_add_reservation(struct hstate *h, 2595 struct vm_area_struct *vma, unsigned long addr) 2596 { 2597 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); 2598 } 2599 2600 static long vma_del_reservation(struct hstate *h, 2601 struct vm_area_struct *vma, unsigned long addr) 2602 { 2603 return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV); 2604 } 2605 2606 /* 2607 * This routine is called to restore reservation information on error paths. 2608 * It should ONLY be called for pages allocated via alloc_huge_page(), and 2609 * the hugetlb mutex should remain held when calling this routine. 2610 * 2611 * It handles two specific cases: 2612 * 1) A reservation was in place and the page consumed the reservation. 2613 * HPageRestoreReserve is set in the page. 2614 * 2) No reservation was in place for the page, so HPageRestoreReserve is 2615 * not set. However, alloc_huge_page always updates the reserve map. 2616 * 2617 * In case 1, free_huge_page later in the error path will increment the 2618 * global reserve count. But, free_huge_page does not have enough context 2619 * to adjust the reservation map. 
This case deals primarily with private 2620 * mappings. Adjust the reserve map here to be consistent with global 2621 * reserve count adjustments to be made by free_huge_page. Make sure the 2622 * reserve map indicates there is a reservation present. 2623 * 2624 * In case 2, simply undo reserve map modifications done by alloc_huge_page. 2625 */ 2626 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, 2627 unsigned long address, struct page *page) 2628 { 2629 long rc = vma_needs_reservation(h, vma, address); 2630 2631 if (HPageRestoreReserve(page)) { 2632 if (unlikely(rc < 0)) 2633 /* 2634 * Rare out of memory condition in reserve map 2635 * manipulation. Clear HPageRestoreReserve so that 2636 * global reserve count will not be incremented 2637 * by free_huge_page. This will make it appear 2638 * as though the reservation for this page was 2639 * consumed. This may prevent the task from 2640 * faulting in the page at a later time. This 2641 * is better than inconsistent global huge page 2642 * accounting of reserve counts. 2643 */ 2644 ClearHPageRestoreReserve(page); 2645 else if (rc) 2646 (void)vma_add_reservation(h, vma, address); 2647 else 2648 vma_end_reservation(h, vma, address); 2649 } else { 2650 if (!rc) { 2651 /* 2652 * This indicates there is an entry in the reserve map 2653 * not added by alloc_huge_page. We know it was added 2654 * before the alloc_huge_page call, otherwise 2655 * HPageRestoreReserve would be set on the page. 2656 * Remove the entry so that a subsequent allocation 2657 * does not consume a reservation. 2658 */ 2659 rc = vma_del_reservation(h, vma, address); 2660 if (rc < 0) 2661 /* 2662 * VERY rare out of memory condition. Since 2663 * we can not delete the entry, set 2664 * HPageRestoreReserve so that the reserve 2665 * count will be incremented when the page 2666 * is freed. This reserve will be consumed 2667 * on a subsequent allocation. 2668 */ 2669 SetHPageRestoreReserve(page); 2670 } else if (rc < 0) { 2671 /* 2672 * Rare out of memory condition from 2673 * vma_needs_reservation call. Memory allocation is 2674 * only attempted if a new entry is needed. Therefore, 2675 * this implies there is not an entry in the 2676 * reserve map. 2677 * 2678 * For shared mappings, no entry in the map indicates 2679 * no reservation. We are done. 2680 */ 2681 if (!(vma->vm_flags & VM_MAYSHARE)) 2682 /* 2683 * For private mappings, no entry indicates 2684 * a reservation is present. Since we can 2685 * not add an entry, set SetHPageRestoreReserve 2686 * on the page so reserve count will be 2687 * incremented when freed. This reserve will 2688 * be consumed on a subsequent allocation. 2689 */ 2690 SetHPageRestoreReserve(page); 2691 } else 2692 /* 2693 * No reservation present, do nothing 2694 */ 2695 vma_end_reservation(h, vma, address); 2696 } 2697 } 2698 2699 /* 2700 * alloc_and_dissolve_huge_page - Allocate a new page and dissolve the old one 2701 * @h: struct hstate old page belongs to 2702 * @old_page: Old page to dissolve 2703 * @list: List to isolate the page in case we need to 2704 * Returns 0 on success, otherwise negated error. 2705 */ 2706 static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page, 2707 struct list_head *list) 2708 { 2709 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 2710 int nid = page_to_nid(old_page); 2711 bool alloc_retry = false; 2712 struct page *new_page; 2713 int ret = 0; 2714 2715 /* 2716 * Before dissolving the page, we need to allocate a new one for the 2717 * pool to remain stable. 
Here, we allocate the page and 'prep' it 2718 * by doing everything but actually updating counters and adding to 2719 * the pool. This simplifies and let us do most of the processing 2720 * under the lock. 2721 */ 2722 alloc_retry: 2723 new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL); 2724 if (!new_page) 2725 return -ENOMEM; 2726 /* 2727 * If all goes well, this page will be directly added to the free 2728 * list in the pool. For this the ref count needs to be zero. 2729 * Attempt to drop now, and retry once if needed. It is VERY 2730 * unlikely there is another ref on the page. 2731 * 2732 * If someone else has a reference to the page, it will be freed 2733 * when they drop their ref. Abuse temporary page flag to accomplish 2734 * this. Retry once if there is an inflated ref count. 2735 */ 2736 SetHPageTemporary(new_page); 2737 if (!put_page_testzero(new_page)) { 2738 if (alloc_retry) 2739 return -EBUSY; 2740 2741 alloc_retry = true; 2742 goto alloc_retry; 2743 } 2744 ClearHPageTemporary(new_page); 2745 2746 __prep_new_huge_page(h, new_page); 2747 2748 retry: 2749 spin_lock_irq(&hugetlb_lock); 2750 if (!PageHuge(old_page)) { 2751 /* 2752 * Freed from under us. Drop new_page too. 2753 */ 2754 goto free_new; 2755 } else if (page_count(old_page)) { 2756 /* 2757 * Someone has grabbed the page, try to isolate it here. 2758 * Fail with -EBUSY if not possible. 2759 */ 2760 spin_unlock_irq(&hugetlb_lock); 2761 if (!isolate_huge_page(old_page, list)) 2762 ret = -EBUSY; 2763 spin_lock_irq(&hugetlb_lock); 2764 goto free_new; 2765 } else if (!HPageFreed(old_page)) { 2766 /* 2767 * Page's refcount is 0 but it has not been enqueued in the 2768 * freelist yet. Race window is small, so we can succeed here if 2769 * we retry. 2770 */ 2771 spin_unlock_irq(&hugetlb_lock); 2772 cond_resched(); 2773 goto retry; 2774 } else { 2775 /* 2776 * Ok, old_page is still a genuine free hugepage. Remove it from 2777 * the freelist and decrease the counters. These will be 2778 * incremented again when calling __prep_account_new_huge_page() 2779 * and enqueue_huge_page() for new_page. The counters will remain 2780 * stable since this happens under the lock. 2781 */ 2782 remove_hugetlb_page(h, old_page, false); 2783 2784 /* 2785 * Ref count on new page is already zero as it was dropped 2786 * earlier. It can be directly added to the pool free list. 2787 */ 2788 __prep_account_new_huge_page(h, nid); 2789 enqueue_huge_page(h, new_page); 2790 2791 /* 2792 * Pages have been replaced, we can safely free the old one. 2793 */ 2794 spin_unlock_irq(&hugetlb_lock); 2795 update_and_free_page(h, old_page, false); 2796 } 2797 2798 return ret; 2799 2800 free_new: 2801 spin_unlock_irq(&hugetlb_lock); 2802 /* Page has a zero ref count, but needs a ref to be freed */ 2803 set_page_refcounted(new_page); 2804 update_and_free_page(h, new_page, false); 2805 2806 return ret; 2807 } 2808 2809 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) 2810 { 2811 struct hstate *h; 2812 struct page *head; 2813 int ret = -EBUSY; 2814 2815 /* 2816 * The page might have been dissolved from under our feet, so make sure 2817 * to carefully check the state under the lock. 2818 * Return success when racing as if we dissolved the page ourselves. 
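 * (Callers such as the alloc_contig_range path only need the range to
 * stop being backed by a free hugetlb page; whether we or a racing
 * thread made that happen does not matter.)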
2819 */ 2820 spin_lock_irq(&hugetlb_lock); 2821 if (PageHuge(page)) { 2822 head = compound_head(page); 2823 h = page_hstate(head); 2824 } else { 2825 spin_unlock_irq(&hugetlb_lock); 2826 return 0; 2827 } 2828 spin_unlock_irq(&hugetlb_lock); 2829 2830 /* 2831 * Fence off gigantic pages as there is a cyclic dependency between 2832 * alloc_contig_range and them. Return -ENOMEM as this has the effect 2833 * of bailing out right away without further retrying. 2834 */ 2835 if (hstate_is_gigantic(h)) 2836 return -ENOMEM; 2837 2838 if (page_count(head) && isolate_huge_page(head, list)) 2839 ret = 0; 2840 else if (!page_count(head)) 2841 ret = alloc_and_dissolve_huge_page(h, head, list); 2842 2843 return ret; 2844 } 2845 2846 struct page *alloc_huge_page(struct vm_area_struct *vma, 2847 unsigned long addr, int avoid_reserve) 2848 { 2849 struct hugepage_subpool *spool = subpool_vma(vma); 2850 struct hstate *h = hstate_vma(vma); 2851 struct page *page; 2852 long map_chg, map_commit; 2853 long gbl_chg; 2854 int ret, idx; 2855 struct hugetlb_cgroup *h_cg; 2856 bool deferred_reserve; 2857 2858 idx = hstate_index(h); 2859 /* 2860 * Examine the region/reserve map to determine if the process 2861 * has a reservation for the page to be allocated. A return 2862 * code of zero indicates a reservation exists (no change). 2863 */ 2864 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); 2865 if (map_chg < 0) 2866 return ERR_PTR(-ENOMEM); 2867 2868 /* 2869 * Processes that did not create the mapping will have no 2870 * reserves as indicated by the region/reserve map. Check 2871 * that the allocation will not exceed the subpool limit. 2872 * Allocations for MAP_NORESERVE mappings also need to be 2873 * checked against any subpool limit. 2874 */ 2875 if (map_chg || avoid_reserve) { 2876 gbl_chg = hugepage_subpool_get_pages(spool, 1); 2877 if (gbl_chg < 0) { 2878 vma_end_reservation(h, vma, addr); 2879 return ERR_PTR(-ENOSPC); 2880 } 2881 2882 /* 2883 * Even though there was no reservation in the region/reserve 2884 * map, there could be reservations associated with the 2885 * subpool that can be used. This would be indicated if the 2886 * return value of hugepage_subpool_get_pages() is zero. 2887 * However, if avoid_reserve is specified we still avoid even 2888 * the subpool reservations. 2889 */ 2890 if (avoid_reserve) 2891 gbl_chg = 1; 2892 } 2893 2894 /* If this allocation is not consuming a reservation, charge it now. 2895 */ 2896 deferred_reserve = map_chg || avoid_reserve; 2897 if (deferred_reserve) { 2898 ret = hugetlb_cgroup_charge_cgroup_rsvd( 2899 idx, pages_per_huge_page(h), &h_cg); 2900 if (ret) 2901 goto out_subpool_put; 2902 } 2903 2904 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); 2905 if (ret) 2906 goto out_uncharge_cgroup_reservation; 2907 2908 spin_lock_irq(&hugetlb_lock); 2909 /* 2910 * glb_chg is passed to indicate whether or not a page must be taken 2911 * from the global free pool (global change). gbl_chg == 0 indicates 2912 * a reservation exists for the allocation. 
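 * A non-zero gbl_chg means no usable reservation covers this page (or
 * the caller asked to avoid reserves), so the dequeue below must find a
 * free page that is not set aside for reservations; failing that we fall
 * back to allocating a surplus page.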
2913 */ 2914 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg); 2915 if (!page) { 2916 spin_unlock_irq(&hugetlb_lock); 2917 page = alloc_buddy_huge_page_with_mpol(h, vma, addr); 2918 if (!page) 2919 goto out_uncharge_cgroup; 2920 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) { 2921 SetHPageRestoreReserve(page); 2922 h->resv_huge_pages--; 2923 } 2924 spin_lock_irq(&hugetlb_lock); 2925 list_add(&page->lru, &h->hugepage_activelist); 2926 /* Fall through */ 2927 } 2928 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page); 2929 /* If allocation is not consuming a reservation, also store the 2930 * hugetlb_cgroup pointer on the page. 2931 */ 2932 if (deferred_reserve) { 2933 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h), 2934 h_cg, page); 2935 } 2936 2937 spin_unlock_irq(&hugetlb_lock); 2938 2939 hugetlb_set_page_subpool(page, spool); 2940 2941 map_commit = vma_commit_reservation(h, vma, addr); 2942 if (unlikely(map_chg > map_commit)) { 2943 /* 2944 * The page was added to the reservation map between 2945 * vma_needs_reservation and vma_commit_reservation. 2946 * This indicates a race with hugetlb_reserve_pages. 2947 * Adjust for the subpool count incremented above AND 2948 * in hugetlb_reserve_pages for the same page. Also, 2949 * the reservation count added in hugetlb_reserve_pages 2950 * no longer applies. 2951 */ 2952 long rsv_adjust; 2953 2954 rsv_adjust = hugepage_subpool_put_pages(spool, 1); 2955 hugetlb_acct_memory(h, -rsv_adjust); 2956 if (deferred_reserve) 2957 hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h), 2958 pages_per_huge_page(h), page); 2959 } 2960 return page; 2961 2962 out_uncharge_cgroup: 2963 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); 2964 out_uncharge_cgroup_reservation: 2965 if (deferred_reserve) 2966 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h), 2967 h_cg); 2968 out_subpool_put: 2969 if (map_chg || avoid_reserve) 2970 hugepage_subpool_put_pages(spool, 1); 2971 vma_end_reservation(h, vma, addr); 2972 return ERR_PTR(-ENOSPC); 2973 } 2974 2975 int alloc_bootmem_huge_page(struct hstate *h, int nid) 2976 __attribute__ ((weak, alias("__alloc_bootmem_huge_page"))); 2977 int __alloc_bootmem_huge_page(struct hstate *h, int nid) 2978 { 2979 struct huge_bootmem_page *m = NULL; /* initialize for clang */ 2980 int nr_nodes, node; 2981 2982 if (nid != NUMA_NO_NODE && nid >= nr_online_nodes) 2983 return 0; 2984 /* do node specific alloc */ 2985 if (nid != NUMA_NO_NODE) { 2986 m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h), 2987 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid); 2988 if (!m) 2989 return 0; 2990 goto found; 2991 } 2992 /* allocate from next node when distributing huge pages */ 2993 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { 2994 m = memblock_alloc_try_nid_raw( 2995 huge_page_size(h), huge_page_size(h), 2996 0, MEMBLOCK_ALLOC_ACCESSIBLE, node); 2997 /* 2998 * Use the beginning of the huge page to store the 2999 * huge_bootmem_page struct (until gather_bootmem 3000 * puts them into the mem_map). 3001 */ 3002 if (!m) 3003 return 0; 3004 goto found; 3005 } 3006 3007 found: 3008 /* Put them into a private list first because mem_map is not up yet */ 3009 INIT_LIST_HEAD(&m->list); 3010 list_add(&m->list, &huge_boot_pages); 3011 m->hstate = h; 3012 return 1; 3013 } 3014 3015 /* 3016 * Put bootmem huge pages into the standard lists after mem_map is up. 3017 * Note: This only applies to gigantic (order > MAX_ORDER) pages. 
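 *
 * Each entry on huge_boot_pages was stored at the start of its bootmem
 * allocation by __alloc_bootmem_huge_page(), so virt_to_page(m) below
 * yields the head page of the range being turned into a hugetlb page.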
3018 */ 3019 static void __init gather_bootmem_prealloc(void) 3020 { 3021 struct huge_bootmem_page *m; 3022 3023 list_for_each_entry(m, &huge_boot_pages, list) { 3024 struct page *page = virt_to_page(m); 3025 struct hstate *h = m->hstate; 3026 3027 VM_BUG_ON(!hstate_is_gigantic(h)); 3028 WARN_ON(page_count(page) != 1); 3029 if (prep_compound_gigantic_page(page, huge_page_order(h))) { 3030 WARN_ON(PageReserved(page)); 3031 prep_new_huge_page(h, page, page_to_nid(page)); 3032 put_page(page); /* add to the hugepage allocator */ 3033 } else { 3034 /* VERY unlikely inflated ref count on a tail page */ 3035 free_gigantic_page(page, huge_page_order(h)); 3036 } 3037 3038 /* 3039 * We need to restore the 'stolen' pages to totalram_pages 3040 * in order to fix confusing memory reports from free(1) and 3041 * other side-effects, like CommitLimit going negative. 3042 */ 3043 adjust_managed_page_count(page, pages_per_huge_page(h)); 3044 cond_resched(); 3045 } 3046 } 3047 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid) 3048 { 3049 unsigned long i; 3050 char buf[32]; 3051 3052 for (i = 0; i < h->max_huge_pages_node[nid]; ++i) { 3053 if (hstate_is_gigantic(h)) { 3054 if (!alloc_bootmem_huge_page(h, nid)) 3055 break; 3056 } else { 3057 struct page *page; 3058 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 3059 3060 page = alloc_fresh_huge_page(h, gfp_mask, nid, 3061 &node_states[N_MEMORY], NULL); 3062 if (!page) 3063 break; 3064 put_page(page); /* free it into the hugepage allocator */ 3065 } 3066 cond_resched(); 3067 } 3068 if (i == h->max_huge_pages_node[nid]) 3069 return; 3070 3071 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3072 pr_warn("HugeTLB: allocating %u of page size %s failed node%d. Only allocated %lu hugepages.\n", 3073 h->max_huge_pages_node[nid], buf, nid, i); 3074 h->max_huge_pages -= (h->max_huge_pages_node[nid] - i); 3075 h->max_huge_pages_node[nid] = i; 3076 } 3077 3078 static void __init hugetlb_hstate_alloc_pages(struct hstate *h) 3079 { 3080 unsigned long i; 3081 nodemask_t *node_alloc_noretry; 3082 bool node_specific_alloc = false; 3083 3084 /* skip gigantic hugepages allocation if hugetlb_cma enabled */ 3085 if (hstate_is_gigantic(h) && hugetlb_cma_size) { 3086 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n"); 3087 return; 3088 } 3089 3090 /* do node specific alloc */ 3091 for (i = 0; i < nr_online_nodes; i++) { 3092 if (h->max_huge_pages_node[i] > 0) { 3093 hugetlb_hstate_alloc_pages_onenode(h, i); 3094 node_specific_alloc = true; 3095 } 3096 } 3097 3098 if (node_specific_alloc) 3099 return; 3100 3101 /* below will do all node balanced alloc */ 3102 if (!hstate_is_gigantic(h)) { 3103 /* 3104 * Bit mask controlling how hard we retry per-node allocations. 3105 * Ignore errors as lower level routines can deal with 3106 * node_alloc_noretry == NULL. If this kmalloc fails at boot 3107 * time, we are likely in bigger trouble. 
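 * A set bit in the mask records that a hard (__GFP_RETRY_MAYFAIL)
 * attempt on that node already failed, so later iterations fall back to
 * cheaper allocations there until an allocation unexpectedly succeeds
 * and the bit is cleared again.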
3108 */ 3109 node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry), 3110 GFP_KERNEL); 3111 } else { 3112 /* allocations done at boot time */ 3113 node_alloc_noretry = NULL; 3114 } 3115 3116 /* bit mask controlling how hard we retry per-node allocations */ 3117 if (node_alloc_noretry) 3118 nodes_clear(*node_alloc_noretry); 3119 3120 for (i = 0; i < h->max_huge_pages; ++i) { 3121 if (hstate_is_gigantic(h)) { 3122 if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE)) 3123 break; 3124 } else if (!alloc_pool_huge_page(h, 3125 &node_states[N_MEMORY], 3126 node_alloc_noretry)) 3127 break; 3128 cond_resched(); 3129 } 3130 if (i < h->max_huge_pages) { 3131 char buf[32]; 3132 3133 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3134 pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n", 3135 h->max_huge_pages, buf, i); 3136 h->max_huge_pages = i; 3137 } 3138 kfree(node_alloc_noretry); 3139 } 3140 3141 static void __init hugetlb_init_hstates(void) 3142 { 3143 struct hstate *h, *h2; 3144 3145 for_each_hstate(h) { 3146 if (minimum_order > huge_page_order(h)) 3147 minimum_order = huge_page_order(h); 3148 3149 /* oversize hugepages were init'ed in early boot */ 3150 if (!hstate_is_gigantic(h)) 3151 hugetlb_hstate_alloc_pages(h); 3152 3153 /* 3154 * Set demote order for each hstate. Note that 3155 * h->demote_order is initially 0. 3156 * - We can not demote gigantic pages if runtime freeing 3157 * is not supported, so skip this. 3158 * - If CMA allocation is possible, we can not demote 3159 * HUGETLB_PAGE_ORDER or smaller size pages. 3160 */ 3161 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 3162 continue; 3163 if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER) 3164 continue; 3165 for_each_hstate(h2) { 3166 if (h2 == h) 3167 continue; 3168 if (h2->order < h->order && 3169 h2->order > h->demote_order) 3170 h->demote_order = h2->order; 3171 } 3172 } 3173 VM_BUG_ON(minimum_order == UINT_MAX); 3174 } 3175 3176 static void __init report_hugepages(void) 3177 { 3178 struct hstate *h; 3179 3180 for_each_hstate(h) { 3181 char buf[32]; 3182 3183 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3184 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n", 3185 buf, h->free_huge_pages); 3186 } 3187 } 3188 3189 #ifdef CONFIG_HIGHMEM 3190 static void try_to_free_low(struct hstate *h, unsigned long count, 3191 nodemask_t *nodes_allowed) 3192 { 3193 int i; 3194 LIST_HEAD(page_list); 3195 3196 lockdep_assert_held(&hugetlb_lock); 3197 if (hstate_is_gigantic(h)) 3198 return; 3199 3200 /* 3201 * Collect pages to be freed on a list, and free after dropping lock 3202 */ 3203 for_each_node_mask(i, *nodes_allowed) { 3204 struct page *page, *next; 3205 struct list_head *freel = &h->hugepage_freelists[i]; 3206 list_for_each_entry_safe(page, next, freel, lru) { 3207 if (count >= h->nr_huge_pages) 3208 goto out; 3209 if (PageHighMem(page)) 3210 continue; 3211 remove_hugetlb_page(h, page, false); 3212 list_add(&page->lru, &page_list); 3213 } 3214 } 3215 3216 out: 3217 spin_unlock_irq(&hugetlb_lock); 3218 update_and_free_pages_bulk(h, &page_list); 3219 spin_lock_irq(&hugetlb_lock); 3220 } 3221 #else 3222 static inline void try_to_free_low(struct hstate *h, unsigned long count, 3223 nodemask_t *nodes_allowed) 3224 { 3225 } 3226 #endif 3227 3228 /* 3229 * Increment or decrement surplus_huge_pages. Keep node-specific counters 3230 * balanced by operating on them in a round-robin fashion. 3231 * Returns 1 if an adjustment was made. 
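 *
 * delta must be +1 or -1: -1 converts one surplus page back into a
 * persistent page on a node that currently has surplus pages, while +1
 * marks one persistent page as surplus on a node that still has
 * persistent pages to give up.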
3232 */ 3233 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, 3234 int delta) 3235 { 3236 int nr_nodes, node; 3237 3238 lockdep_assert_held(&hugetlb_lock); 3239 VM_BUG_ON(delta != -1 && delta != 1); 3240 3241 if (delta < 0) { 3242 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { 3243 if (h->surplus_huge_pages_node[node]) 3244 goto found; 3245 } 3246 } else { 3247 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 3248 if (h->surplus_huge_pages_node[node] < 3249 h->nr_huge_pages_node[node]) 3250 goto found; 3251 } 3252 } 3253 return 0; 3254 3255 found: 3256 h->surplus_huge_pages += delta; 3257 h->surplus_huge_pages_node[node] += delta; 3258 return 1; 3259 } 3260 3261 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) 3262 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, 3263 nodemask_t *nodes_allowed) 3264 { 3265 unsigned long min_count, ret; 3266 struct page *page; 3267 LIST_HEAD(page_list); 3268 NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL); 3269 3270 /* 3271 * Bit mask controlling how hard we retry per-node allocations. 3272 * If we can not allocate the bit mask, do not attempt to allocate 3273 * the requested huge pages. 3274 */ 3275 if (node_alloc_noretry) 3276 nodes_clear(*node_alloc_noretry); 3277 else 3278 return -ENOMEM; 3279 3280 /* 3281 * resize_lock mutex prevents concurrent adjustments to number of 3282 * pages in hstate via the proc/sysfs interfaces. 3283 */ 3284 mutex_lock(&h->resize_lock); 3285 flush_free_hpage_work(h); 3286 spin_lock_irq(&hugetlb_lock); 3287 3288 /* 3289 * Check for a node specific request. 3290 * Changing node specific huge page count may require a corresponding 3291 * change to the global count. In any case, the passed node mask 3292 * (nodes_allowed) will restrict alloc/free to the specified node. 3293 */ 3294 if (nid != NUMA_NO_NODE) { 3295 unsigned long old_count = count; 3296 3297 count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; 3298 /* 3299 * User may have specified a large count value which caused the 3300 * above calculation to overflow. In this case, they wanted 3301 * to allocate as many huge pages as possible. Set count to 3302 * largest possible value to align with their intention. 3303 */ 3304 if (count < old_count) 3305 count = ULONG_MAX; 3306 } 3307 3308 /* 3309 * Gigantic pages runtime allocation depend on the capability for large 3310 * page range allocation. 3311 * If the system does not provide this feature, return an error when 3312 * the user tries to allocate gigantic pages but let the user free the 3313 * boottime allocated gigantic pages. 3314 */ 3315 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) { 3316 if (count > persistent_huge_pages(h)) { 3317 spin_unlock_irq(&hugetlb_lock); 3318 mutex_unlock(&h->resize_lock); 3319 NODEMASK_FREE(node_alloc_noretry); 3320 return -EINVAL; 3321 } 3322 /* Fall through to decrease pool */ 3323 } 3324 3325 /* 3326 * Increase the pool size 3327 * First take pages out of surplus state. Then make up the 3328 * remaining difference by allocating fresh huge pages. 3329 * 3330 * We might race with alloc_surplus_huge_page() here and be unable 3331 * to convert a surplus huge page to a normal huge page. That is 3332 * not critical, though, it just means the overall size of the 3333 * pool might be one hugepage larger than it needs to be, but 3334 * within all the constraints specified by the sysctls. 
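 * (persistent_huge_pages() is nr_huge_pages - surplus_huge_pages, i.e.
 * the pages the pool owns outright rather than overcommitted surplus.)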
3335 */ 3336 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { 3337 if (!adjust_pool_surplus(h, nodes_allowed, -1)) 3338 break; 3339 } 3340 3341 while (count > persistent_huge_pages(h)) { 3342 /* 3343 * If this allocation races such that we no longer need the 3344 * page, free_huge_page will handle it by freeing the page 3345 * and reducing the surplus. 3346 */ 3347 spin_unlock_irq(&hugetlb_lock); 3348 3349 /* yield cpu to avoid soft lockup */ 3350 cond_resched(); 3351 3352 ret = alloc_pool_huge_page(h, nodes_allowed, 3353 node_alloc_noretry); 3354 spin_lock_irq(&hugetlb_lock); 3355 if (!ret) 3356 goto out; 3357 3358 /* Bail for signals. Probably ctrl-c from user */ 3359 if (signal_pending(current)) 3360 goto out; 3361 } 3362 3363 /* 3364 * Decrease the pool size 3365 * First return free pages to the buddy allocator (being careful 3366 * to keep enough around to satisfy reservations). Then place 3367 * pages into surplus state as needed so the pool will shrink 3368 * to the desired size as pages become free. 3369 * 3370 * By placing pages into the surplus state independent of the 3371 * overcommit value, we are allowing the surplus pool size to 3372 * exceed overcommit. There are few sane options here. Since 3373 * alloc_surplus_huge_page() is checking the global counter, 3374 * though, we'll note that we're not allowed to exceed surplus 3375 * and won't grow the pool anywhere else. Not until one of the 3376 * sysctls are changed, or the surplus pages go out of use. 3377 */ 3378 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; 3379 min_count = max(count, min_count); 3380 try_to_free_low(h, min_count, nodes_allowed); 3381 3382 /* 3383 * Collect pages to be removed on list without dropping lock 3384 */ 3385 while (min_count < persistent_huge_pages(h)) { 3386 page = remove_pool_huge_page(h, nodes_allowed, 0); 3387 if (!page) 3388 break; 3389 3390 list_add(&page->lru, &page_list); 3391 } 3392 /* free the pages after dropping lock */ 3393 spin_unlock_irq(&hugetlb_lock); 3394 update_and_free_pages_bulk(h, &page_list); 3395 flush_free_hpage_work(h); 3396 spin_lock_irq(&hugetlb_lock); 3397 3398 while (count < persistent_huge_pages(h)) { 3399 if (!adjust_pool_surplus(h, nodes_allowed, 1)) 3400 break; 3401 } 3402 out: 3403 h->max_huge_pages = persistent_huge_pages(h); 3404 spin_unlock_irq(&hugetlb_lock); 3405 mutex_unlock(&h->resize_lock); 3406 3407 NODEMASK_FREE(node_alloc_noretry); 3408 3409 return 0; 3410 } 3411 3412 static int demote_free_huge_page(struct hstate *h, struct page *page) 3413 { 3414 int i, nid = page_to_nid(page); 3415 struct hstate *target_hstate; 3416 int rc = 0; 3417 3418 target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order); 3419 3420 remove_hugetlb_page_for_demote(h, page, false); 3421 spin_unlock_irq(&hugetlb_lock); 3422 3423 rc = alloc_huge_page_vmemmap(h, page); 3424 if (rc) { 3425 /* Allocation of vmemmmap failed, we can not demote page */ 3426 spin_lock_irq(&hugetlb_lock); 3427 set_page_refcounted(page); 3428 add_hugetlb_page(h, page, false); 3429 return rc; 3430 } 3431 3432 /* 3433 * Use destroy_compound_hugetlb_page_for_demote for all huge page 3434 * sizes as it will not ref count pages. 3435 */ 3436 destroy_compound_hugetlb_page_for_demote(page, huge_page_order(h)); 3437 3438 /* 3439 * Taking target hstate mutex synchronizes with set_max_huge_pages. 3440 * Without the mutex, pages added to target hstate could be marked 3441 * as surplus. 3442 * 3443 * Note that we already hold h->resize_lock. 
To prevent deadlock, 3444 * use the convention of always taking larger size hstate mutex first. 3445 */ 3446 mutex_lock(&target_hstate->resize_lock); 3447 for (i = 0; i < pages_per_huge_page(h); 3448 i += pages_per_huge_page(target_hstate)) { 3449 if (hstate_is_gigantic(target_hstate)) 3450 prep_compound_gigantic_page_for_demote(page + i, 3451 target_hstate->order); 3452 else 3453 prep_compound_page(page + i, target_hstate->order); 3454 set_page_private(page + i, 0); 3455 set_page_refcounted(page + i); 3456 prep_new_huge_page(target_hstate, page + i, nid); 3457 put_page(page + i); 3458 } 3459 mutex_unlock(&target_hstate->resize_lock); 3460 3461 spin_lock_irq(&hugetlb_lock); 3462 3463 /* 3464 * Not absolutely necessary, but for consistency update max_huge_pages 3465 * based on pool changes for the demoted page. 3466 */ 3467 h->max_huge_pages--; 3468 target_hstate->max_huge_pages += pages_per_huge_page(h); 3469 3470 return rc; 3471 } 3472 3473 static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed) 3474 __must_hold(&hugetlb_lock) 3475 { 3476 int nr_nodes, node; 3477 struct page *page; 3478 3479 lockdep_assert_held(&hugetlb_lock); 3480 3481 /* We should never get here if no demote order */ 3482 if (!h->demote_order) { 3483 pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n"); 3484 return -EINVAL; /* internal error */ 3485 } 3486 3487 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 3488 list_for_each_entry(page, &h->hugepage_freelists[node], lru) { 3489 if (PageHWPoison(page)) 3490 continue; 3491 3492 return demote_free_huge_page(h, page); 3493 } 3494 } 3495 3496 /* 3497 * Only way to get here is if all pages on free lists are poisoned. 3498 * Return -EBUSY so that caller will not retry. 3499 */ 3500 return -EBUSY; 3501 } 3502 3503 #define HSTATE_ATTR_RO(_name) \ 3504 static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 3505 3506 #define HSTATE_ATTR_WO(_name) \ 3507 static struct kobj_attribute _name##_attr = __ATTR_WO(_name) 3508 3509 #define HSTATE_ATTR(_name) \ 3510 static struct kobj_attribute _name##_attr = __ATTR_RW(_name) 3511 3512 static struct kobject *hugepages_kobj; 3513 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 3514 3515 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); 3516 3517 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) 3518 { 3519 int i; 3520 3521 for (i = 0; i < HUGE_MAX_HSTATE; i++) 3522 if (hstate_kobjs[i] == kobj) { 3523 if (nidp) 3524 *nidp = NUMA_NO_NODE; 3525 return &hstates[i]; 3526 } 3527 3528 return kobj_to_node_hstate(kobj, nidp); 3529 } 3530 3531 static ssize_t nr_hugepages_show_common(struct kobject *kobj, 3532 struct kobj_attribute *attr, char *buf) 3533 { 3534 struct hstate *h; 3535 unsigned long nr_huge_pages; 3536 int nid; 3537 3538 h = kobj_to_hstate(kobj, &nid); 3539 if (nid == NUMA_NO_NODE) 3540 nr_huge_pages = h->nr_huge_pages; 3541 else 3542 nr_huge_pages = h->nr_huge_pages_node[nid]; 3543 3544 return sysfs_emit(buf, "%lu\n", nr_huge_pages); 3545 } 3546 3547 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy, 3548 struct hstate *h, int nid, 3549 unsigned long count, size_t len) 3550 { 3551 int err; 3552 nodemask_t nodes_allowed, *n_mask; 3553 3554 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 3555 return -EINVAL; 3556 3557 if (nid == NUMA_NO_NODE) { 3558 /* 3559 * global hstate attribute 3560 */ 3561 if (!(obey_mempolicy && 3562 init_nodemask_of_mempolicy(&nodes_allowed))) 3563 n_mask = 
&node_states[N_MEMORY]; 3564 else 3565 n_mask = &nodes_allowed; 3566 } else { 3567 /* 3568 * Node specific request. count adjustment happens in 3569 * set_max_huge_pages() after acquiring hugetlb_lock. 3570 */ 3571 init_nodemask_of_node(&nodes_allowed, nid); 3572 n_mask = &nodes_allowed; 3573 } 3574 3575 err = set_max_huge_pages(h, count, nid, n_mask); 3576 3577 return err ? err : len; 3578 } 3579 3580 static ssize_t nr_hugepages_store_common(bool obey_mempolicy, 3581 struct kobject *kobj, const char *buf, 3582 size_t len) 3583 { 3584 struct hstate *h; 3585 unsigned long count; 3586 int nid; 3587 int err; 3588 3589 err = kstrtoul(buf, 10, &count); 3590 if (err) 3591 return err; 3592 3593 h = kobj_to_hstate(kobj, &nid); 3594 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); 3595 } 3596 3597 static ssize_t nr_hugepages_show(struct kobject *kobj, 3598 struct kobj_attribute *attr, char *buf) 3599 { 3600 return nr_hugepages_show_common(kobj, attr, buf); 3601 } 3602 3603 static ssize_t nr_hugepages_store(struct kobject *kobj, 3604 struct kobj_attribute *attr, const char *buf, size_t len) 3605 { 3606 return nr_hugepages_store_common(false, kobj, buf, len); 3607 } 3608 HSTATE_ATTR(nr_hugepages); 3609 3610 #ifdef CONFIG_NUMA 3611 3612 /* 3613 * hstate attribute for optionally mempolicy-based constraint on persistent 3614 * huge page alloc/free. 3615 */ 3616 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, 3617 struct kobj_attribute *attr, 3618 char *buf) 3619 { 3620 return nr_hugepages_show_common(kobj, attr, buf); 3621 } 3622 3623 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, 3624 struct kobj_attribute *attr, const char *buf, size_t len) 3625 { 3626 return nr_hugepages_store_common(true, kobj, buf, len); 3627 } 3628 HSTATE_ATTR(nr_hugepages_mempolicy); 3629 #endif 3630 3631 3632 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 3633 struct kobj_attribute *attr, char *buf) 3634 { 3635 struct hstate *h = kobj_to_hstate(kobj, NULL); 3636 return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages); 3637 } 3638 3639 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 3640 struct kobj_attribute *attr, const char *buf, size_t count) 3641 { 3642 int err; 3643 unsigned long input; 3644 struct hstate *h = kobj_to_hstate(kobj, NULL); 3645 3646 if (hstate_is_gigantic(h)) 3647 return -EINVAL; 3648 3649 err = kstrtoul(buf, 10, &input); 3650 if (err) 3651 return err; 3652 3653 spin_lock_irq(&hugetlb_lock); 3654 h->nr_overcommit_huge_pages = input; 3655 spin_unlock_irq(&hugetlb_lock); 3656 3657 return count; 3658 } 3659 HSTATE_ATTR(nr_overcommit_hugepages); 3660 3661 static ssize_t free_hugepages_show(struct kobject *kobj, 3662 struct kobj_attribute *attr, char *buf) 3663 { 3664 struct hstate *h; 3665 unsigned long free_huge_pages; 3666 int nid; 3667 3668 h = kobj_to_hstate(kobj, &nid); 3669 if (nid == NUMA_NO_NODE) 3670 free_huge_pages = h->free_huge_pages; 3671 else 3672 free_huge_pages = h->free_huge_pages_node[nid]; 3673 3674 return sysfs_emit(buf, "%lu\n", free_huge_pages); 3675 } 3676 HSTATE_ATTR_RO(free_hugepages); 3677 3678 static ssize_t resv_hugepages_show(struct kobject *kobj, 3679 struct kobj_attribute *attr, char *buf) 3680 { 3681 struct hstate *h = kobj_to_hstate(kobj, NULL); 3682 return sysfs_emit(buf, "%lu\n", h->resv_huge_pages); 3683 } 3684 HSTATE_ATTR_RO(resv_hugepages); 3685 3686 static ssize_t surplus_hugepages_show(struct kobject *kobj, 3687 struct kobj_attribute *attr, char *buf) 3688 { 3689 struct 
hstate *h; 3690 unsigned long surplus_huge_pages; 3691 int nid; 3692 3693 h = kobj_to_hstate(kobj, &nid); 3694 if (nid == NUMA_NO_NODE) 3695 surplus_huge_pages = h->surplus_huge_pages; 3696 else 3697 surplus_huge_pages = h->surplus_huge_pages_node[nid]; 3698 3699 return sysfs_emit(buf, "%lu\n", surplus_huge_pages); 3700 } 3701 HSTATE_ATTR_RO(surplus_hugepages); 3702 3703 static ssize_t demote_store(struct kobject *kobj, 3704 struct kobj_attribute *attr, const char *buf, size_t len) 3705 { 3706 unsigned long nr_demote; 3707 unsigned long nr_available; 3708 nodemask_t nodes_allowed, *n_mask; 3709 struct hstate *h; 3710 int err = 0; 3711 int nid; 3712 3713 err = kstrtoul(buf, 10, &nr_demote); 3714 if (err) 3715 return err; 3716 h = kobj_to_hstate(kobj, &nid); 3717 3718 if (nid != NUMA_NO_NODE) { 3719 init_nodemask_of_node(&nodes_allowed, nid); 3720 n_mask = &nodes_allowed; 3721 } else { 3722 n_mask = &node_states[N_MEMORY]; 3723 } 3724 3725 /* Synchronize with other sysfs operations modifying huge pages */ 3726 mutex_lock(&h->resize_lock); 3727 spin_lock_irq(&hugetlb_lock); 3728 3729 while (nr_demote) { 3730 /* 3731 * Check for available pages to demote each time through the 3732 * loop as demote_pool_huge_page will drop hugetlb_lock. 3733 */ 3734 if (nid != NUMA_NO_NODE) 3735 nr_available = h->free_huge_pages_node[nid]; 3736 else 3737 nr_available = h->free_huge_pages; 3738 nr_available -= h->resv_huge_pages; 3739 if (!nr_available) 3740 break; 3741 3742 err = demote_pool_huge_page(h, n_mask); 3743 if (err) 3744 break; 3745 3746 nr_demote--; 3747 } 3748 3749 spin_unlock_irq(&hugetlb_lock); 3750 mutex_unlock(&h->resize_lock); 3751 3752 if (err) 3753 return err; 3754 return len; 3755 } 3756 HSTATE_ATTR_WO(demote); 3757 3758 static ssize_t demote_size_show(struct kobject *kobj, 3759 struct kobj_attribute *attr, char *buf) 3760 { 3761 int nid; 3762 struct hstate *h = kobj_to_hstate(kobj, &nid); 3763 unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K; 3764 3765 return sysfs_emit(buf, "%lukB\n", demote_size); 3766 } 3767 3768 static ssize_t demote_size_store(struct kobject *kobj, 3769 struct kobj_attribute *attr, 3770 const char *buf, size_t count) 3771 { 3772 struct hstate *h, *demote_hstate; 3773 unsigned long demote_size; 3774 unsigned int demote_order; 3775 int nid; 3776 3777 demote_size = (unsigned long)memparse(buf, NULL); 3778 3779 demote_hstate = size_to_hstate(demote_size); 3780 if (!demote_hstate) 3781 return -EINVAL; 3782 demote_order = demote_hstate->order; 3783 if (demote_order < HUGETLB_PAGE_ORDER) 3784 return -EINVAL; 3785 3786 /* demote order must be smaller than hstate order */ 3787 h = kobj_to_hstate(kobj, &nid); 3788 if (demote_order >= h->order) 3789 return -EINVAL; 3790 3791 /* resize_lock synchronizes access to demote size and writes */ 3792 mutex_lock(&h->resize_lock); 3793 h->demote_order = demote_order; 3794 mutex_unlock(&h->resize_lock); 3795 3796 return count; 3797 } 3798 HSTATE_ATTR(demote_size); 3799 3800 static struct attribute *hstate_attrs[] = { 3801 &nr_hugepages_attr.attr, 3802 &nr_overcommit_hugepages_attr.attr, 3803 &free_hugepages_attr.attr, 3804 &resv_hugepages_attr.attr, 3805 &surplus_hugepages_attr.attr, 3806 #ifdef CONFIG_NUMA 3807 &nr_hugepages_mempolicy_attr.attr, 3808 #endif 3809 NULL, 3810 }; 3811 3812 static const struct attribute_group hstate_attr_group = { 3813 .attrs = hstate_attrs, 3814 }; 3815 3816 static struct attribute *hstate_demote_attrs[] = { 3817 &demote_size_attr.attr, 3818 &demote_attr.attr, 3819 NULL, 3820 }; 3821 3822
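/* Demote attributes are only exposed for hstates with a non-zero demote_order; see hugetlb_sysfs_add_hstate(). */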
static const struct attribute_group hstate_demote_attr_group = { 3823 .attrs = hstate_demote_attrs, 3824 }; 3825 3826 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, 3827 struct kobject **hstate_kobjs, 3828 const struct attribute_group *hstate_attr_group) 3829 { 3830 int retval; 3831 int hi = hstate_index(h); 3832 3833 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); 3834 if (!hstate_kobjs[hi]) 3835 return -ENOMEM; 3836 3837 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); 3838 if (retval) { 3839 kobject_put(hstate_kobjs[hi]); 3840 hstate_kobjs[hi] = NULL; 3841 } 3842 3843 if (h->demote_order) { 3844 if (sysfs_create_group(hstate_kobjs[hi], 3845 &hstate_demote_attr_group)) 3846 pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name); 3847 } 3848 3849 return retval; 3850 } 3851 3852 static void __init hugetlb_sysfs_init(void) 3853 { 3854 struct hstate *h; 3855 int err; 3856 3857 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 3858 if (!hugepages_kobj) 3859 return; 3860 3861 for_each_hstate(h) { 3862 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, 3863 hstate_kobjs, &hstate_attr_group); 3864 if (err) 3865 pr_err("HugeTLB: Unable to add hstate %s", h->name); 3866 } 3867 } 3868 3869 #ifdef CONFIG_NUMA 3870 3871 /* 3872 * node_hstate/s - associate per node hstate attributes, via their kobjects, 3873 * with node devices in node_devices[] using a parallel array. The array 3874 * index of a node device or _hstate == node id. 3875 * This is here to avoid any static dependency of the node device driver, in 3876 * the base kernel, on the hugetlb module. 3877 */ 3878 struct node_hstate { 3879 struct kobject *hugepages_kobj; 3880 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 3881 }; 3882 static struct node_hstate node_hstates[MAX_NUMNODES]; 3883 3884 /* 3885 * A subset of global hstate attributes for node devices 3886 */ 3887 static struct attribute *per_node_hstate_attrs[] = { 3888 &nr_hugepages_attr.attr, 3889 &free_hugepages_attr.attr, 3890 &surplus_hugepages_attr.attr, 3891 NULL, 3892 }; 3893 3894 static const struct attribute_group per_node_hstate_attr_group = { 3895 .attrs = per_node_hstate_attrs, 3896 }; 3897 3898 /* 3899 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. 3900 * Returns node id via non-NULL nidp. 3901 */ 3902 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 3903 { 3904 int nid; 3905 3906 for (nid = 0; nid < nr_node_ids; nid++) { 3907 struct node_hstate *nhs = &node_hstates[nid]; 3908 int i; 3909 for (i = 0; i < HUGE_MAX_HSTATE; i++) 3910 if (nhs->hstate_kobjs[i] == kobj) { 3911 if (nidp) 3912 *nidp = nid; 3913 return &hstates[i]; 3914 } 3915 } 3916 3917 BUG(); 3918 return NULL; 3919 } 3920 3921 /* 3922 * Unregister hstate attributes from a single node device. 3923 * No-op if no hstate attributes attached. 3924 */ 3925 static void hugetlb_unregister_node(struct node *node) 3926 { 3927 struct hstate *h; 3928 struct node_hstate *nhs = &node_hstates[node->dev.id]; 3929 3930 if (!nhs->hugepages_kobj) 3931 return; /* no hstate attributes */ 3932 3933 for_each_hstate(h) { 3934 int idx = hstate_index(h); 3935 if (nhs->hstate_kobjs[idx]) { 3936 kobject_put(nhs->hstate_kobjs[idx]); 3937 nhs->hstate_kobjs[idx] = NULL; 3938 } 3939 } 3940 3941 kobject_put(nhs->hugepages_kobj); 3942 nhs->hugepages_kobj = NULL; 3943 } 3944 3945 3946 /* 3947 * Register hstate attributes for a single node device. 3948 * No-op if attributes already registered. 
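 * Called for each N_MEMORY node at hugetlb init time and again from the
 * node driver's hotplug callbacks (see hugetlb_register_all_nodes()).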
3949 */ 3950 static void hugetlb_register_node(struct node *node) 3951 { 3952 struct hstate *h; 3953 struct node_hstate *nhs = &node_hstates[node->dev.id]; 3954 int err; 3955 3956 if (nhs->hugepages_kobj) 3957 return; /* already allocated */ 3958 3959 nhs->hugepages_kobj = kobject_create_and_add("hugepages", 3960 &node->dev.kobj); 3961 if (!nhs->hugepages_kobj) 3962 return; 3963 3964 for_each_hstate(h) { 3965 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, 3966 nhs->hstate_kobjs, 3967 &per_node_hstate_attr_group); 3968 if (err) { 3969 pr_err("HugeTLB: Unable to add hstate %s for node %d\n", 3970 h->name, node->dev.id); 3971 hugetlb_unregister_node(node); 3972 break; 3973 } 3974 } 3975 } 3976 3977 /* 3978 * hugetlb init time: register hstate attributes for all registered node 3979 * devices of nodes that have memory. All on-line nodes should have 3980 * registered their associated device by this time. 3981 */ 3982 static void __init hugetlb_register_all_nodes(void) 3983 { 3984 int nid; 3985 3986 for_each_node_state(nid, N_MEMORY) { 3987 struct node *node = node_devices[nid]; 3988 if (node->dev.id == nid) 3989 hugetlb_register_node(node); 3990 } 3991 3992 /* 3993 * Let the node device driver know we're here so it can 3994 * [un]register hstate attributes on node hotplug. 3995 */ 3996 register_hugetlbfs_with_node(hugetlb_register_node, 3997 hugetlb_unregister_node); 3998 } 3999 #else /* !CONFIG_NUMA */ 4000 4001 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 4002 { 4003 BUG(); 4004 if (nidp) 4005 *nidp = -1; 4006 return NULL; 4007 } 4008 4009 static void hugetlb_register_all_nodes(void) { } 4010 4011 #endif 4012 4013 static int __init hugetlb_init(void) 4014 { 4015 int i; 4016 4017 BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE < 4018 __NR_HPAGEFLAGS); 4019 4020 if (!hugepages_supported()) { 4021 if (hugetlb_max_hstate || default_hstate_max_huge_pages) 4022 pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n"); 4023 return 0; 4024 } 4025 4026 /* 4027 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some 4028 * architectures depend on setup being done here. 4029 */ 4030 hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 4031 if (!parsed_default_hugepagesz) { 4032 /* 4033 * If we did not parse a default huge page size, set 4034 * default_hstate_idx to HPAGE_SIZE hstate. And, if the 4035 * number of huge pages for this default size was implicitly 4036 * specified, set that here as well. 4037 * Note that the implicit setting will overwrite an explicit 4038 * setting. A warning will be printed in this case. 
4039 */ 4040 default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE)); 4041 if (default_hstate_max_huge_pages) { 4042 if (default_hstate.max_huge_pages) { 4043 char buf[32]; 4044 4045 string_get_size(huge_page_size(&default_hstate), 4046 1, STRING_UNITS_2, buf, 32); 4047 pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n", 4048 default_hstate.max_huge_pages, buf); 4049 pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n", 4050 default_hstate_max_huge_pages); 4051 } 4052 default_hstate.max_huge_pages = 4053 default_hstate_max_huge_pages; 4054 4055 for (i = 0; i < nr_online_nodes; i++) 4056 default_hstate.max_huge_pages_node[i] = 4057 default_hugepages_in_node[i]; 4058 } 4059 } 4060 4061 hugetlb_cma_check(); 4062 hugetlb_init_hstates(); 4063 gather_bootmem_prealloc(); 4064 report_hugepages(); 4065 4066 hugetlb_sysfs_init(); 4067 hugetlb_register_all_nodes(); 4068 hugetlb_cgroup_file_init(); 4069 4070 #ifdef CONFIG_SMP 4071 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus()); 4072 #else 4073 num_fault_mutexes = 1; 4074 #endif 4075 hugetlb_fault_mutex_table = 4076 kmalloc_array(num_fault_mutexes, sizeof(struct mutex), 4077 GFP_KERNEL); 4078 BUG_ON(!hugetlb_fault_mutex_table); 4079 4080 for (i = 0; i < num_fault_mutexes; i++) 4081 mutex_init(&hugetlb_fault_mutex_table[i]); 4082 return 0; 4083 } 4084 subsys_initcall(hugetlb_init); 4085 4086 /* Overwritten by architectures with more huge page sizes */ 4087 bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size) 4088 { 4089 return size == HPAGE_SIZE; 4090 } 4091 4092 void __init hugetlb_add_hstate(unsigned int order) 4093 { 4094 struct hstate *h; 4095 unsigned long i; 4096 4097 if (size_to_hstate(PAGE_SIZE << order)) { 4098 return; 4099 } 4100 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE); 4101 BUG_ON(order == 0); 4102 h = &hstates[hugetlb_max_hstate++]; 4103 mutex_init(&h->resize_lock); 4104 h->order = order; 4105 h->mask = ~(huge_page_size(h) - 1); 4106 for (i = 0; i < MAX_NUMNODES; ++i) 4107 INIT_LIST_HEAD(&h->hugepage_freelists[i]); 4108 INIT_LIST_HEAD(&h->hugepage_activelist); 4109 h->next_nid_to_alloc = first_memory_node; 4110 h->next_nid_to_free = first_memory_node; 4111 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", 4112 huge_page_size(h)/1024); 4113 hugetlb_vmemmap_init(h); 4114 4115 parsed_hstate = h; 4116 } 4117 4118 bool __init __weak hugetlb_node_alloc_supported(void) 4119 { 4120 return true; 4121 } 4122 /* 4123 * hugepages command line processing 4124 * hugepages normally follows a valid hugepagesz or default_hugepagesz 4125 * specification. If not, ignore the hugepages value. hugepages can also 4126 * be the first huge page command line option in which case it implicitly 4127 * specifies the number of huge pages for the default size. 4128 */ 4129 static int __init hugepages_setup(char *s) 4130 { 4131 unsigned long *mhp; 4132 static unsigned long *last_mhp; 4133 int node = NUMA_NO_NODE; 4134 int count; 4135 unsigned long tmp; 4136 char *p = s; 4137 4138 if (!parsed_valid_hugepagesz) { 4139 pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s); 4140 parsed_valid_hugepagesz = true; 4141 return 0; 4142 } 4143 4144 /* 4145 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter 4146 * yet, so this hugepages= parameter goes to the "default hstate". 4147 * Otherwise, it goes with the previously parsed hugepagesz or 4148 * default_hugepagesz.
4149 */ 4150 else if (!hugetlb_max_hstate) 4151 mhp = &default_hstate_max_huge_pages; 4152 else 4153 mhp = &parsed_hstate->max_huge_pages; 4154 4155 if (mhp == last_mhp) { 4156 pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s); 4157 return 0; 4158 } 4159 4160 while (*p) { 4161 count = 0; 4162 if (sscanf(p, "%lu%n", &tmp, &count) != 1) 4163 goto invalid; 4164 /* Parameter is node format */ 4165 if (p[count] == ':') { 4166 if (!hugetlb_node_alloc_supported()) { 4167 pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n"); 4168 return 0; 4169 } 4170 if (tmp >= nr_online_nodes) 4171 goto invalid; 4172 node = array_index_nospec(tmp, nr_online_nodes); 4173 p += count + 1; 4174 /* Parse hugepages */ 4175 if (sscanf(p, "%lu%n", &tmp, &count) != 1) 4176 goto invalid; 4177 if (!hugetlb_max_hstate) 4178 default_hugepages_in_node[node] = tmp; 4179 else 4180 parsed_hstate->max_huge_pages_node[node] = tmp; 4181 *mhp += tmp; 4182 /* Go to parse next node*/ 4183 if (p[count] == ',') 4184 p += count + 1; 4185 else 4186 break; 4187 } else { 4188 if (p != s) 4189 goto invalid; 4190 *mhp = tmp; 4191 break; 4192 } 4193 } 4194 4195 /* 4196 * Global state is always initialized later in hugetlb_init. 4197 * But we need to allocate gigantic hstates here early to still 4198 * use the bootmem allocator. 4199 */ 4200 if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate)) 4201 hugetlb_hstate_alloc_pages(parsed_hstate); 4202 4203 last_mhp = mhp; 4204 4205 return 1; 4206 4207 invalid: 4208 pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p); 4209 return 0; 4210 } 4211 __setup("hugepages=", hugepages_setup); 4212 4213 /* 4214 * hugepagesz command line processing 4215 * A specific huge page size can only be specified once with hugepagesz. 4216 * hugepagesz is followed by hugepages on the command line. The global 4217 * variable 'parsed_valid_hugepagesz' is used to determine if prior 4218 * hugepagesz argument was valid. 4219 */ 4220 static int __init hugepagesz_setup(char *s) 4221 { 4222 unsigned long size; 4223 struct hstate *h; 4224 4225 parsed_valid_hugepagesz = false; 4226 size = (unsigned long)memparse(s, NULL); 4227 4228 if (!arch_hugetlb_valid_size(size)) { 4229 pr_err("HugeTLB: unsupported hugepagesz=%s\n", s); 4230 return 0; 4231 } 4232 4233 h = size_to_hstate(size); 4234 if (h) { 4235 /* 4236 * hstate for this size already exists. This is normally 4237 * an error, but is allowed if the existing hstate is the 4238 * default hstate. More specifically, it is only allowed if 4239 * the number of huge pages for the default hstate was not 4240 * previously specified. 4241 */ 4242 if (!parsed_default_hugepagesz || h != &default_hstate || 4243 default_hstate.max_huge_pages) { 4244 pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s); 4245 return 0; 4246 } 4247 4248 /* 4249 * No need to call hugetlb_add_hstate() as hstate already 4250 * exists. But, do set parsed_hstate so that a following 4251 * hugepages= parameter will be applied to this hstate. 4252 */ 4253 parsed_hstate = h; 4254 parsed_valid_hugepagesz = true; 4255 return 1; 4256 } 4257 4258 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); 4259 parsed_valid_hugepagesz = true; 4260 return 1; 4261 } 4262 __setup("hugepagesz=", hugepagesz_setup); 4263 4264 /* 4265 * default_hugepagesz command line input 4266 * Only one instance of default_hugepagesz allowed on command line. 
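 * The hstate selected here becomes default_hstate, which backs the
 * HugePages_* fields in /proc/meminfo and the nr_hugepages sysctl.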
4267 */ 4268 static int __init default_hugepagesz_setup(char *s) 4269 { 4270 unsigned long size; 4271 int i; 4272 4273 parsed_valid_hugepagesz = false; 4274 if (parsed_default_hugepagesz) { 4275 pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s); 4276 return 0; 4277 } 4278 4279 size = (unsigned long)memparse(s, NULL); 4280 4281 if (!arch_hugetlb_valid_size(size)) { 4282 pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s); 4283 return 0; 4284 } 4285 4286 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); 4287 parsed_valid_hugepagesz = true; 4288 parsed_default_hugepagesz = true; 4289 default_hstate_idx = hstate_index(size_to_hstate(size)); 4290 4291 /* 4292 * The number of default huge pages (for this size) could have been 4293 * specified as the first hugetlb parameter: hugepages=X. If so, 4294 * then default_hstate_max_huge_pages is set. If the default huge 4295 * page size is gigantic (>= MAX_ORDER), then the pages must be 4296 * allocated here from bootmem allocator. 4297 */ 4298 if (default_hstate_max_huge_pages) { 4299 default_hstate.max_huge_pages = default_hstate_max_huge_pages; 4300 for (i = 0; i < nr_online_nodes; i++) 4301 default_hstate.max_huge_pages_node[i] = 4302 default_hugepages_in_node[i]; 4303 if (hstate_is_gigantic(&default_hstate)) 4304 hugetlb_hstate_alloc_pages(&default_hstate); 4305 default_hstate_max_huge_pages = 0; 4306 } 4307 4308 return 1; 4309 } 4310 __setup("default_hugepagesz=", default_hugepagesz_setup); 4311 4312 static unsigned int allowed_mems_nr(struct hstate *h) 4313 { 4314 int node; 4315 unsigned int nr = 0; 4316 nodemask_t *mpol_allowed; 4317 unsigned int *array = h->free_huge_pages_node; 4318 gfp_t gfp_mask = htlb_alloc_mask(h); 4319 4320 mpol_allowed = policy_nodemask_current(gfp_mask); 4321 4322 for_each_node_mask(node, cpuset_current_mems_allowed) { 4323 if (!mpol_allowed || node_isset(node, *mpol_allowed)) 4324 nr += array[node]; 4325 } 4326 4327 return nr; 4328 } 4329 4330 #ifdef CONFIG_SYSCTL 4331 static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write, 4332 void *buffer, size_t *length, 4333 loff_t *ppos, unsigned long *out) 4334 { 4335 struct ctl_table dup_table; 4336 4337 /* 4338 * In order to avoid races with __do_proc_doulongvec_minmax(), we 4339 * can duplicate the @table and alter the duplicate of it. 
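 * The duplicate's ->data is pointed at @out, so only the caller's local
 * copy, not the shared table, is read or written.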
4340 */ 4341 dup_table = *table; 4342 dup_table.data = out; 4343 4344 return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos); 4345 } 4346 4347 static int hugetlb_sysctl_handler_common(bool obey_mempolicy, 4348 struct ctl_table *table, int write, 4349 void *buffer, size_t *length, loff_t *ppos) 4350 { 4351 struct hstate *h = &default_hstate; 4352 unsigned long tmp = h->max_huge_pages; 4353 int ret; 4354 4355 if (!hugepages_supported()) 4356 return -EOPNOTSUPP; 4357 4358 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 4359 &tmp); 4360 if (ret) 4361 goto out; 4362 4363 if (write) 4364 ret = __nr_hugepages_store_common(obey_mempolicy, h, 4365 NUMA_NO_NODE, tmp, *length); 4366 out: 4367 return ret; 4368 } 4369 4370 int hugetlb_sysctl_handler(struct ctl_table *table, int write, 4371 void *buffer, size_t *length, loff_t *ppos) 4372 { 4373 4374 return hugetlb_sysctl_handler_common(false, table, write, 4375 buffer, length, ppos); 4376 } 4377 4378 #ifdef CONFIG_NUMA 4379 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, 4380 void *buffer, size_t *length, loff_t *ppos) 4381 { 4382 return hugetlb_sysctl_handler_common(true, table, write, 4383 buffer, length, ppos); 4384 } 4385 #endif /* CONFIG_NUMA */ 4386 4387 int hugetlb_overcommit_handler(struct ctl_table *table, int write, 4388 void *buffer, size_t *length, loff_t *ppos) 4389 { 4390 struct hstate *h = &default_hstate; 4391 unsigned long tmp; 4392 int ret; 4393 4394 if (!hugepages_supported()) 4395 return -EOPNOTSUPP; 4396 4397 tmp = h->nr_overcommit_huge_pages; 4398 4399 if (write && hstate_is_gigantic(h)) 4400 return -EINVAL; 4401 4402 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 4403 &tmp); 4404 if (ret) 4405 goto out; 4406 4407 if (write) { 4408 spin_lock_irq(&hugetlb_lock); 4409 h->nr_overcommit_huge_pages = tmp; 4410 spin_unlock_irq(&hugetlb_lock); 4411 } 4412 out: 4413 return ret; 4414 } 4415 4416 #endif /* CONFIG_SYSCTL */ 4417 4418 void hugetlb_report_meminfo(struct seq_file *m) 4419 { 4420 struct hstate *h; 4421 unsigned long total = 0; 4422 4423 if (!hugepages_supported()) 4424 return; 4425 4426 for_each_hstate(h) { 4427 unsigned long count = h->nr_huge_pages; 4428 4429 total += huge_page_size(h) * count; 4430 4431 if (h == &default_hstate) 4432 seq_printf(m, 4433 "HugePages_Total: %5lu\n" 4434 "HugePages_Free: %5lu\n" 4435 "HugePages_Rsvd: %5lu\n" 4436 "HugePages_Surp: %5lu\n" 4437 "Hugepagesize: %8lu kB\n", 4438 count, 4439 h->free_huge_pages, 4440 h->resv_huge_pages, 4441 h->surplus_huge_pages, 4442 huge_page_size(h) / SZ_1K); 4443 } 4444 4445 seq_printf(m, "Hugetlb: %8lu kB\n", total / SZ_1K); 4446 } 4447 4448 int hugetlb_report_node_meminfo(char *buf, int len, int nid) 4449 { 4450 struct hstate *h = &default_hstate; 4451 4452 if (!hugepages_supported()) 4453 return 0; 4454 4455 return sysfs_emit_at(buf, len, 4456 "Node %d HugePages_Total: %5u\n" 4457 "Node %d HugePages_Free: %5u\n" 4458 "Node %d HugePages_Surp: %5u\n", 4459 nid, h->nr_huge_pages_node[nid], 4460 nid, h->free_huge_pages_node[nid], 4461 nid, h->surplus_huge_pages_node[nid]); 4462 } 4463 4464 void hugetlb_show_meminfo(void) 4465 { 4466 struct hstate *h; 4467 int nid; 4468 4469 if (!hugepages_supported()) 4470 return; 4471 4472 for_each_node_state(nid, N_MEMORY) 4473 for_each_hstate(h) 4474 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n", 4475 nid, 4476 h->nr_huge_pages_node[nid], 4477 h->free_huge_pages_node[nid], 4478 
h->surplus_huge_pages_node[nid], 4479 huge_page_size(h) / SZ_1K); 4480 } 4481 4482 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm) 4483 { 4484 seq_printf(m, "HugetlbPages:\t%8lu kB\n", 4485 atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10)); 4486 } 4487 4488 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */ 4489 unsigned long hugetlb_total_pages(void) 4490 { 4491 struct hstate *h; 4492 unsigned long nr_total_pages = 0; 4493 4494 for_each_hstate(h) 4495 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); 4496 return nr_total_pages; 4497 } 4498 4499 static int hugetlb_acct_memory(struct hstate *h, long delta) 4500 { 4501 int ret = -ENOMEM; 4502 4503 if (!delta) 4504 return 0; 4505 4506 spin_lock_irq(&hugetlb_lock); 4507 /* 4508 * When cpuset is configured, it breaks the strict hugetlb page 4509 * reservation as the accounting is done on a global variable. Such 4510 * reservation is completely rubbish in the presence of cpuset because 4511 * the reservation is not checked against page availability for the 4512 * current cpuset. Applications can still potentially be OOM'd by the 4513 * kernel due to a lack of free htlb pages in the cpuset the task is in. 4514 * Attempting to enforce strict accounting with cpuset is almost 4515 * impossible (or too ugly) because cpusets are too fluid: tasks or 4516 * memory nodes can be dynamically moved between cpusets. 4517 * 4518 * The change of semantics for shared hugetlb mappings with cpuset is 4519 * undesirable. However, in order to preserve some of the semantics, 4520 * we fall back to checking against the current free page availability 4521 * as a best attempt and hopefully minimize the impact of changing 4522 * semantics that cpuset has. 4523 * 4524 * Apart from cpuset, there is also the memory policy mechanism, which 4525 * determines from which node the kernel will allocate memory 4526 * in a NUMA system. So, similar to cpuset, we should also consider 4527 * the memory policy of the current task, as described 4528 * above. 4529 */ 4530 if (delta > 0) { 4531 if (gather_surplus_pages(h, delta) < 0) 4532 goto out; 4533 4534 if (delta > allowed_mems_nr(h)) { 4535 return_unused_surplus_pages(h, delta); 4536 goto out; 4537 } 4538 } 4539 4540 ret = 0; 4541 if (delta < 0) 4542 return_unused_surplus_pages(h, (unsigned long) -delta); 4543 4544 out: 4545 spin_unlock_irq(&hugetlb_lock); 4546 return ret; 4547 } 4548 4549 static void hugetlb_vm_op_open(struct vm_area_struct *vma) 4550 { 4551 struct resv_map *resv = vma_resv_map(vma); 4552 4553 /* 4554 * This new VMA should share its sibling's reservation map if present. 4555 * The VMA will only ever have a valid reservation map pointer where 4556 * it is being copied for another still existing VMA. As that VMA 4557 * has a reference to the reservation map it cannot disappear until 4558 * after this open call completes. It is therefore safe to take a 4559 * new reference here without additional locking.
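 * Note that the reference is only taken when this VMA owns the reserves
 * (HPAGE_RESV_OWNER), matching the kref_put() in hugetlb_vm_op_close().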
4560 */ 4561 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 4562 resv_map_dup_hugetlb_cgroup_uncharge_info(resv); 4563 kref_get(&resv->refs); 4564 } 4565 } 4566 4567 static void hugetlb_vm_op_close(struct vm_area_struct *vma) 4568 { 4569 struct hstate *h = hstate_vma(vma); 4570 struct resv_map *resv = vma_resv_map(vma); 4571 struct hugepage_subpool *spool = subpool_vma(vma); 4572 unsigned long reserve, start, end; 4573 long gbl_reserve; 4574 4575 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 4576 return; 4577 4578 start = vma_hugecache_offset(h, vma, vma->vm_start); 4579 end = vma_hugecache_offset(h, vma, vma->vm_end); 4580 4581 reserve = (end - start) - region_count(resv, start, end); 4582 hugetlb_cgroup_uncharge_counter(resv, start, end); 4583 if (reserve) { 4584 /* 4585 * Decrement reserve counts. The global reserve count may be 4586 * adjusted if the subpool has a minimum size. 4587 */ 4588 gbl_reserve = hugepage_subpool_put_pages(spool, reserve); 4589 hugetlb_acct_memory(h, -gbl_reserve); 4590 } 4591 4592 kref_put(&resv->refs, resv_map_release); 4593 } 4594 4595 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr) 4596 { 4597 if (addr & ~(huge_page_mask(hstate_vma(vma)))) 4598 return -EINVAL; 4599 return 0; 4600 } 4601 4602 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma) 4603 { 4604 return huge_page_size(hstate_vma(vma)); 4605 } 4606 4607 /* 4608 * We cannot handle pagefaults against hugetlb pages at all. They cause 4609 * handle_mm_fault() to try to instantiate regular-sized pages in the 4610 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get 4611 * this far. 4612 */ 4613 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf) 4614 { 4615 BUG(); 4616 return 0; 4617 } 4618 4619 /* 4620 * When a new function is introduced to vm_operations_struct and added 4621 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops. 4622 * This is because, under the System V memory model, mappings created via 4623 * shmget/shmat with "huge page" specified are backed by hugetlbfs files, 4624 * and their original vm_ops are overwritten with shm_vm_ops.
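 * (ipc/shm.c keeps the hugetlbfs vm_ops in its shm_file_data and forwards
 * callbacks such as .fault to them from shm_vm_ops.)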
4625 */ 4626 const struct vm_operations_struct hugetlb_vm_ops = { 4627 .fault = hugetlb_vm_op_fault, 4628 .open = hugetlb_vm_op_open, 4629 .close = hugetlb_vm_op_close, 4630 .may_split = hugetlb_vm_op_split, 4631 .pagesize = hugetlb_vm_op_pagesize, 4632 }; 4633 4634 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 4635 int writable) 4636 { 4637 pte_t entry; 4638 unsigned int shift = huge_page_shift(hstate_vma(vma)); 4639 4640 if (writable) { 4641 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, 4642 vma->vm_page_prot))); 4643 } else { 4644 entry = huge_pte_wrprotect(mk_huge_pte(page, 4645 vma->vm_page_prot)); 4646 } 4647 entry = pte_mkyoung(entry); 4648 entry = arch_make_huge_pte(entry, shift, vma->vm_flags); 4649 4650 return entry; 4651 } 4652 4653 static void set_huge_ptep_writable(struct vm_area_struct *vma, 4654 unsigned long address, pte_t *ptep) 4655 { 4656 pte_t entry; 4657 4658 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep))); 4659 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) 4660 update_mmu_cache(vma, address, ptep); 4661 } 4662 4663 bool is_hugetlb_entry_migration(pte_t pte) 4664 { 4665 swp_entry_t swp; 4666 4667 if (huge_pte_none(pte) || pte_present(pte)) 4668 return false; 4669 swp = pte_to_swp_entry(pte); 4670 if (is_migration_entry(swp)) 4671 return true; 4672 else 4673 return false; 4674 } 4675 4676 static bool is_hugetlb_entry_hwpoisoned(pte_t pte) 4677 { 4678 swp_entry_t swp; 4679 4680 if (huge_pte_none(pte) || pte_present(pte)) 4681 return false; 4682 swp = pte_to_swp_entry(pte); 4683 if (is_hwpoison_entry(swp)) 4684 return true; 4685 else 4686 return false; 4687 } 4688 4689 static void 4690 hugetlb_install_page(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr, 4691 struct page *new_page) 4692 { 4693 __SetPageUptodate(new_page); 4694 hugepage_add_new_anon_rmap(new_page, vma, addr); 4695 set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1)); 4696 hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm); 4697 ClearHPageRestoreReserve(new_page); 4698 SetHPageMigratable(new_page); 4699 } 4700 4701 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 4702 struct vm_area_struct *vma) 4703 { 4704 pte_t *src_pte, *dst_pte, entry, dst_entry; 4705 struct page *ptepage; 4706 unsigned long addr; 4707 bool cow = is_cow_mapping(vma->vm_flags); 4708 struct hstate *h = hstate_vma(vma); 4709 unsigned long sz = huge_page_size(h); 4710 unsigned long npages = pages_per_huge_page(h); 4711 struct address_space *mapping = vma->vm_file->f_mapping; 4712 struct mmu_notifier_range range; 4713 int ret = 0; 4714 4715 if (cow) { 4716 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src, 4717 vma->vm_start, 4718 vma->vm_end); 4719 mmu_notifier_invalidate_range_start(&range); 4720 } else { 4721 /* 4722 * For shared mappings i_mmap_rwsem must be held to call 4723 * huge_pte_alloc, otherwise the returned ptep could go 4724 * away if part of a shared pmd and another thread calls 4725 * huge_pmd_unshare. 4726 */ 4727 i_mmap_lock_read(mapping); 4728 } 4729 4730 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { 4731 spinlock_t *src_ptl, *dst_ptl; 4732 src_pte = huge_pte_offset(src, addr, sz); 4733 if (!src_pte) 4734 continue; 4735 dst_pte = huge_pte_alloc(dst, vma, addr, sz); 4736 if (!dst_pte) { 4737 ret = -ENOMEM; 4738 break; 4739 } 4740 4741 /* 4742 * If the pagetables are shared don't copy or take references. 
4743 * dst_pte == src_pte is the common case of src/dest sharing. 4744 * 4745 * However, src could have 'unshared' and dst shares with 4746 * another vma. If dst_pte !none, this implies sharing. 4747 * Check here before taking page table lock, and once again 4748 * after taking the lock below. 4749 */ 4750 dst_entry = huge_ptep_get(dst_pte); 4751 if ((dst_pte == src_pte) || !huge_pte_none(dst_entry)) 4752 continue; 4753 4754 dst_ptl = huge_pte_lock(h, dst, dst_pte); 4755 src_ptl = huge_pte_lockptr(h, src, src_pte); 4756 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 4757 entry = huge_ptep_get(src_pte); 4758 dst_entry = huge_ptep_get(dst_pte); 4759 again: 4760 if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) { 4761 /* 4762 * Skip if src entry none. Also, skip in the 4763 * unlikely case dst entry !none as this implies 4764 * sharing with another vma. 4765 */ 4766 ; 4767 } else if (unlikely(is_hugetlb_entry_migration(entry) || 4768 is_hugetlb_entry_hwpoisoned(entry))) { 4769 swp_entry_t swp_entry = pte_to_swp_entry(entry); 4770 4771 if (is_writable_migration_entry(swp_entry) && cow) { 4772 /* 4773 * COW mappings require pages in both 4774 * parent and child to be set to read. 4775 */ 4776 swp_entry = make_readable_migration_entry( 4777 swp_offset(swp_entry)); 4778 entry = swp_entry_to_pte(swp_entry); 4779 set_huge_swap_pte_at(src, addr, src_pte, 4780 entry, sz); 4781 } 4782 set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz); 4783 } else { 4784 entry = huge_ptep_get(src_pte); 4785 ptepage = pte_page(entry); 4786 get_page(ptepage); 4787 4788 /* 4789 * This is a rare case where we see pinned hugetlb 4790 * pages while they're prone to COW. We need to do the 4791 * COW earlier during fork. 4792 * 4793 * When pre-allocating the page or copying data, we 4794 * need to be without the pgtable locks since we could 4795 * sleep during the process. 4796 */ 4797 if (unlikely(page_needs_cow_for_dma(vma, ptepage))) { 4798 pte_t src_pte_old = entry; 4799 struct page *new; 4800 4801 spin_unlock(src_ptl); 4802 spin_unlock(dst_ptl); 4803 /* Do not use reserve as it's private owned */ 4804 new = alloc_huge_page(vma, addr, 1); 4805 if (IS_ERR(new)) { 4806 put_page(ptepage); 4807 ret = PTR_ERR(new); 4808 break; 4809 } 4810 copy_user_huge_page(new, ptepage, addr, vma, 4811 npages); 4812 put_page(ptepage); 4813 4814 /* Install the new huge page if src pte stable */ 4815 dst_ptl = huge_pte_lock(h, dst, dst_pte); 4816 src_ptl = huge_pte_lockptr(h, src, src_pte); 4817 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 4818 entry = huge_ptep_get(src_pte); 4819 if (!pte_same(src_pte_old, entry)) { 4820 restore_reserve_on_error(h, vma, addr, 4821 new); 4822 put_page(new); 4823 /* dst_entry won't change as in child */ 4824 goto again; 4825 } 4826 hugetlb_install_page(vma, dst_pte, addr, new); 4827 spin_unlock(src_ptl); 4828 spin_unlock(dst_ptl); 4829 continue; 4830 } 4831 4832 if (cow) { 4833 /* 4834 * No need to notify as we are downgrading page 4835 * table protection not changing it to point 4836 * to a new page. 
4837 * 4838 * See Documentation/vm/mmu_notifier.rst 4839 */ 4840 huge_ptep_set_wrprotect(src, addr, src_pte); 4841 entry = huge_pte_wrprotect(entry); 4842 } 4843 4844 page_dup_rmap(ptepage, true); 4845 set_huge_pte_at(dst, addr, dst_pte, entry); 4846 hugetlb_count_add(npages, dst); 4847 } 4848 spin_unlock(src_ptl); 4849 spin_unlock(dst_ptl); 4850 } 4851 4852 if (cow) 4853 mmu_notifier_invalidate_range_end(&range); 4854 else 4855 i_mmap_unlock_read(mapping); 4856 4857 return ret; 4858 } 4859 4860 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr, 4861 unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte) 4862 { 4863 struct hstate *h = hstate_vma(vma); 4864 struct mm_struct *mm = vma->vm_mm; 4865 spinlock_t *src_ptl, *dst_ptl; 4866 pte_t pte; 4867 4868 dst_ptl = huge_pte_lock(h, mm, dst_pte); 4869 src_ptl = huge_pte_lockptr(h, mm, src_pte); 4870 4871 /* 4872 * We don't have to worry about the ordering of src and dst ptlocks 4873 * because exclusive mmap_sem (or the i_mmap_lock) prevents deadlock. 4874 */ 4875 if (src_ptl != dst_ptl) 4876 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 4877 4878 pte = huge_ptep_get_and_clear(mm, old_addr, src_pte); 4879 set_huge_pte_at(mm, new_addr, dst_pte, pte); 4880 4881 if (src_ptl != dst_ptl) 4882 spin_unlock(src_ptl); 4883 spin_unlock(dst_ptl); 4884 } 4885 4886 int move_hugetlb_page_tables(struct vm_area_struct *vma, 4887 struct vm_area_struct *new_vma, 4888 unsigned long old_addr, unsigned long new_addr, 4889 unsigned long len) 4890 { 4891 struct hstate *h = hstate_vma(vma); 4892 struct address_space *mapping = vma->vm_file->f_mapping; 4893 unsigned long sz = huge_page_size(h); 4894 struct mm_struct *mm = vma->vm_mm; 4895 unsigned long old_end = old_addr + len; 4896 unsigned long old_addr_copy; 4897 pte_t *src_pte, *dst_pte; 4898 struct mmu_notifier_range range; 4899 4900 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, old_addr, 4901 old_end); 4902 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 4903 mmu_notifier_invalidate_range_start(&range); 4904 /* Prevent race with file truncation */ 4905 i_mmap_lock_write(mapping); 4906 for (; old_addr < old_end; old_addr += sz, new_addr += sz) { 4907 src_pte = huge_pte_offset(mm, old_addr, sz); 4908 if (!src_pte) 4909 continue; 4910 if (huge_pte_none(huge_ptep_get(src_pte))) 4911 continue; 4912 4913 /* old_addr arg to huge_pmd_unshare() is a pointer and so the 4914 * arg may be modified. Pass a copy instead to preserve the 4915 * value in old_addr. 
4916 */ 4917 old_addr_copy = old_addr; 4918 4919 if (huge_pmd_unshare(mm, vma, &old_addr_copy, src_pte)) 4920 continue; 4921 4922 dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz); 4923 if (!dst_pte) 4924 break; 4925 4926 move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte); 4927 } 4928 flush_tlb_range(vma, old_end - len, old_end); 4929 mmu_notifier_invalidate_range_end(&range); 4930 i_mmap_unlock_write(mapping); 4931 4932 return len + old_addr - old_end; 4933 } 4934 4935 static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 4936 unsigned long start, unsigned long end, 4937 struct page *ref_page) 4938 { 4939 struct mm_struct *mm = vma->vm_mm; 4940 unsigned long address; 4941 pte_t *ptep; 4942 pte_t pte; 4943 spinlock_t *ptl; 4944 struct page *page; 4945 struct hstate *h = hstate_vma(vma); 4946 unsigned long sz = huge_page_size(h); 4947 struct mmu_notifier_range range; 4948 bool force_flush = false; 4949 4950 WARN_ON(!is_vm_hugetlb_page(vma)); 4951 BUG_ON(start & ~huge_page_mask(h)); 4952 BUG_ON(end & ~huge_page_mask(h)); 4953 4954 /* 4955 * This is a hugetlb vma, all the pte entries should point 4956 * to huge page. 4957 */ 4958 tlb_change_page_size(tlb, sz); 4959 tlb_start_vma(tlb, vma); 4960 4961 /* 4962 * If sharing possible, alert mmu notifiers of worst case. 4963 */ 4964 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start, 4965 end); 4966 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 4967 mmu_notifier_invalidate_range_start(&range); 4968 address = start; 4969 for (; address < end; address += sz) { 4970 ptep = huge_pte_offset(mm, address, sz); 4971 if (!ptep) 4972 continue; 4973 4974 ptl = huge_pte_lock(h, mm, ptep); 4975 if (huge_pmd_unshare(mm, vma, &address, ptep)) { 4976 spin_unlock(ptl); 4977 tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE); 4978 force_flush = true; 4979 continue; 4980 } 4981 4982 pte = huge_ptep_get(ptep); 4983 if (huge_pte_none(pte)) { 4984 spin_unlock(ptl); 4985 continue; 4986 } 4987 4988 /* 4989 * Migrating hugepage or HWPoisoned hugepage is already 4990 * unmapped and its refcount is dropped, so just clear pte here. 4991 */ 4992 if (unlikely(!pte_present(pte))) { 4993 huge_pte_clear(mm, address, ptep, sz); 4994 spin_unlock(ptl); 4995 continue; 4996 } 4997 4998 page = pte_page(pte); 4999 /* 5000 * If a reference page is supplied, it is because a specific 5001 * page is being unmapped, not a range. Ensure the page we 5002 * are about to unmap is the actual page of interest. 5003 */ 5004 if (ref_page) { 5005 if (page != ref_page) { 5006 spin_unlock(ptl); 5007 continue; 5008 } 5009 /* 5010 * Mark the VMA as having unmapped its page so that 5011 * future faults in this VMA will fail rather than 5012 * looking like data was lost 5013 */ 5014 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 5015 } 5016 5017 pte = huge_ptep_get_and_clear(mm, address, ptep); 5018 tlb_remove_huge_tlb_entry(h, tlb, ptep, address); 5019 if (huge_pte_dirty(pte)) 5020 set_page_dirty(page); 5021 5022 hugetlb_count_sub(pages_per_huge_page(h), mm); 5023 page_remove_rmap(page, vma, true); 5024 5025 spin_unlock(ptl); 5026 tlb_remove_page_size(tlb, page, huge_page_size(h)); 5027 /* 5028 * Bail out after unmapping reference page if supplied 5029 */ 5030 if (ref_page) 5031 break; 5032 } 5033 mmu_notifier_invalidate_range_end(&range); 5034 tlb_end_vma(tlb, vma); 5035 5036 /* 5037 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. 
We 5038 * could defer the flush until now, since by holding i_mmap_rwsem we 5039 * guaranteed that the last reference would not be dropped. But we must 5040 * do the flushing before we return, as otherwise i_mmap_rwsem will be 5041 * dropped and the last reference to the shared PMD page might be 5042 * dropped as well. 5043 * 5044 * In theory we could defer the freeing of the PMD pages as well, but 5045 * huge_pmd_unshare() relies on the exact page_count for the PMD page to 5046 * detect sharing, so we cannot defer the release of the page either. 5047 * Instead, do flush now. 5048 */ 5049 if (force_flush) 5050 tlb_flush_mmu_tlbonly(tlb); 5051 } 5052 5053 void __unmap_hugepage_range_final(struct mmu_gather *tlb, 5054 struct vm_area_struct *vma, unsigned long start, 5055 unsigned long end, struct page *ref_page) 5056 { 5057 __unmap_hugepage_range(tlb, vma, start, end, ref_page); 5058 5059 /* 5060 * Clear this flag so that x86's huge_pmd_share page_table_shareable 5061 * test will fail on a vma being torn down, and not grab a page table 5062 * on its way out. We're lucky that the flag has such an appropriate 5063 * name, and can in fact be safely cleared here. We could clear it 5064 * before the __unmap_hugepage_range above, but all that's necessary 5065 * is to clear it before releasing the i_mmap_rwsem. This works 5066 * because in the context this is called, the VMA is about to be 5067 * destroyed and the i_mmap_rwsem is held. 5068 */ 5069 vma->vm_flags &= ~VM_MAYSHARE; 5070 } 5071 5072 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 5073 unsigned long end, struct page *ref_page) 5074 { 5075 struct mmu_gather tlb; 5076 5077 tlb_gather_mmu(&tlb, vma->vm_mm); 5078 __unmap_hugepage_range(&tlb, vma, start, end, ref_page); 5079 tlb_finish_mmu(&tlb); 5080 } 5081 5082 /* 5083 * This is called when the original mapper is failing to COW a MAP_PRIVATE 5084 * mapping for which it owns the reserve page. The intention is to unmap the page 5085 * from other VMAs and let the children be SIGKILLed if they are faulting the 5086 * same region. 5087 */ 5088 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, 5089 struct page *page, unsigned long address) 5090 { 5091 struct hstate *h = hstate_vma(vma); 5092 struct vm_area_struct *iter_vma; 5093 struct address_space *mapping; 5094 pgoff_t pgoff; 5095 5096 /* 5097 * vm_pgoff is in PAGE_SIZE units, hence the different calculation 5098 * from page cache lookup which is in HPAGE_SIZE units. 5099 */ 5100 address = address & huge_page_mask(h); 5101 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + 5102 vma->vm_pgoff; 5103 mapping = vma->vm_file->f_mapping; 5104 5105 /* 5106 * Take the mapping lock for the duration of the table walk. As 5107 * this mapping should be shared between all the VMAs, 5108 * __unmap_hugepage_range() is called as the lock is already held. 5109 */ 5110 i_mmap_lock_write(mapping); 5111 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { 5112 /* Do not unmap the current VMA */ 5113 if (iter_vma == vma) 5114 continue; 5115 5116 /* 5117 * Shared VMAs have their own reserves and do not affect 5118 * MAP_PRIVATE accounting but it is possible that a shared 5119 * VMA is using the same page so check and skip such VMAs. 5120 */ 5121 if (iter_vma->vm_flags & VM_MAYSHARE) 5122 continue; 5123 5124 /* 5125 * Unmap the page from other VMAs without their own reserves. 5126 * They get marked to be SIGKILLed if they fault in these 5127 * areas.
This is because a future no-page fault on this VMA 5128 * could insert a zeroed page instead of the data existing 5129 * from the time of fork. This would look like data corruption 5130 */ 5131 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 5132 unmap_hugepage_range(iter_vma, address, 5133 address + huge_page_size(h), page); 5134 } 5135 i_mmap_unlock_write(mapping); 5136 } 5137 5138 /* 5139 * Hugetlb_cow() should be called with page lock of the original hugepage held. 5140 * Called with hugetlb_fault_mutex_table held and pte_page locked so we 5141 * cannot race with other handlers or page migration. 5142 * Keep the pte_same checks anyway to make transition from the mutex easier. 5143 */ 5144 static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, 5145 unsigned long address, pte_t *ptep, 5146 struct page *pagecache_page, spinlock_t *ptl) 5147 { 5148 pte_t pte; 5149 struct hstate *h = hstate_vma(vma); 5150 struct page *old_page, *new_page; 5151 int outside_reserve = 0; 5152 vm_fault_t ret = 0; 5153 unsigned long haddr = address & huge_page_mask(h); 5154 struct mmu_notifier_range range; 5155 5156 pte = huge_ptep_get(ptep); 5157 old_page = pte_page(pte); 5158 5159 retry_avoidcopy: 5160 /* If no-one else is actually using this page, avoid the copy 5161 * and just make the page writable */ 5162 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) { 5163 page_move_anon_rmap(old_page, vma); 5164 set_huge_ptep_writable(vma, haddr, ptep); 5165 return 0; 5166 } 5167 5168 /* 5169 * If the process that created a MAP_PRIVATE mapping is about to 5170 * perform a COW due to a shared page count, attempt to satisfy 5171 * the allocation without using the existing reserves. The pagecache 5172 * page is used to determine if the reserve at this address was 5173 * consumed or not. If reserves were used, a partial faulted mapping 5174 * at the time of fork() could consume its reserves on COW instead 5175 * of the full address range. 5176 */ 5177 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 5178 old_page != pagecache_page) 5179 outside_reserve = 1; 5180 5181 get_page(old_page); 5182 5183 /* 5184 * Drop page table lock as buddy allocator may be called. It will 5185 * be acquired again before returning to the caller, as expected. 5186 */ 5187 spin_unlock(ptl); 5188 new_page = alloc_huge_page(vma, haddr, outside_reserve); 5189 5190 if (IS_ERR(new_page)) { 5191 /* 5192 * If a process owning a MAP_PRIVATE mapping fails to COW, 5193 * it is due to references held by a child and an insufficient 5194 * huge page pool. To guarantee the original mappers 5195 * reliability, unmap the page from child processes. The child 5196 * may get SIGKILLed if it later faults. 5197 */ 5198 if (outside_reserve) { 5199 struct address_space *mapping = vma->vm_file->f_mapping; 5200 pgoff_t idx; 5201 u32 hash; 5202 5203 put_page(old_page); 5204 BUG_ON(huge_pte_none(pte)); 5205 /* 5206 * Drop hugetlb_fault_mutex and i_mmap_rwsem before 5207 * unmapping. unmapping needs to hold i_mmap_rwsem 5208 * in write mode. Dropping i_mmap_rwsem in read mode 5209 * here is OK as COW mappings do not interact with 5210 * PMD sharing. 5211 * 5212 * Reacquire both after unmap operation. 
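 * The pte_same() check after the locks are retaken detects whether the
 * entry changed while they were dropped; if it did, our job is done.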
5213 */ 5214 idx = vma_hugecache_offset(h, vma, haddr); 5215 hash = hugetlb_fault_mutex_hash(mapping, idx); 5216 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 5217 i_mmap_unlock_read(mapping); 5218 5219 unmap_ref_private(mm, vma, old_page, haddr); 5220 5221 i_mmap_lock_read(mapping); 5222 mutex_lock(&hugetlb_fault_mutex_table[hash]); 5223 spin_lock(ptl); 5224 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); 5225 if (likely(ptep && 5226 pte_same(huge_ptep_get(ptep), pte))) 5227 goto retry_avoidcopy; 5228 /* 5229 * race occurs while re-acquiring page table 5230 * lock, and our job is done. 5231 */ 5232 return 0; 5233 } 5234 5235 ret = vmf_error(PTR_ERR(new_page)); 5236 goto out_release_old; 5237 } 5238 5239 /* 5240 * When the original hugepage is shared one, it does not have 5241 * anon_vma prepared. 5242 */ 5243 if (unlikely(anon_vma_prepare(vma))) { 5244 ret = VM_FAULT_OOM; 5245 goto out_release_all; 5246 } 5247 5248 copy_user_huge_page(new_page, old_page, address, vma, 5249 pages_per_huge_page(h)); 5250 __SetPageUptodate(new_page); 5251 5252 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr, 5253 haddr + huge_page_size(h)); 5254 mmu_notifier_invalidate_range_start(&range); 5255 5256 /* 5257 * Retake the page table lock to check for racing updates 5258 * before the page tables are altered 5259 */ 5260 spin_lock(ptl); 5261 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); 5262 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) { 5263 ClearHPageRestoreReserve(new_page); 5264 5265 /* Break COW */ 5266 huge_ptep_clear_flush(vma, haddr, ptep); 5267 mmu_notifier_invalidate_range(mm, range.start, range.end); 5268 page_remove_rmap(old_page, vma, true); 5269 hugepage_add_new_anon_rmap(new_page, vma, haddr); 5270 set_huge_pte_at(mm, haddr, ptep, 5271 make_huge_pte(vma, new_page, 1)); 5272 SetHPageMigratable(new_page); 5273 /* Make the old page be freed below */ 5274 new_page = old_page; 5275 } 5276 spin_unlock(ptl); 5277 mmu_notifier_invalidate_range_end(&range); 5278 out_release_all: 5279 /* No restore in case of successful pagetable update (Break COW) */ 5280 if (new_page != old_page) 5281 restore_reserve_on_error(h, vma, haddr, new_page); 5282 put_page(new_page); 5283 out_release_old: 5284 put_page(old_page); 5285 5286 spin_lock(ptl); /* Caller expects lock to be held */ 5287 return ret; 5288 } 5289 5290 /* Return the pagecache page at a given address within a VMA */ 5291 static struct page *hugetlbfs_pagecache_page(struct hstate *h, 5292 struct vm_area_struct *vma, unsigned long address) 5293 { 5294 struct address_space *mapping; 5295 pgoff_t idx; 5296 5297 mapping = vma->vm_file->f_mapping; 5298 idx = vma_hugecache_offset(h, vma, address); 5299 5300 return find_lock_page(mapping, idx); 5301 } 5302 5303 /* 5304 * Return whether there is a pagecache page to back given address within VMA. 5305 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page. 
5306 */ 5307 static bool hugetlbfs_pagecache_present(struct hstate *h, 5308 struct vm_area_struct *vma, unsigned long address) 5309 { 5310 struct address_space *mapping; 5311 pgoff_t idx; 5312 struct page *page; 5313 5314 mapping = vma->vm_file->f_mapping; 5315 idx = vma_hugecache_offset(h, vma, address); 5316 5317 page = find_get_page(mapping, idx); 5318 if (page) 5319 put_page(page); 5320 return page != NULL; 5321 } 5322 5323 int huge_add_to_page_cache(struct page *page, struct address_space *mapping, 5324 pgoff_t idx) 5325 { 5326 struct inode *inode = mapping->host; 5327 struct hstate *h = hstate_inode(inode); 5328 int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); 5329 5330 if (err) 5331 return err; 5332 ClearHPageRestoreReserve(page); 5333 5334 /* 5335 * set page dirty so that it will not be removed from cache/file 5336 * by non-hugetlbfs specific code paths. 5337 */ 5338 set_page_dirty(page); 5339 5340 spin_lock(&inode->i_lock); 5341 inode->i_blocks += blocks_per_huge_page(h); 5342 spin_unlock(&inode->i_lock); 5343 return 0; 5344 } 5345 5346 static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma, 5347 struct address_space *mapping, 5348 pgoff_t idx, 5349 unsigned int flags, 5350 unsigned long haddr, 5351 unsigned long addr, 5352 unsigned long reason) 5353 { 5354 vm_fault_t ret; 5355 u32 hash; 5356 struct vm_fault vmf = { 5357 .vma = vma, 5358 .address = haddr, 5359 .real_address = addr, 5360 .flags = flags, 5361 5362 /* 5363 * Hard to debug if it ends up being 5364 * used by a callee that assumes 5365 * something about the other 5366 * uninitialized fields... same as in 5367 * memory.c 5368 */ 5369 }; 5370 5371 /* 5372 * hugetlb_fault_mutex and i_mmap_rwsem must be 5373 * dropped before handling userfault. Reacquire 5374 * after handling fault to make calling code simpler. 5375 */ 5376 hash = hugetlb_fault_mutex_hash(mapping, idx); 5377 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 5378 i_mmap_unlock_read(mapping); 5379 ret = handle_userfault(&vmf, reason); 5380 i_mmap_lock_read(mapping); 5381 mutex_lock(&hugetlb_fault_mutex_table[hash]); 5382 5383 return ret; 5384 } 5385 5386 static vm_fault_t hugetlb_no_page(struct mm_struct *mm, 5387 struct vm_area_struct *vma, 5388 struct address_space *mapping, pgoff_t idx, 5389 unsigned long address, pte_t *ptep, unsigned int flags) 5390 { 5391 struct hstate *h = hstate_vma(vma); 5392 vm_fault_t ret = VM_FAULT_SIGBUS; 5393 int anon_rmap = 0; 5394 unsigned long size; 5395 struct page *page; 5396 pte_t new_pte; 5397 spinlock_t *ptl; 5398 unsigned long haddr = address & huge_page_mask(h); 5399 bool new_page, new_pagecache_page = false; 5400 5401 /* 5402 * Currently, we are forced to kill the process in the event the 5403 * original mapper has unmapped pages from the child due to a failed 5404 * COW. Warn that such a situation has occurred as it may not be obvious 5405 */ 5406 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 5407 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n", 5408 current->pid); 5409 return ret; 5410 } 5411 5412 /* 5413 * We can not race with truncation due to holding i_mmap_rwsem. 5414 * i_size is modified when holding i_mmap_rwsem, so check here 5415 * once for faults beyond end of file. 
5416 */ 5417 size = i_size_read(mapping->host) >> huge_page_shift(h); 5418 if (idx >= size) 5419 goto out; 5420 5421 retry: 5422 new_page = false; 5423 page = find_lock_page(mapping, idx); 5424 if (!page) { 5425 /* Check for page in userfault range */ 5426 if (userfaultfd_missing(vma)) { 5427 ret = hugetlb_handle_userfault(vma, mapping, idx, 5428 flags, haddr, address, 5429 VM_UFFD_MISSING); 5430 goto out; 5431 } 5432 5433 page = alloc_huge_page(vma, haddr, 0); 5434 if (IS_ERR(page)) { 5435 /* 5436 * Returning an error will result in the faulting task being 5437 * sent SIGBUS. The hugetlb fault mutex prevents two 5438 * tasks from racing to fault in the same page, which 5439 * could result in spurious allocation failures. 5440 * Page migration does not take the fault mutex, but 5441 * does a clear then write of pte's under page table 5442 * lock. Page fault code could race with migration, 5443 * notice the clear pte and try to allocate a page 5444 * here. Before returning an error, get ptl and make 5445 * sure there really is no pte entry. 5446 */ 5447 ptl = huge_pte_lock(h, mm, ptep); 5448 ret = 0; 5449 if (huge_pte_none(huge_ptep_get(ptep))) 5450 ret = vmf_error(PTR_ERR(page)); 5451 spin_unlock(ptl); 5452 goto out; 5453 } 5454 clear_huge_page(page, address, pages_per_huge_page(h)); 5455 __SetPageUptodate(page); 5456 new_page = true; 5457 5458 if (vma->vm_flags & VM_MAYSHARE) { 5459 int err = huge_add_to_page_cache(page, mapping, idx); 5460 if (err) { 5461 put_page(page); 5462 if (err == -EEXIST) 5463 goto retry; 5464 goto out; 5465 } 5466 new_pagecache_page = true; 5467 } else { 5468 lock_page(page); 5469 if (unlikely(anon_vma_prepare(vma))) { 5470 ret = VM_FAULT_OOM; 5471 goto backout_unlocked; 5472 } 5473 anon_rmap = 1; 5474 } 5475 } else { 5476 /* 5477 * If a memory error occurs between mmap() and fault, some processes 5478 * don't have a hwpoisoned swap entry for the errored virtual address. 5479 * So we need to block hugepage faults with the PG_hwpoison bit check. 5480 */ 5481 if (unlikely(PageHWPoison(page))) { 5482 ret = VM_FAULT_HWPOISON_LARGE | 5483 VM_FAULT_SET_HINDEX(hstate_index(h)); 5484 goto backout_unlocked; 5485 } 5486 5487 /* Check for page in userfault range. */ 5488 if (userfaultfd_minor(vma)) { 5489 unlock_page(page); 5490 put_page(page); 5491 ret = hugetlb_handle_userfault(vma, mapping, idx, 5492 flags, haddr, address, 5493 VM_UFFD_MINOR); 5494 goto out; 5495 } 5496 } 5497 5498 /* 5499 * If we are going to COW a private mapping later, we examine the 5500 * pending reservations for this page now. This will ensure that 5501 * any allocations necessary to record that reservation occur outside 5502 * the spinlock.
5503 */ 5504 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 5505 if (vma_needs_reservation(h, vma, haddr) < 0) { 5506 ret = VM_FAULT_OOM; 5507 goto backout_unlocked; 5508 } 5509 /* Just decrements count, does not deallocate */ 5510 vma_end_reservation(h, vma, haddr); 5511 } 5512 5513 ptl = huge_pte_lock(h, mm, ptep); 5514 ret = 0; 5515 if (!huge_pte_none(huge_ptep_get(ptep))) 5516 goto backout; 5517 5518 if (anon_rmap) { 5519 ClearHPageRestoreReserve(page); 5520 hugepage_add_new_anon_rmap(page, vma, haddr); 5521 } else 5522 page_dup_rmap(page, true); 5523 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) 5524 && (vma->vm_flags & VM_SHARED))); 5525 set_huge_pte_at(mm, haddr, ptep, new_pte); 5526 5527 hugetlb_count_add(pages_per_huge_page(h), mm); 5528 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 5529 /* Optimization, do the COW without a second fault */ 5530 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl); 5531 } 5532 5533 spin_unlock(ptl); 5534 5535 /* 5536 * Only set HPageMigratable in newly allocated pages. Existing pages 5537 * found in the pagecache may not have HPageMigratable set if they have 5538 * been isolated for migration. 5539 */ 5540 if (new_page) 5541 SetHPageMigratable(page); 5542 5543 unlock_page(page); 5544 out: 5545 return ret; 5546 5547 backout: 5548 spin_unlock(ptl); 5549 backout_unlocked: 5550 unlock_page(page); 5551 /* restore reserve for newly allocated pages not in page cache */ 5552 if (new_page && !new_pagecache_page) 5553 restore_reserve_on_error(h, vma, haddr, page); 5554 put_page(page); 5555 goto out; 5556 } 5557 5558 #ifdef CONFIG_SMP 5559 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) 5560 { 5561 unsigned long key[2]; 5562 u32 hash; 5563 5564 key[0] = (unsigned long) mapping; 5565 key[1] = idx; 5566 5567 hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0); 5568 5569 return hash & (num_fault_mutexes - 1); 5570 } 5571 #else 5572 /* 5573 * For uniprocessor systems we always use a single mutex, so just 5574 * return 0 and avoid the hashing overhead. 5575 */ 5576 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) 5577 { 5578 return 0; 5579 } 5580 #endif 5581 5582 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 5583 unsigned long address, unsigned int flags) 5584 { 5585 pte_t *ptep, entry; 5586 spinlock_t *ptl; 5587 vm_fault_t ret; 5588 u32 hash; 5589 pgoff_t idx; 5590 struct page *page = NULL; 5591 struct page *pagecache_page = NULL; 5592 struct hstate *h = hstate_vma(vma); 5593 struct address_space *mapping; 5594 int need_wait_lock = 0; 5595 unsigned long haddr = address & huge_page_mask(h); 5596 5597 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); 5598 if (ptep) { 5599 /* 5600 * Since we hold no locks, ptep could be stale. That is 5601 * OK as we are only making decisions based on content and 5602 * not actually modifying content here. 5603 */ 5604 entry = huge_ptep_get(ptep); 5605 if (unlikely(is_hugetlb_entry_migration(entry))) { 5606 migration_entry_wait_huge(vma, mm, ptep); 5607 return 0; 5608 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) 5609 return VM_FAULT_HWPOISON_LARGE | 5610 VM_FAULT_SET_HINDEX(hstate_index(h)); 5611 } 5612 5613 /* 5614 * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold 5615 * until finished with ptep. This serves two purposes: 5616 * 1) It prevents huge_pmd_unshare from being called elsewhere 5617 * and making the ptep no longer valid.
5618 * 2) It synchronizes us with i_size modifications during truncation. 5619 * 5620 * ptep could have already be assigned via huge_pte_offset. That 5621 * is OK, as huge_pte_alloc will return the same value unless 5622 * something has changed. 5623 */ 5624 mapping = vma->vm_file->f_mapping; 5625 i_mmap_lock_read(mapping); 5626 ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h)); 5627 if (!ptep) { 5628 i_mmap_unlock_read(mapping); 5629 return VM_FAULT_OOM; 5630 } 5631 5632 /* 5633 * Serialize hugepage allocation and instantiation, so that we don't 5634 * get spurious allocation failures if two CPUs race to instantiate 5635 * the same page in the page cache. 5636 */ 5637 idx = vma_hugecache_offset(h, vma, haddr); 5638 hash = hugetlb_fault_mutex_hash(mapping, idx); 5639 mutex_lock(&hugetlb_fault_mutex_table[hash]); 5640 5641 entry = huge_ptep_get(ptep); 5642 if (huge_pte_none(entry)) { 5643 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags); 5644 goto out_mutex; 5645 } 5646 5647 ret = 0; 5648 5649 /* 5650 * entry could be a migration/hwpoison entry at this point, so this 5651 * check prevents the kernel from going below assuming that we have 5652 * an active hugepage in pagecache. This goto expects the 2nd page 5653 * fault, and is_hugetlb_entry_(migration|hwpoisoned) check will 5654 * properly handle it. 5655 */ 5656 if (!pte_present(entry)) 5657 goto out_mutex; 5658 5659 /* 5660 * If we are going to COW the mapping later, we examine the pending 5661 * reservations for this page now. This will ensure that any 5662 * allocations necessary to record that reservation occur outside the 5663 * spinlock. For private mappings, we also lookup the pagecache 5664 * page now as it is used to determine if a reservation has been 5665 * consumed. 5666 */ 5667 if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) { 5668 if (vma_needs_reservation(h, vma, haddr) < 0) { 5669 ret = VM_FAULT_OOM; 5670 goto out_mutex; 5671 } 5672 /* Just decrements count, does not deallocate */ 5673 vma_end_reservation(h, vma, haddr); 5674 5675 if (!(vma->vm_flags & VM_MAYSHARE)) 5676 pagecache_page = hugetlbfs_pagecache_page(h, 5677 vma, haddr); 5678 } 5679 5680 ptl = huge_pte_lock(h, mm, ptep); 5681 5682 /* Check for a racing update before calling hugetlb_cow */ 5683 if (unlikely(!pte_same(entry, huge_ptep_get(ptep)))) 5684 goto out_ptl; 5685 5686 /* 5687 * hugetlb_cow() requires page locks of pte_page(entry) and 5688 * pagecache_page, so here we need take the former one 5689 * when page != pagecache_page or !pagecache_page. 5690 */ 5691 page = pte_page(entry); 5692 if (page != pagecache_page) 5693 if (!trylock_page(page)) { 5694 need_wait_lock = 1; 5695 goto out_ptl; 5696 } 5697 5698 get_page(page); 5699 5700 if (flags & FAULT_FLAG_WRITE) { 5701 if (!huge_pte_write(entry)) { 5702 ret = hugetlb_cow(mm, vma, address, ptep, 5703 pagecache_page, ptl); 5704 goto out_put_page; 5705 } 5706 entry = huge_pte_mkdirty(entry); 5707 } 5708 entry = pte_mkyoung(entry); 5709 if (huge_ptep_set_access_flags(vma, haddr, ptep, entry, 5710 flags & FAULT_FLAG_WRITE)) 5711 update_mmu_cache(vma, haddr, ptep); 5712 out_put_page: 5713 if (page != pagecache_page) 5714 unlock_page(page); 5715 put_page(page); 5716 out_ptl: 5717 spin_unlock(ptl); 5718 5719 if (pagecache_page) { 5720 unlock_page(pagecache_page); 5721 put_page(pagecache_page); 5722 } 5723 out_mutex: 5724 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 5725 i_mmap_unlock_read(mapping); 5726 /* 5727 * Generally it's safe to hold refcount during waiting page lock. 
But 5728 * here we just wait to defer the next page fault to avoid busy loop and 5729 * the page is not used after unlocked before returning from the current 5730 * page fault. So we are safe from accessing freed page, even if we wait 5731 * here without taking refcount. 5732 */ 5733 if (need_wait_lock) 5734 wait_on_page_locked(page); 5735 return ret; 5736 } 5737 5738 #ifdef CONFIG_USERFAULTFD 5739 /* 5740 * Used by userfaultfd UFFDIO_COPY. Based on mcopy_atomic_pte with 5741 * modifications for huge pages. 5742 */ 5743 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, 5744 pte_t *dst_pte, 5745 struct vm_area_struct *dst_vma, 5746 unsigned long dst_addr, 5747 unsigned long src_addr, 5748 enum mcopy_atomic_mode mode, 5749 struct page **pagep) 5750 { 5751 bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE); 5752 struct hstate *h = hstate_vma(dst_vma); 5753 struct address_space *mapping = dst_vma->vm_file->f_mapping; 5754 pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr); 5755 unsigned long size; 5756 int vm_shared = dst_vma->vm_flags & VM_SHARED; 5757 pte_t _dst_pte; 5758 spinlock_t *ptl; 5759 int ret = -ENOMEM; 5760 struct page *page; 5761 int writable; 5762 bool page_in_pagecache = false; 5763 5764 if (is_continue) { 5765 ret = -EFAULT; 5766 page = find_lock_page(mapping, idx); 5767 if (!page) 5768 goto out; 5769 page_in_pagecache = true; 5770 } else if (!*pagep) { 5771 /* If a page already exists, then it's UFFDIO_COPY for 5772 * a non-missing case. Return -EEXIST. 5773 */ 5774 if (vm_shared && 5775 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { 5776 ret = -EEXIST; 5777 goto out; 5778 } 5779 5780 page = alloc_huge_page(dst_vma, dst_addr, 0); 5781 if (IS_ERR(page)) { 5782 ret = -ENOMEM; 5783 goto out; 5784 } 5785 5786 ret = copy_huge_page_from_user(page, 5787 (const void __user *) src_addr, 5788 pages_per_huge_page(h), false); 5789 5790 /* fallback to copy_from_user outside mmap_lock */ 5791 if (unlikely(ret)) { 5792 ret = -ENOENT; 5793 /* Free the allocated page which may have 5794 * consumed a reservation. 5795 */ 5796 restore_reserve_on_error(h, dst_vma, dst_addr, page); 5797 put_page(page); 5798 5799 /* Allocate a temporary page to hold the copied 5800 * contents. 5801 */ 5802 page = alloc_huge_page_vma(h, dst_vma, dst_addr); 5803 if (!page) { 5804 ret = -ENOMEM; 5805 goto out; 5806 } 5807 *pagep = page; 5808 /* Set the outparam pagep and return to the caller to 5809 * copy the contents outside the lock. Don't free the 5810 * page. 5811 */ 5812 goto out; 5813 } 5814 } else { 5815 if (vm_shared && 5816 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { 5817 put_page(*pagep); 5818 ret = -EEXIST; 5819 *pagep = NULL; 5820 goto out; 5821 } 5822 5823 page = alloc_huge_page(dst_vma, dst_addr, 0); 5824 if (IS_ERR(page)) { 5825 ret = -ENOMEM; 5826 *pagep = NULL; 5827 goto out; 5828 } 5829 copy_user_huge_page(page, *pagep, dst_addr, dst_vma, 5830 pages_per_huge_page(h)); 5831 put_page(*pagep); 5832 *pagep = NULL; 5833 } 5834 5835 /* 5836 * The memory barrier inside __SetPageUptodate makes sure that 5837 * preceding stores to the page contents become visible before 5838 * the set_pte_at() write. 5839 */ 5840 __SetPageUptodate(page); 5841 5842 /* Add shared, newly allocated pages to the page cache. 
*/ 5843 if (vm_shared && !is_continue) { 5844 size = i_size_read(mapping->host) >> huge_page_shift(h); 5845 ret = -EFAULT; 5846 if (idx >= size) 5847 goto out_release_nounlock; 5848 5849 /* 5850 * Serialization between remove_inode_hugepages() and 5851 * huge_add_to_page_cache() below happens through the 5852 * hugetlb_fault_mutex_table that here must be hold by 5853 * the caller. 5854 */ 5855 ret = huge_add_to_page_cache(page, mapping, idx); 5856 if (ret) 5857 goto out_release_nounlock; 5858 page_in_pagecache = true; 5859 } 5860 5861 ptl = huge_pte_lockptr(h, dst_mm, dst_pte); 5862 spin_lock(ptl); 5863 5864 /* 5865 * Recheck the i_size after holding PT lock to make sure not 5866 * to leave any page mapped (as page_mapped()) beyond the end 5867 * of the i_size (remove_inode_hugepages() is strict about 5868 * enforcing that). If we bail out here, we'll also leave a 5869 * page in the radix tree in the vm_shared case beyond the end 5870 * of the i_size, but remove_inode_hugepages() will take care 5871 * of it as soon as we drop the hugetlb_fault_mutex_table. 5872 */ 5873 size = i_size_read(mapping->host) >> huge_page_shift(h); 5874 ret = -EFAULT; 5875 if (idx >= size) 5876 goto out_release_unlock; 5877 5878 ret = -EEXIST; 5879 if (!huge_pte_none(huge_ptep_get(dst_pte))) 5880 goto out_release_unlock; 5881 5882 if (vm_shared) { 5883 page_dup_rmap(page, true); 5884 } else { 5885 ClearHPageRestoreReserve(page); 5886 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr); 5887 } 5888 5889 /* For CONTINUE on a non-shared VMA, don't set VM_WRITE for CoW. */ 5890 if (is_continue && !vm_shared) 5891 writable = 0; 5892 else 5893 writable = dst_vma->vm_flags & VM_WRITE; 5894 5895 _dst_pte = make_huge_pte(dst_vma, page, writable); 5896 if (writable) 5897 _dst_pte = huge_pte_mkdirty(_dst_pte); 5898 _dst_pte = pte_mkyoung(_dst_pte); 5899 5900 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); 5901 5902 (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte, 5903 dst_vma->vm_flags & VM_WRITE); 5904 hugetlb_count_add(pages_per_huge_page(h), dst_mm); 5905 5906 /* No need to invalidate - it was non-present before */ 5907 update_mmu_cache(dst_vma, dst_addr, dst_pte); 5908 5909 spin_unlock(ptl); 5910 if (!is_continue) 5911 SetHPageMigratable(page); 5912 if (vm_shared || is_continue) 5913 unlock_page(page); 5914 ret = 0; 5915 out: 5916 return ret; 5917 out_release_unlock: 5918 spin_unlock(ptl); 5919 if (vm_shared || is_continue) 5920 unlock_page(page); 5921 out_release_nounlock: 5922 if (!page_in_pagecache) 5923 restore_reserve_on_error(h, dst_vma, dst_addr, page); 5924 put_page(page); 5925 goto out; 5926 } 5927 #endif /* CONFIG_USERFAULTFD */ 5928 5929 static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma, 5930 int refs, struct page **pages, 5931 struct vm_area_struct **vmas) 5932 { 5933 int nr; 5934 5935 for (nr = 0; nr < refs; nr++) { 5936 if (likely(pages)) 5937 pages[nr] = mem_map_offset(page, nr); 5938 if (vmas) 5939 vmas[nr] = vma; 5940 } 5941 } 5942 5943 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, 5944 struct page **pages, struct vm_area_struct **vmas, 5945 unsigned long *position, unsigned long *nr_pages, 5946 long i, unsigned int flags, int *locked) 5947 { 5948 unsigned long pfn_offset; 5949 unsigned long vaddr = *position; 5950 unsigned long remainder = *nr_pages; 5951 struct hstate *h = hstate_vma(vma); 5952 int err = -EFAULT, refs; 5953 5954 while (vaddr < vma->vm_end && remainder) { 5955 pte_t *pte; 5956 spinlock_t *ptl = NULL; 
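		/*
		 * Each pass of this loop resolves one huge page: fault it in
		 * if it is absent, under migration or write-protected, then
		 * record up to 'refs' subpages in pages[]/vmas[] before
		 * advancing vaddr and remainder.
		 */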
5957 int absent; 5958 struct page *page; 5959 5960 /* 5961 * If we have a pending SIGKILL, don't keep faulting pages and 5962 * potentially allocating memory. 5963 */ 5964 if (fatal_signal_pending(current)) { 5965 remainder = 0; 5966 break; 5967 } 5968 5969 /* 5970 * Some archs (sparc64, sh*) have multiple pte_ts to 5971 * each hugepage. We have to make sure we get the 5972 * first, for the page indexing below to work. 5973 * 5974 * Note that page table lock is not held when pte is null. 5975 */ 5976 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h), 5977 huge_page_size(h)); 5978 if (pte) 5979 ptl = huge_pte_lock(h, mm, pte); 5980 absent = !pte || huge_pte_none(huge_ptep_get(pte)); 5981 5982 /* 5983 * When coredumping, it suits get_dump_page if we just return 5984 * an error where there's an empty slot with no huge pagecache 5985 * to back it. This way, we avoid allocating a hugepage, and 5986 * the sparse dumpfile avoids allocating disk blocks, but its 5987 * huge holes still show up with zeroes where they need to be. 5988 */ 5989 if (absent && (flags & FOLL_DUMP) && 5990 !hugetlbfs_pagecache_present(h, vma, vaddr)) { 5991 if (pte) 5992 spin_unlock(ptl); 5993 remainder = 0; 5994 break; 5995 } 5996 5997 /* 5998 * We need call hugetlb_fault for both hugepages under migration 5999 * (in which case hugetlb_fault waits for the migration,) and 6000 * hwpoisoned hugepages (in which case we need to prevent the 6001 * caller from accessing to them.) In order to do this, we use 6002 * here is_swap_pte instead of is_hugetlb_entry_migration and 6003 * is_hugetlb_entry_hwpoisoned. This is because it simply covers 6004 * both cases, and because we can't follow correct pages 6005 * directly from any kind of swap entries. 6006 */ 6007 if (absent || is_swap_pte(huge_ptep_get(pte)) || 6008 ((flags & FOLL_WRITE) && 6009 !huge_pte_write(huge_ptep_get(pte)))) { 6010 vm_fault_t ret; 6011 unsigned int fault_flags = 0; 6012 6013 if (pte) 6014 spin_unlock(ptl); 6015 if (flags & FOLL_WRITE) 6016 fault_flags |= FAULT_FLAG_WRITE; 6017 if (locked) 6018 fault_flags |= FAULT_FLAG_ALLOW_RETRY | 6019 FAULT_FLAG_KILLABLE; 6020 if (flags & FOLL_NOWAIT) 6021 fault_flags |= FAULT_FLAG_ALLOW_RETRY | 6022 FAULT_FLAG_RETRY_NOWAIT; 6023 if (flags & FOLL_TRIED) { 6024 /* 6025 * Note: FAULT_FLAG_ALLOW_RETRY and 6026 * FAULT_FLAG_TRIED can co-exist 6027 */ 6028 fault_flags |= FAULT_FLAG_TRIED; 6029 } 6030 ret = hugetlb_fault(mm, vma, vaddr, fault_flags); 6031 if (ret & VM_FAULT_ERROR) { 6032 err = vm_fault_to_errno(ret, flags); 6033 remainder = 0; 6034 break; 6035 } 6036 if (ret & VM_FAULT_RETRY) { 6037 if (locked && 6038 !(fault_flags & FAULT_FLAG_RETRY_NOWAIT)) 6039 *locked = 0; 6040 *nr_pages = 0; 6041 /* 6042 * VM_FAULT_RETRY must not return an 6043 * error, it will return zero 6044 * instead. 6045 * 6046 * No need to update "position" as the 6047 * caller will not check it after 6048 * *nr_pages is set to 0. 6049 */ 6050 return i; 6051 } 6052 continue; 6053 } 6054 6055 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; 6056 page = pte_page(huge_ptep_get(pte)); 6057 6058 /* 6059 * If subpage information not requested, update counters 6060 * and skip the same_page loop below. 
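 *
 * For example, with 2 MB huge pages on a 4 KiB base page size
 * (512 subpages), a request with pfn_offset == 0 and remainder >= 512
 * advances vaddr by 2 MB and i by 512 in one step, without filling
 * pages[].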
6061 */ 6062 if (!pages && !vmas && !pfn_offset && 6063 (vaddr + huge_page_size(h) < vma->vm_end) && 6064 (remainder >= pages_per_huge_page(h))) { 6065 vaddr += huge_page_size(h); 6066 remainder -= pages_per_huge_page(h); 6067 i += pages_per_huge_page(h); 6068 spin_unlock(ptl); 6069 continue; 6070 } 6071 6072 /* vaddr may not be aligned to PAGE_SIZE */ 6073 refs = min3(pages_per_huge_page(h) - pfn_offset, remainder, 6074 (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT); 6075 6076 if (pages || vmas) 6077 record_subpages_vmas(mem_map_offset(page, pfn_offset), 6078 vma, refs, 6079 likely(pages) ? pages + i : NULL, 6080 vmas ? vmas + i : NULL); 6081 6082 if (pages) { 6083 /* 6084 * try_grab_folio() should always succeed here, 6085 * because: a) we hold the ptl lock, and b) we've just 6086 * checked that the huge page is present in the page 6087 * tables. If the huge page is present, then the tail 6088 * pages must also be present. The ptl prevents the 6089 * head page and tail pages from being rearranged in 6090 * any way. So this page must be available at this 6091 * point, unless the page refcount overflowed: 6092 */ 6093 if (WARN_ON_ONCE(!try_grab_folio(pages[i], refs, 6094 flags))) { 6095 spin_unlock(ptl); 6096 remainder = 0; 6097 err = -ENOMEM; 6098 break; 6099 } 6100 } 6101 6102 vaddr += (refs << PAGE_SHIFT); 6103 remainder -= refs; 6104 i += refs; 6105 6106 spin_unlock(ptl); 6107 } 6108 *nr_pages = remainder; 6109 /* 6110 * setting position is actually required only if remainder is 6111 * not zero but it's faster not to add a "if (remainder)" 6112 * branch. 6113 */ 6114 *position = vaddr; 6115 6116 return i ? i : err; 6117 } 6118 6119 unsigned long hugetlb_change_protection(struct vm_area_struct *vma, 6120 unsigned long address, unsigned long end, pgprot_t newprot) 6121 { 6122 struct mm_struct *mm = vma->vm_mm; 6123 unsigned long start = address; 6124 pte_t *ptep; 6125 pte_t pte; 6126 struct hstate *h = hstate_vma(vma); 6127 unsigned long pages = 0; 6128 bool shared_pmd = false; 6129 struct mmu_notifier_range range; 6130 6131 /* 6132 * In the case of shared PMDs, the area to flush could be beyond 6133 * start/end. Set range.start/range.end to cover the maximum possible 6134 * range if PMD sharing is possible. 
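 *
 * For example, on x86-64 (PUD_SIZE == 1 GB), a protection change over
 * [1 GB + 2 MB, 1 GB + 4 MB) in a VMA eligible for PMD sharing has its
 * notifier/flush range widened here to [1 GB, 2 GB), since unsharing a
 * PMD page drops mappings for the whole PUD range.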
6135 */ 6136 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 6137 0, vma, mm, start, end); 6138 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 6139 6140 BUG_ON(address >= end); 6141 flush_cache_range(vma, range.start, range.end); 6142 6143 mmu_notifier_invalidate_range_start(&range); 6144 i_mmap_lock_write(vma->vm_file->f_mapping); 6145 for (; address < end; address += huge_page_size(h)) { 6146 spinlock_t *ptl; 6147 ptep = huge_pte_offset(mm, address, huge_page_size(h)); 6148 if (!ptep) 6149 continue; 6150 ptl = huge_pte_lock(h, mm, ptep); 6151 if (huge_pmd_unshare(mm, vma, &address, ptep)) { 6152 pages++; 6153 spin_unlock(ptl); 6154 shared_pmd = true; 6155 continue; 6156 } 6157 pte = huge_ptep_get(ptep); 6158 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { 6159 spin_unlock(ptl); 6160 continue; 6161 } 6162 if (unlikely(is_hugetlb_entry_migration(pte))) { 6163 swp_entry_t entry = pte_to_swp_entry(pte); 6164 6165 if (is_writable_migration_entry(entry)) { 6166 pte_t newpte; 6167 6168 entry = make_readable_migration_entry( 6169 swp_offset(entry)); 6170 newpte = swp_entry_to_pte(entry); 6171 set_huge_swap_pte_at(mm, address, ptep, 6172 newpte, huge_page_size(h)); 6173 pages++; 6174 } 6175 spin_unlock(ptl); 6176 continue; 6177 } 6178 if (!huge_pte_none(pte)) { 6179 pte_t old_pte; 6180 unsigned int shift = huge_page_shift(hstate_vma(vma)); 6181 6182 old_pte = huge_ptep_modify_prot_start(vma, address, ptep); 6183 pte = huge_pte_modify(old_pte, newprot); 6184 pte = arch_make_huge_pte(pte, shift, vma->vm_flags); 6185 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte); 6186 pages++; 6187 } 6188 spin_unlock(ptl); 6189 } 6190 /* 6191 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare 6192 * may have cleared our pud entry and done put_page on the page table: 6193 * once we release i_mmap_rwsem, another task can do the final put_page 6194 * and that page table be reused and filled with junk. If we actually 6195 * did unshare a page of pmds, flush the range corresponding to the pud. 6196 */ 6197 if (shared_pmd) 6198 flush_hugetlb_tlb_range(vma, range.start, range.end); 6199 else 6200 flush_hugetlb_tlb_range(vma, start, end); 6201 /* 6202 * No need to call mmu_notifier_invalidate_range() we are downgrading 6203 * page table protection not changing it to point to a new page. 6204 * 6205 * See Documentation/vm/mmu_notifier.rst 6206 */ 6207 i_mmap_unlock_write(vma->vm_file->f_mapping); 6208 mmu_notifier_invalidate_range_end(&range); 6209 6210 return pages << h->order; 6211 } 6212 6213 /* Return true if reservation was successful, false otherwise. */ 6214 bool hugetlb_reserve_pages(struct inode *inode, 6215 long from, long to, 6216 struct vm_area_struct *vma, 6217 vm_flags_t vm_flags) 6218 { 6219 long chg, add = -1; 6220 struct hstate *h = hstate_inode(inode); 6221 struct hugepage_subpool *spool = subpool_inode(inode); 6222 struct resv_map *resv_map; 6223 struct hugetlb_cgroup *h_cg = NULL; 6224 long gbl_reserve, regions_needed = 0; 6225 6226 /* This should never happen */ 6227 if (from > to) { 6228 VM_WARN(1, "%s called with a negative range\n", __func__); 6229 return false; 6230 } 6231 6232 /* 6233 * Only apply hugepage reservation if asked. 
At fault time, an 6234 * attempt will be made for VM_NORESERVE to allocate a page 6235 * without using reserves 6236 */ 6237 if (vm_flags & VM_NORESERVE) 6238 return true; 6239 6240 /* 6241 * Shared mappings base their reservation on the number of pages that 6242 * are already allocated on behalf of the file. Private mappings need 6243 * to reserve the full area even if read-only as mprotect() may be 6244 * called to make the mapping read-write. Assume !vma is a shm mapping 6245 */ 6246 if (!vma || vma->vm_flags & VM_MAYSHARE) { 6247 /* 6248 * resv_map can not be NULL as hugetlb_reserve_pages is only 6249 * called for inodes for which resv_maps were created (see 6250 * hugetlbfs_get_inode). 6251 */ 6252 resv_map = inode_resv_map(inode); 6253 6254 chg = region_chg(resv_map, from, to, ®ions_needed); 6255 6256 } else { 6257 /* Private mapping. */ 6258 resv_map = resv_map_alloc(); 6259 if (!resv_map) 6260 return false; 6261 6262 chg = to - from; 6263 6264 set_vma_resv_map(vma, resv_map); 6265 set_vma_resv_flags(vma, HPAGE_RESV_OWNER); 6266 } 6267 6268 if (chg < 0) 6269 goto out_err; 6270 6271 if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h), 6272 chg * pages_per_huge_page(h), &h_cg) < 0) 6273 goto out_err; 6274 6275 if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) { 6276 /* For private mappings, the hugetlb_cgroup uncharge info hangs 6277 * of the resv_map. 6278 */ 6279 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h); 6280 } 6281 6282 /* 6283 * There must be enough pages in the subpool for the mapping. If 6284 * the subpool has a minimum size, there may be some global 6285 * reservations already in place (gbl_reserve). 6286 */ 6287 gbl_reserve = hugepage_subpool_get_pages(spool, chg); 6288 if (gbl_reserve < 0) 6289 goto out_uncharge_cgroup; 6290 6291 /* 6292 * Check enough hugepages are available for the reservation. 6293 * Hand the pages back to the subpool if there are not 6294 */ 6295 if (hugetlb_acct_memory(h, gbl_reserve) < 0) 6296 goto out_put_pages; 6297 6298 /* 6299 * Account for the reservations made. Shared mappings record regions 6300 * that have reservations as they are shared by multiple VMAs. 6301 * When the last VMA disappears, the region map says how much 6302 * the reservation was and the page cache tells how much of 6303 * the reservation was consumed. Private mappings are per-VMA and 6304 * only the consumed reservations are tracked. When the VMA 6305 * disappears, the original reservation is the VMA size and the 6306 * consumed reservations are stored in the map. Hence, nothing 6307 * else has to be done for private mappings here 6308 */ 6309 if (!vma || vma->vm_flags & VM_MAYSHARE) { 6310 add = region_add(resv_map, from, to, regions_needed, h, h_cg); 6311 6312 if (unlikely(add < 0)) { 6313 hugetlb_acct_memory(h, -gbl_reserve); 6314 goto out_put_pages; 6315 } else if (unlikely(chg > add)) { 6316 /* 6317 * pages in this range were added to the reserve 6318 * map between region_chg and region_add. This 6319 * indicates a race with alloc_huge_page. Adjust 6320 * the subpool and reserve counts modified above 6321 * based on the difference. 6322 */ 6323 long rsv_adjust; 6324 6325 /* 6326 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the 6327 * reference to h_cg->css. See comment below for detail. 
6328 */ 6329 hugetlb_cgroup_uncharge_cgroup_rsvd( 6330 hstate_index(h), 6331 (chg - add) * pages_per_huge_page(h), h_cg); 6332 6333 rsv_adjust = hugepage_subpool_put_pages(spool, 6334 chg - add); 6335 hugetlb_acct_memory(h, -rsv_adjust); 6336 } else if (h_cg) { 6337 /* 6338 * The file_regions will hold their own reference to 6339 * h_cg->css. So we should release the reference held 6340 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are 6341 * done. 6342 */ 6343 hugetlb_cgroup_put_rsvd_cgroup(h_cg); 6344 } 6345 } 6346 return true; 6347 6348 out_put_pages: 6349 /* put back original number of pages, chg */ 6350 (void)hugepage_subpool_put_pages(spool, chg); 6351 out_uncharge_cgroup: 6352 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h), 6353 chg * pages_per_huge_page(h), h_cg); 6354 out_err: 6355 if (!vma || vma->vm_flags & VM_MAYSHARE) 6356 /* Only call region_abort if the region_chg succeeded but the 6357 * region_add failed or didn't run. 6358 */ 6359 if (chg >= 0 && add < 0) 6360 region_abort(resv_map, from, to, regions_needed); 6361 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 6362 kref_put(&resv_map->refs, resv_map_release); 6363 return false; 6364 } 6365 6366 long hugetlb_unreserve_pages(struct inode *inode, long start, long end, 6367 long freed) 6368 { 6369 struct hstate *h = hstate_inode(inode); 6370 struct resv_map *resv_map = inode_resv_map(inode); 6371 long chg = 0; 6372 struct hugepage_subpool *spool = subpool_inode(inode); 6373 long gbl_reserve; 6374 6375 /* 6376 * Since this routine can be called in the evict inode path for all 6377 * hugetlbfs inodes, resv_map could be NULL. 6378 */ 6379 if (resv_map) { 6380 chg = region_del(resv_map, start, end); 6381 /* 6382 * region_del() can fail in the rare case where a region 6383 * must be split and another region descriptor can not be 6384 * allocated. If end == LONG_MAX, it will not fail. 6385 */ 6386 if (chg < 0) 6387 return chg; 6388 } 6389 6390 spin_lock(&inode->i_lock); 6391 inode->i_blocks -= (blocks_per_huge_page(h) * freed); 6392 spin_unlock(&inode->i_lock); 6393 6394 /* 6395 * If the subpool has a minimum size, the number of global 6396 * reservations to be released may be adjusted. 6397 * 6398 * Note that !resv_map implies freed == 0. So (chg - freed) 6399 * won't go negative. 6400 */ 6401 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed)); 6402 hugetlb_acct_memory(h, -gbl_reserve); 6403 6404 return 0; 6405 } 6406 6407 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE 6408 static unsigned long page_table_shareable(struct vm_area_struct *svma, 6409 struct vm_area_struct *vma, 6410 unsigned long addr, pgoff_t idx) 6411 { 6412 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) + 6413 svma->vm_start; 6414 unsigned long sbase = saddr & PUD_MASK; 6415 unsigned long s_end = sbase + PUD_SIZE; 6416 6417 /* Allow segments to share if only one is marked locked */ 6418 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; 6419 unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK; 6420 6421 /* 6422 * match the virtual addresses, permission and the alignment of the 6423 * page table page. 
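 *
 * In other words, two VMAs can share a PMD page only if the same file
 * offset lands at the same position within a PUD_SIZE-aligned region in
 * both mappings, their vm_flags match apart from mlock state, and the
 * candidate VMA covers the whole PUD-aligned range.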
6424 */ 6425 if (pmd_index(addr) != pmd_index(saddr) || 6426 vm_flags != svm_flags || 6427 !range_in_vma(svma, sbase, s_end)) 6428 return 0; 6429 6430 return saddr; 6431 } 6432 6433 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr) 6434 { 6435 unsigned long base = addr & PUD_MASK; 6436 unsigned long end = base + PUD_SIZE; 6437 6438 /* 6439 * check on proper vm_flags and page table alignment 6440 */ 6441 if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end)) 6442 return true; 6443 return false; 6444 } 6445 6446 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) 6447 { 6448 #ifdef CONFIG_USERFAULTFD 6449 if (uffd_disable_huge_pmd_share(vma)) 6450 return false; 6451 #endif 6452 return vma_shareable(vma, addr); 6453 } 6454 6455 /* 6456 * Determine if start,end range within vma could be mapped by shared pmd. 6457 * If yes, adjust start and end to cover range associated with possible 6458 * shared pmd mappings. 6459 */ 6460 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, 6461 unsigned long *start, unsigned long *end) 6462 { 6463 unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE), 6464 v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE); 6465 6466 /* 6467 * vma needs to span at least one aligned PUD size, and the range 6468 * must be at least partially within in. 6469 */ 6470 if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) || 6471 (*end <= v_start) || (*start >= v_end)) 6472 return; 6473 6474 /* Extend the range to be PUD aligned for a worst case scenario */ 6475 if (*start > v_start) 6476 *start = ALIGN_DOWN(*start, PUD_SIZE); 6477 6478 if (*end < v_end) 6479 *end = ALIGN(*end, PUD_SIZE); 6480 } 6481 6482 /* 6483 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() 6484 * and returns the corresponding pte. While this is not necessary for the 6485 * !shared pmd case because we can allocate the pmd later as well, it makes the 6486 * code much cleaner. 6487 * 6488 * This routine must be called with i_mmap_rwsem held in at least read mode if 6489 * sharing is possible. For hugetlbfs, this prevents removal of any page 6490 * table entries associated with the address space. This is important as we 6491 * are setting up sharing based on existing page table entries (mappings). 6492 */ 6493 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, 6494 unsigned long addr, pud_t *pud) 6495 { 6496 struct address_space *mapping = vma->vm_file->f_mapping; 6497 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + 6498 vma->vm_pgoff; 6499 struct vm_area_struct *svma; 6500 unsigned long saddr; 6501 pte_t *spte = NULL; 6502 pte_t *pte; 6503 spinlock_t *ptl; 6504 6505 i_mmap_assert_locked(mapping); 6506 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { 6507 if (svma == vma) 6508 continue; 6509 6510 saddr = page_table_shareable(svma, vma, addr, idx); 6511 if (saddr) { 6512 spte = huge_pte_offset(svma->vm_mm, saddr, 6513 vma_mmu_pagesize(svma)); 6514 if (spte) { 6515 get_page(virt_to_page(spte)); 6516 break; 6517 } 6518 } 6519 } 6520 6521 if (!spte) 6522 goto out; 6523 6524 ptl = huge_pte_lock(hstate_vma(vma), mm, spte); 6525 if (pud_none(*pud)) { 6526 pud_populate(mm, pud, 6527 (pmd_t *)((unsigned long)spte & PAGE_MASK)); 6528 mm_inc_nr_pmds(mm); 6529 } else { 6530 put_page(virt_to_page(spte)); 6531 } 6532 spin_unlock(ptl); 6533 out: 6534 pte = (pte_t *)pmd_alloc(mm, pud, addr); 6535 return pte; 6536 } 6537 6538 /* 6539 * unmap huge page backed by shared pte. 
6540 * 6541 * Hugetlb pte page is ref counted at the time of mapping. If pte is shared 6542 * indicated by page_count > 1, unmap is achieved by clearing pud and 6543 * decrementing the ref count. If count == 1, the pte page is not shared. 6544 * 6545 * Called with page table lock held and i_mmap_rwsem held in write mode. 6546 * 6547 * returns: 1 successfully unmapped a shared pte page 6548 * 0 the underlying pte page is not shared, or it is the last user 6549 */ 6550 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, 6551 unsigned long *addr, pte_t *ptep) 6552 { 6553 pgd_t *pgd = pgd_offset(mm, *addr); 6554 p4d_t *p4d = p4d_offset(pgd, *addr); 6555 pud_t *pud = pud_offset(p4d, *addr); 6556 6557 i_mmap_assert_write_locked(vma->vm_file->f_mapping); 6558 BUG_ON(page_count(virt_to_page(ptep)) == 0); 6559 if (page_count(virt_to_page(ptep)) == 1) 6560 return 0; 6561 6562 pud_clear(pud); 6563 put_page(virt_to_page(ptep)); 6564 mm_dec_nr_pmds(mm); 6565 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE; 6566 return 1; 6567 } 6568 6569 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ 6570 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, 6571 unsigned long addr, pud_t *pud) 6572 { 6573 return NULL; 6574 } 6575 6576 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, 6577 unsigned long *addr, pte_t *ptep) 6578 { 6579 return 0; 6580 } 6581 6582 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, 6583 unsigned long *start, unsigned long *end) 6584 { 6585 } 6586 6587 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) 6588 { 6589 return false; 6590 } 6591 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ 6592 6593 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB 6594 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, 6595 unsigned long addr, unsigned long sz) 6596 { 6597 pgd_t *pgd; 6598 p4d_t *p4d; 6599 pud_t *pud; 6600 pte_t *pte = NULL; 6601 6602 pgd = pgd_offset(mm, addr); 6603 p4d = p4d_alloc(mm, pgd, addr); 6604 if (!p4d) 6605 return NULL; 6606 pud = pud_alloc(mm, p4d, addr); 6607 if (pud) { 6608 if (sz == PUD_SIZE) { 6609 pte = (pte_t *)pud; 6610 } else { 6611 BUG_ON(sz != PMD_SIZE); 6612 if (want_pmd_share(vma, addr) && pud_none(*pud)) 6613 pte = huge_pmd_share(mm, vma, addr, pud); 6614 else 6615 pte = (pte_t *)pmd_alloc(mm, pud, addr); 6616 } 6617 } 6618 BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte)); 6619 6620 return pte; 6621 } 6622 6623 /* 6624 * huge_pte_offset() - Walk the page table to resolve the hugepage 6625 * entry at address @addr 6626 * 6627 * Return: Pointer to page table entry (PUD or PMD) for 6628 * address @addr, or NULL if a !p*d_present() entry is encountered and the 6629 * size @sz doesn't match the hugepage size at this level of the page 6630 * table. 
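 *
 * The returned entry may be none or non-present; callers read it with
 * huge_ptep_get() and check it themselves (see hugetlb_fault() and
 * follow_hugetlb_page()).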
6631 */ 6632 pte_t *huge_pte_offset(struct mm_struct *mm, 6633 unsigned long addr, unsigned long sz) 6634 { 6635 pgd_t *pgd; 6636 p4d_t *p4d; 6637 pud_t *pud; 6638 pmd_t *pmd; 6639 6640 pgd = pgd_offset(mm, addr); 6641 if (!pgd_present(*pgd)) 6642 return NULL; 6643 p4d = p4d_offset(pgd, addr); 6644 if (!p4d_present(*p4d)) 6645 return NULL; 6646 6647 pud = pud_offset(p4d, addr); 6648 if (sz == PUD_SIZE) 6649 /* must be pud huge, non-present or none */ 6650 return (pte_t *)pud; 6651 if (!pud_present(*pud)) 6652 return NULL; 6653 /* must have a valid entry and size to go further */ 6654 6655 pmd = pmd_offset(pud, addr); 6656 /* must be pmd huge, non-present or none */ 6657 return (pte_t *)pmd; 6658 } 6659 6660 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ 6661 6662 /* 6663 * These functions are overwritable if your architecture needs its own 6664 * behavior. 6665 */ 6666 struct page * __weak 6667 follow_huge_addr(struct mm_struct *mm, unsigned long address, 6668 int write) 6669 { 6670 return ERR_PTR(-EINVAL); 6671 } 6672 6673 struct page * __weak 6674 follow_huge_pd(struct vm_area_struct *vma, 6675 unsigned long address, hugepd_t hpd, int flags, int pdshift) 6676 { 6677 WARN(1, "hugepd follow called with no support for hugepage directory format\n"); 6678 return NULL; 6679 } 6680 6681 struct page * __weak 6682 follow_huge_pmd(struct mm_struct *mm, unsigned long address, 6683 pmd_t *pmd, int flags) 6684 { 6685 struct page *page = NULL; 6686 spinlock_t *ptl; 6687 pte_t pte; 6688 6689 /* FOLL_GET and FOLL_PIN are mutually exclusive. */ 6690 if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) == 6691 (FOLL_PIN | FOLL_GET))) 6692 return NULL; 6693 6694 retry: 6695 ptl = pmd_lockptr(mm, pmd); 6696 spin_lock(ptl); 6697 /* 6698 * make sure that the address range covered by this pmd is not 6699 * unmapped from other threads. 6700 */ 6701 if (!pmd_huge(*pmd)) 6702 goto out; 6703 pte = huge_ptep_get((pte_t *)pmd); 6704 if (pte_present(pte)) { 6705 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT); 6706 /* 6707 * try_grab_page() should always succeed here, because: a) we 6708 * hold the pmd (ptl) lock, and b) we've just checked that the 6709 * huge pmd (head) page is present in the page tables. The ptl 6710 * prevents the head page and tail pages from being rearranged 6711 * in any way. So this page must be available at this point, 6712 * unless the page refcount overflowed: 6713 */ 6714 if (WARN_ON_ONCE(!try_grab_page(page, flags))) { 6715 page = NULL; 6716 goto out; 6717 } 6718 } else { 6719 if (is_hugetlb_entry_migration(pte)) { 6720 spin_unlock(ptl); 6721 __migration_entry_wait(mm, (pte_t *)pmd, ptl); 6722 goto retry; 6723 } 6724 /* 6725 * hwpoisoned entry is treated as no_page_table in 6726 * follow_page_mask(). 
6727 */ 6728 } 6729 out: 6730 spin_unlock(ptl); 6731 return page; 6732 } 6733 6734 struct page * __weak 6735 follow_huge_pud(struct mm_struct *mm, unsigned long address, 6736 pud_t *pud, int flags) 6737 { 6738 if (flags & (FOLL_GET | FOLL_PIN)) 6739 return NULL; 6740 6741 return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT); 6742 } 6743 6744 struct page * __weak 6745 follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags) 6746 { 6747 if (flags & (FOLL_GET | FOLL_PIN)) 6748 return NULL; 6749 6750 return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT); 6751 } 6752 6753 bool isolate_huge_page(struct page *page, struct list_head *list) 6754 { 6755 bool ret = true; 6756 6757 spin_lock_irq(&hugetlb_lock); 6758 if (!PageHeadHuge(page) || 6759 !HPageMigratable(page) || 6760 !get_page_unless_zero(page)) { 6761 ret = false; 6762 goto unlock; 6763 } 6764 ClearHPageMigratable(page); 6765 list_move_tail(&page->lru, list); 6766 unlock: 6767 spin_unlock_irq(&hugetlb_lock); 6768 return ret; 6769 } 6770 6771 int get_hwpoison_huge_page(struct page *page, bool *hugetlb) 6772 { 6773 int ret = 0; 6774 6775 *hugetlb = false; 6776 spin_lock_irq(&hugetlb_lock); 6777 if (PageHeadHuge(page)) { 6778 *hugetlb = true; 6779 if (HPageFreed(page) || HPageMigratable(page)) 6780 ret = get_page_unless_zero(page); 6781 else 6782 ret = -EBUSY; 6783 } 6784 spin_unlock_irq(&hugetlb_lock); 6785 return ret; 6786 } 6787 6788 int get_huge_page_for_hwpoison(unsigned long pfn, int flags) 6789 { 6790 int ret; 6791 6792 spin_lock_irq(&hugetlb_lock); 6793 ret = __get_huge_page_for_hwpoison(pfn, flags); 6794 spin_unlock_irq(&hugetlb_lock); 6795 return ret; 6796 } 6797 6798 void putback_active_hugepage(struct page *page) 6799 { 6800 spin_lock_irq(&hugetlb_lock); 6801 SetHPageMigratable(page); 6802 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist); 6803 spin_unlock_irq(&hugetlb_lock); 6804 put_page(page); 6805 } 6806 6807 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason) 6808 { 6809 struct hstate *h = page_hstate(oldpage); 6810 6811 hugetlb_cgroup_migrate(oldpage, newpage); 6812 set_page_owner_migrate_reason(newpage, reason); 6813 6814 /* 6815 * transfer temporary state of the new huge page. This is 6816 * reverse to other transitions because the newpage is going to 6817 * be final while the old one will be freed so it takes over 6818 * the temporary status. 6819 * 6820 * Also note that we have to transfer the per-node surplus state 6821 * here as well otherwise the global surplus count will not match 6822 * the per-node's. 6823 */ 6824 if (HPageTemporary(newpage)) { 6825 int old_nid = page_to_nid(oldpage); 6826 int new_nid = page_to_nid(newpage); 6827 6828 SetHPageTemporary(oldpage); 6829 ClearHPageTemporary(newpage); 6830 6831 /* 6832 * There is no need to transfer the per-node surplus state 6833 * when we do not cross the node. 6834 */ 6835 if (new_nid == old_nid) 6836 return; 6837 spin_lock_irq(&hugetlb_lock); 6838 if (h->surplus_huge_pages_node[old_nid]) { 6839 h->surplus_huge_pages_node[old_nid]--; 6840 h->surplus_huge_pages_node[new_nid]++; 6841 } 6842 spin_unlock_irq(&hugetlb_lock); 6843 } 6844 } 6845 6846 /* 6847 * This function will unconditionally remove all the shared pmd pgtable entries 6848 * within the specific vma for a hugetlbfs memory range. 
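 * i_mmap_rwsem is taken in write mode and the page table lock is taken
 * for each PUD_SIZE step below, as huge_pmd_unshare() requires.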
6849 */ 6850 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) 6851 { 6852 struct hstate *h = hstate_vma(vma); 6853 unsigned long sz = huge_page_size(h); 6854 struct mm_struct *mm = vma->vm_mm; 6855 struct mmu_notifier_range range; 6856 unsigned long address, start, end; 6857 spinlock_t *ptl; 6858 pte_t *ptep; 6859 6860 if (!(vma->vm_flags & VM_MAYSHARE)) 6861 return; 6862 6863 start = ALIGN(vma->vm_start, PUD_SIZE); 6864 end = ALIGN_DOWN(vma->vm_end, PUD_SIZE); 6865 6866 if (start >= end) 6867 return; 6868 6869 /* 6870 * No need to call adjust_range_if_pmd_sharing_possible(), because 6871 * we have already done the PUD_SIZE alignment. 6872 */ 6873 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, 6874 start, end); 6875 mmu_notifier_invalidate_range_start(&range); 6876 i_mmap_lock_write(vma->vm_file->f_mapping); 6877 for (address = start; address < end; address += PUD_SIZE) { 6878 unsigned long tmp = address; 6879 6880 ptep = huge_pte_offset(mm, address, sz); 6881 if (!ptep) 6882 continue; 6883 ptl = huge_pte_lock(h, mm, ptep); 6884 /* We don't want 'address' to be changed */ 6885 huge_pmd_unshare(mm, vma, &tmp, ptep); 6886 spin_unlock(ptl); 6887 } 6888 flush_hugetlb_tlb_range(vma, start, end); 6889 i_mmap_unlock_write(vma->vm_file->f_mapping); 6890 /* 6891 * No need to call mmu_notifier_invalidate_range(), see 6892 * Documentation/vm/mmu_notifier.rst. 6893 */ 6894 mmu_notifier_invalidate_range_end(&range); 6895 } 6896 6897 #ifdef CONFIG_CMA 6898 static bool cma_reserve_called __initdata; 6899 6900 static int __init cmdline_parse_hugetlb_cma(char *p) 6901 { 6902 int nid, count = 0; 6903 unsigned long tmp; 6904 char *s = p; 6905 6906 while (*s) { 6907 if (sscanf(s, "%lu%n", &tmp, &count) != 1) 6908 break; 6909 6910 if (s[count] == ':') { 6911 if (tmp >= MAX_NUMNODES) 6912 break; 6913 nid = array_index_nospec(tmp, MAX_NUMNODES); 6914 6915 s += count + 1; 6916 tmp = memparse(s, &s); 6917 hugetlb_cma_size_in_node[nid] = tmp; 6918 hugetlb_cma_size += tmp; 6919 6920 /* 6921 * Skip the separator if have one, otherwise 6922 * break the parsing. 6923 */ 6924 if (*s == ',') 6925 s++; 6926 else 6927 break; 6928 } else { 6929 hugetlb_cma_size = memparse(p, &p); 6930 break; 6931 } 6932 } 6933 6934 return 0; 6935 } 6936 6937 early_param("hugetlb_cma", cmdline_parse_hugetlb_cma); 6938 6939 void __init hugetlb_cma_reserve(int order) 6940 { 6941 unsigned long size, reserved, per_node; 6942 bool node_specific_cma_alloc = false; 6943 int nid; 6944 6945 cma_reserve_called = true; 6946 6947 if (!hugetlb_cma_size) 6948 return; 6949 6950 for (nid = 0; nid < MAX_NUMNODES; nid++) { 6951 if (hugetlb_cma_size_in_node[nid] == 0) 6952 continue; 6953 6954 if (!node_state(nid, N_ONLINE)) { 6955 pr_warn("hugetlb_cma: invalid node %d specified\n", nid); 6956 hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; 6957 hugetlb_cma_size_in_node[nid] = 0; 6958 continue; 6959 } 6960 6961 if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) { 6962 pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n", 6963 nid, (PAGE_SIZE << order) / SZ_1M); 6964 hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; 6965 hugetlb_cma_size_in_node[nid] = 0; 6966 } else { 6967 node_specific_cma_alloc = true; 6968 } 6969 } 6970 6971 /* Validate the CMA size again in case some invalid nodes specified. 
*/ 6972 if (!hugetlb_cma_size) 6973 return; 6974 6975 if (hugetlb_cma_size < (PAGE_SIZE << order)) { 6976 pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n", 6977 (PAGE_SIZE << order) / SZ_1M); 6978 hugetlb_cma_size = 0; 6979 return; 6980 } 6981 6982 if (!node_specific_cma_alloc) { 6983 /* 6984 * If 3 GB area is requested on a machine with 4 numa nodes, 6985 * let's allocate 1 GB on first three nodes and ignore the last one. 6986 */ 6987 per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes); 6988 pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n", 6989 hugetlb_cma_size / SZ_1M, per_node / SZ_1M); 6990 } 6991 6992 reserved = 0; 6993 for_each_node_state(nid, N_ONLINE) { 6994 int res; 6995 char name[CMA_MAX_NAME]; 6996 6997 if (node_specific_cma_alloc) { 6998 if (hugetlb_cma_size_in_node[nid] == 0) 6999 continue; 7000 7001 size = hugetlb_cma_size_in_node[nid]; 7002 } else { 7003 size = min(per_node, hugetlb_cma_size - reserved); 7004 } 7005 7006 size = round_up(size, PAGE_SIZE << order); 7007 7008 snprintf(name, sizeof(name), "hugetlb%d", nid); 7009 /* 7010 * Note that 'order per bit' is based on smallest size that 7011 * may be returned to CMA allocator in the case of 7012 * huge page demotion. 7013 */ 7014 res = cma_declare_contiguous_nid(0, size, 0, 7015 PAGE_SIZE << HUGETLB_PAGE_ORDER, 7016 0, false, name, 7017 &hugetlb_cma[nid], nid); 7018 if (res) { 7019 pr_warn("hugetlb_cma: reservation failed: err %d, node %d", 7020 res, nid); 7021 continue; 7022 } 7023 7024 reserved += size; 7025 pr_info("hugetlb_cma: reserved %lu MiB on node %d\n", 7026 size / SZ_1M, nid); 7027 7028 if (reserved >= hugetlb_cma_size) 7029 break; 7030 } 7031 7032 if (!reserved) 7033 /* 7034 * hugetlb_cma_size is used to determine if allocations from 7035 * cma are possible. Set to zero if no cma regions are set up. 7036 */ 7037 hugetlb_cma_size = 0; 7038 } 7039 7040 void __init hugetlb_cma_check(void) 7041 { 7042 if (!hugetlb_cma_size || cma_reserve_called) 7043 return; 7044 7045 pr_warn("hugetlb_cma: the option isn't supported by current arch\n"); 7046 } 7047 7048 #endif /* CONFIG_CMA */ 7049
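
/*
 * Example hugetlb_cma= values accepted by cmdline_parse_hugetlb_cma()
 * above (sizes go through memparse(), so K/M/G suffixes work):
 *
 *	hugetlb_cma=4G		one 4 GB area, spread across the online
 *				nodes by hugetlb_cma_reserve()
 *	hugetlb_cma=0:1G,2:1G	per-node areas of 1 GB on node 0 and
 *				1 GB on node 2
 */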